diff --git "a/4954.jsonl" "b/4954.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4954.jsonl"
@@ -0,0 +1,306 @@
+{"seq_id":"10380764069","text":"'''write code to translate a string in roman numerals to integers'''\n# Plan:\n# create a dict of roman numerals\n# iterate through the given string,\n# if the next char is greater than the current char\n# add current char as negative value\n# else\n# add current char as positive value\nimport sys\n\nromans = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n\n\ndef roman_to_arabic(roman):\n arabic = 0\n for i in range(len(roman) - 1): # skip the last char so the if works\n if romans[roman[i]] < romans[roman[i + 1]]:\n arabic -= romans[roman[i]]\n else:\n arabic += romans[roman[i]]\n arabic += romans[roman[-1]] # the final character will always be added\n return arabic\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n print(roman_to_arabic(sys.argv[1]))\n else:\n print(roman_to_arabic('CLVII'))\n","repo_name":"eyvonne/LeetCodePractice","sub_path":"convertRoman.py","file_name":"convertRoman.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11036529750","text":"from queue import PriorityQueue\nimport sys\n\n\nif __name__ == \"__main__\":\n k = 10 if len(sys.argv) < 2 else int(sys.argv[1])\n pq = PriorityQueue()\n with open(\"cities.txt\") as f:\n for line in f:\n (city, country, population, latitute, longtude) = line.split(\"\\t\")\n population = int(population)\n if pq.qsize() < k:\n pq.put((population, city, country))\n else:\n if population > pq.queue[0][0]:\n pq.get()\n pq.put((population, city, country))\n for (population, city, country) in reversed(sorted(pq.queue)):\n print(\"%s\\t%s\\t%d\" % (city, country, population))\n","repo_name":"fkarg/uni-stuff","sub_path":"semester_two/algoDat/public/code/vorlesung-10/top-k.py","file_name":"top-k.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73082779879","text":"from typing import List, Tuple\nimport sys\nimport random\nimport logging\nfrom enum import Enum\nimport copy\n\nclass InvalidMazeSizeError(Exception):\n pass\n\nclass Direction(Enum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n\nclass CellType(Enum):\n PATH = 0\n WALL = 1\n PLAYER = 2\n GOAL = 3\n START = 4\n FOOTPRINT = 5\n\n# 配列だと引数の順番がこんがらがってきたので作成\nclass Coord:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return f\"({self.x}, {self.y})\"\n\n def __eq__(self, other):\n return self.x == other.x and self.y == other.y\n\n def right(self):\n return Coord(self.x+1, self.y)\n\n def left(self):\n return Coord(self.x-1, self.y)\n\n def up(self):\n return Coord(self.x, self.y-1)\n\n def down(self):\n return Coord(self.x, self.y+1)\n\n\nclass Maze:\n _width = 0\n _height = 0\n\n mazeId: str = \"\" # Eq用のハッシュ。毎回生成するのはコストがかかる可能性があるため\n _data = [0][0] # _data[y][x]。 [x][y]ではないので注意\n start: Coord = None\n goal: Coord = None\n _goalProposalList: List[Coord] = [] # 生成途中用\n\n # 変数名がわかりづらいので改名_startPath\n _nextDigProposals: List[Coord] = []\n\n _isCreated = False\n\n _playerPoint: Coord = None\n _playerMovementHistory: List[Coord] = []\n\n\n logger: logging.Logger\n\n\n def __str__(self):\n created = \"created\" if self._isCreated else \"not created\"\n playerInfo = \"doesn't exist\" if self._playerPoint is None else str(self._playerPoint)\n return f\"Maze: ({self._width}, {self._height}): {created}; Player {playerInfo}\"\n\n def __eq__(self, other):\n return type(self) == type(other) and self.mazeId == other.mazeId\\\n and self._playerPoint == other._playerPoint\n\n # Utilities {{{\n def isValidCoord(self, c: Coord) -> bool:\n \"\"\" この迷路内に存在する座標かどうかを判定する\n \"\"\"\n if c.x < 0 or self._width - 1 < c.x:\n return False\n if c.y < 0 or self._height - 1 < c.y:\n return False\n\n return True\n\n def isCellType(self, c: Coord, cellType: CellType) -> bool:\n # andは、前者が失敗していれば後者を実行しない(はず)\n return self.isValidCoord(c) and self._data[c.y][c.x] == cellType\n\n def isGoal(self) -> bool:\n return self._playerPoint == self.goal\n # }}}\n\n # Initializations {{{\n def __init__(self, width: int, height: int, logger=None) -> None:\n if logger is None:\n self.logger = logging.getLogger(\"Maze.{__name__}\")\n logging.Formatter\n _handler = logging.StreamHandler()\n _handler.setLevel(logging.INFO)\n self.logger.setLevel(logging.INFO)\n self.logger.addHandler(_handler)\n else:\n self.logger = logger\n self.logger.info(\"logger was replaced\")\n\n if width < 5 or height < 5 or width % 2 == 0 or height % 2 == 0:\n self.logger.error(\"迷路の縦・横は5以上の奇数である必要があります\")\n raise InvalidMazeSizeError\n\n\n self._width = width\n self._height = height\n\n # self__hash__は、ミュータブルな変数を持つクラスであるため定義できない\n # https://docs.python.org/ja/3/reference/datamodel.html#object.__hash__\n self.mazeId = hash(f\"{self._width}{self._height}{self.start}{self.goal}{str(self._data)}\")\n\n def create(self):\n \"\"\"迷路を生成する\n \"\"\"\n self._isCreated = False\n self._data = [[CellType.WALL for _ in range(0, self._width)] for _ in range(0, self._height)]\n\n firstPos = Coord(random.randint(1, self._width-2), random.randint(1, self._height-2))\n self.dig(firstPos)\n self.setStart(firstPos)\n self.setPlayer(firstPos)\n self._setGoal()\n\n\n def _setGoal(self):\n \"\"\" ゴール地点をself._goalProposalListからランダムに設定する\n \"\"\"\n if self.goal is not None:\n self.logger.debug(\"ゴール地点が既に設定されているため、設定しませんでした\")\n return\n if len(self._goalProposalList) == 0:\n 
self.logger.debug(\"ゴール地点の候補リストが存在しないため、設定できませんでした\")\n return\n self.goal = random.choice(self._goalProposalList)\n\n def setStart(self, c: Coord) -> None:\n \"\"\" スタート地点を設定する。\n 先に迷路を生成すること。\n \"\"\"\n if not self._isCreated:\n self.logger.info(\"迷路が生成されていないため、スタート地点が設定できませんでした\")\n return\n if not self.isValidCoord(c):\n self.logger.info(\"座標{c}は有効な座標ではないため、スタート地点が設定できませんでした\")\n return\n\n self.start = c\n\n def dig(self, c):\n \"\"\"\n 終了するまで掘る。一マスのみ掘るのではない。\n 再帰だと上限に引っかかる可能性があるため、ループで処理する\n\n 1. 上下左右の方向で、どの方向が掘り進められるのか判定する。掘り進められるかどうかの定義は、現在の座標から1マス先及び2マス先の座標まで壁となっていることである\n 2. 掘り進めることが可能な方向をランダムに選択して、2マス先まで道として、現在位置を更新する\n 3. 現在位置をチェックポイントとしてリストに追加し、保存する\n 4. 掘り進めることができなくなるまで1.~3.を繰り返す\n 5. 掘り進めることができなくなったら、これまで3.に記録していた座標からどこかしらかに掘り進めることが可能な座標を取得してまた1.からの作業を行う\n 6. どこにも掘ることが出来る座標がなくなった場合は、その時点で処理を終了する\n \"\"\"\n\n if not self.isValidCoord(c):\n self.logger.info(f\"座標({c.x}, {c.y})はこの迷路の有効範囲外のため、digに失敗しました\")\n return\n\n self._data[c.y][c.x] = CellType.PATH\n # TODO: この無限ループは最大4回しか呼び出されないはず。\n # Forなどを使った方がわかりやすくなるかもしれない\n #\n # ここの無限ループの趣旨は、「digに与えられた座標の上下左右それぞれについて掘れるか確認し、掘れるなら掘る」\n # なので実際はそこまで無限にループしない\n while True:\n digDirections = []\n self.logger.debug(\"start checking diggable directions\")\n # 必ず2マス同時に掘り進めるため、一マス横が掘れるかどうかは確認しなくて良い\n # (チョットしかわからん)\n if self.isValidCoord(Coord(c.x, c.y - 2)) and self.isCellType(Coord(c.x, c.y-2), CellType.WALL):\n digDirections.append(Direction.UP)\n if self.isValidCoord(Coord(c.x, c.y + 2)) and self.isCellType(Coord(c.x, c.y+2), CellType.WALL):\n digDirections.append(Direction.DOWN)\n if self.isValidCoord(Coord(c.x - 2, c.y)) and self.isCellType(Coord(c.x-2, c.y), CellType.WALL):\n digDirections.append(Direction.LEFT)\n if self.isValidCoord(Coord(c.x + 2, c.y)) and self.isCellType(Coord(c.x+2, c.y), CellType.WALL):\n digDirections.append(Direction.RIGHT)\n\n if len(digDirections) == 0:\n # このif分岐は元の座標から4方向全て掘った後に通る場所なので、\n # ここにくる時点では行き止まりかはわからない\n #\n # 行き止まりならゴールの候補地にする\n if len(list(filter(lambda n: n == True\n , [self.isCellType(Coord(c.x-1,c.y), CellType.WALL)\n ,self.isCellType(Coord(c.x+1,c.y), CellType.WALL)\n ,self.isCellType(Coord(c.x,c.y-1), CellType.WALL)\n ,self.isCellType(Coord(c.x,c.y+1), CellType.WALL)]))) == 3:\n self._goalProposalList.append(c)\n break\n\n direction = random.choice(digDirections)\n self.logger.debug(f\"direction {direction} is chosen by random choice\")\n\n if direction == Direction.UP:\n self._data[c.y - 1][c.x] = CellType.PATH\n self._data[c.y - 2][c.x] = CellType.PATH\n self._nextDigProposals.append(Coord(c.x, c.y - 2))\n elif direction == Direction.DOWN:\n self._data[c.y + 1][c.x] = CellType.PATH\n self._data[c.y + 2][c.x] = CellType.PATH\n self._nextDigProposals.append(Coord(c.x, c.y + 2))\n elif direction == Direction.LEFT:\n self._data[c.y][c.x - 1] = CellType.PATH\n self._data[c.y][c.x - 2] = CellType.PATH\n self._nextDigProposals.append(Coord(c.x - 2, c.y))\n elif direction == Direction.RIGHT:\n self._data[c.y][c.x + 1] = CellType.PATH\n self._data[c.y][c.x + 2] = CellType.PATH\n self._nextDigProposals.append(Coord(c.x + 2, c.y))\n\n nextDigProposalsLen = len(self._nextDigProposals)\n self.logger.debug(f\"nextDigProposalsLen: {nextDigProposalsLen}\")\n if nextDigProposalsLen > 0:\n path = self._nextDigProposals.pop(random.randint(0, nextDigProposalsLen - 1))\n self.dig(path)\n else:\n self._isCreated = True\n # }}}\n\n # Player movements {{{\n def setPlayer(self, c: Coord):\n if c.x < 0 or self._width < c.x:\n self.logger.debug(f\"x座標'{c.x}'は有効範囲外のため、動かせません\")\n return\n if c.y < 0 or 
self._height < c.y:\n self.logger.debug(f\"y座標'{c.y}'は有効範囲外のため、動かせません\")\n return\n\n if not self.isCellType(c, CellType.PATH):\n self.logger.debug(f\"座標({c.x}, {c.y})は道ではないため、動かせません\")\n return\n\n prevPlayerCoord = self._playerPoint\n self._playerPoint = c\n\n if c in self._playerMovementHistory:\n self._playerMovementHistory.remove(c)\n if prevPlayerCoord in self._playerMovementHistory:\n self._playerMovementHistory.remove(prevPlayerCoord)\n\n if not c in self._playerMovementHistory:\n self._playerMovementHistory.append(c)\n\n\n def movePlayer(self, direction):\n if self._playerPoint is None:\n self.logger.debug(\"プレイヤーが迷路内に存在しないため、動かせません\")\n return\n pos = self._playerPoint\n checkpos: Coord\n if direction == Direction.UP:\n checkpos = Coord(pos.x, pos.y - 1)\n elif direction == Direction.DOWN:\n checkpos = Coord(pos.x, pos.y + 1)\n elif direction == Direction.LEFT:\n checkpos = Coord(pos.x - 1, pos.y)\n elif direction == Direction.RIGHT:\n checkpos = Coord(pos.x + 1, pos.y)\n else:\n self.logger.warn(f\"direction: '{direction}' is not recognized\")\n\n self.setPlayer(checkpos)\n # }}}\n\n def draw(self):\n if not self._isCreated:\n self.logger.info(\"迷路は生成されていないため描画できません\")\n return\n\n result = \"\"\n\n # PLAYER,GOAL, STARTは_data内に存在しないため、突っ込む\n # PLAYERを最後に入れることで、スタート時点やゴール地点にいる際もプレイヤーを表示させる。\n self.logger.debug(f\"data is {self._data}\")\n rendering = copy.deepcopy(self._data)\n for c in self._playerMovementHistory:\n rendering[c.y][c.x] = CellType.FOOTPRINT\n rendering[self.goal.y][self.goal.x] = CellType.GOAL\n rendering[self.start.y][self.start.x] = CellType.START\n if self._playerPoint is not None:\n self.logger.debug(\"Add player to the rendering\")\n rendering[self._playerPoint.y][self._playerPoint.x] = CellType.PLAYER\n\n for row in rendering:\n for cell in row:\n if cell == CellType.PATH:\n result += \" \"\n elif cell == CellType.WALL:\n result += \"#\"\n elif cell == CellType.PLAYER:\n result += \"@\"\n #result += \"[red]@[/red]\"\n elif cell == CellType.GOAL:\n result += \"G\"\n elif cell == CellType.START:\n result += \"S\"\n elif cell == CellType.FOOTPRINT:\n #result += \"[on green] [/on green]\"\n result += \".\"\n else:\n self.logger.error(f\"row: '{row}' is neither PATH nor WALL nor PLAYER\")\n result += \"\\n\"\n\n return result\n","repo_name":"Cj-bc/playground","sub_path":"python/maze/maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":13321,"program_lang":"python","lang":"ja","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"28013041643","text":"# -*- coding:utf-8 -*-\n\nimport base\nfrom api.html import MYHTMLParser\n\nclass planning(base.base):\n def initialize(self):\n config = {'isDataBase': True}\n base.base.initialize(self, config)\n self.projectService = self.importService('project_planning')\n\n def index(self):\n strMenu = 'planning_manager'\n try:\n intPage = int(self.I('page'))\n except ValueError:\n intPage = 1\n\n # 单页数据条数\n intPageDataNum = 10\n # 分页url\n strPageUrl = '/project/planning?'\n tupData, intRows = self.projectService.get_planningPage(intPage, intPageDataNum)\n for idx, item in enumerate(tupData, 1):\n item['idx'] = (intPage - 1) * intPageDataNum + idx\n item['updateTime'] = self.formatTime(item.get('updateTime'), '%Y-%m-%d')\n\n self.dicViewData['planning'] = tupData\n self.dicViewData['page_html'] = self.page(intPage, intPageDataNum, intRows, strPageUrl)\n\n self.dicViewData['menu'] = strMenu\n self.display('planning', 'project')\n\n def create(self):\n dicArgs = {\n \"title\":self.I('planning_title'),\n \"content\":self.I('planning_content'),\n }\n status = self.projectService.create(dicArgs)\n if status==200:\n strRedirectUrl = '/project/planning'\n else:\n strRedirectUrl = '/500'\n self.redirect(strRedirectUrl)\n\n def details(self):\n planID = self.I('id')\n strMenu = 'planning_manager'\n uid = self.current_user.get('id')\n group = self.projectService.check_group(planID, uid)\n planInfo = self.projectService.details(planID)\n planInfo['updateTime'] = self.formatTime(planInfo.get('updateTime'), '%Y-%m-%d')\n planInfo['createTime'] = self.formatTime(planInfo.get('createTime'), '%Y-%m-%d')\n self.dicViewData['group'] = group\n self.dicViewData['groupData'] = self.importService('admin_user').get_group(uid, 5)\n self.dicViewData['menu'] = strMenu\n self.dicViewData['planInfo'] = planInfo\n #print \"group = \", group\n\n self.display('details', 'project')\n\n def update(self):\n dicArgs = {\n \"id\":self.I('id'),\n \"title\":self.I('planning_title'),\n \"content\":self.I('planning_content'),\n }\n strMenu = 'planning_manager'\n status = self.projectService.update(dicArgs)\n if status==200:\n planID = self.I('id')\n planInfo = self.projectService.details(planID)\n planInfo['updateTime'] = self.formatTime(planInfo.get('updateTime'), '%Y-%m-%d')\n planInfo['createTime'] = self.formatTime(planInfo.get('createTime'), '%Y-%m-%d')\n self.dicViewData['menu'] = strMenu\n self.dicViewData['planInfo'] = planInfo\n self.display('details', 'project')\n else:\n strRedirectUrl = '/500'\n self.redirect(strRedirectUrl)\n\n def load(self):\n sourceData = self.I('planInfo_content')\n file_name = self.I('planInfo_title')+\".docx\"\n path = self.dicConfig['UPLOAD_PATH'] + \"/static/data/\"+file_name\n documents_load= MYHTMLParser(path, self.I('planInfo_title'))\n documents_load.complete(sourceData)\n\n self.set_header('Content-Type', 'application/octet-stream')\n self.set_header('Content-Disposition', 'attachment; filename=' + file_name)\n buf_size = 1024\n with open(path, 'rb') as f:\n while True:\n data = f.read(buf_size)\n if not data:\n break\n self.write(data)\n self.finish()\n try:\n import os\n os.remove(path)\n except Exception:\n pass\n return False\n\n def follow(self):\n strid = self.I('id')\n remark = self.I('remark')\n group = self.I('group')\n uid = self.current_user.get('id')\n if group:\n group = group.split(',')\n status = self.projectService.follow_group(strid, group, uid, remark)\n 
self.out(status)","repo_name":"yfjelley/remark","sub_path":"admin/controller/project_planning.py","file_name":"project_planning.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72447443560","text":"from django.contrib import admin\nfrom django.contrib import messages\nfrom django.contrib.admin.helpers import Fieldset\nfrom django.forms.widgets import Media\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\n\nfrom .models import Root, get_settings_models\nfrom .settings import settings\n\n\nclass FakeOpts(object):\n\tapp_label = 'fake_label'\n\tobject_name = 'fake_name'\n\n\tdef get_ordered_objects(self):\n\t\treturn None\n\n\n@admin.register(Root)\nclass RootSettingsAdmin(admin.ModelAdmin):\n\tdef has_add_permission(self, request):\n\t\treturn False\n\n\tdef has_delete_permission(self, request, obj=None):\n\t\treturn False\n\n\tdef has_change_permission(self, request, obj=None):\n\t\texhausted = object()\n\t\treturn next(get_settings_models(), exhausted) != exhausted\n\n\tdef changelist_view(self, request, extra_context=None):\n\t\tforms = []\n\t\tfieldsets = []\n\t\tmedia = Media()\n\n\t\tfor app, model in get_settings_models():\n\t\t\t# TODO: check permissions for specific models\n\t\t\tinstance = getattr(settings, model._get_settings_object_name())\n\t\t\tmodel_admin = self.get_model_admin(model)\n\t\t\tform_class = model_admin.get_form(request, instance)\n\t\t\tform = form_class(\n\t\t\t\tprefix=model._get_settings_object_name(),\n\t\t\t\tinstance=instance,\n\t\t\t\tdata=request.POST or None,\n\t\t\t\tfiles=request.FILES or None,\n\t\t\t)\n\t\t\tfieldset = Fieldset(\n\t\t\t\tform,\n\t\t\t\tname=app.verbose_name if model._meta.model_name == 'settings' else '{}: {}'.format(app.verbose_name, model._meta.verbose_name.capitalize()),\n\t\t\t\tfields=list(form.fields.keys()) + list(model_admin.readonly_fields),\n\t\t\t\treadonly_fields=model_admin.readonly_fields,\n\t\t\t)\n\t\t\tforms.append(form)\n\t\t\tfieldsets.append(fieldset)\n\t\t\tmedia += model_admin.media\n\t\t\tmedia += form.media\n\n\t\tif not forms:\n\t\t\traise Http404\n\n\t\tif all([form.is_valid() for form in forms]): # need to use list (not generator!) 
to validate all forms\n\t\t\tfor form in forms:\n\t\t\t\tform.save()\n\t\t\tmessages.info(request, \"Settings saved.\")\n\t\t\treturn redirect(request.get_full_path())\n\n\t\tcontext = {\n\t\t\t'app_name': \"Settings\",\n\t\t\t'forms': forms,\n\t\t\t'fieldsets': fieldsets,\n\t\t\t'opts': FakeOpts(),\n\t\t\t'add': False,\n\t\t\t'change': True,\n\t\t\t'is_popup': False,\n\t\t\t'save_as': False,\n\t\t\t'has_view_permission': True,\n\t\t\t'has_add_permission': False,\n\t\t\t'has_delete_permission': False,\n\t\t\t'has_change_permission': True,\n\t\t\t'has_editable_inline_admin_formsets': False,\n\t\t\t'media': media,\n\t\t}\n\t\tif extra_context:\n\t\t\tcontext.update(extra_context)\n\t\treturn render(request, 'dbsettings/settings.html', context)\n\n\tdef change_view(self, request, object_id, form_url='', extra_context=None):\n\t\traise Http404\n\n\t_model_admins = {}\n\n\tdef get_model_admin(self, model):\n\t\tmodel_admin = self._model_admins.get(model) or admin.ModelAdmin\n\t\treturn model_admin(model, self.admin_site)\n\n\t@classmethod\n\tdef register_model_admin(cls, model, model_admin):\n\t\tcls._model_admins[model] = model_admin\n\n\t@classmethod\n\tdef unregister_model_admin(cls, model):\n\t\tdel cls._model_admins[model]\n\n\t@classmethod\n\tdef register(cls, model):\n\t\t\"\"\"\n\t\tDecorator usage:\n\n\t\t@SettingsAdmin.register(MySettings)\n\t\tclass MySettingsAdmin(admin.Model):\n\t\t\t...\n\t\t\"\"\"\n\t\tdef _register(model_admin):\n\t\t\treturn cls.register_model_admin(model, model_admin)\n\t\treturn _register\n","repo_name":"IlyaSemenov/django-modelsettings","sub_path":"dbsettings/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"}
+{"seq_id":"23021682015","text":"import hexes, sys\n\nships = [\n {\n 'id': 1,\n 'speed': 0.8\n },\n {\n 'id': 2,\n 'speed': 0.9\n },\n {\n 'id': 3,\n 'speed': 1.0\n },\n {\n 'id': 4,\n 'speed': 1.1\n }\n]\n\nnormal_duration = 30\nrotation_downspeed = 0.8\nclockwise = [[1, 1], [1, 0], [1, -1], [-1, -1], [-1, 0], [-1, 1]]\nangles = [-60, 0, 60, 120, 180, 240]\nmovements_length = 3\n\nclass Movement:\n time_to_me = normal_duration\n rotation = -60\n hex = [65, 25]\n direction = [1, 1]\n\nclass Ship:\n def __init__(self, data):\n self.id = data['id']\n self.speed = data['speed']\n self.active = False\n \n m = Movement()\n m.rotation = -60\n m.hex = [65, 25]\n m.time_to_me = self.get_duration_to_hex(m.hex, None)\n m.direction = [1, 1]\n\n self.movements = [m]\n for _ in range(movements_length - 1):\n self.set_next_hex(None)\n\n def get_duration_to_hex(self, hex, gamestate):\n base = int(round(normal_duration / self.speed))\n if gamestate:\n ice_thickness = gamestate.ice_field.current_field[hex[0]][hex[1]]\n ice_thickness_norm = ice_thickness * 1.0 / 100.0 - 0.5\n return int(round(base * (1.0 + ice_thickness_norm * 2)))\n else:\n return base\n\n def update(self, gamestate):\n if self.active:\n if self.left_ticks > 0:\n self.left_ticks -= 1\n else:\n self.set_next_hex(gamestate)\n # print(self.left_ticks)\n\n def set_next_hex(self, gamestate):\n while len(self.movements) > movements_length - 1:\n self.movements.pop(0)\n self.movements.append(self.next_movement_from(self.movements[-1], gamestate))\n self.left_ticks = self.movements[1].time_to_me\n \n if gamestate:\n new_movement = self.movements[-1]\n gamestate.ice_field.place_ship(new_movement.hex)\n\n if gamestate:\n gamestate.check_if_complete_quest(self.movements[1].hex)\n\n # self.target_hexes = self.get_allowed_neighbours()\n neighbours = self.get_allowed_neighbours()\n target_hexes = []\n for h in neighbours:\n is_target = True\n for m in self.movements:\n if m.hex[0] == h[0] and m.hex[1] == h[1]:\n is_target = False\n if is_target:\n target_hexes.append(h)\n\n self.target_hexes = target_hexes.copy()\n\n def next_movement_from(self, movement, gamestate):\n next = hexes.neighbour_hex(movement.hex[0], movement.hex[1], movement.direction[0], movement.direction[1])\n if next:\n next_movement = Movement()\n next_movement.hex = next\n next_movement.time_to_me = self.get_duration_to_hex(next, gamestate)\n next_movement.rotation = movement.rotation\n next_movement.direction = movement.direction\n else:\n next_cw = self.find_dist(movement, clockwise)\n next_ccw = self.find_dist(movement, clockwise[::-1])\n next_movement = Movement()\n cw = next_cw[0] <= next_ccw[0]\n if cw:\n next_movement.direction = next_cw[1]\n next_movement.hex = next_cw[2]\n next_duration = self.get_duration_to_hex(next_movement.hex, gamestate)\n next_movement.time_to_me = int(round(next_duration * (rotation_downspeed ** next_cw[0])))\n # rotate clockwise\n next_movement.rotation = movement.rotation + next_cw[0] * 60\n else:\n next_movement.direction = next_ccw[1]\n next_movement.hex = next_ccw[2]\n next_duration = self.get_duration_to_hex(next_movement.hex, gamestate)\n next_movement.time_to_me = int(round(next_duration * (rotation_downspeed ** next_ccw[0])))\n # rotate counter clockwise\n next_movement.rotation = movement.rotation - next_ccw[0] * 60\n\n return self.refine_rotation(next_movement)\n \n\n def refine_rotation(self, movement):\n # check rotation\n rot = movement.rotation\n while rot > 240:\n rot -= 360\n while rot < -60:\n rot += 360\n \n index = 
self.find_index(movement.direction, clockwise)\n if not rot == angles[index]:\n movement.rotation = angles[index]\n return movement\n \n def find_index(self, direction, array):\n for i in range(6):\n if array[i][0] == direction[0] and array[i][1] == direction[1]:\n return i\n return -1\n\n def force_move(self, gamestate, direction):\n # print(direction)\n last = self.movements[-1]\n last.direction = direction\n last.rotation = angles[self.find_index(direction, clockwise)]\n\n def find_dist(self, movement, array):\n index = self.find_index(movement.direction, array)\n dist = 0\n while dist < 6:\n dist += 1\n new_idx = index + dist\n if new_idx >= len(clockwise):\n new_idx -= len(clockwise)\n new_direction = clockwise[new_idx]\n potential_next = hexes.neighbour_hex(movement.hex[0], movement.hex[1], new_direction[0], new_direction[1])\n if potential_next:\n break\n return [dist, new_direction, potential_next]\n\n def get_allowed_neighbours(self):\n neighbours = []\n if len(self.movements):\n m = self.movements[-1]\n for direction in clockwise:\n n = hexes.neighbour_hex(m.hex[0], m.hex[1], direction[0], direction[1])\n if n:\n neighbours.append(n)\n\n return neighbours\n\ndef get_all():\n all = []\n for shp in ships:\n all.append(Ship(shp))\n return all","repo_name":"DenisNP/icebreak-backend","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":5881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"17892139922","text":"import unittest\nimport json\n\nfrom server import app\n\nclass BasicTestCase(unittest.TestCase):\n\n def test_index(self):\n tester = app.test_client(self)\n response = tester.get('/', content_type='html/text')\n self.assertEqual(response.status_code, 200)\n\n def test_getdata(self):\n tester = app.test_client(self)\n response = tester.get('/data', content_type='application/json')\n users = json.loads(response.data)\n self.assertEqual(users[0]['id'], 1)\n self.assertEqual(users[0]['name'], 'Juan')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"jmbl1685/flask-socketio-tdd","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"27670585910","text":"import sopel\n\n@sopel.module.event('JOIN')\n@sopel.module.rule('.*')\ndef log_join(bot, trigger):\n\tprint('{} has joined {}'.format(trigger.nick, trigger.sender))\n\n@sopel.module.event('PART')\n@sopel.module.rule('.*')\ndef log_part(bot, trigger):\n\tprint('{} has parted {}: \\'{}\\''.format(trigger.nick, trigger.sender, trigger))\n\n@sopel.module.event('QUIT')\n@sopel.module.rule('.*')\ndef log_quit(bot, trigger):\n\tprint('{} has quit: \\'{}\\''.format(trigger.nick, trigger))\n\n@sopel.module.event('KICK')\n@sopel.module.rule('.*')\ndef log_kick(bot, trigger):\n\tprint('{} has been kicked from {} by {}'.format(trigger.args[1], trigger.nick, trigger.sender))\n\n@sopel.module.event('TOPIC')\n@sopel.module.rule('.*')\ndef log_topic_change(bot, trigger):\n\tprint('{} has changed the topic in {} to \\'{}\\''.format(trigger.nick, trigger.sender, trigger))\n\n@sopel.module.event('NICK')\n@sopel.module.rule('.*')\ndef log_nick_change(bot, trigger):\n\tprint('{} is now known as {}'.format(trigger.nick, trigger))\n\n'''\n@sopel.module.event('AWAY')\n@sopel.module.rule('.*')\ndef log_away_change(bot, trigger):\n\tprint('args[1] = {}'.format(trigger.args[1]))\n\tif trigger.args[1]:\n\t\tprint('{} is now away'.format(trigger.nick))\n\telse:\n\t\tprint('{} is no longer away'.format(trigger.nick))\n'''\n\n@sopel.module.rule('.*')\ndef log_normal(bot, trigger):\n\tif trigger.tags.get('intent', '') == 'ACTION':\n\t\tprint('{} has done some kind of action'.format(trigger.nick))\n\telse:\n\t\tprint('{} has said something'.format(trigger.nick))\n\tprint('Trigger: ' + str(dir(trigger)))\n\tprint('Raw: ' + str(trigger))\n\tprint('Host: ' + str(trigger.host))\n\tprint('Hostmask: ' + str(trigger.hostmask))\n\tprint('Sender: ' + str(trigger.sender))\n","repo_name":"xoreos/hk51-bot","sub_path":"logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19318794867","text":"import socket\nimport threading\nfrom collections import deque\n\nfrom client_socket import ClientSocket\nfrom command.command import Command\nfrom config.connect_config import BUFFER_SIZE\nfrom dto.chat_dto import ChatDTO\n\n\nclass Server:\n def __init__(self, logger, host, port):\n self.logger = logger\n self.host = host\n self.port = port\n self.client_sockets = []\n self.client_commands = deque()\n\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server_socket.bind((self.host, self.port))\n self.server_socket.listen()\n\n self.thread_lock = threading.Lock()\n\n def close_client_socket(self, client_socket: ClientSocket):\n if client_socket in self.client_sockets:\n self.client_sockets.remove(client_socket)\n self.logger.info(\"Rest Clients : %s\", len(self.client_sockets))\n client_socket.close()\n\n def add_command(self, command: Command):\n with self.thread_lock:\n self.client_commands.append(command)\n\n def run(self):\n self.logger.info(\">> Wait\")\n threading.Thread(target=self.process_command).start()\n try:\n while True:\n client_socket, client_addr = self.server_socket.accept()\n recv_dto = ChatDTO.covertFromByteCode(client_socket.recv(BUFFER_SIZE).decode())\n self.logger.info(f\"Connected to client: {client_addr}\")\n\n socket = ClientSocket(self, self.logger, client_socket, client_addr, recv_dto.name)\n threading.Thread(target=socket.communicate).start()\n self.client_sockets.append(socket)\n self.logger.info(\"참가자 수 : %s\", len(self.client_sockets))\n except Exception as e:\n self.logger.error(f\"Server Error : {e}\")\n finally:\n self.server_socket.close()\n\n def broadcast(self, socket, message):\n with self.thread_lock:\n for client_socket in self.client_sockets:\n if client_socket.get_client_socket() != socket:\n client_socket.send(message)\n\n def process_command(self):\n while True:\n while self.client_commands:\n with self.thread_lock:\n command = self.client_commands.popleft()\n command.execute()\n","repo_name":"twotwobread/dudaji-chat-exam","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37588615745","text":"import pandas as pd\nimport tensorflow as tf\n\ndef load_data():\n df = pd.read_csv(\"data/election88/polls.csv\")\n # remove voters without preference\n df = df.dropna()\n # state, edu, age, female, black\n x = df.to_numpy(dtype=int)[:, 5:10]\n x[:, :3] = x[:, :3] - 1\n # vote for bush or not\n y = df['bush'].to_numpy()\n return x, y\n\n# returns prev vote for each state\ndef load_prev_vote():\n df = pd.read_csv(\"data/election88/presvote.csv\")\n prev_vote = df['g76_84pr'].to_numpy()\n return prev_vote\n","repo_name":"julianroth/ADVI","sub_path":"data/election88.py","file_name":"election88.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"2841197269","text":"from model.Enums.ActionsEnum import ActionsEnum\r\n\r\nclass ActionExecutor:\r\n\r\n def __init__(self,vk_opeartor,container_operator):\r\n self.dict_compliance = {vk_opeartor : (ActionsEnum.SEND_POSTS_TO_GROUPS,ActionsEnum.SEARCH_GROUPS),\r\n container_operator : (ActionsEnum.ADD_POST,ActionsEnum.EDIT_POST,ActionsEnum.REMOVE_POST,\r\n ActionsEnum.READ_ALL_RECORDS_FROM_DB)}\r\n\r\n def decide_type_of_action(self,name_action):\r\n operation_compliance = self.dict_compliance.items()\r\n for operator,operations in list(operation_compliance):\r\n if(name_action in operations):\r\n return operator\r\n\r\n def execute_action(self,cmd,data):\r\n operator = self.decide_type_of_action(cmd)\r\n if operator is not None :\r\n operator.execute(cmd,data)\r\n\r\n\r\n","repo_name":"SokolnikSergey/VkPoster","sub_path":"model/ActionExecturors/ActionExecutor.py","file_name":"ActionExecutor.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"70236772202","text":"# This problem is at its core a graph theory problem.\n# Consider the given matrix's rows - the bunnies, start, bulkhead and\n# any checkpoints - as vertices in a graph. The transitions between the\n# vertices would be the edges of the graph, with the time taken for the\n# transition being the weight of the edge. This would be a directional\n# graph (digraph)since the time taken can be different for the reverse transition.\n# Negative weights (adding time; eg. checkpoints) and negative weight\n# cycles are possible. The goal is to find the path through the graph\n# from start to the bulkhead, whose cumulative weight is less than the\n# given time limit and covers the maximum number of vertices.\n\n# If the graph has at least one negative cycle, we can keep going through\n# the negative cycle to keep adding the required time to get to every bunny\n# and thus reach all the bunnies. Hence, the solution is all bunnies.\n\n# If the graph does not have a negative cycle, we take the following approach.\n# We find all possible paths in the graph with only 2 vertices, 3 vertices etc.,\n# up to the paths with all vertices. For each of these paths, we calculate the\n# the time taken (cumulative weight), and we consider the path as valid only if\n# time taken is less than the time limit. As our goal is to maximize the number\n# of bunnies, as we increment the number of vertices, we consider the possible\n# paths for the maximum number of vertices where weight is less than the time limit.\n# If there are multiple such max. vertices paths, we sort them to return the one\n# starting with the lowest bunny. To get all possible n vertex paths efficiently,\n# we use the logic that every n-vertex path whose weight \"could\" be lesser than max time,\n# can be represented as the sum of an (n-1)-vertex path whose weight is definitely\n# lesser than max time, and an additional vertex. Hence, we can recursively calculate\n# n-vertex paths by caching the valid (n-1)-vertex paths and their weights, and combining\n# them with every other additional vertex possible and adding shortest time to reach\n# that vertex.\n#\n# We get the shortest time to reach the vertex using the Bellman-Ford\n# algorithm. This algorithm iterates over all edges and \"relaxes\" an edge\n# if it decreases the distance from start to the second point of the edge, i.e. update\n# the shortest distance to the second point of the edge. We do this iteratively\n# over all edges n times, where n is number of vertices - 1. This ensures\n# all vertices are accounted for in finding the shortest distance (the -1 is because\n# an edge has 2 vertices and thus last vertex is accounted for by virtue of counting the\n# edges of the rest). We use the Bellman-Ford algorithm as opposed to other algorithms\n# here as this algorithm also provides the flexibility of detecting negative weight\n# cycles. When we iterate over the edges for all vertices, when optimal distances\n# for all vertices are reached, no edges are relaxed in the next iteration. After\n# all the iterations, if no such optimal solution is reached and edge relaxation\n# is still possible (i.e. possible to calculate a shorter distance), then\n# a negative cycle is detected in the graph which can potentially endlessly reduce\n# the distance. 
This is useful for the trivial case mentioned above.\n\n\ndef solution(matrix, max_time):\n\n # This function implements the Bellman Ford Algorithm to return\n # the shortest distance to every vertex in the graph, given a source\n # vertex (starting vertex). It also returns a boolean for whether\n # a negative cycle is detected in the graph.\n def bellman_ford(start):\n # Initialize distance to all vertices from start as infinity.\n distances = [float('inf')]*len(matrix)\n\n # Distance of start to itself is 0.\n distances[start] = 0\n\n for i in range(len(matrix) - 1):\n num_edges_relaxed = 0\n for x, y, distance in edges:\n if distances[x] + distance < distances[y]:\n num_edges_relaxed += 1\n distances[y] = distances[x] + distance\n\n # If the number of edges relaxed in this iteration is 0,\n # we can terminate the loop prematurely as no vertex was updated\n # and all distances are optimal.\n if num_edges_relaxed == 0:\n break\n\n return distances, True if num_edges_relaxed > 0 else False\n\n # Get list of edges for the given matrix in graph form.\n # Each edge is a tuple of first vertex, second vertex and distance - (x,y,distance)\n def get_edges():\n edges = []\n for x, row in enumerate(matrix):\n for y, distance in enumerate(row):\n edges.append([x, y, distance])\n return edges\n\n\n # This function recursively gets all possible paths with number\n # of vertices n.\n def generate_all_paths(n):\n # If n is 1, return the sorted list of vertices.\n if n == 1:\n return sorted(bunny_vertices)\n else:\n # Construct all n-paths using the cached (n-1)-paths.\n paths = list()\n valid_path_keys = valid_paths.keys()\n for path in valid_path_keys:\n for index in bunny_vertices:\n # A vertex can only appear once in a path.\n if index in path:\n continue\n current_path = path + index\n if current_path[1:] in valid_path_keys:\n paths.append(current_path)\n return paths\n\n # Cache to hold the valid paths\n valid_paths = dict()\n\n # Generate the list of bunny IDs from the matrix.\n bunny_vertices = [str(bunny + 1) for bunny in range(len(matrix) - 2)]\n\n # Generate the list of edges from the graph.\n edges = get_edges()\n\n # Use the Bellman-Ford implementation to test for negative\n # cycles and compute the shortest times from every vertex\n # to every other vertex.\n shortest_time_matrix = []\n negative_cycle = False\n for vertex in range(len(matrix)):\n distances, negative_cycle = bellman_ford(vertex)\n shortest_time_matrix.append(distances)\n\n # If there is a negative cycle, all bunnies can be rescued.\n if negative_cycle > 0:\n return range(len(matrix) - 2)\n\n # Set the start and end vertices.\n start = 0\n end = len(matrix) - 1\n\n # Iterate over all possible path lengths.\n for path_length in range(1, len(bunny_vertices) + 1):\n valid_paths_new = {}\n\n # Generate all possible paths for the current number of vertices.\n paths = generate_all_paths(path_length)\n\n # If there are no valid paths for the current length, there will be no\n # further valid paths of greater length either. Hence we can end the loop.\n if len(paths) <= 0:\n break\n\n # Test every path to see if it is still within the time limit. 
We do\n # this by using the cached weight of the sub path and adding the shortest\n # time to reach the additional vertex (pre-computed from the Bellman-Ford).\n for path in paths:\n if len(path) >= 2:\n sub_path = path[:-1]\n sub_path_time = valid_paths[sub_path]\n path_time = sub_path_time - \\\n shortest_time_matrix[int(sub_path[-1])][end] + \\\n shortest_time_matrix[int(sub_path[-1])][int(path[-1])] + \\\n shortest_time_matrix[int(path[-1])][end]\n else:\n path_time = shortest_time_matrix[start][int(path[0])] + \\\n shortest_time_matrix[int(path[0])][end]\n\n if path_time <= max_time:\n valid_paths_new[path] = path_time\n\n # If there are no valid paths for the current length, there will be no\n # further valid paths of greater length either. Hence we can end the loop.\n if len(valid_paths_new) <= 0:\n break\n\n # Replace the cached paths with the newly computed paths. We only\n # need to cache the results of 1 previous iteration as we are working\n # towards path with maximum vertices and we don't need anything other\n # than 1 iteration prior, to do so.\n valid_paths = valid_paths_new\n\n # Sort the final paths with maximum number of vertices to get the path starting\n # with minimum ID bunny.\n sorted_paths = sorted([sorted(path) for path in valid_paths.keys()])\n\n # The first element of the array is our final answer.\n return [int(a) - 1 for a in sorted_paths[0]]","repo_name":"luna-lovegoods-codelab/MyGoogleFoobarChallenges","sub_path":"running_with_bunnies.py","file_name":"running_with_bunnies.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"21396236133","text":"import os\r\nimport glob\r\nimport time\r\nimport RPi.GPIO as GPIO\r\n\r\n#these tow lines mount the device:\r\nos.system('modprobe w1-gpio')\r\nos.system('modprobe w1-therm')\r\n \r\nbase_dir = '/sys/bus/w1/devices/'\r\ndevice_path = glob.glob(base_dir + '28*')[0] #get file path of sensor\r\nrom = device_path.split('/')[-1] #get rom name\r\n\r\ndef read_temp_raw():\r\n with open(device_path +'/w1_slave','r') as f:\r\n valid, temp = f.readlines()\r\n return valid, temp\r\n \r\ndef read_temp():\r\n valid, temp = read_temp_raw()\r\n\r\n while 'YES' not in valid:\r\n time.sleep(0.2)\r\n valid, temp = read_temp_raw()\r\n\r\n pos = temp.index('t=')\r\n if pos != -1:\r\n #read the temperature .\r\n temp_string = temp[pos+2:]\r\n temp_c = float(temp_string)/1000.0 \r\n temp_f = temp_c * (9.0 / 5.0) + 32.0\r\n return temp_c, temp_f\r\n \r\ndef maintemp():\r\n print(' ROM: '+ rom)\r\n\r\n while True:\r\n c, f = read_temp()\r\n print('C={:,.3f} F={:,.3f}'.format(c, f))\r\n time.sleep(1)\r\n\r\n# cold shit\r\ndef gpio3():\r\n GPIO.setup(3, GPIO.OUT)\r\n GPIO.output(3, GPIO.LOW) # ON\r\n time.sleep(5)\r\n GPIO.output(3, GPIO.HIGH) # OFF\r\n\r\n# hot shit\r\ndef gpio5():\r\n GPIO.setup(5, GPIO.OUT)\r\n GPIO.output(5, GPIO.LOW) # ON\r\n time.sleep(5)\r\n GPIO.output(5, GPIO.HIGH) # OFF\r\n\r\ndef test():\r\n GPIO.setmode(GPIO.BOARD)\r\n gpio3()\r\n GPIO.cleanup()\r\n\r\ndef main():\r\n temp, _ = read_temp()\r\n print(temp)\r\n x = int(input(\"Please input the temperature: \"))\r\n t = int(input(\"Please input the time: \"))\r\n GPIO.setmode(GPIO.BOARD)\r\n GPIO.setup(3, GPIO.OUT)\r\n GPIO.setup(5, GPIO.OUT)\r\n start = time.time()\r\n while True:\r\n GPIO.setmode(GPIO.BOARD)\r\n temp, _ = read_temp()\r\n print(temp)\r\n if (temp <= x):\r\n GPIO.output(5, GPIO.LOW) # open 5\r\n print(\"OPEN HOT WATER\")\r\n GPIO.output(3, GPIO.HIGH) # close 3\r\n print(\"CLOSE COLD WATER\")\r\n if (temp >= x):\r\n GPIO.output(3, GPIO.LOW) # open 3\r\n print(\"OPEN COLD WATER\")\r\n GPIO.output(5, GPIO.HIGH) # close 5\r\n print(\"CLOSE HOT WATER\")\r\n end = time.time()\r\n if (end - start >= t):\r\n GPIO.output(3, GPIO.HIGH) # close 3\r\n GPIO.output(5, GPIO.HIGH) # close 5\r\n GPIO.cleanup()\r\n break\r\n\r\nmain()\r\n","repo_name":"frozen0601/Shower-Temperature-Control","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"6220256817","text":"import datetime\nimport json\n\nimport gevent\nfrom datamanage.lite.dmonitor.constants import ALERT_CODES, ALERT_LEVELS, ALERT_TYPES\nfrom datamanage.lite.dmonitor.models import AlertShield\nfrom datamanage.utils.api import CCApi, DataflowApi, MetaApi\nfrom datamanage.utils.time_tools import tznow\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom common.api import AuthApi\nfrom common.auth.exceptions import PermissionDeniedError\nfrom common.decorators import trace_gevent\nfrom common.local import get_request_username\nfrom common.log import logger\n\n\nclass BaseMixin(object):\n MAX_COUNT = 200000\n\n def get_table_records(self, table_name, fields=\"*\", max_record_count=5000, update_duration=0):\n sqls = []\n if update_duration:\n update_time = tznow() - datetime.timedelta(seconds=update_duration)\n sqls.append(\n \"\"\"\n SELECT {fields} FROM {table_name} WHERE updated_at >= '{update_time}' OR created_at >= '{update_time}'\n \"\"\".format(\n fields=fields if isinstance(fields, str) else \",\".join(fields),\n table_name=table_name,\n update_time=update_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n )\n else:\n try:\n res = MetaApi.complex_search(\n {\n \"statement\": \"SELECT count(*) as total_count FROM {table_name}\".format(table_name=table_name),\n \"backend\": \"mysql\",\n },\n raise_exception=True,\n )\n\n total_count = res.data[0].get(\"total_count\", self.MAX_COUNT)\n except Exception as e:\n logger.error(_(\"Complex search meta data error: {error}\").format(error=e))\n total_count = max_record_count\n\n index = 0\n query_step_count = 5000\n while index < total_count:\n sqls.append(\n \"\"\"\n SELECT {fields} FROM {table_name} limit {index}, {step}\n \"\"\".format(\n fields=fields if isinstance(fields, str) else \",\".join(fields),\n table_name=table_name,\n index=index,\n step=query_step_count,\n )\n )\n index += query_step_count\n\n return self.multiple_complex_search(sqls)\n\n def multiple_complex_search(self, sqls):\n result_list = []\n gevent_tasks = []\n\n def multiple_complex_search_task(sql, result):\n try:\n res = MetaApi.complex_search(\n {\n \"statement\": sql,\n \"backend\": \"mysql\",\n }\n )\n result.extend(res.data or [])\n except Exception as e:\n logger.error(\"查询元数据Complex Search接口失败, ERROR: %s\" % e)\n\n for sql in sqls:\n gevent_tasks.append(gevent.spawn(multiple_complex_search_task, sql, result_list))\n\n gevent.joinall(gevent_tasks)\n return result_list\n\n @trace_gevent()\n def fetch_dataflow_infos(self, dataflow_infos, dataflow_ids=None, bk_username=None):\n bk_username = bk_username or get_request_username()\n if len(dataflow_ids) == 0:\n return\n try:\n if dataflow_ids is not None:\n res = DataflowApi.flows.list({\"flow_id\": list(dataflow_ids), \"bk_username\": bk_username})\n else:\n res = DataflowApi.flows.list({\"bk_username\": bk_username})\n\n if res.is_success():\n for dataflow in res.data:\n flow_id = str(dataflow.get(\"flow_id\"))\n dataflow_infos[flow_id] = dataflow\n except Exception as e:\n logger.error(\"无法获取告警对象的flow信息, ERROR: %s\" % e)\n\n def fetch_dataflow_multiprocess(self, dataflow_infos, dataflow_ids=None, bk_username=None):\n from gevent import monkey\n\n monkey.patch_all()\n\n dataflow_ids = list(dataflow_ids)\n gevent_tasks = []\n for i in range(len(dataflow_ids) // 100 + 1):\n gevent_tasks.append(\n gevent.spawn(\n self.fetch_dataflow_infos,\n dataflow_infos,\n dataflow_ids[i * 100 : (i + 1) * 100],\n bk_username,\n )\n )\n gevent.joinall(gevent_tasks)\n\n @trace_gevent()\n def 
fetch_rawdata_infos(self, raw_data_infos, raw_data_ids=None):\n try:\n if raw_data_ids is not None:\n sql = \"\"\"\n SELECT id, raw_data_name, raw_data_alias, bk_biz_id FROM access_raw_data\n WHERE id in ({rawdata_list})\n \"\"\".format(\n rawdata_list=\",\".join(list(raw_data_ids))\n )\n else:\n sql = \"\"\"\n SELECT id, raw_data_name, raw_data_alias, bk_biz_id FROM access_raw_data\n \"\"\"\n complex_search_result = self.multiple_complex_search([sql])\n for item in complex_search_result:\n id = str(item.get(\"id\"))\n raw_data_infos[id] = item\n except Exception as e:\n logger.error(\"无法获取告警对象rawdata信息, ERROR: %s\" % e)\n\n @trace_gevent()\n def fetch_biz_infos(self, biz_infos, biz_ids=None):\n try:\n res = CCApi.get_app_list()\n if res.is_success():\n for item in res.data:\n bk_biz_id = int(item.get(\"ApplicationID\"))\n if biz_ids is None or bk_biz_id in biz_ids:\n biz_infos[bk_biz_id] = {\n \"bk_biz_id\": item.get(\"ApplicationID\"),\n \"bk_biz_name\": item.get(\"ApplicationName\"),\n \"maintainers\": item.get(\"Maintainers\"),\n \"description\": item.get(\"AppSummary\"),\n }\n except Exception as e:\n logger.error(\"获取业务信息失败, ERROR: %s\" % e)\n\n def fetch_project_infos(self, project_infos, project_ids=None):\n try:\n if project_ids is not None:\n res = MetaApi.projects.list({\"project_ids\": project_ids})\n else:\n res = MetaApi.projects.list()\n\n if res.is_success():\n for item in res.data:\n project_id = item.get(\"project_id\")\n project_infos[project_id] = item\n except Exception as e:\n logger.error(\"获取项目信息失败, ERROR: %s\" % e)\n\n @trace_gevent()\n def fetch_alert_shields(self, alert_shields, shield_time=None):\n try:\n shield_time = shield_time or tznow().strftime(\"%Y-%m-%d %H:%M:%S\")\n new_alert_shields = list(\n AlertShield.objects.filter(active=True, start_time__lte=shield_time, end_time__gte=shield_time).values()\n )\n for item in new_alert_shields:\n item[\"dimensions\"] = json.loads(new_alert_shields)\n alert_shields.extend(new_alert_shields)\n except Exception as e:\n logger.error(\"获取告警屏蔽信息失败, ERROR: %s\" % e)\n\n def get_bizs_by_username(self, username):\n try:\n bk_bizs = AuthApi.list_user_scope_dimensions(\n {\n \"action_id\": \"raw_data.update\",\n \"dimension\": \"bk_biz_id\",\n \"bk_username\": username,\n }\n ).data\n return bk_bizs\n except Exception as e:\n logger.error(\"获取用户有权限的业务列表失败: %s\" % e)\n return []\n\n def get_projects_by_username(self, username):\n try:\n projects = AuthApi.list_user_perm_scopes(\n {\n \"user_id\": username,\n \"show_display\": True,\n \"action_id\": \"project.manage_flow\",\n }\n ).data\n return projects\n except Exception as e:\n logger.error(\"获取用户有权限的项目列表失败: %s\" % e)\n return []\n\n def check_permission(self, action_id, object_id, bk_username):\n res = AuthApi.check_user_perm(\n {\n \"user_id\": get_request_username(),\n \"action_id\": action_id,\n \"object_id\": object_id,\n },\n raise_exception=True,\n )\n if res.data is False:\n raise PermissionDeniedError()\n\n def check_batch_permissions(self, action_id, object_ids, bk_username):\n res = AuthApi.batch_check(\n {\n \"permissions\": list(\n map(\n lambda object_id: {\n \"object_id\": object_id,\n \"user_id\": bk_username,\n \"action_id\": action_id,\n },\n object_ids,\n )\n )\n },\n raise_exception=True,\n )\n for item in res.data:\n if item.get(\"result\") is False:\n raise PermissionDeniedError(_(\"权限不足({})\").format(item.get(\"object_id\")))\n\n def check_dimension_match(self, dimension_conditions, item_dimensions):\n for key, value in 
list(dimension_conditions.items()):\n if key in item_dimensions:\n if isinstance(value, list):\n in_candidate = False\n for candidate_value in value:\n if str(item_dimensions[key]) == (candidate_value):\n in_candidate = True\n\n if in_candidate:\n continue\n else:\n if str(item_dimensions[key]) == str(value):\n continue\n return False\n else:\n return False\n return True\n\n def summary_alerts(self, alert_list, group=None):\n response = self.get_default_summary()\n if group:\n response[\"groups\"] = {}\n\n for alert_detail in alert_list:\n alert_code = alert_detail[\"alert_code\"]\n alert_type = alert_detail[\"alert_type\"]\n alert_level = alert_detail[\"alert_level\"]\n\n self.statistics_alert(response, alert_code, alert_type, alert_level)\n if group and (group in alert_detail or group in alert_detail[\"dimensions\"]):\n group_value = alert_detail.get(group, alert_detail[\"dimensions\"].get(group, \"\"))\n if group_value not in response[\"groups\"]:\n response[\"groups\"][group_value] = self.get_default_summary()\n self.statistics_alert(response[\"groups\"][group_value], alert_code, alert_type, alert_level)\n\n return response\n\n def get_default_summary(self):\n template = {\n \"alert_count\": 0,\n \"alert_codes\": {},\n \"alert_levels\": {},\n \"alert_types\": {},\n }\n for alert_code in ALERT_CODES:\n template[\"alert_codes\"][alert_code] = 0\n for alert_type in ALERT_TYPES:\n template[\"alert_types\"][alert_type] = 0\n for alert_level in ALERT_LEVELS:\n template[\"alert_levels\"][alert_level] = 0\n return template\n\n def statistics_alert(self, summary, alert_code, alert_type, alert_level):\n summary[\"alert_count\"] += 1\n if alert_code in summary[\"alert_codes\"]:\n summary[\"alert_codes\"][alert_code] += 1\n if alert_type in summary[\"alert_types\"]:\n summary[\"alert_types\"][alert_type] += 1\n if alert_level in summary[\"alert_levels\"]:\n summary[\"alert_levels\"][alert_level] += 1\n","repo_name":"Tencent/bk-base","sub_path":"src/api/datamanage/lite/dmonitor/mixins/base_mixins.py","file_name":"base_mixins.py","file_ext":"py","file_size_in_byte":11953,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"}
+{"seq_id":"34387601993","text":"''' Define the LSTM model '''\nimport sys\nimport random\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport torch.nn.functional as F\nfrom transformer.Modules import Linear, SELayer, Swish\n\nclass LSTM(nn.Module):\n def __init__(self, n_trg_vocab = 43, \n d_words = 512, \n hidden_size = 512,\n num_layers = 2):\n super().__init__()\n self.lstm = torch.nn.LSTM(d_words, hidden_size, num_layers = num_layers, batch_first = True, bidirectional = True)\n self.linear = torch.nn.Linear(hidden_size*2, n_trg_vocab)\n self.sub_rate = 4\n\n def forward(self, x):\n out, _ = self.lstm(x)\n # subsampling\n if out.shape[1]%self.sub_rate != 0:\n shape = out.shape\n out = out[:,:shape[1]-shape[1]%self.sub_rate,:]\n out = torch.split(out, self.sub_rate, dim=1)\n out = [torch.mean(out_enc, dim=1, keepdim=True) for out_enc in out]\n out = torch.cat(out, dim=1)\n\n out = self.linear(out)\n out = F.log_softmax(out, dim=2)\n\n return out\n\n\nclass LingLSTM(nn.Module):\n def __init__(self, n_trg_vocab = 43, \n d_words = 512, \n hidden_size = 512,\n num_layers = 2):\n super().__init__()\n self.code_emb = torch.nn.Embedding(n_trg_vocab, d_words)\n self.head = LSTM(n_trg_vocab=n_trg_vocab,\n d_words=d_words,\n hidden_size=hidden_size,\n num_layers=num_layers\n )\n\n def forward(self, x):\n v_emb = self.code_emb(x)\n return self.head(v_emb)\n\n\n# encoder-decoder scheme seq2seq network\n\nclass Encoder(nn.Module):\n def __init__(self,\n input_size = 512,\n hidden_size = 512,\n n_layers = 2,\n dropout = 0.5):\n super().__init__()\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n # self.linear = nn.Linear(input_size, embedding_size)\n self.rnn = nn.LSTM(input_size, hidden_size, num_layers = n_layers, dropout = dropout, batch_first = True)\n # self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n \"\"\"\n x: input batch data, size: [sequence len, batch size, feature size]\n for the argoverse trajectory data, size(x) is [20, batch size, 2]\n \"\"\"\n # embedded: [sequence len, batch size, embedding size]\n # embedded = self.dropout(F.relu(self.linear(x)))\n # you can checkout https://pytorch.org/docs/stable/nn.html?highlight=lstm#torch.nn.LSTM\n # for details of the return tensor\n # briefly speaking, output coontains the output of last layer for each time step\n # hidden and cell contains the last time step hidden and cell state of each layer\n # we only use hidden and cell as context to feed into decoder\n output, (hidden, cell) = self.rnn(x)\n # hidden = [n layers * n directions, batch size, hidden size]\n # cell = [n layers * n directions, batch size, hidden size]\n # the n direction is 1 since we are not using bidirectional RNNs\n return hidden, cell\n\nclass Decoder(nn.Module):\n def __init__(self,\n output_size = 43,\n embedding_size = 512,\n hidden_size = 512,\n n_layers = 4,\n dropout = 0.5):\n super().__init__()\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n\n self.embedding = nn.Linear(embedding_size, embedding_size)\n self.rnn = nn.LSTM(embedding_size, hidden_size, n_layers, dropout = dropout, batch_first = True)\n # self.linear = nn.Linear(hidden_size, output_size)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, hidden, cell):\n \"\"\"\n x : input batch data, size(x): [batch size, feature size]\n notice x only has two dimensions since the input is batchs\n of last coordinate of observed trajectory\n so the sequence length has been removed.\n \"\"\"\n # add sequence dimension to x, 
to allow use of nn.LSTM\n # after this, size(x) will be [1, batch size, feature size]\n # x = x.unsqueeze(1)\n\n # embedded = [batch size, 1, embedding size]\n embedded = self.dropout(F.relu(self.embedding(x)))\n\n #output = [seq len, batch size, hid dim * n directions]\n #hidden = [n layers * n directions, batch size, hid dim]\n #cell = [n layers * n directions, batch size, hid dim]\n \n #seq len and n directions will always be 1 in the decoder, therefore:\n #output = [1, batch size, hidden size]\n #hidden = [n layers, batch size, hidden size]\n #cell = [n layers, batch size, hidden size]\n output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n\n # prediction = [batch size, output size]\n # prediction = self.linear(output.squeeze(0))\n\n # return prediction, hidden, cell\n return output, hidden, cell\n\nclass Seq2Seq(nn.Module):\n def __init__(self, n_trg_vocab = 43, \n d_words = 512, \n hidden_size = 512,\n num_layers = 2):\n super().__init__()\n self.code_emb = torch.nn.Embedding(n_trg_vocab, d_words)\n self.projection = nn.Sequential(\n nn.LayerNorm(d_words),\n Swish(),\n Linear(d_words, d_words)#,\n # nn.LayerNorm(d_words),\n # Swish(),\n # Linear(d_words, d_words), # add 1 layer to projection\n )\n self.head = Seq2SeqHead()\n\n def forward(self, x, y, target_len, teacher_forcing_ratio = 0.5):\n x = self.code_emb(x)\n y = self.code_emb(y)\n x = self.projection(x)\n y = self.projection(y)\n return self.head(x, y, target_len, teacher_forcing_ratio)\n\n \n\nclass Seq2SeqHead(nn.Module):\n # def __init__(self, encoder, decoder, device):\n def __init__(self, n_trg_vocab = 43, \n d_words = 512, \n hidden_size = 512,\n num_layers = 2):\n super().__init__()\n # self.code_emb = torch.nn.Embedding(n_trg_vocab, d_words)\n self.layernorm = nn.LayerNorm(d_words)\n self.encoder = Encoder(input_size=d_words, hidden_size=hidden_size, n_layers=num_layers, dropout=0.5)\n self.decoder = Decoder(output_size=n_trg_vocab, embedding_size=hidden_size, hidden_size=hidden_size, n_layers=num_layers, dropout=0.5)\n self.linear = nn.Linear(hidden_size, n_trg_vocab)\n \n self.hidden_size = hidden_size\n self.device = torch.device('cuda:0')\n # self.device = torch.device('cpu')\n\n # assert encoder.hidden_size == decoder.hidden_size, \\\n # \"Hidden dimensions of encoder and decoder must be equal!\"\n # assert encoder.n_layers == decoder.n_layers, \\\n # \"Encoder and decoder must have equal number of layers!\"\n\n def forward(self, x, y, target_len, teacher_forcing_ratio = 0.5):\n \"\"\"\n x = [batch size, seq_len]\n y = [batch size, seq_len]\n for our argoverse motion forecasting dataset\n observed sequence len is 20, target sequence len is 30\n feature size for now is just 2 (x and y)\n\n teacher_forcing_ratio is probability of using teacher forcing\n e.g. 
if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time\n \"\"\"\n # batch_size = x.shape[1]\n # target_len = y.shape[1]\n # y = self.code_emb(y)\n \n # tensor to store decoder outputs of each time step\n outputs = torch.zeros(y.shape[0], target_len, self.hidden_size).to(self.device)\n \n # last hidden state of the encoder is used as the initial hidden state of the decoder\n # x = self.code_emb(x)\n x = self.layernorm(x)\n y = self.layernorm(y)\n\n hidden, cell = self.encoder(x)\n\n # first input to decoder is last coordinates of x\n decoder_input = x[:, -1::1, :]\n \n for i in range(target_len):\n # run decode for one time step\n output, hidden, cell = self.decoder(decoder_input, hidden, cell)\n\n # place predictions in a tensor holding predictions for each time step\n outputs[:, i] = output\n\n # decide if we are going to use teacher forcing or not\n teacher_forcing = random.random() < teacher_forcing_ratio\n\n # output is the same shape as input, [batch_size, feature size]\n # so we can use output directly as input or use true lable depending on\n # teacher_forcing is true or not\n decoder_input = y[:, i:i+1, :] if teacher_forcing else output\n\n feature = outputs\n outputs = self.linear(outputs)\n outputs = F.log_softmax(outputs, dim=2)\n\n return feature, outputs\n\nif __name__ == \"__main__\":\n # lstm = LingLSTM()\n lstm = Seq2Seq()\n test_ts = torch.tensor([ 3, 32, 41, 22, 34, 41, 14, 22, 38, 41, 14, 22, 38, 41, 5, 27, 41, 7, 25, 41, 3, 27, 41, 11, 33])\n test_ts = test_ts.unsqueeze(0)\n feature, out = lstm(test_ts, test_ts, 6)\n # out = lstm(test_ts)\n print(feature.shape)\n print(out.shape)","repo_name":"YuxuanZHANG0713/FedCSR","sub_path":"transformer/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":9335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19772807785","text":"from logging import getLogger\n\nfrom derl.tracker import get_tracker\n\n_logger = getLogger(__name__)\n_tracker = get_tracker()\n\n\ndef output(files: list, stats: bool = False):\n if len(files) == 0:\n _logger.debug(\"No matched files for output\")\n return\n\n for file in files:\n print(file, end=\"\")\n\n if stats:\n print(_tracker)\n","repo_name":"tpiekarski/derl","sub_path":"src/derl/outputer.py","file_name":"outputer.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"18"}
+{"seq_id":"39105525647","text":"\nfrom typing import AsyncIterable\n\nimport pytest\n\nfrom package.db.models.Chat import PublicChat\nfrom package.db.models.Message import Message, PublicMessageWithAuthor\nfrom tests.shared.AuthClient import AuthClient\n\nNUMBER_OF_MESSAGES = 20\n\n\n@pytest.fixture()\nasync def messages(\n auth_client: AuthClient,\n chat: PublicChat\n) -> AsyncIterable[list[PublicMessageWithAuthor]]:\n messages = []\n ids = []\n for _ in range(NUMBER_OF_MESSAGES):\n message = await auth_client.send_message(chat.id)\n ids.append(message.id)\n messages.append(message)\n\n yield messages\n\n affected = await Message.filter(\n id__in=ids\n ).delete()\n\n assert affected > 0\n","repo_name":"batreller/messenger-backend","sub_path":"tests/test_message/fixtures/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"41458605757","text":"from datetime import datetime\nfrom threading import Condition\nfrom typing import Dict, Any, Optional\n\nfrom schema import Schema # type: ignore\n\nfrom common.client_server_protocols import (\n save_game_client_schema,\n save_game_server_schema,\n)\nfrom server.client_comms.base_client_response import BaseClientResponse\nfrom server.database_management.database_manager import DatabaseManager, DatabaseGame\n\n\nclass SaveGameClientResponse(BaseClientResponse):\n def __init__(self, message: Dict[str, Any]) -> None:\n \"\"\"\n C'tor for response handler that saves an existing game in the database manager\n :param message: Message info from client\n \"\"\"\n super().__init__(message=message)\n self._db_success: Optional[bool] = None\n self._db_complete_cv: Condition = Condition()\n self._sent_message_schema: Schema = save_game_client_schema\n self._response_message_schema: Schema = save_game_server_schema\n self._response_message[\"protocol_type\"] = self._response_message_schema.schema[\n \"protocol_type\"\n ]\n\n def respond(self) -> Dict[str, Any]:\n \"\"\"\n Respond to the client through the server comms manager\n :return Message to send to client\n \"\"\"\n # Check schema of incoming message is ok\n if not self._sent_message_schema.is_valid(self._sent_message):\n self._response_message[\"success\"] = False\n return self._response_message\n\n # Update game in database\n dbg: DatabaseGame = DatabaseGame(\n complete=self._sent_message[\"complete\"],\n board_state=self._sent_message[\"board_state\"],\n next_turn=self._sent_message[\"next_turn\"],\n last_save=datetime.now(),\n )\n DatabaseManager().update_game(\n callback=self.__game_updated_callback,\n game_id=self._sent_message[\"game_id\"],\n database_game=dbg,\n )\n\n # Wait for database manager to complete task\n with self._db_complete_cv:\n while self._db_success is None:\n self._db_complete_cv.wait()\n\n # Return the response message\n self._response_message[\"success\"] = self._db_success\n return self._response_message\n\n def __game_updated_callback(self, success: bool) -> None:\n \"\"\"\n Callback for when the game has finished being saved in the database manager\n :param success: Whether game was updated successfully\n \"\"\"\n # Notify class that database has completed its task\n with self._db_complete_cv:\n self._db_success = success\n self._db_complete_cv.notify()\n","repo_name":"LucasEby/Reversi-REV-","sub_path":"server/client_comms/save_game_client_response.py","file_name":"save_game_client_response.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"9248370914","text":"import psycopg2 as conector\n\n\nclass AppDB:\n def __init__(self):\n print(\"Método construtor!\")\n\n#---------------------------------------------------------------------------------------------------------\n# Método para abrir conexão\n#---------------------------------------------------------------------------------------------------------\n def abrirConexao(self):\n try:\n self.conexao = conector.connect(\n database='Aula4Python',\n user='postgres',\n password='051094',\n host='127.0.0.1',\n port='5432')\n print(\"Conexao com o Banco estabelecida\")\n except (Exception, conector.Error) as err:\n if self.conexao:\n print(\"Falha ao se conectar ao Banco de Dados\", err)\n\n#---------------------------------------------------------------------------------------------------------\n# Método para selecionar dados\n#---------------------------------------------------------------------------------------------------------\n def selecionarDados(self):\n try:\n self.abrirConexao()\n cursor = self.conexao.cursor()\n\n cursor.execute(\"\"\"SELECT * FROM \"PRODUTO\" \"\"\")\n print(\"operação de seleção realizada com sucesso\")\n registros = cursor.fetchall()\n print(registros)\n\n except (Exception, conector.Error) as err:\n print(\"Error in select operation\", err)\n\n finally:\n if self.conexao:\n cursor.close()\n self.conexao.close()\n print(\" A conexão com o Postgres foi fechada\")\n return registros\n\n#---------------------------------------------------------------------------------------------------------\n# Método para inserir Dados\n#---------------------------------------------------------------------------------------------------------\n def inserirDados(self, codigo, nome, preco):\n try:\n self.abrirConexao()\n cursor = self.conexao.cursor()\n\n postgres_insert_query=\"\"\"INSERT INTO \"PRODUTO\" (\"CODIGO\", \"NOME\", \"PRECO\") VALUES(%s,%s,%s)\"\"\"\n record_to_insert = (codigo,nome, preco)\n cursor.execute(postgres_insert_query,record_to_insert)\n self.conexao.commit()\n count = cursor.rowcount\n print(count, \"Registro inserido com sucesso na tabela PRODUTO\")\n except (Exception, conector.Error) as err:\n if(self.conexao):\n print(\"Falha ao inserir registro na tabela PRODUTO\", err)\n finally:\n if(self.conexao):\n cursor.close()\n self.conexao.close()\n print(\" A conexão com o Postgres foi fechada.\")\n\n#---------------------------------------------------------------------------------------------------------\n# Método para atualiza dados\n#---------------------------------------------------------------------------------------------------------\n def atualizarDados(self, codigo, nome, preco):\n try:\n self.abrirConexao()\n cursor = self.conexao.cursor()\n\n print(\"Registros antes da atualização\")\n sql_select_query = \"\"\"SELECT * FROM \"PRODUTO\" WHERE \"CODIGO\" = %s \"\"\"\n cursor.execute(sql_select_query,(codigo,))\n registros = cursor.fetchone()\n print(registros)\n\n # Atualizar registro\n sql_select_query = \"\"\"UPDATE \"PRODUTO\" SET \"NOME\" = %s, \"PRECO\" = %s WHERE \"CODIGO\" = %s\"\"\"\n cursor.execute(sql_select_query,(nome, preco,codigo))\n self.conexao.commit()\n\n count= cursor.rowcount\n print(count, \"Registro atualizado com sucesso!\")\n print(\"Registro Depois da Atualização\")\n sql_select_query = \"\"\"SELECT * FROM \"PRODUTO\" WHERE \"CODIGO\"=%s\"\"\"\n cursor.execute(sql_select_query, (codigo,))\n registros = cursor.fetchone()\n print(registros)\n\n except conector.Error as err:\n print(\"Erro na Atualização\", 
err)\n finally:\n if(self.conexao):\n cursor.close()\n self.conexao.close()\n print(\"A conexão com o Postgres foi fechada.\")\n\n#---------------------------------------------------------------------------------------------------------\n# Método para excluir Dados\n#---------------------------------------------------------------------------------------------------------\n def excluirDados(self, codigo):\n try:\n self.abrirConexao()\n cursor = self.conexao.cursor()\n\n sql_delete_query = \"\"\"DELETE FROM \"PRODUTO\" WHERE \"CODIGO\"=%s \"\"\"\n cursor.execute(sql_delete_query, (codigo,))\n self.conexao.commit()\n\n count= cursor.rowcount\n print(count, \"Registro excluido com sucesso!\")\n except conector.Error as err:\n print(\"Erro na Exclusão\", err)\n finally:\n if(self.conexao):\n cursor.close()\n self.conexao.close()\n print(\"A conexão com o Postgres foi fechada.\")\n\n\"\"\"#---------------------------------------------------------------------------------------------------------\n# Criando Registros e instanciando a Classe AppDB\n#---------------------------------------------------------------------------------------------------------\nlista = []\ncarros = [\"Ferrari\", \"Lamborghini\",\"Punto\",\"Gol\",\"Golf\",\"HRV\",\"CIVC\",\"KWID\",\"Sandero\",\"Uno\",\"Argo\",\"T-cros\",\"Virtus\",\"Bugatti\"]\ni=500\nfor n in range(0,10):\n for carro in carros:\n i+=1\n if carro == \"Ferrari\" or carro == \"Lamborghini\" or carro==\"Bugatti\":\n lista.append((i,f'{carro} {n}', 1000000+n*10000))\n lista.append((i,f'{carro} {n}', 84800+n*100))\n\n\nbanco = AppDB()\nfor item in lista:\n banco.inserirDados(item[0],item[1],item[2])\"\"\"","repo_name":"igoradriano/manipulacao-dados-python-bd","sub_path":"cap-4/programa/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"19297804890","text":"from operator import itemgetter\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nimport pprint as pp\r\nimport scipy.spatial.distance as sp\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef plot_class(ks, ws):\r\n \"\"\"Plots the classification effectiveness as a function of k\"\"\"\r\n fig, ax = plt.subplots()\r\n ax.set_xlabel(\"K\")\r\n ax.set_ylabel(\"Classification Accuracy\")\r\n ax.set_title(\"K-NN Classification Accuracy\")\r\n ax.scatter(ks, ws)\r\n \r\n\r\n\r\ndef classify(k, distances):\r\n \"\"\"Using K-NN, classifies the testing points USING the training points\"\"\"\r\n distances_k = sorted(distances, key=itemgetter(0))[:k]\r\n counts = [0, 0, 0]\r\n for d in distances_k:\r\n counts[d[1]] += 1\r\n return max(enumerate(counts), key=itemgetter(1))[0]+1\r\n\r\n\r\ndef knn(k, train, test):\r\n \"\"\"Uses K-NN to classify each of the test points\"\"\"\r\n predicted, actual = [], []\r\n for test_w, test_patterns in enumerate(test):\r\n for test_x in test_patterns:\r\n distances = []\r\n for train_w, train_patterns in enumerate(train):\r\n for train_x in train_patterns:\r\n distances.append((sp.euclidean(test_x, train_x), train_w))\r\n predicted.append(classify(k, distances))\r\n actual.append(test_w+1)\r\n\r\n cm = confusion_matrix(actual, predicted)\r\n w = sum([cm[i][j] for i in range(len(cm)) for j in range(len(cm[i])) \\\r\n if i == j]) / len(actual)\r\n print(\"K={}, error={}\\n\".format(k,round(w, 3)), cm, end=\"\\n\\n\")\r\n return w\r\n \r\n\r\ndef main():\r\n print()\r\n fp, train, test = open(\"iris_data.txt\"), [[], [], []], [[], [], []]\r\n for line in fp:\r\n line = line.strip().split()\r\n if line: \r\n if len(train[int(line[-1])-1]) < 25:\r\n train[int(line[-1])-1].append([float(l) for l in line[:-1]])\r\n else:\r\n test[int(line[-1])-1].append([float(l) for l in line[:-1]])\r\n \r\n ws, ks = [], [i for i in range(1, 25, 4)]\r\n for k in ks:\r\n ws.append(knn(k, train, test))\r\n plot_class(ks, ws)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","repo_name":"Brenhein/HW4-Pattern-Recognition","sub_path":"Code/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"26489729544","text":"import json\nimport logging\n\nfrom kafka import KafkaConsumer\nimport os\nimport time\n\n# kafka configs\nKAFKA_TOPIC = os.getenv('KAFKA_TOPIC', 'input-topic')\nKAFKA_BOOTSTRAP_SERVERS = os.getenv('KAFKA_BOOTSTRAP_SERVERS', 'localhost:9092')\n\n# log configuration\nlog_level = os.getenv('LOG_LEVEL', 'INFO')\nlogging.basicConfig(format='%(levelname)s:%(message)s', level=getattr(logging, log_level))\nlogger = logging.getLogger(__name__)\n\n\nconsumer: KafkaConsumer = None\ncounter = 0\nstart_time = time.time()\nNBR_OF_MESSAGES = 10000\n\n\ndef start_consuming():\n global counter\n for msg in consumer:\n logger.debug(f\"consumed: {msg}\")\n topic = msg.topic\n data = json.loads(msg.value)\n logger.debug(f\"topic {topic} data: {data}\")\n counter += 1\n # time.sleep(0.015)\n if counter >= NBR_OF_MESSAGES:\n break\n elapsed_time = time.time() - start_time\n print(f'Read {counter} messages in {elapsed_time:.2f} seconds')\n consumer.close()\n\n\nif __name__ == \"__main__\":\n consumer = KafkaConsumer(KAFKA_TOPIC, bootstrap_servers=[KAFKA_BOOTSTRAP_SERVERS],\n enable_auto_commit=True, group_id='kafka-python')\n\n # 2. start consuming messages\n start_consuming()\n","repo_name":"pedrodeoliveira/kafka-streaming-tests","sub_path":"src/python/consumers/kafka_python.py","file_name":"kafka_python.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"13119184152","text":"class Node:\n\tdef __init__(self,data):\n\t\tself.val=data\n\t\tself.left=None\n\t\tself.right=None\n#Function to print diagonal view\ndef diagonalprint(root):\n\t#base case\n\tif root is None:\n\t\treturn \n# queue of treenode\n\tq=[]\n\t#Append root\n\tq.append(root)\n\t#Append delimiter\n\tq.append(None)\n\n\twhile len(q)>0:\n\t\ttemp=q.pop(0)\n\t\t#If current is delimiter then insert another \n\t\t#for next diagonal and cout nextline\n\t\tif not temp:\n\t\t\t#If queue is empty then return \n\t\t\tif len(q)==0:\n\t\t\t\treturn \n\t\t\t\t#Print output on nextline\n\t\t\tprint(' ')\n\t\t\t#append delimiter again \n\t\t\tq.append(None)\n\n\t\telse:\n\t\t\twhile temp:\n\t\t\t\tprint(temp.val,end=' ')\n\t\t\t\t#If left child is present\n\t\t\t\t#append into queue\n\t\t\t\tif temp.left:\n\t\t\t\t\tq.append(temp.left)\n\t\t\t\t#current equals to right child\n\t\t\t\ttemp=temp.right\n\nroot = Node(8) \nroot.left = Node(3) \nroot.right = Node(10) \nroot.left.left = Node(1) \nroot.left.right = Node(6) \nroot.right.right = Node(14) \nroot.right.right.left = Node(13) \nroot.left.right.left = Node(4) \nroot.left.right.right = Node(7) \nprint('Diagonal Traversal of tree is :') \ndiagonalprint(root) ","repo_name":"mepky/data-structure-and-algorithm","sub_path":"Tree/diagonal_traversal_iterative_methode.py","file_name":"diagonal_traversal_iterative_methode.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"73646905960","text":"import logging\nlogging.basicConfig(level=logging.DEBUG, format='%(levelname)s %(asctime)s %(processName)s %(message)s')\n\nfrom tests.utils import parse_2in4\n\nfrom itertools import product\nfrom multiprocessing import Pool\nfrom os.path import dirname\nfrom unittest import TestCase\n\nimport dwave_networkx as dwnx\nimport networkx as nx\n\nfrom placeandroute.problemgraph import parse_cnf\nfrom placeandroute.tilebased.chimera_tiles import load_chimera, expand_solution\nfrom placeandroute.tilebased.heuristic import TilePlacementHeuristic, Constraint\nfrom placeandroute.tilebased.parallel import ParallelPlacementHeuristic\nfrom placeandroute.tilebased.utils import cnf_to_constraints, show_result\n\ndef mapl(func, seq):\n return list(map(func, seq))\n\ndef test_result(tile_graph, cnf, heur):\n chains = heur.chains\n for constraint in cnf:\n assert constraint in heur.constraint_placement, heur.constraint_placement\n var_rows, mapped_to = heur.constraint_placement[constraint]\n for v1, v2 in product(*var_rows):\n if v1 == v2: continue\n ch1 = chains[v1]\n ch2 = chains[v2]\n assert nx.is_connected(tile_graph.subgraph(ch1))\n assert nx.is_connected(tile_graph.subgraph(ch2))\n assert any(tile_graph.has_edge(n1, n2) for n1, n2 in product(ch1, ch2)), \\\n (v1, v2, constraint, mapped_to, ch1, ch2)\n\n\n\nclass TestTileBased(TestCase):\n def test2(self):\n cs = [Constraint([[1, 2, 3], [4, 5]]), Constraint([[2, 3, 4], [5, 6]])]\n s = 3\n chs, g, orig = load_chimera(s)\n h = TilePlacementHeuristic(cs, g, chs)\n h.run()\n xdict = expand_solution(g, h.chains, orig)\n #show_result(s, xdict)\n\n def test3(self):\n with open(dirname(__file__) + \"/../simple60.cnf\") as f:\n cnf = (parse_cnf(f))\n cnf = [mapl(lambda x: x // 2, clause) for clause in cnf[:130]]\n cs = list(cnf_to_constraints(cnf, max(max(x) for x in cnf)))\n s = 16\n chs, g, orig = load_chimera(s)\n h = TilePlacementHeuristic(cs, g, chs)\n #print(h.run(stop_first=True))\n #for c, t in h.constraint_placement.items():\n # print(c.tile, t)\n #print(repr(h.chains))\n test_result(g, cs, h)\n xdict = expand_solution(g, h.chains, orig)\n show_result(orig, xdict)\n\n def test4(self):\n with open(dirname(__file__) + \"/../simple60.cnf\") as f:\n cnf = (parse_cnf(f))\n cnf = [mapl(lambda x: x // 6, clause) for clause in cnf[:50]]\n cs = list(cnf_to_constraints(cnf, max(max(x) for x in cnf)))\n s = 8\n chs, g, orig = load_chimera(s)\n h = TilePlacementHeuristic(cs, g, chs)\n #print(h.run(stop_first=True))\n #for c, t in h.constraint_placement.items():\n # print(c.tile, t)\n #print(repr(h.chains))\n test_result(g, cs, h)\n xdict = expand_solution(g, h.chains, orig)\n show_result(s, xdict)\n\n def test_par(self):\n with open(dirname(__file__) + \"/../simple60.cnf\") as f:\n cnf = (parse_cnf(f))\n cnf = [mapl(lambda x: x // 6, clause) for clause in cnf[:50]]\n cs = list(cnf_to_constraints(cnf, max(max(x) for x in cnf)))\n s = 16\n chs, g, orig = load_chimera(s)\n h = ParallelPlacementHeuristic(cs, g, chs)\n pool = Pool()\n #print(h.par_run(pool, stop_first=False))\n #for c, t in h.constraint_placement.items():\n #print(c.tile, t)\n #print(repr(h.chains))\n test_result(g, cs, h)\n xdict = expand_solution(g, h.chains, orig)\n show_result(orig, xdict)\n\n def test_sgen(self):\n with open(dirname(__file__) + \"/../sgen-twoinfour-s80-g4-0.bench\") as f:\n cnf = (parse_2in4(f))\n nvars = max(max(clause) for clause in cnf)\n ancilla = nvars + 1\n cs = []\n for clause in cnf:\n c = Constraint()\n first, rest = clause[0], 
clause[1:]\n for second in rest:\n c.add_possible_placement([[ancilla, first, second],[ancilla+1]+[x for x in rest if x != second]])\n ancilla += 2\n cs.append(c)\n\n choices, quotient_graph, original_graph = load_chimera(16)\n #quotient_graph, choices = g, chs\n pool = Pool()\n h = ParallelPlacementHeuristic(cs, quotient_graph, choices)\n #print (h.par_run(pool, stop_first=True))\n xdict = (expand_solution(quotient_graph, h.chains, original_graph))\n show_result(original_graph, xdict)\n\n","repo_name":"boothby/placeandroute","sub_path":"tests/test_tilebased.py","file_name":"test_tilebased.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72931042601","text":"\n# LC134. Gas Station - on a circle\ndef canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int: # O(n)\n if not gas or not cost: return -1\n total = current = 0\n start_station = 0\n for i, (g, c) in enumerate(zip(gas, cost)):\n exp = g - c\n total += exp # accumulate total diff of gas - cost\n current += exp\n if current < 0:\n current = 0\n start_station = i+1\n return start_station if total >= 0 else -1\n\n# LC1710. Maximum Units on a Truck\ndef maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:\n boxes = sorted(boxTypes, key=lambda x: x[1], reverse=True) # O(nlogn)\n total, counts = 0, 0 # greedy: box count 1 with max units\n for c, u in boxes:\n if c <= truckSize - counts:\n counts += c\n total += c * u\n else:\n total += (truckSize - counts) * u\n counts = truckSize\n break\n return total\n\n# LC1648. Sell Diminishing-Valued Colored Balls\ndef maxProfit(self, inv: List[int], orders: int) -> int:\n # https://leetcode.com/problems/sell-diminishing-valued-colored-balls/discuss/927522/Python-n-log-n-690-ms\n arr=sorted(Counter(inv).items(), reverse=True)+[(0,0)]\n ans, ind, width = 0, 0, 0\n\n while orders>0: # constraint: sum(inv) >= orders\n width += arr[ind][1] # number of ball\n sell = min(orders, width * (arr[ind][0] - arr[ind+1][0])) # sell diff to flatten\n whole, remainder= divmod(sell, width)\n price_w = width * whole * (arr[ind][0] + arr[ind][0] - (whole-1)) // 2\n price_r = remainder * (arr[ind][0] - whole)\n ans += price_w + price_r\n orders -= sell\n ind += 1\n return ans % 1_000_000_007\n\n# LC1326. Minimum Number of Taps to Open to Water a Garden\ndef minTaps(self, n: int, ranges: List[int]) -> int:\n jumps = [0]*(n+1)\n for x, r in enumerate(ranges):\n l = max(0, x-r)\n jumps[l] = max(jumps[l], x+r)\n step = start = end = 0\n while end < n:\n start, end = end+1, max(jumps[start:end+1]) # greedy on max\n if start > end: return -1\n step += 1\n return step\n\n\n\n# LC1306. Jump Game III O(n)\ndef canReach(self, arr: List[int], start: int) -> bool: # O(n) runtime and space\n if 0 <= start < len(arr) and arr[start] >= 0: # DFS\n if arr[start] == 0: return True\n arr[start] = - arr[start] # visited\n return self.canReach(arr, start + arr[start]) or self.canReach(arr, start - arr[start])\n return False # existing array can be restored back\ndef canReach(self, arr: List[int], start: int) -> bool: # O(n) runtime and space\n q, n = [start], len(arr) # BFS\n while q:\n node = q.pop(0)\n if arr[node] == 0: return True\n if arr[node] < 0: continue # visited\n arr[node] = -arr[node] # mark as visited\n for i in [node + arr[node], node - arr[node]]:\n if 0 <= i < n: q.append(i)\n return False\n\n# LC55. Jump Game\ndef canJump(self, nums: List[int]) -> bool: # greedy, O(n)\n target = len(nums) - 1 # we start from end go backward to front.\n for i in range(len(nums) - 2, -1, -1):\n if i + nums[i] >= target: target = i # find earliest i such that\n return target == 0 # we can reach target from i, and repeat this process to 1st.\n\n# LC45. Jump Game II\ndef jump(self, nums: List[int]) -> int:\n farthest = current_jump_end = jumps = 0\n for i in range(len(nums) - 1):\n # we continuously find the how far we can reach in the current jump\n farthest = max(farthest, i + nums[i])\n # if we have come to the end of the current jump,\n # we need to make another jump\n if i == current_jump_end:\n jumps += 1\n current_jump_end = farthest\n return jumps\n\n# LC818. 
Race Car\ndef racecar(self, target): # O(t logt)\n @lru_cache(None)\n def dp(t):\n n = t.bit_length()\n p1= 2 ** (n-1)\n p2 = p1 * 2 # 2 ** n\n\n if p2 - 1 == t: return n # nA\n\n res = dp(p2 - 1 - t) + n + 1 # nA R and then dp(p2-1-t) backward, i.e., we pass target and come back\n for m in range(n-1): # (n-1)A R and then m steps\n res = min(res, dp(t - p1 + 2 ** m) + n + m + 1)\n return res\n return dp(target)\n\n\n\n\n","repo_name":"bigfrog10/python-data-structure-algo-tutorial","sub_path":"leetcode/src/a_dsa/d4_arrays_int/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"17726379350","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, redirect\nfrom .models import User\nfrom .forms import NewUserForm\nfrom django.contrib.auth import login\nfrom django.contrib import messages\n\n\n# Create your views here.\n\ndef signup(request):\n if request.method == \"POST\":\n form = NewUserForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect(\"index\")\n else:\n form = NewUserForm()\n return render(request, 'account/register.html', {\"form\": form})\n\n\n@login_required\ndef account(request):\n return render(request, 'account/account.html')\n\n\n@login_required\ndef address(request):\n if request.method == \"POST\":\n pid = request.POST.get('pid')\n first_name = request.POST.get('first_name')\n last_name = request.POST.get('last_name')\n address = request.POST.get('address')\n zip = request.POST.get('zip')\n phone = request.POST.get('phone')\n User.objects.filter(id=pid).update(first_name=first_name, last_name=last_name, address=address, zip=zip,\n phone=phone)\n return redirect(\"address\")\n else:\n pass\n return render(request, 'account/address.html')\n","repo_name":"sunnypy22/jewellery","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73656639080","text":"#!/usr/bin/env python3\n\nimport freenet.lib.utils as utils\n\n\nclass ip_match(object):\n __ip_rules = None\n __ipv6_rules = None\n\n __host_timer = None\n\n def __init__(self):\n self.__ip_rules = {}\n self.__ipv6_rules = {}\n\n def __check_format(self, subnet, prefix):\n prefix = int(prefix)\n if prefix < 1: return False\n\n if utils.is_ipv4_address(subnet) and prefix > 32: return False\n if utils.is_ipv6_address(subnet) and prefix > 128: return False\n if not utils.is_ipv6_address(subnet) and not utils.is_ipv4_address(subnet): return False\n\n return True\n\n def add_rule(self, subnet, prefix):\n check_rs = self.__check_format(subnet, prefix)\n if not check_rs: return False\n\n is_ipv6 = False\n if utils.is_ipv6_address(subnet): is_ipv6 = True\n if not utils.check_subnet_fmt(subnet, prefix, is_ipv6=is_ipv6): return False\n\n if is_ipv6:\n subnet = utils.calc_subnet(subnet, prefix, is_ipv6=True)\n else:\n subnet = utils.calc_subnet(subnet, prefix, is_ipv6=False)\n\n name = \"%s/%s\" % (subnet, prefix,)\n if is_ipv6:\n self.__ipv6_rules[name] = (subnet, prefix,)\n else:\n self.__ip_rules[name] = (subnet, prefix,)\n\n return True\n\n def match(self, ipaddr, is_ipv6=False):\n if is_ipv6:\n rules = self.__ipv6_rules\n else:\n rules = self.__ip_rules\n result = False\n\n if is_ipv6:\n n = 128\n else:\n n = 32\n\n while n > 0:\n subnet = utils.calc_subnet(ipaddr, n, is_ipv6=is_ipv6)\n name = \"%s/%s\" % (subnet, n)\n n -= 1\n\n if name not in rules: continue\n result = True\n break\n\n return result\n\n def clear(self):\n self.__ip_rules = {}\n self.__ipv6_rules = {}\n\n\n\"\"\"\nimport freenet.lib.file_parser as fp\n\nm = ip_match()\nresults = fp.parse_ip_subnet_file(\"../../fdslight_etc/ip_rules.txt\")\n\nfor sub, prefix in results: m.add_rule(sub, prefix)\n\nprint(m.match(\"223.5.5.5\"))\n\"\"\"","repo_name":"fdslight/fdslight","sub_path":"freenet/lib/ip_match.py","file_name":"ip_match.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"18"}
+{"seq_id":"16001554039","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\n\nclass Model:\n def __init__(self, train_set, test_set):\n self.train_set = train_set\n self.test_set = test_set\n self.knn = KNeighborsClassifier(n_neighbors=3)\n self.predicted_labels = []\n\n \n def train(self):\n self.knn.fit(self.train_set.reshape(), self.train_set.labels)\n \n def test(self):\n self.predicted_labels = self.knn.predict(self.test_set.reshape())\n return self.predicted_labels\n \n def accuracy(self):\n return np.mean(self.predicted_labels == self.test_set.labels)\n \n def plot_sample(self, ok):\n sample = [i for i in range(len(self.predicted_labels)) if (self.predicted_labels[i] == self.test_set.labels[i]) == ok]\n \n fig, axes = plt.subplots(nrows=3, ncols=5, figsize=(8, 6))\n fig.suptitle(f'Sample of {\"valid\" if ok else \"invalid\"} results')\n for i, ax in enumerate(axes.flat):\n ax.imshow(self.test_set.images[sample[i]], cmap=\"gray\")\n ax.set_title(f\"Predicted: {self.predicted_labels[sample[i]]}\")\n ax.axis(\"off\")\n plt.tight_layout()\n plt.show()\n \n def plot_confusion_matrix(self):\n cm = confusion_matrix(self.test_set.labels, self.predicted_labels)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n ax.figure.colorbar(im, ax=ax)\n ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]),\n xticklabels=np.arange(10), yticklabels=np.arange(10),\n xlabel=\"Predicted label\", ylabel=\"True label\")\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], \"d\"),\n ha=\"center\", va=\"center\", color=\"white\" if cm[i, j] > cm.max() / 2 else \"black\")\n fig.tight_layout()\n plt.show()\n ","repo_name":"nem0z/py-mnist","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24976946243","text":"import ast\nimport pprint\nfrom src.parser import *\nfrom src.data_processor import *\nfrom transformers import BertModel, BertTokenizer, AdamW\nfrom sklearn.metrics import precision_recall_fscore_support\n\n\ndef make_model(args):\n ''' Making models here according to parameters in settings and args '''\n\n print('Making model...', file=settings.SHELL_OUT_FILE, flush=True)\n\n bert_model = BertModel.from_pretrained(\n args.bert_dir + args.bert_file + '.tar.gz')\n\n optimizer = AdamW(bert_model.parameters(), lr=args.lr, weight_decay=args.l2)\n criterion = nn.CrossEntropyLoss()\n\n print('Done\\n', file=settings.SHELL_OUT_FILE, flush=True)\n\n return bert_model, optimizer, criterion\n\n\ndef save_best_model(model, prefix, name, total_step):\n file_name = prefix + name\n state = {'step': total_step + 1,\n 'state_dict': model.state_dict()}\n torch.save(state, file_name)\n\n\ndef evaluate(criterion, output, label, outputs, labels):\n \"\"\"\n Compute loss and record results here.\n Designed for dev set and test set.\n\n Args:\n criterion: Loss function of your model.\n output: output by your model.\n label: golden label of your output data respectively.\n outputs: a list of outputs saving all outputs.\n labels: a list of golden labels of outputs respectively.\n\n Return:\n loss: total loss of the output.\n outputs: a extended list with same type of content.\n labels: a extended list with same type of content.\n \"\"\"\n\n with torch.no_grad():\n loss = criterion(output, label)\n output = np.argmax(output.cpu().numpy(), axis=-1).tolist()\n label = label.cpu().numpy().astype(np.int).tolist()\n\n outputs.extend(output)\n labels.extend(label)\n return loss.item()\n\n\ndef run(args):\n # Initial parameters and build directories\n settings.USE_CUDA = args.use_cuda\n\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n # Decide output stream\n if args.shell_print == 'file':\n settings.SHELL_OUT_FILE = open(args.output_dir + 'shell_out', 'a+', encoding='utf-8')\n else:\n settings.SHELL_OUT_FILE = sys.stdout\n\n # Build model and data reader/processor\n tokenizer = BertTokenizer.from_pretrained(\n args.bert_dir + args.bert_file + '-vocab.txt')\n processor = BDProcessor(tokenizer, args.max_seq_length)\n reader = BDReader(args.batch_size)\n\n # Load/Write labels\n label_path = os.path.join(args.output_dir + 'labels.txt')\n print('Loading labels', file=settings.SHELL_OUT_FILE, flush=True)\n if os.path.exists(label_path):\n with open(label_path, 'r', encoding='utf-8') as f:\n contents = f.read()\n labels_dict = ast.literal_eval(contents)\n else:\n labels_dict = reader.get_labels()\n with open(label_path, 'w', encoding='utf-8') as f:\n pprint.pprint(labels_dict, stream=f)\n\n # Init\n best_acc = 0\n best_recall = 0\n best_fval = 0\n best_loss = 1e9\n\n if args.do_train:\n # Create model\n model, optimizer, criterion = make_model(args)\n # Load model if it exists\n file_name = args.output_dir + 'model_' + args.suffix\n step = 0\n if os.path.exists(file_name):\n state = torch.load(file_name)\n model.load_state_dict(state['state_dict'])\n step = state['step']\n if args.multi_gpu:\n model = nn.DataParallel(model)\n model = model.cuda() if settings.USE_CUDA else model\n\n train_examples = reader.get_train_examples(args.data_dir)\n total_train_examples = len(train_examples)\n loss_train = []\n for ep in range(args.epoch):\n print(\"######## Training ########\", file=settings.SHELL_OUT_FILE, flush=True)\n print('Epoch:', ep, file=settings.SHELL_OUT_FILE, 
flush=True)\n model.train()\n print(\"\\rTrain Step: {} Loss: {}\".format(step, 0), file=settings.SHELL_OUT_FILE, flush=True) # end='\\r',\n\n for i, example in enumerate(train_examples):\n step += 1\n\n inputs = processor.convert_examples_to_tensor(example)\n labels = processor.convert_labels_to_tensor(example.labels, labels_dict)\n prediction = model(*inputs)\n loss = criterion(prediction, labels)\n\n loss_train.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if step % 10 == 0:\n print(\"\\rTrain Step: {} Loss: {}\".format(step, loss.item()),\n file=settings.SHELL_OUT_FILE, flush=True) # end='\\r',\n if args.do_eval:\n print(\"\\n######## Evaluating ########\", file=settings.SHELL_OUT_FILE, flush=True)\n eval_examples = reader.get_dev_examples(args.data_dir)\n output_eval = []\n label_eval = []\n loss_eval_all = 0\n model.eval()\n total_eval_examples = len(eval_examples)\n with torch.no_grad():\n print(\"\\rEval Step: {}/{}\".format(0, total_eval_examples),\n end='\\r', file=settings.SHELL_OUT_FILE, flush=True)\n for i, example in enumerate(eval_examples):\n if (i + 1) % 100 == 0:\n print(\"\\rEval Step: {}/{}\".format(i + 1, total_eval_examples),\n end='\\r', file=settings.SHELL_OUT_FILE, flush=True)\n\n inputs = processor.convert_examples_to_tensor(example)\n labels = processor.convert_labels_to_tensor(example.labels, labels_dict)\n prediction = model(*inputs)\n\n loss = evaluate(criterion, prediction, labels,\n output_eval, label_eval)\n loss_eval_all += loss\n\n print(\"\\rEval Step: {}/{}\".format(total_eval_examples,\n total_eval_examples),\n file=settings.SHELL_OUT_FILE, flush=True)\n\n loss_eval_all /= total_eval_examples\n print('Loss:', loss_eval_all,\n file=settings.SHELL_OUT_FILE, flush=True)\n\n acc, recall, fval, _ = \\\n precision_recall_fscore_support(label_eval, output_eval, average='binary')\n print(\"Accuracy:\", acc, file=settings.SHELL_OUT_FILE, flush=True)\n print(\"Recall:\", recall, file=settings.SHELL_OUT_FILE, flush=True)\n print(\"F-score\", fval, file=settings.SHELL_OUT_FILE, flush=True)\n\n save_model = copy.deepcopy(model)\n save_model = save_model.module.cpu() if args.multi_gpu \\\n else save_model.cpu()\n\n prefix = args.output_dir + 'model_'\n # save last model\n save_best_model(model, prefix, 'last', step)\n\n # save model with best accuracy on dev set\n if acc > best_acc:\n best_acc = acc\n save_best_model(model, prefix, 'acc', step)\n # save model with best recall on dev set\n if recall > best_recall:\n best_recall = recall\n save_best_model(model, prefix, 'recall', step)\n # save model with best f1-score on dev set\n if fval > best_fval:\n best_fval = fval\n save_best_model(model, prefix, 'fval', step)\n # save model with best loss on dev set\n if loss_eval_all < best_loss:\n best_loss = loss_eval_all\n save_best_model(model, prefix, 'loss', step)\n print(file=settings.SHELL_OUT_FILE, flush=True)\n\n print(\"\\rTrain Step: {} Loss: {}\".format(step, sum(loss_train) / total_train_examples),\n file=settings.SHELL_OUT_FILE, flush=True)\n\n if args.do_predict:\n print(\"######### Testing ########\", file=settings.SHELL_OUT_FILE, flush=True)\n test_examples = reader.get_test_examples(args.data_dir)\n\n for suffix in ['last', 'acc', 'recall', 'fval', 'loss']:\n # Create model\n model, optimizer, criterion = make_model(args)\n # Load model if it exists\n file_name = args.output_dir + 'model_' + suffix\n\n if os.path.exists(file_name):\n state = torch.load(file_name)\n model.load_state_dict(state['state_dict'])\n else:\n 
continue\n if args.multi_gpu:\n model = nn.DataParallel(model)\n model = model.cuda() if settings.USE_CUDA else model\n\n loss_test = 0\n output_test = []\n label_test = []\n model.eval()\n total_test_examples = len(test_examples)\n with torch.no_grad():\n for i, example in enumerate(test_examples):\n if (i + 1) % 100 == 0:\n print(\"\\rTest Step: {}/{}\".format(i + 1, total_test_examples),\n end='\\r', file=settings.SHELL_OUT_FILE, flush=True)\n\n inputs = processor.convert_examples_to_tensor(example)\n labels = processor.convert_labels_to_tensor(example.labels, labels_dict)\n predictions = model(*inputs)\n\n loss = evaluate(criterion, predictions, labels,\n output_test, label_test)\n loss_test += loss\n\n print(\"\\n#### \" + suffix.upper() + \" ####\", file=settings.SHELL_OUT_FILE, flush=True)\n loss_test /= total_test_examples\n print(\"Loss:\", loss_test, file=settings.SHELL_OUT_FILE, flush=True)\n\n output_file_name = args.output_dir + 'result_' + suffix\n with open(output_file_name, 'w', encoding='utf-8') as f:\n pprint.pprint(output_test, f)\n\n acc, recall, fval, _ = precision_recall_fscore_support(label_test, output_test, average='binary')\n print(\"Accuracy:\", acc, file=settings.SHELL_OUT_FILE, flush=True)\n print(\"Recall:\", recall, file=settings.SHELL_OUT_FILE, flush=True)\n print(\"F-score\", fval, file=settings.SHELL_OUT_FILE, flush=True)\n print(file=settings.SHELL_OUT_FILE, flush=True)\n\n\nif __name__ == '__main__':\n run(args)\n","repo_name":"lrscy/Sentimental-Analysis","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"23541971048","text":"import random\n\ntry:\n num = int(input(\"Age: \"))\n y = str(input(\"Name: \"))\n z = \"Your quantity of chromosomes:\"\n f = random.randrange(1, 100)\n print(z, f, \"\\nThis is so much,\", y)\nexcept:\n print(\"! Invalid input !\")\n","repo_name":"ontonyy/python_codes","sub_path":"less_need/training_exercises/randomy.py","file_name":"randomy.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"73198662441","text":"Gladiator = genMonster(\"Gladiator\", (131, 6080), \"a Gladiator\")\nGladiator.setOutfit(78, 3, 79, 114)\nGladiator.setTargetChance(10)\nGladiator.bloodType(\"blood\")\nGladiator.setHealth(185)\nGladiator.setExperience(90)\nGladiator.setSpeed(200)\nGladiator.walkAround(1,1,1) # energy, fire, poison\nGladiator.setBehavior(summonable=0, hostile=1, illusionable=0, convinceable=470, pushable=1, pushItems=0, pushCreatures=0, targetDistance=1, runOnHealth=18)\nGladiator.voices(\"You are no match for me!\", \"Feel my prowess.\", \"Take this!\")\nGladiator.setImmunity(0,0,0) # paralyze, invisible, lifedrain\nGladiator.setDefense(15, fire=1.0, earth=1.0, energy=1.0, ice=1.0, holy=0.9, death=1.05, physical=0.95, drown=1.0)\nGladiator.regMelee(90)\nGladiator.loot( (\"plate shield\", 9.25), (\"mace\", 10.75), (\"chain helmet\", 4.75), (2148, 100, 28), (\"sword\", 10.75), (\"meat\", 20.5), (\"brass armor\", 1.75), (\"steel shield\", 0.75), (\"iron helmet\", 0.25), (\"belted cape\", 0.5) )","repo_name":"novasdream/PyOT","sub_path":"data/monsters/Humans/Outlaws/Gladiator.py","file_name":"Gladiator.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"40521380176","text":"import os\r\nfrom turtle import shape\r\nimport pandas as pd\r\nimport wget\r\nimport csv\r\nimport numpy as np\r\nimport cv2\r\nimport tqdm\r\n\r\ndef GaussianMask(sizeX,sizeY,sigma=33,center = None, fix =1):\r\n '''\r\n this function is a gussianmask:\r\n @param: sizeX: the img x size\r\n sizeY: the img y size\r\n sigma: sigma for gaussen\r\n center: gaussian mean\r\n fix: gaussian max\r\n @return: mask of gaussian\r\n '''\r\n x = np.arange(0, sizeX, 1, float)\r\n y = np.arange(0, sizeY, 1, float)\r\n x, y = np.meshgrid(x,y) \r\n if center is None:\r\n x0 = sizeX // 2\r\n y0 = sizeY // 2\r\n else:\r\n if np.isnan(center[0])==False and np.isnan(center[1])==False: \r\n x0 = center[0]\r\n y0 = center[1] \r\n else:\r\n return np.zeros((sizeY,sizeX))\r\n\r\n return fix*np.exp(-4*np.log(2) * ((x-x0)**2 + (y-y0)**2) / sigma**2)\r\n\r\n\r\n\r\ndef downloadCSV(settingfolder,csv_folder,prefix,destination):\r\n '''\r\n Use the fuc build the orignal snapshot download url and download them\r\n @Author: Ciheng Zhang\r\n @params:\r\n settingfolder: the folder save some settingfiles(relationship between web_id and study_id)\r\n csv_folder: the folder include all the csv file(F_Ux_Sx.csv)\r\n prefix: source url prefix of eyecedo\r\n destination: which folder aims to save orignal figures\r\n @return: null\r\n '''\r\n settings = {}\r\n with open(settingfolder + 'webs.csv', 'r') as csvFile:\r\n csvsettings = csv.reader(csvFile)\r\n settings = {row[1]:row[0] for row in csvsettings}\r\n print(settings)\r\n \r\n fileList = os.listdir(csv_folder)\r\n allDict = {}\r\n for iter in fileList:\r\n if iter.endswith(\".csv\"):\r\n print(iter)\r\n downloadList = []\r\n df = pd.read_csv(csv_folder + iter)\r\n if len(df) > 1:\r\n for i in range(len(df)):\r\n iterStudyId = settings[str(df[\"web_id\"][i])]\r\n iterUrl = prefix + \"/study_\" + str(iterStudyId) + \"/web_\" + str(df[\"web_id\"][i]) + \"/\" + str(df[\"screenshot\"][i])\r\n if iterUrl not in downloadList:\r\n downloadList.append(iterUrl)\r\n # print(\"\\033[1;35m start dwonload from {}\\033[0m\".format(iterUrl))\r\n allDict.update({iter:downloadList}) \r\n # print(allDict)\r\n for key in allDict:\r\n for iter in allDict[key]:\r\n # print(destination + '/' + key.split('.')[0] + '-' + iter.split('/')[-1])\r\n wget.download(iter,destination + '/' + key.split('.')[0] + '-' + iter.split('/')[-1])\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\ndef plotHeatmap(path, alpha = 0.5, threshold = 10):\r\n '''\r\n Use this function to draw the eye heatmap\r\n @param: \r\n path: the root folder where include the cvsfiles,\r\n (datafolder named orignal for jpgs, \r\n csv folder for csvfiles named Fixations, \r\n setting folder for webs.csv which is export from sql include the relation bewteen study_id and web_id)\r\n alpha: merge rate imgfile and heatmap\r\n threshold: heatmap threshold(0-255)\r\n @return heatmap\r\n '''\r\n print(\"\\033[1;35m start heatmap generation!\\033[0m\")\r\n imgNameList = []\r\n datapath = path + '/orignal/'\r\n csvpath = path + '/Fixations/'\r\n savePath = path + '/label/'\r\n for iter in os.listdir(datapath):\r\n if iter.endswith('.jpg'):\r\n imgNameList.append(iter)\r\n # 这里要加上读出文件LIST\r\n\r\n for iterImg in tqdm.tqdm(imgNameList):\r\n # 这里读取长宽\r\n # 判断是不是已经处理完成\r\n judgName = iterImg.split('.')[0] + '_pureheat.png' \r\n if judgName not in os.listdir(savePath):\r\n\r\n img = cv2.imread(datapath + iterImg)\r\n size = img.shape\r\n w = size[1]\r\n h = size[0]\r\n\r\n heatmap = np.zeros((h,w),np.float32)\r\n\r\n # 这里要改成需要的文件名字\r\n 
csvName = os.path.splitext(iterImg)[0]\r\n csvName = csvName.split('-')[0]\r\n screenshotName = os.path.splitext(iterImg)[0].split('-')[1] + '.jpg'\r\n csvName = csvName + '.csv'\r\n df = pd.read_csv(csvpath + csvName)\r\n\r\n # print(df[df['screenshot'] == screenshotName])\r\n maxDuration = int(df[df['screenshot'] == screenshotName]['duration_x'].max()) \r\n minDuration = int(df[df['screenshot'] == screenshotName]['duration_x'].min()) \r\n # maxDuration = int(df['duration_x'].max()) \r\n # minDuration = int(df['duration_x'].min()) \r\n\r\n for index,row in df.iterrows():\r\n if row['screenshot'] == screenshotName:\r\n xPos = int(row['x']) + int(row['scrollPositionX'])\r\n yPos = int(row['y']) + int(row['scrollPositionY'])\r\n duration = (int(row['duration_x']) - minDuration + 0.1)/(maxDuration - minDuration + 0.1)\r\n heatmap += GaussianMask(w, h, 100 * duration,(xPos,yPos),1) \r\n\r\n heatmap /= np.amax(heatmap)\r\n heatmap *= 255\r\n heatmap = heatmap.astype('uint8')\r\n\r\n heatmap_color = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\r\n\r\n mask = np.where(heatmap<=threshold, 1, 0)\r\n mask = np.reshape(mask, (h, w, 1))\r\n mask = np.repeat(mask, 3, axis=2)\r\n\r\n # Marge images\r\n marge = img*mask + heatmap_color*(1-mask)\r\n marge = marge.astype(\"uint8\")\r\n # heatmap = cv2.applyColorMap(heatmap,cv2.COLORMAP_JET)\r\n # cv2.imshow('demo',heatmap)\r\n marge = cv2.addWeighted(img, 1-alpha, marge,alpha,0)\r\n\r\n saveName = iterImg.split('.')[0]\r\n # print(\"saveName:\"+saveName)\r\n\r\n pureheatSaveName = savePath + saveName + '_pureheat.png'\r\n heatmapSaveName = savePath + saveName + '_heatmap.png'\r\n # cutPhotoSaveName = savePath + '/gaze-image/' + saveName + '_cut.png'\r\n \r\n heatmap = cv2.applyColorMap(heatmap,cv2.COLORMAP_JET)\r\n # cv2.imshow(\"pureheat\", heatmap)\r\n # cv2.waitKey(0)\r\n # cv2.imshow(\"heatmap\", marge)\r\n # cv2.waitKey(0)\r\n \r\n cv2.imwrite(pureheatSaveName,heatmap)\r\n cv2.imwrite(heatmapSaveName,marge)\r\n # cv2.imwrite(cutPhotoSaveName,img)\r\n \r\n print(\"\\033[1;35m generation finished!!\\033[0m\")\r\n\r\n \r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n _schema = \"https://eyevido.de/portal/img/user_uploads/user_913577351/study_[study_id]/web_[web_id]/[ table “web_results” column “screenshot”]\"\r\n _prefix = \"https://eyevido.de/portal/img/user_uploads/user_913577351\"\r\n _csv_folder = \"E:/My_MA/EyeVido/Fixations/\"\r\n _setting_folder = \"E:/My_MA/EyeVido/Settings/\"\r\n _destination_foler = \"E:/My_MA/EyeVido/orignal/\"\r\n _path = \"E:/My_MA/EyeVido/\"\r\n downloadCSV(settingfolder=_setting_folder,csv_folder= _csv_folder, prefix=_prefix, destination=_destination_foler)\r\n plotHeatmap(path=_path)\r\n\r\n","repo_name":"ZackCHZhang/WebToGaze","sub_path":"utils_function/eyevedo_data_transform/downloadSource.py","file_name":"downloadSource.py","file_ext":"py","file_size_in_byte":7207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37769679721","text":"# -*- coding: iso-8859-1 -*-\r\n# PFT_Channel_List - Philips Flat TV Series 5500 Channel List Sort via CSV\r\n# Copyright (C) 2019 https://github.com/monkeymia/PFT_Channel_List\r\n# \r\n# This program is free software: you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n# \r\n# You should have received a copy of the GNU General Public License\r\n# along with this program. If not, see .\r\n\r\nfrom __future__ import print_function\r\nimport string\r\nimport sys\r\nimport os\r\nimport os.path\r\nimport xml.etree.ElementTree as ET\r\nimport Mixin_CSV_TXT_XML\r\n\r\nclass Debug_XML (Mixin_CSV_TXT_XML.Mixin_CSV_TXT_XML) : \r\n\r\n def __call__ (self) :\r\n cwd = os.getcwd ()\r\n for fname in os.listdir (cwd) : \r\n if fname.lower ().endswith (\".xml\") : \r\n tree = self.xml_read_file \\\r\n (os.path.join (cwd, fname))\r\n text = self.convert (tree)\r\n self.txt_write_file \\\r\n ( os.path.join (cwd, \"%s.debug\" % fname)\r\n , text\r\n )\r\n \r\n # end def \r\n\r\n def convert (self, tree) :\r\n e0 = tree.getroot ()\r\n lines = []\r\n indent = \"\"\r\n lines.append (\"%s%s\" % (indent, e0.tag))\r\n indent = \" \" * 1\r\n for k, v in sorted (e0.attrib.items ()) :\r\n lines.append (\"%s%s='%s'\" % (indent, k, v))\r\n for e1 in e0 : \r\n indent = \" \" * 2\r\n lines.append (\"%s%s\" % (indent, e1.tag))\r\n indent = \" \" * 3\r\n for k, v in sorted (e1.attrib.items ()) :\r\n lines.append (\"%s%s='%s'\" % (indent, k, v))\r\n for e2 in e1 : \r\n indent = \" \" * 4\r\n lines.append (\"%s%s\" % (indent, e2.tag))\r\n indent = \" \" * 5\r\n for k, v in sorted (e2.attrib.items ()) :\r\n lines.append (\"%s%s='%s'\" % (indent, k, v))\r\n for e3 in e2 : \r\n indent = \" \" * 6\r\n lines.append (\"%s%s\" % (indent, e3.tag))\r\n indent = \" \" * 7\r\n for k, v in sorted (e3.attrib.items ()) :\r\n lines.append (\"%s%s='%s'\" % (indent, k, v))\r\n return \"\\n\".join (lines)\r\n # end def \r\n\r\n# end class\r\n\r\nif __name__ == \"__main__\" : \r\n ce = Debug_XML ()\r\n ce ()\r\n# END\r\n\r\n","repo_name":"monkeymia/PFT_Channel_List","sub_path":"Debug_XML.py","file_name":"Debug_XML.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"25579299039","text":"# https://atcoder.jp/contests/abc105/tasks/abc105_d\n\n\nimport sys\nread = sys.stdin.readline\n\n\ndef read_ints():\n return list(map(int, read().split()))\n\n\nclass cumsum1d: # 一次元累積和クラス\n def __init__(self, ls: list):\n '''\n 1次元リストを受け取る\n '''\n from itertools import accumulate\n self.ls_accum = [0] + list(accumulate(ls))\n\n def total(self, i, j):\n # もとの配列lsにおける[i,j)の中合計\n return self.ls_accum[j] - self.ls_accum[i]\n\n\nfrom collections import Counter\nN, M = read_ints()\nA = read_ints()\n\n# 素直にl,rを全探索はできない\n# 累積和を割れば0sumrangesの亜種\nA_cum = cumsum1d(A)\ntmp = []\nfor a in A_cum.ls_accum:\n tmp.append(a % M)\ncnts = Counter(tmp)\n\nans = 0\nfor c in cnts.values():\n ans += c * (c - 1) // 2\n\nprint(ans)\n","repo_name":"masakiaota/kyoupuro","sub_path":"practice/D_ABC/abc105_d.py","file_name":"abc105_d.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"27056092902","text":"#Apresentar os resultados de uma tabuada de multiplicar (de 1 até 10) de um número qualquer.\r\n\r\nnumero = int(input('Informe um número para exibir sua tabuada: '))\r\n\r\ncontador = 0\r\nmultiplicador = 0\r\n\r\nwhile contador < 11:\r\n print('{} X {} = {}'.format(numero, multiplicador, numero * contador))\r\n multiplicador = multiplicador + 1\r\n contador = contador + 1","repo_name":"RenaanRabelo/Logica-de-programacao","sub_path":"Manzano pg 46/exerA.py","file_name":"exerA.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"39974344628","text":"from typing import Dict, List\n\nimport requests\n\n\nclass RedditMeme:\n RED_ENDPOINT = \"https://www.reddit.com/r/memes/search.json\"\n RANDOM_ENDPOINT = \"https://www.reddit.com/r/memes/random.json\"\n status_code = None\n\n def _parse(self, query: str = None, limit: int = 1, sort: str = None, endpoint: str = None) -> List[Dict]:\n try:\n headers = {\n 'User-agent': 'your bot 0.1'\n }\n payload = {\n \"limit\": limit,\n \"sort\": sort\n }\n if query != 'random':\n payload[\"q\"] = query.replace(\"-\", \"_\").strip()\n res = self.query(endpoint=endpoint, headers=headers, payload=payload)\n data = []\n if res:\n for i in res:\n data.append({\n \"title\": i[\"data\"][\"title\"],\n \"url\": i[\"data\"][\"url\"],\n \"score\": i[\"data\"][\"score\"],\n \"author\": i[\"data\"][\"author_fullname\"],\n \"type\": i[\"data\"][\"post_hint\"]\n })\n return data[:limit] if len(data) > 0 else [{}]\n except Exception as error:\n print(error)\n raise Exception(\"Problem with downloading meme\")\n\n def query(self, endpoint: str, headers: Dict, payload: Dict) -> List[Dict]:\n r = requests.get(url=endpoint, headers=headers, params=payload)\n self.status_code = r.status_code\n if r.status_code == 200:\n res = r.json()[\"data\"][\"children\"] if \"data\" in r.json() else r.json()[0][\"data\"][\"children\"]\n return res\n return [{}]\n\n @property\n def hot(self, query: str = \"dark humor\", limit: int = 1):\n return self._parse(query=query, limit=limit, sort=\"hot\", endpoint=self.RED_ENDPOINT)\n\n @property\n def fresh(self, query: str = \"dark humor\", limit: int = 1):\n return self._parse(query=query, limit=limit, sort=\"new\", endpoint=self.RED_ENDPOINT)\n\n @property\n def random(self, limit: int = 1):\n return self._parse(query=\"random\", limit=limit, sort=\"random\", endpoint=self.RANDOM_ENDPOINT)\n\n def by_query(self, query: str = \"\", limit: int = 1):\n return self._parse(query=query, limit=limit, sort=\"\", endpoint=self.RED_ENDPOINT)\n","repo_name":"kengenal/echis","sub_path":"echis/modules/meme.py","file_name":"meme.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33329146080","text":"from apscheduler.schedulers.background import BackgroundScheduler\nfrom .jobs import some_task\ndef start_jobs():\n\n print(\"starting scheduled cron jobs\")\n \n scheduler = BackgroundScheduler()\n \n #Set cron to runs every 1 min.\n cron_job = {'month': '*', 'day': '*', 'hour': '*', 'minute':'*/1'}\n \n #Add our task to scheduler.\n scheduler.add_job(some_task, 'cron', **cron_job)\n \n\n#And finally start. \n scheduler.start()","repo_name":"mrfinesse47/django-cron-every-minute","sub_path":"my_app/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30739625745","text":"import numpy, os.path\nnp = numpy\nimport warnings\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import classification_report, f1_score\nfrom sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import OneHotEncoder, normalize\nimport scipy.stats\nfrom timeit import default_timer as timer\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import SGD\nfrom keras.utils import to_categorical\nfrom tensorflow import nn\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom collections import Counter\nfrom imblearn.over_sampling import RandomOverSampler\n\n\ndef load_abalone(path='abalone'):\n data = numpy.genfromtxt(os.path.join(path, 'abalone.data'), delimiter=',', dtype=str)\n X = data[:, :-1]\n Y = data[:,-1].reshape(-1,1)\n # One hot encoding for column sex\n sex_col = X[:, 0]\n ohe = OneHotEncoder(sparse=False)\n encoded_X = ohe.fit_transform(sex_col.reshape(-1, 1))\n # Concatenate encoded columns to original features\n X = np.delete(X, 0, 1)\n X = np.column_stack((encoded_X, X))\n\n return X.astype(float), Y.astype(float)\n\n\ndef grid_search_cv(estimator, param_grid, score, train_X, train_Y, test_X, test_Y, nn=False):\n # Grid Search Cross Validation for hyperparameter tuning\n # https://scikit-learn.org/stable/auto_examples/model_selection/plot_grid_search_digits.html\n start = timer()\n\n if score is None:\n print(\"# Tuning hyper-parameters with default scoring: \")\n else:\n print(\"# Tuning hyper-parameters for %s\" % score)\n gscv = GridSearchCV(estimator=estimator, param_grid=param_grid, scoring=score, n_jobs=-1)\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n gscv.fit(train_X, train_Y)\n\n print(\"Grid scores on development set: for \")\n means = gscv.cv_results_['mean_test_score']\n stds = gscv.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, gscv.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, params))\n print()\n print(\"Best parameters set found on development set:\", gscv.best_params_)\n\n if not nn:\n print(\"\\nDetailed classification report:\")\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n y_true, y_pred = test_Y, gscv.predict(test_X)\n print(classification_report(y_true, y_pred, zero_division=0))\n end = timer()\n print(\"Time elapsed in Grid Search CV for hyperparameter tuning: %.2f seconds\" % (end - start))\n return gscv.best_estimator_\n\n\ndef evaluate(model, test_features, test_labels):\n # Evaluate model performances (mean absolute percentage error)\n # https://towardsdatascience.com/hyperparameter-tuning-the-random-forest-in-python-using-scikit-learn-28d2aa77dd74\n predictions = model.predict(test_features)\n score = f1_score(test_labels, predictions, average='weighted')\n print('F1 Score = {:0.4f}.'.format(score))\n return score\n\n\ndef mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n print(\"Mean: %0.3f (+/-%0.03f)\" % (m, h))\n print(\"Confidence interval (95 percent): (%0.03f, %0.03f)\" % (m + h, m - h))\n\n\ndef base_best_eval(base_model, best_model, test_X, test_Y):\n # Evaluate model performances before and after tuning hyperparameters\n # (compare base model with 
best GridSearchCV model)\n print(\"\\nBase model performance: \")\n base_accuracy = evaluate(base_model, test_X, test_Y)\n print(\"Tuned model performance: \")\n gscv_accuracy = evaluate(best_model, test_X, test_Y)\n print('Improvement of {:0.2f}% after tuning hyperparameters.'.format(\n 100 * (gscv_accuracy - base_accuracy) / base_accuracy))\n\n\ndef random_forest(train_X, test_X, train_Y, test_Y):\n # Get Random Forest with Bagging model\n rf = RandomForestClassifier(random_state=42)\n # Fit model on training data\n rf.fit(train_X, train_Y)\n\n # Tune hyperparameters with GridSearchCV\n param = {'n_estimators': [100, 115, 130, 145], 'min_samples_leaf': [1, 2, 3],\n 'min_samples_split': [2, 3, 4], 'max_depth': [25, 30, 35]}\n score = 'f1_weighted'\n best_rf = grid_search_cv(rf, param, score, train_X, train_Y, test_X, test_Y)\n\n # Evaluate model performances before and after tuning hyperparameters\n base_best_eval(rf, best_rf, test_X, test_Y)\n\n # k-fold Cross Validation for Random Forest with Bagging after hyperparameter tuning\n cv_scores = []\n for i in range(10):\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n s = cross_val_score(best_rf, test_X, test_Y, cv=5, scoring='f1_weighted')\n cv_scores = np.append(cv_scores, s)\n\n # Performance of Random Forest with Bagging\n print(\"\\nPerformance of Random Forest with Bagging: \")\n mean_confidence_interval(cv_scores)\n\n\ndef support_vector_machines(train_X, test_X, train_Y, test_Y):\n base_svm = SVC(kernel='rbf', cache_size=1900)\n linear_svm = SVC(kernel='linear', cache_size=1900)\n\n # Normalize data\n train_X = normalize(train_X)\n test_X = normalize(test_X)\n\n # Fit model on training data\n linear_svm.fit(train_X, train_Y)\n base_svm.fit(train_X, train_Y)\n\n # Tune hyperparameters with GridSearchCV\n param = {'C': np.logspace(-2, 3, 6), 'kernel': ['rbf', 'poly', 'sigmoid'],\n 'gamma': np.logspace(-2, 1, 6)}\n score = 'f1_weighted'\n best_svm = grid_search_cv(base_svm, param, score, train_X, train_Y, test_X, test_Y)\n\n # Evaluate model performances before and after tuning hyperparameters\n base_best_eval(base_svm, best_svm, test_X, test_Y)\n\n # k-fold Cross Validation for Random Forest with Bagging after hyperparameter tuning\n cv_scores = []\n for i in range(10):\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n s = cross_val_score(best_svm, test_X, test_Y, cv=5, scoring='f1_weighted')\n cv_scores = np.append(cv_scores, s)\n\n # Performance of Random Forest with Bagging\n print(\"\\nPerformance of Support Vector Machine: \")\n mean_confidence_interval(cv_scores)\n\n print(\"Base SVM Train Score: \", base_svm.score(test_X, test_Y))\n print(\"Best SVM Train Score: \", best_svm.score(test_X, test_Y))\n\n\ndef create_nn_model(epochs, learning_rate=0.01, momentum=0.0, init_mode='uniform'):\n # Define model\n model = Sequential()\n model.add(Dense(64, kernel_initializer=init_mode, activation=nn.relu, input_dim=10))\n model.add(Dropout(0.1))\n model.add(Dense(64, kernel_initializer=init_mode, activation=nn.relu))\n model.add(Dense(28, kernel_initializer=init_mode, activation=nn.softmax))\n # Define optimizer\n decay_rate = learning_rate / epochs\n sgd = SGD(lr=learning_rate, momentum=momentum, decay=decay_rate, nesterov=False)\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n return model\n\n\ndef neural_network(train_X, test_X, train_Y, test_Y):\n # https://towardsdatascience.com/simple-guide-to-hyperparameter-tuning-in-neural-networks-3fe03dad8594\n # 
Normalize data\n train_X = normalize(train_X)\n test_X = normalize(test_X)\n input_dim = train_X.shape[1]\n\n # Convert class vectors to binary class matrices\n num_classes = 28\n train_Y = to_categorical(train_Y, num_classes)\n test_Y = to_categorical(test_Y, num_classes)\n\n epochs = 30\n batch_size = input_dim\n\n # Create model\n nn = KerasClassifier(build_fn=create_nn_model, epochs=epochs, batch_size=batch_size, verbose=0)\n nn.fit(train_X, train_Y, verbose=0)\n\n # Tune hyperparameters with GridSearchCV\n param = {'epochs': [50, 60, 70], 'batch_size': [2**i for i in range(5, 8, 1)],\n 'learning_rate': [0.01, 0.1, 1], 'momentum': [0.8, 0.9, 1.0]}\n score = None\n best_nn = grid_search_cv(nn, param, score, train_X, train_Y, test_X, test_Y, nn=True)\n best_nn.fit(train_X, train_Y, verbose=0)\n\n # Classification report (precision, recall, f1-score)\n print(\"\\nDetailed classification report:\")\n print(\"The model is trained on the full development set.\")\n print(\"The scores are computed on the full evaluation set.\")\n best_y_pred = best_nn.predict(test_X, verbose=0)\n y_true = np.argmax(test_Y, axis=1)\n print(classification_report(y_true, best_y_pred, zero_division=0))\n\n # Evaluate model performances before and after tuning hyperparameters\n # (compare base model with best GridSearchCV model)\n base_y_pred = nn.predict(test_X, verbose=0)\n print(\"\\nBase model performance: \")\n base_accuracy = f1_score(y_true, base_y_pred, average='weighted')\n print('F1 Score = {:0.4f}.'.format(base_accuracy))\n print(\"Tuned model performance: \")\n gscv_accuracy = f1_score(y_true, best_y_pred, average='weighted')\n print('F1 Score = {:0.4f}.'.format(gscv_accuracy))\n print('Improvement of {:0.2f}% after tuning hyperparameters.'.format(\n 100 * (gscv_accuracy - base_accuracy) / base_accuracy))\n\n # k-fold Cross Validation for Random Forest with Bagging after hyperparameter tuning\n cv_scores = []\n for i in range(10):\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n s = cross_val_score(estimator=best_nn, X=test_X, y=y_true, cv=5, scoring='f1_weighted')\n cv_scores = np.append(cv_scores, s)\n\n # Performance of Neural Network\n print(\"\\nPerformance of Neural Network: \")\n mean_confidence_interval(cv_scores)\n\n\ndef main():\n np.random.seed(42)\n X, y = load_abalone()\n for ele in np.nditer(y, op_flags=['readwrite']):\n if ele == 29:\n ele[...] = ele - 2\n else:\n ele[...] 
= ele - 1\n\n # summarize class distribution\n y_1d = np.reshape(y, (y.shape[0],))\n print('Original dataset shape %s' % Counter(y_1d))\n\n # define pipeline\n oversample = RandomOverSampler(random_state=42)\n # transform the dataset\n X, y = oversample.fit_resample(X, y)\n\n num_data = 1000\n data = np.column_stack((X, y))\n print(data.shape)\n np.random.shuffle(data)\n data = data[:num_data, :]\n print(data.shape)\n X = data[:, :-1]\n y = data[:, -1]\n\n y_1d = np.reshape(y, (y.shape[0],))\n\n # summarize the new class distribution\n print('Resampled dataset shape %s' % Counter(y_1d))\n\n # Split dataset into training and testing sets\n train_X, test_X, train_Y, test_Y = train_test_split(X, y, test_size=0.2, random_state=42)\n\n # Random Forest with Bagging\n rf_start = timer()\n random_forest(train_X, test_X, train_Y, test_Y)\n rf_end = timer()\n print(\"Time elapsed in Random Forest with Bagging: %.2f seconds\" % (rf_end - rf_start))\n\n # Support Vector Machines\n svm_start = timer()\n support_vector_machines(train_X, test_X, train_Y, test_Y)\n svm_end = timer()\n print(\"Time elapsed in Support Vector Machines: %.2f seconds\" % (svm_end - svm_start))\n\n # Neural Network\n nn_start = timer()\n neural_network(train_X, test_X, train_Y, test_Y)\n nn_end = timer()\n print(\"Time elapsed in Neural Network: %.2f seconds\" % (nn_end - nn_start))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"stephanieeechang/ml-final-project","sub_path":"fp.py","file_name":"fp.py","file_ext":"py","file_size_in_byte":11334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"25638056711","text":"Score = 0\n\nQuestions = {1 : \"What is the full form of CPU ?\",\n 2 : \"What is the full form of GPU ?\",\n 3 : \"What is the full form of MBR ?\",\n 4 : \"What is the full form of GPT ?\",\n 5 : \"What is the full form of IIT ?\"}\n\nwhile True:\n RULE = \"\"\"\n You have a score of 0 you have to answer the questions to increase your points.\n If you hit the hiscore of 4 your score will be stored in a hiscore.txt file.\n\n Good luck!\n \"\"\"\n print(RULE)\n StartGame = input('Please Enter to continue: ')\n\n \n QuesDic = {1 : \"What is the full form of CPU ?\",\n 2 : \"What is the full form of GPU ?\",\n 3 : \"What is the full form of MBR ?\",\n 4 : \"What is the full form of GPT ?\",\n 5 : \"What is the full form of IIT ?\"}\n\n print(f\"Q.1) {QuesDic[1]}\")\n\n Ans1 = input(\"Enter you answer here: \")\n Ans1 = Ans1.lower()\n\n if Ans1 == 'central processing unit':\n print(\"Your answer is correct!\")\n Score += 1\n print(\"Your score is increased by 1\")\n\n else :\n print(\"Your answer is wrong!\")\n\n print(f\"Q.2) {QuesDic[2]}\")\n\n Ans2 = input(\"Enter your answer here: \")\n Ans2 = Ans2.lower()\n\n if Ans2 == 'graphic processing unit' or Ans2 == 'graphics processing unit':\n print(\"Your answer is correct!\")\n Score += 1\n print(\"Your score is increased by 1\")\n\n else :\n print(\"Your answer is wrong!\")\n\n print(f\"Q.3) {QuesDic[3]}\")\n\n Ans3 = input(\"Enter your answer here: \")\n Ans3 = Ans3.lower()\n\n if Ans3 == 'master boot record':\n print(\"Your answer is correct!\")\n Score += 1\n print(\"Your score is increased by 1\")\n\n else:\n print(\"Your answer is wrong!\")\n\n print(f\"Q.4) {QuesDic[4]}\")\n\n Ans4 = input(\"Enter your answer here: \")\n Ans4 = Ans4.lower()\n\n if Ans4 == 'guid partition table':\n print(\"Your answer is correct!\")\n Score += 1\n print(\"Your score is increased by 1\")\n\n else:\n print(\"Your answer is wrong!\")\n\n print(f\"Q.5) {QuesDic[5]}\")\n\n Ans5 = input(\"Enter your answer here: \")\n Ans5 = Ans5.lower()\n\n if Ans5 == 'indian institute of technology':\n print(\"Your answer is correct!\")\n Score += 1\n print(\"Your score is increased by 1\")\n\n else:\n print(\"Your answer is wrong\")\n\n print('Thanks for playing my game!\\nGame Made BY - Divyanshu Pawar\\nPlease see my other projects too this is my github: https://github.com/FrozenClue\\n')\n\n StartGameAgain = input(\"If you want to start the game again, Type yes or no: \")\n StartGameAgain = StartGameAgain.lower()\n\n if 'y' in StartGameAgain:\n continue\n\n else:\n break\n\n","repo_name":"Cool-PY/My-python-Mini-Projects","sub_path":"Quiz_Game/QUIZ_GAME.py","file_name":"QUIZ_GAME.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"11983953129","text":"import logging\nfrom json import loads\nfrom logging import Logger\nfrom typing import Any\nfrom typing import Union\n\nimport requests\nfrom requests import Response\n\n\ndef _warn_if_unsuccessful(logger: Logger, response: Response):\n if not response.ok:\n warning_msg = \"\"\"request to url: %s finished with non-ok response: %s %s %s\"\"\"\n logger.warning(warning_msg, response.url, response.status_code, response.headers, response.text)\n\n\ndef _get_response_content(response: Response) -> Any:\n content_type: str = str(response.headers.get('Content-Type'))\n if content_type.startswith('application/json'):\n return loads(response.text)\n elif content_type.startswith('text'):\n return response.text\n return response.content\n\n\nclass HttpClient:\n\n def __init__(self, base_url: str):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.session = requests.Session()\n self.base_url = base_url\n\n def request(self, method: str = 'GET', path: str = '', headers: dict = None, params: dict = None,\n data: bytes = None, json: Union[dict, list] = None) -> Any:\n url = self.base_url + path\n\n response = self.session.request(method, url, headers=headers, params=params, data=data, json=json)\n _warn_if_unsuccessful(self.logger, response)\n\n return _get_response_content(response)\n","repo_name":"bioker/pybioker","sub_path":"bioker/http/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3018578468","text":"import inspect\nfrom typing import Callable, Dict\n\nfrom volatility.cli.volshell import shellplugin\nfrom volatility.framework.configuration import requirements\n\n\nclass Volshell(shellplugin.Volshell):\n \"\"\"Shell environment to directly interact with a windows memory image\"\"\"\n\n @classmethod\n def get_requirements(cls):\n return (super().get_requirements() + [\n requirements.SymbolTableRequirement(name = \"nt_symbols\", description = \"Windows kernel symbols\"),\n requirements.IntRequirement(name = 'pid', description = \"Process ID\", optional = True)\n ])\n\n def list_processes(self):\n \"\"\"Lists all the processes in the primary layer\"\"\"\n\n # We only use the object factory to demonstrate how to use one\n layer_name = self.config['primary']\n kvo = self.context.memory[layer_name].config['kernel_virtual_offset']\n ntkrnlmp = self.context.module(self.config['nt_symbols'], layer_name = layer_name, offset = kvo)\n\n ps_aph_offset = ntkrnlmp.get_symbol(\"PsActiveProcessHead\").address\n list_entry = ntkrnlmp.object(type_name = \"_LIST_ENTRY\", offset = kvo + ps_aph_offset)\n\n # This is example code to demonstrate how to use symbol_space directly, rather than through a module:\n #\n # ```\n # reloff = self.context.symbol_space.get_type(\n # self.config['nt_symbols'] + constants.BANG + \"_EPROCESS\").relative_child_offset(\n # \"ActiveProcessLinks\")\n # ```\n #\n # Note: \"nt!_EPROCESS\" could have been used, but would rely on the \"nt\" symbol table not already\n # having been present. Strictly, the value of the requirement should be joined with the BANG character\n # defined in the constants file\n reloff = ntkrnlmp.get_type(\"_EPROCESS\").relative_child_offset(\"ActiveProcessLinks\")\n eproc = ntkrnlmp.object(type_name = \"_EPROCESS\", offset = list_entry.vol.offset - reloff)\n\n for proc in eproc.ActiveProcessLinks:\n yield proc\n\n def load_functions(self) -> Dict[str, Callable]:\n result = super().load_functions()\n result.update({'ps': lambda: list(self.list_processes())})\n return result\n\n def run(self, additional_locals = None):\n # Determine locals\n curframe = inspect.currentframe()\n\n # Provide some OS-agnostic convenience elements for ease\n layer_name = self.config['primary']\n kvo = self.context.memory[layer_name].config['kernel_virtual_offset']\n nt = self.context.module(self.config['nt_symbols'], layer_name = layer_name, offset = kvo)\n ps = lambda: list(self.list_processes())\n\n pid = self.config.get('pid', None)\n eproc = None\n if pid:\n for _x in ps():\n if _x.UniqueProcessId == pid:\n eproc = _x\n break\n\n return super().run(curframe.f_locals)\n","repo_name":"drkmrin78/volatility3","sub_path":"volatility/cli/volshell/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"5884286673","text":"from Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Data import IUPACData\nfrom pydna.common_sub_strings import common_sub_strings\n\n\nclass AmpliconSearch:\n def __init__(self):\n # Initialize the AmpliconSearch object with any necessary parameters\n self.primer_file = None \n #primer_dict = self.create_primer_dict(primer_file)\n #primer_deg_dict = self.create_deg_primer_dict(primer_dict)\n #return primer_deg_dict\n\n def create_deg_primer_dict(self, primer_file):\n self.primer_file = primer_file\n self.primer_deg_dict = {}\n \n self.primer_dict = self.create_primer_dict(primer_file)\n self.primer_items = iter(self.primer_dict.items())\n for primer_name, primer_seq in self.primer_items:\n self.primer_deg_dict[primer_name] = self.expand_degenerate_primers(primer_seq)\n return self.primer_deg_dict\n\n def create_primer_dict(self, primer_file):\n primer_dict = {}\n with open(primer_file, 'r') as file:\n for record in SeqIO.parse(file, 'fasta'):\n primer_name = record.id\n primer_sequence = str(record.seq)\n primer_dict[primer_name] = primer_sequence\n file.close()\n if len(primer_dict) % 2 != 0:\n raise ValueError(\"A primer in the provided file does not have a \"\n \"matching pair. Ensure each primer in the file has \"\n \"a corresponding forward and reverse primer.\")\n return primer_dict\n\n def degenerate_bases(self):\n \"\"\"Return a dictionary of degenerate bases and their expansions.\"\"\"\n # Filter out non-degenerate bases\n return {k: v for k, v in IUPACData.ambiguous_dna_values.items() if len(v) > 1}\n\n def expand_degenerate_primers(self, primer):\n \"\"\"Expand a degenerate primer into all possible sequences.\"\"\"\n bases = self.degenerate_bases()\n sequences = ['']\n\n for nucleotide in primer:\n if nucleotide in bases:\n sequences = [prefix + base for prefix in sequences for base in bases[nucleotide]]\n else:\n sequences = [prefix + nucleotide for prefix in sequences]\n return sequences\n\n\n def find_amplicons(self, sequence, primer_dict, maximum_length):\n \"\"\"Find amplicons given potentially degenerate primers.\"\"\"\n primer_items = iter(primer_dict.items())\n #sequence = sequence.upper() \n amplicons = []\n for primer_name1, primer_seq1 in primer_items:\n primer_name2, primer_seq2 = next(primer_items)\n primer_combo = primer_name1 + \"+\" + primer_name2\n primer_seq1_rc = [str(Seq(seq).reverse_complement()) for seq in primer_seq1]\n primer_seq2_rc = [str(Seq(seq).reverse_complement()) for seq in primer_seq2]\n sequence_str = str(sequence.seq)\n match_F_array= [common_sub_strings(F, sequence_str, limit=len(F)) for F in primer_seq1]\n match_R_array= [common_sub_strings(R, sequence_str, limit=len(R)) for R in primer_seq2]\n match_F_rc_array= [common_sub_strings(F, sequence_str, limit=len(F)) for F in primer_seq1_rc]\n match_R_rc_array= [common_sub_strings(R, sequence_str, limit=len(R)) for R in primer_seq2_rc]\n i=0\n for match_F in match_F_array:\n for f in match_F:\n for match_R_rc in match_R_rc_array:\n for r in match_R_rc: \n length = r[1] + r[2] - f[1]\n if length > 0 and length < maximum_length:\n barcode = sequence.id + \"_\" + primer_combo + \"_LEN\" + str(length) + \"_\" + str(i)\n amplicon = Seq(sequence_str[f[1]:r[1]+r[2]])\n amplicon_r = SeqRecord(amplicon,\n id = barcode,\n name = sequence.id)\n amplicons.append(amplicon_r)\n #amplicons[combo_name] = sequence_str[f[1]:r[1]+r[2]]\n i += 1\n i=0 \n for match_F_rc in match_F_rc_array:\n for f in match_F_rc:\n for match_R in 
match_R_array:\n for r in match_R:\n length = f[1] + f[2] - r[1]\n if length > 0 and length < maximum_length:\n barcode = sequence.id + \"_\" + primer_combo + \"_rev_LEN\" + str(length) + \"_\" + str(i)\n #amplicons[combo_name] = sequence_str[r[1]:f[1]+f[2]]\n amplicon = Seq(sequence_str[r[1]:f[1]+f[2]])\n amplicon_r = SeqRecord(amplicon,\n id = barcode,\n name = sequence.id)\n amplicons.append(amplicon_r)\n i += 1\n return amplicons\n\n\n\n ","repo_name":"philcharron-cfia/biogrinder","sub_path":"src/AmpliconSearch.py","file_name":"AmpliconSearch.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"30908608679","text":"import time\nimport sys\nimport os\nimport asyncio\nimport logging\n\n\nfrom enum import Enum\nfrom redis_clone.redis_parser import Parser, Protocol_2_Data_Types\nfrom redis_clone.response_builder import ResponseBuilder\n\nlogger = logging.getLogger(__name__)\n\nHOST = os.environ.get(\"REDIS_HOST\", \"0.0.0.0\")\nPORT = os.environ.get(\"REDIS_PORT\", 9999)\n\n\nclass Protocol_2_Commands(Enum):\n \"\"\"\n Some common redis commands\n \"\"\"\n\n SET = \"SET\"\n GET = \"GET\"\n DEL = \"DEL\"\n EXISTS = \"EXISTS\"\n INCR = \"INCR\"\n DECR = \"DECR\"\n PING = \"PING\"\n ECHO = \"ECHO\"\n \nclass ExpiryValue:\n def __init__(self, value, expiry_seconds=None, expiry_milliseconds=None, expiry_unix_timestamp_seconds=None, expiry_unix_timestamp_milliseconds=None) -> None:\n self.value = value\n self.expiry_seconds = time.time() + expiry_seconds if expiry_seconds else None\n self.expiry_milliseconds = time.time() * 1000 + expiry_milliseconds if expiry_milliseconds else None\n self.expiry_unix_timestamp_seconds = expiry_unix_timestamp_seconds\n self.expiry_unix_timestamp_milliseconds = expiry_unix_timestamp_milliseconds\n\n def get_value(self):\n if self.expiry_milliseconds:\n if self.expiry_milliseconds < int(time.time() * 1000):\n return None\n elif self.expiry_seconds:\n if self.expiry_seconds < int(time.time()):\n return None\n elif self.expiry_unix_timestamp_milliseconds:\n if self.expiry_unix_timestamp_milliseconds < int(time.time() * 1000):\n return None\n elif self.expiry_unix_timestamp_seconds:\n if self.expiry_unix_timestamp_seconds < int(time.time()):\n return None\n\n return self.value\n \n def get_expiry_seconds(self):\n return self.expiry_seconds\n\n def get_expiry_milliseconds(self):\n return self.expiry_milliseconds\n\n def get_expiry_unix_timestamp_seconds(self):\n return self.expiry_unix_timestamp_seconds\n \n def get_expiry_unix_timestamp_milliseconds(self):\n return self.expiry_unix_timestamp_milliseconds\n\n\n\nclass RedisServer:\n def __init__(self, host, port) -> None:\n self.host = host\n self.port = port\n self.parser = Parser(protocol_version=2)\n self.response_builder = ResponseBuilder(protocol_version=2)\n self.data_store = {}\n self.running = False\n\n async def start(self):\n logger.info(\"Starting server...\")\n self.server = await asyncio.start_server(\n self._handle_connection, self.host, self.port\n )\n async with self.server:\n await self.server.serve_forever()\n\n async def _handle_connection(self, reader, writer):\n addr = writer.get_extra_info(\"peername\")\n logger.info(f\"Connection established with {addr}\")\n\n while True:\n data = await reader.read(1024)\n if not data:\n break\n\n logger.info(f\"Received data: {data}\")\n command_name, command_args = self.parser.parse_client_request(data)\n logger.info(f\"Command name: {command_name}\")\n logger.info(f\"Command args: {command_args}\")\n response = self._process_command(command_name, command_args)\n logger.info(f\"Response: {response}\")\n writer.write(response)\n await writer.drain()\n\n logger.info(f\"Connection closed with {addr}\")\n writer.close()\n await writer.wait_closed()\n\n def _process_command(self, command_name, command_args) -> bytes:\n # Convert command name to uppercase\n command_name = command_name.upper()\n if command_name == Protocol_2_Commands.PING.value:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.SIMPLE_STRING, \"PONG\"\n )\n elif command_name == Protocol_2_Commands.ECHO.value:\n # Echo command returns the same string\n if 
len(command_args) == 0:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR wrong number of arguments for 'ECHO' command\",\n )\n return self.response_builder.build_response(\n Protocol_2_Data_Types.SIMPLE_STRING, \" \".join(command_args)\n )\n elif command_name == Protocol_2_Commands.SET.value:\n return self._handle_set_command(command_args)\n\n elif command_name == Protocol_2_Commands.GET.value:\n # Only 1 argument required key\n if len(command_args) != 1:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR wrong number of arguments for 'GET' command\",\n )\n key = command_args[0]\n value = None\n if key not in self.data_store:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING\n )\n else:\n # Check the key is of type ExpiryValue\n # This is to ensure uniformity in the when setting and getting values\n if isinstance(self.data_store[key], ExpiryValue):\n value = self.data_store[key].get_value()\n\n if value is None:\n self._delete_expired_key(key)\n \n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING, value\n )\n \n elif command_name == Protocol_2_Commands.DEL.value:\n # Minimum 1 argument required key\n if len(command_args) < 1:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR wrong number of arguments for 'DEL' command\",\n )\n \n keys_deleted = 0\n for key in command_args:\n if key in self.data_store:\n del self.data_store[key]\n keys_deleted += 1 \n \n return self.response_builder.build_response(\n Protocol_2_Data_Types.INTEGER, keys_deleted\n )\n \n\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR, \"ERR unknown command '{}'\".format(command_name)\n )\n \n def _handle_set_command(self, command_args):\n # Minimum 2 arguments required key and value\n if len(command_args) < 2:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR wrong number of arguments for 'SET' command\",\n )\n key = command_args[0]\n value = command_args[1]\n \n subarg_values = {\n \"EX\": None, # seconds\n \"PX\": None, # milliseconds\n \"EXAT\": None, # unix timestamp in seconds\n \"PXAT\": None, # unix timestamp in milliseconds\n \"KEEPTTL\": None, # keep the ttl of the key boolean\n \"GET\": None, # return the value of the key booelan\n \"NX\": None, # set if key does not exist boolean\n \"XX\": None, # set if key exists boolean\n }\n\n # Check set command has optional arguments\n if len(command_args) > 2:\n # Subargs are in format (arg, value)\n for subarg in command_args[2:]:\n subarg_values[subarg[0]] = subarg[1]\n \n # Process subargs\n # Check keepttl is not set with any other expiry subarg\n if subarg_values[\"KEEPTTL\"] and (subarg_values[\"EX\"] or subarg_values[\"PX\"] or subarg_values[\"EXAT\"] or subarg_values[\"PXAT\"]):\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR invalid expire command syntax\",\n )\n \n # Return error if both NX and XX are set\n if subarg_values[\"NX\"] and subarg_values[\"XX\"]:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR XX and NX options at the same time are not compatible\",\n )\n \n # Handle NX\n # NX -- Only set the key if it does not already exist.\n if subarg_values[\"NX\"]:\n if key in self.data_store:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING\n )\n else:\n return self._assign_key_to_value(key, value, subarg_values)\n \n # 
Handle XX\n # XX -- Only set the key if it already exists.\n if subarg_values[\"XX\"]:\n if key not in self.data_store:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING\n )\n else:\n return self._assign_key_to_value(key, value, subarg_values)\n \n # Handle GET\n # GET -- Return the value of key\n if subarg_values[\"GET\"]:\n if key in self.data_store:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING, self.data_store[key].get_value()\n )\n else:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING\n )\n \n # Handle KEEPTTL\n # KEEPTTL -- Retain the time to live associated with the key.\n if subarg_values[\"KEEPTTL\"]:\n if key in self.data_store:\n self.data_store[key] = ExpiryValue(\n value=value,\n expiry_seconds=self.data_store[key].get_expiry_seconds(),\n expiry_milliseconds=self.data_store[key].get_expiry_milliseconds(),\n expiry_unix_timestamp_seconds=self.data_store[key].get_expiry_unix_timestamp_seconds(),\n expiry_unix_timestamp_milliseconds=self.data_store[key].get_expiry_unix_timestamp_milliseconds(),\n )\n \n return self.response_builder.build_response(\n Protocol_2_Data_Types.SIMPLE_STRING, \"OK\"\n )\n else:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.BULK_STRING\n )\n \n # Normal case for set\n return self._assign_key_to_value(key, value, subarg_values)\n\n def _assign_key_to_value(self, key, value, subargs):\n try:\n self.data_store[key] = ExpiryValue(\n value=value,\n expiry_seconds=int(subargs[\"EX\"]) if subargs[\"EX\"] else None,\n expiry_milliseconds=int(subargs[\"PX\"]) if subargs[\"PX\"] else None,\n expiry_unix_timestamp_seconds=int(subargs[\"EXAT\"]) if subargs[\"EXAT\"] else None,\n expiry_unix_timestamp_milliseconds=int(subargs[\"PXAT\"]) if subargs[\"PXAT\"] else None,\n )\n return self.response_builder.build_response(\n Protocol_2_Data_Types.SIMPLE_STRING, \"OK\"\n )\n except ValueError:\n return self.response_builder.build_response(\n Protocol_2_Data_Types.ERROR,\n \"ERR value is not an integer or out of range\",\n )\n \n def _delete_expired_key(self, key):\n if key in self.data_store:\n del self.data_store[key]\n \n def stop(self):\n logger.info(\"Stopping server...\")\n self.server.close()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n server = RedisServer(host=HOST, port=PORT)\n asyncio.run(server.start())\n","repo_name":"generalpy101/redis-clone","sub_path":"redis_clone/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":11772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71921733480","text":"import torch\nimport torch.nn as nn\n\nfrom torch.onnx import register_custom_op_symbolic\nfrom torch.onnx.symbolic_helper import parse_args\n\n# Define custom symbolic function\n@parse_args(\"v\", \"v\", \"f\", \"i\")\ndef symbolic_foo_forward(g, input1, input2, attr1, attr2):\n return g.op(\"custom_domain::Foo\", input1, input2, attr1_f=attr1, attr2_i=attr2)\n\n# Register custom symbolic function\nregister_custom_op_symbolic(\"custom_ops::foo_forward\", symbolic_foo_forward, 9)\n\nclass FooModel(torch.nn.Module):\n def __init__(self, attr1, attr2):\n super(FooModel, self).__init__()\n self.attr1 = attr1\n self.attr2 = attr2\n\n def forward(self, input1, input2):\n # Calling custom op\n return torch.ops.custom_ops.foo_forward(input1, input2, self.attr1, self.attr2)\n\nmodel = FooModel(2, 3)\ntorch.onnx.export(\n model,\n (torch.Tensor(1), torch.Tensor(2)),\n \"model.onnx\",\n # only needed if you want to specify an opset version > 1.\n custom_opsets={\"custom_domain\": 2},\noperator_export_type=torch.onnx.OperatorExportTypes.ONNX_FALLTHROUGH)","repo_name":"LexcaliburR/notebook","sub_path":"pytorch/src/xxx.py","file_name":"xxx.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"41140155661","text":"import torch\nfrom torch.nn import functional as F\n\nfrom .utils import get_pair_indices, reduce_tensor\n# for type hint\nfrom typing import Optional\nfrom torch import Tensor\n\nfrom ..types import SimilarityType, DistanceLossType\n\n\ndef toone(a):\n a1 = a.max(1)[1]\n a1 = a1.view(-1, 1)\n tar = torch.zeros((len(a1), 100), device=a.device).scatter_(1, a1, 1)\n return tar\n\n\ndef sharpen(x: Tensor, temperature: float) -> Tensor:\n sharpened_x = x ** (1 / temperature)\n return sharpened_x / sharpened_x.sum(dim=1, keepdim=True)\n\n\ndef softmax_cross_entropy_loss(logits: Tensor, targets: Tensor, dim: int = 1, reduction: str = 'none') -> Tensor:\n \"\"\"\n :param logits: (labeled_batch_size, num_classes) model output of the labeled data\n :param targets: (labeled_batch_size, num_classes) labels distribution for the data\n :param dim: the dimension or dimensions to reduce\n :param reduction: choose from 'mean', 'sum', and 'none'\n :return:\n \"\"\"\n loss = -torch.sum(F.log_softmax(logits, dim=dim) * targets, dim=dim)\n\n return loss\n\n\ndef tosim(x):\n ctu = (torch.zeros(x.shape[1], x.shape[1])).cuda()\n for i in range(x.shape[1]):\n ctu[i][i] = 1\n sim = (torch.zeros(x.shape[0], x.shape[1])).cuda()\n\n for i in range(sim.shape[0]):\n for i1 in range(sim.shape[1]):\n sim[i][i1] = torch.cosine_similarity(x[i], ctu[i1], dim=-1)\n # sim = sim /torch.sum(sim, dim=1, keepdim=True)\n return sim\n\n\ndef toonehot(x):\n weizhi = x.max(dim=1)[1]\n zero = (torch.zeros(x.shape[0], x.shape[1])).cuda()\n for i, i1 in zip(weizhi, zero):\n i1[i] = 1\n return zero\n\n\nclass PairLoss:\n def __init__(self,\n similarity_metric: SimilarityType,\n distance_loss_metric: DistanceLossType,\n confidence_threshold: float,\n similarity_threshold: float,\n similarity_type: str,\n distance_loss_type: str,\n reduction: str = \"mean\"):\n self.confidence_threshold = confidence_threshold\n self.similarity_threshold = similarity_threshold\n\n self.similarity_type = similarity_type\n self.distance_loss_type = distance_loss_type\n\n self.reduction = reduction\n\n self.similarity_metric = similarity_metric\n self.distance_loss_metric = distance_loss_metric\n\n def __call__(self,\n logits: Tensor,\n probs: Tensor,\n targets: Tensor,\n *args,\n indices: Optional[Tensor] = None,\n **kwargs) -> Tensor:\n \"\"\"\n\n Args:\n logits: (batch_size, num_classes) predictions of batch data\n probs: (batch_size, num_classes) softmax probs logits\n targets: (batch_size, num_classes) one-hot labels\n\n Returns: Pair loss value as a Tensor.\n\n \"\"\"\n if indices is None:\n indices = get_pair_indices(targets, ordered_pair=True)\n total_size = len(indices) // 2\n\n i_indices, j_indices = indices[:, 0], indices[:, 1]\n targets_max_prob = targets.max(dim=1).values\n\n return self.compute_loss(logits_j=logits[j_indices],\n probs_j=probs[j_indices],\n targets_i=targets[i_indices],\n targets_j=targets[j_indices],\n targets_i_max_prob=targets_max_prob[i_indices],\n total_size=total_size)\n\n def compute_loss(self,\n logits_j: Tensor,\n probs_j: Tensor,\n targets_i: Tensor,\n targets_j: Tensor,\n targets_i_max_prob: Tensor,\n total_size: int):\n # conf_mask should not track gradient\n ti = sharpen(targets_i, 0.5)\n conf_mask = (ti.max(1)[0] > self.confidence_threshold).detach().float()\n # sime = tosim(targets_i)\n one = toone(targets_i)\n\n similarities: Tensor = self.get_similarity(targets_i=targets_i,\n targets_j=targets_j,\n dim=1)\n # sim_mask should not track gradient\n sim_mask = F.threshold(similarities, 
self.similarity_threshold, 0).detach()\n\n distance = softmax_cross_entropy_loss(logits_j, one, dim=1, reduction=\"none\")\n loss = conf_mask * sim_mask * distance\n\n if self.reduction == \"mean\":\n loss = torch.sum(loss) / total_size\n elif self.reduction == \"sum\":\n loss = torch.sum(loss)\n\n return loss\n\n def get_similarity(self,\n targets_i: Tensor,\n targets_j: Tensor,\n *args,\n **kwargs) -> Tensor:\n x, y = targets_i, targets_j\n\n return self.similarity_metric(x, y, *args, **kwargs)\n\n def get_distance_loss(self,\n logits: Tensor,\n probs: Tensor,\n targets: Tensor,\n *args,\n **kwargs) -> Tensor:\n if self.distance_loss_type == \"prob\":\n x, y = probs, targets\n else:\n x, y = logits, targets\n\n return self.distance_loss_metric(x, y, *args, **kwargs)\n","repo_name":"GZHU-DVL/DTS-SimL","sub_path":"loss/pair_loss/pair_loss.py","file_name":"pair_loss.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"34633034732","text":"import tkinter as tk\nfrom tkinter.font import Font\nfrom front_end.livre_page.frame_livre import frame_livre\n\nclass gestion_structuration_1:\n def cree_tab_coord_rectangle_arrondie(x1, y1, x2, y2, radius):\n tab = [x1+radius , y1,\n x1+radius , y1,\n x2-radius , y1,\n x2-radius , y1,\n x2 , y1,\n x2 , y1+radius,\n x2 , y1+radius,\n x2 , y2-radius,\n x2 , y2-radius,\n x2 , y2,\n x2 - radius,y2,\n x2 - radius,y2,\n x1 + radius,y2,\n x1 + radius,y2,\n x1,y2,\n x1 , y2 - radius,\n x1 , y2 - radius,\n x1 , y1 + radius,\n x1 , y1 + radius,\n x1,y1] \n return tab \n\n __hauteur_entete= 120\n __hauteur_pied= 80\n __hauteur_minimale_centre= 400\n __hauteur_polygone_pied= 40\n __espace_entre_block= 20\n __espace_avec_bordure_fenetre= 80\n __espace_entre_logo_titre= 50\n __ecart_largeur_dans_pied= 150\n __raduis_polynome_pied= 30\n __largeur_minimale_fenetre= 900\n\n#nous prenons l'entete comme element plus large\n def largeur_minimale_visibilite():\n return gestion_structuration_1.__largeur_minimale_fenetre\n\n#la hauteur minimale de visbilite\n def hauteur_minimale_visiblilite():\n return gestion_structuration_1.__hauteur_entete+ gestion_structuration_1.__hauteur_minimale_centre+ gestion_structuration_1.__hauteur_pied+ 3* gestion_structuration_1.__espace_entre_block \n\n def __init__(self, feuille: tk.Canvas, nom_premiere_page, couleur, police_grand_ecrit, police_petit_ecris, photo, couleur_bordure) :\n self.__feuille= feuille\n self.__largeur_logo= 100\n self.__nom_elt_entete= \"dans_entete\"\n self.__nom_pied_page= \"pied_page\"\n self.__creer_entete(couleur, police_grand_ecrit, police_petit_ecris, photo)\n self.__cree_centre_et_pied(nom_premiere_page, couleur_bordure, police_petit_ecris)\n self.__feuille.configure(scrollregion=(0, 0, gestion_structuration_1.largeur_minimale_visibilite(), gestion_structuration_1.hauteur_minimale_visiblilite()))\n\n def __creer_entete(self, couleur, police_grand_ecrit: Font , police_petit_ecris : Font, photo):\n nom_appli= \"APPLICATION DE GESTION DES PATIENTS\"\n slogan= \"la santé de tous, notre préocupation\"\n self._rectangle_entete= self.__feuille.create_rectangle(0, 0, gestion_structuration_1.__largeur_minimale_fenetre, gestion_structuration_1.__hauteur_entete, fill=couleur, width=0)\n x= (gestion_structuration_1.__largeur_minimale_fenetre)/2 - self.__largeur_logo\n y= gestion_structuration_1.__hauteur_entete/4\n self.__feuille.create_text(x, y, fill= \"white\", text= nom_appli, font= police_grand_ecrit, anchor= \"c\", tags=[self.__nom_elt_entete])\n espace= 2\n y+= police_grand_ecrit.metrics(\"linespace\")+ espace\n self.__feuille.create_text(x, y, fill= \"black\", text= slogan, font= police_petit_ecris, anchor= \"c\", tags=[self.__nom_elt_entete])\n x+= (police_grand_ecrit.measure(nom_appli)+ self.__largeur_logo) /2+ gestion_structuration_1.__espace_entre_logo_titre \n self.__feuille.create_image(x, gestion_structuration_1.__hauteur_entete/2, image= photo, tags=[self.__nom_elt_entete])\n\n def __cree_centre_et_pied(self, nom_premiere_page, couleur_bordure, police_petit_ecris : Font):\n larg= 2\n haut= 2\n x= gestion_structuration_1.__espace_avec_bordure_fenetre\n y= gestion_structuration_1.__hauteur_entete+ gestion_structuration_1.__espace_entre_block\n self.__conteneur_page= frame_livre(self.__feuille, larg, haut, nom_premiere_page, \"white\")\n self.__cadre_centrale= self.__feuille.create_window(x, y, height= larg, width= haut, window= self.__conteneur_page.get_frame_conserveur(), anchor=\"nw\") \n y= y+ haut+ 
gestion_structuration_1.__espace_entre_block\n self.__creer_pied(x, y, larg, couleur_bordure, police_petit_ecris)\n\n def __creer_pied(self, x, y, larg, couleur_bordure, police_petit_ecris : Font):\n self.__rectangle_pied= self.__feuille.create_rectangle(x, y+ gestion_structuration_1.__hauteur_polygone_pied/2, x+larg, y+gestion_structuration_1.__hauteur_pied, fill=\"white\", tags=[self.__nom_pied_page], width=0) \n x+= gestion_structuration_1.__ecart_largeur_dans_pied\n larg-= 2*gestion_structuration_1.__ecart_largeur_dans_pied\n tab_coord_petit_polinome= gestion_structuration_1.cree_tab_coord_rectangle_arrondie(x, y, x+ larg, y+ gestion_structuration_1.__hauteur_polygone_pied, gestion_structuration_1.__raduis_polynome_pied) \n self.__petit_polynome= self.__feuille.create_polygon(tab_coord_petit_polinome, smooth=True, fill= \"white\", tags= [self.__nom_pied_page], width=1, outline= couleur_bordure)\n mot_en_bas= \"la santé avant tout\"\n self.__ecris_pied= self.__feuille.create_text(x +larg/2, y+ gestion_structuration_1.__hauteur_polygone_pied/2, fill= \"black\", text= mot_en_bas, font= police_petit_ecris, anchor= \"c\", tags=[self.__nom_pied_page])\n\n def accomodation(self, larg, haut):\n self.__accomode_entete(larg)\n self.__accomde_centre_et_pied(larg- 2*gestion_structuration_1.__espace_avec_bordure_fenetre, haut- gestion_structuration_1.__hauteur_entete- gestion_structuration_1.__espace_entre_block)\n\n def __accomde_centre_et_pied(self, larg, haut):\n haut -= gestion_structuration_1.__hauteur_pied + 2*gestion_structuration_1.__espace_entre_block\n hauteur_centre= float(self.__feuille.itemcget(self.__cadre_centrale, \"height\"))\n largeur_centre= float(self.__feuille.itemcget(self.__cadre_centrale, \"width\"))\n larg_minimlae_centre= gestion_structuration_1.__largeur_minimale_fenetre- 2*gestion_structuration_1.__espace_avec_bordure_fenetre\n if(larg larg ):\n larg= gestion_structuration_1.__largeur_minimale_fenetre\n if((tab_coord[2]- tab_coord[0]) != larg):\n tab_coord[2]= tab_coord[0]+ larg\n self.__feuille.coords(self._rectangle_entete, tab_coord)\n tab_coord_contenu= self.__feuille.bbox(self.__nom_elt_entete)\n self.__feuille.move(self.__nom_elt_entete, ((tab_coord[0]+ tab_coord[2])/2- (tab_coord_contenu[2]+ tab_coord_contenu[0])/2), 0)\n \n def get_conteneur_page(self):\n return self.__conteneur_page \n\n","repo_name":"WissalManseri/GestionDesPatients","sub_path":"code_python/front_end/gestionaire_graphisme/gestion_premiere_structuration.py","file_name":"gestion_premiere_structuration.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"20784400941","text":"\nnumber1 = input(\"First number: \") \n\nnumber2 = input(\"\\nSecond number: \") \n\n \n\n# Adding two numbers \n\n# User might also enter float numbers \n\nsum = float(number1) + float(number2) \nprint(\"The sum of {0} and {1} is {2}\" .format(number1, number2, sum)) \n","repo_name":"Phebedevadattam/Programs","sub_path":"add2num.py","file_name":"add2num.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"34786722674","text":"from typing import List\n\nfrom pydantic import BaseModel, Field\n\nfrom app.apis.schemas.base_schema import EntityID\n\n\nclass ProductBase(EntityID):\n name: str = Field(..., max_length=255)\n calories: float\n fat: float\n carbs: float\n protein: float\n upvotes: int\n downvotes: int\n\n class Config:\n \"\"\"Map Pydantic model with ORM model\"\"\"\n\n orm_mode = True\n allow_population_by_field_name = True\n\n\nclass ProductsList(BaseModel):\n products: List[ProductBase]\n\n\nclass ProductCreate(BaseModel):\n name: str = Field(..., max_length=255)\n calories: float\n fat: float\n carbs: float\n protein: float\n\n class Config:\n \"\"\"Map Pydantic model with ORM model\"\"\"\n\n orm_mode = True\n allow_population_by_field_name = True\n\n\nclass ProductUpdate(BaseModel):\n name: str | None = Field(None, max_length=255)\n calories: float | None\n fat: float | None\n carbs: float | None\n protein: float | None\n upvotes: int | None\n downvotes: int | None\n\n class Config:\n \"\"\"Map Pydantic model with ORM model\"\"\"\n\n orm_mode = True\n allow_population_by_field_name = True\n","repo_name":"hutanmihai/FMI","sub_path":"Anul II/Semestrul II/MDS/ProiectMDS/calorie-tracker-backend/app/apis/schemas/product_schema.py","file_name":"product_schema.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"33183248854","text":"import cv2\r\nimport mediapipe as mp\r\nimport time\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom mpl_toolkits import mplot3d\r\nimport matplotlib.animation\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nmpHands = mp.solutions.hands\r\nhands = mpHands.Hands()\r\nmpDraw = mp.solutions.drawing_utils\r\npTime = 0\r\nbrightness = 10\r\n\r\nfig = plt.figure()\r\nax = plt.axes(projection='3d')\r\nax.legend()\r\n\r\ndef animate(i):\r\n success, img = cap.read()\r\n cap.set(10, brightness)\r\n imgRGB = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\r\n results = hands.process(imgRGB)\r\n # print(results.multi_hand_landmarks)\r\n\r\n if results.multi_hand_landmarks:\r\n for handLms in results.multi_hand_landmarks:\r\n x_arr = []\r\n y_arr = []\r\n z_arr = []\r\n for id, lm in enumerate(handLms.landmark):\r\n x_arr.append(lm.x)\r\n y_arr.append(lm.y)\r\n z_arr.append(lm.z)\r\n\r\n h, w, c = img.shape\r\n cx, cy = int(lm.x*w), int(lm.y*h)\r\n # print(\"id:\",id,\" | cx,cy:\", cx, cy)\r\n\r\n color = 'green'\r\n marker = 'o'\r\n linestyle = 'dashed'\r\n linestyle_solid = 'solid'\r\n linewidth = 2\r\n markersize = 12\r\n\r\n plt.cla()\r\n #Thumb\r\n ax.plot3D(x_arr[0:5], y_arr[0:5], z_arr[0:5],\r\n color = 'sandybrown',\r\n linewidth = 3)\r\n\r\n #Index\r\n ax.plot3D(x_arr[5:9], y_arr[5:9], z_arr[5:9],\r\n color = 'aqua',\r\n linewidth = 3)\r\n\r\n #Middle\r\n ax.plot3D(x_arr[9:13], y_arr[9:13], z_arr[9:13],\r\n color = 'gold',\r\n linewidth = 3\r\n )\r\n #Ring\r\n ax.plot3D(x_arr[13:17], y_arr[13:17], z_arr[13:17],\r\n color = 'lightcoral',\r\n linewidth = 3\r\n )\r\n #Pinky\r\n ax.plot3D(x_arr[17:21], y_arr[17:21], z_arr[17:21] ,\r\n color = 'lightskyblue',\r\n linewidth = 3\r\n )\r\n\r\n ax.plot3D([x_arr[0],x_arr[17]], [y_arr[0],y_arr[17]], [z_arr[0],z_arr[17]] ,\r\n color = 'darkseagreen',\r\n linewidth = 3\r\n )\r\n ax.plot3D([x_arr[0], x_arr[5]], [y_arr[0], y_arr[5]], [z_arr[0], z_arr[5]] ,\r\n color = 'darkseagreen',\r\n linewidth = 3\r\n )\r\n ax.plot3D([x_arr[2], x_arr[5]], [y_arr[2], y_arr[5]], [z_arr[2], z_arr[5]],\r\n color = 'darkseagreen',\r\n linestyle = 'dashed',\r\n linewidth = 3\r\n )\r\n ax.plot3D(x_arr[5:18:4], y_arr[5:18:4], z_arr[5:18:4],\r\n color = 'darkseagreen',\r\n linestyle = 'dashed',\r\n linewidth = 3\r\n )\r\n\r\n ax.scatter3D(x_arr, y_arr, z_arr, 'ro')\r\n\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n\r\n print(\"_________________\")\r\n mpDraw.draw_landmarks(img, handLms, mpHands.HAND_CONNECTIONS)\r\n\r\n cv2.imshow('Webcam', img)\r\n\r\nani = matplotlib.animation.FuncAnimation(fig, animate, frames=2, interval=100, repeat=True)\r\n\r\nplt.show()\r\n","repo_name":"D-2000-99/OpenCV","sub_path":"Hand_detect/3D_Hand_plot.py","file_name":"3D_Hand_plot.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"74727546003","text":"#!/usr/bin/python3\n\"\"\"Test base class.\"\"\"\n\nimport unittest\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\nimport json\n\n\nclass TestBaseClass(unittest.TestCase):\n \"\"\"Testing base class.\"\"\"\n\n def test_1(self):\n \"\"\"Test normal case.\"\"\"\n a = Base()\n b = Base()\n c = Base()\n self.assertEqual(a.id, 1)\n self.assertEqual(b.id, 2)\n self.assertEqual(c.id, 3)\n\n def test_2(self):\n \"\"\"Change id.\"\"\"\n a = Base()\n b = Base()\n c = Base(15)\n d = Base()\n self.assertEqual(a.id, 4)\n self.assertEqual(b.id, 5)\n self.assertEqual(c.id, 15)\n self.assertEqual(d.id, 6)\n\n def test_3(self):\n \"\"\"Change id.\"\"\"\n a = Base()\n b = Base()\n c = Base(1)\n d = Base()\n e = Base(3)\n self.assertEqual(a.id, 7)\n self.assertEqual(b.id, 8)\n self.assertEqual(c.id, 1)\n self.assertEqual(d.id, 9)\n self.assertEqual(e.id, 3)\n\n def test_4(self):\n \"\"\"Negative ids.\"\"\"\n a = Base(-4)\n b = Base(-5)\n self.assertEqual(a.id, -4)\n self.assertEqual(b.id, -5)\n\n def test_4_1(self):\n \"\"\"Type_error test.\"\"\"\n with self.assertRaises(TypeError):\n a = Base(\"5\")\n\n def test_5(self):\n \"\"\"To_json_string method.\"\"\"\n r1 = Rectangle(10, 7, 2, 8)\n dictionary = r1.to_dictionary()\n json_dictionary = Base.to_json_string([dictionary])\n self.assertEqual(\n dictionary, {'x': 2, 'y': 8, 'id': 10, 'height': 7, 'width': 10})\n self.assertTrue(type(dictionary) is dict)\n self.assertEqual(\n json_dictionary,\n '[{\"x\": 2, \"y\": 8, \"id\": 10, \"height\": 7, \"width\": 10}]')\n self.assertTrue(type(json_dictionary) is str)\n\n def test_6(self):\n \"\"\"Save_to_file method.\"\"\"\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n with open(\"Rectangle.json\", \"r\") as file:\n a = [\n {\"x\": 2, \"y\": 8, \"id\": 11, \"height\": 7, \"width\": 10},\n {\"x\": 0, \"y\": 0, \"id\": 12, \"height\": 4, \"width\": 2}\n ]\n self.assertEqual(a, json.loads(file.read()))\n\n r1 = Square(10, 2, 8, id=11)\n r2 = Square(2, id=12)\n Square.save_to_file([r1, r2])\n with open(\"Square.json\", \"r\") as file:\n a = [\n {\"x\": 2, \"y\": 8, \"id\": 11, \"size\": 10},\n {\"x\": 0, \"y\": 0, \"id\": 12, \"size\": 2}\n ]\n self.assertEqual(a, json.loads(file.read()))\n\n def test_6_1(self):\n \"\"\"Passing None end empty list to save_to_file method.\"\"\"\n Rectangle.save_to_file(None)\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual([], json.loads(file.read()))\n\n Rectangle.save_to_file([])\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual([], json.loads(file.read()))\n\n Square.save_to_file(None)\n with open(\"Square.json\", \"r\") as file:\n self.assertEqual([], json.loads(file.read()))\n\n Square.save_to_file([])\n with open(\"Square.json\", \"r\") as file:\n self.assertEqual([], json.loads(file.read()))\n\n def test_7(self):\n \"\"\"From_json_string method.\"\"\"\n list_input = [\n {'id': 89, 'width': 10, 'height': 4},\n {'id': 7, 'width': 1, 'height': 7}\n ]\n json_list_input = Rectangle.to_json_string(list_input)\n list_output = Rectangle.from_json_string(json_list_input)\n self.assertTrue(type(list_input) is list)\n self.assertEqual(\n list_input, [\n {'id': 89, 'width': 10, 'height': 4},\n {'id': 7, 'width': 1, 'height': 7}])\n self.assertTrue(type(json_list_input) is str)\n self.assertEqual(\n json.loads(json_list_input),\n [\n {\"id\": 89, \"width\": 10, \"height\": 4},\n {\"id\": 7, \"width\": 1, \"height\": 7}\n ]\n )\n 
self.assertTrue(type(list_output) is list)\n self.assertEqual(\n list_output,\n [\n {'id': 89, 'width': 10, 'height': 4},\n {'id': 7, 'width': 1, 'height': 7}\n ]\n )\n\n def test_8(self):\n \"\"\"Create method.\"\"\"\n r1 = Rectangle(3, 5, 1)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle.create(**r1_dictionary)\n self.assertEqual(f\"{r1}\", \"[Rectangle] (13) 1/0 - 3/5\")\n self.assertEqual(f\"{r2}\", \"[Rectangle] (13) 1/0 - 3/5\")\n self.assertFalse(r1 is r2)\n self.assertFalse(r1 == r2)\n\n def test_9(self):\n \"\"\"Load_from_file method.\"\"\"\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n list_rectangles_input = [r1, r2]\n Rectangle.save_to_file(list_rectangles_input)\n list_rectangles_output = Rectangle.load_from_file()\n self.assertEqual(\n f\"{list_rectangles_input[0]}\", \"[Rectangle] (15) 2/8 - 10/7\")\n self.assertEqual(\n f\"{list_rectangles_input[1]}\", \"[Rectangle] (16) 0/0 - 2/4\")\n self.assertEqual(\n f\"{list_rectangles_output[0]}\", \"[Rectangle] (15) 2/8 - 10/7\")\n self.assertEqual(\n f\"{list_rectangles_output[1]}\", \"[Rectangle] (16) 0/0 - 2/4\")\n s1 = Square(5)\n s2 = Square(7, 9, 1)\n list_squares_input = [s1, s2]\n Square.save_to_file(list_squares_input)\n list_squares_output = Square.load_from_file()\n self.assertEqual(f\"{list_squares_input[0]}\", \"[Square] (19) 0/0 - 5\")\n self.assertEqual(f\"{list_squares_input[1]}\", \"[Square] (20) 9/1 - 7\")\n self.assertEqual(f\"{list_squares_output[0]}\", \"[Square] (19) 0/0 - 5\")\n self.assertEqual(f\"{list_squares_output[1]}\", \"[Square] (20) 9/1 - 7\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"mmubarak0/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"71485304724","text":"import torch\nimport torch.multiprocessing as mp\nimport os\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\ndef setup(rank,world_size):\n os.environ['MASTER_ADDR']='localhost'\n os.environ['MASTER_PORT']='12355'\n dist.init_process_group(\"gloo\",rank=rank,world_size=world_size)\n\nclass ToyModel(nn.Module):\n def __init__(self) -> None:\n super().__init__()\n self.net1=nn.Linear(10,10)\n self.relu=nn.ReLU()\n self.net2=nn.Linear(10,5)\n\n def forward(self,x):\n return self.net2(self.relu(self.net1(x)))\n\ndef clean_up():\n dist.destroy_process_group()\n\n\ndef basic(rank,world_size):\n\n print(f'running basic ddp in rank:{rank}')\n setup(rank,world_size)\n model=ToyModel().to(rank)\n\n ddp_model=DDP(model,device_ids=[rank])\n\n loss_fn=nn.MSELoss()\n optimizer=optim.SGD(ddp_model.parameters(),lr=0.001)\n dist.barrier()\n\n optimizer.zero_grad()\n ouput=ddp_model(torch.rand(20,10))\n labels=torch.rand(20,5).to(rank)\n loss_fn(ouput,labels).backward()\n optimizer.step()\n\n dist.barrier()\n\n clean_up()\n\ndef run_demo(demo_fn,world_size):\n\n # mp.spawn enable share memory\n mp.spawn(demo_fn,\n args=(world_size,),\n nprocs=world_size,\n join=True)\n\nif __name__ == \"__main__\":\n run_demo(basic,4)","repo_name":"Chenqll/example","sub_path":"torch_ddp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"4003159045","text":"from datetime import datetime, time\nfrom typing import Annotated\n\nfrom fastapi import Query\n\n# Geospatial types\nLongitudeCoordinate = Annotated[float, Query(ge=-180.0, le=180.0)]\nLatitudeCoordinate = Annotated[float, Query(ge=-90.0, le=90.0)]\n\nLongLat = tuple[LongitudeCoordinate, LatitudeCoordinate]\nBoundingBox = tuple[LongLat, LongLat]\nPolygon = list[LongLat]\n\n\ndef to_4_corners(b: BoundingBox) -> list[list[float]]:\n lleft, lright, uleft, uright = [b[0], [b[0][1], b[1][0]], [b[1][0], b[0][1]], b[1]]\n return [lleft, lright, uleft, uright]\n\n\n# Time types\nTime = time\nDatetime = datetime\n\n# A type for time pairs like 17:00 - 19:30\nTimeSlotFlexible = tuple[Time, Time]\n\n# A type for time pairs like 2023-03-31T12:30:00 - 2023-03-31T14:30:00\nTimeSlotFixed = tuple[Datetime, Datetime]\n\n\n# Trust scores\nTrustScore = int\nUserTrustScore = TrustScore\nLocationTrustScore = TrustScore\n","repo_name":"ActivityRadar/backend","sub_path":"backend/util/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"36265803243","text":"import random\n\nNUM_OF_ROUNDS = 2\nNUM_OF_ATTEMPTS = 5\nresults = []\n\nfor q in range(NUM_OF_ROUNDS):\n number = random.randint(0, 10)\n for i in range(NUM_OF_ATTEMPTS):\n while True:\n try:\n guess = int(input(f\"Guess the number between 0--10 (attempt {i+1}): \"))\n except:\n print(\"Sorry, that didn't work.\")\n continue\n if 0 <= guess and guess <= 10:\n break\n else:\n print(\"You should guess between 0 and 10.\")\n\n if guess == number:\n break\n\n if guess == number:\n print(\"Congrats, next round!\")\n results.append(NUM_OF_ATTEMPTS-i)\n else:\n print(f\"You didn't make it, the correct number was {number}.\")\n results.append(0)\n\nscore = sum(results)/NUM_OF_ROUNDS\nprint(f\"Your average was {score:.2f}.\")\n\n","repo_name":"dbosk/intropy","sub_path":"modules/containers/tutorial/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"}
+{"seq_id":"74345335124","text":"from ghidra.program.util import DefinedDataIterator\nfrom ghidra.app.util import XReferenceUtil\n\nfuncName = 'onCreate_android.os.Bundle_void'\nfunc = getGlobalFunctions(funcName)[0]\n\n#fm = currentProgram.getFunctionManager()\n#funcs = fm.getFunctions(True)\n#for func in funcs:\n\n\nfor string in DefinedDataIterator.definedStrings(currentProgram):\n for ref in XReferenceUtil.getXRefList(string):\n print(string, ref)","repo_name":"alelibe/Graph-based-Malware-Detection","sub_path":"Archivio ghidra/printRefs.py","file_name":"printRefs.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"45826668577","text":"import pandas as pd\n\n\nclass WordData:\n \"\"\"\n Class to init and store the word data.\n\n Reads the csv file with training data, stores the data and prepares the data for further processing.\n \"\"\"\n\n def __init__(self, filename, nlp):\n \"\"\"\n :param filename: String\n Path to .csv file\n\n :param raw_data: List\n List of dicts containing \"patterns\", \"responses\" and \"tags\".\n\n :param all_words: List\n List of all unique pattern words\n\n :param tags: List\n List of all unique tags\n\n :param xy: tuple ([words], tag)\n Tuples containing ([words], tag) for each pattern\n \"\"\"\n\n self.nlp = nlp\n self.csv = pd.read_csv(filename)\n self.raw_data = self.convert(self.csv)\n self.all_words, self.tags, self.xy = self.init_words()\n\n\n def details(self):\n \"\"\"\n Prints details of the data to the console.\n\n :return: None\n \"\"\"\n print(\"Dataset:\")\n print(\" unique words:\", len(self.all_words))\n print(\" samples:\", len(self.xy))\n print(\" tags:\", len(self.tags))\n print()\n\n def show(self):\n \"\"\"\n Prints the data to the console.\n\n :return: None\n \"\"\"\n for dic in self.raw_data:\n print(\"tag:\", dic['tag'])\n print(\"patterns:\")\n for pat in dic['patterns']:\n print(\" \", pat)\n print(\"response:\", dic['response'])\n print()\n\n def convert(self, csv):\n \"\"\"\n Converts a pandas file to a structured dictionary.\n\n :param csv: pandas dataframe\n trainingdata.csv file\n :return: list of dicts containing \"patterns\", \"responses\" and \"tags\"\n \"\"\"\n\n raw_data = []\n seen_tags = []\n for i in range(len(csv)):\n tag = csv['Tag'][i]\n pattern = csv['Input'][i]\n output = csv['Output'][i]\n\n if tag not in seen_tags:\n seen_tags.append(tag)\n dic = {\n 'tag': tag,\n 'patterns': [pattern],\n 'response': output}\n raw_data.append(dic)\n else:\n for dic in raw_data:\n if dic['tag'] == tag:\n dic['patterns'].append(pattern)\n return raw_data\n\n\n def init_words(self):\n \"\"\"\n Initializes the word data.\n\n Reads the raw data, tokenizes and stems the words using the NLP class.\n It then forms ngrams of size n_gram_size (initialized in main.py).\n\n :return:\n all_words: list of all unique pattern words\n tags: list of all unique tags\n xy: tuples containing ([words], tag) for each pattern\n \"\"\"\n\n all_words = []\n tags = []\n xy = []\n\n for i, chat in enumerate(self.raw_data):\n tag = chat['tag']\n tags.append(tag)\n\n for pattern in chat['patterns']:\n # tokenize, filter and stem the sentences\n words = self.nlp.tokenize(pattern)\n words = [self.nlp.stem(w) for w in words]\n\n ngrams = self.nlp.to_ngrams(words)\n all_words.extend(ngrams)\n\n # spell corrects to counteract stemming mistakes\n words_corrected = [self.nlp.spell.correction(w) for w in words]\n ngrams_corrected = self.nlp.to_ngrams(words_corrected)\n\n xy.append((ngrams_corrected, tag))\n\n print(\"Words from tag\", i+1, \"processed\")\n print()\n all_words = sorted(set(all_words))\n tags = sorted(set(tags))\n return all_words, tags, xy\n","repo_name":"RoelHeijden/RU-MSDT-chatbot-project","sub_path":"back-end/Chatbot/DataProcessing/wordData.py","file_name":"wordData.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"36726438929","text":"# https://colab.research.google.com/drive/1V-F3erKkPun-vNn28BoOc6ENKmfo8kDh?usp=sharing#scrollTo=PlAqR7PJmvTL\r\nfrom csv import writer\r\nfrom http import server\r\nimport cv2,time,os,numpy as np\r\nfrom classes.detection_services.detection_service import IDetectionService\r\nfrom utils_lib.utils_functions import runcmd\r\nimport torch\r\nclass Yolov5DetectionService(IDetectionService):\r\n\r\n np.random.seed(123)\r\n model=None\r\n default_model_input_size=640\r\n\r\n def clean_memory(self):\r\n print(\"CALL DESTRUCTER FROM Yolov5DetectionService\")\r\n if self.model:\r\n del self.model\r\n # tf.keras.backend.clear_session()\r\n # del self\r\n \r\n def __init__(self):\r\n self.classFile =\"coco.names\" \r\n self.modelName=None\r\n # self.cacheDir=None\r\n self.classesList=None\r\n self.detection_method_list = [ \r\n {'name': 'yolov5n' },\r\n # {'name': 'yolov5n6' },\r\n {'name': 'yolov5s' },\r\n # {'name': 'yolov5s6' },\r\n {'name': 'yolov5m' },\r\n {'name': 'yolov5l' },\r\n {'name': 'yolov5x' } \r\n ]\r\n self.init_object_detection_models_list()\r\n \r\n def service_name(self):\r\n return \"torch hub YOLOV5 detection service V 1.0\"\r\n\r\n def load_model(self,model=None):\r\n self.selected_model = next(m for m in self.detection_method_list_with_url if m[\"name\"] == model)\r\n self.modelName= self.selected_model['name']\r\n self.model = torch.hub.load('ultralytics/yolov5', self.modelName ) \r\n self.readClasses()\r\n \r\n def init_object_detection_models_list(self):\r\n self.detection_method_list_with_url=self.detection_method_list\r\n\r\n def get_object_detection_models(self):\r\n return self.detection_method_list \r\n \r\n def detect_objects(self, frame,boxes_plotting=True ):\r\n start_time = time.perf_counter()\r\n if self.network_input_size!=None and self.network_input_size != self.default_model_input_size:\r\n self.default_model_input_size=self.network_input_size\r\n print(\"UPDATE YOLO V5 NETWORK INPUT SIZE ... 
\"+str(self.default_model_input_size))\r\n # frame=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n labels, cord , inference_time = self.score_frame(frame)\r\n if boxes_plotting:\r\n frame , _ = self.plot_boxes((labels, cord ), frame,threshold=self.threshold,nms_threshold=self.nms_threshold,boxes_plotting=True)\r\n fps = 1 / np.round(time.perf_counter()-start_time,3)\r\n self.addFrameFps(frame,fps)\r\n return frame,inference_time\r\n else:\r\n return self.plot_boxes((labels, cord ), frame,threshold=self.threshold,nms_threshold=self.nms_threshold,boxes_plotting=False)\r\n \r\n def score_frame(self, frame):\r\n # self.model.to(self.device)\r\n frame = [frame] \r\n start_time = time.perf_counter()\r\n results = self.model(frame,size=self.default_model_input_size)\r\n end_time = time.perf_counter()\r\n labels, cord = results.xyxyn[0][:, -1], results.xyxyn[0][:, :-1]\r\n return labels, cord , np.round(end_time - start_time, 4)\r\n\r\n def plot_boxes(self, results, frame,threshold,nms_threshold,boxes_plotting=True):\r\n boxes=[]\r\n confidences=[]\r\n classes_ids=[]\r\n\r\n labels, cord = results\r\n n = len(labels)\r\n x_shape, y_shape = frame.shape[1], frame.shape[0]\r\n for i in range(n):\r\n row = cord[i]\r\n if float(row[4]) >= threshold:\r\n confidences.append(float(row[4]))\r\n classes_ids.append(int(labels[i]))\r\n x1, y1, x2, y2 = int(row[0]*x_shape), int(row[1]*y_shape), int(row[2]*x_shape), int(row[3]*y_shape)\r\n bgr = (0, 255, 0)\r\n box = np.array([x1,y1,x2, y2])\r\n boxes.append(box) \r\n \r\n indices = cv2.dnn.NMSBoxes(boxes,confidences,score_threshold=threshold,nms_threshold=nms_threshold)\r\n\r\n raw_detection_data=[]\r\n\r\n for i in indices:\r\n x1, y1, x2, y2 = boxes[i]\r\n label = self.classesList[classes_ids[i]]\r\n color = self.colors_list[classes_ids[i]]\r\n conf = confidences[i]\r\n if (boxes_plotting): \r\n displayText = '{}: {:.2f}'.format(label, conf) \r\n cv2.rectangle(frame,(x1,y1),(x2,y2),color=color,thickness=2)\r\n cv2.putText(frame, displayText, (x1,y1-2),cv2.FONT_HERSHEY_PLAIN, 1.5,color,2)\r\n else:\r\n raw_detection_data.append(([x1, y1, x2-x1, y2-y1],conf,label))\r\n\r\n return frame,raw_detection_data\r\n \r\n\r\n ","repo_name":"ayoubalami/tracking_project__py_src","sub_path":"classes/detection_services/yolov5_detection_service.py","file_name":"yolov5_detection_service.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"42204473292","text":"import gensim\nimport nltk\nimport pandas\nimport pickle\nimport os\nimport collections\nimport numpy\n\nimport recommender\nimport doc2vec\nimport lsa\n#- UI\n# - Like previous assignments\n# - Ask if indexing (LSA (3 versions???), doc2vec, maybe repr for recommender part)\n# - should_index() + index()\n# - Loop which of [below + quit] should do\n#- keyword search\n# - Title search\n# - Like word embedding assignment\n# - Title search in genre\n# - Like prev, except filter by genre?\n# - Before, after?\n#- Recommender\n# - Get ratings based on sentiments of reviews (NOT reviews in ratings.csv)\n# - Rank all unwatched movies with $r_ij = \\sum(k, sim(u_i, u_k) * (r_kj - \\avg(r_k))) / ratings of j$,\n# where r is ratings[uid][movie] and u is users[uid]\n# - Then print reverse-sorted list of unwatched movies by ratings\n\n## TODO: work out index format\n\nINDEX = 'index.pkl'\n\ndef main():\n index = get_index()\n quit = False\n while not quit:\n quit = dispatch(index)\n\ndef should_index():\n \"\"\" Asks the user if the index should be generated, with confirmation for unusual cases \"\"\"\n if ask(\"Generate the index? \"):\n return not (os.path.exists(INDEX) and not ask(\"Index exists. Regenerate the index? \"))\n else:\n return not os.path.exists(INDEX) and ask(\"Index does not exist. Generate the index? \")\n\ndef get_index():\n if should_index():\n return index()\n else:\n return read_index()\n\n## Call all your indexing functions in here.\ndef index():\n \"\"\" Function to do all the indexing, returns the index after writing it \"\"\"\n index = {}\n movies = pandas.read_csv('movies.csv')\n moviesDF = pandas.read_csv('moviesDF.csv')\n index['movies'] = movies\n print('Making doc2vec index')\n index['doc2vec'] = doc2vec.index(movies['title'][:100])\n print('Making recommender index')\n index['recommender'] = recommender.index(moviesDF[:100])\n print('Making LSA index')\n index['lsa'] = lsa.index(movies['title'][:100], 0.3)\n pickle.dump(index, open(INDEX, 'wb'))\n return index\n\ndef read_index():\n return pickle.load(open(INDEX, 'rb'))\n\ndef dispatch(index):\n PROMPT = \"[Search] by title, search by title in [genre], [recommend] a movie, or [quit]? 
\"\n inp = input(PROMPT)\n inp = inp.strip().lower()\n if inp == '':\n print('Respond with a prefix of one of search, genre, recommend, or quit')\n return False\n if 'search'.startswith(inp):\n search(index)\n return False\n elif 'genre'.startswith(inp):\n ## Do a genre search\n return False\n elif 'recommend'.startswith(inp):\n recommend(index)\n return False\n elif 'quit'.startswith(inp):\n # Tell the outer loop to finish\n return True\n else:\n print('Respond with a prefix of one of search, genre, recommend, or quit')\n return False\n\ndef search(index):\n query = input(\"Searching for: \")\n i = index['doc2vec']\n for result in doc2vec.query(query, i):\n print(index['movies']['movieId'][result], index['movies']['title'][result])\n \ndef recommend(index):\n query = int(input(\"User ID (number): \"))\n i = index['recommender']\n for result in recommender.query(query, i):\n if numpy.isnan(result):\n break\n print(index['movies']['movieId'][result], index['movies']['title'][result])\n\ndef ask(prompt):\n res = input(prompt)\n if res.lower() in ['y', 'yes']:\n return True\n return False\n\nif __name__ == '__main__': # So the program only starts if it's not getting imported\n main()\n","repo_name":"liate7/cs398c-final-project","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"71612577684","text":"from datetime import datetime\n\n\ndef mounth_(date):\n mon = {'январ': 'jan', 'феврал': 'feb', 'март': 'mar', 'апрел': 'apr', 'ма': 'may',\n 'июн': 'jun', 'июл': 'jul', 'август': 'aug', 'сентябр': 'sep', 'октябр': 'oct', 'ноябр': 'nov',\n 'декабр': 'dec'}\n date.lower()\n date_ = date.split()\n if date_[0] == 'сегодня':\n da = 0\n elif date_[0] == 'вчера':\n da = 1\n else:\n day = date_[0]\n date_ = date_[1][:-1]\n m = mon[date_]\n datee = day+' '+m+' '+str(datetime.today().year)\n d = datetime.strptime(datee, '%d %b %Y')\n da = int((datetime.now() - d).days)\n return da\n\n\n\n\n# date = '23 Июля'\n# x = mounth_(date)\n# print(x)","repo_name":"paldess/avito-scrapy","sub_path":"date_mounth.py","file_name":"date_mounth.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"41898025639","text":"# pylint: disable=import-error\n\nimport string\nimport re\nimport numpy as np\nimport torch\n\nNEGATION_WORDS = {'not', 'no', 'didn', 'didnt', 'wont',\n 'dont', 'don', 'doesnt', 'doesn', 'shouldnt', 'shouldn'}\n\nSTOP_WORDS = {'also', 'ltd', 'once', 'll', 'make', 'he', 'through', 'all', 'top', 'from', 'or', 's',\n 'hereby', 'so', 'yours', 'since', 'meanwhile', 're', 'over', 'mrs', 'thereafter',\n 'ca', 'move', 'mill', 'such', 'wherever', 'on', 'besides', 'few', 'does', 'yet', 'y',\n 'much', 'my', 'him', 'yourselves', 'as', 'ours', 'therefore', 'amongst', 'due', 'mr',\n 'here', 'may', 'onto', 'it', 'whose', 'himself', 'least', 'i', 'what', 'many', 'd',\n 'hereafter', 'anything', 'of', 'whoever', 'made', 'be', 'sometimes', 'put', 'found',\n 'than', 'although', 'anyway', 'seems', 'you', 'under', 'above', 'themselves', 'thus',\n 'a', 'con', 'when', 'why', 'back', 'until', 'first', 'theirs', 'describe', 'because',\n 'always', 'too', 'across', 't', 'anyhow', 'her', 'ourselves', 'latterly', 'six', 'an',\n 'somewhere', 'else', 'for', 'really', 'up', 'among', 'used', 'whenever', 'during',\n 'nowhere', 'nothing', 'if', 'afterwards', 'that', 'whereas', 'elsewhere', 'along',\n 'been', 'both', 'etc', 'ie', 'might', 'into', 'inc', 'with', 'formerly', 'there',\n 'will', 'own', 'seemed', 'though', 'was', 'whereupon', 'just', 'except', 'has',\n 'your', 'do', 'around', 'herein', 'anywhere', 'rd', 'now', 'sincere', 'this', 'me',\n 'throughout', 'unless', 'against', 'out', 'most', 'various', 'others', 'them', 'th',\n 'eleven', 'am', 'indeed', 'name', 'his', 'often', 'yourself', 'only', 'kg', 'take',\n 'everything', 'cry', 'and', 'quite', 'itself', 'in', 'to', 'well', 'namely', 'thru',\n 'see', 'would', 'which', 'beforehand', 'myself', 'having', 'however', 'go', 'did',\n 'below', 'those', 'st', 'computer', 'several', 'whether', 'have', 'between', 'any',\n 'becoming', 'thereby', 'while', 'were', 'whole', 'latter', 'but', 'km', 'amount',\n 'either', 'herself', 'whereafter', 'never', 'system', 'un', 'find', 'please', 'o',\n 'hereupon', 'thin', 'give', 'third', 'every', 'doing', 'our', 'towards', 'another',\n 'before', 'within', 'mine', 'almost', 'mostly', 'down', 'de', 'seeming', 'moreover',\n 'some', 'us', 'former', 'call', 'should', 'she', 'even', 'beyond', 'became', 'other',\n 'show', 'eg', 'about', 'side', 'its', 'these', 'rather', 'alone', 'nd', 'after',\n 'already', 'keep', 'more', 'behind', 'thick', 'together', 'upon', 'interest', 'dr',\n 'otherwise', 'full', 'can', 'next', 'last', 'bill', 'their', 'hers', 'hence', 'by',\n 'become', 'something', 'who', 'further', 'someone', 'must', 'say', 'each', 'very',\n 'whom', 'again', 'then', 'we', 'same', 'via', 'where', 'per', 'are', 'the', 'still',\n 'toward', 'anyone', 'therein', 'being', 'off', 'perhaps', 'is', 'had', 'co', 'at',\n 'done', 'everywhere', 'less', 'wherein', 'could', 'ma', 'sometime', 'seem', 'somehow',\n 'beside', 'whatever', 'whereby', 'ever', 'everyone', 'nevertheless', 'serious',\n 'using', 'becomes', 'enough', 'how', 'bottom', 've', 'regarding', 'm', 'they', 'part',\n 'front', 'fill', 'get', 'nobody', 'detail'}\n\nurl_rx = re.compile(r\"http\\S+|www\\S+|@\\w+|#\\w+\")\nhtml_rx = re.compile(r'<.*?>')\nmulti_dot_rx = re.compile(r'\\.{2,}')\nesc_rx = re.compile(r'\\\\[ntr]')\n\nalpha_table = str.maketrans({char: ' ' if char not in (\n '?', '!', '.') and not char.isalpha() else char for char in string.punctuation + string.digits})\n\n\ndef get_average_word_embeddings(model, docs):\n \"\"\"\n Calcualte average word 
embeddings for list of docs using word vector model.\n\n Args:\n model (class): Word vector model\n docs (array-like): List of docs of tokens.\n\n Returns:\n ndarray: Average word embeddings for the input documents.\n \"\"\"\n filtered_tokens = [\n [token for token in doc if token in model.wv.key_to_index]\n for doc in docs]\n\n doc_embeddings = np.zeros(\n (len(filtered_tokens), model.vector_size), dtype=np.float32)\n\n for idx, tokens in enumerate(filtered_tokens):\n if tokens:\n doc_embeddings[idx] = np.mean(model.wv[tokens], axis=0)\n\n return doc_embeddings\n\n\ndef load_torch_model(model, path, train=False):\n \"\"\"\n Load PyTorch model to GPU for inference.\n\n Args:\n model (nn.Module): Model class to load state dict.\n path (str): Model path.\n\n Returns:\n Torch.nn.Module: Loaded PyTorch model\n \"\"\"\n checkpoint = torch.load(path)\n\n model.load_state_dict(checkpoint['model_state_dict'])\n\n if train:\n return model, checkpoint['optimizer_state_dict'], checkpoint['scheduler_state_dict']\n\n model.eval()\n return model\n\n\ndef train_word_vector_algo(model, texts, path, update=True, save=True, epochs=30, min_count=5):\n \"\"\"\n Train word vector algorithm and save it locally.\n\n Args:\n model (class): Intialized instance of word vector model. (Either Word2Vec or FastText).\n texts (list): List of tokens from documents.\n path (str): Path to save trained model.\n update (bool, optional): Flag indicating whether to update pretrained model.\n Defaults to True.\n \"\"\"\n model.build_vocab(texts, update=update, min_count=min_count)\n model.train(texts,\n total_examples=model.corpus_count,\n epochs=epochs)\n if save:\n model.save(path)\n\n\ndef tokenize(text):\n \"\"\"\n Clean and tokenize text for processing.\n\n Args:\n text (str): Text/Review to be tokenized.\n\n Returns:\n list: List of cleaned tokens generated from text.\n \"\"\"\n text = url_rx.sub(' ', text).lower()\n text = html_rx.sub(' ', text)\n text = esc_rx.sub(' ', text)\n\n # Replace anything other than alphabets -- ?, !, . will be sentence stoppers -- needed for\n # sentence tokenization.\n text = multi_dot_rx.sub('.', text)\n text = text.translate(alpha_table)\n text = text.replace('.', ' . ').replace('!', ' ! ').replace('?', ' ? 
')\n tokens = text.split()\n\n tokens = [token\n for token in tokens if token not in STOP_WORDS]\n\n for i, token in enumerate(tokens[:-1]):\n if token in NEGATION_WORDS:\n tokens[i:i+2] = ['negation_' + tokens[i+1], '']\n\n # return ' '.join([token for token in tokens if len(token) > 1])\n return tokens\n\n\ndef clean_for_wv(doc):\n \"\"\"\n Clean unneccesary/meaningless tokens from generated tokens.\n\n Args:\n doc (list): List of tokens from documents.\n\n Returns:\n list: List of filtered tokens for documents.\n \"\"\"\n return [[token for token in tokens if len(token) > 1] for tokens in doc]\n\n\ndef downsampling(label, text):\n \"\"\"\n Downsample majority class in binary classification to balance class.\n\n Args:\n label (list): List of labels.\n text (list): List of documents.\n\n Returns:\n tuple: Downsampled labels and documents.\n \"\"\"\n pos_idx = [idx for idx, x in enumerate(label) if x == 1]\n neg_idx = [idx for idx, x in enumerate(label) if x == 0]\n\n # no need to shuflle since it will be shuffled in train_test_split.\n if len(pos_idx) < len(neg_idx):\n downsampled_idx = pos_idx + neg_idx[:len(pos_idx)]\n else:\n downsampled_idx = neg_idx + pos_idx[:len(neg_idx)]\n\n return [label[i] for i in downsampled_idx], [text[i] for i in downsampled_idx]\n","repo_name":"intellistream/SentiStream","sub_path":"SentiStream/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"42442035264","text":"from animal import Animal\r\nclass Perro:\r\n def __init__ (self):\r\n self.propietario=\"\"\r\n self.fecha_vacuncion=\"\"\r\n self.animal=Animal()\r\n\r\n def emitir_sonido(self):\r\n if self.edad<=3:\r\n print(\"auf auf\")\r\n\r\n if self.edad>3:\r\n print(\"Guau Guau\")\r\n\r\n \r\n\r\n ","repo_name":"Santiago093/Granja","sub_path":"perro.py","file_name":"perro.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"10080869732","text":"def computegrade(grade): \n try:\n score=float(grade)\n if score<0.1 or score>1.0:\n print(\"Bad Score\")\n except:\n print(\"Bad Score\")\n exit(0)\n if 1.0>=score>=0.9:\n print(\"A\")\n elif 0.8<=score<0.9:\n print(\"B\")\n elif 0.7<=score<0.8:\n print(\"C\")\n elif 0.6<=score<0.7:\n print(\"D\")\n elif score<0.6:\n print(\"No Grade\")\n\ncomputegrade(1.1)\n","repo_name":"ankursingh2111/Python_Project","sub_path":"grade.py","file_name":"grade.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"24183985664","text":"# https://atcoder.jp/contests/abc104/tasks/abc104_c\n\nimport itertools\nimport collections\nimport bisect\nimport math\n\ndef main():\n D, G = map(int, input().split())\n p = []\n c = []\n for _ in range(D):\n pi, ci = map(int, input().split())\n p.append(pi)\n c.append(ci)\n\n\n # big value\n INF = int(1e15)\n ans = INF\n # Bit 全探索\n pattern = 1 << D # 1 をリストの長さだけ左にシフトする\n for i in range(pattern):\n scope = []\n for j in range(D):\n if (i >> j) & 1:\n scope.append(j)\n\n score = 0\n result = 0\n for s in scope:\n score += (s + 1) * p[s] * 100 + c[s]\n result += p[s]\n\n # print(\"i = {}, score = {}, result = {}\".format(i, score, result))\n if score < G:\n for idx in range(D - 1, -1, -1):\n if idx not in scope:\n rest = G - score\n count = min(math.ceil(rest / ((idx + 1) * 100)), p[idx])\n # print(\"idx = {}, count = {}\".format(idx, count))\n score += (idx + 1) * count * 100\n result += count\n break\n\n # print(\"i = {}, score = {}, result = {}\".format(i, score, result))\n if score >= G:\n ans = min(ans, result)\n\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"takecian/ProgrammingStudyLog","sub_path":"AtCoder/ABC/104/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"18933665270","text":"import math\n\nfrom wolframclient.language import wl, wlexpr\nfrom wolframclient.evaluation import WolframLanguageSession\nfrom wolframclient.deserializers import WXFConsumer\n\n_MATHEMATICA_PATH_ = 'F:\\\\Mathematica\\\\12.0\\\\MathKernel.exe'\n\nComplex = wl.Complex\n\n\nclass MathConsumer(WXFConsumer):\n \"\"\"Implement a consumer with basic arithmetic operation.\"\"\"\n\n # Specific convertion for Pi, other symbols use the default method.\n def consume_symbol(self, current_token, tokens, **kwargs):\n # Convert symbol Pi to its numeric value as defined in Python\n if current_token.data == 'Pi':\n return math.pi\n else:\n return super().consume_symbol(current_token, tokens, **kwargs)\n\n # Associate heads with the method to convert them to Python types.\n DISPATCH = {\n Complex: 'build_complex',\n wl.Rational: 'build_rational',\n wl.Plus: 'build_plus',\n wl.Times: 'build_times'\n }\n\n # Overload the method that builds functions.\n def build_function(self, head, args, **kwargs):\n # check if there is a specific function associated to the function head\n builder_func = self.DISPATCH.get(head, None)\n if builder_func is not None:\n try:\n # get the class method and apply it to the arguments.\n return getattr(self, builder_func)(*args)\n except Exception:\n # instead of failing, fallback to default case.\n return super().build_function(head, args, **kwargs)\n # heads not listed in DISPATCH are delegated to parent's method\n else:\n return super().build_function(head, args, **kwargs)\n\n def build_plus(self, *args):\n total = 0\n for arg in args:\n total = total + arg\n return total\n\n def build_times(self, *args):\n total = 1\n for arg in args:\n total = total * arg\n return total\n\n def build_rational(self, *args):\n if len(args) != 2:\n raise ValueError('Rational format not supported.')\n return args[0] / args[1]\n\n def build_complex(self, *args):\n if len(args) != 2:\n raise ValueError('Complex format not supported.')\n return complex(args[0], args[1])\n\n\nclass MathLink:\n\n def __init__(self):\n self.session = WolframLanguageSession(_MATHEMATICA_PATH_)\n\n def Call(self, cmd: str):\n return self.session.evaluate(wlexpr(cmd))\n\n def Quit(self):\n self.session.stop()\n\n\ndef CallMath():\n session = WolframLanguageSession(_MATHEMATICA_PATH_)\n [_, resr, resi] = session.evaluate(wlexpr('Quiet[If[res = Check[NIntegrate[1/(x + 0.5 I), {x, 0, 1}], False, {NIntegrate::slwcon, NIntegrate::ncvb}]; BooleanQ[res], {False, 0, 0}, {True, Re[res], Im[res]}], {NIntegrate::slwcon, NIntegrate::ncvb}]'))\n # complex_result = binary_deserialize(res, consumer=MathConsumer())\n print(resr)\n print(resi)\n session.stop()\n","repo_name":"NBAlexis/ContourAStar","sub_path":"MathematicaIntegrator/Constants.py","file_name":"Constants.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"2324892422","text":"from wtforms import HiddenField\n\nfrom .base import BaseModelView\n\n\nclass UserView(BaseModelView):\n column_list = [\"first_name\", \"last_name\", \"email\",\n \"role\", \"unique_id\", \"is_deleted\",\n \"last_login\"]\n column_searchable_list = (\"first_name\", \"last_name\", \"email\")\n\n form_extra_fields = {\n \"tenant_id\": HiddenField()\n }\n\n form_columns = [\n \"role\",\n \"first_name\",\n \"last_name\",\n \"email\",\n \"password\",\n \"tenant_id\",\n ]\n\n\nclass UserGroupView(BaseModelView):\n form_columns = [\n \"name\",\n \"description\",\n \"group_code\",\n \"role\",\n \"idp_entity_id\",\n \"idp_url\",\n \"idp_x509cert\",\n \"idp_first_name\",\n \"idp_last_name\",\n \"idp_username\",\n \"idp_email\",\n \"idp_usertype\",\n \"tenant_id\",\n \"is_default\"\n ]\n form_extra_fields = {\"tenant_id\": HiddenField()}\n column_list = [\"name\", \"is_default\"]\n","repo_name":"spoorthythimmaiah/regalix","sub_path":"sharedemos-dev-two/sharedemos/admin/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"13780424041","text":"import numpy as np\nimport os\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torchvision import models\nfrom torchvision.models.vgg import VGG\n\nclass _DenseUpsamplingConvModule(nn.Module):\n def __init__(self, down_factor, in_dim, num_classes):\n super(_DenseUpsamplingConvModule, self).__init__()\n upsample_dim = (down_factor ** 2) * num_classes\n self.conv = nn.Conv2d(in_dim, upsample_dim, kernel_size=3, padding=1)\n self.bn = nn.BatchNorm2d(upsample_dim)\n self.relu = nn.ReLU(inplace=True)\n self.pixel_shuffle = nn.PixelShuffle(down_factor)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n x = self.pixel_shuffle(x)\n return x\n\nclass HDC(nn.Module):\n def __init__(self, n_classes):\n super(HDC, self).__init__()\n resnet = models.resnet152(pretrained = True)\n self.layer0 = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool)\n self.layer1 = resnet.layer1\n self.layer2 = resnet.layer2\n self.layer3 = resnet.layer3\n self.layer4 = resnet.layer4\n\n for n, m in self.layer3.named_modules():\n if 'conv2' in n or 'downsample.0' in n:\n m.stride = (1, 1)\n for n, m in self.layer4.named_modules():\n if 'conv2' in n or 'downsample.0' in n:\n m.stride = (1, 1)\n layer3_group_config = [1, 2, 5, 9]\n for idx in range(len(self.layer3)):\n self.layer3[idx].conv2.dilation = (layer3_group_config[idx % 4], layer3_group_config[idx % 4])\n self.layer3[idx].conv2.padding = (layer3_group_config[idx % 4], layer3_group_config[idx % 4])\n layer4_group_config = [5, 9, 17]\n for idx in range(len(self.layer4)):\n self.layer4[idx].conv2.dilation = (layer4_group_config[idx], layer4_group_config[idx])\n self.layer4[idx].conv2.padding = (layer4_group_config[idx], layer4_group_config[idx])\n\n self.duc = _DenseUpsamplingConvModule(8, 2048, n_classes)\n def forward(self, x):\n x = self.layer0(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.duc(x)\n return x","repo_name":"zaq851017/My_Image_Segmentation","sub_path":"network/backup/HDC.py","file_name":"HDC.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"10801733316","text":"#-----------------------------------------------------------------------Config de l'assistant itself-----------------------------------------------------------------------------------\n#Capacités de l'assistant\nSKILLS = [\"fermer\", \"ouvrir\", \"chercher\", \"heure\", \"lumière\", \"lire agenda\"]\n\n#Commandes pour exploiter ces capacités\nCOMMANDS = {\n \"fermer\" : [\"laisse-moi\", \"ferme-toi\", \"arrête-toi\"],\n \"ouvrir\" : [\"ouvre\", \"ouvrir\", \"lancer\", \"lance\", \"démarrer\", \"démarre\"],\n \"heure\" : [\"quelle heure est-il\", \"il est quelle heure\"],\n \"lumière\" : [\"allume\", \"éteins\"],\n \"lire agenda\": [\"qu'est-ce que j'ai de prévu\", \"quel est le programme\"],\n \"chercher\" : [\"cherche sur youtube\", \"cherche sur internet\", \"cherche sur wikipedia\", \"recherche sur youtube\", \"recherche sur internet\", \"recherche sur wikipedia\", \"mets-moi sur youtube\", \"cherche\", \"recherche\"]\n}\n\n#Comment appeller l'assistant\nCALL_WORDS = [\"assistant\"]\n\n#Ce que l'assistant peut répondre quand on le rappelle\nLINES = [\"Que puis-je faire pour vous ?\", \"Plaît-il ?\", \"Me voilà\", \"C'est bien moi, je vous écoute\", \"Je vous écoute\"]\n\n\n#--------------------------------------------------------------Config des applications que l'assistant peut ouvrir---------------------------------------------------------------------\n\n#Applications connues\nKNOWN_APPS = [\"code\", \"brave\", \"discord\", \"wps\", \"workFile\", \"minecraft\"]\n\n#Alias des applications\nAPPS = {\n \"code\" : [\"vscode\", \"visual studio\", \"code\", \"vs\"],\n \"brave\" : [\"brave\", \"internet\"],\n \"discord\" : [\"discord\"],\n \"wps\" : [\"wps\", \"word\", \"éditeur\"],\n \"workFile\" : [\"dossier travail\", \"travail\"],\n \"minecraft\" : [\"minecraft\"]\n}\n\n#Chemins vers les applications\nPATHS = {\n \"code\" : \"D:\\\\programmes\\\\Microsoft VS Code\\\\Code.exe\",\n \"brave\" : \"C:\\\\Program Files\\\\BraveSoftware\\\\Brave-Browser\\\\Application\\\\brave.exe\",\n \"discord\" : \"D:\\\\programmes\\\\Discord\\\\Update.exe --processStart Discord.exe\",\n \"wps\" : \"C:\\\\Users\\\\Administrateur\\\\AppData\\\\Local\\\\Kingsoft\\\\WPS Office\\\\ksolaunch.exe\",\n \"workFile\" : 'explorer /select,\"D:\\\\Travail\\\\\"',\n \"minecraft\" : \"D:\\\\Jeux\\\\Minecraft\\\\MinecraftLauncher.exe\"\n}\n\n#Noms conviviaux des applications\nNAMES = {\n \"code\" : \"Visual Studio Code\",\n \"brave\" : \"Brave\",\n \"discord\" : \"Discord\",\n \"wps\" : \"WPS\",\n \"workFile\" : \"Dossier de Travail\",\n \"minecraft\" : \"Minecraft\"\n}\n\n\n#----------------------------------------------------------------------Config de la recherche par l'assistant--------------------------------------------------------------------------\n\nDDGO_SEARCH_TYPES = [\"duckduckgo\", \"recherche\"]\n\n\n#----------------------------------------------------------------------------Config du calendrier--------------------------------------------------------------------------------------\n\nDAYS = [\"lundi\", \"mardi\", \"mercredi\", \"jeudi\", \"vendredi\", \"samedi\", \"dimanche\"]\nMONTHS = [\"janvier\", \"février\", \"mars\", \"avril\", \"mai\", \"juin\", \"juillet\", \"août\", \"septembre\", \"octobre\", \"novembre\", \"décembre\"]\n\n#------------------------------------------------------------------------------Config de la météo--------------------------------------------------------------------------------------\n\nAPI_KEY = \"\" #Your api key for weatherbit\nCITIES = {\n 
\"Tours\" : [47.38333, 0.68333]\n}\n\n#----------------------------------------------------------------------------Config des ampoules---------------------------------------------------------------------------------------\n\n#Différentes ampoules que l'assistant peut contrôler\nBULBS = [\"spot chambre\"]\n\n#Table de correspondance entre le nom de l'ampoule et son IP\nBULBS_IPS = {\n \"spot chambre\" : \"192.168.0.136\"\n}\n\nBULB_ROOMS = {\n \"chambre\" : [\"spot chambre\"]\n}\n\nROOMS = [\"chambre\"]\n","repo_name":"Nalmac/AssistantSarah","sub_path":"constants_safe.py","file_name":"constants_safe.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"17434460856","text":"import csv\r\nimport re\r\nimport sys\r\nfrom functions import *\r\nimport xlsxwriter\r\n\r\n\r\ndef queueStatsToExcel(fileName, excelName):\r\n\r\n fileR = open(fileName)\r\n fileLines = fileR.readlines()\r\n workbook = xlsxwriter.Workbook(str(excelName), {'strings_to_numbers': True})\r\n ws = workbook.add_worksheet()\r\n ws.set_column('B:AG', 12)\r\n queueNum = 0\r\n row = 0\r\n portName = 0\r\n resultList = []\r\n PRINT = 0\r\n PRINTP = 0\r\n\r\n ### Start of loop\r\n queueNum = 0\r\n portNum = 888\r\n resultList = []\r\n portName = 0\r\n NOQUEUE = 999\r\n queueNum = NOQUEUE\r\n\r\n for line in fileLines:\r\n\r\n #print(\"currentline is \" + line)\r\n resultList = parseQueueRate(line,portName,queueNum)\r\n #portName = getPortName(resultList)\r\n queueType = getQueueType(resultList)\r\n queueNum = getQueueNum(resultList)\r\n #print(str(portName),PRINT)\r\n # Check if the portName is null, if it is then\r\n # we have not started looking at intf counters yet\r\n if (int(resultList[0]) == 0):\r\n print1(\"portname is null\",PRINT)\r\n continue\r\n\r\n # Check if we are parsing info for a new port\r\n if (str(portName) != str(resultList[0])):\r\n print1(\"portname not equal to \" + str(resultList[0]),PRINT)\r\n row = row + 1\r\n portName = resultList[0]\r\n # Write the portName in the excel\r\n print1(\"about to write \" + resultList[0] + \"row \" + str(row),PRINT)\r\n # maybe ws.write(row,0,portName) should work as well\r\n ws.write(row,int(resultList[1]),portName)\r\n continue\r\n\r\n # Check if this is not a counter line\r\n if queueType == 0:\r\n print1(\"queuetype is zero\",PRINT)\r\n continue\r\n\r\n #print(\"TOTAL RETULT IS \\n \" + str(resultList) + \"row \" + str(row))\r\n resultList[0] = row\r\n writeToExcel(resultList,ws,1)\r\n ### end of loop\r\n\r\n # Write the header in Excel\r\n queueHeaders(ws)\r\n\r\n\r\n # Closing file\r\n #outputFile.close()\r\n print1(\"TOTAL RETULT IS \\n \" + str(resultList),PRINT)\r\n workbook.close()\r\n fileR.close()\r\n\r\nif __name__ == \"__main__\":\r\n file_name = sys.argv[1]\r\n input_name = file_name.rsplit('.', 1)\r\n out_name = str(input_name[0]) + \".xlsx\"\r\n # First Arg is the name of the file to be parse\r\n # Second arg is the name ouf the excel file to be outputed\r\n # queueStatsToExcel(\"queue_rate_output.txt\",\"rate.xlsx\")\r\nqueueStatsToExcel(file_name, out_name)\r\n","repo_name":"kcorkins/switch-logs","sub_path":"scripts/queue_rate.py","file_name":"queue_rate.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"5384280448","text":"import os\nimport shutil\nimport typing\nimport json\nimport numpy as np\nimport argparse as ap\n\nos.environ['TF_GPU_THREAD_MODE'] = 'gpu_private'\n# os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"0\"\n\nfrom GavinBackend.GavinCore.models import TransformerIntegration, tf, tfds, PerformerIntegration, FNetIntegration, PreTrainedEmbeddingTransformerIntegration, \\\n RotaryTransformerIntegration\nfrom GavinBackend.GavinCore.datasets import DatasetAPICreator, DatasetDirectFromFileAPICreator\nfrom GavinBackend.GavinCore.load_data import load_tokenized_data\nfrom GavinBackend.GavinCore.callbacks import PredictCallback\n\n_MODEL_TYPES = {\n 'transformer': TransformerIntegration,\n 'performer': PerformerIntegration,\n 'fnet': FNetIntegration,\n 'rotary_transformer': RotaryTransformerIntegration,\n 'pretrained_embedding_transformer': PreTrainedEmbeddingTransformerIntegration\n}\n\n\ndef _get_embedding_idx(embedding_path):\n embedding_idx = {}\n with open(embedding_path, 'r', encoding='utf-8') as f:\n for line in f:\n word, coefs = line.split(maxsplit=1)\n coefs = np.fromstring(coefs, 'f', sep=' ')\n embedding_idx[word] = coefs\n return embedding_idx\n\n\ndef _get_embedding_matrix(embedding_idx, tokenizer: tfds.deprecated.text.SubwordTextEncoder):\n i_dff = int(embedding_idx.get(list(embedding_idx.keys())[0]).shape[0])\n embedding_matrix = np.zeros((len(tokenizer.subwords) + 1, i_dff))\n for i, word in enumerate(tokenizer.subwords):\n embedding_vector = embedding_idx.get(word)\n if embedding_vector is not None and embedding_vector.shape[0] == i_dff:\n embedding_matrix[i] = embedding_vector\n return embedding_matrix, i_dff\n\n\ndef _valid_path(string, bool_=False):\n if os.path.isfile(string):\n if bool_:\n return True\n return string\n elif bool_:\n return False\n else:\n raise FileNotFoundError(string)\n\n\ndef _dir_path(string):\n if \"https://\" in string:\n return string\n if os.path.isdir(string):\n return string\n else:\n raise NotADirectoryError(string)\n\n\ndef _valid_args(args):\n if args.model == 'pretrained_embedding_transformer' and args.embedding_file is None:\n raise ValueError(\"Embedding file must be specified for pretrained_embedding_transformer\")\n if args.model == 'performer' and args.features is None:\n raise ValueError(\"Number of features must be specified for performer\")\n if not _valid_path(args.tokenizer_file + \".subwords\") and not _valid_path(args.tokenizer_file + \".json\"):\n raise FileNotFoundError(args.tokenizer_file)\n if \"https\" not in args.dataset_path:\n if not all(_valid_path(os.path.join(args.dataset_path, args.dataset_name + ext), bool_=True) for ext in ['.from', '.to']) and \\\n not all(_valid_path(os.path.join(args.dataset_path, args.dataset_name + ext), bool_=True) for ext in ['-from.BIN', '-to.BIN']):\n raise FileNotFoundError(args.dataset_name)\n if not os.path.exists(args.logdir):\n os.makedirs(args.logdir)\n return True\n\n\ndef _get_train_data(max_samples, dataset_path, file_name, model, buffer_size, batch_size, python_legacy=False, cpp_legacy=False,\n use_memory_loaders=True):\n if python_legacy or use_memory_loaders:\n questions, answers = load_tokenized_data(max_samples=max_samples,\n data_path=dataset_path,\n filename=file_name,\n s_token=model.start_token,\n e_token=model.end_token, max_len=model.max_len,\n python_legacy=python_legacy,\n cpp_legacy=cpp_legacy)\n\n questions = tf.keras.preprocessing.sequence.pad_sequences(questions, maxlen=model.max_len, padding='post')\n answers = 
tf.keras.preprocessing.sequence.pad_sequences(answers, maxlen=model.max_len, padding='post')\n d_t, d_v = DatasetAPICreator.create_data_objects(questions, answers, buffer_size=buffer_size,\n batch_size=batch_size,\n vocab_size=model.vocab_size)\n else:\n path_to = os.path.join(dataset_path, \"{}-{}.BIN\")\n # noinspection StrFormat\n d_t, d_v = DatasetDirectFromFileAPICreator.create_data_objects(questions_file=path_to.format(file_name, \"from\"),\n answers_file=path_to.format(file_name, \"to\"),\n buffer_size=buffer_size,\n batch_size=batch_size,\n vocab_size=model.vocab_size,\n max_length=model.max_len,\n number_of_samples=max_samples,\n start_token=model.start_token[0],\n end_token=model.end_token[0],\n padding_value=0)\n\n return d_t, d_v\n\n\ndef _validate_kwargs(kwargs: typing.Dict, model_type: str):\n must_have_keys = ['num_layers', 'units', 'd_model', 'num_heads', 'base_log_dir', 'dropout',\n 'max_len', 'tokenizer', 'name', 'save_freq', 'batch_size', 'mixed', 'metadata']\n _unique_keys = {'performer': ['num_features'],\n 'pretrained_embedding_transformer': ['embedding_matrix']}\n for key in must_have_keys:\n if key not in kwargs:\n raise ValueError(f\"Missing key {key} in kwargs\")\n if model_type in _unique_keys.keys():\n for key in _unique_keys[model_type]:\n if key not in kwargs:\n raise ValueError(f\"Missing key {key} in kwargs for model type {model_type}\")\n\n\ndef _load_metadata(metadata_path) -> typing.Dict:\n with open(metadata_path, 'r') as f:\n metadata = json.load(f)\n return metadata\n\n\ndef _load_previous_if_exists(model_type: str, log_dir: str,\n model_name: str) -> typing.Optional[typing.Tuple[\n typing.Union[TransformerIntegration, PerformerIntegration, FNetIntegration, PreTrainedEmbeddingTransformerIntegration, RotaryTransformerIntegration],\n typing.Dict]]:\n if _valid_path(os.path.join(log_dir, model_name), bool_=True):\n metadata_path = os.path.join(log_dir, model_name, \"config/\", \"metadata.json\")\n if not _valid_path(metadata_path, bool_=True):\n print(\"Metadata file not found, checking for config.json instead\")\n metadata_path = os.path.join(os.path.dirname(metadata_path), \"config.json\")\n if not _valid_path(metadata_path, bool_=True):\n print(\"config.json not found, removing model, likely invalid.\")\n shutil.rmtree(os.path.join(log_dir, model_name))\n\n print(\"Loading previous model\")\n model = _MODEL_TYPES[model_type].load(log_dir, model_name)\n metadata = _load_metadata(metadata_path)\n else:\n model = None\n metadata = None\n return model, metadata\n\n\ndef _freq_type(arg: typing.Union[str, int]) -> typing.Union[str, int]:\n if type(arg) in [str, int]:\n return arg if not arg.isnumeric() else int(arg)\n\n\ndef main():\n parser = ap.ArgumentParser()\n parser.add_argument('--python-legacy', action=ap.BooleanOptionalAction, default=False, help=\"Use the legacy python implementation of the data loading\")\n parser.add_argument('--cpp-legacy', action=ap.BooleanOptionalAction, default=False, help=\"Use the legacy C++ implementation of the data loading\")\n parser.add_argument('-dp', '--dataset-path', type=_dir_path, required=True, help='Path to the dataset')\n parser.add_argument('-dn', '--dataset-name', type=str, required=True, help=\"Dataset name\") # dataset_file_name\n parser.add_argument('--model', type=str, default='transformer', choices=_MODEL_TYPES.keys(), help='Model type')\n parser.add_argument('--mixed-precision', action=ap.BooleanOptionalAction, default=False, help='Use mixed precision training')\n parser.add_argument('--logdir', 
type=str, default='./logs/', help='Directory to save logs and checkpoints')\n parser.add_argument('--model-name', type=str, required=True, help='Name of the model to be saved')\n parser.add_argument('--embedding-file', type=_valid_path, help='Path to embedding file (only for pretrained_embedding_transformer)')\n parser.add_argument('--max-samples', type=int, default=None, help='Maximum number of samples to load from dataset', required=True)\n parser.add_argument('--batch-size', type=int, help='Maximum batch size to use for training', required=True)\n parser.add_argument('--buffer-size', type=int, help='Maximum buffer size to use for training', default=20_000)\n parser.add_argument('--tokenizer-file', type=str, help='Tokenizer to use', required=True)\n parser.add_argument('--epochs', type=int, help=\"Number of epochs to train for\", required=True)\n parser.add_argument('--max-seq-length', type=int, help='Maximum sequence length to use for training', required=True)\n parser.add_argument('--layers', type=int, help='Number of layers to use for training', required=True)\n parser.add_argument('--d-model', type=int, help='Model dimension to use for training', required=True)\n parser.add_argument('--heads', type=int, help='Number of heads to use for training', required=True)\n parser.add_argument('--dff', type=int, help='Feed forward dimension to use for training', required=True)\n parser.add_argument('--dropout', type=float, help='Dropout to use for training', required=True)\n parser.add_argument('--save-every', type=_freq_type, help='Save every n epochs/steps (\"epoch\" for epochs, or number for every n steps)', default='epoch')\n parser.add_argument('--features', type=int, help='Number of features to use for training (only for performer)')\n parser.add_argument('--streaming', action=ap.BooleanOptionalAction, default=False, help='Use streaming the dataset for training (slow, will increase step time)')\n\n args = parser.parse_args()\n _valid_args(args)\n tokenizer = tfds.deprecated.text.SubwordTextEncoder.load_from_file(args.tokenizer_file)\n\n model_kwargs = {\n 'num_layers': args.layers,\n 'units': args.dff,\n 'd_model': args.d_model,\n 'num_heads': args.heads,\n 'base_log_dir': args.logdir,\n 'dropout': args.dropout,\n 'max_len': args.max_seq_length,\n 'tokenizer': tokenizer,\n 'name': args.model_name,\n 'save_freq': args.save_every,\n 'batch_size': args.batch_size,\n 'mixed': args.mixed_precision,\n 'metadata': {'max_samples': args.max_samples, 'batch_size': args.batch_size, 'buffer_size': args.buffer_size}\n }\n\n if args.model == 'pretrained_embedding_transformer':\n matrix, d_model = _get_embedding_matrix(_get_embedding_idx(args.embedding_file), tokenizer)\n print(f\"You selected {args.d_model} however a value of {d_model} was used for D_MODEL because the embedding file was {d_model} in size.\")\n model_kwargs['embedding_matrix'] = matrix\n model_kwargs['d_model'] = d_model\n elif args.model == 'performer':\n model_kwargs['num_features'] = args.features\n train_model(args.model, model_kwargs, max_samples=args.max_samples, dataset_path=args.dataset_path, dataset_name=args.dataset_name,\n buffer_size=args.buffer_size, epochs=args.epochs, streaming=args.streaming, python_legacy=args.python_legacy, cpp_legacy=args.cpp_legacy,\n tokenizer=tokenizer, batch_size=args.batch_size)\n\n\ndef train_model(model_type: typing.AnyStr, model_kwargs: typing.Dict, max_samples: int,\n batch_size: int, buffer_size: int, dataset_path: typing.AnyStr, dataset_name: typing.AnyStr,\n python_legacy: bool, 
cpp_legacy: bool, streaming: bool, tokenizer: tfds.deprecated.text.SubwordTextEncoder,\n epochs: int):\n \"\"\"\n Train a model\n :param model_type:\n The type of model to train\n :param model_kwargs:\n The kwargs to pass to the model\n :param max_samples:\n The maximum number of samples to load from the dataset\n :param batch_size:\n The batch size to use for training\n :param buffer_size:\n The buffer size to use for training\n :param dataset_path:\n The path to the dataset\n :param dataset_name:\n The name of the dataset\n :param python_legacy:\n Use the legacy python implementation of the data loading\n :param cpp_legacy:\n Use the legacy C++ implementation of the data loading\n :param streaming:\n Use streaming the dataset for training (slow, will increase step time)\n :param tokenizer:\n The tokenizer to use\n :param epochs:\n The number of epochs to train for\n :return:\n \"\"\"\n _validate_kwargs(model_kwargs, model_type)\n\n if model_kwargs['mixed']:\n tf.keras.mixed_precision.set_global_policy('mixed_float16')\n\n model, metadata = _load_previous_if_exists(model_type, log_dir=model_kwargs['base_log_dir'], model_name=model_kwargs['name'])\n if model is None:\n model = _MODEL_TYPES[model_type](**model_kwargs)\n if metadata:\n max_samples = metadata['max_samples']\n batch_size = metadata['batch_size']\n buffer_size = metadata['buffer_size']\n\n dataset_train, dataset_val = _get_train_data(max_samples=max_samples, batch_size=batch_size, buffer_size=buffer_size,\n dataset_path=dataset_path, file_name=dataset_name, model=model,\n python_legacy=python_legacy, cpp_legacy=cpp_legacy, use_memory_loaders=not streaming)\n\n callbacks = model.get_default_callbacks()\n callbacks.pop(1)\n callbacks.insert(1, tf.keras.callbacks.TensorBoard(log_dir=model.log_dir, update_freq=model.save_freq,\n embeddings_metadata=os.path.join(model.log_dir, \"metadata.tsv\"),\n profile_batch=(100, 110), embeddings_freq=5))\n callbacks.pop(2)\n callbacks.insert(2, PredictCallback(tokenizer=tokenizer, start_token=model.start_token, end_token=model.end_token,\n max_length=model.max_len, log_dir=model.log_dir, update_freq=model.save_freq,\n wrapper_model=model))\n\n callbacks.append(tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True))\n\n model.fit(dataset_train, validation_dataset=dataset_val, epochs=epochs, callbacks=callbacks)\n model.model.summary()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Gavin-Development/GavinTraining","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15132,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"30"}
+{"seq_id":"39178075344","text":"# https://leetcode.com/problems/remove-nth-node-from-end-of-list\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:\n l = 0\n ans = prev = ListNode(0, head)\n cur = head\n while head: \n l += 1\n head = head.next\n n = l - n \n \n while cur: \n if n != 0:\n prev = prev.next\n cur = cur.next\n else: \n prev.next = cur.next\n break \n n -= 1\n return ans.next","repo_name":"thuankxk2701/SolutionLeetCode","sub_path":"0001-0999/0019. [Medium] Remove Nth Node From End of List.py","file_name":"0019. [Medium] Remove Nth Node From End of List.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"28543991461","text":"# izriše dve sliki ukrivljenih puščic v datoteki \"puscica-.png\" in \"puscica+.png\", kjer - in + označujeta orientacijo puščic\nfrom PIL import Image, ImageDraw\nimport os\nimport math\n\nfrom pathlib import Path\n\n\ncwd = os.getcwd()\nstarš = Path(__file__).parent\n\nos.chdir(starš)\n\nos.chdir(\"slike\")\n\ndef shrani(destinacija, slika): # brez končnice\n pot = destinacija + \".png\"\n slika.save(pot)\n\ndef sind(angle):\n return math.sin(math.radians(angle))\n\ndef cosd(angle):\n return math.cos(math.radians(angle))\n\ndef koordinate_črt(kot, kot_črt, dolžina_črt, konica, predznak): # vrne nabor dveh točk\n prva_konica = (konica[0] + cosd(kot - kot_črt - predznak * kotni_popravek) * dolžina_črt, konica[1] + sind(kot - kot_črt - predznak * kotni_popravek) * dolžina_črt)\n druga_konica = (konica[0] + cosd(kot + kot_črt - predznak * kotni_popravek) * dolžina_črt, konica[1] + sind(kot + kot_črt - predznak * kotni_popravek) * dolžina_črt)\n return (prva_konica, druga_konica)\n # predznak je samo popravek za kot\n\n\nbarva_črt = (0, 0, 0)\n\ndimenzija = 100\n\ndebelina = 5\n\nout = Image.new(\"RGBA\", (dimenzija, dimenzija), (255, 0, 0, 0)) # A na koncu kratice RGBA označuje alfo (transparentnost)\ndraw = ImageDraw.Draw(out, mode=\"RGBA\")\n\npolmer = 40 # polmer krožnice, na kateri teče puščica\n\nkot_črt = 30 # lahko spreminjaš (ampak boš mogoče moral spremeniti tudi popravek)\ndolžina_črt = 20 # dolžina krakov\n\nkot_loka = -150 # kot ukrivljenega dela puščice\n\nkot = kot_loka + 90 # to je samo za usmerjenje krakov puščice\n\n# popravek zaradi iluzije, kjer kraki puščice ne izgledajo centrirani zaradi ukrivljanja puščice\nkotni_popravek = 19 # za kot -150 izgleda 19 kar dober popravek\n\nkonica = (dimenzija / 2 + polmer * cosd(kot_loka), dimenzija / 2 + polmer * sind(kot_loka)) # koordinate konice puščice \n\nprva_konica, druga_konica = koordinate_črt(kot, kot_črt, dolžina_črt, konica, -1) # koordinate obeh koncev krakov\n\n# ta spremenljivka bo enaka za obe sliki (se ne spreminja)\nokvir = ((dimenzija / 2 - polmer, dimenzija / 2 - polmer), (dimenzija / 2 + polmer, dimenzija / 2 + polmer)) # koordinate leve zgornje točke in desne spodnje točke okvirja, v katerem leži krožnica za puščico\n\ndraw.arc(okvir, kot_loka, -kot_loka, fill=barva_črt, width=debelina) # nariši lok\n\ndraw.line((konica, prva_konica), fill=barva_črt, width=debelina) # prvi krak\ndraw.line((konica, druga_konica), fill=barva_črt, width=debelina) # drugi krak\n\nime = \"puscica+\"\nshrani(ime, out)\n\n\n\nkot_loka = -kot_loka # zrcaljenje\n\nout = Image.new(\"RGBA\", (dimenzija, dimenzija), (255, 0, 0, 0))\ndraw = ImageDraw.Draw(out, mode=\"RGBA\")\n\nkot = 180 - kot # suplementarni kot, zaradi zrcaljenja slike\nkonica = (dimenzija / 2 - polmer * cosd(kot_loka), dimenzija / 2 - polmer * sind(kot_loka)) # konica se tudi zrcali\n\nprva_konica, druga_konica = koordinate_črt(kot, kot_črt, dolžina_črt, konica, 1) # 1 je samo popravek za kot\ndraw.arc(okvir, 180 - kot_loka, kot_loka - 180, fill=barva_črt, width=debelina)\n\ndraw.line((konica, prva_konica), fill=barva_črt, width=debelina)\ndraw.line((konica, druga_konica), fill=barva_črt, width=debelina)\n\nime = \"puscica-\"\nshrani(ime, out)","repo_name":"TGlinsek/Projektna-naloga","sub_path":"grafika-puscice.py","file_name":"grafika-puscice.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"sl","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"40617827898","text":"#!/usr/bin/env python3\n\nimport argparse, os\nimport pyeudaq\nimport numpy as np\nfrom pathlib import Path\n\n\nparser=argparse.ArgumentParser(description='DPTS data dumper')\nparser.add_argument('filename', help='eudaq .raw filepath')\nparser.add_argument('dpts', help='EUDAQ ID of the picoscope producer (example: \"DPTS\" or \"DPTS_0\")')\nparser.add_argument('--threshold','-t',type=int,default=18,help='only dump events with signals above this threshold')\nparser.add_argument('-i',type=int,default=0,help='Event offset')\nparser.add_argument('-n',type=int,default=1,help='Number of events to look at. Dump all the events if 0')\nparser.add_argument('--output-dir','-o',type=str,default=\"./dumped_waveforms\",help='output directory path where to save dumped waveforms')\nargs=parser.parse_args()\n\n\n# extract the run from the file name and create a folder where to save dumped waveforms (if it does not exist)\nparent_folder = args.output_dir\nif not os.path.isdir(parent_folder):\n os.makedirs(parent_folder)\ndata_folder = parent_folder + r\"/{}\".format(Path(args.filename).stem)\nif not os.path.isdir(data_folder):\n os.makedirs(data_folder)\n# read the .raw file and then extract the waveforms\nfr=pyeudaq.FileReader('native',args.filename)\n# skip some events as passed by the argument -i\nfor _ in range(args.i): # _ can be used as a variable in looping\n fr.GetNextEvent()\n# if n=0 dump all the events over thr, otherwise dump only the events over thr found in the first n frames\ncounter = args.n if args.n else -1\nwhile counter != 0:\n try:\n ev=fr.GetNextEvent()\n sevs=ev.GetSubEvents()\n if sevs is None: break\n for sev in sevs:\n if sev.GetDescription()==args.dpts:\n e=sev.GetBlock(0)\n d=np.frombuffer(e,dtype=np.int8)\n if args.threshold is None or np.max(d)>args.threshold:\n d.shape=(2,len(d)//2)\n np.save(data_folder + '/dump%04d.npy'%ev.GetEventN(),d)\n counter -= 1\n except AttributeError as ae:\n print(\"All the events in {} are dumped\".format(args.filename))\n break","repo_name":"lhuth/eudaq","sub_path":"user/ITS3/scripts/DPTSDump.py","file_name":"DPTSDump.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"}
+{"seq_id":"24185424814","text":"class Solution(object):\n def gcd(self, a, b):\n if b == 0:\n return a\n if a % b == 0:\n return b\n else:\n return self.gcd(b, a % b)\n\n def gcdOfStrings(self, str1, str2):\n \"\"\"\n :type str1: str\n :type str2: str\n :rtype: str\n \"\"\"\n ans = ''\n\n len1 = len(str1)\n len2 = len(str2)\n gcd_len = self.gcd(max(len1, len2), min(len1, len2))\n\n candidate = ''\n for c1, c2 in zip(list(str1), list(str2)):\n if c1 == c2:\n candidate += c1\n else:\n break\n # print(candidate)\n\n if len(candidate) == 0:\n return ''\n\n candidate = candidate[:gcd_len]\n\n while len(candidate) > 0:\n if str1.count(candidate) == len1 // len(candidate) and str2.count(candidate) == len2 // len(candidate):\n break\n else:\n candidate = candidate[:-1]\n if len(candidate) == 0:\n break\n\n return candidate","repo_name":"takecian/ProgrammingStudyLog","sub_path":"LeetCode/1000/1071.py","file_name":"1071.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"5569441142","text":"import uvicorn\nfrom fastapi import FastAPI, Response, Request, UploadFile, File\nfrom fastapi.templating import Jinja2Templates\nfrom prediction_Validation_Insertion import pred_validation # need to fix\nfrom trainmodel import trainModel\nfrom training_Validation_Insertion import train_validation\nfrom predictFromModel import prediction\nfrom typing import Dict\nimport pandas as pd\nimport numpy as np\n# For Cross Origin\n\n# app = FastAPI(debug = True)\n\nfrom fastapi import FastAPI\n#from elasticapm.contrib.starlette import make_apm_client, ElasticAPM\n\n'''\napm = make_apm_client(\n {\n 'SERVICE_NAME': 'FDP_APM',\n 'ELASTIC_APM_SERVER_URL': 'http://localhost:8200',\n }\n\n)\n'''\napp = FastAPI(debug=True)\n#app.add_middleware(ElasticAPM, client=apm)\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\n# route to dashboard(UI)\n#@app.get(\"/\")\n#def dashboard(request: predictclient):\n # \"\"\"\n # displays the financial distress prediction dashboard/homepage\n # \"\"\"\n # return templates.TemplateResponse(\"dashboard.html\", {\n # \"request\": request\n # })\n\n\n@app.post(\"/training\") # Training batch file\nasync def training(json_data: Dict):\n try:\n # if val is not None:\n if json_data['json_data'] is not None:\n path = json_data['json_data']\n train_valObj = train_validation(path, 'Batch') # object initialization\n train_valObj.train_validation() # calling the training_validation function\n trainModelObj = trainModel() # object initialization\n path = trainModelObj.trainingModel() # training the model for the files in the table\n return {\"message\": path}\n\n except ValueError:\n return Response(\"Error Occurred! %s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return 'Error Occurred!'\n # return Response(\"Training successfull!!\")\n\n\n@app.post(\"/trainclient\") # training via upload\ndef trainclient(file: UploadFile = File(...)):\n \"\"\"\n trains the financial distress prediction\n \"\"\"\n contents = file.file.read()\n\n from io import BytesIO\n data = BytesIO(contents)\n data = pd.read_csv(data)\n print(data.head(5))\n try:\n train_valObj = train_validation(data, 'UI') # object initialization\n train_valObj.train_validation() # calling the training_validation function\n trainModelObj = trainModel() # object initialization\n path = trainModelObj.trainingModel() # training the model for the files in the table\n print(\"Training is completed\")\n return {\"message\": path}\n\n except ValueError:\n return Response(\"Error Occurred! %s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return 'Error Occurred!'\n\n\n@app.post(\"/predictFileupload\") # predicting via file upload\ndef predictFileupload(file: UploadFile = File(...)):\n \"\"\"\n trains the financial distress prediction\n \"\"\"\n try:\n\n contents = file.file.read()\n\n from io import BytesIO\n data = BytesIO(contents)\n data = pd.read_csv(data)\n print(data.head(5))\n data.replace('?', np.NaN, inplace=True)\n pred_val = pred_validation(data, 'UP') # object initialization\n pred_val.prediction_validation() # calling the prediction_validation function\n pred = prediction(data, 'UP') # object initialization\n # predicting for dataset present in database\n path = pred.predictionFromModel()\n # return Response(\"Prediction File created at %s!!!\" % Data)\n return path\n\n except ValueError:\n return Response(\"Error Occurred! 
%s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return Response(\"Error Occurred! %s\" % e)\n\n\n# Oredictibg by UI\n@app.post(\"/predictclient/{val}/{val1}/{val2}/{val3}/{val4}/{val5}/{val6}/{val7}/{val8}/{val9}/\")\nasync def predictRouteClient(val: str, val1: str, val2: str, val3: str, val4: str, val5: str, val6: str, val7: str,\n val8: str, val9: str):\n try:\n print(\"Start Predicting\")\n list_data = [val, val1, val2, val3, val4, val5, val6, val7, val8, val9]\n data = pd.DataFrame(list_data)\n data = data.astype(float)\n data = data.T\n print(data.head(5))\n pred_val = pred_validation(data, 'UI') # object initialization\n pred_val.prediction_validation() # calling the prediction_validation function\n pred = prediction(data, 'UI') # object initialization\n # predicting for dataset present in database\n path = pred.predictionFromModel()\n # return Response(\"Prediction File created at %s!!!\" % Data)\n # return path\n\n return {\"message\": path}\n # return {\"message\": \"Parameter1 \"+val+\" Parameter 2 \"+val1 +\" Parameter 3 \"+val2 +\" Parameter 4 \"+val3 +\" Parameter 5 \"+val4 +\" Parameter 6 \"+val5+\" Parameter 7 \"+val6 +\" Parameter 8 \"+val7 +\" Parameter 9 \"+val8 +\" Parameter 10 \"+val9 }\n except ValueError:\n return Response(\"Error Occurred! %s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return Response(\"Error Occurred! %s\" % e)\n\n\n@app.post(\"/predict\") # Predicting via batch upload\nasync def predictRouteClient(json_data: Dict):\n try:\n print(\"Start Predicting\")\n if json_data['json_data'] is not None:\n path = json_data['json_data']\n\n pred_val = pred_validation(path, 'Batch') # object initialization\n\n pred_val.prediction_validation() # calling the prediction_validation function\n\n pred = prediction(path, 'Batch') # object initialization\n\n # predicting for dataset present in database\n path = pred.predictionFromModel()\n return {\"message\": path}\n\n except ValueError:\n return Response(\"Error Occurred! %s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return Response(\"Error Occurred! %s\" % e)\n\n\n'''\n@app.post(\"/predictclient\")\nasync def predictRouteupload(json_data: Dict):\n try:\n print(\"Start Predicting\")\n\n if json_data.get('Data') is not None:\n Data = json_data.get('Data')\n Df = pd.DataFrame.from_dict(Data)\n Df = Df.astype(float)\n Df = Df.T\n schema_path = 'schema_prediction_ui.json'\n with open(schema_path, 'r') as f:\n dic = json.load(f)\n f.close()\n column_names = dic['ColName']\n Df.columns = column_names\n pred_val = pred_validation(Df,'UI') # object initialization\n\n pred_val.prediction_validation() # calling the prediction_validation function\n\n pred = prediction(Df,'UI') # object initialization\n\n # predicting for dataset present in database\n path = pred.predictionFromModel()\n return Response(\"Prediction File created at %s!!!\" % Data)\n\n except ValueError:\n return Response(\"Error Occurred! %s\" % ValueError)\n except KeyError:\n return Response(\"Error Occurred! %s\" % KeyError)\n except Exception as e:\n return Response(\"Error Occurred! 
%s\" % e)\n'''\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host=\"0.0.0.0\", port=8000)\n\n# uvicorn app:app --port 5000\n","repo_name":"RavJain007/FinanceDistress-FDP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"70998786004","text":"from multiprocessing import Process\nimport numpy as np\nfrom trainer import RAM_Trainer\n\n\nNUM_AGENTS = 4\n\nagents = []\n\nfor i in range(NUM_AGENTS):\n a = RAM_Trainer()\n agents.append(a)\n\n\nrewards = []\nfor a in agents:\n p = Process(target=a.train_episode())\n p.start()\n p.join()\n\n\nfor a in agents:\n print(\"-\",a.rewards, a.done)\n\nprint(\"end\")","repo_name":"eduardodisanti/deep_reinforcement_learning_solutions","sub_path":"breakout_ram/train_breakout_Q_multi.py","file_name":"train_breakout_Q_multi.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"16815621058","text":"def pesel(number):\r\n months = {1: \" January\", 2: \" February\", 3: \" March\", 4: \" April\", 5: \" May\", 6: \" June\", 7: \" July\",\r\n 8: \" August\", 9: \" September\", 10: \" November\", 11: \" October\", 12: \" December\"}\r\n if len(number) == 11:\r\n try:\r\n x = int(number)\r\n if x % 10 == ((9 * int(number[0]) + 7 * int(number[1]) + 3 * int(number[2]) + int(number[3]) + 9 * int(number[4]) +\r\n 7 * int(number[5]) + 3 * int(number[6]) + int(number[7]) + 9 * int(number[8]) + 7 * int(number[9])) % 10):\r\n\r\n day = 10 * int(number[2]) + int(number[3])\r\n\r\n if day < 13:\r\n print(number[4] + number[5] + months[day] + \" 19\" + number[0] + number[1])\r\n\r\n elif day < 33:\r\n day2 = day - 20\r\n print(number[4] + number[5] + months[day2] + \" 20\" + number[0] + number[1])\r\n\r\n elif day < 53:\r\n day2 = day - 40\r\n print(number[4] + number[5] + months[day2] + \" 21\" + number[0] + number[1])\r\n\r\n elif day < 73:\r\n day2 = day - 60\r\n print(number[4] + number[5] + months[day2] + \" 22\" + number[0] + number[1])\r\n\r\n elif day < 93:\r\n day2 = day - 80\r\n print(number[4] + number[5] + months[day2] + \" 18\" + number[0] + number[1])\r\n return True\r\n\r\n else:\r\n return False\r\n except ValueError:\r\n return False\r\n\r\n else:\r\n return False\r\n","repo_name":"Wojtur123/Veryfication","sub_path":"pesel.py","file_name":"pesel.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"29582299187","text":"from itertools import product\nfrom functools import cache\n\n\ndef task1(fn):\n with open(fn) as fh:\n lines = fh.read().splitlines()\n\n pos = [int(n) for n in [line.split()[-1] for line in lines]]\n\n scores = [0, 0]\n rolls = 0\n dice = 0\n while True:\n dice = dice % 100 + 1\n if not rolls % 2:\n pos[0] += sum([dice, dice+1, dice+2])\n scores[0] += pos[0] % 10 if pos[0] % 10 else 10\n else:\n pos[1] += sum([dice, dice+1, dice+2])\n scores[1] += pos[1] % 10 if pos[1] % 10 else 10\n dice += 2\n rolls += 3\n if any(filter(lambda x: x >= 1000, scores)):\n break\n\n return min(scores) * rolls\n\n\n@cache\ndef play(p1, s1, p2, s2):\n w1, w2 = 0, 0\n for m1, m2, m3 in product((1, 2, 3), (1, 2, 3), (1, 2, 3)):\n p1_new = (p1 + m1 + m2 + m3) % 10 if (p1 + m1 + m2 + m3) % 10 else 10\n s1_new = s1 + p1_new\n if s1_new >= 21:\n w1 += 1\n else:\n w2_new, w1_new = play(p2, s2, p1_new, s1_new)\n w1 += w1_new\n w2 += w2_new\n\n return w1, w2\n\n\ndef task2(fn):\n with open(fn) as fh:\n lines = fh.read().splitlines()\n\n p1, p2 = [int(n) for n in [line.split()[-1] for line in lines]]\n\n return max(play(p1, 0, p2, 0))\n\n\nassert task1('test_input0.txt') == 739785\nprint(task1('input.txt'))\n\nassert task2('test_input0.txt') == 444356092776315\nprint(task2('input.txt'))\n","repo_name":"venthur/aoc","sub_path":"2021/21/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"20518540481","text":"from __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nY, scattering, scattering_and_wall = np.loadtxt('../data/rutherford.txt', unpack=True)\n\n\nN_2 = scattering_and_wall/600\ndelta_N_2 = np.power(scattering_and_wall,1/2)/600\nN_2_minus_wall = scattering/600\ndelta_N_2_minus_wall = np.power(scattering,1/2)/600\n\ntheory_Y= np.loadtxt('theory_Y')\ntheory_scattering = np.loadtxt('theory_scattering')\n\nfig, ax = plt.subplots()\n\nN_0_times_G = 0.17\n\nax.plot(100*theory_Y, N_0_times_G*theory_scattering, label=\"Prediction\\nwith $N_0*G={:.2f}$\".format(N_0_times_G))\nax.errorbar(Y, N_2, delta_N_2, marker='o',ls='', label='Measurement')\nax.set_ylabel('Detection Rate $N_2$')\nax.set_xlabel('Distance from foil to detector $Y$ (cm)')\nax.legend(loc=4)\nplt.savefig('../figures/N_2_prediction_and_data.pdf')\n","repo_name":"Gtwomd/advancedlab2","sub_path":"code/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"42282216819","text":"from watermelon import App, render_template, request, Response\nfrom server import *\n\napp = App()\n\n@app.route('/')\ndef welcome():\n return 'Welcome!!!'\n\n@app.route('/dynamic/')\ndef say_name(name):\n return \"Hi there \"+name+\"!\"\n\n@app.route('/text')\ndef reading():\n template = render_template('test.txt')\n return template\n\n@app.route('/form')\ndef create_form():\n resp = render_template('test_form.html',\n title='Hello!',\n message='Enter Values')\n return resp\n\n@app.route('/submitted', method='POST')\ndef submitted():\n row = request.args.get('row')\n col = request.args.get('col')\n to_row = request.args.get('to_row')\n to_col = request.args.get('to_col') \n row = int(row)\n col = int(col)\n to_row = int(to_row)\n to_col = int(to_col)\n return \"Form Submitted! %d%d%d%d\" %(row, col, to_row, to_col)\n\n@app.route('/make_response')\ndef make_response():\n response = Response()\n response.txt = open('static/giphy.gif').read()\n response.add_header_item('Content-Type', 'image/gif')\n return response\n\n@app.route('/stack')\ndef render():\n title = 'This is the Title'\n another_title = ' This is another title'\n lst = [1,2,3,4]\n my_dict = {'k' : 'v', 'x' : 'y' }\n resp = render_template('test_stack.html', \n Title=title, \n another_title=another_title, \n lst=lst, \n my_dict=my_dict)\n return resp\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"julieqiu/watermelon","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"22210779127","text":"class Node(object):\n def __init__(self, value):\n self.children = []\n self.value = value\n\n def add_child(self, node):\n self.children.append(node)\n\ndef dfs(root, value):\n if not value or not root:\n return None\n\n if root.value == value:\n return root\n\n for child in root.children:\n node = dfs(child, value)\n if node:\n return node\n\n return None\n","repo_name":"cinjon/recurse","sub_path":"tree_search.py","file_name":"tree_search.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"71245902166","text":"#!/usr/bin/env python\n#=======================================================================================\n# computeBindingEnergies.py\n# Plots three things:\n# - real helium binding energies from the file '../../../benchmarks/tungsten.txt'\n# - computed helium binding energies with real formation energies from the file \n# 'formationEnergies.dat'\n# - computed helium binding energies with fitted formation energies from the file\n# 'outputFile.dat' created by formationEnergiesFit.py\n# The formula used to compute binding energies is:\n# Eb(He_x, V_y) = Ef(He_x-1, V_y) + Ef(He) - Ef(He_x, V_y)\n# where you have to set Ef(He)\n# Can compute the squared distance between real data and computed data by uncommenting \n# the end of this script\n#=======================================================================================\n\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom pylab import *\nfrom scipy import stats\nfrom scipy import interpolate\n\n## Set the value of the Helium formation energy to use\nHeFormation = 6.15\n \n## Create plots\nfig = plt.figure()\nenergiesComputed = plt.subplot(312)\nenergiesTrue = plt.subplot(311)\nenergiesFit = plt.subplot(313)\n\n## Load helium number, vacancy number, and formation energy from 'formationEnergies.dat'\nHe, V, formationEnergy = loadtxt('formationEnergies.dat', usecols = (1,0,2) , unpack=True)\n\n## List to store which vacancy numbers are used\nVList = []\n\n## Loop on possible vacancy numbers\nfor i in range(1, 50):\n \n ## Filter on the vacancy number\n HeFiltered = He[V == i]\n formationEnergyFiltered = formationEnergy[V == i]\n VFiltered = i \n \n ## If data exists \n if len(HeFiltered) > 0:\n\n ## Initialize the previous value of formation energy\n energyPrevious = formationEnergyFiltered[0]\n\n ## Declare lists where binding energies will be stored\n bindingEnergy = [] \n \n ## Loop on all the HeFiltered elements\n for k in range(1, len(HeFiltered)):\n \n ## Compute binding energy assuming Ef(He, 0) = HeFormation\n value = energyPrevious + HeFormation - formationEnergyFiltered[k]\n bindingEnergy.append(value)\n energyPrevious = formationEnergyFiltered[k]\n \n ## Remove first item of the list because the binding energy cannot be computed for it\n HeFiltered = np.delete(HeFiltered, 0, axis=0)\n VFiltered = np.delete(VFiltered, 0, axis=0)\n \n ## Plot binding energy = f(He/V)\n energiesComputed.plot(HeFiltered/VFiltered, bindingEnergy, color=(0.02*i,0,1-0.02*i), linewidth=1.5)\n \n ## Store which vacancy numbers were used\n VList.append(VFiltered)\n\n## Load helium number, vacancy number, and formation energy from 'outputFile.dat'\nHe, V, formationEnergy = loadtxt('outputFile.dat', usecols = (1,0,2) , unpack=True)\n\n## Loop on possible vacancy numbers\nfor i in range(1, 50):\n \n ## Filter on the vacancy number\n HeFiltered = He[V == i]\n formationEnergyFiltered = formationEnergy[V == i]\n VFiltered = i \n \n ## If data exists \n if len(HeFiltered) > 0:\n\n ## Initialize the previous value of formation energy\n energyPrevious = formationEnergyFiltered[0]\n\n ## Declare lists where binding energies will be stored\n bindingEnergy = [] \n \n ## Loop on all the HeFiltered elements\n for k in range(1, len(HeFiltered)):\n \n ## Compute binding energy assuming Ef(He, 0) = HeFormation\n value = energyPrevious + HeFormation - formationEnergyFiltered[k]\n bindingEnergy.append(value)\n energyPrevious = formationEnergyFiltered[k]\n \n ## Remove 
first item of the list because the binding energy cannot be computed for it\n HeFiltered = np.delete(HeFiltered, 0, axis=0)\n VFiltered = np.delete(VFiltered, 0, axis=0)\n \n ## Plot binding energy = f(He/V)\n energiesFit.plot(HeFiltered/VFiltered, bindingEnergy, color=(0.02*i,0,1-0.02*i), linewidth=1.5)\n \n## Load helium number, vacancy number, and binding energy from 'tungsten.txt'\nHe, V, bindingEnergy = loadtxt('../../../benchmarks/tungsten.txt', usecols = (0,1,3) , unpack=True) \n\n## Loop on possible vacancy numbers\nfor i in range(0, len(VList)):\n \n ## Filter on the vacancy number\n HeFiltered = He[V == VList[i]]\n bindingEnergyFiltered = bindingEnergy[V == VList[i]]\n VFiltered = VList[i]\n \n ## If data exists \n if len(HeFiltered) > 0:\n \n ## Plot binding energy = f(He/V)\n energiesTrue.plot(HeFiltered/VFiltered, bindingEnergyFiltered, color=(0.02*VList[i],0,1-0.02*VList[i]), linewidth=1.5) \n\n## Set the same X and Y scale for all plots\nenergiesTrue.set_xlim([0, 8])\nenergiesTrue.set_ylim([1, 7])\nenergiesComputed.set_xlim([0, 8])\nenergiesComputed.set_ylim([1, 7])\nenergiesFit.set_xlim([0, 8])\nenergiesFit.set_ylim([1, 7])\n\n## Title and axis\nenergiesTrue.set_title(\"Energies from Benchmark\", fontsize=20)\nenergiesTrue.set_ylabel(\"Helium binding energy\",fontsize=16)\nenergiesTrue.grid()\nenergiesComputed.set_title(\"Energies computed from formation energies\", fontsize=20)\nenergiesComputed.set_ylabel(\"Helium binding energy\",fontsize=16)\nenergiesComputed.grid()\nenergiesFit.set_title(\"Energies computed from fitted formation energies\", fontsize=20)\nenergiesFit.set_xlabel(\"Helium/Vacancy number\",fontsize=16)\nenergiesFit.set_ylabel(\"Helium binding energy\",fontsize=16)\nenergiesFit.grid()\n\n## Uncomment the following to compute the least squares\n# \n# ## Compute Least Squares\n# leastSquareComputed = 0.\n# leastSquareFit = 0.\n# \n# ## Load helium number, vacancy number, and formation energy from 'formationEnergies.dat'\n# He, V, formationEnergyComputed = loadtxt('formationEnergies.dat', usecols = (1,0,2) , unpack=True)\n# \n# ## Load formation energy from 'outputFile.dat'\n# He, V, formationEnergyFit = loadtxt('outputFile.dat', usecols = (1,0,2) , unpack=True)\n# \n# ## Load helium number, vacancy number, and binding energy from 'tungsten.txt'\n# HeTrue, VTrue, bindingEnergyTrue = loadtxt('../../../benchmarks/tungsten.txt', usecols = (0,1,3) , unpack=True) \n# \n# ## Loop on possible vacancy numbers\n# for i in range(1, 50):\n# \n# ## Filter on the vacancy number\n# HeF = He[V == i]\n# formationEnergyComputedF = formationEnergyComputed[V == i]\n# formationEnergyFitF = formationEnergyFit[V == i]\n# VF = i \n# \n# ## If data exists \n# if len(HeF) > 0:\n# \n# ## Previous energies\n# previousEnergyComputed = formationEnergyComputedF[0]\n# previousEnergyFit = formationEnergyFitF[0]\n# \n# ## Loop on Helium number\n# for k in range(1, len(HeF)):\n# \n# ## Filter on Helium number and vacancy number \n# bindingEnergyTrueF = bindingEnergyTrue[HeTrue == HeF[k]]\n# VTrueF = VTrue[HeTrue == HeF[k]]\n# bindingEnergyTrueFF = bindingEnergyTrueF[VTrueF == VF]\n# \n# ## Compute binding energies\n# bindingEnergyComputed = previousEnergyComputed + HeFormation - formationEnergyComputedF[k]\n# previousEnergyComputed = formationEnergyComputedF[k]\n# bindingEnergyFit = previousEnergyFit + HeFormation - formationEnergyFitF[k]\n# previousEnergyFit = formationEnergyFitF[k]\n# \n# ## Compute the squared distance\n# rComputed = math.pow((bindingEnergyComputed - bindingEnergyTrueFF), 
2)\n# rFit = math.pow((bindingEnergyFit - bindingEnergyTrueFF), 2)\n# \n# ## Add to the total value\n# leastSquareComputed = leastSquareComputed + rComputed\n# leastSquareFit = leastSquareFit + rFit\n# \n# ## Print the least squares\n# print \"Distance from real data to computed ones: \", leastSquareComputed\n# print \"Distance from real data to fitted ones: \", leastSquareFit\n \n## Show the plots\nplt.show() \n ","repo_name":"ORNL-Fusion/xolotl","sub_path":"UQ/fits/src/computeBindingEnergies.py","file_name":"computeBindingEnergies.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"}
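The header of the script above states the relation Eb(He_x, V_y) = Ef(He_x-1, V_y) + Ef(He) - Ef(He_x, V_y). A toy numerical sketch of that running difference; the formation energies here are made up purely for illustration, since the real ones come from formationEnergies.dat.

He_formation = 6.15                      # Ef(He), the same constant set in the script
formation = [10.0, 14.5, 19.3, 24.4]     # illustrative Ef(He_x, V_y) for x = 1..4 at fixed y

binding = [prev + He_formation - cur for prev, cur in zip(formation, formation[1:])]
print([round(b, 2) for b in binding])    # Eb for x = 2, 3, 4 -> [1.65, 1.35, 1.05]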
+{"seq_id":"30738073908","text":"'https://www.hackerearth.com/practice/algorithms/string-algorithm/basics-of-string-manipulation/practice-problems/algorithm/rotation-1-38ecf5a7/'\n\n'''\nYou are given two strings S and T of the same length N. Your task is to convert the string S into T by doing some operations. \nIn an operation, you can delete the first character of the string S and append any character at the end of the string. \nYou are required to determine the minimum number of operations to convert S into T.\n\nInput format:\nFirst line: Single integer denoting the length of the strings\nSecond line: String S\nThird line: String T\nOutput format:\nPrint a single integer that represents the answer to the question.\n'''\n\nimport difflib\ndef rotation_(S, T):\n # Partially accepted\n \n seq_mat = difflib.SequenceMatcher(a=S, b=T)\n match = seq_mat.find_longest_match(alo=0, ahi=len(S), blo=0, bhi=len(T))\n common = S[match.a : match.a + match.size]\n print(match)\n print(common)\n \n return match.a\n \n \ndef rotation(S, T):\n counter = 0\n for i in range(N):\n if(T.find(S[i : N])== -1):\n counter+=1\n\n return counter\n\n\nif __name__ == '__main__':\n # N = int(input())\n # S = input() # string\n # T = input() # string\n\n N = 7\n S = 'aaxaabc'\n T = 'aabcaax'\n \n N = 113\n S = 'ndafmffmuuwjzqpquwjhuftohawpfegsjvnxwipwqlswvawogjuyiqtzsgpwgosegmuuhpzwchejuiitumyescxxyecnsatcbfpseqzowvdjyvchg'\n T = 'zqpquwjhuftohawpfegsjvnxwipwqlswvawogjuyiqtzsgpwgosegmuuhpzwchejuiitumyescxxyecnsatcbfpseqzowvdjyvchgavqnonmkwgqp'\n result = 12\n \n result = rotation(S, T)\n \n print(result) # -> 3 or 12","repo_name":"H0r4c3/Challenges","sub_path":"HackerEarth_Challenges/rotation(partially_accepted).py","file_name":"rotation(partially_accepted).py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"41930335390","text":"\nfrom constant import TICK,description2,api\nimport requests,argparse,json,sys\nimport time\n\n\nparser = argparse.ArgumentParser(\"dev.akademi bot sell and buy scoin\", description2)\nparser.add_argument(\"--stoplimit\", \"-s\", help=\"How many day laters\", required=False, default=-1)\nparser.add_argument(\"--buylimit\", \"-b\", help=\"How many days to next?\", required=False, default=-1)\nparser.add_argument(\"--number\", \"-n\", help=\"Which number to inform\", required=False, default='+905346639019')\nargs = parser.parse_args()\n\ndef getData():\n re = None\n try:\n re = requests.get(TICK)\n if re.status_code == 200:\n j = json.loads(re.text)\n return j\n except Exception as e:\n pass\n return False\n\n\ndef sendSms(number,message):\n result = api.call('sms.send', 'SMS', number, message, None)\n return result\n\ndef check(value):\n stop=int(args.stoplimit)\n buy =int(args.buylimit)\n if value < stop and stop != -1:\n print(\"Your stop limit occurs current value is {} $\".format(value))\n sendSms(args.number,\"Your stop limit occurs current value is {} $\".format(value))\n exit()\n elif abs(value -buy ) < 3 and buy != -1:\n print(\"Your stop limit occurs current value is {} $\".format(value))\n sendSms(args.number, \"Your buy limit ocurs current value is {} $\".format(value))\n exit()\n\n\ndef start():\n prev=0\n data = getData()\n print(\"Current value is {}\".format(data['value']))\n\n if int(args.stoplimit) != -1 and int(args.stoplimit) > data['value']:\n print(\"Stop limit can not lowwer than current Value\")\n\n while True:\n data=getData()\n #print(\"Current value is {}\".format(data['value']))\n if data['date']==prev:\n continue\n else:\n prev=data['date']\n print(\"Current value is {}\".format(data['value']))\n check(data['value'])\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n if args.stoplimit==-1 and args.buylimit==-1:\n print(\"Parameters not entered\")\n else:\n start()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"altuntasfatih/dev-akademi","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"6490795729","text":"# 2630.py\nimport sys\nfrom typing import List\n\nsys.stdin = open('input/2630')\n\n\ndef my(n:int, board:List[List[int]])->List[int]:\n def dfs(sr,sc,l):\n cnt =[0,0]\n box_sum = 0\n for r in range(l):\n box_sum+=sum(board[sr+r][sc:sc+l])\n\n if box_sum == l**2:\n cnt[1]+=1\n return cnt\n elif box_sum==0:\n cnt[0]+=1\n return cnt\n else :\n next_l = l//2\n box1= dfs(sr,sc,next_l)\n box2= dfs(sr+next_l,sc,next_l)\n box3= dfs(sr,sc+next_l,next_l)\n box4= dfs(sr+next_l,sc+next_l,next_l)\n cnt[0] = box1[0]+box2[0]+box3[0]+box4[0]\n cnt[1] = box1[1]+box2[1]+box3[1]+box4[1]\n return cnt\n\n return dfs(0,0,n)\n\nTC = int(input())\nfor test_case in range(1, TC + 1):\n n = int(sys.stdin.readline().rstrip())\n board= []\n for _ in range(n):\n board.append(list(map(int,sys.stdin.readline().rstrip().split())))\n answer = my(n,board)\n for i in answer:\n print(i)\n","repo_name":"SangMin-Code/python","sub_path":"baekjoon/2630.py","file_name":"2630.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"10172512647","text":"'''\nCreated on 2017-08-01 13:02\n\n@ product name : PyCharm Community Edition\n\n@ author : yoda\n'''\n\nfrom mymod.print import *\nimport numpy as np\nimport pandas as pd\n\n\"\"\"\n문서 전처리\n\n모든 데이터 분석 모형은 숫자로 구성된 고정 차원 벡터를 독립 변수로 하고 있으므로 \n문서(document)를 분석을 하는 경우에도 숫자로 구성된 특징 벡터(feature vector)를 \n문서로부터 추출하는 과정이 필요하다. \n이러한 과정을 문서 전처리(document preprocessing)라고 한다.\n\"\"\"\n\n\n\"\"\"\nBOW (Bag of Words)\n문서를 숫자 벡터로 변환하는 가장 기본적인 방법은 BOW (Bag of Words) 이다. \nBOW 방법에서는 전체 문서 {D1,D2,…,Dn}를 구성하는 \n고정된 단어장(vocabulary) {W1,W2,…,Wm}를 만들고 \nDi라는 개별 문서에 단어장에 해당하는 단어들이 포함되어 있는지를 표시하는 방법이다.\n먄약 단어 Wj가 문서Di에 있으면 Xij = 1\n\"\"\"\n\n\"\"\"\nScikit-Learn 의 문서 전처리 기능\n\nScikit-Learn 의 feature_extraction.text 서브 패키지는 다음과 같은 문서 전처리용 클래스를 제공한다.\n\nCountVectorizer:\n문서 집합으로부터 단어의 수를 세어 카운트 행렬을 만든다.\nTfidfVectorizer:\n문서 집합으로부터 단어의 수를 세고 TF-IDF 방식으로 단어의 가중치를 조정한 카운트 행렬을 만든다.\nHashingVectorizer:\nhashing trick 을 사용하여 빠르게 카운트 행렬을 만든다.\n\"\"\"\n\nfrom sklearn.feature_extraction.text import CountVectorizer\ncorpus = [\n 'This is the first document.',\n 'This is the second second document.',\n 'And the third one.',\n 'Is this the first document?',\n 'The last document?',\n]\nvect = CountVectorizer()\nvect.fit(corpus)\nvect.vocabulary_\nprint(vect.vocabulary_)\nprint(vect.transform(corpus).toarray())\n\nimport urllib.request as ur\nimport json\nimport string\nimport konlpy\nfrom konlpy.utils import pprint\nfrom konlpy.tag import Hannanum\nimport matplotlib as mpl\nimport matplotlib.pylab as plt\nhannanum = Hannanum()\n\nurl = \"https://www.datascienceschool.net/download-notebook/708e711429a646818b9dcbb581e0c10a/\"\nrequest = ur.Request(url)\ndata = ur.urlopen(request)\njson = json.loads(data.read())\ncell = [\"\\n\".join(c[\"source\"]) for c in json[\"cells\"] if c[\"cell_type\"] == u\"markdown\"]\n\n# 명사로 list화\ndocs = [w for w in hannanum.nouns(\" \".join(cell)) if ((not w[0].isnumeric()) and (w[0] not in string.punctuation))]\n\n# CountVectorizer: 문서 집합으로부터 단어의 수를 세어 카운트 행렬을 만든다.\ndiv()\nvect = CountVectorizer().fit(docs)\nprint(vect.vocabulary_)\n\n# array화\nprint(vect.transform(docs).toarray())\n\n# stop words : 필요없는 영어단어 삭제\nfrom nltk.corpus import stopwords\nvect = CountVectorizer(stop_words=\"english\").fit(docs)\nprint(vect.vocabulary_)\n\n# 토큰(token)\ndiv()\n# 한글자씩 자르기\nvect = CountVectorizer(analyzer=\"char\").fit(docs)\nprint(vect.vocabulary_)\n\nimport nltk\nvect = CountVectorizer(tokenizer=nltk.word_tokenize).fit(docs)\nprint(vect.vocabulary_)\n\n\n# n-그램\n# n-그램은 단어장 생성에 사용할 토큰의 크기를 결정한다.\n# 1-그램은 토큰 하나만 단어로 사용하며\n# 2-그램은 두 개의 연결된 토큰을 하나의 단어로 사용한다.\ndiv()\nvect = CountVectorizer(ngram_range=(2,2)).fit(docs)\nprint(vect.vocabulary_)\n\n\"\"\"\n빈도수\n\nmax_df, min_df ��수를 사용하여 문서에서 토큰이 나타난 횟수를 기준으로 단어장을 구성할 수도 있다. \n토큰의 빈도가 max_df로 지정한 값을 초과 하거나 min_df로 지정한 값보다 작은 경우에는 무시한다. 
\n인수 값은 정수인 경우 횟수, 부동소수점인 경우 비중을 뜻한다.\n\"\"\"\nvect = CountVectorizer(max_df=4, min_df=2).fit(docs)\nprint(vect.vocabulary_)\nprint(vect.stop_words_)\n\n\"\"\"\nTF-IDF\nTF-IDF(Term Frequency – Inverse Document Frequency) 인코딩은 \n단어를 갯수 그대로 카운트하지 않고 모든 문서에 공통적으로 들어있는 단어의 경우 \n문서 구별 능력이 떨어진다고 보아 가중치를 축소하는 방법이다.\n\"\"\"\nfrom sklearn.feature_extraction.text import TfidfVectorizer\ntfidv = TfidfVectorizer().fit(cell)\n","repo_name":"Junghyo/pycharm","sub_path":"data_science/basic_informal_data/practice03_document_preprocess.py","file_name":"practice03_document_preprocess.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
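For the same toy corpus used at the top of the file, TfidfVectorizer can be fitted directly. This is a small English-only sketch (no konlpy needed) comparing the TF-IDF weighted matrix with the raw counts; the sklearn calls are standard and the corpus is the one from the record.

from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer

corpus = [
    'This is the first document.',
    'This is the second second document.',
    'And the third one.',
    'Is this the first document?',
    'The last document?',
]

counts = CountVectorizer().fit_transform(corpus)
tfidf = TfidfVectorizer().fit_transform(corpus)

print(counts.toarray())          # raw term counts per document
print(tfidf.toarray().round(2))  # the same matrix with TF-IDF weighting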
+{"seq_id":"71742514644","text":"#!/usr/bin/env python3\nimport random\nfrom datetime import datetime\nimport os\nfrom functools import reduce\nimport pickle\nimport sys\n\nrandom.seed(datetime.now())\n\nfilenamezz = \"https://digitalcorpora.s3.amazonaws.com/corpora/files/govdocs1/zipfiles/{}.zip\"\n\n\ndef download_file(number):\n print()\n print()\n print('-' * 80)\n number = f\"{number:03d}\"\n fname = filenamezz.format(number)\n print(f\"Downloading {fname}... \")\n os.system(f\"wget {fname}\")\n\nfor i in range(25):\n filename = \"null\"\n num = -1\n while True:\n num = random.randint(0, 1000)\n filename = f\"{num:03d}.zip\"\n if not os.path.exists(filename):\n break\n download_file(num)\n \n","repo_name":"bhattacharjee/RansomFoRRT","sub_path":"helper_scripts/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"5724890936","text":"#!/usr/bin/env python\n# -*- coding: utf8 -*\n\nimport os\nfrom system.system import DATA_PATH, PYTHON_PATH, BACKUP_PATH, FIGURE_PATH\n\nPATH = os.path.join(DATA_PATH, 'InitialValue')\nPROGRAM_PATH = os.path.join(PYTHON_PATH, 'InitialValueSimulation')\nPATH_FIGURE = os.path.join(FIGURE_PATH, 'InitialValue')\nPATH_INITIAL_TRACER = os.path.join(DATA_PATH, 'InitialTracer')\n\nDB_PATH = os.path.join(PATH, 'Database', 'InitialValue_Database.db')\nPATH_CONCENTRATION_TYP = {'constant': 'Constant_InitialValue', 'vector': 'Vector_InitialValue'}\nPATH_DIFFERENT_TRACER = {1: 'OneTracer', 2: 'TwoTracer', 3: 'ThreeTracer', 4: 'FourTracer', 5: 'FiveTracer'}\nPATH_DISTRIBUTION = '{:s}Distribution'\nPATH_TRACER_DISTRIBUTION = '{:s}_distribution'\n\nDEFAULT_PYTHONPATH = os.path.join(PYTHON_PATH, 'util') + ':' + os.path.join(PYTHON_PATH, 'initialValue')\n\n\nPARAMETERID_MAX = 100\n\nPATTERN_JOBFILE = 'Jobfile.{:s}.ParameterId_{:0>3d}.Concentration_Typ_{:s}_Num_{:d}_DifferentTracer_{:d}.Timestep_{:d}dt.txt'\nPATTERN_LOGFILE = 'Logfile.{:s}.ParameterId_{:0>3d}.Concentration_Typ_{:s}_Num_{:d}_DifferentTracer_{:d}.Timestep_{:d}dt.log'\nPATTERN_JOBOUTPUT = 'Joboutput.{:s}.ParameterId_{:0>3d}.Concentration_Typ_{:s}_Num_{:d}_DifferentTracer_{:d}.Timestep_{:d}dt.out'\n\nPATTERN_TRACER_INITIAL_CONCENTRATION = 'InitialValue_Tracer_{:d}_{:0>3d}.petsc'\n\n#Pattern for figure filenames\nPATTERN_FIGURE_SPINUP = 'Spinup.{:s}.ParameterId_{:0>3d}.pdf'\nPATTERN_FIGURE_NORM = '{:s}{:s}Norm.{:s}.ParameterId_{:0>3d}.pdf'\nPATTERN_FIGURE_SPINUP_NORM = 'ScatterPlot.SpinupNorm_{:s}{:s}.{:s}.pdf'\nPATTERN_FIGURE_SURFACE = 'Surface.InitialValue.{:s}.{:d}dt.ParameterId_{:d}.{:s}.{:s}.{:s}.Depth_{:s}.relError_{}.diff_{}.pdf'\n\n","repo_name":"slawig/bgc-initialValue","sub_path":"initialValue/initialValue/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"5733324083","text":"import unittest\nfrom unittest.mock import patch\nfrom Tests_homework.data_base import documents\nfrom Tests_homework.data_base import directories\nfrom Tests_homework import secretary_prog\nfrom copy import deepcopy\nimport sys\nfrom contextlib import contextmanager\nfrom io import StringIO\n\n\n@contextmanager\ndef captured_output():\n old_out = sys.stdout\n try:\n sys.stdout = StringIO()\n yield sys.stdout\n finally:\n sys.stdout = old_out\n\n\nclass SecretaryProgTester(unittest.TestCase):\n\n def setUp(self):\n self.normal_doc_n = '11-2'\n self.normal_doc_name = \"Геннадий Покемонов \\n\"\n self.doc_without_name = '1456 4568'\n self.normal_shelf = '1'\n self.shelf_to_moveon = '3'\n self.new_doc_number = '3333'\n self.new_doc_type = 'test doc'\n self.new_doc_name = 'Petr Tester'\n self.new_doc_wrong_shelf = '777'\n self.normal_shelf_list = '1, 2, 3'\n self.new_shelf = '5'\n\n # @unittest.skip(' ')\n def test_people_by_number(self):\n\n # тестируем три случая ввода - пустой ввод, полноценный номер документа, номер документа не имеющего\n # имени владельца. Эти параметры указаны в side_effect\n\n # Список образцов нормального вывода тестируемого метода\n normal_message_list = [secretary_prog.warning_doc_num,\n self.normal_doc_name,\n f'Для документа {self.doc_without_name} не указан владелец\\n'\n ]\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)),\\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with patch('Tests_homework.secretary_prog.input', side_effect=['',\n self.normal_doc_n,\n self.doc_without_name]):\n for normal_message in normal_message_list:\n with captured_output() as out:\n self.assertEqual(secretary_prog.people_by_number(), None)\n\n message = out.getvalue()\n self.assertEqual(message, f'{normal_message}\\n')\n\n # @unittest.skip(' ')\n def test_all_doc_list(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.all_doc_list(), None)\n\n self.assertEqual(type(out.getvalue()), str)\n self.assertEqual(len(out.getvalue().splitlines()), len(secretary_prog.documents))\n\n # @unittest.skip(' ')\n def test_doc_shelf_find(self):\n normal_message_list = [secretary_prog.warning_doc_num,\n f'Этот документ лежит на полке {self.normal_shelf}'\n ]\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with patch('Tests_homework.secretary_prog.input', side_effect=['', self.normal_doc_n]):\n for normal_message in normal_message_list:\n with captured_output() as out:\n self.assertEqual(secretary_prog.doc_shell_find(), None)\n\n message = out.getvalue()\n self.assertEqual(message, f'{normal_message}\\n')\n\n # @unittest.skip('')\n def test_add_new_doc(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n # Тест попытки добавления с ошибочной полкой - провал попытки\n with patch('Tests_homework.secretary_prog.input', side_effect=[self.new_doc_number,\n self.new_doc_type,\n self.new_doc_name,\n self.new_doc_wrong_shelf\n ]):\n\n before = len(secretary_prog.documents)\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.add_new_doc(), None)\n\n after = len(secretary_prog.documents)\n\n 
message = out.getvalue()\n self.assertEqual(message, f'{secretary_prog.warning_shelf} ' \n f'\\nСейчас есть полки с номерами {self.normal_shelf_list}\\n')\n self.assertEqual(after, before)\n\n # Тест удачной попытки добавления\n with patch('Tests_homework.secretary_prog.input', side_effect=[self.new_doc_number,\n self.new_doc_type,\n self.new_doc_name,\n self.normal_shelf\n ]):\n before = len(secretary_prog.documents)\n with captured_output() as out:\n self.assertEqual(secretary_prog.add_new_doc(), None)\n\n after = len(secretary_prog.documents)\n\n message = out.getvalue()\n self.assertEqual(message, f'\\nДокумент с номером {self.new_doc_number}' \n f' добавлен на полку {self.normal_shelf} \\n\\n')\n self.assertGreater(after, before)\n\n def test_del_doc(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n # Тестируем попытку успешного удаления\n with patch('Tests_homework.secretary_prog.input', side_effect=[self.normal_doc_n]):\n\n before = len(secretary_prog.documents)\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.del_doc(), None)\n\n after = len(secretary_prog.documents)\n\n message = out.getvalue()\n self.assertEqual(message, f'Документ c номером {self.normal_doc_n} удалён из базы. \\n\\n')\n self.assertGreater(before, after)\n\n # Тестируем неудачную попытку удаления\n with patch('Tests_homework.secretary_prog.input', side_effect=[self.new_doc_number]):\n before = len(secretary_prog.documents)\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.del_doc(), None)\n\n after = len(secretary_prog.documents)\n\n message = out.getvalue()\n self.assertEqual(message, f'{secretary_prog.warning_doc_num}\\n')\n self.assertEqual(before, after)\n\n def test_doc_move(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with patch('Tests_homework.secretary_prog.input', side_effect = [self.normal_doc_n, self.shelf_to_moveon]):\n\n shelf_len_before = len(secretary_prog.directories[self.shelf_to_moveon])\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.doc_move(), None)\n\n shelf_len_after = len(secretary_prog.directories[self.shelf_to_moveon])\n\n message = out.getvalue()\n self.assertEqual(message, f'документ \"{self.normal_doc_n}\" перемещён на полку '\n f'{self.shelf_to_moveon} \\n\\n')\n self.assertGreater(shelf_len_after, shelf_len_before)\n\n def test_make_new_shelf(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with patch('Tests_homework.secretary_prog.input', side_effect = [self.new_shelf]):\n\n shelfs_before = len(secretary_prog.directories)\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.make_new_shelf(), None)\n\n shelfs_after = len(secretary_prog.directories)\n\n message = out.getvalue()\n self.assertGreater(shelfs_after, shelfs_before)\n self.assertEqual(message, f'Новая полка с номером {self.new_shelf} создана \\n\\n')\n\n def test_all_doc_owners(self):\n\n with patch('Tests_homework.secretary_prog.documents', deepcopy(documents)), \\\n patch('Tests_homework.secretary_prog.directories', deepcopy(directories)):\n\n with captured_output() as out:\n self.assertEqual(secretary_prog.all_doc_owners(), None)\n\n message = out.getvalue()\n 
self.assertEqual(message, 'В базе данных о документах есть информация о следующих владельцах:\\n\\n'\n 'Василий Гупкин\\n'\n 'Геннадий Покемонов\\n'\n 'Аристарх Павлов\\n \\n'\n 'Для документа 332 не указан владелец\\n'\n 'Для документа 1456 4568 не указан владелец\\n \\n')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"AlexVoruok/NetologyHomework","sub_path":"Tests_homework/test_secretary.py","file_name":"test_secretary.py","file_ext":"py","file_size_in_byte":10386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
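The test module above leans on two reusable tricks: redirecting sys.stdout into a StringIO to capture printed output, and patching input with side_effect so successive calls return scripted answers. A minimal self-contained sketch of both, with a throwaway ask_and_echo function standing in for the secretary program's functions.

import sys
from contextlib import contextmanager
from io import StringIO
from unittest.mock import patch

@contextmanager
def captured_output():
    old_out = sys.stdout
    try:
        sys.stdout = StringIO()
        yield sys.stdout
    finally:
        sys.stdout = old_out

def ask_and_echo():
    # stand-in for the code under test: reads one value and prints it back
    print(f"you typed {input()}")

with patch("builtins.input", side_effect=["42"]):
    with captured_output() as out:
        ask_and_echo()

assert out.getvalue() == "you typed 42\n"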
+{"seq_id":"20516711276","text":"#!/usr/bin/python3\n\nrucksacks = []\n\nf = open(\"data.dat\", \"r\")\n\ndef Priority(letter):\n prio = ord(letter) - 96\n if prio < 0:\n prio += 58\n return prio\n\ntotal_prio = 0\n\nfor line in f.readlines():\n line = line[:-1]\n inv_size = len(line)\n half = int(inv_size/2)\n rucksack = line\n side_a = line[:half]\n side_b = line[half:]\n in_common = []\n for letter in side_a:\n if letter in side_b:\n in_common.append(letter)\n total_prio += Priority(letter)\n break\n print(inv_size, half, side_a, side_b, in_common, rucksack)\n\nprint(total_prio)\n","repo_name":"aehogan/adventofcode_2022","sub_path":"3/script_1.py","file_name":"script_1.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
+{"seq_id":"31996891528","text":"import argparse\nimport sys\nfrom typing import List\n\nimport pytest\n\nfrom e3sm_diags.parameter.core_parameter import CoreParameter\nfrom e3sm_diags.parser.area_mean_time_series_parser import AreaMeanTimeSeriesParser\nfrom e3sm_diags.parser.arm_diags_parser import ARMDiagsParser\nfrom e3sm_diags.parser.core_parser import CoreParser\nfrom e3sm_diags.parser.diurnal_cycle_parser import DiurnalCycleParser\nfrom e3sm_diags.parser.enso_diags_parser import EnsoDiagsParser\nfrom e3sm_diags.parser.meridional_mean_2d_parser import MeridionalMean2dParser\nfrom e3sm_diags.parser.qbo_parser import QboParser\nfrom e3sm_diags.parser.streamflow_parser import StreamflowParser\nfrom e3sm_diags.parser.tc_analysis_parser import TCAnalysisParser\nfrom e3sm_diags.parser.zonal_mean_2d_parser import ZonalMean2dParser\nfrom e3sm_diags.parser.zonal_mean_2d_stratosphere_parser import (\n ZonalMean2dStratosphereParser,\n)\n\n\nclass TestCoreParser:\n @pytest.fixture(autouse=True)\n def setup(self):\n # The base arguments added to `CoreParser.parser` through\n # `CoreParser.add_arguments()`.\n self.base_args = [\n \"parameters\",\n \"other_parameters\",\n \"num_workers\",\n \"scheduler_addr\",\n \"granulate\",\n \"selectors\",\n \"set_name\",\n \"reference_data_set\",\n \"reference_data_path\",\n \"ref_timeseries_input\",\n \"ref_start_yr\",\n \"ref_end_yr\",\n \"ref_start_time_slice\",\n \"ref_end_time_slice\",\n \"ref_name\",\n \"ref_file\",\n \"test_data_set\",\n \"test_data_path\",\n \"test_timeseries_input\",\n \"test_start_yr\",\n \"test_end_yr\",\n \"test_start_time_slice\",\n \"test_end_time_slice\",\n \"test_file\",\n \"results_dir\",\n \"sets\",\n \"dataset\",\n \"run_type\",\n \"variables\",\n \"plevs\",\n \"plot_plevs\",\n \"plot_log_plevs\",\n \"seasons\",\n \"regions\",\n \"regrid_tool\",\n \"regrid_method\",\n \"case_id\",\n \"output_format\",\n \"output_format_subplot\",\n \"canvas_size_w\",\n \"canvas_size_h\",\n \"figsize\",\n \"dpi\",\n \"arrows\",\n \"logo\",\n \"contour_levels\",\n \"diff_levels\",\n \"reference_name\",\n \"test_name\",\n \"short_test_name\",\n \"diff_name\",\n \"main_title\",\n \"reference_title\",\n \"test_title\",\n \"diff_title\",\n \"reference_colormap\",\n \"test_colormap\",\n \"diff_colormap\",\n \"reference_units\",\n \"test_units\",\n \"diff_units\",\n \"backend\",\n \"multiprocessing\",\n \"save_netcdf\",\n \"no_viewer\",\n \"debug\",\n ]\n\n def test__init__(self):\n CoreParser()\n\n def test_check_values_of_params_does_not_raise_error(self):\n param = CoreParameter()\n param.reference_data_path = \"path\"\n param.test_data_path = \"path\"\n param.results_dir = \"path\"\n\n # Should not raise RunTimeError\n CoreParser.check_values_of_params([param])\n\n def test_add_arguments_adds_base_args(self):\n parser = CoreParser()\n assert _is_args_added(parser, self.base_args)\n\n def test_parse_args_sets_cmd_used_and_returns_list_of_args(self):\n parser = CoreParser()\n\n result = parser.parse_args()\n result_args = list(vars(result).keys())\n\n assert result_args == self.base_args\n assert parser.cmd_used == sys.argv\n\n def test_view_args_returns_parser_namespace(self):\n parser = CoreParser()\n\n result = parser.view_args()\n assert isinstance(result, argparse.Namespace)\n\n result_args = list(vars(result).keys())\n assert result_args == self.base_args\n\n @pytest.mark.xfail\n def test_get_parameters_returns_cmdline_parameters(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_orig_parameters(self):\n assert 0\n\n 
@pytest.mark.xfail\n def test_get_parameters_returns_other_parameters(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_default_vars(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_cmd_default_vars(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_parameter_from_defaults_of_the_command_line_arg(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_parameter_from_defaults_of_the_parameter_class(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_get_parameters_returns_cartesian_product_of_granulate_attr(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_cfg_parameters_returns_parameters_created_by_running_from_CLI(\n self,\n ):\n # FIXME: Should we deprecate this method for running `e3sm_diags`?\n # https://e3sm-project.github.io/e3sm_diags/_build/html/main/config-run.html#e3sm-diags-p-older-method\n assert 0\n\n @pytest.mark.xfail\n def test_get_cfg_parameters_returns_parameters_created_by_cfg_file(self):\n assert 0\n\n @pytest.mark.xfail\n def test_get_cfg_parameters_checks_values_in_cfg_file_and_returns_parameter(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_get_cfg_parameters_only_uses_argparse_values_and_returns_parameters(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_select_returns_cmdline_parameters_that_are_subset_of_the_main_parameters(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_select_returns_orig_parameters_that_are_subset_of_the_main_parameters(\n self,\n ):\n assert 0\n\n @pytest.mark.xfail\n def test_select_returns_other_parameters_that_are_subset_of_the_main_parameters(\n self,\n ):\n assert 0\n\n\ndef test_area_mean_time_series_parser_initializes():\n parser = AreaMeanTimeSeriesParser()\n custom_args = [\n \"ref_names\",\n \"ref_timeseries_input\",\n \"test_timeseries_input\",\n \"start_yr\",\n \"end_yr\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_arms_diags_parser_initializes():\n parser = ARMDiagsParser()\n custom_args = [\"ref_names\"]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_diurnal_cycle_parser_initializes():\n parser = DiurnalCycleParser()\n custom_args = [\n \"ref_timeseries_input\",\n \"test_timeseries_input\",\n \"start_yr\",\n \"end_yr\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_enso_diags_parser_initializes():\n parser = EnsoDiagsParser()\n custom_args = [\n \"ref_names\",\n \"ref_timeseries_input\",\n \"test_timeseries_input\",\n \"start_yr\",\n \"end_yr\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_meridional_mean_2d_parser_initializes():\n parser = MeridionalMean2dParser()\n custom_args = [\n \"plevs\",\n \"plot_plevs\",\n \"plot_log_plevs\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_qbo_parser_initializes():\n parser = QboParser()\n custom_args = [\n \"ref_timeseries_input\",\n \"test_timeseries_input\",\n \"start_yr\",\n \"end_yr\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_streamflow_parser_initializes():\n parser = StreamflowParser()\n custom_args = [\n \"gauges_path\",\n \"max_num_gauges\",\n \"print_statements\",\n \"ref_timeseries_input\",\n \"test_timeseries_input\",\n \"start_yr\",\n \"end_yr\",\n ]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef test_tc_analysis_parser_initializes():\n TCAnalysisParser()\n\n\ndef test_zonal_mean_2d_parser_initializes():\n parser = ZonalMean2dParser()\n custom_args = [\"plevs\", \"plot_plevs\", \"plot_log_plevs\"]\n\n 
assert _is_args_added(parser, custom_args)\n\n\ndef test_zonal_mean_2d_stratosphere_parser_initializes():\n parser = ZonalMean2dStratosphereParser()\n custom_args = [\"plevs\", \"plot_plevs\", \"plot_log_plevs\"]\n\n assert _is_args_added(parser, custom_args)\n\n\ndef _is_args_added(parser: CoreParser, custom_args: List[str]) -> bool:\n \"\"\"Checks the parser's custom args are added to argparse.ArgumentParser.\n\n Parameters\n ----------\n parser : CoreParser\n The CoreParser-based object.\n custom_args : List[str]\n The list of custom arguments for this parser.\n\n Returns\n -------\n bool\n True if all custom arguments are added, else False.\n \"\"\"\n namespace, _ = parser.parser.parse_known_args()\n namespace_args = vars(namespace).keys()\n\n for arg in custom_args:\n if arg not in namespace_args:\n return False\n\n return True\n","repo_name":"E3SM-Project/e3sm_diags","sub_path":"tests/e3sm_diags/test_parsers.py","file_name":"test_parsers.py","file_ext":"py","file_size_in_byte":8944,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"30"}
+{"seq_id":"43613672668","text":"#!/usr/bin/env python\n\n# Name: Luke Heary\n# Date: 2/22/19\n\nimport socket\nimport optparse\nimport sys\n\ndef main():\n\n parser = optparse.OptionParser()\n options, args = parser.parse_args()\n\n ips = []\n sockets = []\n firstSock = []\n switch = True\n\n infoPath = args[0]\n for x in args[1:]:\n ips.append(x)\n stuff = x.split(\":\")\n address = stuff[0]\n port = stuff[1]\n\n if switch:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((address, int(port)))\n firstSock.append(sock)\n switch = False\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sockets.append(sock)\n sock.connect((address, int(port)))\n\n sendUserData(infoPath, firstSock)\n sendSampleData(sockets)\n\ndef sendUserData(infoPath, sockets):\n file = open(infoPath, \"r\")\n\n records = []\n counter = 0\n record = []\n for line in file:\n line = line.strip(\"\\n\")\n if line != '':\n if counter < 3:\n record.append(line)\n counter += 1\n else:\n record.append(line)\n recordStr = ':'.join(str(e) for e in record) + \"][\"\n sockets[0].send(recordStr)\n record = []\n counter = 0\n\n return records\n\ndef sendSampleData(sockets):\n #file = open(\"samples.dat\", \"r\")\n for line in sys.stdin:\n line = line.strip(\"\\n\")\n lineArray = line.split(\", \")\n destIndex = int(lineArray[0]) - 1 # gets the first index and subtracts the value by 1 for interacting\n line = ':'.join(str(e) for e in lineArray) + \"][\"\n destSocket = sockets[destIndex]\n destSocket.send(line)\n\nmain()","repo_name":"lukeheary/DCN_Project2","sub_path":"HealthMon.py","file_name":"HealthMon.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"21431779499","text":"#https://github.com/ijgnd/anki__reviewer_deck_and_card_info_sidebar/blob/master/src/deck_and_card_info_during_review/helper_functions.py\n\nimport time\n\nfrom aqt import mw\n\n\ndef due_day(card):\n if card.queue <= 0:\n return \"\"\n else:\n if card.queue in (2,3):\n if card.odue:\n myvalue = card.odue\n else:\n myvalue = card.due\n mydue = time.time()+((myvalue - mw.col.sched.today)*86400)\n else:\n if card.odue:\n mydue = card.odue\n else:\n mydue = card.due\n try:\n out = time.strftime(\"%Y-%m-%d\", time.localtime(mydue)) \n except:\n out = \"\"\n return out\n\n\ndef is_early_review_then_return_percentage_interval(card):\n due = card.odue if card.odid else card.due\n if not due > mw.col.sched.today:\n return False\n else:\n if card.queue == 1: #learn\n return False\n elif card.queue == 0 and card.type == 0: #new\n return False\n else:\n try:\n lastRev = due - card.ivl\n elapsed = mw.col.sched.today - lastRev\n p = elapsed/float(card.ivl) * 100\n pf = \"{0:.2f}\".format(p) + \" %\"\n return pf\n except ZeroDivisionError:\n return False\n","repo_name":"kelvinojedaepn/AnkiAddons21","sub_path":"addons21/2140680811/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"74509323605","text":"# -*- coding: utf-8 -*-\nn = int(input())\nsum = 0\ncnt = 0\nfor i in range(1,n):\n sum = i*(i+1)/2\n if sum > n: break\n if sum <= n:\n cnt = i\n \n\nprint(cnt)\n \n\n\n'''\n1789. 수들의 합\nhttps://www.acmicpc.net/problem/1789\n\n문제\n서로 다른 N개의 자연수의 합이 S라고 한다. S를 알 때, 자연수 N의 최댓값은 얼마일까?\n\n입력\n첫째 줄에 자연수 S(1 ≤ S ≤ 4,294,967,295)가 주어진다.\n\n출력\n첫째 줄에 자연수 N의 최댓값을 출력한다.\n1~n까지의 합\n: n(n+1)/2\n'''","repo_name":"Haneul99/Baekjoon","sub_path":"1001-2000/1701-1800/1789.py","file_name":"1789.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"6742733478","text":"'''Funções podem e ou precisam possuir certas variáveis, essas variáveis normalmente são variáveis \"locais\" isto é\numa variável local só pode ser visivel em um certo bloco: Ex:'''\ndef linha():\n print(\"-\"*30)\ndef funcao_ex(b):\n b += 4 # Dentro dessa função temos algumas variáveis, essas são variáveis locais\n c = 2 # So existem dentro dessa função(bloco)\n print(f\"A dentro vale {a}.\")\n linha()\n print(f\"B dentro vale {b}.\")\n linha()\n print(f\"C dentro vale {c}.\")\n\n\na = 5 # esta veriável ela tem o escopo global, pois está fora de qualquer bloco que possa existir no programa\nfuncao_ex(a)\nlinha()\nprint(f\"A fora vale {a}.\")\n\n'''Quando estamos criando uma função as vezes precisamos que certos valores recebam alguma resposta, para isso usamos o comando\n\"return\", esse comando serve para que um função possa alocar o seu resultado em alguma variável ou até mesmo na propria função EX:'''\nlinha()\ndef soma(a = 0, b = 0, c = 0):\n s = a + b + c\n #print(s) # nesse exemplo quando chamamos a função ela apenas restornará um print, então caso precisemos fazer o uso varias vezes \n # essa função pode ser que seja um incomodo coloca-las em um único print.\n # Por isso ao invés de usar esse formato podemos usar o \"return\" para que a resposta dessa função seja colocada em uma variável EX:\n return(s)# Assim podemos clocar varios resultados dessa função em um único print\n\nr1 = soma(5, 7, 9)\nr2 = soma(7, 3)\nr3 = soma(8)\n\nprint(f\"O resultado é {r1}, {r2}, {r3}.\")\n","repo_name":"zefelipe19/ExerciciosPython","sub_path":"Mundo 3/Aula 20 - Funções/2_Funções no Python Pt2 (101 - 106).py","file_name":"2_Funções no Python Pt2 (101 - 106).py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12973209097","text":"import sys\nimport time\nfrom epics import Motor, PV\n\nif __name__ == \"__main__\":\n if not len(sys.argv) == 3:\n print(\">>>>Error!! Call the script as follows: 'python timed_scan.py \")\n exit(1) \n\n chFNAME=PV(\"X02DA-SCAN-CAM1:FILPRE\")\n ch_scan_go = PV(\"X02DA-SCAN-SCN1:GO\")\n \n \"\"\"\n (1) Set varsX\n \"\"\"\n timing = int( sys.argv[1] ) ## in seconds\n n_scans = int( sys.argv[2] ) ## number \n filename = chFNAME.get(as_string=True)\n\n \"\"\"\n (2) Set name\n \"\"\"\n for kk in range(1,n_scans+1):\n # PREPARE new name\n filename_new = filename + '_T' + str(kk).zfill(3) + '_'\n print(filename_new)\n \n # SET NEW FILENAME\n chFNAME.put(filename_new, wait=True)\n\n # LAUNCH NEW SCAN\n ch_scan_go.put(1, wait=True)\n\n # WAIT\n time.sleep(timing)\n time.sleep(timing)\n","repo_name":"gnudo/python-scripts","sub_path":"timed_scan.py","file_name":"timed_scan.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"5823294163","text":"class Listing:\n def __init__(self, id: str = '', title: str = '', url: str = '', date: str = '', pics: list[str] = [], price: str = '', odometer: str = ''):\n self.id = id\n self.title = title\n self.url = url\n self.date = date\n self.pics = pics\n self.price = price\n self.odometer = odometer\n\n def to_dict(self):\n return {\n 'id': self.trim,\n 'title': self.title,\n 'url': self.url,\n 'date': self.date,\n 'pics': self.pics,\n 'price': self.price,\n 'odometer': self.odometer\n }\n\n @classmethod\n def from_dict(cls, data: dict):\n return cls(\n id = data.get('id'),\n title = data.get('title'),\n url = data.get('url'),\n date = data.get('date'),\n pics = data.get('pics'),\n price = data.get('price'),\n odomoeter = data.get('odometer')\n )","repo_name":"GCnomore/craig_scrap","sub_path":"data/model/listing_model.py","file_name":"listing_model.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"24180069272","text":"import pymongo \r\nfrom bson.son import SON\r\nfrom flask import Flask, render_template\r\nimport pandas as pd\r\nimport os\r\n\r\napp = Flask(__name__)\r\n@app.route(\"/\")\r\ndef home():\r\n # mongoDB hosted on an AWS EC2 instance (public IP: 54.173.225.16)\r\n myclient = pymongo.MongoClient(\"mongodb://54.173.225.16:27017/\")\r\n\r\n mydb = myclient[\"mydatabase\"] #create database\r\n \r\n #display data headers\r\n print(myclient.list_database_names())\r\n print(mydb.list_collection_names())\r\n\r\n # weeks = [\"week1\", \"week2\", \"week3\", \"week4\", \"week5\"]\r\n\r\n def aggregate_function(week, mydb=mydb):\r\n collection = mydb[week]\r\n print(\"Week: \", week)\r\n\r\n # the aggregate pipeline here counts song frequencies,\r\n # sorts based on those frequencies,\r\n # and eliminates null values \r\n # note: the bson package is by default installed with pymongo\r\n aggregated = collection.aggregate([{\"$unwind\": \"$_id\"},\r\n {\"$match\": {\"artist\": {\"$ne\": None}}},\r\n {\"$group\": {\"_id\": [\"$song\", \"$artist\"], \"num_played\": {\"$sum\":1}}},\r\n {\"$sort\": SON([(\"num_played\", -1), (\"_id\", -1)])},\r\n { \"$limit\": 10 }])\r\n \r\n # separate the items to display: song and artist\r\n aggregated_list = list(aggregated)\r\n top_ten = {}\r\n # pprint.pprint(aggregated_list)\r\n for idx, item in enumerate(aggregated_list):\r\n # print(item['_id'][0])\r\n top_ten[idx] = {\"Song\": item['_id'][0], \"Artist\": item['_id'][1]}\r\n\r\n return top_ten\r\n \r\n # a template folder is necessary for flask.render_template()\r\n path = \"templates\"\r\n # Check whether the templates path exists in the directory\r\n isExist = os.path.exists(path)\r\n\r\n if not isExist:\r\n os.makedirs(path)\r\n\r\n # for week in weeks:\r\n # aggregated_dict[week] = aggregate_function(week)\r\n\r\n # html tables\r\n df1 = pd.DataFrame.from_dict(aggregate_function('week1'))\r\n df2 = pd.DataFrame.from_dict(aggregate_function('week2'))\r\n df3 = pd.DataFrame.from_dict(aggregate_function('week3'))\r\n df4 = pd.DataFrame.from_dict(aggregate_function('week4'))\r\n df5 = pd.DataFrame.from_dict(aggregate_function('week5'))\r\n\r\n os.chdir(\"./templates/\")\r\n\r\n with open(\"top.html\", \"w\", encoding=\"utf-8\") as file:\r\n # create HTML table to display query results\r\n file.write(\"\\n\\n Top 10 Songs and Artists Each Week
\" + \\\r\n \"\\n\\n\\n Week 1\" + \"\\n\\n\\n\" + \\\r\n df1.to_html() + \"\\n\\n\\n\" + \\\r\n \"Week 2\" + \"\\n\\n\\n\" + \\\r\n df2.to_html() + \"\\n\\n\\n\" + \\\r\n \"Week 3\" + \"\\n\\n\\n\" + \\\r\n df3.to_html() + \"\\n\\n\\n\" + \\\r\n \"Week 4\" + \"\\n\\n\\n\" + \\\r\n df4.to_html() + \"\\n\\n\\n\" + \\\r\n \"Week 5\" + \"\\n\\n\\n\" + \\\r\n df5.to_html() +\r\n \"\")\r\n \r\n\r\n # pprint.pprint(aggregated_dict)\r\n\r\n return render_template(\"top.html\")\r\n # return 'Hello, Flask'\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n\r\n\r\n\r\n\r\n","repo_name":"rara-by/Sparkify1-AWS-EC2-Flask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"34972432664","text":"import tweepy\nfrom tweepy import OAuthHandler\n\n\ndef get_auth_token(_filepath):\n # Takes a text file containing four lines of twitter app credential keys and returns\n # a tweepy token to be used in future functions\n\n key_file_path = _filepath # Standard filepath 'learn/twitter/twitter_keys'\n keys = []\n with open(key_file_path) as file:\n keys = file.read().splitlines()\n\n consumer_key = keys[0]\n consumer_secret = keys[1]\n access_token = keys[2]\n access_secret = keys[3]\n\n # Setup OAuthentication and access tokens\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n\n return api\n","repo_name":"chrolss/twitter_mine","sub_path":"src/data/tweepy_auth.py","file_name":"tweepy_auth.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"18489925655","text":"from battle.static import ENGAGEMENT, HOUGEKI_CUTIN\nfrom data.static import EQUIP_TYPE_2\nfrom calc.base_logic import BattleLogicBase\nfrom calc.hougeki.static import HOUGEKI_CUTIN_MODIFIER, HOUGEKI_SPATTACK_MODIFIER\n\ndef calculate_special_attack_modifier(base: BattleLogicBase, cutin: int):\n if cutin in HOUGEKI_CUTIN_MODIFIER:\n return HOUGEKI_CUTIN_MODIFIER[cutin]\n \n if cutin in SPATTACK_MAP:\n return SPATTACK_MAP[cutin](base)\n\n else:\n return 1\n\ndef nelson_touch(base: BattleLogicBase):\n if base.engagement == ENGAGEMENT.RED_T:\n return HOUGEKI_SPATTACK_MODIFIER.NELSON_TOUCH_RED_T\n return HOUGEKI_SPATTACK_MODIFIER.NELSON_TOUCH\n\ndef nagato_broadside(base: BattleLogicBase):\n is_main_attacker = base.attacker_id == base.attacker_fleet[0]\n partner_ship_id = base.attacker_fleet[1]\n\n if is_main_attacker:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.NAGATO_BROADSIDE_BASE\n else:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.NAGATO_BROADSIDE_PARTNER_BASE\n\n # Mutsu K2 partner bonus\n if partner_ship_id == 573:\n cutin_modifier *= 1.2 if is_main_attacker else 1.4\n\n # Mutsu Kai partner bonus\n elif partner_ship_id == 276 or partner_ship_id == 81:\n cutin_modifier *= 1.15 if is_main_attacker else 1.35\n\n # Nelson Kai partner bonus\n elif partner_ship_id == 576:\n cutin_modifier *= 1.1 if is_main_attacker else 1.25\n\n if base.has_equip_type2(EQUIP_TYPE_2.AP_SHELL):\n cutin_modifier *= 1.35\n\n if base.has_surface_radar():\n cutin_modifier *= 1.15\n\n return cutin_modifier\n\ndef mutsu_broadside(base: BattleLogicBase):\n is_main_attacker = base.attacker_id == base.attacker_fleet[0]\n partner_ship_id = base.attacker_fleet[1]\n\n if is_main_attacker:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.NAGATO_BROADSIDE_BASE\n else:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.NAGATO_BROADSIDE_PARTNER_BASE\n\n # Mutsu K2 partner bonus\n if partner_ship_id == 541:\n cutin_modifier *= 1.2 if is_main_attacker else 1.4\n\n # Mutsu Kai partner bonus\n elif partner_ship_id == 275 or partner_ship_id == 80:\n cutin_modifier *= 1.15 if is_main_attacker else 1.35\n\n if base.has_equip_type2(EQUIP_TYPE_2.AP_SHELL):\n cutin_modifier *= 1.35\n\n if base.has_surface_radar():\n cutin_modifier *= 1.15\n\n return cutin_modifier\n\ndef colorado_special(base: BattleLogicBase):\n is_main_attacker = base.attacker_id == base.attacker_fleet[0]\n is_second_attacker = base.attacker_id == base.attacker_fleet[1]\n\n if is_main_attacker:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.COLORADO_SPECIAL_BASE\n else:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.COLORADO_SPECIAL_PARTNER_BASE\n\n # Second and third ships get bonus mod if they are a partner ship\n if base.attacker.id in {275, 541, 276, 573, 571, 576, 601, 1496, 913, 918}:\n cutin_modifier *= 1.15 if is_second_attacker else 1.17\n \n if base.has_equip_type2(EQUIP_TYPE_2.AP_SHELL):\n cutin_modifier *= 1.35\n\n if base.has_surface_radar():\n cutin_modifier *= 1.15\n\n # SG Radar LM bonus\n if base.has_equipment(456):\n cutin_modifier *= 1.15\n\n return cutin_modifier\n\n\ndef yamato_3ship(base: BattleLogicBase):\n second_ship_id = base.attacker_fleet[1]\n third_ship_id = base.attacker_fleet[2]\n is_third_attacker = base.attacker_id == third_ship_id\n is_second_attacker = base.attacker_id == second_ship_id\n\n if is_third_attacker:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.YAMATO_3SHIP_THIRD_PARTNER_BASE\n else:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.YAMATO_3SHIP_BASE\n\n # Third attacker does not gain partner and 
rangefinder bonuses\n\n if not is_third_attacker:\n\n if is_second_attacker:\n # Second Shot Yamato-class bonus\n if second_ship_id in {911, 916, 546} or third_ship_id in {911, 916, 546}:\n cutin_modifier *= 1.2\n\n # Second Shot Nagato-class Bonus\n elif second_ship_id in {541, 573} or third_ship_id in {541, 573}:\n cutin_modifier *= 1.1\n \n # Second Shot Ise-class Bonus\n elif second_ship_id in {553, 554} or third_ship_id in {553, 554}:\n cutin_modifier *= 1.05\n\n # Flagship bonus for Yamato, Nagato and Ise-classes\n elif second_ship_id in {911, 916, 546, 541, 573, 553, 554} or third_ship_id in {911, 916, 546, 541, 573, 553, 554}:\n cutin_modifier *= 1.1\n\n # Rangefinder bonus\n if base.has_equipment_in_array({142, 460}):\n cutin_modifier *= 1.1\n\n if base.has_equip_type2(EQUIP_TYPE_2.AP_SHELL):\n cutin_modifier *= 1.35\n\n if base.has_surface_radar():\n cutin_modifier *= 1.15\n\n return cutin_modifier\n\ndef yamato_2ship(base: BattleLogicBase):\n partner_ship_id = base.attacker_fleet[1]\n is_main_attacker = base.attacker_id == base.attacker_fleet[0]\n\n if is_main_attacker:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.YAMATO_2SHIP_BASE\n else:\n cutin_modifier = HOUGEKI_SPATTACK_MODIFIER.YAMATO_2SHIP_PARTNER_BASE\n\n # Yamato-class K2 bonus\n if partner_ship_id in [546, 911]:\n cutin_modifier *= 1.1 if is_main_attacker else 1.2\n\n elif partner_ship_id == 916:\n cutin_modifier *= 1.1 if is_main_attacker else 1.25\n\n if base.has_equip_type2(EQUIP_TYPE_2.AP_SHELL):\n cutin_modifier *= 1.35\n\n if base.has_surface_radar():\n cutin_modifier *= 1.15\n\n # Rangefinder bonus\n if base.has_equipment_in_array({142, 460}):\n cutin_modifier *= 1.1\n\n return cutin_modifier\n\nSPATTACK_MAP = {\n HOUGEKI_CUTIN.NELSON_TOUCH: nelson_touch,\n HOUGEKI_CUTIN.NAGATO_SPECIAL: nagato_broadside,\n HOUGEKI_CUTIN.MUTSU_SPECIAL: mutsu_broadside,\n HOUGEKI_CUTIN.COLORADO_SPECIAL: colorado_special,\n HOUGEKI_CUTIN.YAMATO_3SHIP_CUTIN: yamato_3ship,\n HOUGEKI_CUTIN.YAMATO_2SHIP_CUTIN: yamato_2ship\n}\n","repo_name":"sorewachigauyo/kc-tsundb-eventbattle","sub_path":"calc/hougeki/special_attack.py","file_name":"special_attack.py","file_ext":"py","file_size_in_byte":5993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32842011930","text":"# http://acm.sgu.ru/problem.php?contest=0&problem=194\n# 194. Reactor Cooling\n# find max circulation ( no source no sink), with both upper and lower limit on edge \nfrom Dinic import Graph\n\nV,E=tuple(map(int,input().split()))\n# index V and V+1 reserved for virtual source and sink \nres_graph=[[0]*(V+2) for _ in range(V+2)]\nd=[0]*(V)\nedges=[]\nfor _ in range(E):\n u,v,floor,ceil=tuple(map(int,input().split()))\n # convert to 0 indexed \n edges.append((u-1,v-1,floor))\n res_graph[u-1][v-1]=ceil-floor \n d[v-1]-=floor\n d[u-1]+=floor \nfor i in range(V):\n if d[i]>0:#too much coming in, send extra to virtual sink\n res_graph[i][V+1]=d[i]\n elif d[i]<0:# too much going out, send extra into the vertex from virtual source\n res_graph[V][i]=-d[i]\ng=Graph(res_graph)\ng.Dinic(V,V+1)\n\n \nif g.max_flow< sum(val for val in d if val>0):\n print ('No')\nelse:\n print ('Yes')\n for i in range(len(edges)):\n u,v,floor=edges[i]\n print (g.flow[u][v]+floor)\n\n\n","repo_name":"luliu31415926/programming_contest_workbook","sub_path":"sgu_194_reactor_cooling.py","file_name":"sgu_194_reactor_cooling.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"37485581238","text":"import os\nfrom unittest import mock\n\nimport numpy as np\nimport pytest\nfrom npe2 import DynamicPlugin\nfrom npe2.manifest.contributions import SampleDataURI\nfrom qtpy.QtWidgets import QLabel, QRadioButton\n\nfrom napari._app_model import get_app\nfrom napari._qt.dialogs.qt_reader_dialog import (\n QtReaderDialog,\n open_with_dialog_choices,\n prepare_remaining_readers,\n)\nfrom napari.errors.reader_errors import ReaderPluginError\nfrom napari.settings import get_settings\n\n\n@pytest.fixture\ndef reader_dialog(qtbot):\n def _reader_dialog(**kwargs):\n widget = QtReaderDialog(**kwargs)\n widget.show()\n qtbot.addWidget(widget)\n\n return widget\n\n return _reader_dialog\n\n\ndef test_reader_dialog_buttons(reader_dialog):\n widg = reader_dialog(\n readers={'display name': 'plugin-name', 'display 2': 'plugin2'}\n )\n assert len(widg.findChildren(QRadioButton)) == 2\n\n\ndef test_reader_defaults(reader_dialog, tmpdir):\n file_pth = tmpdir.join('my_file.tif')\n widg = reader_dialog(pth=file_pth, readers={'p1': 'p1', 'p2': 'p2'})\n\n assert widg.findChild(QLabel).text().startswith('Choose reader')\n assert widg._get_plugin_choice() == 'p1'\n assert widg.persist_checkbox.isChecked()\n\n\ndef test_reader_with_error_message(reader_dialog):\n widg = reader_dialog(error_message='Test Error')\n assert widg.findChild(QLabel).text().startswith('Test Error')\n\n\ndef test_reader_dir_with_extension(tmpdir, reader_dialog):\n dir_name = tmpdir.mkdir('my_dir.zarr')\n widg = reader_dialog(pth=dir_name, readers={'p1': 'p1', 'p2': 'p2'})\n assert hasattr(widg, 'persist_checkbox')\n assert (\n widg.persist_checkbox.text()\n == \"Remember this choice for files with a .zarr extension\"\n )\n\n\ndef test_reader_dir(tmpdir, reader_dialog):\n dir_name = tmpdir.mkdir('my_dir')\n widg = reader_dialog(pth=dir_name, readers={'p1': 'p1', 'p2': 'p2'})\n assert (\n widg._persist_text\n == f'Remember this choice for folders labeled as {dir_name}{os.sep}.'\n )\n\n\ndef test_get_plugin_choice(tmpdir, reader_dialog):\n file_pth = tmpdir.join('my_file.tif')\n widg = reader_dialog(pth=file_pth, readers={'p1': 'p1', 'p2': 'p2'})\n reader_btns = widg.reader_btn_group.buttons()\n\n reader_btns[1].toggle()\n assert widg._get_plugin_choice() == 'p2'\n\n reader_btns[0].toggle()\n assert widg._get_plugin_choice() == 'p1'\n\n\ndef test_get_persist_choice(tmpdir, reader_dialog):\n file_pth = tmpdir.join('my_file.tif')\n widg = reader_dialog(pth=file_pth, readers={'p1': 'p1', 'p2': 'p2'})\n assert widg._get_persist_choice()\n\n widg.persist_checkbox.toggle()\n assert not widg._get_persist_choice()\n\n\ndef test_prepare_dialog_options_no_readers():\n with pytest.raises(ReaderPluginError) as e:\n prepare_remaining_readers(\n ['my-file.fake'], 'fake-reader', RuntimeError('Reading failed')\n )\n assert 'Tried to read my-file.fake with plugin fake-reader' in str(e.value)\n\n\ndef test_prepare_dialog_options_multiple_plugins(builtins):\n pth = 'my-file.tif'\n\n readers = prepare_remaining_readers(\n [pth],\n None,\n RuntimeError(f'Multiple plugins found capable of reading {pth}'),\n )\n assert builtins.name in readers\n\n\ndef test_prepare_dialog_options_removes_plugin(tmp_plugin: DynamicPlugin):\n tmp2 = tmp_plugin.spawn(register=True)\n\n @tmp_plugin.contribute.reader(filename_patterns=['*.fake'])\n def _(path):\n ...\n\n @tmp2.contribute.reader(filename_patterns=['*.fake'])\n def _(path):\n ...\n\n readers = prepare_remaining_readers(\n ['my-file.fake'],\n tmp_plugin.name,\n RuntimeError('Reader 
failed'),\n )\n assert tmp2.name in readers\n assert tmp_plugin.name not in readers\n\n\ndef test_open_sample_data_shows_all_readers(\n make_napari_viewer,\n tmp_plugin: DynamicPlugin,\n):\n \"\"\"Checks that sample data callback `_add_sample` shows all readers.\"\"\"\n # Test for bug fixed in #6058\n tmp2 = tmp_plugin.spawn(register=True)\n\n @tmp_plugin.contribute.reader(filename_patterns=['*.fake'])\n def _(path):\n ...\n\n @tmp2.contribute.reader(filename_patterns=['*.fake'])\n def _(path):\n ...\n\n my_sample = SampleDataURI(\n key='tmp-sample',\n display_name='Temp Sample',\n uri='some-path/some-file.fake',\n )\n tmp_plugin.manifest.contributions.sample_data = [my_sample]\n\n app = get_app()\n # required so setup steps run in init of `Viewer` and `Window`\n viewer = make_napari_viewer()\n # Ensure that `handle_gui_reading`` is not passed the sample plugin name\n with mock.patch(\n 'napari._qt.dialogs.qt_reader_dialog.handle_gui_reading'\n ) as mock_read:\n app.commands.execute_command('tmp_plugin:tmp-sample')\n\n mock_read.assert_called_once_with(\n ['some-path/some-file.fake'],\n viewer.window._qt_viewer,\n stack=False,\n )\n\n\ndef test_open_with_dialog_choices_persist(\n builtins, make_napari_viewer, tmp_path\n):\n pth = tmp_path / 'my-file.npy'\n np.save(pth, np.random.random((10, 10)))\n\n viewer = make_napari_viewer()\n open_with_dialog_choices(\n display_name=builtins.display_name,\n persist=True,\n extension='.npy',\n readers={builtins.name: builtins.display_name},\n paths=[str(pth)],\n stack=False,\n qt_viewer=viewer.window._qt_viewer,\n )\n assert len(viewer.layers) == 1\n # make sure extension was saved with *\n assert get_settings().plugins.extension2reader['*.npy'] == builtins.name\n\n\ndef test_open_with_dialog_choices_raises(make_napari_viewer):\n viewer = make_napari_viewer()\n\n get_settings().plugins.extension2reader = {}\n with pytest.raises(ValueError):\n open_with_dialog_choices(\n display_name='Fake Plugin',\n persist=True,\n extension='.fake',\n readers={'fake-plugin': 'Fake Plugin'},\n paths=['my-file.fake'],\n stack=False,\n qt_viewer=viewer.window._qt_viewer,\n )\n # settings weren't saved because reading failed\n assert not get_settings().plugins.extension2reader\n","repo_name":"napari/napari","sub_path":"napari/_qt/dialogs/_tests/test_reader_dialog.py","file_name":"test_reader_dialog.py","file_ext":"py","file_size_in_byte":6142,"program_lang":"python","lang":"en","doc_type":"code","stars":1800,"dataset":"github-code","pt":"39"}
+{"seq_id":"39187398981","text":"\n\n\nimport requests\nimport json\nimport io\nurl='https://siyas.atlassian.net//rest/api/2/issue'\nheader={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\"\n}\nwith io.open(\"issue.csv\",\"r\",encoding=\"utf-8\")as f1:\n data=f1.read()\n f1.close()\n data=data.split(\"\\n\")\nfor rows in data:\n details=json.dumps({\n \"fields\": {\n \"project\":\n {\n \"key\": rows.split(\",\")[0]\n },\n \"summary\": rows.split(\",\")[1],\n \"description\": rows.split(\",\")[2],\n \"issuetype\": {\n \"name\": \"Task\"\n }, \n }\n})\n\n response=requests.post(url,headers=header,data=details,auth=(\"priyasingh21@navgurukul.org\",\"okE2cGJzACCpOBDXzy9y36C7\"))\n print(response.text)\n","repo_name":"priyasingh890/Jira-software","sub_path":"create_jira_issue_in_bulk.py","file_name":"create_jira_issue_in_bulk.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"86507803713","text":"\"\"\"\nCreated Sept 5 2022\n\n:author: Matt Runyon\n\nDescription\n-----------\n\nThis module contains a class that produces simulated data from a vehicle.\n\"\"\"\n\n# %% IMPORTS\n\nimport os\nimport random\nimport string\nimport time\n\nfrom http_client import HTTPClient # pylint: disable=C0411\nfrom location import Location\n\n# %% CONSTANTS\n\nDEFAULT_HTTP_PORT = 5000\nDEFAULT_MAKE = 'Ford'\nDEFAULT_VEHICLE_REPORT_DELAY = 3 # seconds\nSTREAM_METRIC_ID = \"id\"\nSTREAM_METRIC_MAKE = \"make\"\nSTREAM_METRIC_MODEL = \"model\"\nSTREAM_METRIC_POS_X = \"position_x\"\nSTREAM_METRIC_POS_Y = \"position_y\"\nSTREAM_METRIC_POS_Z = \"position_z\"\nSTREAM_METRIC_SPEED = \"speed\"\nSTREAM_METRIC_TIME = \"timestamp\"\nSTREAM_METRIC_VIN = \"vin\"\nMODEL_CHOICES = ['Maverick', 'Escape', 'F-150', 'Explorer', 'Mustang',\n 'Bronco', 'Edge', 'Expedition']\nVIN_LEN = 17\n\n# %% FUNCTIONS\n\ndef generate_vin():\n \"\"\"Generate a random VIN number.\n\n Returns:\n str: The VIN number.\n \"\"\"\n\n vin = ''\n for _ in range(VIN_LEN):\n vin += random.choice(string.ascii_uppercase + string.digits)\n return vin\n\n# %% CLASSES\n\n\nclass Vehicle:\n \"\"\"A vehicle that can stream its own performance metrics.\"\"\"\n\n # -------------------------------------------------------------------------\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n\n self.vin = generate_vin()\n self.get_config()\n if self.model is None:\n self.model = random.choice(MODEL_CHOICES)\n self.driving = False\n self.http_client = HTTPClient(http_server=self.http_server,\n http_port=self.http_port,\n http_rule=self.http_rule)\n self.gps = Location(vx=self.velocity_x,\n vy=self.velocity_y,\n vz=self.velocity_z)\n if self.auto_start:\n self.start_trip()\n\n # -------------------------------------------------------------------------\n def get_config(self):\n \"\"\"Try to get configuration details through various, prioritized means.\n\n Priority 1: Check for environment variables.\n Priority 2: Check default config file.\n\n Returns:\n None.\n \"\"\"\n\n self.http_server = os.environ.get('PRODUCER_HTTP_SERVER')\n self.http_port = int(os.environ.get('PRODUCER_HTTP_PORT'))\n self.http_rule = os.environ.get('PRODUCER_HTTP_RULE')\n self.make = os.environ.get('VEHICLE_MAKE', DEFAULT_MAKE)\n self.model = os.environ.get('VEHICLE_MODEL')\n self.auto_start = os.environ.get('AUTO_START')\n self.report_delay = float(os.environ.get('VEHICLE_REPORT_DELAY',\n DEFAULT_VEHICLE_REPORT_DELAY))\n self.velocity_x = float(os.environ.get('VEHICLE_VELOCITY_X'))\n self.velocity_y = float(os.environ.get('VEHICLE_VELOCITY_Y'))\n self.velocity_z = float(os.environ.get('VEHICLE_VELOCITY_Z'))\n print(\"Dumping Vehicle config data:\\n\")\n print(f\"http_server: {self.http_server}\")\n print(f\"http_port: {self.http_port}\")\n print(f\"http_rule: {self.http_rule}\")\n print(f\"make: {self.make}\")\n print(f\"model: {self.model}\")\n print(f\"auto_start: {self.auto_start}\")\n print(f\"report_delay: {self.report_delay}\")\n print(f\"velocity_x: {self.velocity_x}\")\n print(f\"velocity_y: {self.velocity_y}\")\n print(f\"velocity_z: {self.velocity_z}\")\n\n # -------------------------------------------------------------------------\n def get_position(self):\n \"\"\"Get the position of the vehicle.\n\n Returns:\n tuple: The Cartesian position in Euclidean 3-Space.\n \"\"\"\n\n return (self.gps.x_of_t, self.gps.y_of_t, self.gps.z_of_t)\n\n # -------------------------------------------------------------------------\n def get_speed(self):\n \"\"\"Get 
the speed of the vehicle.\n\n Returns:\n float: the net speed of the vehicle in m/s.\n \"\"\"\n\n return self.gps.compute_speed()\n\n # -------------------------------------------------------------------------\n def start_trip(self):\n \"\"\"Start the trip.\n\n Returns:\n None.\n \"\"\"\n\n self.gps.start_trip()\n self.driving = True\n self.run()\n\n # -------------------------------------------------------------------------\n def stop_trip(self):\n \"\"\"Start the trip.\n\n Returns:\n None.\n \"\"\"\n\n self.gps.stop_trip()\n self.driving = False\n\n # -------------------------------------------------------------------------\n def report(self):\n \"\"\"Report diagnostics to server.\n\n Returns:\n requests.Response: The HTTP Response object.\n \"\"\"\n\n speed = self.get_speed()\n position = self.get_position()\n results = {STREAM_METRIC_TIME: time.time(),\n STREAM_METRIC_MAKE: self.make,\n STREAM_METRIC_MODEL: self.model,\n STREAM_METRIC_POS_X: position[0],\n STREAM_METRIC_POS_Y: position[1],\n STREAM_METRIC_POS_Z: position[2],\n STREAM_METRIC_SPEED: speed,\n STREAM_METRIC_VIN: self.vin}\n return self.http_client.send(data=results)\n\n # -------------------------------------------------------------------------\n def run(self):\n \"\"\"Main loop.\n\n Returns:\n None.\n \"\"\"\n\n while True:\n if self.driving:\n self.report()\n time.sleep(self.report_delay)\n else:\n break\n","repo_name":"M4tt-/kafka-python-postgres-etl","sub_path":"src/vehicle/vehicle.py","file_name":"vehicle.py","file_ext":"py","file_size_in_byte":5645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"44291854204","text":"from setuptools import setup, find_packages\n\n\ndef read_requirements(filename=\"requirements.txt\"):\n \"Read the requirements\"\n with open(filename) as f:\n return [line.strip() for line in f \\\n if line.strip() and \\\n line[0].strip() != '#' and \\\n not line.startswith('-e ')]\n\n\ndef get_version(filename='gnuplot_data/version.py', name='VERSION'):\n \"Get the version\"\n with open(filename) as f:\n s = f.read()\n d = {}\n exec(s, d)\n return d[name]\n\n\nsetup(\n name='python-gnuplot-data',\n version='0.3.0',\n author='Dave Gabrielson',\n author_email='Dave.Gabrielson@gmail.com',\n packages=find_packages(),\n zip_safe=False,\n install_requires=['setuptools',],\n)\n","repo_name":"dgabrielson/python-gnuplot-data","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32393406012","text":"\"\"\"AxFiles field type functions - before, after / insert, update, delete\"\"\"\nimport os\nimport uuid\nimport shutil\nimport backend.misc as ax_misc\n\n\nasync def before_update(db_session, field, before_form, tobe_form, action,\n current_user):\n \"\"\" Set isTmp of file to False. So file_view route will work with /uploads,\n not /tmp forlder \"\"\"\n del db_session, before_form, tobe_form, action, current_user\n if field.value:\n for file in field.value:\n if file['isTmp']:\n file['isTmp'] = False\n return field.value\n\n\nasync def after_update(db_session, field, before_form, tobe_form, action,\n current_user):\n \"\"\"\n Moves uploaded files from /tmp// folder to\n /uploads/form_row_field_file////\n Returns:\n Object: Returns updated value of current field\"\"\"\n del before_form, action, current_user, db_session\n value_guids = []\n form_guid = str(tobe_form.guid)\n row_guid = str(uuid.UUID(str(tobe_form.row_guid)))\n field_guid = str(field.guid)\n\n if field.value and field.needs_sql_update:\n for file in field.value:\n value_guids.append(file['guid'])\n tmp_folder = os.path.join(ax_misc.tmp_root_dir, file['guid'])\n tmp_path = os.path.join(tmp_folder, file['name'])\n dist_folder = os.path.join(\n ax_misc.uploads_root_dir,\n 'form_row_field_file',\n form_guid,\n row_guid,\n field_guid,\n file['guid'])\n dist_path = os.path.join(dist_folder, file['name'])\n\n # if file exists in tmp - move it to row folder\n if os.path.lexists(tmp_path) is True:\n if os.path.exists(dist_folder) is False:\n os.makedirs(dist_folder)\n shutil.move(tmp_path, dist_path)\n shutil.rmtree(tmp_folder)\n\n # if form.row.field directory contains sub dirs with guid wich is not\n # in current value -> then file was deleted from field data,\n # We must delete this file from filesystem\n if field.needs_sql_update:\n field_folder = os.path.join(\n ax_misc.uploads_root_dir,\n 'form_row_field_file',\n form_guid,\n row_guid,\n field_guid\n )\n if os.path.exists(field_folder) is True:\n for root, dirs, _ in os.walk(field_folder):\n del root\n for dir_name in dirs:\n if dir_name not in value_guids:\n dir_to_delete = os.path.join(field_folder, dir_name)\n shutil.rmtree(dir_to_delete)\n\n return field.value\n\n\nasync def before_insert(db_session, field, before_form, tobe_form, action,\n current_user):\n \"\"\" Do the same as before_update \"\"\"\n return await before_update(\n db_session, field, before_form, tobe_form, action, current_user)\n\n\nasync def after_insert(db_session, field, before_form, tobe_form, action,\n current_user):\n \"\"\" Do the same as after_update \"\"\"\n return await after_update(\n db_session, field, before_form, tobe_form, action, current_user)\n\n\nasync def after_delete(db_session, field, before_form, tobe_form, action,\n current_user):\n \"\"\"\n Deletes all files uploaded for current row\n Returns:\n Object: Returns updated value of current field\"\"\"\n del before_form, action, current_user, db_session\n form_guid = str(tobe_form.guid)\n row_guid = str(uuid.UUID(str(tobe_form.row_guid)))\n row_folder = os.path.join(\n ax_misc.uploads_root_dir,\n 'form_row_field_file',\n form_guid,\n row_guid\n )\n\n if os.path.exists(row_folder) is True:\n shutil.rmtree(row_folder)\n\n return field.value\n","repo_name":"Shist/Zhukouski_Pavel_BSU_Projects","sub_path":"Calculation 
methods/CalcMethods_Lab_3_V15_Task_3_15/venv/Lib/site-packages/ax/backend/fields/AxFiles.py","file_name":"AxFiles.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"39"}
+{"seq_id":"69834408115","text":"import numpy as np\nimport math\n\n\ndef RotX(rad):\n\treturn np.array([\t[1,\t\t\t\t\t0,\t\t\t\t\t0\t\t\t\t], \n\t\t\t\t\t\t[0,\t\t\t\t\tmath.cos(rad),\t\t-math.sin(rad)\t],\n\t\t\t\t\t\t[0,\t\t\t\t\tmath.sin(rad),\t\tmath.cos(rad)\t]])\n\ndef RotY(rad):\n\treturn np.array([\t[math.cos(rad),\t\t0,\t\t\t\t\tmath.sin(rad)\t],\n\t\t\t\t\t\t[0,\t\t\t\t\t1,\t\t\t\t\t0\t\t\t\t],\n\t\t\t\t\t\t[-math.sin(rad),\t0,\t\t\t\t\tmath.cos(rad)\t]])\n\ndef RotZ(rad):\n\treturn np.array([\t[math.cos(rad),\t\t-math.sin(rad),\t\t0\t\t\t\t],\n\t\t\t\t\t\t[math.sin(rad),\t\tmath.cos(rad), 0 ],\n\t\t\t\t\t\t[0,\t\t\t\t\t0,\t\t\t\t\t1\t\t\t\t]])\n\nclass PerspectiveCamera:\n\n UNDISTORT_N_ITER = 3\n\n def __init__(self, image_size, intrinsic_parameter, distortion_coefficient, rvec, tvec): \n\n ## public\n # Image size = [width, height]\n self.image_size = image_size\n\n ## private\n # Intrinsic parameter\n self._fx\t= intrinsic_parameter[0]\n self._fy\t= intrinsic_parameter[1]\n self._cx\t= intrinsic_parameter[2]\n self._cy\t= intrinsic_parameter[3]\n self._skew\t= intrinsic_parameter[4]\n \n self._k1\t= distortion_coefficient[0]\n self._k2\t= distortion_coefficient[1]\n self._k3\t= distortion_coefficient[2]\n self._p1\t= distortion_coefficient[3]\n self._p2\t= distortion_coefficient[4]\n \n self._rvec = rvec\n self._tvec = np.reshape(tvec, (3,1))\n \n\n self.InitCameraMatrix()\n \n def InitCameraMatrix(self):\n \n ############################\n ##### Intrinsic matrix #####\n ############################\n self._K = np.array([[self._fx, self._skew, self._cx],\n [0,\t\t\tself._fy,\tself._cy],\n [0,\t\t\t0,\t\t\t1\t\t]])\n \n ############################\n ##### Extrinsic matrix #####\n ############################\n \n Rz = RotZ(math.radians(-90.0))\n Ry = RotY(math.radians(-90.0))\n Rx = RotX(math.radians(180.0))\n \n RzRy = np.matmul(Rz, Ry)\n R_tf = np.matmul(RzRy, Rx)\n \n \n Rz = RotZ(math.radians(self._rvec[2])) # yaw\n Ry = RotY(math.radians(self._rvec[1])) # pitch\n Rx = RotX(math.radians(self._rvec[0])) # roll\n \n RzRy = np.matmul(Rz, Ry)\n R_inst = np.matmul(RzRy, Rx)\n \n self._R = np.matmul(R_tf, R_inst)\n self._t = np.matmul(self._R, (-1) * self._tvec)\n \n \n #############################\n ##### Projection matrix #####\n #############################\n \n Rt = np.concatenate((self._R, self._t), axis=1)\n self._P = np.matmul(self._K, Rt)\n \n \n #############################\n ##### Homography matrix #####\n #############################\n \n P_z0 = np.delete(self._P, 2, axis=1)\n self._H = np.linalg.inv(P_z0)\n \n\n def img2wld(self, img):\n img = self.image_undistort(img)\n img_homo = np.reshape(np.append(img, [1]), (3,1))\n\n wld = np.matmul(self._H, img_homo)\n wld = wld / wld[2]\n wld[2] = 0\n\n return wld\n\n\n def wld2img(self, wld):\n wld_homo = np.reshape(np.append(wld, [1]), (4,1))\n\n img_homo = np.matmul(self._P, wld_homo)\n img_homo = img_homo / img_homo[2]\n img = np.resize(img_homo, (2,1))\n \n img = self.image_distort(img)\n \n return img\n \n \n def image_undistort(self, pt_src):\n pt_norm = self.image_normalize(pt_src)\n\n pt_norm_init = pt_norm\n for iter in range(self.UNDISTORT_N_ITER):\n pt_dist = self.image_distort_normal(pt_norm)\n err = pt_dist - pt_norm_init\n pt_norm -= err\n\n pt_dst = self.image_denormalize(pt_norm)\n\n return pt_dst\n \n \n def image_distort(pt_src):\n pt_norm = self.image_normalize(pt_src)\n pt_dist = self.image_distort_normal(pt_norm)\n pt_dst = self.image_denormalize(pt_dist)\n\n return pt_dst\n\n\n def 
image_normalize(self, pt_img):\n pt_norm = np.zeros((2,1))\n pt_norm[1] = (pt_img[1] - self._cy) / self._fy\n pt_norm[0] = (pt_img[0] - self._cx - self._skew * pt_norm[1]) / self._fx\n\n return pt_norm\n\n\n def image_denormalize(self, pt_norm):\n pt_img = np.zeros((2,1))\n pt_img[0] = self._fx * pt_norm[0] + self._cx + self._skew * pt_norm[1]\n pt_img[1] = self._fy * pt_norm[1] + self._cy\n\n return pt_img\n\n\n def image_distort_normal(self, pt_norm):\n # compute radial distortion\n r2 = pt_norm[0] * pt_norm[0] + pt_norm[1] * pt_norm[1];\n \n alpha = self._k1 * (r2) \\\n + self._k2 * (r2 * r2) \\\n + self._k3 * (r2 * r2 * r2);\n \n # compute tangential distortion\n dxTangential = 2 * self._p1 * pt_norm[0] * pt_norm[1] + self._p2 * (r2 + 2 * pt_norm[0] * pt_norm[0]);\n dyTangential = self._p1 * (r2 + 2 * pt_norm[1] * pt_norm[1]) + 2 * self._p2 * pt_norm[0] * pt_norm[1];\n \n pt_dist = np.zeros((2,1))\n pt_dist[0] = pt_norm[0] + pt_norm[0] * alpha + dxTangential;\n pt_dist[1] = pt_norm[1] + pt_norm[1] * alpha + dyTangential;\n \n return pt_dist\n","repo_name":"Levin-bitsensing/Incident_Detection","sub_path":"Trk_PerspectiveCamera.py","file_name":"Trk_PerspectiveCamera.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"73539938034","text":"import pandas as pd\nimport numpy as np\nimport yaml\nimport os,sys\nimport dill\nfrom housing.exception import HousingException\nfrom housing.constants import *\n\n\ndef read_yaml_file(file_path:str) -> dict:\n try:\n with open(file_path, 'rb') as yaml_file:\n return yaml.safe_load(yaml_file)\n except Exception as e:\n raise HousingException(e,sys) from e\n\ndef load_data(file_path:str, schema_file_path:str) -> pd.DataFrame:\n try:\n dataset_schema = read_yaml_file(schema_file_path)\n schema = dataset_schema[DATASET_SCHEMA_COLUMN_KEY]\n\n df = pd.read_csv(file_path)\n\n error_message = ''\n \n for column in df.columns:\n if column in schema.keys():\n df[column].astype(schema[column])\n else:\n error_message += f\"{column} not present in schema columns\"\n if len(error_message) > 0:\n raise Exception(error_message)\n return df \n\n except Exception as e:\n raise HousingException(e,sys) from e\n\ndef save_numpy_array_data(file_path:str,array:np.array):\n try:\n dir_path = os.path.dirname(file_path)\n os.makedirs(dir_path,exist_ok=True)\n with open(file_path,'wb') as f:\n np.save(f,array)\n except Exception as e:\n raise HousingException(e,sys) from e \n\ndef save_object(file_path:str,obj:str):\n try:\n dir_path = os.path.dirname(file_path)\n os.makedirs(dir_path,exist_ok=True)\n with open(file_path,'wb') as f:\n dill.dump(obj,f)\n except Exception as e:\n raise HousingException(e,sys) from e","repo_name":"Gokulkrishn/Linear-Regression","sub_path":"housing/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"13554050100","text":"def convert_header_values(header_values: tuple) -> tuple:\n \"\"\"\n Convert header values to meaningful coordinates and grid steps.\n\n :param header_values: A tuple containing header values.\n :return: A tuple containing (min_lat, min_lon, max_lon, grid_step_lat, grid_step_lon).\n \"\"\"\n minY, _, minX, maxX, dy, dx, multiplier, _ = header_values\n\n min_lat = minY / multiplier\n min_lon = minX / multiplier\n max_lon = maxX / multiplier\n grid_step_lat = dy / multiplier\n grid_step_lon = dx / multiplier\n\n return min_lat, min_lon, max_lon, grid_step_lat, grid_step_lon\n\n\ndef calculate_offset(header_values: tuple, request_weather_data) -> int:\n \"\"\"\n Calculate the data offset based on header values and request coordinates.\n\n :param header_values: A tuple containing header values.\n :param request_weather_data: An instance of WeatherRequest containing request parameters.\n :return: The calculated offset.\n :raises ValueError: If the provided coordinates are outside the valid range.\n \"\"\"\n minY, minX, maxX, dy, dx = convert_header_values(header_values)\n\n lat_index = int((request_weather_data.lat - minY) / dy)\n lon_index = int((request_weather_data.lon - minX) / dx)\n\n offset = (lat_index * int((maxX - minX) / dx) + lon_index) * 4\n\n return offset\n","repo_name":"ZemtsovOleg/weather_forecast_api","sub_path":"weather_forecast_api/calculate_offset.py","file_name":"calculate_offset.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"21381831674","text":"from typing import List\n\n\nclass Solution:\n def equalizeWater(self, buckets: List[int], loss: int) -> float:\n low, high = min(buckets), max(buckets)\n\n while high - low >= 1e-5:\n middle = low + (high - low) / 2\n\n if self._is_valid(buckets, middle, loss):\n low = middle\n else:\n high = middle\n\n return low\n\n def _is_valid(self, buckets, gallon, loss):\n count = 0\n\n for bucket in buckets:\n if bucket >= gallon:\n count += (bucket - gallon) * (1 - loss / 100)\n else:\n count -= (gallon - bucket)\n\n return count >= 0\n","repo_name":"LeetCode101/LeetCode-Python","sub_path":"leetcode/algorithms/p2137_pour_water_between_buckets_to_make_water_levels_equal.py","file_name":"p2137_pour_water_between_buckets_to_make_water_levels_equal.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"8144866291","text":"import pytest\n\nfrom sklearn.datasets import load_breast_cancer\nfrom src import SubgradientSVMClassifier\n\n\n@pytest.fixture\ndef data():\n return load_breast_cancer(return_X_y=True)\n\n\ndef test_linear_svm(data):\n X, y = data\n svm = SubgradientSVMClassifier()\n svm.fit(X, y)\n # Check fit variables\n assert hasattr(svm, 'classes_')\n assert hasattr(svm, 'coef_')\n assert hasattr(svm, 'history_')\n\n y_pred = svm.predict(X)\n assert y_pred.shape == (X.shape[0],)\n # Assume that the accuracy is higher than 85% (actually ~89%)\n assert svm.score(X, y) > 0.85\n\n\ndef test_kernelized_svm(data):\n X, y = data\n svm = SubgradientSVMClassifier(kernel=\"rbf\")\n svm.fit(X, y)\n y_pred = svm.predict(X)\n\n assert y_pred.shape == (X.shape[0],)\n # Assume that the accuracy is higher than 95% (actually ~98%)\n assert svm.score(X, y) > 0.95\n\n\ndef test_polyak_step(data):\n X, y = data\n svm = SubgradientSVMClassifier(step_size_rule=\"polyak\", alpha=10)\n svm.fit(X, y)\n\n # Assume that the accuracy is higher than 90% (actually ~93%)\n assert svm.score(X, y) > 0.9\n\n svm.set_params(**{\"loss\": \"logistic\"})\n svm.fit(X, y)\n # Assume that the accuracy is higher than 90% (actually ~92%)\n assert svm.score(X, y) > 0.9\n\n svm.set_params(**{\"loss\": \"quadratic\"})\n svm.fit(X, y)\n # Assume that the accuracy is higher than 85% (actually ~88%)\n assert svm.score(X, y) > 0.85\n","repo_name":"ozzwoy/Qualification-Work","sub_path":"src/tests/test_breast_cancer.py","file_name":"test_breast_cancer.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"19811105771","text":"# staff handling\n\nfrom flask import abort\nfrom . import models \n\n\ndef get_all():\n staff = models.Staff.query.all()\n staff_dicts = map(lambda staff: staff.to_dict(), staff)\n if staff is None:\n abort(404, f\"Staff not found\")\n return list(staff_dicts)\n\ndef get_one(id):\n staff = models.Staff.query.filter_by(id=id).first()\n if staff is None:\n abort(404, f\"Staff with identifier {id} not found\")\n return staff.to_dict()\n","repo_name":"appshore/weird-salads-back","sub_path":"app/staff.py","file_name":"staff.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"19878880323","text":"\"\"\"\nLazy Func Descriptor\n--------------------\n\nContains the LazyFuncFieldDescriptor class and function\n\"\"\"\nfrom dataclasses import field\nfrom typing import Any, Callable, Generic, Mapping, Optional, Type, TypeVar\n\nTClass = TypeVar(\"TClass\")\nTRet = TypeVar(\"TRet\")\n\n\nclass LazyFuncFieldDescriptor(Generic[TClass, TRet]):\n \"\"\"\n A class of Descriptor-typed fields to be used in the lazy_func_field function.\n\n https://docs.python.org/3/library/dataclasses.html#descriptor-typed-fields\n \"\"\"\n\n def __init__(self, gen: Callable[[TClass], TRet]):\n self._gen = gen\n\n def __set_name__(self, owner, name):\n ...\n\n def __get__(self, instance: Optional[TClass], t: Type) -> TRet:\n if instance is None:\n # Tell the data class that there is no default value.\n raise AttributeError()\n\n return self._gen(instance)\n\n def __set__(self, instance: Any, value: Any) -> None:\n raise NotImplementedError()\n\n\ndef lazy_func_field(\n value_generator: Callable[[TClass], TRet],\n repr: bool = True,\n hash: Optional[bool] = None,\n compare: bool = False,\n metadata: Optional[Mapping] = None,\n) -> TRet:\n \"\"\"\n Allows you to define a dataclass field which returns the value of a function evaluated (lazily) upon accessing the\n attribute.\n\n These fields are read-only.\n\n We lie a bit about the return type so that pyright will be happy type checking our functions.\n \"\"\"\n return field(\n init=False,\n default=LazyFuncFieldDescriptor(value_generator),\n repr=repr,\n hash=hash,\n compare=compare,\n metadata=metadata,\n ) # type: ignore\n","repo_name":"GSS-Cogs/csvcubed","sub_path":"src/csvcubed/inspect/lazyfuncdescriptor.py","file_name":"lazyfuncdescriptor.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"39"}
+{"seq_id":"37879959449","text":"# 给定一个已排序的正整数数组 nums ,和一个正整数 n 。从 [1, n] 区间内选取任意个数字补充到 nums 中,使得 [1, n] 区间内的任何数字\n# 都可以用 nums 中某几个数字的和来表示。\n#\n# 请返回 满足上述要求的最少需要补充的数字个数 。\n#\n#\n#\n# 示例 1:\n#\n#\n# 输入: nums = [1,3], n = 6\n# 输出: 1\n# 解释:\n# 根据 nums 里现有的组合 [1], [3], [1,3],可以得出 1, 3, 4。\n# 现在如果我们将 2 添加到 nums 中, 组合变为: [1], [2], [3], [1,3], [2,3], [1,2,3]。\n# 其和可以表示数字 1, 2, 3, 4, 5, 6,能够覆盖 [1, 6] 区间里所有的数。\n# 所以我们最少需要添加一个数字。\n#\n# 示例 2:\n#\n#\n# 输入: nums = [1,5,10], n = 20\n# 输出: 2\n# 解释: 我们需要添加 [2,4]。\n#\n#\n# 示例 3:\n#\n#\n# 输入: nums = [1,2,2], n = 5\n# 输出: 0\n#\n#\n#\n#\n# 提示:\n#\n#\n# 1 <= nums.length <= 1000\n# 1 <= nums[i] <= 10⁴\n# nums 按 升序排列\n# 1 <= n <= 2³¹ - 1\n#\n#\n# Related Topics 贪心 数组\n# 👍 354 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def minPatches(self, nums: List[int], n: int) -> int:\n cnt = 0\n lst = [1]\n i = 0\n mx_sum = 0\n while mx_sum < n:\n if i < len(nums):\n if nums[i] <= mx_sum + 1:\n mx_sum += nums[i]\n i += 1\n else:\n mx_sum += mx_sum + 1\n cnt += 1\n else:\n mx_sum += mx_sum + 1\n cnt += 1\n return cnt\n\n# leetcode submit region end(Prohibit modification and deletion)\n","repo_name":"aa694849243/leetcode_cj","sub_path":"301-400/330. 按要求补齐数组.py","file_name":"330. 按要求补齐数组.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"31339358950","text":"import sys\nfrom collections import deque\nIN = sys.stdin.read()\n\nG = [list(line) for line in IN.strip().splitlines()]\nQp1 = deque(((r, c), 0) for r in range(len(G)) for c in range(len(G[r])) if G[r][c] in [\"S\"])\nQp2 = deque(((r, c), 0) for r in range(len(G)) for c in range(len(G[r])) if G[r][c] in [\"a\", \"S\"])\n\n\ndef solve(Q, G):\n seen = set()\n while Q:\n (r, c), step = Q.popleft()\n if G[r][c] == \"E\":\n return step\n if (r, c) in seen:\n continue\n seen.add((r, c))\n for dr, dc in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n if 0 <= dr+r < len(G) and 0 <= dc+c < len(G[0]):\n pos = ord(\"a\") if G[r][c] == \"S\" else ord(G[r][c])\n nbr = ord(\"z\") if G[dr+r][dc+c] == \"E\" else ord(G[dr+r][dc+c])\n if pos - nbr >= -1:\n Q.append(((dr+r, dc+c), step + 1))\n return False\n\nprint(solve(Qp1, G))\nprint(solve(Qp2, G))\n","repo_name":"kim-wexler/adventofcode","sub_path":"2022/day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"75141570032","text":"from collections import deque\ndef bfs(v):\n q = deque()\n q.append(v)\n visit[v] = 1\n while q:\n v = q.popleft()\n for e in check[v]:\n if not visit[e]:\n visit[e] = 1\n q.append(e)\n\nN, M = map(int,input().split())\ncheck = [[] for _ in range(N+1)]\nvisit = [0] * (N+1)\nfor m in range(M):\n A, B = map(int,input().split())\n check[A].append(B)\n check[B].append(A)\n\ncnt = 0\nfor i in range(1,N+1):\n if visit[i] == 0:\n bfs(i)\n cnt += 1\n\nprint(cnt)","repo_name":"rlfslf111/Algorithm","sub_path":"백준_11724_연결 요소의 개수_길민규.py","file_name":"백준_11724_연결 요소의 개수_길민규.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"11004952304","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\n# 사용자 정의 함수부\ndef buildGUI():\n global check # 이벤트 핸들러에서 접근 위해\n check = tk.IntVar() # 체크 상태 저장 위해\n check_btn = ttk.Checkbutton(win, text='옵션을 선택하세요',\n variable=check,\n command=open_dialog_box)\n \n check_btn.pack()\n\ndef open_dialog_box():\n if check.get() == 1:\n messagebox.showinfo('확인', '옵션 선택')\n else:\n messagebox.showinfo('확인', '옵션 해제')\n\n# 주 프로그램부\nwin = tk.Tk() # 기본 윈도우 객체 반환\nwin.title('버튼 위젯 예')\nbuildGUI() # 화면 구성\nwin.mainloop() # 윈도우에서 다양한 이벤트 처리 시작을 지시\n","repo_name":"tkeenaver/pykosmes","sub_path":"tkinter/tkinter14.py","file_name":"tkinter14.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"5628242177","text":"import os\nimport json\nimport numpy as np\nimport pickle\n\nclass WordTable():\n def __init__(self, dim_embed, max_sent_len, save_file):\n self.idx2word = []\n self.word2idx = {}\n self.word2vec = {}\n self.word_freq = []\n self.num_words = 0\n self.dim_embed = dim_embed\n self.max_sent_len = max_sent_len\n self.save_file = save_file\n\n def load_glove(self, glove_dir):\n self.word2vec = {}\n glove_file = os.path.join(glove_dir, 'glove.6B.'+str(self.dim_embed)+'d.txt')\n print(\"Loading Glove data from %s\" %(glove_file))\n\n with open(glove_file) as f:\n for line in f:\n l = line.split()\n self.word2vec[l[0]] = [float(x) for x in l[1:]]\n\n print(\"Glove data loaded\")\n\n def build(self, sentences):\n word_count = {}\n for sent in sentences:\n for w in sent.lower().split(' '):\n word_count[w] = word_count.get(w, 0) + 1\n if w not in self.word2vec:\n self.word2vec[w] = np.random.uniform(0.0, 1.0, (self.dim_embed))\n\n sorted_word_count = sorted(list(word_count.items()), key=lambda x: x[1], reverse=True) \n self.num_words = len(sorted_word_count)\n\n for idx in range(self.num_words):\n word, freq = sorted_word_count[idx]\n self.idx2word.append(word)\n self.word2idx[word] = idx\n self.word_freq.append(freq * 1.0)\n\n self.word_freq = np.array(self.word_freq)\n self.word_freq /= np.sum(self.word_freq)\n self.word_freq = np.log(self.word_freq)\n self.word_freq -= np.max(self.word_freq)\n\n self.filter_word2vec()\n\n def filter_word2vec(self):\n word2vec = {}\n for w in self.word2idx:\n word2vec[w] = self.word2vec[w] \n self.word2vec = word2vec\n\n def symbolize_sent(self, sent):\n indices = np.zeros(self.max_sent_len).astype(np.int32)\n masks = np.zeros(self.max_sent_len)\n words = np.array([self.word2idx[w] for w in sent.lower().split(' ')])\n indices[:len(words)] = words\n masks[:len(words)] = 1.0\n return indices, masks\n\n def indices_to_sent(self, indices):\n words = [self.idx2word[i] for i in indices]\n if words[-1] != '.':\n words.append('.')\n punctuation = np.argmax(np.array(words) == '.') + 1\n words = words[:punctuation]\n res = ' '.join(words)\n res = res.replace(' .', '.')\n return res\n\n def save(self):\n pickle.dump([self.idx2word, self.word2idx, self.word2vec, self.word_freq, self.num_words], open(self.save_file, 'wb'))\n\n def load(self):\n self.idx2word, self.word2idx, self.word2vec, self.word_freq, self.num_words = pickle.load(open(self.save_file, 'rb'))\n\n","repo_name":"bityangke/image_captioning","sub_path":"utils/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"39"}
+{"seq_id":"39397853886","text":"'''\nFigures out the command from user input and writes it to logfile\n'''\n\nimport os\nfrom dotenv import load_dotenv\nfrom analysis import get_score\nfrom setup import setup_dependency\n\nload_dotenv()\nLOG_LEVEL = os.getenv(\"LOG_LEVEL\")\nLOG_FILE = os.getenv(\"LOG_FILE\")\n\ndef get_command(input_command):\n '''\n Get's input command from user\n '''\n command_dict = {'install': \"INSTALL\", 'test': 'TEST'}\n\n # Any string besides \"install\" and \"test\" may be a url set\n command_dict.setdefault(input_command, \"URL_SET\")\n\n return command_dict.get(input_command)\n\ndef figure_out_command(user_input):\n '''\n Interpretes input command from user\n '''\n user_command = get_command(user_input)\n if user_command == \"INSTALL\":\n try:\n print(\"starting install\")\n setup_dependency()\n except RuntimeError:\n print(\"Dependency does not install correctly\")\n\n if user_command == \"URL_SET\":\n try:\n print(\"starting scoring with \" + user_input)\n get_score(user_input, {\"test\": 1})\n\n except RuntimeError:\n print(\"Score evaluation does not work correctly\")\n\n if user_command == \"TEST\":\n print(\"starting tests\")\n\n return 0\n\ndef find_log_mode():\n '''\n Finds the log mode in the log level dictionary\n '''\n log_level_dic = {'0': 'SILENT', '1': 'NORMAL', '2': 'DEBUG'}\n return log_level_dic.get(LOG_LEVEL)\n\ndef write_log_file():\n '''\n Writes to the log file\n '''\n current_mode = find_log_mode()\n if current_mode == 'NORMAL':\n print(\"log mode: Normal\")\n elif current_mode == 'DEBUG':\n print(\"log mode: debug\")\n","repo_name":"Purdue-ECE-461/project-2-project-2-16","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"21033678497","text":"import numpy as np\nimport glob\nimport os\nimport configparser\nimport shutil\nimport random\nimport requests\nimport json\nimport logging\nimport requests\nimport json\nimport sys\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ndef get_filenames(folder):\n filenames = set()\n\n for path in glob.glob(os.path.join(folder, '*.txt')):\n filename = os.path.split(path)[-1]\n filenames.add(filename)\n\n return filenames\n\ndef download_images(endpoint_url, image_filenames):\n session = requests.Session()\n\n total_images = len(image_filenames)\n digit_len = len(str(total_images)) # For pretty status updates\n\n logger.info(\"Downloading missing images:\")\n for idx,image_filename in enumerate(image_filenames):\n url = f'{endpoint_url}/{image_filename}'\n destination = f'download/images/{image_filename}'\n if not os.path.isfile(destination):\n try:\n response = session.get(url, timeout=5)\n if response.ok:\n with open(destination, 'wb') as f:\n f.write(response.content)\n logger.info(f\"[{idx:{digit_len}d}/{total_images}] {image_filename}\")\n else:\n logger.error(f\"Failed to download {url}. HTTP status code: {response.status_code}\")\n except requests.exceptions.RequestException as err:\n logger.error(f\"Error occurred: {err}\")\n raise SystemExit(\"Timeout occurred. Exiting the program.\")\n else:\n logger.info(f\"[{idx:{digit_len}d}/{total_images}] {image_filename} (Cached)\")\n\nrandom.seed(42)\n\ndef shuffle_and_split(sources, train_ratio=0.7):\n # Separate sources into empty and non-empty\n empty_sources = [source for source in sources if source is None ]\n non_empty_sources = [source for source in sources if source is not None]\n\n # Shuffle and split only the non-empty sources\n random.shuffle(non_empty_sources)\n split_index = int(train_ratio * len(non_empty_sources))\n\n # Append empty sources to the training set\n train_sources = non_empty_sources[:split_index] + empty_sources\n val_sources = non_empty_sources[split_index:]\n\n return train_sources, val_sources\n\ndef copy_files(filenames, src_folder, dst_folder):\n for filename in filenames:\n file_extension = '.png' if 'images' in src_folder else '.txt'\n src_file = os.path.join(src_folder, filename + file_extension)\n dest_file = os.path.join(dst_folder, filename + file_extension)\n shutil.copyfile(src_file, dest_file)\n\nconfig = configparser.ConfigParser()\nconfig.read('frameextractor.ini')\n\nENDPOINT_URL = config['S3']['ENDPOINT_URL']\n\nif len(sys.argv) != 2:\n print('Usage: python split_data.py downloads/')\n sys.exit(1)\n\njson_file = sys.argv[1]\n\nwith open(json_file, 'r') as file:\n uuid_source_pairs = json.load(file)\n\nlabel_filenames = get_filenames('download/labels')\nlabel_filenames = np.array(list(label_filenames))\n\n# Check and download missing images\nimage_filenames = [filename.replace('.txt', '.png') for filename in label_filenames]\ndownload_images(ENDPOINT_URL, image_filenames)\n\n# Fetch source per uuid\nuuids = set([filename.replace('.txt', '') for filename in label_filenames])\n\n# Group filenames by source\nuuid_source_pairs = {uuid: source for uuid, source in uuid_source_pairs.items() if uuid in uuids}\n\nsource_to_filenames = {}\nfor uuid, source in uuid_source_pairs.items():\n if source not in source_to_filenames:\n source_to_filenames[source] = []\n source_to_filenames[source].append(uuid)\n\n# Split sources\nsources = list(source_to_filenames.keys())\ntrain_sources, val_sources = shuffle_and_split(sources)\n\nif not 
os.path.exists('data'):\n for folder in ['images', 'labels']:\n for split in ['train', 'val']:\n os.makedirs(f'data/{folder}/{split}')\n\n# Copy files to train/val folders\nfor source in train_sources:\n copy_files(source_to_filenames[source], 'download/labels', 'data/labels/train')\n copy_files(source_to_filenames[source], 'download/images', 'data/images/train')\n\nfor source in val_sources:\n copy_files(source_to_filenames[source], 'download/labels', 'data/labels/val')\n copy_files(source_to_filenames[source], 'download/images', 'data/images/val')\n","repo_name":"iank/catflow-train","sub_path":"split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"14935346275","text":"import tempfile\nfrom typing import Callable, List\nfrom icolos.core.containers.generic import GenericData\nfrom icolos.core.containers.gmx_state import GromacsState\nfrom icolos.utils.enums.execution_enums import ExecutionPlatformEnum\nfrom icolos.utils.enums.step_enums import StepGromacsEnum\nfrom icolos.utils.enums.program_parameters import GromacsEnum\nfrom icolos.core.workflow_steps.gromacs.base import StepGromacsBase\nfrom pydantic import BaseModel\nfrom icolos.core.workflow_steps.step import _LE\nimport os\n\nfrom icolos.utils.execute_external.gromacs import GromacsExecutor\nfrom icolos.utils.general.parallelization import Parallelizer, SubtaskContainer\n\n_GE = GromacsEnum()\n_SGE = StepGromacsEnum()\n_ERE = ExecutionPlatformEnum\n\n\nclass StepGMXMDrun(StepGromacsBase, BaseModel):\n \"\"\"\n Launch gmx mdrun\n \"\"\"\n\n topol: GromacsState = None\n\n def __init__(self, **data):\n super().__init__(**data)\n\n self._initialize_backend(executor=GromacsExecutor)\n self._check_backend_availability()\n\n def _get_log_file(self, tmp_dir):\n \"\"\"\n Find and parse the log file\n \"\"\"\n log_file = [f for f in os.listdir(tmp_dir) if f.endswith(\".log\")]\n assert len(log_file) == 1\n with open(os.path.join(tmp_dir, log_file[0]), \"r\") as f:\n data = f.readlines()\n return data\n\n def _tail_log_file(self, tmp_dir):\n \"\"\"\n Log the last 50 lines of the log file to capture performance metrics from the run\n\n \"\"\"\n log_file = self._get_log_file(tmp_dir)\n\n for line in log_file[-50:]:\n self._logger_blank.log(line, _LE.INFO)\n\n def execute_mdrun(self, path: str, index: int):\n \"\"\"\n Make a single call to mdrun\n \"\"\"\n flag_dict = (\n {\n \"-s\": _SGE.STD_TPR,\n \"-c\": _SGE.STD_STRUCTURE,\n \"-e\": _SGE.STD_EDR,\n \"-cpo\": _SGE.STD_CPT,\n \"-x\": _SGE.STD_XTC,\n }\n if not self.data.generic.get_files_by_extension(\"cpt\")\n else {\"-cpi\", os.path.join(path, \"state.cpt\")}\n )\n\n arguments = self._parse_arguments(flag_dict)\n self._backend_executor.execute(\n command=_GE.MDRUN, arguments=arguments, location=path, check=True\n )\n\n def execute_parallel_simulations(self, work_dirs, run_func: Callable):\n # attach the index of the workdir\n work_dirs = [(idx, wkdir) for idx, wkdir in enumerate(work_dirs)]\n self._subtask_container = SubtaskContainer(\n max_tries=self.execution.failure_policy.n_tries\n )\n self._subtask_container.load_data(work_dirs)\n parallelizer = Parallelizer(func=run_func)\n n = 1\n while self._subtask_container.done() is False:\n next_batch = self._get_sublists(get_first_n_lists=self._get_number_cores())\n _ = [sub.increment_tries() for element in next_batch for sub in element]\n _ = [sub.set_status_failed() for element in next_batch for sub in element]\n paths, indices = self.prepare_jobs(next_batch)\n parallelizer.execute_parallel(path=paths, index=indices)\n n += 1\n\n def prepare_jobs(self, batch) -> List[tuple]:\n paths, indices = [], []\n for task in batch:\n for element in task:\n # tuple of (idx, dirpath)\n paths.append(element.data[1])\n indices.append(element.data[0])\n return paths, indices\n\n def run_single_tpr(self, tmp_dir: str):\n \"\"\"\n Normal gmx mdrun call, if multiple structures are loaded into the topology, run them in parallel according to the parallelizer settings\n \"\"\"\n # if we have multiple structures, run the simulations externally, in parallel\n work_dirs = [tempfile.mkdtemp(dir=tmp_dir) for _ in range(len(self.topol.tprs))]\n\n # prepare tmpdirs with tpr files\n for path, tpr in 
zip(work_dirs, self.topol.tprs.values()):\n tpr.write(path)\n\n # if > 1, instantiate a parallelizer, load the paths in and execute in parallel, user should be using the slurm/SGE interface to request extern resources\n if len(work_dirs) > 1:\n self.execute_parallel_simulations(work_dirs, run_func=self.execute_mdrun)\n else:\n tmp_dir = work_dirs[0]\n self.execute_mdrun(tmp_dir, index=0)\n\n # now parse the outputs\n for index, path in enumerate(work_dirs):\n # set a structure other than confout.gro e.g. if a pdb output has been set\n struct = (\n self.settings.arguments.parameters[\"-c\"]\n if \"-c\" in self.settings.arguments.parameters.keys()\n else _SGE.STD_STRUCTURE\n )\n self.topol.set_structure(path, file=struct, index=index)\n self.topol.set_trajectory(path, index=index)\n self.topol.set_log(path, index=index)\n self.topol.set_edr(path, index=index)\n try:\n self.topol.set_cpt(path, index=index)\n except FileNotFoundError:\n self._logger.log(\"No checkpoint file generated\", _LE.DEBUG)\n\n def run_multidir_sim(self, tmp_dir: str):\n \"\"\"\n Runs a multidir simulation, allowing for replex simulations. Several conditions are required for this running mode\n 1) the previous step in the workflow should have been an iterator to produce n tpr files. This must have been run with single_dir mode ON and remove_temprorary_files OFF, so we can extract files from those workflows' tmpdirs\n \"\"\"\n if not self.execution.platform == _ERE.SLURM:\n self._logger.log(\n \"WARNING: Running HREX simulation without external resources! Normally this should be run as a separate batch job\",\n _LE.WARNING,\n )\n\n # extract the tprs from the topol object, write to separate tmpdirs\n work_dirs = [tempfile.mkdtemp(dir=tmp_dir) for _ in range(len(self.topol.tprs))]\n self._logger.log(\n f\"Initiating gmx multidir run in directories {', '.join(work_dirs)}\",\n _LE.DEBUG,\n )\n for path, tpr in zip(work_dirs, self.topol.tprs.values()):\n tpr.write(path)\n\n # note, this must be a multiple of the number of simulations\n tasks = self.execution.resources.tasks\n # map the PP and PME tasks to the GPUs\n\n command = f\"mpirun -np {tasks} gmx_mpi mdrun -multidir {' '.join(work_dirs)}\"\n arguments = self._parse_arguments(flag_dict={\"-x\": _SGE.STD_XTC})\n self._backend_executor.execute(\n command=command, arguments=arguments, location=tmp_dir, check=True\n )\n # udpate the structures to the new coordinates\n for i, work_dir in enumerate(work_dirs):\n self.topol.set_structure(work_dir, index=i)\n self.topol.set_trajectory(work_dir, index=i)\n self.topol.set_tpr(work_dir, index=i)\n self.topol.set_log(work_dir, index=i)\n self.topol.set_edr(path, index=i)\n try:\n self.topol.set_cpt(path, index=i)\n except FileNotFoundError:\n self._logger.log(\"No checkpoint file generated\", _LE.DEBUG)\n\n def execute(self):\n\n tmp_dir = self._make_tmpdir()\n self.topol = self.get_topol()\n if self.data.generic.get_files_by_extension(\"cpt\"):\n # a cpt file has been passed, simply restart\n print(self.data.generic.get_file_names_by_extension(\"gpt\"))\n self._run_checkpoint_files(self.data.generic.get_files_by_extension(\"cpt\"))\n\n self.execution.parallelization.max_length_sublists = 1\n # pickle the topol to the mdrun dir, if something goes wrong/the job dies, the workflow can be picked up where we left off by unpickling the topology object\n self.pickle_topol(self.topol, tmp_dir)\n multidir = self._get_additional_setting(_SGE.MULTIDIR, default=False)\n if multidir:\n self.run_multidir_sim(tmp_dir)\n else:\n 
self.run_single_tpr(tmp_dir)\n self._remove_temporary(tmp_dir)\n","repo_name":"MolecularAI/Icolos","sub_path":"src/icolos/core/workflow_steps/gromacs/mdrun.py","file_name":"mdrun.py","file_ext":"py","file_size_in_byte":8171,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"39"}
+{"seq_id":"17857791798","text":"''' subscriber '''\nimport logging\n\nimport requests\nfrom flask import (Blueprint, jsonify, make_response, redirect,\n render_template, request, session, url_for)\nfrom flask.wrappers import Response\nfrom werkzeug.wrappers import Response as ResponseBase\n\nimport setting\nfrom celery_task.task_mail_sys import mail_login_code\nfrom models.subscriberdb import SubscriberLoginTokenDB\nfrom module.subscriber import Subscriber\n\nVIEW_SUBSCRIBER = Blueprint('subscriber', __name__, url_prefix='/subscriber')\n\n\n@VIEW_SUBSCRIBER.route('/infomsg')\ndef info_msg() -> ResponseBase:\n ''' info message '''\n if request.args.get('all'):\n show_info = ('000', )\n else:\n show_info = session.get('show_info', [])\n\n return Response(\n response=render_template(\n './subscriber_error.html', show_info=show_info),\n status=session.get('status_code', 401),\n mimetype='text/html',\n )\n\n\n@VIEW_SUBSCRIBER.route('/code/', methods=('GET', 'POST'))\ndef code_page(code: str) -> str | ResponseBase:\n ''' code page '''\n # pylint: disable=too-many-return-statements\n if request.method == 'GET':\n return render_template('./subscriber.html')\n\n if request.method == 'POST':\n resp = requests.post('https://hcaptcha.com/siteverify',\n timeout=10,\n data={'response': request.form['h-captcha-response'],\n 'secret': setting.HCAPTCHA_TOKEN,\n 'remoteip': request.headers.get('X-REAL-IP')}).json()\n\n logging.info('hcaptcha: %s', resp)\n\n if not (resp['success'] and resp['hostname'] == 'coscup.org'):\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n ser = Subscriber(mail=request.form['mail'])\n if not ser or not ser.data:\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n if not ser.data['status']:\n session['show_info'] = ('008', )\n session['status_code'] = 200\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n if ser.verify_admin_code(code):\n mail_login_code.apply_async(kwargs={'mail': ser.data['_id']})\n\n session['show_info'] = ('002', )\n session['status_code'] = 200\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n return ''\n\n\n@VIEW_SUBSCRIBER.route('/token/')\ndef token_code(code: str) -> str | ResponseBase:\n ''' token '''\n if request.method == 'GET':\n ser = Subscriber.verify_login(_type='code', code=code)\n if not ser or not ser.data: # type: ignore\n session['show_info'] = ('003', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n Subscriber.make_code_disabled(code=code)\n session['s_login_token'] = ser.make_login('token') # type: ignore\n\n return redirect(url_for('subscriber.intro', _scheme='https', _external=True))\n\n return ''\n\n\n@VIEW_SUBSCRIBER.route('/verify_mail/', methods=('GET', 'POST'))\ndef verify_mail(code: str) -> str | ResponseBase:\n ''' verify mail '''\n # pylint: disable=too-many-return-statements\n if request.method == 'GET':\n token = SubscriberLoginTokenDB().find_one({'_id': code})\n if not token:\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n ser = 
Subscriber(mail=token['uni_mail'])\n if not ser or not ser.data:\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n if ser.data['verified_email']:\n session['show_info'] = ('005', )\n session['status_code'] = 200\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n return render_template('./subscriber_verify_mail.html')\n\n if request.method == 'POST':\n if not request.form['iamok']:\n return make_response({}, 401)\n\n resp = requests.post('https://hcaptcha.com/siteverify',\n timeout=10,\n data={'response': request.form['h-captcha-response'],\n 'secret': setting.HCAPTCHA_TOKEN,\n 'remoteip': request.headers.get('X-REAL-IP')}).json()\n\n logging.info('hcaptcha: %s', resp)\n\n if not (resp['success'] and resp['hostname'] == 'coscup.org'):\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n token = SubscriberLoginTokenDB().find_one({'_id': code})\n if not token:\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n ser = Subscriber(mail=token['uni_mail'])\n if not ser or not ser.data:\n session['show_info'] = ('001', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n ser.verify_login(_type='verify_mail', code=code)\n\n session['show_info'] = ('005', )\n session['status_code'] = 200\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n return ''\n\n\n@VIEW_SUBSCRIBER.route('/intro', methods=('GET', 'POST'))\ndef intro() -> str | ResponseBase:\n ''' intro '''\n # pylint: disable=too-many-return-statements\n if 's_login_token' not in session:\n session['show_info'] = ('003', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n user = Subscriber.verify_login(\n _type='token', code=session['s_login_token'])\n if not user or not user.data: # type: ignore\n session['show_info'] = ('003', )\n session['status_code'] = 404\n return redirect(url_for('subscriber.info_msg', _scheme='https', _external=True))\n\n if request.method == 'GET':\n return render_template('./subscriber_intro.html')\n\n if request.method == 'POST':\n post_data = request.get_json()\n\n if 'casename' not in post_data:\n return make_response({}, 401)\n\n if post_data['casename'] == 'get':\n user_data = user.data # type:ignore\n user_login_token_data = user.login_token_data # type:ignore\n data = {\n 'name': user_data['name'],\n 'mails': user_data['mails'],\n 'login_since': user_login_token_data['created_at'],\n 'unsubscribe': not user_data['status'],\n }\n return jsonify({'data': data})\n\n if post_data['casename'] == 'update':\n data = post_data['data']\n\n update = {}\n update['name'] = data['name'].strip()\n update['status'] = not bool(data['unsubscribe'])\n\n user.update_date(data=update) # type: ignore\n\n return jsonify({})\n\n return ''\n\n\n@VIEW_SUBSCRIBER.route('/clean', methods=('GET', 'POST'))\ndef clean() -> ResponseBase:\n ''' clean '''\n session.pop('s_login_token', None)\n\n session['show_info'] = ('004', )\n session['status_code'] = 200\n return redirect(url_for('subscriber.info_msg', _scheme='https', 
_external=True))\n","repo_name":"COSCUP/subscribe","sub_path":"view/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"39"}
+{"seq_id":"75141197232","text":"def bfs(N,M,cnt):\n visit = set()\n current = set()\n current.add(N)\n\n while 1:\n if M in current:\n return cnt\n else:\n cnt += 1\n tmp = []\n while current:\n tmp_cur = current.pop()\n tmp.append(tmp_cur)\n visit.add(tmp_cur)\n for i in range(len(tmp)):\n if tmp[i] + 1 not in visit:\n if tmp[i] + 1 <= 1000000:\n current.add(tmp[i]+1)\n if tmp[i] - 1 not in visit:\n if tmp[i] - 1 > 0:\n current.add(tmp[i]-1)\n if tmp[i] * 2 not in visit:\n if tmp[i] * 2 <= 1000000:\n current.add(tmp[i]*2)\n if tmp[i] - 10 not in visit:\n if tmp[i] - 10 > 0:\n current.add(tmp[i]-10)\n\nfor t in range(1,int(input())+1):\n N, M = map(int,input().split())\n ans = bfs(N,M,0)\n print('#{} {}'.format(t,ans))","repo_name":"rlfslf111/Algorithm","sub_path":"5247_연산_길민규.py","file_name":"5247_연산_길민규.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"74202229554","text":"def solution(n, stations, w):\n answer = 0\n\n scope = (2*w) + 1\n\n start = 1\n for i in stations:\n between = (i-w) - start\n\n if between < 1:\n pass\n else:\n need = between // scope\n if between % scope == 0:\n answer += need\n else:\n answer += need + 1\n\n start = i+w+1\n \n if start <= n:\n between = n - start\n \n if between == 0:\n return answer + 1\n else:\n need = between // scope\n\n if between % scope == 0:\n return answer + need\n else:\n return answer + need + 1\n\n return answer\n\nif __name__ == \"__main__\":\n print(solution(11, [4,11], 1))","repo_name":"hooong/baekjoon","sub_path":"programmers/setupbasestation.py","file_name":"setupbasestation.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"32922259862","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: jan schuetzke (iai)\n\nDefinitions of different Neural Network Architecture\n\"\"\"\n\nfrom tensorflow.keras import layers, optimizers\nfrom tensorflow.keras.models import Model\n\nimport models.basic_blocks as blocks\n\ndef vgg16(in_dim=3250, dropout_val=0.2, classes=28, opt=optimizers.Adam(lr=0.0005),\n trainable='all'):\n input_layer = layers.Input(shape=(in_dim, 1), name=\"input\")\n cb = blocks.get_conv1d_block(input_layer, shrink=False, kernel=5,\n filters_conv=6, block_name='cb1',\n mp_size=2, mp_stride=2)\n cb = layers.Dropout(dropout_val, name='dropout_cb1')(cb)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=16, block_name='cb2_1',\n pooling=False)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=16, block_name='cb2_2',\n mp_size=2, mp_stride=2)\n cb = layers.Dropout(dropout_val, name='dropout_cb2')(cb)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=32, block_name='cb3_1',\n pooling=False)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=32, block_name='cb3_2',\n mp_size=2, mp_stride=2)\n cb = layers.Dropout(dropout_val, name='dropout_cb3')(cb)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=64, block_name='cb4_1',\n pooling=False)\n cb = blocks.get_conv1d_block(cb, shrink=False, kernel=5,\n filters_conv=64, block_name='cb4_2',\n mp_size=2, mp_stride=2)\n cb = layers.Dropout(dropout_val, name='dropout_cb4')(cb)\n out = layers.Flatten(name='flat')(cb)\n out = layers.Dense(120, activation='relu',\n kernel_initializer='he_uniform',\n name='dense0')(out)\n out = layers.Dense(84, activation='relu',\n kernel_initializer='he_uniform',\n name='dense1')(out)\n out = layers.Dense(186, activation='relu',\n kernel_initializer='he_uniform',\n name='dense2')(out)\n last = layers.Dense(classes, activation='softmax', \n name='output')(out)\n model = Model(inputs=input_layer, outputs=last)\n if trainable == 'all':\n pass\n elif trainable == 'last':\n for i in range(len(model.layers)-5):\n model.layers[i].trainable = False\n elif trainable == 'final':\n for i in range(len(model.layers)-1):\n model.layers[i].trainable = False\n else:\n raise ValueError(f'option {trainable} for trainable not recognized!')\n model.compile(optimizer=opt, loss='categorical_crossentropy', \n metrics=['categorical_accuracy'])\n return model","repo_name":"jschuetzke/siamese-1d","sub_path":"models/classifiers.py","file_name":"classifiers.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"33764305173","text":"import sys, os, importlib, importlib.machinery, pdb, traceback, subprocess, os.path\n\n\ndef load_scripts(rel_path: str, module_name: str, file: str):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += rel_path\n sys.path.append(dir_path)\n loader = importlib.machinery.SourceFileLoader(\n module_name, os.path.join(dir_path, file)\n )\n module = loader.load_module()\n return module\n","repo_name":"ducpham-indi/ResearchComponent","sub_path":"gameloader.py","file_name":"gameloader.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"29252093868","text":"#!/usr/bin/env python3\n\n# TODO after setting leverage set all pairs to \"Order by Value\"\nfrom os import name\nfrom time import sleep\nimport json\nimport requests\nimport asyncio\nfrom pybit.unified_trading import WebSocket\nfrom pybit.unified_trading import HTTP\nfrom botSettings import *\nimport ccxt\nfrom pprint import pprint\n\nHEADER = '\\033[95m'\nOKBLUE = '\\033[94m'\nOKCYAN = '\\033[96m'\nOKGREEN = '\\033[92m'\nWARNING = '\\033[93m'\nFAIL = '\\033[91m'\nENDC = '\\033[0m'\nBOLD = '\\033[1m'\nUNDERLINE = '\\033[4m'\n\nblacklist = BLACKLIST.split(\",\")\nwhitelist = []\n\ndef handle_message(message):\n if checkIfTradable(message):\n print(\"Placing order ...\")\n placeOrder(message)\n else:\n print(\"Liquidated volume to low !\")\n\nws = WebSocket(\n testnet=False,\n channel_type=\"linear\" \n)\n\nsession = HTTP(\n testnet=False,\n api_key=API_KEY,\n api_secret=API_SECRET,\n)\n\nexchange_id = 'bybit'\nexchange_class = getattr(ccxt, exchange_id)\nexchange = exchange_class({\n 'apiKey': API_KEY,\n 'secret': API_SECRET,\n})\n\n#exchange.set_sandbox_mode(True) # activates testnet mode\n#exchange.options['defaultType'] = 'swap'\nmarkets = exchange.load_markets()\n\n#just a ccxt test call\nret = exchange.fetchBalance ()\nprint(ret['USDT'])\n\ndef get_symbols():\n print(\"Fetching USDT symbols ...\")\n response = requests.get(\"https://api.bybit.com/v2/public/symbols\")\n response.raise_for_status()\n data = response.json()\n usdt_symbols = [symbol for symbol in data[\"result\"] if symbol[\"quote_currency\"] == \"USDT\"]\n print(\"Done !\")\n return usdt_symbols\n\n\ndef get_ticker_info(symbol_name):\n response = requests.get(f\"https://api.bybit.com/v2/public/tickers?symbol={symbol_name}\")\n response.raise_for_status()\n return response.json()[\"result\"][0]\n\n\ndef min_order_value(current_price, min_order_size):\n return current_price * min_order_size\n\n\ndef getCoinsToTrade(usdt_symbols):\n line = f'Only coins with a price under {TRADE_COIN_MAX_ORDER_VALUE} USDT will be traded!'\n print(line)\n print(\"Fetching USDT symbols to trade ...\")\n liquidationCandidates = []\n\n for symbol in usdt_symbols:\n ticker_info = get_ticker_info(symbol[\"name\"])\n current_price = float(ticker_info[\"last_price\"])\n min_order_size = float(symbol[\"lot_size_filter\"][\"min_trading_qty\"])\n min_order_value_result = min_order_value(current_price, min_order_size)\n \n if min_order_value_result < TRADE_COIN_MAX_ORDER_VALUE:\n line = f'{symbol[\"name\"]}, {current_price}, {min_order_size}, {min_order_value_result:.6f}'\n print(line)\n liquidationCandidates.append(symbol[\"name\"])\n print(\"Done !\")\n\n return liquidationCandidates\n\n\nasync def subsribeLiquidations(symbol_list):\n for symbol in symbol_list:\n line = f'Subscribing to liquidation stream for {symbol}'\n print(line)\n try:\n ws.liquidation_stream(symbol, handle_message)\n except:\n print(\"Error subscribing to pair.\")\n print(\"Done !\")\n\n\ndef checkIfTradable(liquidation_message):\n #print(liquidation_message)\n size = float(liquidation_message[\"data\"][\"size\"])\n price = float(liquidation_message[\"data\"][\"price\"])\n pair = liquidation_message[\"data\"][\"symbol\"]\n volume = size * price\n line = f'Got liquidatino for {liquidation_message[\"data\"][\"symbol\"]}; Side {liquidation_message[\"data\"][\"side\"]}; Liquidated volume {str(volume)}'\n print (line)\n if (volume > MIN_LIQUIDATION_VOLUME) and (pair in whitelist):\n line = f'Got pair {liquidation_message[\"data\"][\"symbol\"]}; Side 
{liquidation_message[\"data\"][\"side\"]}; Liquidated volume {str(volume)}'\n print(OKGREEN + line + ENDC)\n return True\n else:\n return False\n\n\ndef placeOrder(liquidation_message):\n liquidated_pair = liquidation_message[\"data\"][\"symbol\"]\n if liquidated_pair in blacklist:\n line = f'Pair {liquidated_pair} is on blacklist!' \n print(line)\n return\n else:\n liquidated_pair_price = liquidation_message[\"data\"][\"price\"]\n liquidated_side = liquidation_message[\"data\"][\"side\"]\n orderSize_percentage = float(getWalletBalance()) * float(PERCENT_ORDER_SIZE)\n order_cost = orderSize_percentage * float(liquidated_pair_price)\n \n order_pair_ccxt = liquidated_pair[ : liquidated_pair.find(\"USDT\")] + \"/USDT:USDT\"\n #order = exchange.createMarketBuyOrder(order_pair_ccxt, order_cost)\n #print(order)\n if liquidated_side == 'Sell':\n order_side = \"sell\"\n #order = exchange.createMarketSellOrder(order_pair_ccxt, order_cost)\n else:\n order_side = \"buy\"\n #order = exchange.createMarketBuyOrder(order_pair_ccxt, order_cost)\n line = f'liquidated_price={liquidated_pair_price}\\nliquidated_side = {liquidated_side}\\nBalance = {getWalletBalance()}\\norderSize_percentage = {orderSize_percentage}\\norder_cost = {order_cost}'\n print(line)\n print(\"Placing order not implemented yet!\")\n #print (order)\n # print(session.place_order(\n # category=\"linear\",\n # symbol=order_pair,\n # side=order_side,\n # orderType=\"Market\",\n # qty=str(orderSize),\n # ))\n \n order = exchange.createOrder (order_pair_ccxt, 'market', order_side, 1, None, {'qty': 1})\n print(order)\n \n\n\ndef getWalletBalance():\n walletInfo = session.get_wallet_balance(accountType=\"CONTRACT\")\n return float(walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"walletBalance\"])\n\n\nasync def main():\n walletInfo = session.get_wallet_balance(accountType=\"CONTRACT\")\n walletBalance = walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"walletBalance\"]\n equity = walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"equity\"]\n totalPositionIM = walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"totalPositionIM\"]\n unrealisedPnl = walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"unrealisedPnl\"]\n cumRealisedPnl = walletInfo[\"result\"][\"list\"][0][\"coin\"][1][\"cumRealisedPnl\"]\n line = f'Balance: {walletBalance}\\nEquity: {equity}\\nuPnL: {unrealisedPnl}\\ncum PnL:{cumRealisedPnl}'\n print(line)\n \n usdt_symbols = get_symbols()\n global whitelist\n whitelist = getCoinsToTrade(usdt_symbols)\n\n # set position mode and leverage\n for element in whitelist:\n symbol = element[ : element.find(\"USDT\")] + \"/USDT:USDT\"\n line = f'Setting MarginMode for {symbol}'\n print(line)\n \n try:\n result = exchange.setMarginMode(MARGIN_MODE, symbol, params = {\"buyLeverage\": int(LEVERAGE), \"sellLeverage\": int(LEVERAGE)})\n line = f'{OKGREEN}OK!{ENDC} :: {result}'\n print(line)\n except Exception as e:\n line = f'{FAIL}Failed!{ENDC} :: {e}'\n print(line)\n\n line = f'Setting leverage for {symbol}'\n print (line)\n try:\n result = exchange.setLeverage(LEVERAGE, symbol, params = {})\n line = f'{OKGREEN}OK!{ENDC} :: {result}'\n print(line)\n except Exception as e:\n line = f'{FAIL}Failed!{ENDC} :: {e}'\n print(line)\n\n line = f'Setting position mode to One-way for {symbol}'\n print (line)\n try:\n result = exchange.set_position_mode(hedged = False, symbol = symbol)\n line = f'{OKGREEN}OK!{ENDC} :: {result}'\n print(line)\n except Exception as e:\n line = f'{FAIL}Failed!{ENDC} :: {e}'\n print(line)\n\n\n await 
subsribeLiquidations(whitelist)\n\n while True:\n sleep(1)\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"rush0815/PyBit","sub_path":"pybit-Bot.py","file_name":"pybit-Bot.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"41385408740","text":"def Trees(n):\n global tree\n global x\n if n <= N:\n Trees(n * 2)\n tree[n] = x\n x += 1\n Trees(n * 2 + 1)\n\n\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n tree = [0]*(N+1)\n x = 1\n Trees(1)\n result1 = tree[1]\n result2 = tree[N//2]\n print(f\"#{tc} {result1} {result2}\")\n","repo_name":"WoongKi1115/TIL","sub_path":"algo/0915/이진탐색.py","file_name":"이진탐색.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"31517582212","text":"import functools\n\n\nclass PlanetError(Exception):\n pass\n\n\nclass SpaceAge(object):\n EARTHYEAR = 31557600\n PERIODS = {\n 'mercury': 0.2408467,\n 'venus': 0.61519726,\n 'earth': 1.0,\n 'mars': 1.8808158,\n 'jupiter': 11.862615,\n 'saturn': 29.447498,\n 'uranus': 84.016846,\n 'neptune': 164.79132,\n }\n\n def __init__(self, age):\n self.seconds = age\n\n def on_planet(self, planet):\n if planet not in self.PERIODS:\n raise PlanetError(\n \"{} is not a planet in the solar system.\".format(planet))\n return round(self.seconds / self.PERIODS[planet] / self.EARTHYEAR, 2)\n\n def __getattr__(self, attr):\n try:\n planet = attr.split('_')[1]\n except IndexError:\n raise AttributeError(\"No such attribute '{}'\".format(attr))\n return functools.partial(self.on_planet, planet=planet)\n","repo_name":"shdev/exercism-python","sub_path":"space-age/space_age.py","file_name":"space_age.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"17600006469","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 19 15:25:23 2019\n\n@author: bruno\n\"\"\"\n#%%\nimport tensorflow as tf\nimport numpy as np\nimport datetime\nfrom tensorflow.keras.datasets import fashion_mnist\nimport matplotlib.pyplot as plt\n#%% Load data base\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\nplt.imshow(x_train[5])\n#%%\n# Images normalizer\nx_train = x_train / 255.0\nx_test = x_test / 255.0 \n\n# Reshape vector images for MLP One dimensional\nx_train = x_train.reshape(-1, 28*28)\nx_test = x_test.reshape(-1, 28*28)\n#%% Criate model arquiteture\nclass Network():\n def __init__(self, input_shape, number_class ):\n self.model = tf.keras.models.Sequential()\n self.input_shape = input_shape\n self.number_class = number_class\n \n def net(self):\n self.model.add(tf.keras.layers.Dense(units = 128, activation = 'relu', input_shape=(self.input_shape)))\n self.model.add(tf.keras.layers.Dropout(0.2))\n self.model.add(tf.keras.layers.Dense(units=128, activation='relu'))\n self.model.add(tf.keras.layers.Dropout(0.2))\n self.model.add(tf.keras.layers.Dense(units=128, activation='relu'))\n self.model.add(tf.keras.layers.Dropout(0.2))\n self.model.add(tf.keras.layers.Dense(units=128, activation='relu'))\n self.model.add(tf.keras.layers.Dense(units = self.number_class, activation = 'softmax'))\n self.model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy'])\n return self.model\n \n#%% Instantiating the Neural Network\ninput_shape = (784, )\nnumber_class = 10\nobj_net = Network(input_shape, number_class)\nnetwork = obj_net.net()\nnetwork.summary() \n#%% Training the neural Network\nnetwork.fit(x_train, y_train, epochs =30)\n\n#%% Test model data base test\ntest_loss, test_accuracy = network.evaluate(x_test, y_test)\n\n#%% Save model\nmodel_json = network.to_json()\nwith open(\"fashion_model.json\", \"w\") as json_file:\n json_file.write(model_json)\n\n#%% Save weights\nnetwork.save_weights(\"fashion_model.h5\")","repo_name":"brunoprp/TensorFlow-2.0","sub_path":"Image-classification-MLP/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"19731183120","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 10:32:33 2019\n\n@author: kkrista\n\"\"\"\nimport os\nimport csv\nimport shutil\nfrom functions_blindedScoring import randFilenameGen\n\nanimalDir = '/Volumes/SharedX/Neuro-Leventhal/data/mouseSkilledReaching/'\n\nAB_maskedFilename = '/Volumes/SharedX/Neuro-Leventhal/data/mouseSkilledReaching/blindedScoring/Alli_C/masked_AC.csv'\nAB_untranslatedDir = '/Volumes/SharedX/Neuro-Leventhal/data/mouseSkilledReaching/blindedScoring/Alli_C/unTranslated_AC/'\nAB_translatedDir = '/Volumes/SharedX/Neuro-Leventhal/data/mouseSkilledReaching/blindedScoring/Alli_C/toScore/'\n\nstillCutting = '7062_20190827_CC2_03'\n\n# Initialize Variables\nallAnimals=[]\nallFolders = os.listdir(animalDir)\nmaskDict = []\noriginalNames = []\nmaskedNames = []\n\n# Get all untranslated files\nAB_untranslated = os.listdir(AB_untranslatedDir)\nAB_untranslated = [item for item in AB_untranslated if item.endswith('.csv')]\nfor item in AB_untranslated:\n newItem = item.split('_')\n newItem = '_'.join(newItem[:-1])\n AB_untranslated[AB_untranslated.index(item)] = newItem\n \nwith open(AB_maskedFilename, mode='r') as fin:\n rd = csv.DictReader(fin)\n for row in rd:\n originalNames.append(row['Original Name'])\n maskedNames.append(row['New Name'])\n maskDict.append({'Original Name':row['Original Name'],'New Name':row['New Name']})\n\n# Get all animal folders\nfor file in allFolders:\n if 'et' in file[:2]:\n # Collect the files that have 'et', denoting 'Ear Tag' into one list\n allAnimals.append(file)\nallAnimals.sort()\n\nfor animal in allAnimals:\n \n # Define training directory for animal\n currAnDir=animalDir+animal+'/Training/'\n \n if not os.path.isdir(currAnDir):\n # If there is no 'Training' directory, skip this animal\n continue\n \n # Get contents of 'Training' directory\n allTrainDays=os.listdir(currAnDir)\n allTrainDays.sort()\n \n # Loop through training days\n for day in allTrainDays:\n\n if ('.MP4' in day):\n # Skip .MP4 files in 'Training' directory\n continue\n\n # Define training day directory\n currDayDir=currAnDir+day\n\n if not os.path.isdir(currDayDir):\n # Skip 'Training/*' items that are not directories\n continue\n\n # Identify where we're at in the code, in case of issues\n print('Checking: ' + day)\n\n # Get all contents of the training day directory\n allFiles=os.listdir(currDayDir)\n \n # Get existing reach directories\n existingReachDir=[file for file in allFiles if 'Reaches' in file]\n \n # If no reach directories exist, move to the next training day\n if len(existingReachDir) == 0:\n continue\n \n # Create names to check against untranslated files\n vidID = day.strip('et')\n vidID = vidID.split('_')\n vidID = '_'.join(vidID[:-1])\n \n for item in existingReachDir:\n vidNum = item.strip('Reaches')\n dayOrigVidName = ('_'.join([vidID,vidNum]))\n \n # Check if the dayOrigVidName is in the untranslated files\n if dayOrigVidName in AB_untranslated or dayOrigVidName in originalNames:\n continue\n else:\n print('Translating Videos: ' + dayOrigVidName)\n \n newName = randFilenameGen()\n \n while newName in maskedNames:\n newName = randFilenameGen()\n \n originalNames.append(dayOrigVidName)\n maskedNames.append(newName)\n \n maskDict.append({'Original Name':dayOrigVidName,'New Name':newName})\n \n reachDir = currDayDir + '/' + item + '/'\n \n allReaches = os.listdir(reachDir)\n allReaches = [video for video in allReaches if video.endswith('.mp4')]\n \n for reach in allReaches:\n \n if not 
os.path.isdir(AB_translatedDir + newName):\n os.mkdir(AB_translatedDir + newName)\n \n reachID = reach.split('_')[-1]\n \n oldFile = reachDir + reach\n newFile = AB_translatedDir + newName + '/' + '_'.join([newName,reachID])\n try:\n shutil.copy(oldFile, newFile)\n except:\n continue\n \ncsv_columns = ['Original Name','New Name']\nwith open(AB_maskedFilename, 'w') as f:\n writer = csv.DictWriter(f,fieldnames=csv_columns)\n writer.writeheader()\n for data in maskDict:\n writer.writerow(data)\n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"kristakernodle/VideoProcessing","sub_path":"skilledReaching/blindedScoring/maskVids.py","file_name":"maskVids.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"12061958305","text":"# Initial state of boxes\nboxes = {\n 0: [],\n 1: ['lion'],\n 2: [],\n 3: ['pen', 'keyboard', 'coat'],\n 4: [],\n 5: ['watch', 'battery', 'spoon'],\n 6: ['sock', 'coin', 'grass', 'table'],\n 7: ['console', 'laptop', 'thread'],\n 8: [],\n 9: ['tie'],\n 10: [],\n 11: [],\n 12: ['lock', 'ship', 'candle']\n}\n\n# Move the tie from Box 9 to Box 2.\nboxes[9].remove('tie')\nboxes[2].append('tie')\n\n# Put the candle and the jungle into Box 11.\nboxes[11].append('candle')\nboxes[11].append('jungle')\n\n# Replace the grass and the sock with the doll and the ring in Box 6.\nboxes[6].remove('grass')\nboxes[6].remove('sock')\nboxes[6].append('doll')\nboxes[6].append('ring')\n\n# Empty Box 12.\nboxes[12] = []\n\n# Put the mask and the candle into Box 12.\nboxes[12].append('mask')\nboxes[12].append('candle')\n\n# Remove the spoon and the battery and the watch from Box 5.\nboxes[5].remove('spoon')\nboxes[5].remove('battery')\nboxes[5].remove('watch')\n\n# Remove the candle from Box 12.\nboxes[12].remove('candle')\n\n# Replace the doll and the coin with the perfume and the jungle in Box 6.\nboxes[6].remove('doll')\nboxes[6].remove('coin')\nboxes[6].append('perfume')\nboxes[6].append('jungle')\n\n# Remove the lion from Box 1.\nboxes[1].remove('lion')\n\n# Move the mask from Box 12 to Box 3.\nboxes[12].remove('mask')\nboxes[3].append('mask')\n\n# Empty Box 3.\nboxes[3] = []\n\n# Replace the jungle with the train in Box 11.\nboxes[11].remove('jungle')\nboxes[11].append('train')\n\n# Move the table and the perfume and the ring from Box 6 to Box 1.\nitems_to_move = ['table', 'perfume', 'ring']\nfor item in items_to_move:\n boxes[6].remove(item)\n boxes[1].append(item)\n\n# Put the dice into Box 9.\nboxes[9].append('dice')\n\n# Swap the train in Box 11 with the laptop in Box 7.\nboxes[11].remove('train')\nboxes[7].remove('laptop')\nboxes[11].append('laptop')\nboxes[7].append('train')\n\n# Put the key and the helmet and the candle into Box 4.\nboxes[4].append('key')\nboxes[4].append('helmet')\nboxes[4].append('candle')\n\n# Move the key from Box 4 to Box 2.\nboxes[4].remove('key')\nboxes[2].append('key')\n\n# Move the candle from Box 4 to Box 8.\nboxes[4].remove('candle')\nboxes[8].append('candle')\n\n# Move the table and the ring and the perfume from Box 1 to Box 10.\nitems_to_move = ['table', 'ring', 'perfume']\nfor item in items_to_move:\n boxes[1].remove(item)\n boxes[10].append(item)\n\n# Move the laptop from Box 11 to Box 7.\nboxes[11].remove('laptop')\nboxes[7].append('laptop')\n\n# Print the boxes\nfor box_number, items in boxes.items():\n print(f\"Box {box_number}: {items}\")","repo_name":"NLP-KU/fulgid","sub_path":"boxes/results/complex-boxes-dataset/code/gpt-3.5-turbo/59397e3298.py","file_name":"59397e3298.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"39"}
+{"seq_id":"4916753314","text":"def solution(name, yearning, photo):\n answer = []\n dic = {}\n for i in range(len(name)):\n dic[name[i]] = yearning[i]\n \n for i in photo:\n sum = 0\n for j in i:\n try:\n sum += dic[j]\n except: continue\n answer.append(sum)\n return answer","repo_name":"hyetae/Algorithm","sub_path":"프로그래머스/unrated/176963. 추억 점수/추억 점수.py","file_name":"추억 점수.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"39"}
+{"seq_id":"15875955971","text":"#!/usr/bin/env python3\nimport rospy\nfrom geometry_msgs.msg import Twist\nfrom pynput import keyboard\nimport math\n\nrobot_keyboardctl_flag = False\nxforward = 0\nzrotate = 0\n\n\ndef on_release(key):\n global xforward, zrotate, robot_keyboardctl_flag\n # print('{0} released'.format(key))\n robot_keyboardctl_flag = True # press any key will enable keyboard ctl, 'g' stop control, esc will quit.\n if key == keyboard.Key.up:\n xforward = min(robot_max_forward_speed, xforward + robot_forward_speed_inctl)\n if key == keyboard.Key.left:\n zrotate = min(robot_max_rotation_speed, zrotate + robot_rotation_speed_inctl)\n if key == keyboard.Key.right:\n zrotate = max(-robot_max_rotation_speed, zrotate - robot_rotation_speed_inctl)\n if key == keyboard.Key.down:\n xforward = 0\n zrotate = 0\n # 'g' and esc will both quit keyboard-control mode. g is temporary, esc is permanent\n if key == keyboard.KeyCode.from_char('g') or key == keyboard.Key.esc:\n robot_keyboardctl_flag = False\n xforward = 0\n zrotate = 0\n rospy.set_param('/robot_keyboard_control_flag', robot_keyboardctl_flag)\n\n if key == keyboard.Key.esc:\n # Stop listener\n return False\n\ndef reset():\n print(\"shutdown time! reset robot_keyboard_control_flag to False!\")\n rospy.set_param('/robot_keyboard_control_flag', False)\n\ndef vel_publisher():\n global vel_pub\n vel_pub = rospy.Publisher(\"~cmd_vel\", Twist, queue_size=500)\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n vel_msg = Twist()\n vel_msg.linear.x = xforward\n vel_msg.angular.z = math.radians(zrotate)\n vel_pub.publish(vel_msg)\n rospy.loginfo(\"published twist info with keyboardctl_flag: {}, linear.x: {}, angular.z: {}\".format(\n robot_keyboardctl_flag, xforward, math.radians(zrotate)))\n rate.sleep()\n rospy.on_shutdown(reset)\n\n\n\ndef main():\n global robot_max_rotation_speed, robot_rotation_speed_inctl, robot_max_forward_speed, robot_forward_speed_inctl\n rospy.init_node(\"robot_keyboard_control\")\n # read & set params for ros param server\n robot_max_rotation_speed = rospy.get_param('~robot_max_rotation_speed', default=90.0)\n robot_rotation_speed_inctl = rospy.get_param('~robot_rotation_speed_inctl', default=5.0)\n robot_max_forward_speed = rospy.get_param('~robot_max_forward_speed', default=3.0)\n robot_forward_speed_inctl = rospy.get_param('~robot_forward_speed_inctl', default=0.2)\n rospy.set_param('/robot_keyboard_control_flag', robot_keyboardctl_flag)\n\n listener = keyboard.Listener(on_release=on_release)\n listener.start()\n try:\n vel_publisher()\n except rospy.ROSException as e:\n rospy.logerr(\"vel_publisher call failed: {}\".format(e))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"altfool/wheelchair-control","sub_path":"keyboardctl-ros/src/robot_keyboard_control/scripts/robot_keyboard_control.py","file_name":"robot_keyboard_control.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"39"}
+{"seq_id":"10814664071","text":"\"\"\"\nMinimize costs directly using a Linear Classifier and a Genetic Algorithm.\n\n1. Create a logger.\n2. Create an argument parser\n3. Read and clean the data.\n4.\n5. Define hyperparameter space.\n6. Perform hyperparameter tuning with train data.\n7. Fit pipeline with whole dataset and save predictions.\n\"\"\"\n\nimport os\nimport datetime\nimport logging\nimport argparse\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\n\nfrom models.tuning import GeneticAlgorithm\nfrom preprocessing.cleaning import clean\nfrom preprocessing.features import FeatureGenerator\n\n# 1. Create a logger.\nTIMECODE = datetime.datetime.now().strftime(\"%Y%m%d_%H%M\")\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.INFO)\nFH = logging.FileHandler(os.path.join(\"logs\", \"genetic_\" + TIMECODE + \".log\"))\nFORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\nFORMATTER = logging.Formatter(FORMAT)\nFH.setFormatter(FORMATTER)\nLOGGER.addHandler(FH)\nLOGGER.info(\"================= Start new run =================\")\n\n# 2. Create an argument parser\nPARSER = argparse.ArgumentParser(fromfile_prefix_chars=\"@\")\nPARSER.add_argument(\"-e\", \"--elitism\", type=float, default=0.02,\n help=\"percentage of population kept in next iteration\")\nPARSER.add_argument(\"-p\", \"--population-size\", type=int, default=100,\n help=\"total number of solutions in pool\")\nPARSER.add_argument(\"--jobs\", type=int, default=-1,\n help=\"number of processors used\")\nPARSER.add_argument(\"-c\", \"--crossover-strategy\", type=str, default=\"arithmetic\",\n help=\"in {'arithmetic', 'point', 'heuristic'}\")\nPARSER.add_argument(\"-m\", \"--prob-mutation\", type=float, default=0.5,\n help=\"probability of mutating a solution\")\n\nPARSER.add_argument(\"--init-loc\", type=float, default=0.0,\n help=\"location parameter during initialization\")\nPARSER.add_argument(\"--init-scale\", type=float, default=1.0,\n help=\"scale parameter during initialization\")\n\nPARSER.add_argument(\"--maxiter\", type=int, default=20,\n help=\"maximum number of iterations\")\nPARSER.add_argument(\"--subsample\", type=float, default=0.5,\n help=\"size of subsample\")\nPARSER.add_argument(\"--bootstrap\", action=\"store_true\", default=False,\n help=\"if set, sample with replacement\")\nPARSER.add_argument(\"--reset-prob\", type=float, default=0.25,\n help=\"probability of redrawing the subsample\")\nPARSER.add_argument(\"--pca\", type=int, default=None)\nARGS = PARSER.parse_args()\n\n# 3. 
Load and clean the data\nDATAPATH = os.path.join(\"data\", \"BADS_WS1819_known.csv\")\nUNKNOWNPATH = os.path.join(\"data\", \"BADS_WS1819_unknown.csv\")\n\nOUTPATH = os.path.join(\n \"predictions\", \"genetic_predictions\" + TIMECODE + \".csv\")\n\nKNOWN = clean(DATAPATH)\nUNKNOWN = clean(UNKNOWNPATH)\nHISTORY = KNOWN.append(UNKNOWN, sort=False)\n\nTRAIN, TEST = train_test_split(KNOWN, test_size=0.2)\n\nwith open(\"variables.txt\", \"r\") as f:\n COLS = f.read().splitlines()\n\nFG = FeatureGenerator(cols=COLS)\nFG.fit(HISTORY, 'return')\nX_TRAIN, Y_TRAIN = FG.transform(TRAIN, ignore_woe=False, add_dummies=True)\nX_TEST, Y_TEST = FG.transform(TEST, ignore_woe=False, add_dummies=True)\n\nif ARGS.pca:\n STEPS = [(\"scaler\", StandardScaler()),\n (\"pca\", PCA(n_components=ARGS.pca))]\n PIPELINE = Pipeline(STEPS)\n SCALER = PIPELINE.fit(X_TRAIN, Y_TRAIN)\n\n X_TRAIN = SCALER.transform(X_TRAIN)\n X_TEST = SCALER.transform(X_TEST)\n\n# 4. Run Genetic Algorithm\nGA = GeneticAlgorithm(elitism=ARGS.elitism,\n population_size=ARGS.population_size,\n n_jobs=ARGS.jobs,\n crossover_strategy=ARGS.crossover_strategy,\n prob_mutation=ARGS.prob_mutation)\nGA.fit(X=X_TRAIN,\n y=Y_TRAIN,\n price=TRAIN.item_price.values,\n fit_intercept=True,\n loc=ARGS.init_loc,\n scale=ARGS.init_scale)\nRES = GA.run(maxiter=ARGS.maxiter,\n subsample=ARGS.subsample,\n bootstrap=ARGS.bootstrap,\n reset_prob=ARGS.reset_prob)\n\nTEST_PRED = GA.predict(X_TEST)\nTEST_SCORE = GA.get_utility(y_prob=TEST_PRED,\n y_true=Y_TEST,\n price=TEST.item_price.values,\n cutoff=GA.optimal_cutoff)\n\n# Baseline 0: nobody gets the message\nBASELINE_0 = GA.get_utility(y_prob=np.zeros(len(Y_TEST)),\n y_true=Y_TEST,\n price=TEST.item_price.values,\n cutoff=0.5)\n\n# Baseline 1: everybody gets the message\nBASELINE_1 = GA.get_utility(y_prob=np.ones(len(Y_TEST)),\n y_true=Y_TEST,\n price=TEST.item_price.values,\n cutoff=0.5)\n\n# Comparison: Train a shitty random forest\nRF = RandomForestClassifier(max_depth=87.0, min_samples_leaf=0.01432,\n min_samples_split=0.011417, n_estimators=392)\nRF.fit(X_TRAIN, Y_TRAIN)\nRF_COMP_PRED = RF.predict_proba(X_TEST)\nRF_COMPARISON, _ = GA.get_fitness(y_prob=RF_COMP_PRED[:, 1],\n y_true=Y_TEST,\n price=TEST.item_price.values)\n\nLR = LogisticRegression(solver=\"saga\")\nLR.fit(X_TRAIN, Y_TRAIN)\n\nLR_COMP_PRED = LR.predict_proba(X_TEST)\nLR_COMPARISON, _ = GA.get_fitness(y_prob=LR_COMP_PRED[:, 1],\n y_true=Y_TEST,\n price=TEST.item_price.values)\n# Create diagnostic plot\nGA.plot(os.path.join(\"logs\", \"genetic_\" + TIMECODE + \".png\"),\n title=\"Real Data\",\n figsize=(7, 5))\n# Log events\nfor arg, val in vars(ARGS).items():\n LOGGER.info(\"%s: %s\", arg, val)\n\nLOGGER.info(OUTPATH)\n\nLOGGER.info(\"------------------- Results -------------------\")\nLOGGER.info(\"Baseline score: % 14.2f\", BASELINE_1)\nLOGGER.info(\"RF score: % 20.2f\", RF_COMPARISON)\nLOGGER.info(\"LR score: % 20.2f\", LR_COMPARISON)\nLOGGER.info(\"Test score: % 18.2f\", TEST_SCORE)\nLOGGER.info(\"Coefficients: %s\", RES.round(2))\nLOGGER.info(\"Cost Optimal Threshold: % 2.2f\", GA.optimal_cutoff)\n\nprint(\"\\nBaseline 0 score: % 12.2f\" % (BASELINE_0))\nprint(\"Baseline 1 score: % 12.2f\" % (BASELINE_1))\nprint(\"RF score: % 20.2f\" % (RF_COMPARISON))\nprint(\"LR score: % 20.2f\" % (LR_COMPARISON))\nprint(\"Test score: % 18.2f\" % (TEST_SCORE))\nprint(\"Coefficients: %s\" % (RES.round(2)))\nprint(\"Cost Optimal Threshold: % 2.2f\" % (GA.optimal_cutoff))\n\nLOGGER.info(\"------------------- History -------------------\")\nLOGGER.info(\"| 
Best Fitness | Mean Fitness | OOB Fitness\")\nLOGGER.info(\"|--------------|--------------|-------------\")\nfor best, avg, oob in zip(GA.history[\"best_fitness\"],\n GA.history[\"mean_pop_fitness\"],\n GA.history[\"oob_fitness\"]):\n LOGGER.info(\"| % 12.7f | % 12.7f | % 12.7f\", best, avg, oob)\n\nprint(\"\\nSave Predictions\")\nX_PRED = FG.transform(UNKNOWN, ignore_woe=False, add_dummies=True)\nif ARGS.pca:\n X_PRED = SCALER.transform(X_PRED)\n\nPREDICTIONS = GA.predict(X_PRED)\nPREDICTIONS = pd.DataFrame(PREDICTIONS, index=UNKNOWN.index,\n columns=[\"return\"], dtype=float)\nPREDICTIONS.to_csv(OUTPATH)\n","repo_name":"thsis/BADS19","sub_path":"models/genetic.py","file_name":"genetic.py","file_ext":"py","file_size_in_byte":7485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"7798773334","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport __future__\nimport sys\nsys.stdin = open(\"./challenge_sample_input\", 'r')\nprint(\"===\" * 30)\nprint(\"SAMPLE OUTPUT:\")\nprint(\"===\" * 30)\nprint(open(\"./challenge_sample_output\", 'r').read())\nprint(\"===\" * 30)\nprint(\"START\")\nprint(\"===\" * 30)\n\nn = input()\nmset = set(map(int, raw_input().split())) \nfor arr in [raw_input().split() for _ in range(0,int(raw_input()))]:\n if len(arr) > 1:\n cmd, arg = arr\n else:\n arg = None\n cmd = arr.pop()\n if arg is not None:\n mset.__getattribute__(cmd)(int(arg))\n else:\n mset.__getattribute__(cmd)()\nprint(sum(mset))\n","repo_name":"shollingsworth/HackerRank","sub_path":"python/py-set-discard-remove-pop/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"21363383396","text":"\r\n# -----------------------------------------------------------------------------\r\n# calc.py\r\n#\r\n# A simple calculator with variables -- all in one file.\r\n# -----------------------------------------------------------------------------\r\n\r\ntokens = (\r\n 'NAME', 'NUMBER',\r\n 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',\r\n 'LPAREN', 'RPAREN',\r\n )\r\n\r\n# Tokens\r\n# r 은 Regular Expression을 의미하는 것\r\n\r\nt_PLUS = r'\\+'\r\nt_MINUS = r'-'\r\nt_TIMES = r'\\*'\r\nt_DIVIDE = r'/'\r\nt_EQUALS = r'='\r\nt_LPAREN = r'\\('\r\nt_RPAREN = r'\\)'\r\nt_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'\r\n\r\n\r\ndef t_NUMBER(t):\r\n r'\\d+'\r\n try:\r\n t.value = int(t.value)\r\n except ValueError:\r\n print(\"Integer value too large %d\", t.value)\r\n t.value = 0\r\n return t\r\n\r\n\r\n# Ignored characters\r\nt_ignore = \" \\t\"\r\n\r\n\r\ndef t_newline(t):\r\n r'\\n+'\r\n t.lexer.lineno += t.value.count(\"\\n\")\r\n\r\n\r\ndef t_error(t):\r\n print(\"Illegal character '%s'\" % t.value[0])\r\n t.lexer.skip(1)\r\n\r\n\r\n# Build the lexer\r\nimport ply.lex as lex\r\nlexer = lex.lex()\r\n\r\n# 이까지가 scan\r\n# ------------------------------------------------------------\r\n# 여기서부터 parsing 시작\r\n# Parsing rules\r\n\r\n# 우선순위가 높을수록 밑으로 간다.\r\n# 모호성을 제거해주기 위함.\r\nprecedence = (\r\n ('left', 'PLUS', 'MINUS'),\r\n ('left', 'TIMES', 'DIVIDE'),\r\n ('right', 'UMINUS'), # right 는 뭐지?\r\n )\r\n\r\n# dictionary of names\r\n# 심볼테이블. 명령형 프로그래밍 언어에서 변수의 동적 할당을 처리하는 방법.\r\n# 변수를 할당할 때 names 안에 dict 타입의 형태로 value 들이 담기게 된다.\r\n# { NAME : expression }\r\nnames = { }\r\n\r\n\r\n# 변수 할당\r\ndef p_statement_assign(t):\r\n 'statement : NAME EQUALS expression'\r\n names[t[1]] = t[3]\r\n\r\n\r\ndef p_statement_expr(t):\r\n 'statement : expression'\r\n print(t[1])\r\n\r\n\r\n# 모호성이 있는 RE라서 이렇게 쓰면 안되지만, precedence를 통해서 모호성을 제거해준다.\r\ndef p_expression_binop(t):\r\n \"\"\"\r\n expression : expression PLUS expression\r\n | expression MINUS expression\r\n | expression TIMES expression\r\n | expression DIVIDE expression\r\n \"\"\"\r\n # ('+', t[1], t[3]) -> abstract syntax tree로 표현하는 방법\r\n if t[2] == '+' : t[0] = t[1] + t[3] # 입력스트링(t[2])으로 '+'가 들어오면 t[1]과 t[3]을 더해서 t[0]에 할당하라\r\n elif t[2] == '-': t[0] = t[1] - t[3] # c 프로그래밍에서의 $(위치)와 비슷하다.\r\n elif t[2] == '*': t[0] = t[1] * t[3]\r\n elif t[2] == '/': t[0] = t[1] / t[3]\r\n\r\n# 간단한 산술식의 경우 위와 같이 표현할 수 있으나\r\n# if문과 같은 복잡한 구조는 abstract syntax tree를 필요로 한다.\r\n# 물론 산술식도 abstract syntax tree로 표현할 수 있다.\r\n\r\ndef p_expression_uminus(t):\r\n 'expression : MINUS expression %prec UMINUS'\r\n t[0] = -t[2]\r\n\r\n\r\ndef p_expression_group(t):\r\n 'expression : LPAREN expression RPAREN'\r\n t[0] = t[2]\r\n\r\n\r\ndef p_expression_number(t):\r\n 'expression : NUMBER'\r\n t[0] = t[1]\r\n\r\n\r\ndef p_expression_name(t):\r\n 'expression : NAME'\r\n try:\r\n t[0] = names[t[1]]\r\n except LookupError:\r\n print(\"Undefined name '%s'\" % t[1])\r\n t[0] = 0\r\n\r\n\r\ndef p_error(t):\r\n print(\"Syntax error at '%s'\" % t.value)\r\n\r\n\r\nimport ply.yacc as yacc\r\nparser = yacc.yacc()\r\n\r\nwhile True:\r\n try:\r\n s = input('calc > ') # Use raw_input on Python 2\r\n except EOFError:\r\n break\r\n parser.parse(s)\r\n","repo_name":"onsuk/TIL","sub_path":"computation_theory/src/3-ply_calc.py","file_name":"3-ply_calc.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"4221764588","text":"#coding: utf-8\nimport os\nimport numpy as np\nimport imageio\nimport multiprocessing\nimport time\nimport zipfile\nfilename_definition = \"{}_Part{}\" #{folder_definition:}\nbatch_size = 1000\nsave_path = '../../npydata/'\nn_build_process = 1 # 多进程数\nn_frames = 61\n# data format: np.Array(n_samples, n_frames, row, col, channels)\n# channels default is 1, n_frames=61\ndef build(path='../../data'):\n folders = os.listdir(path)\n folders.sort()\n \n #去除重复文件夹\n # folders = list(set(folders + built_folder))\n\n for folder in folders:\n print('start build {}'.format(folder))\n samples = ['/'.join([path,folder, x]) for x in os.listdir('/'.join([path, folder]))]\n # samples.sort()\n n_part = 0\n p = multiprocessing.Pool(n_build_process)\n for i in range(0, len(samples), batch_size):\n n_part += 1\n arg_samples_path = samples[i:i+batch_size]\n p.apply_async(build_samples, args=(n_part, arg_samples_path, folder))\n p.close()\n p.join()\n print('finish build {}'.format(folder))\n\ndef build_samples(n_part, samples_path, folder_name):\n movie = np.zeros((batch_size, 61, 51, 51, 1), dtype=np.float)\n build_time = time.time()\n print('start build {} part {}'.format(folder_name, n_part))\n # sample_list = []\n # save\n save_filename = save_path \\\n + filename_definition.format(folder_name,n_part) \\\n + \"_{}batch\".format(batch_size) \\\n + \".npz\"\n if os.path.exists(save_filename):\n print('previously finished build {} part {}'.format(folder_name, n_part))\n return\n for i, sample in zip(range(len(samples_path)), samples_path):\n pics = os.listdir('/'.join([sample]))\n # pics_list = []\n pics.sort()\n for j, pic in zip(range(len(pics)), pics):\n pic_nparray = np.array(imageio.imread('/'.join([sample, pic])),\n dtype=np.float32)\n movie[i,j,::,::,0] = pic_nparray[::10,::10] # downsample\n print(pic)\n print(\"part{} progress {}/{}\".format(n_part, i + 1, batch_size))\n movie = movie[::,::,:50,:50,::] # cut picture\n print(len(movie[0][0]))\n movie = movie / 255.0 # normalize\n compress_time = time.time()\n print(\"part{} start compress\".format(n_part))\n np.savez_compressed(save_filename,\n movie)\n compress_time = time.time() - compress_time\n print(\"part{} finish compress, time:{}\".format(n_part, compress_time))\n build_time = time.time() - build_time\n print('finished build {} part {}, time:{}'.format(folder_name, n_part, build_time))\n\ndef load_data(path='../../npydata'):\n data = os.listdir()\n return np.load('/'.join([path, data[0]]), dtype=float)\n\ndef downsample(data, scale):\n return data[::,::,::scale,::scale,::]\n\ndef cut_frame(data, split):\n pass\nif __name__ == '__main__':\n build()\n","repo_name":"rabitdash/tianchi_icdm_2018","sub_path":"src/tools/imageset2np.py","file_name":"imageset2np.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"17016192838","text":"# -*- coding:utf-8 -*-\r\nimport logging\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nimport json\r\nfrom nltk.translate.bleu_score import corpus_bleu\r\n\r\n\r\ndef calculate_average(precisions, weights):\r\n \"\"\"Calculate the geometric weighted mean.\"\"\"\r\n tmp_res = 1\r\n for id, item in enumerate(precisions):\r\n tmp_res = tmp_res*np.power(item, weights[id])\r\n tmp_res = np.power(tmp_res, np.sum(weights))\r\n return tmp_res\r\n\r\n\r\ndef calculate_candidate(gram_list, candidate):\r\n \"\"\"Calculate the count of gram_list in candidate.\"\"\"\r\n gram_sub_str = ' '.join(gram_list)\r\n return len(re.findall(gram_sub_str, candidate))\r\n\r\n\r\ndef calculate_reference(gram_list, references):\r\n \"\"\"Calculate the count of gram_list in references\"\"\"\r\n gram_sub_str = ' '.join(gram_list)\r\n gram_count = []\r\n for item in references:\r\n # calculate the count of the sub string\r\n gram_count.append(len(re.findall(gram_sub_str, item)))\r\n return gram_count\r\n\r\n\r\ndef bleu_v2(candidate_sentence, reference_sentences, max_gram, weights,mode=0):\r\n \"\"\"\r\n https://en.wikipedia.org/wiki/BLEU\r\n bleu uses n-grams precision(usually 4)\r\n :return:\r\n \"\"\"\r\n candidate_corpus = list(candidate_sentence.split(' '))\r\n # number of the reference sentences\r\n refer_len = len(reference_sentences)\r\n candidate_tokens_len = len(candidate_corpus)\r\n if mode == 0:\r\n gram_precisions= []\r\n for i in range(max_gram):\r\n # calculate each gram precision\r\n # set current gram length\r\n curr_gram_len = i+1\r\n # calculate current gram length mole\r\n curr_gram_mole = 0\r\n # calculate current gram length deno\r\n curr_gram_deno = 0\r\n for j in range(0, candidate_tokens_len, curr_gram_len):\r\n if j + curr_gram_len > candidate_tokens_len:\r\n continue\r\n else:\r\n curr_gram_list = candidate_corpus[j:j+curr_gram_len]\r\n gram_candidate_count = calculate_candidate(curr_gram_list, candidate_sentence)\r\n # print(' current gram candidate count')\r\n # print(gram_candidate_count)\r\n gram_reference_count_list = calculate_reference(curr_gram_list, reference_sentences)\r\n # print(' current gram reference count list')\r\n # print(gram_reference_count_list)\r\n truncation_list = []\r\n for item in gram_reference_count_list:\r\n truncation_list.append(np.min([gram_candidate_count, item]))\r\n curr_gram_mole += np.max(truncation_list)\r\n curr_gram_deno += gram_candidate_count\r\n gram_precisions.append(curr_gram_mole/curr_gram_deno)\r\n\r\n average_res = calculate_average(gram_precisions, weights)\r\n # penalty on very short sentences\r\n bp = 1\r\n reference_len_list = [len(item.split(' ')) for item in reference_sentences]\r\n if candidate_tokens_len in reference_len_list:\r\n bp = 1\r\n else:\r\n if candidate_tokens_len < np.max(reference_len_list):\r\n bp = np.exp(1-(np.max(reference_len_list)/candidate_tokens_len))\r\n return bp*average_res\r\n\r\n\r\ndef main():\r\n # read test result json\r\n df = pd.read_json('./data/predict/predict2/test_predict.json', orient='split')\r\n # df.to_json('./data/test_result_columns.json', orient='split')\r\n predict_titles = df['predict_title'].to_list()\r\n actual_titles = df['actual_title'].to_list()\r\n bleu_list = []\r\n # one input and one output list\r\n n = len(predict_titles)\r\n for i in range(n):\r\n try:\r\n bleu_v2_score = bleu_v2(predict_titles[i], [actual_titles[i]], 1, weights=[1], mode=0)\r\n except:\r\n bleu_v2_score = 0\r\n finally:\r\n bleu_list.append(bleu_v2_score)\r\n df['bleu'] 
= bleu_list\r\n print(df)\r\n df.to_json('./data/predict/predict2/test_predict_columns.json', orient='split')\r\n\r\n\r\nif __name__ == '__main__':\r\n # full bleu test on references and candidate\r\n main()\r\n # predict_sentence = 'how old is the man'\r\n # train_sentences = ['this is a dog and not is a cat', 'this is a cat and not is a dog', 'it is a dragon', 'i like play ball']\r\n # bleu_v2_score = bleu_v2(predict_sentence, train_sentences, 4, weights=[0.25, 0.25, 0.25, 0.25], mode=0)","repo_name":"TopicSentenceExtraction/topicsentenceextraction","sub_path":"T5Model/calculate_bleu.py","file_name":"calculate_bleu.py","file_ext":"py","file_size_in_byte":4411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"27749768965","text":"from .. import db\n\n\nclass Alumno(db.Model):\n id = db.Column(db.Integer, primary_key = True)\n nombre = db.Column(db.String(45),nullable=False)\n email = db.Column(db.String(60),nullable=False, unique= True, index = True)\n\n def __repr__(self):\n return f'{self.nombre}'\n \n def to_json(self):\n alumno_json = {\n 'id':self.id,\n 'nombre': self.nombre,\n 'email': self.email\n }\n return alumno_json\n \n @staticmethod\n def from_json(alumno_json):\n id = alumno_json.get('id')\n nombre = alumno_json.get('nombre')\n email = alumno_json.get('email')\n\n return Alumno(\n id = id,\n nombre=nombre,\n email=email\n )","repo_name":"Reniato00/API_Scholar","sub_path":"main/models/Alumno.py","file_name":"Alumno.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"20222016401","text":"Price_list = [\"01\",\"Banana\", 5.00, \"unit\", \"02\", \"Mango\", 20.00, \"kg\", \"03\", \"Apple\", 15.00, \"kg\", \"04\", \"Papaya\", 25.00, \"unit\", \"05\", \"Guava\", 15.00, \"kg\"] \nnew_item = input(\"Want to add new items (Yes or NO) : \")\nif new_item == \"yes\" or new_item == \"YES\" or new_item == \"Yes\":\n id = input(\"ID : \")\n item_name = input(\"Item Name : \")\n price = float(input(\"Item Price : \"))\n u_k = input(\"Unit/kg : \")\n Price_list.extend([id,item_name,price,u_k])\n print(\"New Item Added List :\\n\",Price_list)\nelse:\n print(\"No Update\\n\",Price_list)","repo_name":"JoyTarafder/SuperShop-item_price","sub_path":"ass12.py","file_name":"ass12.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"13455856871","text":"from typing import List\n\n# 905. Sort Array By Parity https://leetcode.com/problems/sort-array-by-parity/\n# Given an integer array arr, move all the even integers at the beginning of the array followed by all the odd integers.\n# Return any array that satisfies this condition.\n\n\nclass Solution:\n def sort_array_by_parity(self, arr: List[int]) -> List[int]:\n \"\"\" Time complexity: O(n). We iterate through the list\n Space complexity: O(1).\n \"\"\"\n odd_index = 0\n even_index = 0\n\n for i in range(len(arr)):\n if arr[i] % 2 == 1:\n even_index += 1\n else:\n arr[odd_index], arr[even_index] = arr[even_index], arr[odd_index]\n odd_index += 1\n even_index += 1\n return arr\n","repo_name":"timshenkao/interview_coding_exercises","sub_path":"python_code/easy/905_Sort_Array_By_Parity_easy/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"32897304632","text":"import sys\nimport os\nimport pickle\nimport json\n\nfrom sklearn.metrics import precision_recall_curve\nimport sklearn.metrics as metrics\n\nif len(sys.argv) != 5:\n sys.stderr.write('Arguments error. Usage:\\n')\n sys.stderr.write('\\tpython evaluate.py model features scores plots\\n')\n sys.exit(1)\n\nmodel_file = sys.argv[1]\nmatrix_file = os.path.join(sys.argv[2], 'test.pkl')\nscores_file = sys.argv[3]\nplots_file = sys.argv[4]\n\nwith open(model_file, 'rb') as fd:\n model = pickle.load(fd)\n\nwith open(matrix_file, 'rb') as fd:\n matrix = pickle.load(fd)\n\nlabels = matrix[:, 1].toarray()\nx = matrix[:, 2:]\n\npredictions_by_class = model.predict_proba(x)\npredictions = predictions_by_class[:, 1]\n\nprecision, recall, thresholds = precision_recall_curve(labels, predictions)\n\nauc = metrics.auc(recall, precision)\n\nwith open(scores_file, 'w') as fd:\n json.dump({'auc': auc}, fd)\n\nwith open(plots_file, 'w') as fd:\n json.dump({'prc': [{\n 'precision': p,\n 'recall': r,\n 'threshold': t\n } for p, r, t in zip(precision, recall, thresholds)\n ]}, fd)\n","repo_name":"ashutosh1919/dvc_demo","sub_path":"src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"30"}
+{"seq_id":"16481837709","text":"# -*- coding: utf-8 -*-\nimport os, time, random\nfrom multiprocessing import pool, Process, Queue\nimport subprocess\n# fork进程\nprint('Process (%s) start...' % os.getpid())\n# Only works on Unix/Linux/Mac:\n# pid = os.fork() \n# fork()调用一次,返回两次,操作系统自动把当前进程(称为父进程)复制了一份(称为子进程),\n# 分别在父进程和子进程内返回\n# if pid == 0:\n# print('I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid()))\n# else:\n# print('I (%s) just created a child process (%s).' % (os.getpid(), pid))\n\n# 子进程\n# print('$ nslookup www.python.org')\n# r = subprocess.call(['nslookup', 'www.python.org'])\n# print('Exit code:', r)\n# 子进程输入\nprint('$ nslookup')\np = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\noutput, err = p.communicate(b'set q=mx\\npython.org\\nexit\\n')\n# print(output.decode('utf-8'))\nprint('Exit code:', p.returncode)\n# 进程通信: Queue、Pipes\ndef write(q):\n print('Process to write : %s' % os.getpid())\n for value in ['A', 'B', 'C']:\n print('Put %s to queue...' % value)\n q.put(value)\n time.sleep(random.random())\ndef read(q):\n print('Process to read: %s' % os.getpid())\n while True:\n value = q.get(True)\n print('Get %s from queue.' % value)\nif __name__ == '__main__':\n # 父进程创建Queue,并传给各个子进程:\n q = Queue()\n pw = Process(target=write, args=(q,))\n pr = Process(target=read, args=(q,))\n # 启动子进程pw,写入:\n pw.start()\n # 启动子进程pr,读取:\n pr.start()\n # 等待pw结束:\n pw.join()\n # pr进程里是死循环,无法等待其结束,只能强行终止:\n pr.terminate() ","repo_name":"zhipairen/python-demos","sub_path":"demo/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"9018459611","text":"\"\"\"\n The following script is intended to translate a specific\n JSON format string created by my PL/SQL script.\n Since I made a JSON only to the REC_SCREEN table,\n the query is applied only to this table, assuming that other\n tables are filled with data.\n I also created queries for the rest tables but did not\n apply them, because the JSON data of these tables is partial\n being internal compound objects of REC_SCREEN table.\n\n The main function responsible for parsing is \"retrieve_queries\".\n Before understanding its logic it is advisable to watch the\n structure of JSON in data1.json.\n\"\"\"\n\nimport cx_Oracle\nimport json\nimport config\n\nrclsql = [] # list to store queries to recycler table\ntransql = [] # list to store queries to transaction table\nrec_scrsql = [] # list to store queries to rec_screen table\ntransaction = {} # temp dict to store json object of transaction table data\nrecycler = {} # temp dict to store json object of recycler table data\n\n\"\"\"\n Recursive function which parses the JSON object\ninto INSERT queries for each table, i.e. rec_screen,\ntransaction, and recycler.\n The queries are stored in lists respective to the table names:\nrclsql, transql, rec_scrsql.\n\"\"\"\ndef retrieve_queries(table, i, table_name):\n if i < 4:\n keylist = \"(\"\n valuelist = \"(\"\n firstPair = True\n for key, value in table.items():\n if value == \"\" or value == 0:\n continue\n if not firstPair:\n keylist += \", \"\n valuelist += \", \"\n firstPair = False\n if key == \"transaction\":\n transaction = value.copy()\n retrieve_queries(transaction, i + 1, key)\n value = value[\"id\"]\n key = key[:4] # cut the 'transaction' to 'tran'\n key += \"_id\" # 'tran_id' is the column name\n if key == \"recycler\":\n recycler = value.copy()\n retrieve_queries(recycler, i + 1, key)\n value = value[\"id\"]\n key = \"rcl_id\" # here the column name is assigned explicitly\n keylist += key\n if isinstance(value, str):\n valuelist += \"'\" + value + \"'\"\n else:\n valuelist += str(value)\n keylist += \")\"\n valuelist += \")\"\n stmt = \"INSERT INTO \" + table_name + \" \" + keylist + \" VALUES \" + valuelist\n# print(stmt)\n if table_name == \"recycler\":\n rclsql.append(stmt)\n elif table_name == \"transaction\":\n transql.append(stmt)\n elif table_name == \"rec_screen\":\n rec_scrsql.append(stmt)\n\ndef unpack_json(jsonfile):\n \"\"\"\n Read data from json file.\n Call function that creates INSERT queries from json data\n :param: JSON file name\n \"\"\"\n with open (jsonfile, 'r') as f:\n jsondata = json.loads(f.read())\n for obj in jsondata:\n retrieve_queries(obj, 1, \"rec_screen\")\n\ndef create_connection():\n \"\"\"\n Create a database connection to the cx_Oracle database\n specified by config.py (imported)\n :return: Connection object or None\n \"\"\"\n conn = None\n try:\n conn = cx_Oracle.connect(\n config.username,\n config.password,\n config.dsn,\n encoding=config.encoding)\n except cx_Oracle.Error as error:\n print(error)\n\n return conn\n\ndef create_recscreen(conn):\n \"\"\"\n Insert data from json into the REC_SCREEN table\n :param: conn:\n \"\"\"\n cur = conn.cursor()\n for query in rec_scrsql:\n cur.execute(query)\n conn.commit()\n\ndef main():\n conn = create_connection()\n unpack_json('data1.json')\n with conn:\n create_recscreen(conn)\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"K0ra/Kaspi","sub_path":"Marathon/jsonToSQL/jsonToSql.py","file_name":"jsonToSql.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"40370043932","text":"\"\"\"\nGiven a binary array nums, you should delete one element from it.\n\nReturn the size of the longest non-empty subarray containing only 1's in the resulting array. Return 0 if there is no such subarray.\n\n \n\nExample 1:\n\nInput: nums = [1,1,0,1]\nOutput: 3\nExplanation: After deleting the number in position 2, [1,1,1] contains 3 numbers with value of 1's.\nExample 2:\n\nInput: nums = [0,1,1,1,0,1,1,0,1]\nOutput: 5\nExplanation: After deleting the number in position 4, [0,1,1,1,1,1,0,1] longest subarray with value of 1's is [1,1,1,1,1].\nExample 3:\n\nInput: nums = [1,1,1]\nOutput: 2\nExplanation: You must delete one element.\n\"\"\"\n\"\"\"\ni fell hard on this question. need to revisit. eventually came up with a solution where I keep count of zeroes in a given subarray and shift the left pointer until inedx between left and right has zero count of one.\nused booleans to handle edge cases where all the digits are 1's or there a longest given subsequence are all ones but there are preceding zeroes in which case delete one fo those zeroes. outside of edge cases as such my approach was to treat every subarray with only one zero inside of it as if that zero were a one and to compute the maximum length between maximumlength and currentlength = right index - left index +1.\n\"\"\"\n\n\n\n\n\n\n\ndef longestSubarray(nums) :\n\n if len(nums)<=1:\n return 0\n \n found =False\n left = 0\n foundzero = False\n for i in range(len(nums)):\n if nums[i] == 1:\n found =True\n left = i\n break\n foundzero = True\n curlen = 1\n flip = False\n right = left\n allones = True\n obj = {}\n zerocount = 0\n while right < len(nums):\n\n if nums[right] == 0 and flip:\n \n while right < len(nums) and nums[right] == 0:\n zerocount+=1\n temp = right\n right+=1\n if right < len(nums):\n \n while zerocount >1:\n if nums[left] == 0:\n zerocount-=1\n left+=1 \n elif nums[right] == 0 and not flip:\n allones = False\n flip = True\n curlen = max(curlen,right-left+1)\n right+=1\n zerocount+=1\n else:\n curlen = max(curlen,(right-left)+1)\n right+=1\n \n if foundzero and allones:\n return curlen\n return curlen-1","repo_name":"AlexeiVartoumian/algorithmicProblems","sub_path":"LeetCode/Sliding Window/Longest Subarray of 1's After Deleting One Element.py","file_name":"Longest Subarray of 1's After Deleting One Element.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"22907126144","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 9 23:10:12 2019\n\n@author: tanma\n\"\"\"\n\nimport os,sys\nfilenames = []\nfor file in os.listdir():\n filenames.append(file)\n\nfrom keras.models import Model\nfrom keras.layers import Input, LSTM, GRU, Dense, Embedding\nfrom keras.layers import Bidirectional, RepeatVector, Concatenate, Activation, Dot, Lambda\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport keras.backend as K\nfrom gensim.models import Word2Vec\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif len(K.tensorflow_backend._get_available_gpus()) > 0:\n from keras.layers import CuDNNLSTM as LSTM\n from keras.layers import CuDNNGRU as GRU \n\nfrom PyPDF2 import PdfFileReader\n\ninput_text = []\ninput_texts = []\ntarget_texts = []\ntarget_texts_inputs = []\nlines = []\nfor line in open('all_rel_data.txt'):\n lines.append(line.rstrip())\n\nwhile '' in lines:\n lines.remove('')\n \nfor i in lines[:-1]:\n input_texts.append(i)\n\nfor i in input_texts:\n input_text.append(i.split())\n\ntranslation_texts = lines[1:]\n\nfor i in translation_texts:\n target_texts.append(i+' ')\n target_texts_inputs.append(' '+i)\n\n# some config\nBATCH_SIZE = 32\nEPOCHS = 100 \nLATENT_DIM = 256 \nNUM_SAMPLES = 10000 \nMAX_SEQUENCE_LENGTH = 100\nMAX_NUM_WORDS = 20000\nEMBEDDING_DIM = 100\n\nmodelx = Word2Vec(input_text)\nword_vectors = modelx.wv\nMAX_NB_WORDS = len(word_vectors.vocab)\n \n\ntokenizer_inputs = Tokenizer(num_words=MAX_NUM_WORDS)\ntokenizer_inputs.fit_on_texts(input_texts)\ninput_sequences = tokenizer_inputs.texts_to_sequences(input_texts)\n\nword2idx_inputs = tokenizer_inputs.word_index\nprint('Found %s unique input tokens.' % len(word2idx_inputs))\n\nmax_len_input = max(len(s) for s in input_sequences)\n\ntokenizer_outputs = Tokenizer(num_words=MAX_NUM_WORDS, filters='')\ntokenizer_outputs.fit_on_texts(target_texts + target_texts_inputs) \ntarget_sequences = tokenizer_outputs.texts_to_sequences(target_texts)\ntarget_sequences_inputs = tokenizer_outputs.texts_to_sequences(target_texts_inputs)\n\nword2idx_outputs = tokenizer_outputs.word_index\nprint('Found %s unique output tokens.' % len(word2idx_outputs))\n\nnum_words_output = len(word2idx_outputs) + 1\n\nmax_len_target = max(len(s) for s in target_sequences)\n\nencoder_inputs = pad_sequences(input_sequences, maxlen=max_len_input)\nprint(\"encoder_inputs.shape:\", encoder_inputs.shape)\nprint(\"encoder_inputs[0]:\", encoder_inputs[0])\n\ndecoder_inputs = pad_sequences(target_sequences_inputs, maxlen=max_len_target, padding='post')\nprint(\"decoder_inputs[0]:\", decoder_inputs[0])\nprint(\"decoder_inputs.shape:\", decoder_inputs.shape)\n\ndecoder_targets = pad_sequences(target_sequences, maxlen=max_len_target, padding='post')\n\nprint('Loading word vectors...')\nword2vec = {}\nwith open(os.path.join('very_large_data/glove.6B.%sd.txt' % EMBEDDING_DIM),'rb') as f:\n for line in f:\n values = line.split()\n word = values[0]\n vec = np.asarray(values[1:], dtype='float32')\n word2vec[word] = vec\nprint('Found %s word vectors.' 
% len(word2vec))\n\n\n\n\nprint('Filling pre-trained embeddings...')\nnum_words = min(MAX_NUM_WORDS, len(word2idx_inputs) + 1)\nembedding_matrix = np.zeros((num_words, EMBEDDING_DIM))\nfor word, i in word2idx_inputs.items():\n if i < MAX_NUM_WORDS:\n embedding_vector = word2vec.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\nwv_matrix = (np.random.rand(MAX_NB_WORDS, EMBEDDING_DIM) - 0.5) / 5.0\nfor word, i in word2idx_inputs.items():\n if i >= MAX_NB_WORDS:\n continue\n try:\n embedding_vector = word_vectors[word]\n wv_matrix[i] = embedding_vector\n except:\n pass \n\nembedding_layer = Embedding(\n num_words,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=max_len_input,\n trainable=True\n)\n\ndecoder_targets_one_hot = np.zeros(\n (\n len(input_texts),\n max_len_target,\n num_words_output\n ),\n dtype='float32'\n)\n\n\nfor i, d in enumerate(decoder_targets):\n for t, word in enumerate(d):\n decoder_targets_one_hot[i, t, word] = 1\n\n\nencoder_inputs_placeholder = Input(shape=(max_len_input,))\nx = embedding_layer(encoder_inputs_placeholder)\nencoder = LSTM(\n LATENT_DIM,\n return_state=True,\n)\nencoder_outputs, h, c = encoder(x)\n\nencoder_states = [h, c]\n\ndecoder_inputs_placeholder = Input(shape=(max_len_target,))\n\ndecoder_embedding = Embedding(num_words_output, LATENT_DIM)\ndecoder_inputs_x = decoder_embedding(decoder_inputs_placeholder)\n\ndecoder_lstm = LSTM(\n LATENT_DIM,\n return_sequences=True,\n return_state=True,\n)\ndecoder_outputs, _, _ = decoder_lstm(\n decoder_inputs_x,\n initial_state=encoder_states\n)\n\ndecoder_dense = Dense(num_words_output, activation='softmax')\ndecoder_outputs = decoder_dense(decoder_outputs)\n\nmodel = Model([encoder_inputs_placeholder, decoder_inputs_placeholder], decoder_outputs)\n\nmodel.compile(\n optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy']\n)\nr = model.fit(\n [encoder_inputs, decoder_inputs], decoder_targets_one_hot,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n # validation_split=0.2,\n)\n\n\"\"\"\nplt.plot(r.history['loss'], label='loss')\nplt.plot(r.history['val_loss'], label='val_loss')\nplt.legend()\nplt.show()\n\nplt.plot(r.history['acc'], label='acc')\nplt.plot(r.history['val_acc'], label='val_acc')\nplt.legend()\nplt.show()\n\"\"\"\nmodel.save('s2s.h5')\n\nencoder_model = Model(encoder_inputs_placeholder, encoder_states)\n\ndecoder_state_input_h = Input(shape=(LATENT_DIM,))\ndecoder_state_input_c = Input(shape=(LATENT_DIM,))\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\n\ndecoder_inputs_single = Input(shape=(1,))\ndecoder_inputs_single_x = decoder_embedding(decoder_inputs_single)\n\ndecoder_outputs, h, c = decoder_lstm(\n decoder_inputs_single_x,\n initial_state=decoder_states_inputs\n)\n\ndecoder_states = [h, c]\n\ndecoder_outputs = decoder_dense(decoder_outputs)\n\ndecoder_model = Model(\n [decoder_inputs_single] + decoder_states_inputs, \n [decoder_outputs] + decoder_states\n)\n\nidx2word_eng = {v:k for k, v in word2idx_inputs.items()}\nidx2word_trans = {v:k for k, v in word2idx_outputs.items()}\n\n\ndef decode_sequence(input_seq):\n states_value = encoder_model.predict(input_seq)\n\n target_seq = np.zeros((1, 1))\n\n target_seq[0, 0] = word2idx_outputs['']\n\n eos = word2idx_outputs['']\n\n output_sentence = []\n for _ in range(max_len_target):\n output_tokens, h, c = decoder_model.predict(\n [target_seq] + states_value\n )\n \n idx = np.argmax(output_tokens[0, 0, :])\n\n if eos == idx:\n break\n\n word = ''\n if idx 
> 0:\n word = idx2word_trans[idx]\n output_sentence.append(word)\n\n target_seq[0, 0] = idx\n\n states_value = [h, c]\n\n return ' '.join(output_sentence)\n\n\n\nwhile True:\n i = np.random.choice(len(input_texts))\n input_seq = encoder_inputs[i:i+1]\n translation = decode_sequence(input_seq)\n print('-')\n print('Input:', input_texts[i])\n print('Translation:', translation)\n\n ans = input(\"Continue? [Y/n]\")\n if ans and ans.lower().startswith('n'):\n break\n\ndef custom_input(string):\n input_seq = tokenizer_inputs.texts_to_sequences(string)\n encoder_in = pad_sequences(input_seq, maxlen=max_len_input)\n translation = decode_sequence(encoder_in)\n return translation\n\ntake_input = str(input())\nprint(custom_input(take_input))\n\n \n","repo_name":"lordtt13/Context-Based-Text-Generator","sub_path":"new_solver.py","file_name":"new_solver.py","file_ext":"py","file_size_in_byte":7336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"35112032737","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\n\r\n# Görüntüyü yükle\r\nimage = cv2.imread('image.jpg', 0) # tek kanal gri görüntü\r\n\r\n# Histogram için 256 elemanlı bir liste oluştur (8 BİT FOTOĞRAF)\r\nhistogram = [0] * 256\r\n\r\n# Görüntünün histogramı hesapla\r\nfor row in image:\r\n for pixel_value in row:\r\n histogram[pixel_value] += 1\r\n\r\n# Histogramı görselleştir\r\nplt.bar(range(256), histogram, color='gray', alpha=0.7)\r\nplt.title('Görüntü Histogramı')\r\nplt.xlabel('Piksel Değeri')\r\nplt.ylabel('Piksel Sayısı')\r\ncv2.imshow(\" \",image) # resmi gösterir\r\nplt.show()\r\n\r\n# GÖRÜNTÜNÜN HİSTOGRAMI\r\n\r\n# Bu örnekte, her bir pikselin değerini alarak, histogram adlı 256 elemanlı bir liste içinde ilgili değerin sıklığını arttırıyoruz.\r\n# Daha sonra, Matplotlib kütüphanesini kullanarak bu histogramı görselleştiriyoruz.","repo_name":"aahmetozen/Goruntu_isleme_odevler","sub_path":"Goruntu_isleme/Ödev1/görüntü_işleme_ödev.py","file_name":"görüntü_işleme_ödev.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"28904690873","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\napp_name = 'accounts'\nurlpatterns = [\n path('', views.index, name='index'),\n path('login/', views.log_in, name='login'),\n path('logout/', views.log_out, name='logout'),\n path('signup/', views.signup, name='signup'),\n path('/', views.user, name='profile'),\n path('/follow/', views.follow, name='follow'),\n \n]\n","repo_name":"EHwooKim/project10-fork","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"5584577848","text":"import boto3\r\nimport json\r\n\r\n# Define the client to interact with Lex\r\nclient = boto3.client('lexv2-runtime')\r\ndef lambda_handler(event, context):\r\n # msg_from_user = event['messages'][0]\r\n # change this to the message that user submits on \r\n # your website using the 'event' variable\r\n print(event)\r\n msg_from_user = event[\"messages\"][0]\r\n print(f\"Message from frontend: {msg_from_user}\")\r\n # Initiate conversation with Lex\r\n botMessage = \"Please try again.\"\r\n if msg_from_user is None or len(msg_from_user) < 1:\r\n return {\r\n 'statusCode': 200,\r\n 'body': json.dumps(botMessage)\r\n }\r\n \r\n response = client.recognize_text(\r\n botId='UZY8NMWSUL', # MODIFY HERE\r\n botAliasId='TSTALIASID', # MODIFY HERE\r\n localeId='en_US',\r\n sessionId='testuser',\r\n text=msg_from_user[\"unstructured\"][\"text\"])\r\n \r\n msg_from_lex = response.get('messages', [])\r\n if msg_from_lex:\r\n \r\n print(f\"Message from Chatbot: {msg_from_lex[0]['content']}\")\r\n print(response)\r\n resp = {\r\n 'statusCode': 200,\r\n 'messages': [{\"type\": \"unstructured\", \r\n \"unstructured\": {\r\n \"text\": json.dumps(msg_from_lex[0]['content'])\r\n }}]\r\n }\r\n # modify resp to send back the next question Lex would ask from the user\r\n \r\n # format resp in a way that is understood by the frontend\r\n # HINT: refer to function insertMessage() in chat.js that you uploaded\r\n # to the S3 bucket\r\n return resp\r\n \r\n \r\n\r\n","repo_name":"ZEwithBayesTaylor/Dining-Suggestion-Chatbot","sub_path":"LF0.py","file_name":"LF0.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"42828671651","text":"from __future__ import print_function\n\nimport sys\nimport time\nimport numpy as np\nimport galsim\n\nseed = 140101\nrng = galsim.UniformDeviate(seed)\n\ntreering_func = galsim.SiliconSensor.simple_treerings(0.26, 47.)\ntreering_center = galsim.PositionD(0,0)\n\nskyCounts = 800.\nprint('skyCounts = ',skyCounts)\n\n# Not an LSST wcs, but just make sure this works properly with a non-trivial wcs.\nwcs = galsim.FitsWCS('../../tests/fits_files/tnx.fits')\n\nt0 = time.time()\nimage = galsim.ImageF(2000, 500, wcs=wcs)\nprint('image bounds = ',image.bounds)\n\nnrecalc = 1.e300\nsensor = galsim.SiliconSensor(rng=rng, nrecalc=nrecalc,\n treering_func=treering_func, treering_center=treering_center)\n\n# For regular sky photons, we can just use the pixel areas to buidl the sky image.\n# At this point the image is blank, so area is just from tree rings.\nsensor_area = sensor.calculate_pixel_areas(image)\nsensor_area.write('sensor_area.fits')\n\n# We also need to account for the distortion of the wcs across the image. \n# This expects sky_level in ADU/arcsec^2, not ADU/pixel.\nimage.wcs.makeSkyImage(image, sky_level=1.)\nimage.write('wcs_area.fits')\n\n# Rescale so that the mean sky level per pixel is skyCounts\nmean_pixel_area = image.array.mean()\nimage *= skyCounts / mean_pixel_area\n\n# Now multiply by the area due to the sensor effects.\nimage *= sensor_area\n\n# Finally, add noise. What we have here so far is the expectation value in each pixel.\n# We need to realize this according to Poisson statistics with these means.\nnoise = galsim.PoissonNoise(rng)\nimage.addNoise(noise)\nt1 = time.time()\nprint('Time to make sky image = ',t1-t0)\n\nimage.write('sky.fits')\n\n# Check that the photons follow Poisson statistics\nimport matplotlib.pyplot as plt\nfrom scipy.stats import poisson\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nbin_width = 5\nbins = np.arange(0,2*skyCounts,bin_width)\nn, bins, p = ax.hist(image.array.ravel(), bins=bins, histtype='step', color='blue', fill=True)\n\nnpix = np.prod(image.array.shape)\nax.plot(bins, npix * bin_width * poisson.pmf(bins, skyCounts), color='green')\n\nax.set_xlabel('photons per pixel')\nax.set_ylabel('n pixels')\nplt.tight_layout()\nplt.savefig('poisson_test.pdf')\n","repo_name":"kernsuite-debian/galsim","sub_path":"devel/lsst/treering_skybg2.py","file_name":"treering_skybg2.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"73649152084","text":"#python\n\nimport modo\nfrom sys import exit\n\nframe_range = modo.Scene().currentRange\nframe_range = range(frame_range[0], frame_range[1])\n\nfps = modo.Scene().fps\nchannel = modo.Scene().item('Mesh').position.x\n\nif not channel.isAnimated:\n\texit\n\nfrozen_values = [channel.get(frame/fps) for frame in frame_range]\nfor index, frame in enumerate(frame_range):\n\tchannel.envelope.keyframes.add(frozen_values[index], frame/fps)\n\n# set to linear interpolation\nchannel.envelope.interpolation = 1","repo_name":"adamohern/MODDER","sub_path":"assets/snippets/keyframes.freeze.py","file_name":"keyframes.freeze.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"}
+{"seq_id":"1131344638","text":"try:\n from django.conf.urls import url, patterns\nexcept ImportError:\n from django.conf.urls.defaults import url, patterns\nfrom django.views.generic import TemplateView\nfrom friendship.views import view_friends, friendship_add_friend, friendship_accept, \\\nfriendship_reject, friendship_cancel, friendship_request_list, \\\nfriendship_request_list_rejected, friendship_requests_detail, followers,\\\nfollowing, follower_add, follower_remove, sortby_users, friend_invite, add_counselor\nfrom userprofile.views import EditProfile\nurlpatterns = patterns('',\n url(r'^profile/$', TemplateView.as_view(template_name='friendship/sprofile.html') ),\n url(r'^users/$', view=sortby_users, name='friendship_view_users',\n ),\n url(r'editprofile/$', EditProfile, name=\"edit_profile\" ,),\n url(r'^add/$', add_counselor, name='follow my counselor'),\n url(r'^friends/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',\n view_friends,\n 'friendship_view_friends',\n ),\n url(r'^friend/add/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',\n view=friendship_add_friend,\n name='friendship_add_friend',\n ),\n url(\n regex=r'^friend/accept/(?P\\d+)/$',\n view=friendship_accept,\n name='friendship_accept',\n ),\n url(\n regex=r'^friend/reject/(?P\\d+)/$',\n view=friendship_reject,\n name='friendship_reject',\n ),\n url(\n regex=r'^friend/cancel/(?P\\d+)/$',\n view=friendship_cancel,\n name='friendship_cancel',\n ),\n url(\n regex=r'^friend/requests/$',\n view=friendship_request_list,\n name='friendship_request_list',\n ),\n url(\n regex=r'^friend/requests/rejected/$',\n view=friendship_request_list_rejected,\n name='friendship_requests_rejected',\n ),\n url(\n regex=r'^friend/request/(?P\\d+)/$',\n view = friendship_requests_detail,\n name='friendship_requests_detail',\n ),\n url(\n regex=r'^followers/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',\n view=followers,\n name='friendship_followers',\n ),\n url(\n regex=r'^following/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',\n view=following,\n name='friendship_following',\n ),\n url(\n regex = r'^follower/add/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$', view=follower_add, name='follower_add',\n ),\n url(r'^follower/remove/(?P[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,4})/$',follower_remove,'follower_remove',\n ),\n\n url(r'^friends/invite/$', friend_invite, name='friend_invite'),\n )\n","repo_name":"kanimozhimurugan/Nxt4","sub_path":"django_friendship-1.1.0-py2.7.egg/friendship/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"35947223855","text":"# ---\n# jupyter:\n# jupytext:\n# formats: ipynb,py:percent\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.3'\n# jupytext_version: 1.11.4\n# kernelspec:\n# display_name: Python 3 (ipykernel)\n# language: python\n# name: python3\n# ---\n\n# %% [markdown] tags=[]\n# # Compute $\\Delta T$\n\n# %% [markdown]\n# ### Imports\n# %%\nimport numpy as np\n# %%\nimport pandas as pd\nimport xarray as xr\nfrom IPython.display import clear_output\n# %%\nfrom openscm_twolayermodel import ImpulseResponseModel # pip install openscm-twolayermodel\nfrom openscm_units import unit_registry # pip install openscm-units\nfrom scmdata import ScmRun # pip install scmdata\n\n# %load_ext autoreload\n# %autoreload 2\nfrom ar6_ch6_rcmipfigs.constants import INPUT_DATA_DIR_BADC\n# %%\nfrom ar6_ch6_rcmipfigs.utils.badc_csv import read_csv_badc\n\n# %% [markdown]\n# ### General about computing $\\Delta T$:\n# %% [markdown]\n# We compute the change in GSAT temperature ($\\Delta T$) from the effective radiative forcing (ERF) from MAGICC?????? (#TODO: check model and reference), by integrating with the impulse response function (IRF(t-t'))\n#\n# (Geoffroy at al 2013).\n#\n# For any forcing agent $x$, with estimated ERF$_x$, the change in temperature $\\Delta T$ is calculated as:\n#\n# %% [markdown]\n# \\begin{align*}\n# \\Delta T_x (t) &= \\int_0^t ERF_x(t') IRF(t-t') dt' \\\\\n# \\end{align*}\n# %% [markdown]\n# #### The Impulse response function (IRF):\n# In these calculations we use:\n# \\begin{align*}\n# IRF(t) = \\frac{q_1}{d_1} \\exp\\Big(\\frac{-t}{d_1}\\Big) + \\frac{q_2}{d_2} \\exp\\Big(\\frac{-t}{d_2}\\Big)\n# \\end{align*}\n#\n# Where the constants, $q_i$ and $d_i$ are shown below.\n#\n#\n# %% [markdown] tags=[]\n# # Code + figures\n\n# %% jp-MarkdownHeadingCollapsed=true tags=[]\nfn_IRF_constants = INPUT_DATA_DIR_BADC /'recommended_irf_from_2xCO2_2021_02_25_222758.csv'\n\n#irf_consts = pd.read_csv(fn_IRF_constants).set_index('id')\nirf_consts = read_csv_badc(fn_IRF_constants).set_index('id')\n\nld1 = 'd1 (yr)'\nld2 = 'd2 (yr)'\nlq1 = 'q1 (K / (W / m^2))'\nlq2 = 'q2 (K / (W / m^2))'\nmedian = 'median'\nperc5 = '5th percentile'\nperc95 = '95th percentile'\nrecommendation = 'recommendation'\nirf_consts # [d1]\n\n# %%\n# lets get the irf values from 0 until i\nd1 = float(irf_consts[ld1])\nd2 = float(irf_consts[ld2])\nq1 = float(irf_consts[lq1])\nq2 = float(irf_consts[lq2])\neff = float(irf_consts['efficacy (dimensionless)'])\n\nprint(f'd1={d1}, d2={d2}, q1={q1}, q2={q2}')\n\n\n# %% [markdown]\n# ### Path input data\n\n# %% jupyter={\"outputs_hidden\": false} pycharm={\"name\": \"#%%\\n\"}\nfrom ar6_ch6_rcmipfigs.constants import OUTPUT_DATA_DIR, RESULTS_DIR\n\nPATH_DATASET = OUTPUT_DATA_DIR/'fig6_12_ts15_historic_delta_GSAT/hist_ERF_est.csv'\n\n\n# %% [markdown]\n# ## Path output data\n\n# %% jupyter={\"outputs_hidden\": false} pycharm={\"name\": \"#%%\\n\"}\n#PATH_DT_TAB_OUTPUT = RESULTS_DIR / 'tables' / 'table_sens_dT_cs_recommandetion.csv'\nPATH_DF_OUTPUT = OUTPUT_DATA_DIR / 'fig6_12_ts15_historic_delta_GSAT/dT_data_hist_recommendation.nc'\n\n\n# %% [markdown] jupyter={\"outputs_hidden\": false} pycharm={\"name\": \"#%%\\n\"}\n# #### Extra output \n\n\n# %% jupyter={\"outputs_hidden\": false} pycharm={\"name\": \"#%%\\n\"}\nPATH_DT_TIMESERIES = RESULTS_DIR /'tables_historical_attribution' / 'Delta_T_timeseries.csv' \nPATH_ERF_TIMESERIES = RESULTS_DIR /'tables_historical_attribution' / 'ERF_timeseries.csv' \nprint(PATH_DF_OUTPUT)\n\n\n# %% 
[markdown]\n# ## various definitions\n\n# %%\n# name of output variable\nname_deltaT = 'Delta T'\n\nclimatemodel = 'climatemodel'\nscenario = 'scenario'\nvariable = 'variable'\ntime = 'time'\npercentile = 'percentile'\n\n# %% [markdown]\n# ## Set values:\n\n# %% [markdown]\n# We only compute for the recommendation (not e.g. percentiles):\n\n# %%\nIRFpercentiles = [recommendation]\n# {'ECS = 2K':0.526, 'ECS = 3.4K':0.884, 'ECS = 5K': 1.136 }\n\n# %% [markdown]\n# Year to integrate from and to:\n\n# %%\nfirst_y = 1750\nlast_y = 2020\n\n# %% [markdown]\n# **Set reference year for temperature change:**\n\n# %%\nref_year = 1750\n\n# %% [markdown]\n# ### Define variables to look at:\n\n# %%\n# variables to plot:\nvariables_erf_comp = [\n 'CO2', 'N2O', 'CH4', 'HC', 'NOx', 'SO2', 'BC', 'OC', 'NH3'\n]\n# total ERFs for anthropogenic and total:\nvariables_erf_tot = []\nvariables_all = variables_erf_comp + variables_erf_tot\n# Scenarios to plot:\nscenarios_fl = []\n\n\n# %% [markdown]\n# ## IRF function: \n\n# %%\n\ndef IRF(t, d1, q1, d2, q2):\n \"\"\"\n Returns the IRF function for:\n :param q2:\n :param d2:\n :param q1:\n :param d1:\n :param t: Time in years\n :return:\n IRF\n \"\"\"\n #print(f'd1={d1}, d2={d2}, q1={q1}, q2={q2}')\n irf = q1 / d1 * np.exp(-t / d1) + q2 / d2 * np.exp(-t / d2)\n return irf\n # l * (alpha1 * np.exp(-t / tau1) + alpha2 * np.exp(-t / tau2))\n\n\n# %% [markdown]\n# ### Open ERF dataset:\n\n# %%\ndf = pd.read_csv(PATH_DATASET, index_col=0)\nda_ERF = df.to_xarray().to_array()#'variable'\nda_ERF = da_ERF.rename({'index':'year'})\n#ds = xr.open_dataset(PATH_DATASET).sel(year=slice(1700, 2200)) # we need only years until 1700\nds = xr.Dataset({'ERF':da_ERF})\nds\n#da_ERF = ds['ERF']\n\n# %%\n#ds['ERF'].to_pandas().transpose().to_csv(PATH_ERF_TIMESERIES)#'ERF_timeseries.csv')\n\n# %% [markdown]\n# #### Simple pre-processing\n\n# %%\nds['time'] = pd.to_datetime(ds['year'].to_pandas().index.map(str), format='%Y')\n\n# delta_t is 1 (year)\nds['delta_t'] = xr.DataArray(np.ones(len(ds['year'])), dims='year', coords={'year': ds['year']})\nds\n\n# %% [markdown]\n# ## Integrate and compute $\\Delta T$:\n# The code below integrates the read in ERFs with the pre defined impulse response function (IRF).\n\n# %% [markdown]\n# \\begin{align*} \n# \\Delta T (t) &= \\int_0^t ERF(t') IRF(t-t') dt' \\\\\n# \\end{align*}\n\n# %%\n\n\ndef integrate_(i, _var, _nvar, ds_in: xr.Dataset, ds_DT, irf_cnst: dict):\n \"\"\"\n\n :param i:\n :param _var:\n :param _nvar:\n :param ds_in:\n :param ds_DT:\n :param irf_cnst: dictionary\n :return:\n \"\"\"\n #print('_var',_var)\n #print('_nvar',_nvar)\n # lets create a ds that goes from 0 to i inclusive\n ds_short = ds_in[{'year': slice(0, i + 1)}].copy()\n #print(ds_short)\n # lets get the current year\n current_year = ds_short['year'][{'year': i}] # .dt.year\n # lets get a list of years\n _years = ds_short['year'] # .dt.year\n # lets get the year delta until current year(i)\n ds_short['end_year_delta'] = current_year - _years\n\n # lets get the irf values from 0 until i\n d1 = irf_cnst[ld1]\n d2 = irf_cnst[ld2]\n q1 = irf_cnst[lq1]\n q2 = irf_cnst[lq2]\n\n ds_short['irf'] = IRF(\n ds_short['end_year_delta'] * ds_short['delta_t'], d1, q1, d2, q2)\n\n # lets do the famous integral\n ds_short['to_integrate'] = \\\n ds_short[_var] * \\\n ds_short['irf'] * \\\n ds_short['delta_t']\n\n # lets sum all the values up until i and set\n # this value at ds_DT\n # If whole array is null, set value to nan\n if np.all(ds_short['to_integrate'].isnull()): # or last_null:\n 
_val = np.nan\n else:\n # \n\n _ds_int = ds_short['to_integrate'].sum(['year'])\n # mask where last value is null (in order to not get intgral \n _ds_m1 = ds_short['to_integrate'].isel(year=-1)\n # where no forcing data)\n _val = _ds_int.where(_ds_m1.notnull())\n # set value in dataframe:\n ds_DT[_nvar][{'year': i}] = _val\n\n\ndef integrate_to_dT(_ds, from_t, to_t, irf_cnsts, int_var='ERF'):\n \"\"\"\n Integrate forcing to temperature change.\n\n :param _ds: dataset containing the forcings\n :param from_t: start year\n :param to_t: end year\n :param int_var: variables to integrate\n :param irf_cnsts: irf constants\n :return:\n \"\"\"\n # slice dataset\n ds_sl = _ds.sel(year=slice(from_t, to_t))\n len_time = len(ds_sl['year'])\n # lets create a result DS\n ds_DT = ds_sl.copy()\n\n # lets define the vars of the ds\n namevar = name_deltaT\n # set all values to zero for results dataarray:\n ds_DT[namevar] = ds_DT[int_var] * 0\n # Units Kelvin:\n ds_DT[namevar].attrs['unit'] = 'K'\n if 'unit' in ds_DT[namevar].coords:\n ds_DT[namevar].coords['unit'] = 'K'\n\n for i in range(len_time):\n # da = ds[var]\n if (i % 20) == 0:\n print('%s of %s done' % (i, len_time))\n integrate_(i, int_var, namevar, ds_sl, ds_DT, irf_cnsts)\n clear_output()\n # fn = 'DT_%s-%s.nc' % (from_t, to_t)\n #fname = OUTPUT_DATA_DIR/ fn#'DT_%s-%s.nc' % (from_t, to_t)\n # save dataset.\n #ds_DT.to_netcdf(fname)\n return ds_DT\n\n\n# %%\ndef calc_dGSAT(var, ds, ds_out, scenario='scenario'):\n s_y = int(ds.isel(year=0)['year'].values)\n _erf_tmp = ds['ERF'].sel(variable=var).to_pandas()\n unit = \"W/m^2\"\n\n driver = ScmRun(\n \n data=_erf_tmp,\n index=s_y + np.arange(len(_erf_tmp)),\n columns={\n \"unit\": unit,\n \"model\": \"custom\",\n \"scenario\": scenario,\n \"region\": \"World\",\n \"variable\": \"Effective Radiative Forcing\",\n },\n )\n\n impulse_res = ImpulseResponseModel(\n d1=d1 * unit_registry(\"yr\"),\n d2=d2 * unit_registry(\"yr\"),\n q1=q1* unit_registry(\"delta_degC / (W / m^2)\"),\n q2=q2* unit_registry(\"delta_degC / (W / m^2)\"),\n efficacy=eff* unit_registry(\"dimensionless\"),\n )\n dt_tmp = impulse_res.run_scenarios(driver)\n\n\n df_tmp= dt_tmp.filter(variable='Surface Temperature').timeseries()#.lineplot()#['Surface']\n #_ds_dT[var] =df_tmp.transpose()\n \n #ds_out[var] = \n df_tmp = df_tmp.reset_index().iloc[:,12:].transpose().rename({0:var}, axis=1)#.to_xarray()\n year_index = pd.to_datetime(df_tmp.index).year\n df_tmp['year'] = year_index\n df_tmp = df_tmp.set_index('year')\n\n ds_out[var] = df_tmp.to_xarray()[var]\n\n return ds_out\n\n\n# %%\ndef calc_GSAT_all_vars(_ds, ds_out, variables=None, scenario='scenario'):\n \n\n if variables is None:\n variables =_ds['variable'].values\n \n\n _ds_dT = xr.Dataset()\n for var in variables:\n print(var)\n _ds_dT = calc_dGSAT(var, _ds, _ds_dT, scenario=scenario)\n print()\n ds_DT = _ds_dT.to_array(name=name_deltaT)\n\n #ds_out[name_deltaT] = ds_DT\n \n\n return ds_DT\n\n\n# %%\ndef calc_GSAT_all_scenarios(ds, ds_out, scenarios_l = None):\n \n\n scenarios_l = None\n if scenarios_l is None:\n scenarios_l = ds[scenario].values\n _ds_dT = xr.Dataset()\n _ds_out = xr.Dataset()\n for scn in scenarios_l:\n print(scn)\n ds_scn = calc_GSAT_all_vars(ds.sel(scenario = scn), _ds_dT, scenario=scn)\n ds_scn = ds_scn.rename(scn)\n _ds_out[scn] = ds_scn\n ds_DT = _ds_out.to_array(dim=scenario, name=name_deltaT)\n ds_out[name_deltaT] = ds_DT\n \n\n return ds_out\n\n# %% jupyter={\"outputs_hidden\": false} pycharm={\"name\": \"#%%\\n\"}\ndic_ds_old = {}\nfor key in 
IRFpercentiles:\n dic_ds_old[key] = integrate_to_dT(ds, first_y, last_y, irf_consts.loc[key], int_var='ERF')\n\n# %% pycharm={\"name\": \"#%%\\n\"}\ndic_ds = {}\nfor key in IRFpercentiles:\n ds_out = ds.copy(deep=True)\n\n ds_out[name_deltaT]= calc_GSAT_all_vars(ds,ds_out)\n dic_ds[key] = ds_out\n\n# %%\nds#['delta_t']#[{'year': i}] # .dt.year\n\n\n# %% [markdown]\n# ## check:\n\n# %%\nfor per in IRFpercentiles:\n dic_ds[per].isel( variable=0)[name_deltaT].plot()\n\n# %% [markdown]\n# ### Make datset with percentile as dimension:\n# Does really only make sense with percentiles...\n\n# %%\nds_tmp = xr.Dataset(coords=dic_ds[recommendation].coords)\nds_tmp\nfor key in IRFpercentiles:\n ds_tmp[key] = dic_ds[key]['Delta T'] # .dims,dic_ds[key],)\nds['Delta T'] = ds_tmp.to_array('percentile')\n\n# %%\n#ds['Delta T'].sel(percentile='recommendation').to_pandas().transpose().to_csv(PATH_DT_TIMESERIES)\n\n# %%\nds['Delta T'].sel(percentile='recommendation').to_pandas().transpose()\n\n# %% [markdown]\n# # Save dataset:\n\n# %%\nds.sel(year=slice(first_y, last_y)).to_netcdf(PATH_DF_OUTPUT.with_suffix('.nc'))\nprint(f'Saved to ')\nprint(PATH_DF_OUTPUT.with_suffix('.nc'))\n# ds_DT.to_array('percentile')\n# dic_ds[key]['Delta T']\n\n# %%\n","repo_name":"sarambl/AR6_CH6_RCMIPFIGS","sub_path":"ar6_ch6_rcmipfigs/notebooks/fig6_12_and_ts15_spm2/03_historical_deltaGSAT.py","file_name":"03_historical_deltaGSAT.py","file_ext":"py","file_size_in_byte":12007,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"30"}
+{"seq_id":"1229752914","text":"from sys import stdin\n\nn = int(input())\nschedule = []\ndp = [0]*(n+1)\nfor _ in range(n):\n\tx, y = map(int, stdin.readline().split())\n\tschedule.append([x,y])\nmax_num = 0\n\nfor i in range(n):\n\tmax_num = max(max_num, dp[i])\n\tif i + schedule[i][0] > n:\n\t\tcontinue\n\tdp[i + schedule[i][0]] = max(max_num + schedule[i][1], dp[i+schedule[i][0]])\nprint(max(dp))","repo_name":"Jinwon777777/BarkingdogQ","sub_path":"lesson16_DP/15486.py","file_name":"15486.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"38754690858","text":"from email.policy import default\nimport enum\nfrom tkinter import CASCADE\nfrom sqlalchemy import Column, Enum, ForeignKey, Integer, String, DateTime\nfrom sqlalchemy.orm import relationship\nfrom database.models import Base\n\nclass ProfileType(enum.Enum):\n admin = 'admin'\n user = 'user'\n\nclass Profile(Base):\n __tablename__ = \"profiles\"\n id = Column(Integer, primary_key=True)\n type = Column(Enum(ProfileType))\n user_id = Column(ForeignKey('users.id'))\n user = relationship('User', back_populates='profile')\n name = Column(String, default='')\n telephone = Column(String, default='')\n email = Column(String, default='')\n address = relationship('ProfileAddress', cascade='all, delete-orphan', uselist=False)\n __mapper_args__ = {\n # \"polymorphic_identity\": \"profile\",\n \"polymorphic_on\": type,\n }\n\nclass AdminProfile(Profile):\n __tablename__ = \"admin_profiles\"\n id = Column(Integer, ForeignKey(\"profiles.id\"), primary_key=True)\n\n __mapper_args__ = {\n \"polymorphic_identity\": ProfileType.admin,\n }\n\nclass UserProfile(Profile):\n __tablename__ = 'user_profiles'\n id = Column(Integer, ForeignKey(\"profiles.id\"), primary_key=True)\n\n __mapper_args__ = {\n \"polymorphic_identity\": ProfileType.user,\n }","repo_name":"Maydmor/full-stack-template","sub_path":"backend/rest-api/database/models/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"8752120015","text":"\nimport numpy as np\nfrom itertools import product\nfrom .tools import *\nfrom .solid import *\n\nimport scipy.ndimage as ndi\n\n############################# convert array to facet list ##########################################\n\ndef numpy2stl(A, mask_val=0, solid=True):\n \"\"\"\n Reads a numpy array, and list of facets\n\n Inputs:\n A (ndarray) - an 'm' by 'n' 2D numpy array\n Optional input:\n mask_val (float) - any element of the inputted array that is less than this value will not be included in the mesh.\n solid (bool): sets whether to create a solid geometry (with sides and a bottom) or not.\n \n Returns: vertices \n \"\"\"\n\n if mask_val is None:\n mask_val = A.min() - 1. \n min_val = mask_val\n \n print(\"Creating top...\")\n top_vertices, top_faces = array2faces(A, mask_val=mask_val)\n top_triangles = top_vertices[top_faces]\n\n if solid:\n ## Walls\n print(\"Creating walls...\")\n edges = get_open_edges(top_faces)\n perimeters = get_ordered_perimeter(top_vertices, edges )\n wall_triangles = perimeter_to_walls(top_vertices, perimeters, floor_val=min_val)\n \n ##Bottom \n print(\"Creating bottom...\")\n bottom_vertices = top_vertices.copy()\n bottom_vertices[:,2] = min_val\n\n _, bottom_faces = simplify_surface(bottom_vertices, perimeters)\n bottom_faces = bottom_faces[:,[1,0,2]]\n bottom_triangles = bottom_vertices[bottom_faces]\n \n all_triangles = np.concatenate([top_triangles, wall_triangles, bottom_triangles])\n \n else:\n all_triangles = top_triangles\n\n return all_triangles\n\ndef array2faces__(A, mask_val=0):\n \n m, n = A.shape\n xv,yv = np.meshgrid(range(n),range(m))\n vertices = np.stack([xv.ravel(),yv.ravel(),A.ravel()]).T\n\n idxs = np.array(range(m*n)).reshape(m,n)\n\n faces = []\n\n masked = A > mask_val\n for i, k in product(range(m - 1), range(n - 1)):\n\n if ((masked[i, k]) and (masked[i, k+1]) and \n (masked[i+1, k]) and (masked[i+1, k+1])):\n \n faces.append( [idxs[i, k], idxs[i, k+1], idxs[i+1, k+1] ] )\n faces.append( [idxs[i, k], idxs[i+1, k+1], idxs[i+1, k] ] )\n\n faces = np.array(faces)\n\n \n return vertices, faces\n\ndef array2faces(A, mask_val=0):\n \n m, n = A.shape\n xv,yv = np.meshgrid(range(n),range(m))\n vertices = np.stack([xv.ravel(),yv.ravel(),A.ravel()]).T\n\n idxs = np.array(range(m*n)).reshape(m,n)\n\n masked = A > mask_val\n\n tl = idxs[:-1,:-1].ravel()\n tr = idxs[:-1, 1:].ravel()\n bl = idxs[ 1:,:-1].ravel()\n br = idxs[ 1:, 1:].ravel()\n\n all_faces = np.vstack([tl,tr,bl,br])\n\n structure=np.array([[0,0,0],[0,1,1],[0,1,1]])\n masked = ndi.binary_dilation(masked, structure=structure)\n masked = masked[:-1,:-1]\n \n faces = all_faces[:,masked.ravel()]\n faces = faces[[0,1,3,0,3,2],:].T\n faces = faces.reshape(-1,3)\n\n return vertices, faces\n\ndef limit_facet_size(facets, max_width=1000., max_depth=1000., max_height=1000.):\n \"\"\"\n max_width, max_depth, max_height (floats) - maximum size of the stl object (in mm). 
\n Match this to the dimensions of a 3D printer platform.\n \"\"\"\n xsize = facets[:, 3::3].ptp()\n if xsize > max_width:\n facets = facets * float(max_width) / xsize\n\n ysize = facets[:, 4::3].ptp()\n if ysize > max_depth:\n facets = facets * float(max_depth) / ysize\n\n zsize = facets[:, 5::3].ptp()\n if zsize > max_height:\n facets = facets * float(max_height) / zsize\n\n return facets\n\ndef polygon_to_prism(vertices, perimeters=None, base_val=0):\n\n if perimeters is None:\n perimeters = [ np.arange(len(vertices)) ]\n\n wall_triangles = perimeter_to_walls(vertices, perimeters, floor_val=base_val)\n \n _, faces = simplify_surface(vertices, perimeters)\n top_triangles = vertices[faces]\n\n bottom_vertices = vertices.copy()\n bottom_vertices[:,2] = base_val\n bottom_triangles = bottom_vertices[faces[:,[1,0,2]]]\n \n all_triangles = np.concatenate([top_triangles, wall_triangles, bottom_triangles])\n\n return all_triangles\n\ndef perimeter_to_walls(vertices, perimeters, floor_val=0):\n \"\"\"\n \"\"\" \n wall_vertices = []\n\n for peri in perimeters: \n peri = vertices[peri]\n peri_roll = np.roll(peri,1,axis=0)\n\n for n,_ in enumerate(peri):\n\n top_left = np.concatenate([ peri[n,0:2], [floor_val] ])\n top_right = np.concatenate([ peri_roll[n,0:2], [floor_val] ])\n\n bottom_left = np.array( peri[n] )\n bottom_right = np.array( peri_roll[n] )\n\n vert = [top_right, top_left, bottom_right]\n wall_vertices.append(vert)\n\n vert = [bottom_right, top_left, bottom_left]\n wall_vertices.append(vert)\n\n wall_vertices = np.array(wall_vertices)\n return wall_vertices\n\ndef roll2d(image, shifts):\n return np.roll(np.roll(image, shifts[0], axis=0), shifts[1], axis=1)\n\n","repo_name":"EdgarCardenasDeLaHoz/numpy2stl","sub_path":"numpy2stl/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"17074423187","text":"\r\nclass TempTracker:\r\n def __init__(self):\r\n self.temperatures = []\r\n self.tempRecords = 0\r\n self.max = None\r\n self.min = None ##max assumed\r\n self.mean = None ## using only two arbitrary decimals places to report mean\r\n self.temp_sum= 0\r\n\r\n\r\n def insert(self, record):\r\n #insert function will keep track of min max and get_mean\r\n #for simplicity and optimal calculation of parameters\r\n\r\n\r\n #only imput data acepted is integers\r\n if type(record) != type(1):\r\n print (\"different type than int\")\r\n return\r\n\r\n ##input data has to be in range 0..110\r\n if record < 0 or record > 110:\r\n print (\"input out of range\")\r\n return\r\n\r\n self.temperatures.append(record)\r\n self.tempRecords+=1\r\n self.temp_sum+=record\r\n if self.max is None or self.max < record:\r\n self.max = record\r\n if self.min is None or self.min > record:\r\n self.min = record\r\n self.mean = format(self.temp_sum / self.tempRecords, '.2f')\r\n return\r\n\r\n ##returns maximum valid temperature record imputed\r\n def get_max(self):\r\n print(\"something\")\r\n return self.max\r\n\r\n ##returns minimum valid temperature record imputed\r\n def get_min(self):\r\n return self.min\r\n\r\n ##returns mean temperature from valid imputed records\r\n def get_mean(self):\r\n return self.mean\r\n\r\n\r\n ##Function used for testing pourposes\r\n def printall(self):\r\n lista=[self.tempRecords, self.min, self.max]\r\n return lista\r\n","repo_name":"AlejoPena/methSTDS_code_Chllng","sub_path":"temptracker.py","file_name":"temptracker.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"70792114964","text":"import asyncio\nimport io\nimport locale\n\nimport os\nimport shutil\nimport traceback\n\nimport requests\nfrom PIL import Image\nfrom aiogram import types\n\nfrom mutagen.id3 import ID3, APIC, error\nfrom mutagen.mp3 import MP3\nfrom yt_dlp import YoutubeDL\n\nfrom loader import dp, LANG, LANGS_FILE\n\n\nlocale.setlocale(locale.LC_TIME, '')\n\ndownloading_users = []\n\n\nif LANG is not None:\n print(\"Lang : \" + LANG)\nelse:\n print(\"Lang : en\")\n LANG = 'en'\n\n\ndef __(s):\n return LANGS_FILE[s][LANG]\n\n\ndef crop_center(pil_img, crop_width, crop_height):\n img_width, img_height = pil_img.size\n return pil_img.crop(((img_width - crop_width) // 2,\n (img_height - crop_height) // 2,\n (img_width + crop_width) // 2,\n (img_height + crop_height) // 2))\n\n\n@dp.message_handler(regexp=r\"^(http(s)?:\\/\\/)?((w){3}.)?youtu(be|.be)?(\\.com)?\\/.+\")\nasync def get_youtube_audio(event: types.Message):\n print(event.from_user)\n if event.from_user.id not in downloading_users:\n tmp_msg = await event.answer(__('downloading'))\n downloading_users.append(event.from_user.id)\n try:\n ydl_opts = {\n 'outtmpl': 'tmp/yt/%(id)s.%(ext)s',\n 'format': 'bestaudio/best',\n 'postprocessors': [{'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '320'}],\n }\n\n # Download file\n ydl = YoutubeDL(ydl_opts)\n dict_info = ydl.extract_info(event.text, download=True)\n\n thumb = dict_info[\"thumbnail\"]\n\n # Get thumb\n content = requests.get(thumb).content\n image_bytes = io.BytesIO(content)\n\n upload_date = \"Unknown date\"\n try:\n if dict_info is not None and dict_info[\"upload_date\"] is not None:\n upload_date = dict_info[\"upload_date\"]\n upload_date = upload_date[6:8] + \"/\" + upload_date[4:6] + \"/\" + upload_date[0:4]\n except:\n pass\n\n # Send cover\n await event.answer_photo(image_bytes.read(),\n caption=('Track: {}'\n '\\n{} - {}\\n\\n' + __('track_link') + '')\n .format(\n dict_info['title'],\n dict_info[\"uploader\"], upload_date,\n \"https://youtu.be/\" + dict_info[\"id\"]\n ),\n parse_mode='HTML'\n )\n\n # Delete user message\n await event.delete()\n\n location = \"tmp/yt/\" + dict_info[\"id\"] + '.mp3'\n tmp_song = open(location, 'rb')\n\n # TAG audio\n audio = MP3(location, ID3=ID3)\n try:\n audio.add_tags()\n except error:\n pass\n audio.tags.add(APIC(mime='image/jpeg', type=3, desc=u'Cover', data=image_bytes.read()))\n audio.save()\n\n # Create thumb\n roi_img = crop_center(Image.open(image_bytes), 80, 80)\n img_byte_arr = io.BytesIO()\n roi_img.save(img_byte_arr, format='jpeg')\n\n # Send audio\n await event.answer_audio(tmp_song,\n title=dict_info['title'],\n performer=dict_info['uploader'],\n thumb=img_byte_arr.getvalue(),\n disable_notification=True)\n tmp_song.close()\n try:\n shutil.rmtree(os.path.dirname(location))\n except FileNotFoundError:\n pass\n except Exception as e:\n traceback.print_exc()\n await event.answer('download_error' + ' ' + str(e))\n finally:\n await tmp_msg.delete()\n try:\n downloading_users.remove(event.from_user.id)\n except ValueError:\n pass\n else:\n tmp_err_msg = await event.answer('running_download')\n await event.delete()\n await asyncio.sleep(2)\n await tmp_err_msg.delete()\n","repo_name":"m-zagornyak/telegram-music-bot","sub_path":"handlers/users/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"21081716192","text":"import optparse\n\nimport m5\nfrom m5.objects import *\n\n# A single Hybrid Memory Cube (HMC)\nclass HMCSystem(SubSystem):\n #*****************************CROSSBAR PARAMETERS*************************\n # Flit size of the main interconnect [1]\n xbar_width = Param.Unsigned(32, \"Data width of the main XBar (Bytes)\")\n\n # Clock frequency of the main interconnect [1]\n # This crossbar, is placed on the logic-based of the HMC and it has its\n # own voltage and clock domains, different from the DRAM dies or from the\n # host.\n xbar_frequency = Param.Frequency('1GHz', \"Clock Frequency of the main \"\n \"XBar\")\n\n # Arbitration latency of the HMC XBar [1]\n xbar_frontend_latency = Param.Cycles(1, \"Arbitration latency of the XBar\")\n\n # Latency to forward a packet via the interconnect [1](two levels of FIFOs\n # at the input and output of the inteconnect)\n xbar_forward_latency = Param.Cycles(2, \"Forward latency of the XBar\")\n\n # Latency to forward a response via the interconnect [1](two levels of\n # FIFOs at the input and output of the inteconnect)\n xbar_response_latency = Param.Cycles(2, \"Response latency of the XBar\")\n\n # number of cross which connects 16 Vaults to serial link[7]\n number_mem_crossbar = Param.Unsigned(4, \"Number of crossbar in HMC\"\n )\n\n #*****************************SERIAL LINK PARAMETERS***********************\n # Number of serial links controllers [1]\n num_links_controllers = Param.Unsigned(4, \"Number of serial links\")\n\n # Number of packets (not flits) to store at the request side of the serial\n # link. This number should be adjusted to achive required bandwidth\n link_buffer_size_req = Param.Unsigned(10, \"Number of packets to buffer \"\n \"at the request side of the serial link\")\n\n # Number of packets (not flits) to store at the response side of the serial\n # link. 
This number should be adjusted to achive required bandwidth\n link_buffer_size_rsp = Param.Unsigned(10, \"Number of packets to buffer \"\n \"at the response side of the serial link\")\n\n # Latency of the serial link composed by SER/DES latency (1.6ns [4]) plus\n # the PCB trace latency (3ns Estimated based on [5])\n link_latency = Param.Latency('4.6ns', \"Latency of the serial links\")\n\n # Clock frequency of the each serial link(SerDes) [1]\n link_frequency = Param.Frequency('10GHz', \"Clock Frequency of the serial\"\n \"links\")\n\n # Clock frequency of serial link Controller[6]\n # clk_hmc[Mhz]= num_lanes_per_link * lane_speed [Gbits/s] /\n # data_path_width * 10^6\n # clk_hmc[Mhz]= 16 * 10 Gbps / 256 * 10^6 = 625 Mhz\n link_controller_frequency = Param.Frequency('625MHz',\n \"Clock Frequency of the link controller\")\n\n # Latency of the serial link controller to process the packets[1][6]\n # (ClockDomain = 625 Mhz )\n # used here for calculations only\n link_ctrl_latency = Param.Cycles(4, \"The number of cycles required for the\"\n \"controller to process the packet\")\n\n # total_ctrl_latency = link_ctrl_latency + link_latency\n # total_ctrl_latency = 4(Cycles) * 1.6 ns + 4.6 ns\n total_ctrl_latency = Param.Latency('11ns', \"The latency experienced by\"\n \"every packet regardless of size of packet\")\n\n # Number of parallel lanes in each serial link [1]\n num_lanes_per_link = Param.Unsigned( 16, \"Number of lanes per each link\")\n\n # Number of serial links [1]\n num_serial_links = Param.Unsigned(4, \"Number of serial links\")\n\n # speed of each lane of serial link - SerDes serial interface 10 Gb/s\n serial_link_speed = Param.UInt64(10, \"Gbs/s speed of each lane of\"\n \"serial link\")\n\n #*****************************PERFORMANCE MONITORING************************\n # The main monitor behind the HMC Controller\n enable_global_monitor = Param.Bool(False, \"The main monitor behind the \"\n \"HMC Controller\")\n\n # The link performance monitors\n enable_link_monitor = Param.Bool(False, \"The link monitors\" )\n\n # link aggregator enable - put a cross between buffers & links\n enable_link_aggr = Param.Bool(False, \"The crossbar between port and \"\n \"Link Controller\")\n\n enable_buff_div = Param.Bool(True, \"Memory Range of Buffer is\"\n \"divided between total range\")\n\n #*****************************HMC ARCHITECTURE ************************\n # Memory chunk for 16 vault - numbers of vault / number of crossbars\n mem_chunk = Param.Unsigned(4, \"Chunk of memory range for each cross bar \"\n \"in arch 0\")\n\n # size of req buffer within crossbar, used for modelling extra latency\n # when the reuqest go to non-local vault\n xbar_buffer_size_req = Param.Unsigned(10, \"Number of packets to buffer \"\n \"at the request side of the crossbar\")\n\n # size of response buffer within crossbar, used for modelling extra latency\n # when the response received from non-local vault\n xbar_buffer_size_resp = Param.Unsigned(10, \"Number of packets to buffer \"\n \"at the response side of the crossbar\")\n\n# configure host system with Serial Links\ndef config_host_hmc(options, system):\n\n system.hmc_host=HMCSystem()\n\n try:\n system.hmc_host.enable_global_monitor = options.enable_global_monitor\n except:\n pass;\n\n try:\n system.hmc_host.enable_link_monitor = options.enable_link_monitor\n except:\n pass;\n\n # Serial link Controller with 16 SerDes links at 10 Gbps\n # with serial link ranges w.r.t to architecture\n system.hmc_host.seriallink = [SerialLink(ranges = 
options.ser_ranges[i],\n req_size=system.hmc_host.link_buffer_size_req,\n resp_size=system.hmc_host.link_buffer_size_rsp,\n num_lanes=system.hmc_host.num_lanes_per_link,\n link_speed=system.hmc_host.serial_link_speed,\n delay=system.hmc_host.total_ctrl_latency)\n for i in xrange(system.hmc_host.num_serial_links)]\n\n # enable global monitor\n if system.hmc_host.enable_global_monitor:\n system.hmc_host.lmonitor = [ CommMonitor()\n for i in xrange(system.hmc_host.num_serial_links)]\n\n # set the clock frequency for serial link\n for i in xrange(system.hmc_host.num_serial_links):\n system.hmc_host.seriallink[i].clk_domain = SrcClockDomain(clock=system.\n hmc_host.link_controller_frequency, voltage_domain=\n VoltageDomain(voltage = '1V'))\n\n # Connect membus/traffic gen to Serial Link Controller for differrent HMC\n # architectures\n if options.arch == \"distributed\":\n for i in xrange(system.hmc_host.num_links_controllers):\n if system.hmc_host.enable_global_monitor:\n system.membus.master = system.hmc_host.lmonitor[i].slave\n system.hmc_host.lmonitor[i].master = \\\n system.hmc_host.seriallink[i].slave\n else:\n system.membus.master = system.hmc_host.seriallink[i].slave\n if options.arch == \"mixed\":\n if system.hmc_host.enable_global_monitor:\n system.membus.master = system.hmc_host.lmonitor[0].slave\n system.hmc_host.lmonitor[0].master = \\\n system.hmc_host.seriallink[0].slave\n\n system.membus.master = system.hmc_host.lmonitor[1].slave\n system.hmc_host.lmonitor[1].master = \\\n system.hmc_host.seriallink[1].slave\n\n system.tgen[2].port = system.hmc_host.lmonitor[2].slave\n system.hmc_host.lmonitor[2].master = \\\n system.hmc_host.seriallink[2].slave\n\n system.tgen[3].port = system.hmc_host.lmonitor[3].slave\n system.hmc_host.lmonitor[3].master = \\\n system.hmc_host.seriallink[3].slave\n else:\n system.membus.master = system.hmc_host.seriallink[0].slave\n system.membus.master = system.hmc_host.seriallink[1].slave\n system.tgen[2].port = system.hmc_host.seriallink[2].slave\n system.tgen[3].port = system.hmc_host.seriallink[3].slave\n if options.arch == \"same\" :\n for i in xrange(system.hmc_host.num_links_controllers):\n if system.hmc_host.enable_global_monitor:\n system.tgen[i].port = system.hmc_host.lmonitor[i].slave\n system.hmc_host.lmonitor[i].master = \\\n system.hmc_host.seriallink[i].slave\n else:\n system.tgen[i].port = system.hmc_host.seriallink[i].slave\n\n return system\n\n# Create an HMC device and attach it to the current system\ndef config_hmc(options, system, hmc_host):\n\n # Create HMC device\n system.hmc_dev = HMCSystem()\n\n # Global monitor\n try:\n system.hmc_dev.enable_global_monitor = options.enable_global_monitor\n except:\n pass;\n\n try:\n system.hmc_dev.enable_link_monitor = options.enable_link_monitor\n except:\n pass;\n\n\n if system.hmc_dev.enable_link_monitor:\n system.hmc_dev.lmonitor = [ CommMonitor()\n for i in xrange(system.hmc_dev.num_links_controllers)]\n\n # 4 HMC Crossbars located in its logic-base (LoB)\n system.hmc_dev.xbar = [ NoncoherentXBar(width=system.hmc_dev.xbar_width,\n frontend_latency=system.hmc_dev.xbar_frontend_latency,\n forward_latency=system.hmc_dev.xbar_forward_latency,\n response_latency=system.hmc_dev.xbar_response_latency )\n for i in xrange(system.hmc_host.number_mem_crossbar)]\n\n for i in xrange(system.hmc_dev.number_mem_crossbar):\n system.hmc_dev.xbar[i].clk_domain = SrcClockDomain(\n clock=system.hmc_dev.xbar_frequency,voltage_domain=\n VoltageDomain(voltage='1V'))\n\n # Attach 4 serial link to 4 crossbar/s\n for i 
in xrange(system.hmc_dev.num_serial_links):\n if system.hmc_dev.enable_link_monitor:\n system.hmc_host.seriallink[i].master = \\\n system.hmc_dev.lmonitor[i].slave\n system.hmc_dev.lmonitor[i].master = system.hmc_dev.xbar[i].slave\n else:\n system.hmc_host.seriallink[i].master = system.hmc_dev.xbar[i].slave\n\n # Connecting xbar with each other for request arriving at the wrong xbar,\n # then it will be forward to correct xbar. Bridge is used to connect xbars\n if options.arch == \"same\":\n numx = len(system.hmc_dev.xbar)\n\n # create a list of buffers\n system.hmc_dev.buffers = [ Bridge(\n req_size=system.hmc_dev.xbar_buffer_size_req,\n resp_size=system.hmc_dev.xbar_buffer_size_resp)\n for i in xrange(numx * (system.hmc_dev.mem_chunk - 1))]\n\n # Buffer iterator\n it = iter(range(len(system.hmc_dev.buffers)))\n\n # necesarry to add system_port to one of the xbar\n system.system_port = system.hmc_dev.xbar[3].slave\n\n # iterate over all the crossbars and connect them as required\n for i in range(numx):\n for j in range(numx):\n # connect xbar to all other xbars except itself\n if i != j:\n # get the next index of buffer\n index = it.next()\n\n # Change the default values for ranges of bridge\n system.hmc_dev.buffers[index].ranges = system.mem_ranges[\n j * int(system.hmc_dev.mem_chunk):\n (j + 1) * int(system.hmc_dev.mem_chunk)]\n\n # Connect the bridge between corssbars\n system.hmc_dev.xbar[i].master = system.hmc_dev.buffers[\n index].slave\n system.hmc_dev.buffers[\n index].master = system.hmc_dev.xbar[j].slave\n else:\n # Don't connect the xbar to itself\n pass\n\n # Two crossbars are connected to all other crossbars-Other 2 vault\n # can only direct traffic to it local vaults\n if options.arch == \"mixed\":\n\n system.hmc_dev.buffer30 = Bridge(ranges=system.mem_ranges[0:4])\n system.hmc_dev.xbar[3].master = system.hmc_dev.buffer30.slave\n system.hmc_dev.buffer30.master = system.hmc_dev.xbar[0].slave\n\n system.hmc_dev.buffer31 = Bridge(ranges=system.mem_ranges[4:8])\n system.hmc_dev.xbar[3].master = system.hmc_dev.buffer31.slave\n system.hmc_dev.buffer31.master = system.hmc_dev.xbar[1].slave\n\n system.hmc_dev.buffer32 = Bridge(ranges=system.mem_ranges[8:12])\n system.hmc_dev.xbar[3].master = system.hmc_dev.buffer32.slave\n system.hmc_dev.buffer32.master = system.hmc_dev.xbar[2].slave\n\n\n system.hmc_dev.buffer20 = Bridge(ranges=system.mem_ranges[0:4])\n system.hmc_dev.xbar[2].master = system.hmc_dev.buffer20.slave\n system.hmc_dev.buffer20.master = system.hmc_dev.xbar[0].slave\n\n system.hmc_dev.buffer21 = Bridge(ranges=system.mem_ranges[4:8])\n system.hmc_dev.xbar[2].master = system.hmc_dev.buffer21.slave\n system.hmc_dev.buffer21.master = system.hmc_dev.xbar[1].slave\n\n system.hmc_dev.buffer23 = Bridge(ranges=system.mem_ranges[12:16])\n system.hmc_dev.xbar[2].master = system.hmc_dev.buffer23.slave\n system.hmc_dev.buffer23.master = system.hmc_dev.xbar[3].slave\n\n","repo_name":"scale-lab/la-core","sub_path":"gem5/configs/common/HMC.py","file_name":"HMC.py","file_ext":"py","file_size_in_byte":13148,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"30"}
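The HMC.py record above derives its link controller clock and total control latency in comments (16 lanes x 10 Gb/s over a 256-bit datapath = 625 MHz; 4 cycles x 1.6 ns + 4.6 ns link latency = 11 ns). A minimal sketch that simply re-checks that arithmetic; the values come from those comments and the variable names are only for this illustration:

# Reproduce the derivations written in the HMC.py comments above.
num_lanes_per_link = 16       # lanes per serial link
lane_speed_gbps = 10          # Gb/s per lane
data_path_width = 256         # bits

# clk_hmc [MHz] = num_lanes * lane_speed [Gb/s] / data_path_width * 10^3
link_controller_mhz = num_lanes_per_link * lane_speed_gbps / data_path_width * 1e3
assert link_controller_mhz == 625.0

# total_ctrl_latency = link_ctrl_latency (4 cycles at 625 MHz) + link_latency (4.6 ns)
cycle_ns = 1e3 / link_controller_mhz          # 1.6 ns per controller cycle
total_ctrl_latency_ns = 4 * cycle_ns + 4.6    # matches the '11ns' Param above
assert abs(total_ctrl_latency_ns - 11.0) < 1e-6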
+{"seq_id":"73927290963","text":"# _*_ coding: utf-8 _*_\n\n\"\"\"\nA simple TensorFlow example of Linear Regression.\n\nAuthor: Genpeng Xu\nDate:\t2019/04/09\n\"\"\"\n\nimport os\nimport time\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\n# Own scaffolds\nfrom util.data_util import load_birth_life_data\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nLOG_DIR = './runs'\n\n\ndef huber_loss(y_true, y_pred, delta=14.0):\n residual = tf.abs(y_true - y_pred)\n\n def f1():\n return 0.5 * tf.square(residual)\n\n def f2():\n return delta * residual - 0.5 * tf.square(delta)\n\n return tf.cond(residual > delta, f2, f1)\n\n\ndef main():\n data_file = \"../data/birth_life_2010.txt\"\n data = load_birth_life_data(data_file)\n n_samples = len(data)\n run_label = \"uni_var_linreg_placeholder\"\n\n # training parameters\n n_epochs = 100\n\n # create summary file\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n run_dir = os.path.join(LOG_DIR, run_label)\n if not os.path.exists(run_dir):\n os.mkdir(run_dir)\n summary_dir = os.path.join(run_dir, 'summaries')\n if not os.path.exists(summary_dir):\n os.mkdir(summary_dir)\n train_summary_dir = os.path.join(summary_dir, 'train')\n test_summary_dir = os.path.join(summary_dir, 'test')\n if not os.path.exists(train_summary_dir):\n os.mkdir(train_summary_dir)\n if not os.path.exists(test_summary_dir):\n os.mkdir(test_summary_dir)\n\n # Assemble a graph\n # ===================================================================================== #\n\n x = tf.placeholder(dtype=tf.float32, name='x')\n y = tf.placeholder(dtype=tf.float32, name='y')\n\n w = tf.get_variable(name='w', dtype=tf.float32, initializer=tf.constant(0.1))\n b = tf.get_variable(name='b', dtype=tf.float32, initializer=tf.constant(0.1))\n\n with tf.name_scope('y_'):\n y_ = w * x + b\n\n # loss\n with tf.name_scope('loss'):\n loss = huber_loss(y, y_)\n\n with tf.name_scope('train_op'):\n train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(loss)\n\n init = tf.global_variables_initializer()\n\n # Use a session to execute operations in the graph\n # ===================================================================================== #\n\n print(\"[INFO] Starting training...\")\n t0 = time.time()\n\n with tf.Session() as sess:\n sess.run(init)\n train_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)\n\n for i in range(1, n_epochs + 1):\n total_loss = 0\n for xx, yy in data:\n loss_train, _ = sess.run(fetches=[loss, train_op],\n feed_dict={x: xx, y: yy})\n total_loss += loss_train\n print(\"[Train] epoch: %03d, loss: %f\" % (i, total_loss / n_samples))\n w_out, b_out = w.eval(), b.eval()\n\n train_writer.close()\n\n print(\"[INFO] Training finished! ( ^ _ ^ ) V\")\n print(\"[INFO] Done in %f seconds.\" % (time.time() - t0))\n\n plt.plot(data[:, 0], data[:, 1], 'bo', label='Real data')\n plt.plot(data[:, 0], data[:, 0] * w_out + b_out, 'r', label='Predicted data')\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Genpeng/tf-in-action","sub_path":"03_basic_models/uni_var_linreg_placeholder.py","file_name":"uni_var_linreg_placeholder.py","file_ext":"py","file_size_in_byte":3181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"5940494364","text":"N, K = map(int, input().split())\nR, S, P = map(int, input().split())\nT = input()\n\ndef point(i, hand):\n if T[i] == 'r' and hand == 'p':\n return P\n elif T[i] == 's' and hand == 'r':\n return R\n elif T[i] == 'p' and hand == 's':\n return S\n return 0\n\ndp = [{'r': 0, 's': 0, 'p': 0} for _ in range(N)]\n\nfor i in range(N):\n if i - K < 0:\n dp[i]['r'] = point(i, 'r')\n dp[i]['s'] = point(i, 's')\n dp[i]['p'] = point(i, 'p')\n else:\n dp[i]['r'] = point(i, 'r') + max(dp[i-K]['s'], dp[i-K]['p'])\n dp[i]['s'] = point(i, 's') + max(dp[i-K]['r'], dp[i-K]['p'])\n dp[i]['p'] = point(i, 'p') + max(dp[i-K]['r'], dp[i-K]['s'])\n\nans = 0\nfor i in range(N - K, N):\n ans += max(dp[i]['r'], dp[i]['s'], dp[i]['p'])\n\nprint(ans)\n","repo_name":"kabik/procon","sub_path":"atcoder/abc149/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"20449593087","text":"from app import app\nfrom flask import render_template\nfrom flask import redirect, request, flash, url_for\nfrom app.models import Todo\nfrom app.forms import AddTodoForm\n\n@app.route('/undone/')\ndef undone(todo_id):\n todo = Todo.objects(id=todo_id)[0]\n todo.status = 0\n todo.save()\n todos = Todo.objects()\n return render_template('index.html', todos=todos)\n\n\n@app.route('/done/')\ndef done(todo_id):\n todo = Todo.objects(id=todo_id)[0]\n todo.status = 1\n todo.save()\n todos = Todo.objects()\n return render_template('index.html', todos=todos)\n\n\n@app.route('/delete/')\ndef delete(todo_id):\n todo = Todo.objects(id=todo_id)[0]\n Todo.delete(todo)\n todos = Todo.objects()\n return render_template('index.html', todos=todos)\n\n\n@app.route('/')\ndef index():\n todos = Todo.objects()\n return render_template(\"index.html\", todos=todos)\n\n\n@app.route('/addTodo')\ndef addTodo():\n return add()\n\n\n@app.route('/add', methods=['GET','POST'])\ndef add():\n form = AddTodoForm(request.form)\n if request.method == 'POST' and form.validate():\n todo = Todo(content=form.content.data, time=form.time.data, status=form.status.data)\n todo.save()\n flash('success')\n return redirect('/')\n return render_template('add.html', form=form)\n\n# set the secret key. keep this really secret:\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'","repo_name":"zhenshub/awesome-flask-todo","sub_path":"Learning Flask/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"3653808913","text":"import pymongo\n\nfrom .mongod import Mongod\n\n\nclass MongoClient(pymongo.MongoClient):\n def __init__(self, host=None, port=None, **kwargs):\n self._mongod = Mongod()\n self._mongod.start()\n super().__init__(self._mongod.connection_string, **kwargs)\n\n def close(self):\n self._mongod.stop()\n super().close()\n\n def pim_mongodump(self, *args, **kwargs):\n return self._mongod.mongodump(*args, **kwargs)\n\n\nif __name__ == \"__main__\":\n import logging\n logging.basicConfig(level=logging.DEBUG)\n m = MongoClient(\"mongodb://127.0.0.1/something\", 27017)\n m.close()\n","repo_name":"LeonardoLeano333/pymongo_inmemory","sub_path":"pymongo_inmemory/_pim.py","file_name":"_pim.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"}
+{"seq_id":"21288111815","text":"\n# coding=utf-8\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport collections\nfrom io import open\nfrom tqdm import tqdm\n\nfrom transformers.tokenization_bert import whitespace_tokenize\n\nlogger = logging.getLogger(__name__)\n\n\nclass RelationExample(object):\n \"\"\"\n A single training/test example for the NYT dataset.\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n entities=None,\n answer_ids=None,\n orig_answer_texts=None,\n entity_position=None,\n entity_end_position=None,\n is_impossible=None):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.entities = entities\n self.answer_ids = answer_ids\n self.orig_answer_texts = orig_answer_texts\n self.entity_position = entity_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (self.qas_id)\n s += \", question_text: %s\" % (\n self.question_text)\n s += \", doc_tokens: [%s]\" % (\" \".join(self.doc_tokens))\n if self.entities:\n s += \", entities: %s\" % (', '.join(self.entities))\n if self.answer_id:\n s += \", answer_id: %d\" % (self.answer_id)\n if self.is_impossible:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass RelationFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n # tokens,\n # token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n cls_index,\n p_mask,\n paragraph_len,\n entity_type_ids,\n candidate_index,\n candidate_length,\n index_to_entity_id_map,\n answer_mask=None,\n entities=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n # self.tokens = tokens\n # self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.cls_index = cls_index\n self.p_mask = p_mask\n self.paragraph_len = paragraph_len\n self.entity_type_ids = entity_type_ids\n self.candidate_index = candidate_index\n self.candidate_length = candidate_length\n self.index_to_entity_id_map = index_to_entity_id_map\n self.answer_mask = answer_mask\n self.entities = entities\n self.is_impossible = is_impossible\n\n\ndef read_nyt_examples(input_file, is_training, is_with_negative):\n \"\"\"Read a NYT json file into a list of RelationExample.\"\"\"\n with open(input_file, \"r\", encoding='utf-8') as reader:\n input_data = json.load(reader)\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n examples = []\n for key, entry in input_data.items():\n document_text = entry[\"document\"]\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n entity_position = {}\n entities = []\n for c in document_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1)\n\n for p in entry['entities']:\n entity_text = p['text']\n if entity_text == 'NA':\n assert len(entities) == 0\n assert entity_text not in entities\n 
entities.append(entity_text)\n entity_length = len(entity_text)\n entity_position[entity_text] = []\n for entity_start in p['entity_starts']:\n if entity_start < 0:\n entity_position[entity_text].append((-1, -1))\n else:\n t_start = char_to_word_offset[entity_start]\n t_end = char_to_word_offset[entity_start + entity_length - 1]\n entity_position[entity_text].append((t_start, t_end))\n\n assert len(entities) == len(set(entities))\n assert entities[0] == 'NA'\n for qa in entry[\"qas\"]:\n qas_id = qa[\"id\"]\n question_text = qa[\"question\"]\n orig_answer_texts = []\n answer_ids = []\n is_impossible = False\n if is_with_negative:\n is_impossible = qa[\"is_impossible\"]\n if (len(qa[\"answers\"]) < 1) and (not is_impossible):\n raise ValueError(\n \"Each answerable question should have at least 1 answer.\")\n if not is_impossible:\n for answer in qa[\"answers\"]:\n t_orig_answer_text = answer[\"text\"]\n t_answer_id = entities.index(t_orig_answer_text)\n assert t_answer_id > 0\n flag = False\n answer_length = len(t_orig_answer_text)\n for answer_offset in answer['answer_starts']:\n t_start_position = char_to_word_offset[answer_offset]\n t_end_position = char_to_word_offset[answer_offset + answer_length - 1]\n # Only add answers where the text can be exactly recovered from the\n # document. If this CAN'T happen it's likely due to weird Unicode\n # stuff so we will just skip the example. Note that this means for\n # training mode, every example is NOT guaranteed to be preserved.\n actual_text = \" \".join(doc_tokens[t_start_position:(t_end_position + 1)])\n cleaned_answer_text = \" \".join(whitespace_tokenize(t_orig_answer_text))\n if actual_text.find(cleaned_answer_text) == -1:\n logger.warning(\"Could not find answer: '%s' vs. '%s'\",\n actual_text, cleaned_answer_text)\n continue\n else:\n flag = True\n break\n if flag and t_orig_answer_text:\n orig_answer_texts.append(t_orig_answer_text)\n answer_ids.append(t_answer_id)\n else:\n orig_answer_texts.append(\"NA\")\n answer_ids.append(0)\n\n example = RelationExample(\n qas_id=qas_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n entities=entities,\n answer_ids=answer_ids,\n orig_answer_texts=orig_answer_texts,\n entity_position=entity_position,\n is_impossible=is_impossible)\n examples.append(example)\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n doc_stride, max_query_length, is_training,\n cls_token_at_end=False, retain_entity=False,\n cls_token='[CLS]', sep_token='[SEP]', pad_token=0,\n sequence_a_segment_id=0, sequence_b_segment_id=1,\n cls_token_segment_id=0, pad_token_segment_id=0,\n mask_padding_with_zero=True):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n # cnt_pos, cnt_neg = 0, 0\n # max_N, max_M = 1024, 1024\n # f = np.zeros((max_N, max_M), dtype=np.float32)\n\n features = []\n for (example_index, example) in enumerate(tqdm(examples)):\n\n # if example_index % 100 == 0:\n # logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)\n\n query_tokens = tokenizer.tokenize(example.question_text)\n\n retain_token_indices = set()\n if retain_entity:\n for values in example.entity_position.values():\n for e_s, e_e in values:\n for i in range(e_s, e_e+1):\n retain_token_indices.add(i)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in 
enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n if retain_entity and i in retain_token_indices:\n if tokenizer.basic_tokenizer.do_lower_case:\n token = token.lower()\n sub_tokens = [token]\n else:\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n entity_tok_start_position = []\n entity_tok_end_position = []\n candidate_position = {}\n\n for key, values in example.entity_position.items():\n for s, e in values:\n if key not in candidate_position:\n candidate_position[key] = []\n if s < 0:\n candidate_position[key] = [(-1, -1)]\n continue\n t_tok_start_position = orig_to_tok_index[s]\n if e < len(example.doc_tokens) - 1:\n t_tok_end_position = orig_to_tok_index[e + 1] - 1\n else:\n t_tok_end_position = len(all_doc_tokens) - 1\n entity_tok_start_position.append(t_tok_start_position)\n entity_tok_end_position.append(t_tok_end_position)\n candidate_position[key].append((t_tok_start_position, t_tok_end_position))\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n\n # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)\n # Original TF implem also keep the classification token (set to 0) (not sure why...)\n p_mask = []\n\n # CLS token at the beginning\n if not cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = 0\n\n # Query\n for i, token in enumerate(query_tokens):\n tokens.append(token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_a_segment_id)\n p_mask.append(1)\n\n # Paragraph\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(0)\n paragraph_len = doc_span.length\n\n # SEP token\n tokens.append(sep_token)\n segment_ids.append(sequence_b_segment_id)\n p_mask.append(1)\n\n # CLS token at the end\n if cls_token_at_end:\n tokens.append(cls_token)\n segment_ids.append(cls_token_segment_id)\n p_mask.append(0)\n cls_index = len(tokens) - 1 # Index of classification token\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(pad_token)\n input_mask.append(0 if mask_padding_with_zero else 1)\n segment_ids.append(pad_token_segment_id)\n p_mask.append(1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # entity_type_ids\n doc_offset = len(query_tokens) + 2\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n entity_type_ids = [0] * len(input_ids)\n for es, ee in zip(entity_tok_start_position, entity_tok_end_position):\n if es >= doc_start and ee <= doc_end:\n s_p = es - doc_start + doc_offset\n e_p = ee - doc_start + doc_offset\n for p in range(s_p, e_p+1):\n entity_type_ids[p] = 1\n\n # candidate answer start and end positions\n added_ids = []\n candidate_length = 0\n # according to the document statisitcs, candidate count < 50\n # so we use max candidate _length as 50\n max_candidate_number = 50\n candidate_index = [[0] * max_candidate_number for i in range(2)]\n answer_mask = [0] * max_candidate_number\n index_to_entity_id_map = {}\n entity_id_to_answer_id = {}\n for entity_id, entity in enumerate(example.entities):\n for token_start, token_end in candidate_position[entity]:\n t_candidate_start = None\n t_candidate_end = None\n if token_start < 0:\n t_candidate_start = 0\n t_candidate_end = 0\n elif token_start >= doc_start and token_end <= doc_end:\n t_candidate_start = token_start - doc_start + doc_offset\n t_candidate_end = token_end - doc_start + doc_offset\n if t_candidate_start is not None:\n candidate_index[0][candidate_length] = t_candidate_start\n candidate_index[1][candidate_length] = t_candidate_end\n if entity_id in example.answer_ids:\n if entity_id in entity_id_to_answer_id:\n answer_id = entity_id_to_answer_id[entity_id]\n else:\n answer_id = len(entity_id_to_answer_id) + 1 # answer_id show larger than zero\n entity_id_to_answer_id[entity_id] = answer_id\n answer_mask[candidate_length] = answer_id\n added_ids.append(entity_id)\n index_to_entity_id_map[candidate_length] = entity_id\n candidate_length += 1\n span_is_impossible = example.is_impossible\n if not span_is_impossible:\n if len(added_ids) <= 0:\n span_is_impossible = True\n assert sum(answer_mask) == 0\n answer_mask[0] = 1\n else:\n assert added_ids[0] != 0\n\n # make sure the NA is in the first position\n assert candidate_index[0][0] == candidate_index[1][0] == 0\n assert sum(answer_mask) > 0\n\n if example_index < 10:\n logger.info(\"*** Example ***\")\n logger.info(\"unique_id: %s\" % (unique_id))\n logger.info(\"example_index: %s\" % (example_index))\n logger.info(\"doc_span_index: %s\" % (doc_span_index))\n logger.info(\"tokens: %s\" % \" \".join(tokens))\n logger.info(\"token_to_orig_map: %s\" % \" \".join([\n \"%d:%d\" % (x, y) for (x, y) in token_to_orig_map.items()]))\n logger.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in token_is_max_context.items()\n ]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and span_is_impossible:\n logger.info(\"impossible example\")\n\n features.append(\n RelationFeatures(\n unique_id=unique_id,\n example_index=example_index,\n 
doc_span_index=doc_span_index,\n # tokens=tokens,\n # token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n cls_index=cls_index,\n p_mask=p_mask,\n paragraph_len=paragraph_len,\n entity_type_ids=entity_type_ids,\n candidate_index=candidate_index,\n candidate_length=candidate_length,\n index_to_entity_id_map=index_to_entity_id_map,\n answer_mask=answer_mask,\n entities=example.entities,\n is_impossible=span_is_impossible))\n unique_id += 1\n\n return features\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\n # in SQuAD, but does happen.\n new_input_start, new_input_end = [], []\n for text, s, e in zip(orig_answer_text, input_start, input_end):\n tok_answer_text = \" \".join(tokenizer.tokenize(text))\n flag = False\n for new_start in range(s, e + 1):\n for new_end in range(e, s - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n flag = True\n new_input_start.append(new_start)\n new_input_end.append(new_end)\n if flag:\n break\n if flag:\n break\n if not flag:\n new_input_start.append(s)\n new_input_end.append(e)\n return (new_input_start, new_input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. 
We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"logits\"])\n\n\ndef write_nyt_predictions(all_examples, all_features, all_results,\n output_prediction_file, output_nbest_file):\n logger.info(\"Writing predictions to: %s\" % (output_prediction_file))\n logger.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"logit\"])\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n nbest = []\n\n nil_logit = 1000000 # the start logit at the slice with min null score\n seen_prediction = set()\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n for index in range(feature.candidate_length):\n logit = result.logits[index]\n entity_id = feature.index_to_entity_id_map[index]\n entity = feature.entities[entity_id]\n if index:\n assert entity_id > 0\n assert entity != 'NA'\n if not entity_id:\n if logit < nil_logit:\n nil_logit = logit\n else:\n start_position = feature.candidate_index[0][index]\n if not feature.token_is_max_context.get(start_position,\n False):\n continue\n seen_prediction.add(entity)\n nbest.append(\n _NbestPrediction(\n text=entity,\n logit=logit))\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if 'NA' not in seen_prediction and '' not in seen_prediction:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n logit=nil_logit))\n\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", logit=0.))\n\n assert len(nbest) >= 1\n\n nbest = sorted(\n nbest,\n key=lambda x: x.logit,\n reverse=True)\n total_na = 0\n for p in nbest:\n if not p.text:\n total_na += 1\n assert total_na > 0\n if total_na > 1:\n print(feature.entities)\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n text = entry.text\n prob = probs[i]\n '''\n if text in answer_map:\n map_index = answer_map[text]\n alread_prob = nbest_json[map_index]['probability']\n nbest_json[map_index]['probability'] = alread_prob + prob\n continue\n else:\n '''\n output = collections.OrderedDict()\n output[\"text\"] = text\n output[\"probability\"] = prob\n nbest_json.append(output)\n assert len(nbest_json) >= 1\n\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n all_nbest_json[example.qas_id] = nbest_json\n\n with open(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=2) + \"\\n\")\n\n with open(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=2) + \"\\n\")\n\n return all_predictions\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n","repo_name":"lingyongyan/docds","sub_path":"utils_nyt.py","file_name":"utils_nyt.py","file_ext":"py","file_size_in_byte":27861,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"}
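The _check_is_max_context docstring in the record above works through the word "bought" across overlapping spans B and C. A tiny standalone sketch of the same min(left, right) + 0.01 * length score; the span starts and lengths are made up to mirror that docstring example:

from collections import namedtuple

DocSpan = namedtuple("DocSpan", ["start", "length"])

# Token positions for: "the man went to the store and bought a gallon of milk"
spans = [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 5)]   # spans A, B, C
position = 7                                            # the token "bought"

for name, span in zip("ABC", spans):
    end = span.start + span.length - 1
    if not (span.start <= position <= end):
        print(name, "does not contain the token")
        continue
    left = position - span.start
    right = end - position
    score = min(left, right) + 0.01 * span.length
    print(name, left, right, score)   # span C scores highest -> "max context"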
+{"seq_id":"42505968331","text":"import socket\n\ndef main():\n #1.买个手机(创建套接字)\n tcp_server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n #2.插上手机卡(绑定本地信息)\n tcp_server_socket.bind((\"\",7891))\n\n #3.将手机设置成正常的响铃模式(让默认的套接字由主动变成被动)\n tcp_server_socket.listen(128)\n #4.等待别人的电话到来(等待客户端的链接 accept)\n #accept返回元组 左边两个变量,右边一个元组 即元组拆包\n #如果有新的客户端来链接服务器,那么就产生一个新的套接字\n #专门为这个客服端服务\n #client_socket用来为这个客服端服务\n #tcp_server_socket就可以省下来专门等待其他新客服端的链接\n #clientAddr :客户的地址\n #tcp_server_socket负责监听,client_socket负责通信\n #第一个while true是循环等待不同客服端\n #第二个while true是为一个顾客服务\n while True:\n print('----等待一个新的客服端到来----')\n client_socket,clientAddr = tcp_server_socket.accept()\n print('----一个新的客服端已经到来%s----' % str(clientAddr))\n while True:\n #服务端先收,客服端先发\n #接收客服端发送过来的请求\n #recv_data是数据\n #recv默认会堵塞\n recv_data = client_socket.recv(1024)\n print(\"客服端收到的请求是:%s\" % recv_data.decode(\"gbk\"))\n #如果recv解堵塞,那么有两种方式:\n #1.客户端发送过来数据\n #2.客户端调用close导致\n if recv_data:\n #回送一部分数据给客户端\n client_socket.send('服务端已处理'.encode('gbk'))\n #if如果是none和false不成立 以及数据为空\n else:\n break\n #关闭套接字\n client_socket.close()\n print(\"已经为此客户服务完毕\")\n\n tcp_server_socket.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"wang-mumu/network-com","sub_path":"tcp/tcp-server 多个客户端.py","file_name":"tcp-server 多个客户端.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"zh","doc_type":"code","stars":25,"dataset":"github-code","pt":"30"}
+{"seq_id":"29874500357","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport pydeck as pdk\nimport plotly.express as px\nimport fastf1\nimport matplotlib.pyplot as plt\nimport fastf1.plotting\n\n\n##### Function Definitions ####\n\n@st.cache(persist=True)\ndef load_data(select_year, select_race, select_session):\n\n selected_session = fastf1.get_session(select_year, select_race, select_session)\n\n return selected_session\n\n\n\n##### Fast F1 Setup ####\n\n# enable some matplotlib patches for plotting timedelta values and load\n# FastF1's default color scheme\nfastf1.plotting.setup_mpl()\n\nst.set_page_config(page_title=\"Formula 1 Telemetry Analysis\", page_icon='🚓',\n layout='centered', initial_sidebar_state='collapsed')\nst.title(\"F1 Telemetry Analysis\")\nst.markdown(\n \"This application is a Streamlit dashboard that can be used to analyze F1 Telemetry Data\")\n\n\n# select necessary details for each race\nst.header(\"Select Year\")\nvalues_year = [2020, 2021, 2022]\ndefault_ix_year = values_year.index(2022)\nselect_year = st.selectbox('Championship Year', values_year, index=default_ix_year)\nst.write(select_year)\n\n\nst.header(\"Select Race Place\")\nvalues_place = ['Bahrain', 'Saudi Arabia']\ndefault_ix_race = values_place.index('Bahrain')\nselect_race = st.selectbox('Race Places', values_place, index=default_ix_race)\nst.write(select_race)\n\nst.header(\"Select Session\")\nvalues_session = ['Q', 'R']\ndefault_ix_session = values_session.index('Q')\nselect_session = st.selectbox('Sessions', values_session, index=default_ix_session)\nst.write(select_session)\n\nst.header(\"Select Pilots\")\nvalues_pilots = ['LEC', 'PER']\n#default_ix_pilots = values_pilots.index('LEC', 'PER')\nselect_pilots = st.multiselect('Sessions', values_pilots)\nst.write(select_pilots)\n\n\n\n# load a session and its telemetry data\nwith st.spinner(\"Loading data...\"):\n selected_session = load_data(select_year, select_race, select_session)\n st.write(selected_session)\n\n\n\n\n### with st.spinner(\"Loading data...\"):\n### selected_session = load_data(2022, 2, 'Q')\n\n\n\n##### let's plot something\nsession=selected_session\nfastf1.plotting.setup_mpl()\nsession.load()\n\nper_lap = session.laps.pick_driver('PER').pick_fastest()\nlec_lap = session.laps.pick_driver('LEC').pick_fastest()\n\nper_tel = per_lap.get_car_data().add_distance()\nlec_tel = lec_lap.get_car_data().add_distance()\n\nrbr_color = fastf1.plotting.team_color('RBR')\nfer_color = fastf1.plotting.team_color('FER')\n\n###\nfig1, ax = plt.subplots()\nax.plot(per_tel['Time'], per_tel['Speed'], color=rbr_color, label='PER')\nax.plot(lec_tel['Time'], lec_tel['Speed'], color=fer_color, label='LEC')\n\nax.set_xlabel('Time in sec')\nax.set_ylabel('Speed in km/h')\n\nax.legend()\nplt.suptitle(f\"Fastest Lap Speed Comparison \\n \"\n f\"{session.event['EventName']} {session.event.year} Qualifying\")\n\nst.write(fig1)\n\n\n###\nfig2, ax2 = plt.subplots()\nax2.plot(per_lap.get_telemetry().Time, per_lap.get_telemetry().Throttle, color=rbr_color, label='PER')\nax2.plot(lec_lap.get_telemetry().Time, lec_lap.get_telemetry().Throttle, color=fer_color, label='LEC')\n\nax2.set_xlabel('Time in sec')\nax2.set_ylabel('Throttle Percent %')\n\nax2.legend()\nplt.suptitle(f\"Fastest Lap Throttle Comparison \\n \"\n f\"{session.event['EventName']} {session.event.year} Qualifying\")\nst.write(fig2)\n\n\n###\nfig3, ax3 = plt.subplots()\nax3.plot(per_lap.get_telemetry().Time, per_lap.get_telemetry().nGear, color=rbr_color, 
label='PER')\nax3.plot(lec_lap.get_telemetry().Time, lec_lap.get_telemetry().nGear, color=fer_color, label='LEC')\n\nax3.set_xlabel('Time in sec')\nax3.set_ylabel('Gear')\n\nax3.legend()\nplt.suptitle(f\"Fastest Lap Gear Shift Comparison \\n \"\n f\"{session.event['EventName']} {session.event.year} Qualifying\")\nst.write(fig3)","repo_name":"Aerospacerr/Formula1-Telemetry-Data-Analysis","sub_path":"f1_app.py","file_name":"f1_app.py","file_ext":"py","file_size_in_byte":3739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"41056539916","text":"from django.db import models\n\n\nfrom .gamers import Gamer\nfrom .games import Games\nfrom levelupapi.models import eventstatus\n\nclass Events(models.Model):\n \n title = models.CharField(max_length=50, default = \"Unnamed Event\")\n description = models.CharField(max_length=500, default = \"No Description Given\")\n date = models.DateTimeField(auto_now_add=False)\n time = models.TimeField(auto_now_add=False)\n game = models.ForeignKey(Games,\n on_delete=models.SET_DEFAULT, default=1)\n creator = models.ForeignKey(Gamer,\n on_delete=models.SET_DEFAULT, default=1)\n attendees = models.ManyToManyField(Gamer, related_name=\"attending\")\n eventstatus = models.ForeignKey(\"levelupapi.eventstatus\",\n on_delete=models.SET_DEFAULT, default=1)\n \n @property\n def joined(self):\n return self.__joined\n\n @joined.setter\n def joined(self, value):\n self.__joined = value","repo_name":"EugeneTerry/levelup","sub_path":"levelupapi/models/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"70640572884","text":"import json\nfrom datetime import datetime as dt\nfrom datetime import timedelta as td\nimport re\n\nimport scrapy\nimport dateparser\n\nfrom api.app.models.enums import Sport\nfrom crawling.items import Match, Bet\nfrom crawling.enums import Bookmakers\n\n\n\nclass CodereSpider(scrapy.Spider):\n\n # Attributes\n name = \"codere\"\n rotate_user_agent = True\n main_url = 'https://m.apuestas.codere.es/csbgonline/home/GetSports?languageCode=es'\n sports = ['Baloncesto', 'Tenis', 'Voleibol']\n SPORTS_MAP = {\"volleyball\": Sport.VOLLEYBALL, \"basketball\": Sport.BASKETBALL,\n \"tennis\": Sport.TENNIS}\n\n def start_requests(self):\n yield scrapy.Request(url=self.main_url, callback=self.parse)\n\n def parse(self, response):\n \"\"\"Parse all the sports in sports list.\"\"\"\n docs = json.loads(response.body_as_unicode())\n sport_docs = [doc for doc in docs if doc['Name'] in self.sports]\n for sport in sport_docs:\n yield self._getEvents(response, sport['NodeId'], self.parse_sport)\n\n def parse_sport(self, response):\n \"\"\" Parse all the tournament in the sport\"\"\"\n tournament_docs = json.loads(response.body_as_unicode())\n for tournament in tournament_docs:\n yield self._getEvents(response, tournament['ParentNodeId'], self.parse_matches)\n\n def parse_matches(self, response):\n matches = json.loads(response.body_as_unicode())\n for match in matches:\n for game in match['Games']:\n if game['Name'].lower() == 'ganador del partido':\n results = game['Results']\n match_item = Match()\n match_item['team_1'] = results[0]['Name']\n match_item['team_2'] = results[1]['Name']\n bet = Bet()\n bet[\"bookmaker\"] = Bookmakers.CODERE.value\n bet[\"feed\"] = self.name\n bet['date_extracted'] = dt.now()\n bet[\"url\"] = response.url\n bet[\"odds\"] = {\"1\": results[0]['Odd'], \"2\": results[1]['Odd']}\n match_item[\"bets\"] = [bet]\n match_item['sport'] = self.SPORTS_MAP[match['SportHandle']].value\n match_item['tournament'] = match['LeagueName']\n # Get Date\n try:\n match_item['date'] = dateparser.parse((re.search('(-?\\d+)', match['StarDate']).group(0)))\n except AttributeError:\n continue\n yield match_item\n\n def _getEvents(self, response, node_id, callback):\n url = 'https://m.apuestas.codere.es/csbgonline/home/GetEvents?parentid={}'.format(node_id)\n return response.follow(url, callback)\n\n\n def _get_datetime(self, date_str, hour_str):\n year_str = str(dt.now().year)\n datetime = dateparser.parse(' '.join([hour_str, date_str, year_str]))\n if (datetime - dt.now()) < td(days=-1):\n datetime.year += 1\n return datetime\n\n\n\n\n\n","repo_name":"franloza/apiestas","sub_path":"crawling/spiders/codere.py","file_name":"codere.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"30"}
+{"seq_id":"24158885358","text":"x = int(input())\r\ny = int(input())\r\nif x in (1, 8):\r\n if y in (1, 8):\r\n print('3')\r\n elif y != x:\r\n print('5')\r\nelif y in (1, 8):\r\n print('5')\r\nelse: \r\n print('8')\r\n","repo_name":"firstzasz/Practice","sub_path":"chase_king.py","file_name":"chase_king.py","file_ext":"py","file_size_in_byte":191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"459094888","text":"\"\"\"\nCreated on Jan 13, 2018\n\n@author: nhan.nguyen\n\nVerify that user cannot get verkey in local of an unknown did.\n\"\"\"\n\nimport pytest\nfrom indy import did\nfrom indy.error import ErrorCode\nfrom utilities import utils, common, constant\nfrom test_scripts.functional_tests.did.signus_test_base \\\n import DidTestBase\n\n\nclass TestGetKeyForLocalDidWithUnknownDid(DidTestBase):\n @pytest.mark.asyncio\n async def test(self):\n # 1. Create wallet.\n # 2. Open wallet.\n self.wallet_handle = await \\\n common.create_and_open_wallet_for_steps(self.steps,\n self.wallet_name,\n self.pool_name,\n credentials=self.wallet_credentials)\n\n # 3. Get local verkey with unknown did and\n # verify that verkey cannot be gotten.\n self.steps.add_step(\"Get local verkey with unknown did and \"\n \"verify that verkey cannot be gotten\")\n err_code = ErrorCode.WalletItemNotFound\n await utils.perform_with_expected_code(self.steps,\n did.key_for_local_did,\n self.wallet_handle,\n constant.did_my1,\n expected_code=err_code)\n","repo_name":"hyperledger-archives/indy-post-install-automation","sub_path":"test_scripts/functional_tests/did/signus_key_for_local_did_fails_with_unknown_did_test.py","file_name":"signus_key_for_local_did_fails_with_unknown_did_test.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
+{"seq_id":"43114422284","text":"from typing import Callable, Optional\nfrom src.part1_practice.constellation_usecase import ConstellationUseCase\nfrom src.part1_practice.constellation import ConstellationSpecification\n\n\ndef main():\n specification = ConstellationSpecification()\n usecase = ConstellationUseCase(__input_loader, specification)\n try:\n month = usecase.load_month_of_birthday()\n day = usecase.load_day_of_birthday()\n constellation = usecase.find(month, day)\n print('your constellation is : ' + constellation)\n except ValueError as e:\n print(e)\n\n\ndef __input_loader(message):\n return input(message)\n","repo_name":"eno314/BrainTrainingPuzzles","sub_path":"src/part1_practice/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"74346447766","text":"#!/usr/bin/env python\n\"\"\"\nTest the DAQ log client\n\"\"\"\n\nimport os\nimport tempfile\nimport time\nimport unittest\nfrom DAQLog import FileAppender\n\n\nclass TestDAQLogClient(unittest.TestCase):\n \"Test the DAQ log client\"\n\n DIR_PATH = None\n\n @classmethod\n def read_log(cls, log_path):\n \"Return a list of text lines from 'log_path'\"\n lines = []\n with open(log_path, 'r') as fin:\n for line in fin:\n lines.append(line.rstrip())\n return lines\n\n def setUp(self):\n self.collector = None\n\n TestDAQLogClient.DIR_PATH = tempfile.mkdtemp()\n\n def tearDown(self):\n if self.collector is not None:\n self.collector.close()\n\n time.sleep(0.1)\n\n for root, dirs, files in os.walk(TestDAQLogClient.DIR_PATH,\n topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n\n os.rmdir(TestDAQLogClient.DIR_PATH)\n TestDAQLogClient.DIR_PATH = None\n\n def test_daq_log_client(self):\n \"Test FileAppender\"\n log_name = 'foo'\n log_path = os.path.join(TestDAQLogClient.DIR_PATH, \"dash.log\")\n\n self.collector = FileAppender(log_name, log_path)\n\n self.assertTrue(os.path.exists(log_path), 'Log file was not created')\n\n msg = 'Test msg'\n\n self.collector.write(msg)\n\n self.collector.close()\n\n lines = self.read_log(log_path)\n self.assertEqual(1, len(lines), 'Expected 1 line, not %d' % len(lines))\n\n prefix = log_name + ' ['\n\n line = lines[0].rstrip()\n self.assertTrue(line.startswith(prefix),\n 'Log entry \"%s\" should start with \"%s\"' %\n (line, prefix))\n self.assertTrue(line.endswith('] ' + msg),\n 'Log entry \"%s\" should start with \"%s\"' %\n (line, '] ' + msg))\n\n def test_daq_log_client_bad_path(self):\n \"Test FileAppender bad path handling\"\n log_name = 'foo'\n bad_path = os.path.join('a', 'bad', 'path')\n while os.path.exists(bad_path):\n bad_path = os.path.join(bad_path, 'x')\n\n self.assertRaises(Exception, FileAppender, log_name, bad_path)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dglo/dash","sub_path":"DAQLogClientTest.py","file_name":"DAQLogClientTest.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"35109490606","text":"def solution(dirs):\n visited = set()\n last = [0, 0]\n\n for c in dirs:\n old_last = list(last) # deep copy\n if c == \"U\" and last[1] < 5: last[1] += 1\n elif c == \"D\" and last[1] > -5: last[1] -= 1\n elif c == \"R\" and last[0] < 5: last[0] += 1\n elif c == \"L\" and last[0] > -5: last[0] -= 1\n else: print(c, \"Invalid\")\n # 생각이 약간 꼬였는데, 움직였을 때만 이동한 내역에 추가.\n if old_last != last: visited.add(str(sorted([old_last, last])))\n print(visited)\n return len(visited)\n\nprint(solution(\"ULURRDLLU\"))\nprint(solution(\"LULLLLLLU\"))\nprint(solution(\"LRLR\"))","repo_name":"junglesub/hgu_21summer_pps","sub_path":"week1/5-9_유정섭_20210709.py","file_name":"5-9_유정섭_20210709.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"9195023215","text":"from __future__ import annotations\n\nfrom .notifiers import Notifier\nfrom .repositories import JourneyRepository\nfrom datetime import datetime\n\nclass StartJourney:\n def __init__(self, repository: JourneyRepository, notifier: Notifier):\n self.repository = repository\n self.notifier = notifier\n\n def set_params(self, data: dict) -> StartJourney:\n self.data = data\n return self\n\n def execute(self) -> None:\n car = self.repository.get_or_create_car()\n vehicle = self.repository.create_vehicle(vehicle_type=car, **self.data)\n if not vehicle.can_start():\n raise StartJourney.CantStart(\"vehicle can't start\")\n\n journey = self.repository.create_journey(vehicle)\n self.notifier.send_notifications(journey)\n return journey\n\n class CantStart(Exception):\n pass\n\nclass StopJourney:\n def __init__(self, repository: JourneyRepository, notifier: Notifier):\n self.repository = repository\n self.notifier = notifier\n\n def set_params(self, data: dict) -> StopJourney:\n self.data = data\n return self\n\n def execute(self) -> None:\n print(self.data[\"date\"])\n DateBegin = datetime.strptime(self.data[\"date\"], '%d-%m-%Y')\n print(DateBegin)\n self.data.pop('date')\n car = self.repository.get_or_create_car()\n car.save()\n vehicle = self.repository.create_vehicle(vehicle_type=car, **self.data)\n vehicle.save()\n if not vehicle.can_start():\n raise StopJourney.CantStart(\"vehicle can't start\")\n journey_start = self.repository.create_journeyDate(vehicle=vehicle, StartDate=DateBegin)\n journey_start.save()\n return journey_start\n\n class CantStart(Exception):\n pass\n","repo_name":"galigaribaldi/testProof","sub_path":"adventure/usecases.py","file_name":"usecases.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"21094205863","text":"import numpy as np\nimport torch\nfrom torch.autograd import Variable\nfrom collections import Counter\nfrom bilstmTrain import SEPARATOR, MAX_WORD_LEN, REP\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n# Dorin Keshales\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n# Reading the data from the requested file.\ndef read_data(file_name):\n sentences, tags, prefixes, suffixes = [], [], [], []\n\n with open(file_name, \"r\", encoding=\"utf-8\") as file:\n data = file.readlines()\n sentence, sentence_tags, sentence_prefixes, sentence_suffixes = [], [], [], []\n\n # For each line in the file.\n for line in data:\n\n # As long as the sentence isn't over.\n if line is not '\\n':\n\n word, tag = line.strip().split(SEPARATOR)\n sentence.append(word)\n sentence_tags.append(tag)\n\n # For each word save it's prefix and suffix.\n sentence_prefixes.append(word[:3])\n sentence_suffixes.append(word[-3:])\n\n else: # Otherwise.\n sentences.append(sentence)\n prefixes.append(sentence_prefixes)\n suffixes.append(sentence_suffixes)\n tags.append(sentence_tags)\n sentence, sentence_tags, sentence_prefixes, sentence_suffixes = [], [], [], []\n\n return sentences, prefixes, suffixes, tags\n\n\n# Considerate rare words like if the were unknown words in order to train the corresponding embedding vector.\ndef convert_rare_words_to_unknown_token(data, num_occurrences=1, unknown_token=''):\n count = Counter()\n convert_to_unk = set()\n\n # Count the number of occurrences of each word in the training set.\n for sentence in data:\n count.update(sentence)\n\n # Collect the words in the training set that appear only once.\n for word, amount in count.items():\n if amount <= num_occurrences:\n convert_to_unk.add(word)\n\n # Go over each sentence in the training set.\n for sentence in data:\n\n # For each word in the sentence\n for i in range(len(sentence)):\n\n # If the current word appears only once then considerate it as unknown word.\n if sentence[i] in convert_to_unk:\n sentence[i] = unknown_token\n\n # Return the updated training set data.\n return data\n\n\n# Making a words vocabulary where each word has a unique index.\ndef create_words_vocabulary(data, unknown_token='', pad_token=''):\n vocab_words = set()\n\n # Go over each sentence in the data set.\n for sentence in data:\n\n # For each word in the sentence\n for word in sentence:\n vocab_words.add(word)\n\n vocab_words.remove(unknown_token)\n vocab_words = sorted(vocab_words)\n\n # Add the unknown_token.\n vocab_words = [pad_token, unknown_token] + vocab_words\n\n # Map each word to a unique index.\n word_to_ix = {word: i for i, word in enumerate(vocab_words)}\n ix_to_word = {i: word for i, word in enumerate(vocab_words)}\n\n return word_to_ix, ix_to_word\n\n\n# Making a chars vocabulary where each char has a unique index.\ndef create_chars_vocabulary(data, unknown_token='', pad_token=''):\n vocab_chars = set()\n\n # Go over each sentence in the data set.\n for sentence in data:\n\n # For each word in the sentence\n for word in sentence:\n\n # For each char in the word\n for ch in word:\n vocab_chars.add(ch)\n\n vocab_chars = sorted(vocab_chars)\n\n # Add the unknown_token.\n vocab_chars = [pad_token, unknown_token] + vocab_chars\n\n # Map each word to a unique index.\n char_to_ix = {char: i for i, char in enumerate(vocab_chars)}\n ix_to_char = {i: char for i, char in enumerate(vocab_chars)}\n\n return char_to_ix, ix_to_char\n\n\n# Making a tags vocabulary where each tag has a unique index.\ndef 
create_tags_vocabulary(tags, pad_token=''):\n vocab_tags = set()\n\n # Go over each sentence in the data set.\n for sentence_tags in tags:\n\n # For each tag which belongs to a word in the sentence\n for tag in sentence_tags:\n vocab_tags.add(tag)\n\n vocab_tags = sorted(vocab_tags)\n\n vocab_tags = [pad_token] + vocab_tags\n\n # Map each tag to a unique index.\n tag_to_ix = {tag: i for i, tag in enumerate(vocab_tags)}\n ix_to_tag = {i: tag for i, tag in enumerate(vocab_tags)}\n\n return tag_to_ix, ix_to_tag\n\n\n# Replace each word in the data set with its corresponding index.\ndef convert_data_to_indexes(data, vocab, unknown_token=''):\n sentences, words_indexes = [], []\n\n # Go over each sentence in the training set.\n for sentence in data:\n\n # For each word in the sentence\n for word in sentence:\n\n # Find its corresponding index - if not exist then assign the index of the unknown_token.\n ix = vocab.get(word) if word in vocab else vocab.get(unknown_token)\n words_indexes.append(ix)\n\n # Keep the words in the data set in sentences order.\n sentences.append(words_indexes)\n words_indexes = []\n\n # Return the updated data\n return sentences\n\n\n# Replace each char in the data set with its corresponding index.\ndef convert_chars_to_indexes(data, vocab, unknown_token=''):\n sentences, words_indexes, char_indexes = [], [], []\n\n # Go over each sentence in the training set.\n for sentence in data:\n\n # For each word in the sentence\n for word in sentence:\n\n for ch in word:\n # Find its corresponding index - if not exist then assign the index of the unknown_token.\n ix = vocab.get(ch) if ch in vocab else vocab.get(unknown_token)\n char_indexes.append(ix)\n\n words_indexes.append(char_indexes)\n char_indexes = []\n\n # Keep the chars in the data set in sentences order.\n sentences.append(words_indexes)\n words_indexes = []\n\n # Return the updated data\n return sentences\n\n\n# Replace each tag of a word in the data set with its corresponding index.\ndef convert_tags_to_indexes(tags, vocab):\n sentences, tags_indexes = [], []\n\n # Go over each sentence in the training set.\n for sentence in tags:\n\n # For each tag of a word in the sentence\n for tag_of_word in sentence:\n\n # Find its corresponding index\n ix = vocab.get(tag_of_word)\n tags_indexes.append(ix)\n\n # Keep the tags in the data set in sentences order.\n sentences.append(tags_indexes)\n tags_indexes = []\n\n # Return the updated tags data\n return sentences\n\n\ndef pad_sentences(sentences):\n ordered = sorted(sentences, key=len, reverse=True)\n\n # Get the length of each sentence\n lengths = [len(sentence) for sentence in ordered]\n longest_sent = max(lengths)\n\n # Create an empty matrix with padding tokens\n features = np.zeros((len(ordered), longest_sent), dtype=int)\n\n for ii, review in enumerate(ordered):\n if len(review) != 0:\n features[ii, :len(review)] = np.array(review)\n\n return features, lengths\n\n\ndef pad_sentences_and_words(sentences):\n ordered = sorted(sentences, key=len, reverse=True)\n\n # Get the length of each sentence\n lengths = [len(sentence) for sentence in ordered]\n longest_sent = max(lengths)\n longest_word = MAX_WORD_LEN\n\n # Create an empty matrix with padding tokens\n features = np.zeros((len(ordered), longest_sent, longest_word), dtype=int)\n\n for ii, sentence in enumerate(ordered):\n for idx, word in enumerate(sentence):\n if len(word) != 0:\n features[ii, idx, :len(word)] = np.array(word)\n\n return features, lengths\n\n\n# Preparing the data for the forward pass\ndef 
forward_passing(model, x_words, x_chars, pref_x, suf_x, y, lengths, data_set_lengths):\n\n if REP == 'a':\n if torch.cuda.is_available():\n x_words = x_words.cuda()\n y = y.cuda()\n\n # Get the input ready for the model.\n x_words = Variable(x_words)\n\n # Forward pass.\n return model(lengths, max(data_set_lengths), x_words), y\n\n elif REP == 'b':\n\n if torch.cuda.is_available():\n x_chars = x_chars.cuda()\n y = y.cuda()\n\n # Get the input ready for the model.\n x_chars = Variable(x_chars)\n\n # Forward pass.\n return model(lengths, max(data_set_lengths), xC=x_chars), y\n\n elif REP == 'c':\n\n if torch.cuda.is_available():\n x_words = x_words.cuda()\n pref_x = pref_x.cuda()\n suf_x = suf_x.cuda()\n y = y.cuda()\n\n # Get the input ready for the model.\n x_words, pref_x, suf_x = Variable(torch.LongTensor(x_words)), Variable(torch.LongTensor(pref_x)), Variable(\n torch.LongTensor(suf_x))\n\n # Forward pass.\n return model(lengths, max(data_set_lengths), xW=x_words, prefixes=pref_x, suffixes=suf_x), y\n\n elif REP == 'd':\n\n if torch.cuda.is_available():\n x_words = x_words.cuda()\n x_chars = x_chars.cuda()\n y = y.cuda()\n\n # Get the input ready for the model.\n x_words, x_chars = Variable(x_words), Variable(x_chars)\n\n # Forward pass.\n return model(lengths, max(data_set_lengths), x_words, x_chars), y\n\n else:\n raise ValueError(\"Wrong representation was entered\")\n","repo_name":"DorinK/RNN-Acceptors-and-BiRNN-Transducers","sub_path":"Part 3 - BiLSTM Tagger/bilstmTrain_utils.py","file_name":"bilstmTrain_utils.py","file_ext":"py","file_size_in_byte":9352,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
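The pad_sentences helpers in the record above return a padded matrix plus the true lengths, sorted in decreasing order, which is exactly the pair a packed LSTM expects. A toy sketch of how such a batch would typically be consumed; the vocabulary size, dimensions, and sample batch are made up for the example and are not taken from the record:

import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

batch = torch.zeros(3, 5, dtype=torch.long)      # 3 sentences padded to length 5
batch[0, :5] = torch.tensor([4, 7, 2, 9, 1])
batch[1, :3] = torch.tensor([4, 3, 8])
batch[2, :2] = torch.tensor([5, 6])
lengths = [5, 3, 2]                              # sorted in decreasing order

emb = torch.nn.Embedding(10, 4)
lstm = torch.nn.LSTM(4, 6, batch_first=True)

# Packing lets the LSTM skip the padding positions entirely.
packed = pack_padded_sequence(emb(batch), lengths, batch_first=True)
out, _ = lstm(packed)
unpacked, out_lengths = pad_packed_sequence(out, batch_first=True)
print(unpacked.shape, out_lengths)               # torch.Size([3, 5, 6]) tensor([5, 3, 2])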
+{"seq_id":"36121315360","text":"import sys\n\nimport gi\n\ngi.require_version(\"Gtk\", \"3.0\")\nfrom gi.repository import Gio, Gtk\n\n# This would typically be its own file\nMENU_XML = \"\"\"\n\n\n \n\n\"\"\"\n\n\nclass AppWindow(Gtk.ApplicationWindow):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n outerbox = Gtk.Box(spacing=6, orientation=Gtk.Orientation.VERTICAL)\n self.add(outerbox)\n outerbox.show()\n\n builder = Gtk.Builder.new_from_string(MENU_XML, -1)\n menu = builder.get_object(\"app-menu\")\n\n button = Gtk.MenuButton.new()\n popover = Gtk.Popover.new_from_model(button, menu)\n button.set_popover(popover)\n\n outerbox.pack_start(button, False, True, 0)\n button.show()\n self.set_border_width(50)\n\n\nclass Application(Gtk.Application):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, application_id=\"org.example.myapp\", **kwargs)\n self.window = None\n\n def do_startup(self):\n Gtk.Application.do_startup(self)\n\n action = Gio.SimpleAction.new(\"about\", None)\n action.connect(\"activate\", self.on_about)\n self.add_action(action)\n\n action = Gio.SimpleAction.new(\"quit\", None)\n action.connect(\"activate\", self.on_quit)\n self.add_action(action)\n\n def do_activate(self):\n # We only allow a single window and raise any existing ones\n if not self.window:\n # Windows are associated with the application\n # when the last one is closed the application shuts down\n self.window = AppWindow(application=self, title=\"Main Window\")\n\n self.window.present()\n\n def on_about(self, action, param):\n about_dialog = Gtk.AboutDialog(transient_for=self.window, modal=True)\n about_dialog.present()\n\n def on_quit(self, action, param):\n self.quit()\n\n\nif __name__ == \"__main__\":\n app = Application()\n app.run(sys.argv)\n","repo_name":"bjtj/tjsamples","sub_path":"gtk/python/popovers/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"30"}
+{"seq_id":"74085346004","text":"from django import forms\n\nfrom sales_products.models.sales import SellProduct\n\n\nclass SalesForm(forms.ModelForm):\n class Meta:\n model = SellProduct\n fields = [\n 'sold_by',\n 'cart_product',\n #'quantity',\n # 'unit_price',\n # 'amount',\n #'order_status'\n ]\n\n","repo_name":"By-Lucas/Sales-and-invetory-system-Django","sub_path":"apps/sales_products/forms/sales_form.py","file_name":"sales_form.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"}
+{"seq_id":"11969846784","text":"import telebot\nimport os\nfrom dotenv import load_dotenv\n\nfrom sed import Sed\n\nload_dotenv()\nbot = telebot.TeleBot(os.getenv('API_TOKEN'))\n\n@bot.message_handler(commands=['start'])\ndef welcome(message: telebot.types.Message) -> None:\n bot.send_message(message.chat.id, \"Hi, I am Jeaciaz's personal bot. To learn more about what I can do, type /help.\")\n\n@bot.message_handler(commands=['help'])\ndef help(message: telebot.types.Message) -> None:\n bot.reply_to(message, \"\"\"\\\nHi there, I am Jeaciaz's personal bot for stuff.\n\nRight now I only do message editing, to do so reply to a message with 's/thing/replacement/g', and I will replace the \"thing\" in message replied to with \"replacement\" (the \"g\" stands as a flag for \"replace every occurence instead of the first one\"). I also support Python RegExp syntax, and you can use the regex flags after the last slash.\n\"\"\")\n\n@bot.message_handler(func=lambda message: Sed.is_valid(message))\ndef echo_message(message: telebot.types.Message) -> None:\n bot.reply_to(message, Sed(message).calc())\n\nbot.infinity_polling()\n","repo_name":"Jeaciaz/jeabot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"22433145158","text":"\"\"\"Python micro framework for building nature-inspired algorithms.\"\"\"\n\nfrom __future__ import print_function # for backward compatibility purpose\n\nimport os\nimport logging\nimport json\nimport datetime\nimport xlsxwriter\nimport numpy as np\nfrom NiaPy import algorithms, benchmarks\n\n__all__ = ['algorithms', 'benchmarks']\n__project__ = 'NiaPy'\n__version__ = '1.0.2'\n\nVERSION = \"{0} v{1}\".format(__project__, __version__)\n\nlogging.basicConfig()\nlogger = logging.getLogger('NiaPy')\nlogger.setLevel('INFO')\n\n\nclass Runner:\n r\"\"\"Runner utility feature.\n\n Feature which enables running multiple algorithms with multiple benchmarks.\n It also support exporting results in various formats (e.g. LaTeX, Excel, JSON)\n\n \"\"\"\n\n def __init__(self, D, NP, nFES, nRuns, useAlgorithms, useBenchmarks, A=0.5, r=0.5,\n Qmin=0.0, Qmax=2.0, Pa=0.25, F=0.5, CR=0.9, alpha=0.5, betamin=0.2, gamma=1.0,\n p=0.5, Ts=4, Mr=0.05, C1=2.0, C2=2.0, w=0.7, vMin=-4, vMax=4, Tao=0.1):\n r\"\"\"Initialize Runner.\n\n **__init__(self, D, NP, nFES, nRuns, useAlgorithms, useBenchmarks, ...)**\n\n Arguments:\n D {integer} -- dimension of problem\n\n NP {integer} -- population size\n\n nFES {integer} -- number of function evaluations\n\n nRuns {integer} -- number of repetitions\n\n useAlgorithms [] -- array of algorithms to run\n\n useBenchmarks [] -- array of benchmarks to run\n\n A {decimal} -- laudness\n\n r {decimal} -- pulse rate\n\n Qmin {decimal} -- minimum frequency\n\n Qmax {decimal} -- maximum frequency\n\n Pa {decimal} -- probability\n\n F {decimal} -- scalling factor\n\n CR {decimal} -- crossover rate\n\n alpha {decimal} -- alpha parameter\n\n betamin {decimal} -- betamin parameter\n\n gamma {decimal} -- gamma parameter\n\n p {decimal} -- probability switch\n\n Ts {decimal} -- tournament selection\n\n Mr {decimal} -- mutation rate\n\n C1 {decimal} -- cognitive component\n\n C2 {decimal} -- social component\n\n w {decimal} -- inertia weight\n\n vMin {decimal} -- minimal velocity\n\n vMax {decimal} -- maximal velocity\n\n Tao {decimal}\n\n \"\"\"\n\n self.D = D\n self.NP = NP\n self.nFES = nFES\n self.nRuns = nRuns\n self.useAlgorithms = useAlgorithms\n self.useBenchmarks = useBenchmarks\n self.A = A\n self.r = r\n self.Qmin = Qmin\n self.Qmax = Qmax\n self.Pa = Pa\n self.F = F\n self.CR = CR\n self.alpha = alpha\n self.betamin = betamin\n self.gamma = gamma\n self.p = p\n self.Ts = Ts\n self.Mr = Mr\n self.C1 = C1\n self.C2 = C2\n self.w = w\n self.vMin = vMin\n self.vMax = vMax\n self.Tao = Tao\n self.results = {}\n\n def __algorithmFactory(self, name, benchmark):\n bench = benchmarks.utility.Utility().get_benchmark(benchmark)\n algorithm = None\n\n if name == 'BatAlgorithm':\n algorithm = algorithms.basic.BatAlgorithm(\n self.D, self.NP, self.nFES, self.A, self.r, self.Qmin, self.Qmax, bench)\n elif name == 'DifferentialEvolutionAlgorithm':\n algorithm = algorithms.basic.DifferentialEvolutionAlgorithm(\n self.D, self.NP, self.nFES, self.F, self.CR, bench)\n elif name == 'FireflyAlgorithm':\n algorithm = algorithms.basic.FireflyAlgorithm(\n self.D, self.NP, self.nFES, self.alpha, self.betamin, self.gamma, bench)\n elif name == 'FlowerPollinationAlgorithm':\n algorithm = algorithms.basic.FlowerPollinationAlgorithm(\n self.D, self.NP, self.nFES, self.p, bench)\n elif name == 'GreyWolfOptimizer':\n algorithm = algorithms.basic.GreyWolfOptimizer(\n self.D, self.NP, self.nFES, bench)\n elif name == 'ArtificialBeeColonyAlgorithm':\n algorithm = 
algorithms.basic.ArtificialBeeColonyAlgorithm(\n self.D, self.NP, self.nFES, bench)\n elif name == 'GeneticAlgorithm':\n algorithm = algorithms.basic.GeneticAlgorithm(\n self.D, self.NP, self.nFES, self.Ts, self.Mr, self.gamma, bench)\n elif name == 'ParticleSwarmAlgorithm':\n algorithm = algorithms.basic.ParticleSwarmAlgorithm(\n self.D, self.NP, self.nFES, self.C1, self.C2, self.w, self.vMin, self.vMax, bench)\n elif name == 'HybridBatAlgorithm':\n algorithm = algorithms.modified.HybridBatAlgorithm(\n self.D, self.NP, self.nFES, self.A, self.r, self.F, self.CR, self.Qmin, self.Qmax, bench)\n elif name == 'SelfAdaptiveDifferentialEvolutionAlgorithm':\n algorithm = algorithms.modified.SelfAdaptiveDifferentialEvolutionAlgorithm(\n self.D, self.NP, self.nFES, self.F, self.CR, self.Tao, bench)\n else:\n raise TypeError('Passed benchmark is not defined!')\n\n return algorithm\n\n @classmethod\n def __createExportDir(cls):\n if not os.path.exists('export'):\n os.makedirs('export')\n\n @classmethod\n def __generateExportName(cls, extension):\n return 'export/' + str(datetime.datetime.now()).replace(':', '.') + '.' + extension\n\n def __exportToLog(self):\n print(self.results)\n\n def __exportToJson(self):\n self.__createExportDir()\n with open(self.__generateExportName('json'), 'w') as outFile:\n json.dump(self.results, outFile)\n logger.info('Export to JSON completed!')\n\n def __exportToXls(self):\n self.__createExportDir()\n\n workbook = xlsxwriter.Workbook(self.__generateExportName('xlsx'))\n worksheet = workbook.add_worksheet()\n\n row = 0\n col = 0\n nRuns = 0\n\n for alg in self.results:\n worksheet.write(row, col, alg)\n col += 1\n\n for bench in self.results[alg]:\n worksheet.write(row, col, bench)\n\n nRuns = len(self.results[alg][bench])\n\n for i in range(len(self.results[alg][bench])):\n row += 1\n worksheet.write(row, col, self.results[alg][bench][i])\n\n row -= len(self.results[alg][bench]) # jump back up\n col += 1\n\n row += 1 + nRuns # jump down to row after previous results\n col -= 1 + len(self.results[alg])\n\n workbook.close()\n logger.info('Export to XLSX completed!')\n\n def __exportToLatex(self):\n self.__createExportDir()\n\n metrics = ['Best', 'Median', 'Worst', 'Mean', 'Std.']\n\n def only_upper(s):\n return \"\".join(c for c in s if c.isupper())\n\n with open(self.__generateExportName('tex'), 'a') as outFile:\n outFile.write('\\\\documentclass{article}\\n')\n outFile.write('\\\\usepackage[utf8]{inputenc}\\n')\n outFile.write('\\\\usepackage{siunitx}\\n')\n outFile.write('\\\\sisetup{\\n')\n outFile.write('round-mode=places,round-precision=3}\\n')\n outFile.write('\\\\begin{document}\\n')\n outFile.write('\\\\begin{table}[h]\\n')\n outFile.write('\\\\centering\\n')\n\n begin_tabular = '\\\\begin{tabular}{cc'\n\n for alg in self.results:\n for _i in range(len(self.results[alg])):\n begin_tabular += 'S'\n\n firstLine = ' &'\n\n for benchmark in self.results[alg].keys():\n firstLine += ' & \\\\multicolumn{1}{c}{\\\\textbf{' + \\\n benchmark + '}}'\n\n firstLine += ' \\\\\\\\'\n\n break\n\n begin_tabular += '}\\n'\n outFile.write(begin_tabular)\n outFile.write('\\\\hline\\n')\n outFile.write(firstLine + '\\n')\n outFile.write('\\\\hline\\n')\n\n for alg in self.results:\n for metric in metrics:\n line = ''\n\n if metric != 'Worst':\n line += ' & ' + metric\n else:\n shortAlg = ''\n if alg.endswith('Algorithm'):\n shortAlg = only_upper(alg[:-9])\n else:\n shortAlg = only_upper(alg)\n line += '\\\\textbf{' + shortAlg + '} & ' + metric\n\n for benchmark in 
self.results[alg]:\n if metric == 'Best':\n line += ' & ' + \\\n str(np.amin(self.results[alg][benchmark]))\n elif metric == 'Median':\n line += ' & ' + \\\n str(np.median(self.results[alg][benchmark]))\n elif metric == 'Worst':\n line += ' & ' + \\\n str(np.amax(self.results[alg][benchmark]))\n elif metric == 'Mean':\n line += ' & ' + \\\n str(np.mean(self.results[alg][benchmark]))\n else:\n line += ' & ' + \\\n str(np.std(self.results[alg][benchmark]))\n\n line += ' \\\\\\\\'\n outFile.write(line + '\\n')\n\n outFile.write('\\\\hline\\n')\n outFile.write('\\\\end{tabular}\\n')\n outFile.write('\\\\end{table}\\n')\n outFile.write('\\\\end{document}')\n\n logger.info('Export to Latex completed!')\n\n def run(self, export='log', verbose=False):\n \"\"\"Execute runner.\n\n Keyword Arguments:\n export {string} -- takes export type (e.g. log, json, xlsx, latex) (default: 'log')\n verbose {boolean} -- switch for verbose logging (default: {False})\n\n Raises:\n TypeError -- Raises TypeError if export type is not supported\n\n Returns:\n Dictionary -- Returns dictionary of results\n\n \"\"\"\n\n for alg in self.useAlgorithms:\n self.results[alg] = {}\n if verbose:\n logger.info('Running %s...', alg)\n for bench in self.useBenchmarks:\n benchName = ''\n # check if passed benchmark is class\n if not isinstance(bench, ''.__class__):\n # set class name as benchmark name\n benchName = str(type(bench).__name__)\n else:\n benchName = bench\n\n if verbose:\n logger.info(\n 'Running %s algorithm on %s benchmark...', alg, benchName)\n\n self.results[alg][benchName] = []\n\n for _i in range(self.nRuns):\n algorithm = self.__algorithmFactory(alg, bench)\n self.results[alg][benchName].append(algorithm.run())\n\n if verbose:\n logger.info(\n '---------------------------------------------------')\n\n if export == 'log':\n self.__exportToLog()\n elif export == 'json':\n self.__exportToJson()\n elif export == 'xlsx':\n self.__exportToXls()\n elif export == 'latex':\n self.__exportToLatex()\n else:\n raise TypeError('Passed export type is not supported!')\n\n return self.results\n","repo_name":"owen6789/IFB-PSO","sub_path":"NiaPy/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":11592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"5227829675","text":"import os\nfrom inputreader import aocinput\nfrom typing import List, Callable\n\n\ndef mostCommon(data: List[List[bool]]) -> List[bool]:\n summation = [0] * (len(data[0]))\n for line in data:\n for i, value in enumerate(line):\n summation[i] += value\n return [value >= len(data) / 2 for value in summation]\n\n\ndef leastCommon(data: List[List[bool]]) -> List[bool]:\n return [not value for value in mostCommon(data)]\n\n\ndef toInt(value: List[bool]) -> int:\n return int(''.join(map(str, map(int, value))), 2)\n\n\ndef listFilter(values: List[List[bool]], filtermethod: Callable[[List[List[bool]]], List[bool]]) -> List[bool]:\n index = 0\n while len(values) > 1:\n filtered = []\n filterValue = filtermethod(values)[index]\n for value in values:\n if value[index] == filterValue:\n filtered.append(value)\n values = filtered\n index += 1\n return values[0]\n\n\ndef powerConsumption(data: List[List[bool]]) -> [int, int]:\n gammarate = mostCommon(data)\n epsilonrate = [not value for value in gammarate]\n\n oxygenrating = listFilter(data.copy(), mostCommon)\n scrubberrating = listFilter(data.copy(), leastCommon)\n\n return toInt(gammarate) * toInt(epsilonrate), toInt(oxygenrating) * toInt(scrubberrating)\n\n\ndef main(day):\n data = aocinput(day)\n data = [[value == '1' for value in line.strip()] for line in data]\n result = powerConsumption(data)\n print(result)\n\n\nif __name__ == '__main__':\n main(int(os.path.basename(__file__)[3:-3]))\n","repo_name":"z4tz/adventofcode2021","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"12298648005","text":"#!/usr/bin/env python3\n\nimport asyncio\nimport json\nimport stegos\n\n\ndef load_nodes(path):\n f = open(path, \"r\")\n encoded = f.read()\n return json.loads(encoded)\n\n\nasync def client_from_node(node):\n client = stegos.StegosClient(node_id=node['node_id'],\n uri=node['uri'],\n accounts=node['accounts'],\n master_key=node['key_password'],\n api_key=node['api_token'])\n\n await client.connect()\n await client.subscribe_status()\n return client\n\n\nasync def my_app(heap_node, nodes):\n node01 = await client_from_node(heap_node)\n\n my_account = list(heap['accounts'].keys())[0]\n print(\"Waiting for sync!\")\n await node01.wait_sync()\n balance = await node01.get_balance('heap')\n print(f\"Node01 balance before payments: {balance}\")\n for n in nodes:\n for id in n['accounts'].keys():\n await node01.payment_with_confirmation(my_account, n['accounts'][id], 100_000, comment=\"Initial payout\")\n\n balance = await node01.get_balance('heap')\n print(f\"Node01 balance after payments: {balance}\")\n\nif __name__ == '__main__':\n heap = {\n \"node_id\": \"heap\",\n \"accounts\": {\n \"heap\": \"7fYfUtAqcq2MyuuiaVkLmizsdSpqEXwGgYFxz9PPjVNkygo97RG\"\n },\n \"key_password\": \"joghie9fee0Eijoc\",\n \"api_token\": \"09DmvBtLXUNnnysvZVBmkg==\",\n \"uri\": \"ws://127.0.0.1:3155\",\n }\n\n nodes = load_nodes(\"sample.json\")\n loop = asyncio.get_event_loop()\n loop.run_until_complete(my_app(heap, nodes))\n","repo_name":"stegos/stegos-pycli","sub_path":"payout.py","file_name":"payout.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"6849073844","text":"# -*- coding: utf-8 -*-\n\"\"\" \n\nCreated on 19/11/18\n\nAuthor : Carlos Eduardo Barbosa\n\nRun ellipse into image to determine regions to combine spectra.\n\n\"\"\"\nfrom __future__ import print_function, division\n\nimport os\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table\nimport matplotlib.pyplot as plt\nfrom photutils.isophote import Ellipse\nfrom photutils.isophote.geometry import EllipseGeometry\nfrom scipy.interpolate import LSQUnivariateSpline\n\nimport context\n\ndef build_ellipse_model(shape, isolist, fill=0., high_harmonics=False):\n \"\"\"\n Adapted from photutils routine to handle nans in the outskirts of image.\n\n Build a model elliptical galaxy image from a list of isophotes.\n\n For each ellipse in the input isophote list the algorithm fills the\n output image array with the corresponding isophotal intensity.\n Pixels in the output array are in general only partially covered by\n the isophote \"pixel\". The algorithm takes care of this partial\n pixel coverage by keeping track of how much intensity was added to\n each pixel by storing the partial area information in an auxiliary\n array. The information in this array is then used to normalize the\n pixel intensities.\n\n Parameters\n ----------\n shape : 2-tuple\n The (ny, nx) shape of the array used to generate the input\n ``isolist``.\n isolist : `~photutils.isophote.IsophoteList` instance\n The isophote list created by the `~photutils.isophote.Ellipse`\n class.\n fill : float, optional\n The constant value to fill empty pixels. If an output pixel has\n no contribution from any isophote, it will be assigned this\n value. The default is 0.\n high_harmonics : bool, optional\n Whether to add the higher-order harmonics (i.e., ``a3``, ``b3``,\n ``a4``, and ``b4``; see `~photutils.isophote.Isophote` for\n details) to the result.\n\n Returns\n -------\n result : 2D `~numpy.ndarray`\n The image with the model galaxy.\n \"\"\"\n\n # the target grid is spaced in 0.1 pixel intervals so as\n # to ensure no gaps will result on the output array.\n finely_spaced_sma = np.arange(isolist[0].sma, isolist[-1].sma, 0.1)\n # interpolate ellipse parameters\n\n # End points must be discarded, but how many?\n # This seems to work so far\n idx = ~np.isnan(isolist.intens)\n nodes = isolist.sma[idx][2:-2]\n intens_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.intens[idx], nodes)(finely_spaced_sma)\n eps_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.eps[idx], nodes)(finely_spaced_sma)\n pa_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.pa[idx], nodes)(finely_spaced_sma)\n x0_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.x0[idx], nodes)(finely_spaced_sma)\n y0_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.y0[idx], nodes)(finely_spaced_sma)\n grad_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.grad[idx], nodes)(finely_spaced_sma)\n a3_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.a3[idx], nodes)(finely_spaced_sma)\n b3_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.b3[idx], nodes)(finely_spaced_sma)\n a4_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.a4[idx], nodes)(finely_spaced_sma)\n b4_array = LSQUnivariateSpline(\n isolist.sma[idx], isolist.b4[idx], nodes)(finely_spaced_sma)\n\n # Return deviations from ellipticity to their original amplitude meaning\n a3_array = -a3_array * grad_array * finely_spaced_sma\n b3_array = -b3_array * grad_array * finely_spaced_sma\n a4_array = -a4_array 
* grad_array * finely_spaced_sma\n b4_array = -b4_array * grad_array * finely_spaced_sma\n\n # correct deviations cased by fluctuations in spline solution\n eps_array[np.where(eps_array < 0.)] = 0.\n\n result = np.zeros(shape=shape)\n weight = np.zeros(shape=shape)\n\n eps_array[np.where(eps_array < 0.)] = 0.05\n\n # for each interpolated isophote, generate intensity values on the\n # output image array\n # for index in range(len(finely_spaced_sma)):\n for index in range(1, len(finely_spaced_sma)):\n sma0 = finely_spaced_sma[index]\n eps = eps_array[index]\n pa = pa_array[index]\n x0 = x0_array[index]\n y0 = y0_array[index]\n geometry = EllipseGeometry(x0, y0, sma0, eps, pa)\n\n intens = intens_array[index]\n\n # scan angles. Need to go a bit beyond full circle to ensure\n # full coverage.\n r = sma0\n phi = 0.\n while phi <= 2*np.pi + geometry._phi_min:\n # we might want to add the third and fourth harmonics\n # to the basic isophotal intensity.\n harm = 0.\n if high_harmonics:\n harm = (a3_array[index] * np.sin(3.*phi) +\n b3_array[index] * np.cos(3.*phi) +\n a4_array[index] * np.sin(4.*phi) +\n b4_array[index] * np.cos(4.*phi)) / 4.\n\n # get image coordinates of (r, phi) pixel\n x = r * np.cos(phi + pa) + x0\n y = r * np.sin(phi + pa) + y0\n i = int(x)\n j = int(y)\n\n if (i > 0 and i < shape[1] - 1 and j > 0 and j < shape[0] - 1):\n # get fractional deviations relative to target array\n fx = x - float(i)\n fy = y - float(j)\n\n # add up the isophote contribution to the overlapping pixels\n result[j, i] += (intens + harm) * (1. - fy) * (1. - fx)\n result[j, i + 1] += (intens + harm) * (1. - fy) * fx\n result[j + 1, i] += (intens + harm) * fy * (1. - fx)\n result[j + 1, i + 1] += (intens + harm) * fy * fx\n\n # add up the fractional area contribution to the\n # overlapping pixels\n weight[j, i] += (1. - fy) * (1. - fx)\n weight[j, i + 1] += (1. - fy) * fx\n weight[j + 1, i] += fy * (1. - fx)\n weight[j + 1, i + 1] += fy * fx\n\n # step towards next pixel on ellipse\n phi = max((phi + 0.75 / r), geometry._phi_min)\n r = max(geometry.radius(phi), 0.5)\n # if outside image boundaries, ignore.\n else:\n break\n\n # zero weight values must be set to 1.\n weight[np.where(weight <= 0.)] = 1.\n\n # normalize\n result /= weight\n\n # fill value\n result[np.where(result == 0.)] = fill\n\n return result\n\ndef run_ellipse(data, redo=False):\n \"\"\" Run ellipse fitting using NGC 3311 MUSE images. 
\"\"\"\n # Reading data and mask\n outfile = \"ellipse.txt\"\n if os.path.exists(outfile) and not redo:\n return\n mask = np.isnan(data)\n data[mask] = 3\n # Preparing ellipse fitting\n geometry = EllipseGeometry(x0=213, y0=235, sma=25, eps=0.1,\n pa=np.deg2rad(-50))\n geometry.find_center(data)\n ellipse = Ellipse(data, geometry)\n isolist = ellipse.fit_image(fflag=0.0, maxsma=500, maxrit=50, sclip=5.,\n nclip=2, sma0=50)\n table = isolist.to_table()[1:]\n table.write(outfile, format=\"ascii\", overwrite=True)\n # Producing image\n bmodel = build_ellipse_model(data.shape, isolist)\n bmodel[mask] = np.nan\n data[mask] = np.nan\n idx = bmodel <= 0\n bmodel[idx] = np.nan\n residual = data - bmodel\n fig, (ax1, ax2, ax3) = plt.subplots(figsize=(14, 5), nrows=1, ncols=3)\n fig.subplots_adjust(left=0.04, right=0.98, bottom=0.02, top=0.98)\n vmin = np.nanpercentile(data, 10)\n vmax = np.nanpercentile(data, 99)\n ax1.imshow(data, origin='lower', vmin=vmin, vmax=vmax)\n ax1.set_title('Data')\n\n smas = np.linspace(5, 200, 10)\n for sma in smas:\n iso = isolist.get_closest(sma)\n x, y, = iso.sampled_coordinates()\n ax1.plot(x, y, color='C1')\n vmin = np.nanpercentile(bmodel, 10)\n vmax = np.nanpercentile(bmodel, 99)\n ax2.imshow(bmodel, origin='lower', vmin=vmin, vmax=vmax)\n ax2.set_title('Ellipse Model')\n vmin = np.nanpercentile(residual, 10)\n vmax = np.nanpercentile(residual, 99)\n ax3.imshow(residual, origin='lower', vmin=vmin, vmax=vmax)\n ax3.set_title('Residual')\n plt.savefig(\"ellipse.png\", dpi=250)\n hdu1 = fits.PrimaryHDU(bmodel)\n hdu1.header[\"EXTNAME\"] = \"ELLIPMOD\"\n hdu2 = fits.ImageHDU(residual)\n hdu2.header[\"EXTNAME\"] = \"RESID\"\n hdulist = fits.HDUList([hdu1, hdu2])\n hdulist.writeto(\"ellipse_model.fits\", overwrite=True)\n plt.show()\n\nif __name__ == \"__main__\":\n field = \"fieldA\"\n wdir = os.path.join(context.home_dir, \"data\", field)\n os.chdir(wdir)\n imgdata = fits.getdata(f\"NGC3311_FieldA_IMAGE_COMBINED.fits\")\n imgdata *= 1e19\n noise = fits.getdata(f\"sn_{field}.fits\", hdu=2)\n run_ellipse(imgdata, redo=True)","repo_name":"cebarbosa/summer_project_hydra","sub_path":"ellipse_fitting.py","file_name":"ellipse_fitting.py","file_ext":"py","file_size_in_byte":8897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"6549832866","text":"produtocaro = ''\nprodutobarato = ''\nsoma = contador1000 = contador = valorbarato = 0\nwhile True:\n produto = str(input('Nome do produto: ')).strip().title()\n valor = float(input('Valor R$: '))\n soma += valor\n contador += 1\n if valor >= 1000:\n produtocaro = produto\n contador1000 += 1\n if contador == 1:\n produtobarato = produto\n valorbarato = valor\n else:\n if valor < valorbarato:\n produtobarato = produto\n valorbarato = valor\n opcao = str(input('Deseja continuar? [S/N]: ')).strip().upper()[0]\n if opcao == 'N':\n break\nprint(f'TOTAL: R${soma:.2f}')\nprint(f'{contador1000} produto(s) acima de R$1000: {produtocaro}')\nprint(f'{produtobarato} foi o mais barato, custando R${valorbarato:.2f}')\n","repo_name":"matheusmcz/Pythonaqui","sub_path":"Mundo2/ex070.py","file_name":"ex070.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"22755767523","text":"import datetime\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom data import ContrastiveDataset, build_CURL_dataset\nfrom GMM import GMM, GMMDataset, visualize\nfrom model import (\n ClassificationNet,\n FCLayer,\n Representation,\n train_curl,\n train_multiclass_sup,\n)\n\ntorch.manual_seed(0)\n\nN_CENTERS = 30\nDIM = 10\nTRAIN_SAMPLES = 5000\nTEST_SAMPLES = 5000\nVARIANCE = 1\n\nCURL_TRAIN_SIZE = 50 * N_CENTERS\nCURL_TEST_SIZE = 1000\nBATCH_SIZE = 100\n\n# Model\nINPUT_DIM = DIM\nHIDDEN_DIM = 20\nOUT_DIM = HIDDEN_DIM\n\n# Training\nN_EPOCH = 1000\nLR = 1e-3\n\n# Data generation\nCENTERS = torch.randn(N_CENTERS * DIM).view(N_CENTERS, DIM)\ngmm = GMM(DIM, CENTERS, VARIANCE)\n\nX_train, y_train = gmm.sample(TRAIN_SAMPLES)\nX_test, y_test = gmm.sample(TEST_SAMPLES)\n\ntrain_CURL = ContrastiveDataset(*build_CURL_dataset(X_train, y_train, CURL_TRAIN_SIZE))\nassert len(train_CURL) == CURL_TRAIN_SIZE\ntest_CURL = ContrastiveDataset(*build_CURL_dataset(X_test, y_test, CURL_TEST_SIZE))\n\ntrain_data = GMMDataset(X_train, y_train)\ntest_data = GMMDataset(X_test, y_test)\n\ntrain_loader = DataLoader(train_data, shuffle=True, batch_size=BATCH_SIZE)\ntest_loader = DataLoader(test_data, shuffle=False, batch_size=BATCH_SIZE)\n\ncurl_train_loader = DataLoader(train_CURL, shuffle=True, batch_size=BATCH_SIZE)\ncurl_test_loader = DataLoader(test_CURL, shuffle=False, batch_size=BATCH_SIZE)\n\n# Model\ncurl_model = Representation(INPUT_DIM, HIDDEN_DIM, OUT_DIM)\nsup_model = ClassificationNet(\n Representation(INPUT_DIM, HIDDEN_DIM, OUT_DIM), FCLayer(OUT_DIM, N_CENTERS)\n)\n\nwriter_str = (\n \"CURL/GMM-\"\n + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n + \"-\"\n + str(INPUT_DIM)\n + \"-\"\n + str(HIDDEN_DIM)\n + \"-\"\n + str(OUT_DIM)\n + \"-\"\n + str(LR)\n + \"-\"\n + str(BATCH_SIZE)\n)\nwriter = SummaryWriter(writer_str)\n\"\"\"\nsup_model = train_multiclass_sup(\n train_loader,\n sup_model,\n writer,\n N_EPOCH,\n LR,\n verbose=True,\n visualize=True,\n)\n\"\"\"\ncurl_model = train_curl(\n curl_train_loader,\n curl_model,\n writer,\n N_EPOCH,\n LR,\n verbose=True,\n visualize=True,\n test_X=X_test,\n test_y=y_test,\n)\n","repo_name":"theophilec/CURL-Experiments","sub_path":"curl-mnist/gmm_main.py","file_name":"gmm_main.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"19"}
+{"seq_id":"20666377367","text":"import sqlite3\nimport requests\nimport bs4\nfrom datetime import datetime\n\n# log time\nprint(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n\nconn = sqlite3.connect('main.db')\ndata = conn.execute(\"select * from gaoqing where download_url is null\")\nrows = data.fetchall()\nrownumber = 0\n\nfor row in rows:\n url = row[2]\n res = requests.get(url)\n soup = bs4.BeautifulSoup(res.text,\"html.parser\")\n elems = soup.select('#post_content > p > span > a')\n if len(elems)==0:\n continue\n download_url = elems[-1]['href']\n gaoqing_id = row[0]\n sql = \"UPDATE gaoqing set download_url = '%s' where ID='%s'\"%(download_url,gaoqing_id)\n conn.execute(sql)\n rownumber += 1\n\nconn.commit()\nconn.close()\n\nprint(\"%s items download link had updated\"%rownumber)\n\n","repo_name":"cliuxinxin/automation","sub_path":"UpdateGaoQingDownloadUrl.py","file_name":"UpdateGaoQingDownloadUrl.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"72626118443","text":"import typing\nimport os\nimport uuid\nfrom mimetypes import guess_extension\nimport requests\nfrom pathlib import Path\n\nfrom tools.CacheUtil import CACHE_UTIL\nfrom tools.ThreadPoolUtil import THREAD_POOL\n\n\nclass FileUtil:\n\n def __init__(self):\n pass\n\n @staticmethod\n def create_dir_if_not_exists(dir: str):\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n @staticmethod\n def _process_key(key: str) -> str:\n return \"file_cache:\"+key\n\n @staticmethod\n def read_qss(style):\n with open(style, \"r\") as f:\n return f.read()\n\n @staticmethod\n def put_cache(key: str, path: str, file: bytes):\n with open(path, 'wb') as fd:\n fd.write(file)\n CACHE_UTIL.set(FileUtil._process_key(key), path)\n\n @staticmethod\n def get_cache_path(key: str) -> str:\n return CACHE_UTIL.get(FileUtil._process_key(key))\n\n @staticmethod\n def _download_url(\n url: str,\n file_path: str,\n callback: typing.Callable[[str], None] = None\n ):\n r = requests.get(url)\n type = guess_extension(\n r.headers['content-type'].partition(';')[0].strip())\n all_file_path = file_path+type\n with open(all_file_path, \"wb\") as f:\n f.write(r.content)\n CACHE_UTIL.set(FileUtil._process_key(url), all_file_path)\n if callback is not None:\n callback(all_file_path)\n\n @staticmethod\n def get_storage_from_url(url: str,\n dir: str,\n callback: typing.Callable[[str], None] = None\n ) -> str:\n # 检查是否存在\n now_path = FileUtil.get_cache_path(url)\n if now_path is not None and now_path != \"\":\n # 校验文件是否存在\n the_file = Path(now_path)\n if the_file.is_file():\n if callback is not None:\n callback(now_path)\n return\n path = dir+\"/\"+str(uuid.uuid1())\n THREAD_POOL.submit(FileUtil._download_url,\n url, path, callback)\n # wait([f], return_when=ALL_COMPLETED)\n\n\nFileUtil.create_dir_if_not_exists(\"./resources/images\")\n","repo_name":"dezhishen/im-gui-pyqt5","sub_path":"tools/FileUtil.py","file_name":"FileUtil.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"19"}
+{"seq_id":"36419178600","text":"import torch\nimport glob\nimport networkx as nx\nimport os\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport pathlib\nimport gdown\nfrom torch.utils.data import Dataset\nfrom sklearn.preprocessing import MinMaxScaler\nfrom torch_geometric.data import InMemoryDataset, Data, HeteroData, Dataset, extract_zip, download_url\n\ndata_url = \"https://zenodo.org/record/7376222/files/df00{}.zip?download=1\"\n\ncolumn_dict = {'id': 'TX_ID', 'sender': 'User',\n 'receiver': 'Merchant Name', 'label': 'Is Fraud?'}\n\noriginal_features = ['Amount', 'Insufficient Balance', 'Bad PIN',\n 'Technical Glitch', 'Bad Card Number', 'Bad CVV',\n 'Bad Expiration',\n 'Bad Zipcode', 'total_seconds',\n 'sin_time_seconds', 'cos_time_seconds', 'sin_time_days',\n 'cos_time_days',\n 'TX_CAT_1', 'TX_CAT_3', 'TX_CAT_4', 'TX_CAT_5',\n 'TX_CAT_6', 'TX_CAT_7', 'TX_CAT_8', 'TX_CAT_9', 'TX_TYPE_0',\n 'TX_TYPE_1', 'TX_TYPE_2', 'MC_CONTINENT_AF', 'MC_CONTINENT_AS',\n 'MC_CONTINENT_EU', 'MC_CONTINENT_NA', 'MC_CONTINENT_OC',\n 'MC_CONTINENT_SA', 'MC_CONTINENT_US'\n ]\n\n\nclass FraudSubset(InMemoryDataset):\n def __init__(self, root, features=None, column_dict=None, features_requiring_scaling=None, file_name='data', weighted=False, transform=None, pre_transform=None, pre_filter=None):\n self.features = features\n self.column_dict = column_dict\n self.features_requiring_scaling = features_requiring_scaling\n self.file_name = file_name\n self.weighted = weighted\n super().__init__(root, transform, pre_transform, pre_filter)\n\n @property\n def raw_file_names(self):\n\n return glob.glob(os.path.join(self.root + '/*.feather'))\n\n @property\n def processed_file_names(self):\n return [(str(self.file_name) + '_{}.pt').format(str(i)) for i in range(10)]\n\n @property\n def raw_dir(self) -> str:\n return os.path.join(self.root, 'raw')\n\n def process(self):\n # Read data into huge `Data` list.\n\n test_files = sorted(\n glob.glob(os.path.join(self.raw_dir + '/*_test.feather')))\n train_files = sorted(\n glob.glob(os.path.join(self.raw_dir + '/*_train.feather')))\n\n idx = 0\n for train_file, test_file in tqdm(zip(train_files, test_files)):\n\n data = read_fraud_data(train_file, test_file, features=self.features, weighted=self.weighted,\n column_dict=self.column_dict, features_requiring_scaling=self.features_requiring_scaling)\n\n if self.pre_filter is not None and not self.pre_filter(data):\n continue\n\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n torch.save(data, os.path.join(self.processed_dir,\n str(self.file_name)+f'_{idx}.pt'))\n idx += 1\n\n def len(self):\n return len(self.processed_file_names)\n\n def get(self, idx):\n data = torch.load(os.path.join(self.processed_dir,\n str(self.file_name) + f'_{idx}.pt'))\n return data\n\n\ndef read_fraud_data(train_file, test_file, features=[], weighted=False, embeddings=False, features_requiring_scaling=None, column_dict={}):\n\n # Read both files\n df_train = pd.read_feather(train_file)\n #df_train = df_train.set_index(df_train.columns[0])\n df_test = pd.read_feather(test_file)\n #df_test = df_test.set_index(df_test.columns[0])\n\n frac_val = 0.1\n cutoff = int(df_train.shape[0] * (1-frac_val))\n df_val = df_train.iloc[cutoff:]\n df_train = df_train.iloc[:cutoff]\n print(\"df_train_shape\", str(df_train.shape))\n print(\"df_val_shape\", str(df_val.shape))\n print(\"df_test_shape\", str(df_test.shape))\n\n #\n # Feature scaling\n if features_requiring_scaling:\n scaler = MinMaxScaler()\n 
scaler.fit(df_train.loc[:, features_requiring_scaling])\n df_train_scaled = pd.DataFrame(scaler.transform(\n df_train.loc[:, features_requiring_scaling]), columns=features_requiring_scaling, index=df_train.index)\n df_train = df_train.drop(features_requiring_scaling, axis=1)\n df_train = df_train.merge(\n df_train_scaled, left_index=True, right_index=True)\n df_val_scaled = pd.DataFrame(scaler.transform(\n df_val.loc[:, features_requiring_scaling]), columns=features_requiring_scaling, index=df_val.index)\n df_val = df_val.drop(features_requiring_scaling, axis=1)\n df_val = df_val.merge(df_val_scaled, left_index=True, right_index=True)\n df_test_scaled = pd.DataFrame(scaler.transform(\n df_test.loc[:, features_requiring_scaling]), columns=features_requiring_scaling, index=df_test.index)\n df_test = df_test.drop(features_requiring_scaling, axis=1)\n df_test = df_test.merge(\n df_test_scaled, left_index=True, right_index=True)\n\n print(\"df_train_shape\", str(df_train.shape))\n print(\"df_val_shape\", str(df_val.shape))\n print(\"df_test_shape\", str(df_test.shape))\n\n df = pd.concat([df_train, df_val, df_test])\n\n print(df.shape)\n print(df.loc[:, column_dict['label']].value_counts())\n print(column_dict['id'])\n print(column_dict['sender'])\n print(column_dict['receiver'])\n print(column_dict['label'])\n\n G = nx.Graph()\n G.add_nodes_from(df.loc[:, column_dict['id']].unique(), type='TX')\n G.add_nodes_from(df.loc[:, column_dict['sender']].unique(), type='other')\n G.add_nodes_from(df.loc[:, column_dict['receiver']].unique(), type='other')\n\n if weighted:\n G.add_weighted_edges_from(zip(\n df.loc[:, column_dict['id']], df.loc[:, column_dict['sender']], df.loc[:, column_dict['edge_weight']]))\n G.add_weighted_edges_from(zip(\n df.loc[:, column_dict['id']], df.loc[:, column_dict['receiver']], df.loc[:, column_dict['edge_weight']]))\n else:\n G.add_edges_from(\n zip(df.loc[:, column_dict['id']], df.loc[:, column_dict['sender']]))\n G.add_edges_from(\n zip(df.loc[:, column_dict['id']], df.loc[:, column_dict['receiver']]))\n\n #print('done networkx')\n\n # determines the order of nodes\n tx_list = list(df.loc[:, column_dict['id']].unique())\n card_list = list(df.loc[:, column_dict['sender']].unique())\n merchant_list = list(df.loc[:, column_dict['receiver']].unique())\n nodelist = tx_list + card_list + merchant_list\n number_of_tx = len(list(df.loc[:, column_dict['id']].unique()))\n df_tx_index = df.set_index(column_dict['id'])\n\n # which features can be used for training?\n # Create a list of features that can be used during training (i.e. drop fraud label column)\n #features = df.columns.drop(['CARD_PAN_ID', 'TERM_MIDUID', 'TX_FRAUD', 'TX_ID'])\n\n #features = original_features.copy()\n # print(len(features))\n # if rfm:\n # features += rfm_features\n # print(len(features))\n # if apate:\n # features += apate_features\n # print(len(features))\n\n print(len(features))\n\n # Get y tensor\n a = df_tx_index.loc[tx_list, column_dict['label']].values.astype(int)\n b = np.zeros(len(card_list))\n c = np.zeros(len(merchant_list))\n y = np.concatenate((a, b, c))\n y = torch.tensor(y, dtype=torch.long)\n\n #print('done y')\n # delta is a very small number that is summed with the edge weights. This is to avoid that the sum of all edge weights of a node equals zero.\n # In the neighborsampler the sum of the weights is used to normalize the weights to probabilities. 
Hence, a sum equaling zero would result in NaN values.\n if weighted:\n edge_weight_col = df.loc[:, column_dict['edge_weight']]\n delta = min(edge_weight_col[edge_weight_col > 0]) / 100\n\n # Get edge_index\n adj = nx.to_scipy_sparse_array(G).tocoo()\n row = torch.from_numpy(adj.row.astype(np.int64)).to(torch.long)\n col = torch.from_numpy(adj.col.astype(np.int64)).to(torch.long)\n cell_data = torch.from_numpy(adj.data.astype(np.double)).to(torch.double)\n edge_index = torch.stack([row, col], dim=0)\n if weighted:\n edge_weight = cell_data + delta\n else:\n edge_weight = None\n #print('done edge_index')\n\n d = df_tx_index.loc[tx_list, features].values\n e = np.zeros((len(card_list), len(features)))\n f = np.zeros((len(merchant_list), len(features)))\n\n x = np.concatenate((d, e, f), axis=0)\n x = torch.tensor(x.astype(np.float), dtype=torch.float32)\n #print('done x')\n\n # Validation fraction from training data\n frac_val = 0.1\n train_mask = torch.zeros(y.size(0), dtype=torch.bool)\n val_mask = torch.zeros(y.size(0), dtype=torch.bool)\n test_mask = torch.zeros(y.size(0), dtype=torch.bool)\n\n train_cutoff = int(df_train.shape[0])\n val_cutoff = int(df_train.shape[0]) + int(df_val.shape[0])\n\n train_mask[:train_cutoff] = True\n val_mask[train_cutoff:val_cutoff] = True\n test_mask[val_cutoff:number_of_tx] = True\n\n data = Data(x=x, edge_index=edge_index, edge_weight=edge_weight, y=y,\n train_mask=train_mask, val_mask=val_mask, test_mask=test_mask)\n return data\n\n\nclass HeteroFraudSubset(InMemoryDataset):\n def __init__(self, root, subset, column_dict={}, weighted=False, features_requiring_scaling=None, transform=None, pre_transform=None, pre_filter=None):\n self.column_dict = column_dict\n self.features_requiring_scaling = features_requiring_scaling\n self.weighted = weighted\n self.subset = subset\n self.url = data_url.format(self.subset)\n root_subset = os.path.join(root, str(subset))\n super().__init__(root_subset, transform, pre_transform, pre_filter)\n\n @property\n def raw_file_names(self):\n\n return ['df'+str(self.subset).zfill(3)+'_test.feather', 'df'+str(self.subset).zfill(3)+'_train.feather']\n\n @property\n def processed_file_names(self):\n return ['subset'+str(self.subset) + '.pt']\n\n def download(self):\n # path = gdown.download(\n # id=file_ids[self.subset], output='./data/' + str(self.subset) + '/')\n path = download_url(self.url, self.raw_dir)\n extract_zip(path, self.raw_dir)\n os.unlink(path)\n\n @property\n def raw_dir(self) -> str:\n return os.path.join(self.root, 'raw')\n\n def process(self):\n data_list = []\n # Read data into huge `Data` list.\n\n test_file = glob.glob(os.path.join(\n self.raw_dir + '/*_test.feather'))[0]\n train_file = glob.glob(os.path.join(\n self.raw_dir + '/*_train.feather'))[0]\n\n data = read_hetero_fraud_data(\n train_file, test_file, scaling=True, weighted=self.weighted)\n\n if self.pre_filter is not None and not self.pre_filter(data):\n pass\n\n if self.pre_transform is not None:\n data = self.pre_transform(data)\n\n torch.save(data, os.path.join(self.processed_dir,\n 'subset' + str(self.subset)+'.pt'))\n\n def filter_TX_CH_edge(self, n1, n2):\n return self.G[n1][n2].get(\"type\") == 'pays'\n\n def filter_TX_MC_edge(self, n1, n2):\n return self.G[n1][n2].get(\"type\") == 'receives'\n\n def len(self):\n return len(self.processed_file_names)\n\n def get(self, idx):\n data = torch.load(os.path.join(self.processed_dir,\n 'subset'+str(self.subset) + '.pt'))\n return data\n\n\ndef read_hetero_fraud_data(train_file, test_file, 
scaling=True, weighted=False, frac_val=0.1):\n\n # Read both files\n df_train = pd.read_feather(train_file)\n # df_train.set_index(df_train.columns[0])\n df_test = pd.read_feather(test_file)\n # df_test.set_index(df_test.columns[0])\n cutoff = int(df_train.shape[0] * (1-frac_val))\n df_val = df_train.iloc[cutoff:]\n df_train = df_train.iloc[:cutoff]\n print(\"df_train_shape\", str(df_train.shape))\n print(\"df_val_shape\", str(df_val.shape))\n print(\"df_test_shape\", str(df_test.shape))\n\n if scaling:\n scaler = MinMaxScaler()\n scaler.fit(df_train.loc[:, original_features])\n df_train_scaled = pd.DataFrame(scaler.transform(\n df_train.loc[:, original_features]), columns=original_features, index=df_train.index)\n df_train = df_train.drop(original_features, axis=1)\n df_train = df_train.merge(\n df_train_scaled, left_index=True, right_index=True)\n df_val_scaled = pd.DataFrame(scaler.transform(\n df_val.loc[:, original_features]), columns=original_features, index=df_val.index)\n df_val = df_val.drop(original_features, axis=1)\n df_val = df_val.merge(df_val_scaled, left_index=True, right_index=True)\n df_test_scaled = pd.DataFrame(scaler.transform(\n df_test.loc[:, original_features]), columns=original_features, index=df_test.index)\n df_test = df_test.drop(original_features, axis=1)\n df_test = df_test.merge(\n df_test_scaled, left_index=True, right_index=True)\n\n print(\"df_train_shape\", str(df_train.shape))\n print(\"df_val_shape\", str(df_val.shape))\n print(\"df_test_shape\", str(df_test.shape))\n df = pd.concat([df_train, df_val, df_test])\n print(df.shape)\n print(df.loc[:, column_dict['label']].value_counts())\n\n data = HeteroData()\n # determines the order of nodes\n tx_list = list(df.loc[:, column_dict['id']].unique())\n print(len(tx_list))\n tx_dict = {id: i for i, id in enumerate(tx_list)}\n\n card_list = list(df.loc[:, column_dict['sender']].unique())\n card_dict = {id: i for i, id in enumerate(card_list)}\n\n merchant_list = list(df.loc[:, column_dict['receiver']].unique())\n merchant_dict = {id: i for i, id in enumerate(merchant_list)}\n\n tx_dict = {id: i for i, id in enumerate(tx_list)}\n card_dict = {id: i for i, id in enumerate(card_list)}\n merchant_dict = {id: i for i, id in enumerate(merchant_list)}\n\n number_of_tx = len(list(df.loc[:, column_dict['id']].unique()))\n df_tx_index = df.set_index(column_dict['id'])\n\n #features = original_features.copy()\n # if rfm:\n # features += rfm_features\n # if embeddings:\n # features += embedding_features_tx\n # if apate:\n # features += apate_features\n\n # Get y tensor\n # Get y tensor\n y = df_tx_index.loc[tx_list, column_dict['label']].values.astype(int)\n #b = np.zeros(len(card_list))\n #c = np.zeros(len(merchant_list))\n #y = np.concatenate((a,b,c))\n y = torch.tensor(y, dtype=torch.long)\n\n #print('done y')\n if weighted:\n edge_weight_col = df.loc[:, column_dict['edge_weight']]\n delta = min(edge_weight_col[edge_weight_col > 0]) / 100\n\n # The influence from cardholder/merchant on transaction nodes is not split according to label information!\n row = df.loc[:, column_dict['sender']].apply(lambda x: card_dict[x]).values\n col = df.TX_ID.apply(lambda x: tx_dict[x]).values\n #np.ones(len(df.loc[:, column_dict['sender']]))\n row = torch.from_numpy(row).to(torch.long)\n col = torch.from_numpy(col).to(torch.long)\n edge_index_CH = torch.stack([row, col], dim=0)\n if weighted:\n cell_data = df.loc[:, column_dict['edge_weight']].values\n cell_data = torch.from_numpy(\n cell_data.astype(np.double)).to(torch.double)\n 
edge_weight_CH = cell_data + delta\n else:\n edge_weight_CH = None\n\n row = df.loc[:, column_dict['receiver']].apply(\n lambda x: merchant_dict[x]).values\n col = df.TX_ID.apply(lambda x: tx_dict[x]).values\n row = torch.from_numpy(row).to(torch.long)\n col = torch.from_numpy(col).to(torch.long)\n edge_index_MC = torch.stack([row, col], dim=0)\n if weighted:\n # np.ones(len(df.loc[:, column_dict['receiver']]))\n cell_data = df.loc[:, column_dict['edge_weight']].values\n cell_data = torch.from_numpy(\n cell_data.astype(np.double)).to(torch.double)\n edge_weight_MC = cell_data + delta\n else:\n edge_weight_MC = None\n\n # get x\n tx_x = df_tx_index.loc[tx_list, original_features].values\n ch_x = np.zeros((len(card_list), 1))\n mc_x = np.zeros((len(merchant_list), 1))\n\n tx_x = torch.tensor(tx_x.astype(np.float), dtype=torch.float32)\n ch_x = torch.tensor(ch_x.astype(np.float), dtype=torch.float32)\n mc_x = torch.tensor(mc_x.astype(np.float), dtype=torch.float32)\n #print('done x')\n\n data['transaction'].x = tx_x\n data['cardholder'].x = ch_x\n data['merchant'].x = mc_x\n\n # From cardholder and merchant to transaction nodes we do not distinguish between fraud/non-fraud.\n data['cardholder', 'pays', 'transaction'].edge_index = edge_index_CH\n data['merchant', 'receives', 'transaction'].edge_index = edge_index_MC\n\n data['cardholder', 'pays', 'transaction'].edge_weight = edge_weight_CH\n data['merchant', 'receives', 'transaction'].edge_weight = edge_weight_MC\n\n data['transaction'].y = y\n\n # Validation fraction from training data\n\n train_mask = torch.zeros(y.size(0), dtype=torch.bool)\n val_mask = torch.zeros(y.size(0), dtype=torch.bool)\n test_mask = torch.zeros(y.size(0), dtype=torch.bool)\n\n train_cutoff = int(df_train.shape[0])\n val_cutoff = int(df_train.shape[0]) + int(df_val.shape[0])\n\n train_mask[:train_cutoff] = True\n val_mask[train_cutoff:val_cutoff] = True\n test_mask[val_cutoff:number_of_tx] = True\n\n data['transaction'].train_mask = train_mask\n data['transaction'].val_mask = val_mask\n data['transaction'].test_mask = test_mask\n\n return data\n","repo_name":"rafaelvanbelle/SHINE","sub_path":"code/fraud_dataloader.py","file_name":"fraud_dataloader.py","file_ext":"py","file_size_in_byte":17594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"43878263538","text":"import pandas as pd\n\n\nclass ColorData:\n COLORS_DICT = {\n \"rouge\": \"red\",\n \"orange\": \"orange\",\n \"jaune\": \"yellow\",\n \"vert\": \"green\",\n \"bleu\": \"blue\",\n \"indigo\": \"indigo\",\n \"violet\": \"purple\",\n \"rose\": \"#ff748c\",\n \"doré\": \"gold\",\n }\n COLORS_FR_EN = {\n \"rouge\": \"red\",\n \"orange\": \"orange\",\n \"jaune\": \"yellow\",\n \"vert\": \"green\",\n \"bleu\": \"blue\",\n \"indigo\": \"indigo\",\n \"violet\": \"purple\",\n \"rose\": \"pink\",\n \"doré\": \"gold\",\n }\n\n data = pd.read_excel(\"data/colors_meaning.xlsx\", index_col=0)\n data[\"color_en\"] = data[\"color\"].replace(COLORS_FR_EN)\n data[\"color_code\"] = data[\"color\"].replace(COLORS_DICT)\n\n def __init__(self, color_digit: int) -> None:\n self.color_digit = color_digit\n\n @property\n def title(self):\n return self.data.loc[self.color_digit, \"title\"]\n\n @property\n def color_code(self):\n return self.data.loc[self.color_digit, \"color_code\"]\n\n @property\n def color(self):\n return self.data.loc[self.color_digit, \"color\"]\n\n @property\n def keywords(self):\n return self.data.loc[self.color_digit, \"keywords\"]\n\n\nif __name__ == \"__main__\":\n dl = ColorData(4)\n print(dl.keywords)\n","repo_name":"jorislimonier/the-colour-path","sub_path":"src/color_data.py","file_name":"color_data.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"6166639752","text":"import torch\nfrom torch.nn import CrossEntropyLoss\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom transformers import AutoConfig, AutoModel\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom attentions import (\n MultiHeadedAttention,\n SelfAttention,\n)\nfrom modeling_utils import (\n Classifier,\n PositionalEncoding,\n)\n\n\nclass VerificationModel(PreTrainedModel):\n def __init__(self, hparams, num_labels):\n config = AutoConfig.from_pretrained(\n hparams.pretrained_model_name, num_labels=num_labels\n )\n super().__init__(config)\n self.num_labels = num_labels\n self.num_evidence = hparams.num_evidence\n self.max_seq_length = hparams.max_seq_length\n assert hparams.aggregate_mode in {\"concat\", \"attn\", \"mean\", \"sum\"}\n self.aggregate_mode = hparams.aggregate_mode\n rank_zero_info(f\"aggregate mode: {hparams.aggregate_mode}\")\n self.attn_bias_type = hparams.attn_bias_type\n rank_zero_info(f\"attention bias type: {hparams.attn_bias_type}\")\n\n setattr(\n self,\n self.config.model_type,\n AutoModel.from_pretrained(\n hparams.pretrained_model_name, config=self.config\n ),\n )\n\n if self.aggregate_mode == \"concat\":\n hidden_size = self.config.hidden_size * (hparams.num_evidence + 1)\n else:\n hidden_size = self.config.hidden_size\n\n self.classifier = Classifier(\n hidden_size, num_labels, dropout=hparams.classifier_dropout_prob\n )\n\n self.aggregate_attn = None\n if hparams.aggregate_mode == \"attn\":\n self.aggregate_attn = MultiHeadedAttention(self.config, self.attn_bias_type)\n\n self.sent_attn = None\n if hparams.sent_attn:\n self.sent_attn = SelfAttention(self.config)\n self.sent_position = PositionalEncoding(\n self.num_evidence, self.config.hidden_size\n )\n\n self.word_attn = None\n if hparams.word_attn:\n self.word_attn = SelfAttention(self.config)\n self.word_position = PositionalEncoding(\n self.num_evidence * self.max_seq_length, self.config.hidden_size\n )\n\n def get_logits(self, encoder_outputs, attention_mask=None, sent_scores=None):\n # hidden_states: batch*(evidence+1) x len x hidden\n # attention_mask: batch*(evidence+1) x len\n # sent_scores: batch x evidence\n num_evidence_plus = self.num_evidence + 1\n hidden_states = encoder_outputs.last_hidden_state\n hidden_size = self.config.hidden_size\n\n sents = None\n if self.word_attn:\n seq_length = self.num_evidence * self.max_seq_length\n sent_hidden_states = hidden_states.view(\n -1, num_evidence_plus, self.max_seq_length, self.config.hidden_size\n )\n sent_hidden_states = sent_hidden_states[:, 1:] # skip claim\n sent_hidden_states = sent_hidden_states.view(\n -1, seq_length, self.config.hidden_size\n )\n sent_mask = attention_mask.view(-1, num_evidence_plus, self.max_seq_length)\n sent_mask = sent_mask[:, 1:] # skip claim\n sent_mask = sent_mask.view(-1, seq_length)\n\n sent_hidden_states = self.word_position(sent_hidden_states)\n sent_hidden_states = self.word_attn(\n sent_hidden_states, sent_mask.unsqueeze(1)\n )\n\n # batch x evidence x len x hidden -> batch x evidence x hidden\n sent_hidden_states = sent_hidden_states.view(\n -1, self.num_evidence, self.max_seq_length, self.config.hidden_size\n )\n sents = sent_hidden_states[:, :, 0] # equiv. to [CLS]\n\n # features: batch*(evidence+1) x hidden\n features = hidden_states[:, 0] # equiv. 
to [CLS]\n\n # claims: batch x hidden\n # sents: batch x evidence x hidden\n claims = features[0::num_evidence_plus]\n if sents is None:\n sents = features.view(-1, num_evidence_plus, hidden_size)[:, 1:]\n\n if self.sent_attn:\n sents = self.sent_position(sents)\n sents = self.sent_attn(sents)\n\n if self.aggregate_mode == \"sum\":\n aggregate_output = sents.sum(dim=1)\n elif self.aggregate_mode == \"mean\":\n aggregate_output = sents.mean(dim=1)\n elif self.aggregate_mode == \"attn\":\n aggregate_output = self.aggregate_attn(\n claims,\n sents,\n sents,\n bias=sent_scores,\n ).squeeze(1)\n elif self.aggregate_mode == \"concat\":\n x = torch.cat([claims.unsqueeze(1), sents], dim=1)\n aggregate_output = x.view(x.size(0), -1)\n\n return self.classifier(aggregate_output)\n\n def create_outputs(self, loss, logits, encoder_outputs, return_dict):\n if not return_dict:\n output = (logits,) + encoder_outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n def encoder(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n assert input_ids.dim() == 3 # batch x evidence x len\n input_ids = input_ids.view(-1, self.max_seq_length)\n attention_mask = attention_mask.view(-1, self.max_seq_length)\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, self.max_seq_length)\n\n return getattr(self, self.config.model_type)(\n input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n selection_labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n class_weights=None,\n ):\n encoder_outputs = self.encoder(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n\n logits = self.get_logits(encoder_outputs, attention_mask)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(weight=class_weights)\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n return self.create_outputs(loss, logits, encoder_outputs, return_dict)\n\n\nclass VerificationJointModel(VerificationModel):\n def __init__(\n self,\n hparams,\n num_labels,\n ):\n super().__init__(hparams, num_labels=num_labels)\n assert 0.0 < hparams.lambda_joint <= 1.0\n self.lambda_joint = hparams.lambda_joint\n self.sent_num_labels = 2\n self.sent_classifier = Classifier(\n self.config.hidden_size,\n self.sent_num_labels,\n dropout=hparams.classifier_dropout_prob,\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n selection_labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n class_weights=None,\n ):\n 
encoder_outputs = self.encoder(\n input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=True,\n )\n\n # batch*(evidence+1) x hidden\n features = encoder_outputs.last_hidden_state[:, 0] # equiv. to [CLS]\n\n logits_s = self.sent_classifier(features)\n logits_s = logits_s.view(-1, self.num_evidence + 1, self.sent_num_labels)[\n :, 1:\n ].contiguous() # exclude claim\n\n selection_loss = None\n if selection_labels is not None:\n selection_labels = selection_labels[:, 1:].contiguous() # exclude claim\n loss_fct = CrossEntropyLoss()\n selection_loss = loss_fct(\n logits_s.view(-1, self.sent_num_labels), selection_labels.view(-1)\n )\n\n sent_scores = None\n if self.attn_bias_type != \"none\":\n # sent_scores: batch x evidence\n sent_scores = torch.softmax(logits_s, dim=-1)[:, :, 1]\n\n logits = self.get_logits(encoder_outputs, attention_mask, sent_scores)\n\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss(weight=class_weights)\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n if selection_loss is not None:\n loss = loss + (self.lambda_joint * selection_loss)\n\n return self.create_outputs(loss, logits, encoder_outputs, return_dict)\n","repo_name":"nii-yamagishilab/mla","sub_path":"modeling_verification.py","file_name":"modeling_verification.py","file_ext":"py","file_size_in_byte":10066,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"41917081374","text":"def quick_sort(arr):\n \"\"\"\n quick_sort on array of integers\n \"\"\"\n def partition(arr, p, r):\n pivot = arr[r]\n i = p-1\n for j in range(p, r):\n if arr[j] <= pivot:\n i += 1\n arr[i], arr[j] = arr[j], arr[i]\n arr[i+1], arr[r] = arr[r], arr[i+1]\n return i+1\n\n def quick_sort_rec(arr, p, r):\n if p < r:\n q = partition(arr, p, r)\n quick_sort_rec(arr, p, q-1)\n quick_sort_rec(arr, q+1, r)\n\n n = len(arr)\n quick_sort_rec(arr, 0, n-1)\n return arr\n\n\ndef main():\n print('\\nQUICK SORT \\t')\n arr = []\n size = int(input(\"\\nEnter size of the array: \\t\"))\n for i in range(size):\n elements = int(input(\"Enter the element: \\t\"))\n arr.append(elements)\n print('\\nThe unsorted array: \\t', arr)\n quick_sort(arr)\n print('\\nThe sorted array: \\t', arr)\n print('\\n')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"engez7/ssapyc","sub_path":"quicksortrec.py","file_name":"quicksortrec.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"22621431709","text":"import json\nimport pandas as pd\n\ndf = pd.read_json('data/text/dialog.json', encoding='utf-8')\nbody = df['body'].tolist()\nour = df['our'].tolist()\ngd = df['gd'].tolist()\nfor i in range(len(body)):\n summary_1 = our[i]\n summary_2 = gd[i]\n dialog = body[i].split('< code >')[0]\n code = body[i].split('< code >')[1]\n s1 = summary_1.split('\\t')[1].split('\\n')[0]\n s2 = summary_2.split('\\t')[1].split('\\n')[0]\n print(code)\n\n","repo_name":"GuodongFan/ADSum","sub_path":"tools/dialog_GUI/file_deal.py","file_name":"file_deal.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"21285908411","text":"#!/usr/bin/python3\n\"\"\"function that queries the Reddit API and prints the titles of the\nfirst 10 hot posts listed for a given subreddit\n\"\"\"\n\nimport requests\n\n\ndef top_ten(subreddit):\n \"\"\"Prints the titles of ist 10 hot post a given subreddit.\"\"\"\n reddit_url = \"https://www.reddit.com/r/\" + subreddit + \"/hot.json?limit=10\"\n header = {\"User-Agent\": \"Chrome/81.0.4044.129\"}\n req = requests.get(reddit_url, headers=header)\n reddit = req.json()\n\n if (req.status_code == 200):\n \"\"\"checks if the response status is ok\"\"\"\n kali_post = reddit.get(\"data\").get(\"children\")\n for post in kali_post:\n print(post.get(\"data\").get(\"title\"))\n else:\n print(\"None\")\n","repo_name":"AishaKhalfan/alx-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"36596360909","text":"import atexit\nimport logging\nimport os\nimport json\nimport subprocess\nfrom pathlib import Path\n\nfrom bokeh.embed import server_document\nfrom flask import Flask, request\nfrom flask import render_template\n\nproject_dir = Path(__file__).resolve().parents[1]\nprint(project_dir)\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.DEBUG)\n\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = \"hello_lifeplayer\"\napp.config[\"DEBUG_TB_INTERCEPT_REDIRECTS\"] = False\n#\npath_to_bokeh_py = f\"{project_dir}/lifeplayer_app/lifeplayer_plot.py\"\nif os.getenv('bokeh_runs', 'no') == 'no':\n bokeh_process = subprocess.Popen(\n [\n \"python\",\n \"-m\",\n \"panel\",\n \"serve\",\n \"--allow-websocket-origin=localhost:5000\",\n path_to_bokeh_py,\n ],\n stdout=subprocess.PIPE,\n )\n os.environ['bokeh_runs'] = 'yes'\n\n\n@atexit.register\ndef kill_server():\n bokeh_process.kill()\n os.environ['bokeh_runs'] = 'no'\n\n\n@app.route(\"/lifeplayer_plot\", methods=['GET', 'POST'])\ndef life_player_json():\n data = request.get_json()\n print(f'Flask side: {data}')\n script = server_document(\n url=\"http://localhost:5006/lifeplayer_plot\", arguments=data)\n print(script)\n return render_template(\"lifeplayer_template.html\", bokeh_script=script)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"Brayden-Arthur/golio","sub_path":"src/lifeplayer_app/test_lifeplayer_args.py","file_name":"test_lifeplayer_args.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"6054916068","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n\"\"\"\n Usage: git-loi-101.py [--aide] COMMANDE [OPTIONS]\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\n# import locale\nimport sys\nfrom subprocess import call, Popen, PIPE, STDOUT\nfrom six import iteritems\n\ntry:\n from argcomplete import autocomplete\n autocomplete_installed = True\nexcept ImportError:\n autocomplete_installed = False\n\n# locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')\n\nfrench_to_english = {\n 'clone': 'clone',\n 'pousser': 'push',\n 'tirer': 'pull',\n 'statut': 'status',\n 'branche': 'branch',\n 'différence': 'diff',\n 'initialiser': 'init',\n 'étiqueter': 'tag',\n 'réinitialiser': 'reset',\n 'regarder-ailleurs': 'checkout',\n 'afficher': 'show',\n 'cachette': 'stash',\n 'commettre': 'commit',\n 'fusion': 'merge',\n 'outil-fusion': 'mergetool',\n 'outil-des-différences': 'difftool',\n 'cueillette-de-cerise': 'cherry-pick',\n 'ajouter': 'add',\n 'enlever': 'rm',\n 'refonte': 'rebase',\n 'rapporter': 'fetch',\n 'aide': 'help',\n}\n\nfrench_to_english_flags = {\n 'tout': 'all',\n 'pruneau': 'prune',\n 'émonder': 'prune',\n 'dur': 'hard',\n 'courge': 'squash',\n 'écraser': 'squash',\n 'oblige': 'force',\n 'récursive': 'r',\n}\n\nfrench_to_english_args = {\n 'maître': 'master',\n 'tête': 'head',\n 'origine': 'origin',\n}\n\nparser = argparse.ArgumentParser(description='Wrapper de Git en français', add_help=False)\nparser.add_argument('--aide', '-a', action=\"help\")\n\nsubparsers = parser.add_subparsers(dest=\"commandes\")\n\n# Git Pull\npull = subparsers.add_parser(\n 'tirer',\n help=\"Mettre à jour l'entrepôt local à partir d'une branche distante\",\n add_help=False\n)\n\n# Git Push\npush = subparsers.add_parser(\n 'pousser',\n help=\"Pousser les changements local ver une branche distante\",\n add_help=False\n)\npush.add_argument('--aide', '-a', action=\"help\")\npush.add_argument('distante', nargs='?')\npush.add_argument('branche', nargs='?')\n\n# Git Clone\nclone = subparsers.add_parser(\n 'clone', help=\"Cloner un entrepôt distante\", add_help=False\n)\nclone.add_argument(\n 'entrepôt', help=\"l'URL de l'entrepôt\", metavar=\"ENTREPÔT\"\n)\nclone.add_argument(\n 'dossier', nargs='?', help=\"Le dossier cible\", metavar=\"DOSSIER\"\n)\n\n# Git status\nstatus = subparsers.add_parser(\n 'statut', help=\"Voir l'état de l'entrepôt local\", add_help=False\n)\n\n# Git Branch\nbranch = subparsers.add_parser(\n 'branche', help=\"Afficher les branches de l'entrepôt\", add_help=False\n)\nbranch.add_argument(\n '--tout', '-t', action='store_true', help=\"Inclus les branches distantes\"\n)\n\n# Git diff\ndiff = subparsers.add_parser(\n 'différence', help=\"Compare deux branches\", add_help=False\n)\ndiff_tool = subparsers.add_parser(\n 'outil-des-différences', help=\"Compare deux branches\", add_help=False\n)\n\ninit = subparsers.add_parser(\n 'initialiser',\n help=\"Initialiser un entrepôt git dans le dossier actuel\",\n add_help=False\n)\ntag = subparsers.add_parser(\n 'étiqueter', help=\"Ajoute une étiquette sur la branche\", add_help=False\n)\n\nreset = subparsers.add_parser(\n 'réinitialiser', help=\"Réinitialiser\", add_help=False\n)\nreset.add_argument(\n '--dur', '-d', action='store_true', help=\"Réinitialiser de manière dur\"\n)\nreset.add_argument(\n 'cible', help=\"Le fichier ou la branche cible\", metavar=\"CIBLE\"\n)\n\ncheckout = subparsers.add_parser('regarder-ailleurs', help=\"\", 
add_help=False)\nshow = subparsers.add_parser('afficher', help=\"\", add_help=False)\nstash = subparsers.add_parser('cachette', help=\"\", add_help=False)\ncommit = subparsers.add_parser('commettre', help=\"\", add_help=False)\n\nmerge = subparsers.add_parser('fusion', add_help=False)\nmerge.add_argument(\n '--courge', '-c', action='store_true', help=\"Fusionner les commit ensemble.\"\n)\nmerge.add_argument(\n '--écraser', '-e', action='store_true', help=\"Combiner les commettres ensemble\"\n)\n\n\nmerge_tool = subparsers.add_parser(\n 'outil-fusion',\n help=\"Gérer les conflits de fusion avec une interface graphique.\",\n add_help=False\n)\n\n# Git Cherry-Pick\ncherry_pick = subparsers.add_parser(\n 'cueillette-de-cerise',\n help=\"Prendre un commis en particulier et l'amener dans la branche actuelle.\",\n add_help=False\n)\n\nadd = subparsers.add_parser('ajouter', add_help=False)\nadd.add_argument(\n 'fichiers', nargs='+', help=\"Le ou les fichiers à ajouter\",\n metavar=\"FICHIERS\",\n)\nadd.add_argument(\n '--oblige', '-o', action='store_true', help=\"Oblige l'ajout de(s) fichier(s)\"\n)\n\nrm = subparsers.add_parser('enlever', add_help=False)\nrm.add_argument(\n 'fichiers', nargs='+', help=\"Le ou les fichiers à supprimer\", metavar=\"FICHIERS\"\n)\nrm.add_argument(\n '--oblige', '-o', action='store_true', help=\"Oblige le suppression de(s) fichier(s)\"\n)\nrm.add_argument(\n '--récursive', '-r', action='store_true', help=\"Fais la suppression de manière récursive\"\n)\n\nrebase = subparsers.add_parser(\n 'refonte',\n help=\"Mettre à jour l'historique des commis avec une autre branche\",\n add_help=False\n)\nfetch = subparsers.add_parser('rapporter', add_help=False)\nfetch.add_argument(\n '--pruneau', '-p', action='store_true', help=\"Enlever les branches supprimer\"\n)\nfetch.add_argument(\n '--émonder', '-e', action='store_true', help=\"Enlever les branches supprimer\"\n)\n\nif autocomplete_installed:\n autocomplete(parser)\n\nargs, unknown_args = parser.parse_known_args()\n\ncommand = french_to_english.get(args.commandes)\nif not command:\n if not unknown_args:\n print(__doc__)\n sys.exit(1)\n call(['git'] + unknown_args)\n sys.exit(0)\n\nargs = vars(args)\ndel args['commandes']\ngit_arguments = ['git', command]\n\nfor key, value in iteritems(args):\n if value is True or value is False:\n flag = french_to_english_flags.get(key)\n if flag is True:\n if len(flag) == 1:\n value = \"-\" + flag\n else:\n value = \"--\" + flag\n else:\n continue\n\n if isinstance(value, list):\n if value:\n git_arguments += value\n\n elif value:\n english_value = french_to_english_args.get(value)\n if english_value:\n value = english_value\n\n git_arguments.append(value)\n\nall_args = git_arguments + unknown_args\n\ninteractive_commands = [\n 'commit', 'merge', 'diff'\n]\n\nif command in interactive_commands:\n exit_code = call(all_args)\n sys.exit(exit_code)\n\ncmd = None\ntry:\n cmd = Popen(\" \".join(all_args), stdout=PIPE, stderr=STDOUT, shell=True)\n status_code = cmd.wait()\n while True:\n line = cmd.stdout.readline()\n if not line:\n break\n print(line.decode(\"utf-8\"), end='')\n\n if status_code != 0:\n print(\"Un erreur est survenu\")\n sys.exit(1)\n\nexcept KeyboardInterrupt:\n if cmd:\n cmd.kill()\n","repo_name":"sopelj/git-loi-101","sub_path":"git-loi-101.py","file_name":"git-loi-101.py","file_ext":"py","file_size_in_byte":6918,"program_lang":"python","lang":"fr","doc_type":"code","stars":8,"dataset":"github-code","pt":"19"}
+{"seq_id":"14859751440","text":"import sys\nimport math\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import QApplication, QMessageBox\nfrom PyQt5.QtOpenGL import QGL, QGLFormat, QGLWidget\n\ntry:\n from OpenGL import GL\nexcept ImportError:\n app = QApplication(sys.argv)\n QMessageBox.critical(None, \"OpenGL samplebuffers\",\n \"PyOpenGL must be installed to run this example.\")\n sys.exit(1)\n\n\nclass GLWidget(QGLWidget):\n GL_MULTISAMPLE = 0x809D\n rot = 0.0\n\n def __init__(self, parent):\n super(GLWidget, self).__init__(QGLFormat(QGL.SampleBuffers), parent)\n\n self.list_ = []\n\n self.startTimer(40)\n self.setWindowTitle(\"Sample Buffers\")\n\n def initializeGL(self):\n GL.glMatrixMode(GL.GL_PROJECTION)\n GL.glLoadIdentity()\n GL.glOrtho( -.5, .5, .5, -.5, -1000, 1000)\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glLoadIdentity()\n GL.glClearColor(1.0, 1.0, 1.0, 1.0)\n\n self.makeObject()\n\n def resizeGL(self, w, h):\n GL.glViewport(0, 0, w, h)\n\n def paintGL(self):\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n GL.glMatrixMode(GL.GL_MODELVIEW)\n GL.glPushMatrix()\n GL.glEnable(GLWidget.GL_MULTISAMPLE)\n GL.glTranslatef( -0.25, -0.10, 0.0)\n GL.glScalef(0.75, 1.15, 0.0)\n GL.glRotatef(GLWidget.rot, 0.0, 0.0, 1.0)\n GL.glCallList(self.list_)\n GL.glPopMatrix()\n\n GL.glPushMatrix()\n GL.glDisable(GLWidget.GL_MULTISAMPLE)\n GL.glTranslatef(0.25, -0.10, 0.0)\n GL.glScalef(0.75, 1.15, 0.0)\n GL.glRotatef(GLWidget.rot, 0.0, 0.0, 1.0)\n GL.glCallList(self.list_)\n GL.glPopMatrix()\n\n GLWidget.rot += 0.2\n\n self.qglColor(Qt.black)\n self.renderText(-0.35, 0.4, 0.0, \"Multisampling enabled\")\n self.renderText(0.15, 0.4, 0.0, \"Multisampling disabled\")\n\n def timerEvent(self, event):\n self.update()\n\n def makeObject(self):\n trolltechGreen = QColor.fromCmykF(0.40, 0.0, 1.0, 0.0)\n NumSectors = 15\n x1 = +0.06\n y1 = -0.14\n x2 = +0.14\n y2 = -0.06\n x3 = +0.08\n y3 = +0.00\n x4 = +0.30\n y4 = +0.22\n\n self.list_ = GL.glGenLists(1)\n GL.glNewList(self.list_, GL.GL_COMPILE)\n\n for i in range(NumSectors):\n angle1 = float((i * 2 * math.pi) / NumSectors)\n x5 = 0.30 * math.sin(angle1)\n y5 = 0.30 * math.cos(angle1)\n x6 = 0.20 * math.sin(angle1)\n y6 = 0.20 * math.cos(angle1)\n\n angle2 = float(((i + 1) * 2 * math.pi) / NumSectors)\n x7 = 0.20 * math.sin(angle2)\n y7 = 0.20 * math.cos(angle2)\n x8 = 0.30 * math.sin(angle2)\n y8 = 0.30 * math.cos(angle2)\n\n self.qglColor(trolltechGreen)\n self.quad(GL.GL_QUADS, x5, y5, x6, y6, x7, y7, x8, y8)\n self.qglColor(Qt.black)\n self.quad(GL.GL_LINE_LOOP, x5, y5, x6, y6, x7, y7, x8, y8)\n\n self.qglColor(trolltechGreen)\n self.quad(GL.GL_QUADS, x1, y1, x2, y2, y2, x2, y1, x1)\n self.quad(GL.GL_QUADS, x3, y3, x4, y4, y4, x4, y3, x3)\n\n self.qglColor(Qt.black)\n self.quad(GL.GL_LINE_LOOP, x1, y1, x2, y2, y2, x2, y1, x1)\n self.quad(GL.GL_LINE_LOOP, x3, y3, x4, y4, y4, x4, y3, x3)\n\n GL.glEndList()\n\n def quad(self, primitive, x1, y1, x2, y2, x3, y3, x4, y4):\n GL.glBegin(primitive)\n\n GL.glVertex2d(x1, y1)\n GL.glVertex2d(x2, y2)\n GL.glVertex2d(x3, y3)\n GL.glVertex2d(x4, y4)\n\n GL.glEnd()\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n f = QGLFormat.defaultFormat()\n f.setSampleBuffers(True)\n QGLFormat.setDefaultFormat(f)\n\n if not QGLFormat.hasOpenGL():\n QMessageBox.information(None, \"OpenGL samplebuffers\",\n \"This system does not support OpenGL.\")\n sys.exit(0)\n\n widget = GLWidget(None)\n\n if not widget.format().sampleBuffers():\n 
QMessageBox.information(None, \"OpenGL samplebuffers\",\n \"This system does not have sample buffer support.\")\n sys.exit(0)\n\n widget.resize(640, 480)\n widget.show()\n\n sys.exit(app.exec_())\n","repo_name":"baoboa/pyqt5","sub_path":"examples/opengl/samplebuffers.py","file_name":"samplebuffers.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":1034,"dataset":"github-code","pt":"19"}
+{"seq_id":"41150148923","text":"\"\"\" RiZoeLX 2022-2023 © SpamX \"\"\"\n\nimport os, time, sys\nfrom sys import stdout\n\nos.system(\"clear\")\n\n# Color snippets\nblack=\"\\033[0;30m\"\nred=\"\\033[0;31m\"\nbred=\"\\033[1;31m\"\ngreen=\"\\033[0;32m\"\nbgreen=\"\\033[1;32m\"\nyellow=\"\\033[0;33m\"\nbyellow=\"\\033[1;33m\"\nblue=\"\\033[0;34m\"\nbblue=\"\\033[1;34m\"\npurple=\"\\033[0;35m\"\nbpurple=\"\\033[1;35m\"\ncyan=\"\\033[0;36m\"\nbcyan=\"\\033[1;36m\"\nwhite=\"\\033[0;37m\"\nnc=\"\\033[00m\"\n\nversion=\"2.1\"\n\nask = f\"{green}[{white}?{green}] {yellow}\"\nsuccess = f\"{yellow}[{white}√{yellow}] {green}\"\nerror = f\"{blue}[{white}!{blue}] {red}\"\ninfo = f\"{yellow}[{white}+{yellow}] {cyan}\"\ninfo2 = f\"{green}[{white}•{green}] {purple}\"\n\nspamx_logo = f'''\n{bred}┏━━━┓━━━━━━━━━━━━━━━┏┓━┏┓━\n{bblue}┃┏━┓┃━━━━━━━━━━━━━━━┃┃━┃┃━\n{yellow}┃┗━┗┛┏━━┓┏━━┓━┏━━━━┓┗━━━┛━\n{bpurple}┏┓━┓┃┃┏┓┃┃┏┓┃━┃┏┓┏┓┃┏━━━┓━\n{bpurple}┃┗━┛┃┃┗┛┃┃┗━┗┓┃┃┃┃┃┃┃┃━┃┃━\n{byellow}┗━━━┛┃━━┛┗━━━┛┗┛┗┛┗┛┗┛━┗┛━\n{bblue}━━━━━┃┃━━━━━━━━━━━━━━━━━━━\n{bred}━━━━━┗┛━━━━━━━━━━━━━━━━━━━\n'''\n\ndef sprint(text, second):\n for line in text + '\\n':\n stdout.write(line)\n stdout.flush()\n time.sleep(second)\n\n# Clear the screen and show logo\ndef clear(fast=False):\n os.system(\"clear\")\n if fast:\n print(spamx_logo)\n else:\n sprint(spamx_logo, 0.01)\n\nevn_vars = \"\"\"\nAPI_ID=\nAPI_HASH=\nOWNER_ID=\nSUDO_USERS=\nALIVE_PIC=\nALIVE_MSG=\nPING_MSG=\nLOGS_CHANNEL=\nDATABASE_URL=\nCLIENT=\nCLIENT2=\nCLIENT3=\nCLIENT4=\nCLIENT5=\nCLIENT6=\nCLIENT7=\nCLIENT8=\nCLIENT9=\nCLIENT10=\nCLIENT11=\nCLIENT12=\nCLIENT13=\nCLIENT14=\nCLIENT15=\nCLIENT16=\nCLIENT17=\nCLIENT18=\nCLIENT19=\nCLIENT20=\nHNDLR=\n\"\"\"\n\ndef SpamX_Setup():\n os.system(\"pip3 install python-dotenv[cli]\")\n clear()\n print(f' {white}SpamX Version: v0.5 \\n {white}By RiZoeX')\n \n time.sleep(2)\n api_id = input(f\"{ask}Enter API_ID: \")\n if api_id:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set API_ID {api_id}\")\n else:\n print(f\"{error}You have to fill this variable! all process restarting..\")\n time.sleep(2)\n SpamX_Setup()\n api_hash = input(f\"\\n{ask}Enter API_HASH: \")\n if api_hash:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set API_HASH {api_hash}\")\n else:\n print(f\"{error}You have to fill this variable! all process restarting..\")\n time.sleep(2)\n SpamX_Setup()\n ALIVE_PIC = input(f\"\\n{ask}Enter ALIVE_PIC (Telegraph link) or press enter!: \")\n if ALIVE_PIC:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set ALIVE_PIC {ALIVE_PIC}\")\n ALIVE_MSG = input(f\"\\n{ask}Enter ALIVE_MSG or press enter: \").replace(\" \", \"\\ \")\n if ALIVE_MSG:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set ALIVE_MSG {ALIVE_MSG}\")\n PING_MSG = input(f\"\\n{ask}Enter PING_MSG or press enter: \").replace(\" \", \"\\ \")\n if PING_MSG:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set PING_MSG {PING_MSG}\")\n LOGS_CHANNEL = input(f\"\\n{ask}Enter Chat ID or Username of LOGS_CHANNEL or press enter: \")\n if LOGS_CHANNEL:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set LOGS_CHANNEL {LOGS_CHANNEL}\")\n owner_id = input(f\"\\n{ask}Enter OWNER_ID: \")\n if owner_id:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set OWNER_ID {owner_id}\")\n else:\n print(f\"{error}You have to fill this variable! 
all process restarting..\")\n time.sleep(2)\n SpamX_Setup()\n sudo_users = input(f\"\\n{ask}Enter SUDO_USERS (space by space) or press enter: \").replace(\" \", \"\\ \")\n if sudo_users:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set SUDO_USERS {sudo_users}\")\n cmd_hndlr = input(f\"\\n{ask}Enter HNDLR or press enter: \")\n if cmd_hndlr:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set HNDLR {cmd_hndlr}\")\n CLIENT = input(f\"\\n{ask}Enter session or bot token of CLIENT: \")\n if CLIENT:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT {CLIENT}\")\n else:\n print(f\"{error}You have to fill this variable! all process restarting..\")\n time.sleep(2)\n SpamX_Setup()\n CLIENT2 = input(f\"\\n{ask}Enter session or bot token of CLIENT2 or press enter: \")\n if CLIENT2:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT2 {CLIENT2}\")\n CLIENT3 = input(f\"\\n{ask}Enter session or bot token of CLIENT3 or press enter: \")\n if CLIENT3:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT3 {CLIENT3}\")\n CLIENT4 = input(f\"\\n{ask}Enter session or bot token of CLIENT4 or press enter: \")\n if CLIENT4:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT4 {CLIENT4}\")\n CLIENT5 = input(f\"\\n{ask}Enter session or bot token of CLIENT5 or press enter: \")\n if CLIENT5:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT5 {CLIENT5}\")\n CLIENT6 = input(f\"\\n{ask}Enter session or bot token of CLIENT6 or press enter: \")\n if CLIENT6:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT6 {CLIENT6}\")\n CLIENT7 = input(f\"\\n{ask}Enter session or bot token of CLIENT7 or press enter: \")\n if CLIENT7:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT7 {CLIENT7}\")\n CLIENT8 = input(f\"\\n{ask}Enter session or bot token of CLIENT8 or press enter: \")\n if CLIENT8:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT8 {CLIENT8}\")\n CLIENT9 = input(f\"\\n{ask}Enter session or bot token of CLIENT9 or press enter: \")\n if CLIENT9:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT9 {CLIENT9}\")\n CLIENT10 = input(f\"\\n{ask}Enter session or bot token of CLIENT10 or press enter: \")\n if CLIENT10:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT10 {CLIENT10}\")\n CLIENT11 = input(f\"\\n{ask}Enter session or bot token of CLIENT11 or press enter: \")\n if CLIENT11:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT11 {CLIENT11}\")\n CLIENT12 = input(f\"\\n{ask}Enter session or bot token of CLIENT12 or press enter: \")\n if CLIENT12:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT12 {CLIENT12}\")\n CLIENT13 = input(f\"\\n{ask}Enter session or bot token of CLIENT13 or press enter: \")\n if CLIENT13:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT13 {CLIENT13}\")\n CLIENT14 = input(f\"\\n{ask}Enter session or bot token of CLIENT14 or press enter: \")\n if CLIENT14:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT14 {CLIENT14}\")\n CLIENT15 = input(f\"\\n{ask}Enter session or bot token of CLIENT15 or press enter: \")\n if CLIENT15:\n print(f\"{bcyan}Got it! 
Fill next value\")\n os.system(f\"dotenv set CLIENT15 {CLIENT15}\")\n CLIENT16 = input(f\"\\n{ask}Enter session or bot token of CLIENT16 or press enter: \")\n if CLIENT16:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT16 {CLIENT16}\")\n CLIENT17 = input(f\"\\n{ask}Enter session or bot token of CLIENT17 or press enter: \")\n if CLIENT17:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT17 {CLIENT17}\")\n CLIENT18 = input(f\"\\n{ask}Enter session or bot token of CLIENT18 or press enter: \")\n if CLIENT18:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT18 {CLIENT18}\")\n CLIENT19 = input(f\"\\n{ask}Enter session or bot token of CLIENT19 or press enter: \")\n if CLIENT19:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT19 {CLIENT19}\")\n CLIENT20 = input(f\"\\n{ask}Enter session or bot token of CLIENT20 or press enter: \")\n if CLIENT20:\n print(f\"{bcyan}Got it! Fill next value\")\n os.system(f\"dotenv set CLIENT20 {CLIENT20}\")\n database_url = input(f\"\\n{ask}Enter Postgres database url or press enter: \")\n if database_url:\n if 'postgresql' in database_url or 'postgres' in database_url:\n print(f\"{bcyan}Got it!\")\n os.system(f\"dotenv set DATABASE_URL {database_url}\")\n else:\n print(f\"{error}Need Postgres database url, fill DATABASE_URL manually\")\n recheck()\n\ndef recheck():\n Recheck = input(f\"\\n{ask}Filled ALL Vars Correctly?: y/n: \")\n if Recheck.lower() == \"n\":\n os.system(\"clear\")\n print(f\"{info}Okay! Fill Your Vars Again\")\n SpamX_Setup()\n elif Recheck.lower() == \"y\":\n \n get_start()\n else:\n print(f\"\\n{ask}Input Must Be Y or N\")\n recheck()\n\ndef get_start():\n clear(fast=True)\n question = input(f\"{ask}Wanna start SpamX Now?: y/n: \")\n if question.lower() == \"y\":\n os.system(\"pip3 install python-dotenv\")\n os.system(\"python3 -m SpamX\")\n elif question.lower() == \"n\":\n print(f\"\\n{info}Nevermind !! You Can Start It Later With by using; python3-m SpamX\\n\")\n exit(2)\n else:\n os.system(\"clear\")\n print(f\"{error}\\nInput Must Be y or n\")\n get_start()\n","repo_name":"RiZoeLX/SpamX","sub_path":"resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9715,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"}
+{"seq_id":"2874859852","text":"import logging\nLOG_LEVEL = 'INFO'\nLOG_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'\nlogging.basicConfig(\n format=LOG_FORMAT,\n level=getattr(logging, LOG_LEVEL)\n)\n\nimport os\nimport time\n\nimport numpy as np\nimport torch\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\n\nimport model as Network\nimport PS1\nimport utils\nfrom anchor_loss import AnchorLoss\nfrom checkpoint import Checkpoint\nfrom optim import Optimizer\n\n\ndef get_dataset_config(batch_size):\n dparams = {\n 'batch_size': batch_size,\n 'shuffle': True,\n 'num_workers': 2}\n evdparams = {\n 'batch_size': batch_size,\n 'shuffle': False,\n 'num_workers': 2}\n\n return dparams, evdparams\n\n\ndef prepare_db(opt):\n db = {}\n dparams, evdparams = get_dataset_config(opt.batch_size)\n\n if opt.train:\n train_set = PS1.Dataset('train', opt)\n val_set = PS1.Dataset('val', opt)\n\n train_loader = torch.utils.data.DataLoader(train_set, **dparams)\n val_loader = torch.utils.data.DataLoader(val_set, **evdparams)\n\n db['train'] = train_loader\n db['val'] = val_loader\n elif opt.infer:\n infer_set = PS1.Dataset('infer', opt)\n infer_loader = torch.utils.data.DataLoader(infer_set, **evdparams)\n db['infer'] = infer_loader\n else:\n test_set = PS1.Dataset('test', opt)\n\n test_loader = torch.utils.data.DataLoader(test_set, **evdparams)\n\n db['test'] = test_loader\n\n return db\n\n\ndef prepare_loss(opt):\n opt.criterion = AnchorLoss(gamma=opt.gamma)\n\n return opt\n\n\ndef prepare_model(opt):\n opt.ninp = 17\n model = Network.MBRNN(opt)\n if not opt.train:\n model.eval()\n\n return model\n\n\ndef prepare_optim(model):\n # setting optimizer\n optimizer = Optimizer(\n torch.optim.Adam(\n model.parameters(),\n lr=0.0008,\n betas=(0.5, 0.999),\n weight_decay=5e-5),\n max_grad_norm=5)\n # setting scheduler of optimizer for learning rate decay.\n scheduler = ReduceLROnPlateau(\n optimizer.optimizer,\n patience=5,\n factor=0.5,\n min_lr=0.000001)\n optimizer.set_scheduler(scheduler)\n\n return optimizer\n\n\ndef loss_dictionaries(opt):\n ldic = {}\n # *_loss_every: [avg_*_loss2]\n ldic['tloss_pevery'] = []\n ldic['vloss_pevery'] = []\n # tot_*_group_loss: tot_*_print_loss\n ldic['tgloss'] = np.array([0.])\n ldic['vgloss'] = np.array([0.])\n # tot_*_epoch_loss: [tot_*_epoch_loss, count_for_average]\n ldic['teloss'] = [0., 0.]\n ldic['veloss'] = [0., 0.]\n\n return ldic\n\n\ndef train(db, model, optim, opt):\n logging.info(\"Train started\")\n\n train_generator, val_generator = db['train'], db['val']\n train_set = train_generator.dataset\n\n model.train(True)\n model.to(opt.device)\n\n step = 0 # The number of backpropagation\n stime = time.time() # start time\n val_min_tloss = np.inf # minimum validation loss placeholder\n ldic = loss_dictionaries(opt)\n max_training_epoch = opt.max_training_epoch\n batch_max_epoch = int(len(train_set)/opt.batch_size) + 1\n for epoch in range(max_training_epoch):\n for Be, (local_batch, local_zbin) in enumerate(train_generator):\n local_batch = local_batch.to(opt.device)\n local_zbin = local_zbin.to(opt.device)\n\n optim.zero_grad()\n out_probs = model(local_batch)\n\n loss = opt.criterion(out_probs, local_zbin)\n\n # update loss and its denominator\n ldic['teloss'][0] += loss\n ldic['teloss'][1] += 1\n ldic['tgloss'][0] += loss.item()\n\n loss.backward()\n optim.step()\n\n if step != 0 and step % opt.pevery == 0:\n mean_tgloss = ldic['tgloss'][0] / opt.pevery\n ldic['tloss_pevery'].append(mean_tgloss)\n ldic['tgloss'] = np.array([0.])\n\n # for 
log messages\n prog = float(epoch)/(max_training_epoch)*100\n for param in optim.param_groups():\n lr = param['lr'] # learning rate.\n\n # log messages\n log_msg = \"Step: %d/%d, \" % (Be, batch_max_epoch)\n log_msg += \"Progress %d%%, \" % prog\n log_msg += \"cls loss: %.5f, \" % mean_tgloss\n log_msg += \"learning rate: %.6f\" % lr\n logging.info(log_msg)\n\n if step != 0 and step % opt.vevery == 0:\n model.eval()\n with torch.set_grad_enabled(False):\n val_step, val_correct = 0, 0\n for local_batch, local_zbin in val_generator:\n local_batch = local_batch.to(opt.device)\n local_zbin = local_zbin.to(opt.device)\n\n out_probs = model(local_batch)\n\n val_loss = opt.criterion(out_probs, local_zbin)\n\n # get the index of the maximum probability\n val_pred = out_probs.data.max(1, keepdim=True)[1]\n val_correct += val_pred.eq(\n local_zbin.data.view_as(val_pred)).cpu().sum()\n\n # update total validation loss\n # and its denominator\n ldic['veloss'][0] += val_loss\n ldic['veloss'][1] += 1\n ldic['vgloss'][0] += val_loss.item()\n\n val_step += 1\n\n avg_vgloss = ldic['vgloss'][0]/val_step\n ldic['vloss_pevery'].append(avg_vgloss)\n ldic['vgloss'] = np.array([0.])\n\n if avg_vgloss < val_min_tloss:\n val_min_tloss = avg_vgloss\n checkpoint = Checkpoint(\n step, epoch, model, optim, opt=opt)\n checkpoint.save()\n\n # for validation log message\n vdat_len = len(val_generator.dataset)\n val_acc = float(val_correct)/vdat_len\n\n log_msg = \"Validation set accuracy: %i/%i (%.6f)\\n\" % \\\n (val_correct, vdat_len, val_acc)\n log_msg += \"current validation cls loss: %.5f, \" % \\\n avg_vgloss\n log_msg += \"current minimum loss: %.5f\\n\" % val_min_tloss\n logging.info(log_msg)\n model.train(True)\n step += 1\n\n # averaged epoch losses for training and validation\n avg_teloss = ldic['teloss'][0]/ldic['teloss'][1]\n avg_veloss = ldic['veloss'][0]/ldic['veloss'][1]\n # initialize total epoch loss dictionries\n ldic['teloss'] = [0., 0.]\n ldic['veloss'] = [0., 0.]\n\n if epoch >= opt.lr_decay_epoch:\n optim.update(avg_vgloss, epoch)\n\n log_msg = \"Finished epoch %d, \" % epoch\n log_msg += \"train loss: %.5f, \" % avg_teloss\n log_msg += \"validation loss: %.5f, \" % avg_veloss\n logging.info(log_msg)\n\n etime = time.time() # end time\n dur = etime - stime # training time\n logging.info(\"Training is done. 
Took %.3fh\" % (dur/3600.))\n\n\ndef test(db, model, opt):\n logging.info(\"Test started\")\n\n # model setting\n model = set_loaded_model(model, opt=opt)\n model.eval()\n\n test_generator = db['test']\n\n test_set = test_generator.dataset\n binc = test_set.binc.to(opt.device)\n dlen = len(test_set)\n test_correct = 0\n\n probs_placeholder = torch.empty(dlen, opt.ncls).to(opt.device)\n zphot_placeholder = torch.empty(dlen).to(opt.device)\n with torch.no_grad():\n for bepoch, (local_batch, local_zbin) in enumerate(test_generator):\n local_batch = local_batch.to(opt.device)\n local_zbin = local_zbin.to(opt.device)\n\n # input into model\n out_probs = model(local_batch)\n\n # get the index of the maximum log-probability\n pred = out_probs.data.max(1, keepdim=True)[1]\n correct_mask = pred.eq(local_zbin.data.view_as(pred))\n test_correct += correct_mask.cpu().sum()\n\n zphot = torch.sum(out_probs*binc, dim=1).view(-1)\n\n sidx = bepoch*opt.batch_size\n eidx = sidx+opt.batch_size\n\n probs_placeholder[sidx:eidx] = out_probs\n zphot_placeholder[sidx:eidx] = zphot\n\n tdat_len = len(test_generator.dataset)\n test_acc = float(test_correct)/tdat_len\n log_msg = \"Test accuracy: %i/%i (%.6f)\\n\" % \\\n (test_correct, tdat_len, test_acc)\n logging.info(log_msg)\n\n probs = probs_placeholder.cpu().detach().numpy()\n zphot = zphot_placeholder.cpu().detach().numpy()\n outputs = np.hstack((probs, zphot.reshape(-1, 1)))\n save_results(outputs, opt)\n\n\ndef infer(db, model, opt):\n logging.info(\"Inference started\")\n\n # model setting\n model = set_loaded_model(model, opt=opt)\n model.eval()\n\n test_generator = db['infer']\n\n test_set = test_generator.dataset\n binc = test_set.binc.to(opt.device)\n dlen = len(test_set)\n\n probs_placeholder = torch.empty(dlen, opt.ncls).to(opt.device)\n zphot_placeholder = torch.empty(dlen).to(opt.device)\n zmode_placeholder = torch.empty(dlen).to(opt.device)\n zsig_placeholder = torch.empty(dlen).to(opt.device)\n with torch.no_grad():\n for bepoch, (local_batch, local_zbin) in enumerate(test_generator):\n local_batch = local_batch.to(opt.device)\n local_zbin = local_zbin.to(opt.device)\n\n # input into model\n out_probs = model(local_batch)\n\n # get the index of the maximum log-probability\n pred = out_probs.data.max(1, keepdim=True)[1]\n\n # average redshifts\n zphot = torch.sum(out_probs*binc, dim=1).view(-1)\n\n # mode redshifts\n prob_argmax = torch.argmax(out_probs, dim=1)\n zmode = binc[prob_argmax]\n\n # standard deviation\n zsig = torch.sum(out_probs*(binc-zphot.view(-1, 1))**2., dim=1)\n\n sidx = bepoch*opt.batch_size\n eidx = sidx+opt.batch_size\n\n probs_placeholder[sidx:eidx] = out_probs\n zphot_placeholder[sidx:eidx] = zphot\n zmode_placeholder[sidx:eidx] = zmode\n zsig_placeholder[sidx:eidx] = zsig\n\n probs = probs_placeholder.cpu().detach().numpy()\n zphot = zphot_placeholder.cpu().detach().numpy()\n zmode = zmode_placeholder.cpu().detach().numpy()\n zsig = zsig_placeholder.cpu().detach().numpy()\n outputs = np.hstack((probs,\n zphot.reshape(-1, 1),\n zmode.reshape(-1, 1),\n zsig.reshape(-1, 1)))\n for ind in range(0, len(zphot)):\n print(\"Photo-z: %d %.6f %.6f %.6f\" % (ind+1, zphot[ind], zmode[ind], zsig[ind]))\n save_results(outputs, opt)\n\n\ndef save_results(outputs, opt):\n if not os.path.exists(opt.out_fd):\n os.makedirs(opt.out_fd)\n if opt.infer:\n fn = 'inference_output.npy'\n elif opt.test:\n fn = 'test_output.npy'\n out_fn = os.path.join(opt.out_fd, fn)\n\n np.save(out_fn, outputs)\n logging.info(\"Outputs are saved at %s\" % 
out_fn)\n\n\ndef set_loaded_model(model, optim=None, opt=None):\n resume_checkpoint = Checkpoint.load(\n model, optim=optim, opt=opt)\n model = resume_checkpoint.model\n model.to(opt.device)\n\n if optim:\n optim = resume_checkpoint.optim\n return model, optim\n else:\n return model\n\n\ndef main():\n opt = utils.Parser()\n\n if torch.cuda.is_available():\n opt.device = torch.device('cuda:%s' % opt.gpuid)\n else:\n logging.warning(\"RUN WITHOUT GPU\")\n opt.device = torch.device('cpu')\n\n db = prepare_db(opt)\n opt = prepare_loss(opt)\n model = prepare_model(opt)\n\n if opt.train:\n optim = prepare_optim(model)\n train(db, model, optim, opt)\n elif opt.infer:\n infer(db, model, opt)\n else:\n test(db, model, opt)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"GooLee0123/MBRNN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"17433459881","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n\nimport pandas as pd\nimport numpy as np\n\n\n# In[2]:\n\n\nrw = pd.read_excel(r\"C:\\Users\\3363\\Desktop\\Akshay\\Incetive.xlsx\")\nrw.head()\n\n\n# In[3]:\n\n\nrw.fillna(0)\n\n\n# In[4]:\n\n\nrw[\"BRACES\"].replace({1: 300, 2: 1000, 3:1700, 4:2400, 5:3100, 6:3800, 7:4500}, inplace=True)\n\nrw[\"CARDIO/PGX- PULMONARY\"].replace({1: 250, 2: 750, 3:1250}, inplace=True)\n\n#rw[\"DIABETES\"].replace({1: 200, 2: 600, 3:1000}, inplace=True)\n\nrw[\"MED. SUP\"].replace({1: 0, 2: 200, 3:300, 4:400, 5:500,6:600,7:700}, inplace=True)\nrw[\"CANCER\"].replace({1: 250, 2: 500, 3:750}, inplace=True)\n\nrw.fillna(0)\n\nrw['Total Amount'] = rw[\"BRACES\"] + rw[\"CARDIO/PGX- PULMONARY\"] + rw[\"MED. SUP\"] + rw[\"CANCER\"]\nReport = rw\nReport.head()\n\n\n# In[5]:\n\n\nReport.columns\n\n\n# In[6]:\n\n\nReport = Report[['Emp #', 'BRACES', 'CARDIO/PGX- PULMONARY' ,'MED. SUP', 'Total', 'Total Amount']]\n\n\n# In[7]:\n\n\nReport1 = Report[['Emp #' , 'Total Amount']]\nReport1 = Report1[:-1]\n\n\n# In[8]:\n\n\nReport1.to_excel(r\"C:\\Users\\3363\\Desktop\\Akshay\\Incetive Final.xlsx\", index = False)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Akshay4408/International-Salary-Data-Codes","sub_path":"Incetive Data 2.py","file_name":"Incetive Data 2.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"39316712103","text":"# N の 1 の位が 2,4,5,7,9 のとき hon\n# N の 1 の位が 0,1,6,8 のとき pon\n# N の 1 の位が 3 のとき bon\nhon = [2, 4, 5, 7, 9]\nn = input()\nif n[2] in hon:\n print(\"hon\")\nelif n[2] == \"3\":\n print(\"bon\")\nelse:\n print(\"pon\")\n\nn = input()\nif n[-1] == \"3\":\n print(\"bon\")\nelif n[-1] in \"0168\":\n print(\"pon\")\nelse:\n print(\"hon\")\n\n#別解\n#print('pphbhhphph'[int(input())%10]+'on')","repo_name":"Lotka-Volterra/ABC","sub_path":"ABC151-175/abc168/a/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"4253245435","text":"class Solution:\n def calcEquation(self, equations: List[List[str]], values: List[float], queries: List[List[str]]) -> List[float]:\n res = []\n\n ratios = defaultdict(list)\n for i in range(len(values)):\n equation = equations[i]\n ratios[equation[0]].append((equation[1], values[i]))\n ratios[equation[1]].append((equation[0], 1/values[i]))\n\n def evaluate(curr, target):\n if curr in ratios and target in ratios:\n frontier, visited = [(curr, 1)], set()\n while frontier:\n node = frontier.pop()\n visited.add(node[0])\n if node[0] == target:\n return node[1]\n for x, y in ratios[node[0]]:\n if x not in visited:\n frontier.append((x, node[1] * y))\n return -1.0\n\n for query in queries:\n res.append(evaluate(query[0], query[1]))\n\n return res\n","repo_name":"kapforty/leetcode","sub_path":"python3/399.py","file_name":"399.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"39060172353","text":"class Node:\n def __init__(self, data, link):\n self.data = data\n self.link = link\n\ndef addtoLast(data): # 마지막에 데이터 삽입\n global Head\n if Head == None: # 빈 리스트이면\n Head = Node(data, None)\n else:\n p = Head\n while p.link != None: # 마지막 노드 찾을때까지\n p = p.link\n p.link = Node(data, None)\n\ndef delete(pre):\n if pre == None or pre.link == None:\n print('error')\n else:\n pre.link = pre.link.link\n\ndef deleteFirst():\n global Head\n if pre == None:\n print('error')\n else:\n Head = Head.link\n\ndef addtoFirst(data):\n global Head\n Head = Node(data, Head)\n\n\ndef add(pre, data):\n if pre == None:\n print('error')\n else:\n pre.link=Node(data, pre.link)\n\n\nimport sys\nsys.stdin = open('sample_sequence_sum.txt', 'r')\n\nN = int(input())\n\nfor i in range(1, N+1):\n Head = None\n length, nums = map(int, input().split())\n first_list = list(map(int, input().split()))\n for k in range(length):\n addtoLast(first_list[k])\n\n for j in range(nums-1):\n next_list = list(map(int, input().split()))\n p = Head\n if next_list[0] < p.data:\n for k in range(-1, -length-1, -1):\n addtoFirst(next_list[k])\n else:\n while p.link != None:\n if p.link.data <= next_list[0]:\n p = p.link\n q = p.data\n elif p.link.data > next_list[0]:\n break\n for k in range(-1, -length-1, -1):\n add(p, next_list[k])\n u = 0\n result = []\n while Head.link != None:\n result.append(Head.data)\n Head = Head.link\n result.append(Head.data)\n print('#{}'.format(i), end=' ')\n for v in range(-1, -10, -1):\n print(result[v], end=' ')\n print(result[-10])\n","repo_name":"91hongppie/algorithm","sub_path":"linked_list/수열합치기.py","file_name":"수열합치기.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"17591322529","text":"import minimax\r\nfrom quoridor import *\r\n\r\ndef manhattanDistance(xy1, xy2):\r\n \"Returns the Manhattan distance between points xy1 and xy2\"\r\n return abs( xy1[0] - xy2[0] ) + abs( xy1[1] - xy2[1] )\r\n\r\nclass heuristic:\r\n\r\n def __init__(self):\r\n\r\n self._current_state = {}\r\n self._game_state = \"\"\r\n\r\n\r\n def switch_heuristic(dict_heuristic):\r\n\r\n condition = False # For structure to implement\r\n \r\n if (dict_heuristic[\"Step\"] < 3 and dict_heuristic[\"Player1_walls\"] + dict_heuristic[\"Player2_walls\"] == 20): # Early game heuristic\r\n \r\n _game_state = \"EarlyGame\"\r\n print(\"Early move\")\r\n return \"EarlyMove\"\r\n\r\n elif (condition):\r\n \r\n _game_state = \"MonteCarlo\"\r\n print(\"MonteCarlo move\")\r\n return \"MonteCarlo\"\r\n \r\n elif (condition):\r\n \r\n _game_state = \"DifferentState\"\r\n print(\"AnotherTypeOfDecision\")\r\n return \"SomethingToBeImplemented\"\r\n\r\n else: # Minimax algo \r\n\r\n _game_state = \"MiniMax\"\r\n print(\"Minimax move\")\r\n return \"MiniMax\"\r\n\r\n ","repo_name":"Frisoufou/TP1_V1_JF","sub_path":"heuristic_state.py","file_name":"heuristic_state.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"24857771276","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport time\nfrom pprint import pprint\n\nimport googleapiclient.discovery\nimport google.auth\nimport google.oauth2.service_account as service_account\n\n#\n# Use Google Service Account - See https://google-auth.readthedocs.io/en/latest/reference/google.oauth2.service_account.html#module-google.oauth2.service_account\n#\n\nBUCKET = 'sneethi'\nZONE = 'us-west1-b'\nINSTANCE_NAME = 'part3vm1'\n\ncredentials = service_account.Credentials.from_service_account_file(filename='service-credentials.json')\nproject = os.getenv('datacenterlab2') or 'datacenterlab2'\nservice = googleapiclient.discovery.build('compute', 'v1', credentials=credentials)\n\n# [START create_instance]\ndef create_instance(compute, project, zone, name, bucket):\n image_response = compute.images().getFromFamily(\n project='ubuntu-os-cloud', family='ubuntu-1804-lts').execute()\n source_disk_image = image_response['selfLink']\n\n # Configure the machine\n machine_type = \"zones/%s/machineTypes/f1-micro\" % ZONE\n\n startup_script = open(\n os.path.join(\n os.path.dirname(__file__), 'startup-script-vm1.sh'), 'r').read()\n\n with open( os.path.join(os.path.dirname(__file__), 'startup-script-vm2.sh')) as f:\n \tstartup_script_vm2 = \"\\n\".join(f.readlines())\n \t\n with open( os.path.join(os.path.dirname(__file__), 'part3B.py')) as f:\n \tpython_script_3B = \"\\n\".join(f.readlines())\n\n with open(os.path.join(os.path.dirname(__file__), 'service-credentials.json')) as f:\n \tservice_account_json = \"\\n\".join(f.readlines())\n\n config = {\n 'name': name,\n 'machineType': machine_type,\n\n # Specify the boot disk and the image to use as a source.\n 'disks': [\n {\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': source_disk_image,\n }\n }\n ],\n\n # Specify a network interface with NAT to access the public\n # internet.\n 'networkInterfaces': [{\n 'network': 'global/networks/default',\n 'accessConfigs': [\n {'type': 'ONE_TO_ONE_NAT', 'name': 'External NAT'}\n ]\n }],\n\n # Allow the instance to access cloud storage and logging.\n 'serviceAccounts': [{\n 'email': 'default',\n 'scopes': [\n 'https://www.googleapis.com/auth/devstorage.read_write',\n 'https://www.googleapis.com/auth/logging.write'\n ]\n }],\n\n\t\t# Metadata is readable from the instance and allows you to\n # pass configuration from deployment scripts to instances.\n 'metadata': {\n 'items': [{\n # Startup script is automatically executed by the\n # instance upon startup.\n 'key': 'startup-script',\n 'value': startup_script\n },\n {\n 'key': 'startupscript',\n 'value': startup_script_vm2\n },\n {\n 'key': 'pythonscript',\n 'value': python_script_3B\n },\n {\n 'key': 'serviceaccountjson',\n 'value': service_account_json\n }]\n }\n }\n\n\n return compute.instances().insert(\n project=project,\n zone=zone,\n body=config).execute()\n# [END create_instance]\n\n# [START wait_for_operation]\ndef wait_for_operation(compute, project, zone, operation):\n print('Waiting for operation to finish...')\n while True:\n result = compute.zoneOperations().get(\n project=project,\n zone=zone,\n operation=operation).execute()\n\n if result['status'] == 'DONE':\n print(\"done.\")\n if 'error' in result:\n raise Exception(result['error'])\n return result\n\n time.sleep(1)\n# [END wait_for_operation]\n\noperation = create_instance(service, project, ZONE, INSTANCE_NAME, BUCKET)\nwait_for_operation(service, project, ZONE, 
operation['name'])","repo_name":"shettyneethi/DataCenter","sub_path":"programmable-cloud/part3/part3.py","file_name":"part3.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"11410641344","text":"from random import seed\r\nfrom random import randint\r\n\r\nseed(1)\r\n\r\n#give me base, I give you earning\r\ndef roulette_payout(base):\r\n if(randint(0,2)):\r\n return (2/3)*base\r\n else:\r\n return -base\r\n\r\n#returns how much is in pocket\r\ndef roulette_simulator(start, rolls, reroll_ratio, pullout_ratio):\r\n no_losing = 1\r\n pocket = start\r\n for r in range(rolls):\r\n pocket += roulette_payout(pocket * reroll_ratio)\r\n if no_losing: #cut your losses if you fall below a certain amount\r\n if pocket < (start *pullout_ratio):\r\n return pocket\r\n\r\n return int(pocket)\r\n\r\n\r\n#Start the simulation here...\r\nvisit = 10 #visit the casino 10 times0\r\nlifetime_earnings = 0\r\nfor i in range(visit):\r\n total_earning = 0\r\n starting_money = 1000\r\n roll_again = 100\r\n reroll_ratio_main = 1/3\r\n pullout_ratio_main = .2\r\n earning = roulette_simulator(starting_money, roll_again, reroll_ratio_main, pullout_ratio_main) - starting_money\r\n lifetime_earnings += earning\r\n\r\n print(\"Total earning for visit #\", i ,\" is:\", earning)\r\n\r\nprint(\"Lifetime earning: \", lifetime_earnings)\r\n","repo_name":"deanthebeandip/coding_fun","sub_path":"roulette_hacker.py","file_name":"roulette_hacker.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"74453894444","text":"import pandas as pd\n\n# making lists out of the provided data using pandas\ndata = pd.read_csv('MORSE.csv')\nletter_list = data['LETTER'].to_list()\nmorse_list = data['CODE'].to_list()\n\n# user input\ninp_str = input(\"Enter a String\\n\")\noutput_list=[]\nfor char in inp_str:\n index_in_char_list = letter_list.index(char)\n morse_char = morse_list[index_in_char_list]\n output_list.append(morse_char)\n\nmorse_code = \"\".join(output_list)\nprint(f'The Morse Code Generated is {morse_code}')","repo_name":"lucifer78907/Text_To_Morse_code","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"74495352362","text":"import re\nimport os\nimport datetime\nimport json\nimport logging\nfrom Types import *\nfrom typing import List\n\n\nclass InterpretedStatementExtractor:\n\n def __init__(self, raw_entries : List[RawEntry]):\n self.__raw_entries : List[RawEntry] = raw_entries\n\n self.__interpreted_entries : List[InterpretedEntry] = []\n self.__init_interpreted_entries()\n\n self.__tag_patterns : List[TagPattern] = []\n \n def load_tag_patterns(self, config_json : str):\n config_json_path = os.path.normpath(config_json)\n if not os.path.isfile(config_json_path):\n logging.warning(f\"File {config_json_path} does not exist\")\n else:\n with open(config_json_path, mode=\"r\") as f:\n tag_patterns_json = json.load(f)\n if not isinstance(tag_patterns_json, List):\n logging.warning(f\"Expecting a list on first level inside {config_json_path}\")\n for tag_pattern in tag_patterns_json:\n self.__tag_patterns.append(\n TagPattern(\n pattern=tag_pattern[\"pattern\"], \n tag=Tag[tag_pattern[\"tag\"]] ) )\n\n def run(self):\n self.__extract_amount()\n self.__extract_date()\n self.__extract_tags()\n self.__add_undefined_tag_for_entries_without_tags()\n\n def get_interpreted_entries(self):\n return self.__interpreted_entries\n\n def __init_interpreted_entries(self):\n self.__interpreted_entries = [InterpretedEntry(date = None, amount = 0.0, tags = [], raw = raw_entry) for raw_entry in self.__raw_entries]\n\n def __extract_amount(self):\n for i, raw_entry in enumerate(self.__raw_entries):\n match = re.fullmatch(\"([\\d\\.]+),(\\d{2}) ([HS])\", raw_entry.amount)\n if match:\n before_comma : str = re.sub(\"\\.\", \"\", match.group(1))\n after_comma : str = match.group(2)\n plus_minus : str = match.group(3)\n\n self.__interpreted_entries[i].amount = float(int(before_comma))\n self.__interpreted_entries[i].amount += int(after_comma) / 100.0\n self.__interpreted_entries[i].amount *= -1 if plus_minus == \"S\" else +1\n \n def __extract_date(self):\n for i, raw_entry in enumerate(self.__raw_entries):\n match = re.fullmatch(\"(\\d{2})\\.(\\d{2})\\. \\d{2}\\.\\d{2}\\.(\\d{4})\", raw_entry.date)\n if match:\n day = int(match.group(1))\n month = int(match.group(2))\n year = int(match.group(3))\n self.__interpreted_entries[i].date = datetime.date(year, month, day)\n\n def __extract_tags(self):\n for entry in self.__interpreted_entries:\n for tag_pattern in self.__tag_patterns:\n match = re.search(tag_pattern.pattern, entry.raw.comment)\n if match:\n entry.tags.append(tag_pattern.tag)\n\n def __add_undefined_tag_for_entries_without_tags(self):\n for entry in self.__interpreted_entries:\n if len(entry.tags) == 0:\n entry.tags.append(Tag.UNDEFINED)","repo_name":"Bardeteleon/FinancialAnalysis","sub_path":"InterpretedStatementExtractor.py","file_name":"InterpretedStatementExtractor.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"25809345164","text":"#!/usr/bin/env python\n\n\"\"\" Duke Simulation for NCEAS FACE experiment\n\nSite History:\n-------------\n* Pre-1850 temperate broadleaved deciduous forest\n* Harvest forest.\n* Grassland from 1850-1982, mowed (+/- annually - DO NOT SIMULATE MOWING :P)\n* burnt prior to planting\n* At start of experiment aboveground biomass 5.5-11 kg C m-2\n* Needleaf forest planted in 1983\n\nSpin-up the model to a steady state. Recycle the met data in batches of a\n50 years (over and over), until the SOM, plant and litter C pools cease to\nchange.\n\n-> Spinup with forest params, fixed NDEP, fixed CO2 .\n-> Vary NDEP/CO2 for about 200 odd yrs...using grassland params so that we get\n through the industrial to the 1980s period.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport subprocess\nimport numpy as np\n\nUSER = os.getlogin()\nsys.path.append('/Users/%s/src/c/gday/scripts' % (USER))\nimport adjust_gday_param_file as ad\n\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (14.12.2014)\"\n__email__ = \"mdekauwe@gmail.com\"\n\n\ndef main(experiment_id, site, SPIN_UP=True, POST_INDUST=True, SPIN_UP_SIMS=True):\n\n GDAY_SPIN = \"gday -s -p \"\n GDAY = \"gday -p \"\n\n base_dir = os.path.dirname(os.getcwd())\n\n # dir names\n base_dir = \"/Users/%s/src/c/gday/example/params\" % (USER)\n param_dir = \"params\"\n met_dir = os.path.join(base_dir, \"met_data\")\n run_dir = os.path.join(base_dir, \"outputs\")\n\n\n if SPIN_UP == True:\n\n # copy base files to make two new experiment files\n shutil.copy(os.path.join(base_param_dir, base_param_name + \".cfg\"),\n os.path.join(param_dir, \"%s_%s_model_spinup.cfg\" % \\\n (experiment_id, site)))\n\n # Run model to equilibrium assuming forest, growing C pools from effectively\n # zero\n itag = \"%s_%s_model_spinup\" % (experiment_id, site)\n otag = \"%s_%s_model_spunup\" % (experiment_id, site)\n mtag = \"%s_met_data_equilibrium_50_yrs.csv\" % (site)\n out_fn = itag + \"_equilib.out\"\n out_param_fname = os.path.join(param_dir, otag + \".cfg\")\n cfg_fname = os.path.join(param_dir, itag + \".cfg\")\n met_fname = os.path.join(met_dir, mtag)\n out_fname = os.path.join(run_dir, out_fn)\n\n replace_dict = {\n\n # files\n \"out_param_fname\": \"%s\" % (out_param_fname),\n \"cfg_fname\": \"%s\" % (cfg_fname),\n \"met_fname\": \"%s\" % (met_fname),\n \"out_fname\": \"%s\" % (out_fname),\n\n # state - default C:N 25.\n \"age\": \"0.0\",\n \"canht\": \"17.0\", # Canopy height increased from 16m in 2001 to 18m in 2004 at Duke\n \"activesoil\": \"0.001\",\n \"activesoiln\": \"0.00004\",\n \"age\": \"0.0\",\n \"branch\": \"0.001\",\n \"branchn\": \"0.00004\",\n \"cstore\": \"0.001\",\n \"inorgn\": \"0.00004\",\n \"metabsoil\": \"0.0\",\n \"metabsoiln\": \"0.0\",\n \"metabsurf\": \"0.0\",\n \"metabsurfn\": \"0.0\",\n \"nstore\": \"0.00004\",\n \"passivesoil\": \"0.001\",\n \"passivesoiln\": \"0.0004\",\n \"prev_sma\": \"-999.9\",\n \"root\": \"0.001\",\n \"root_depth\": \"-9999.9\",\n \"rootn\": \"0.00004\",\n \"sapwood\": \"0.001\",\n \"shoot\": \"0.001\",\n \"shootn\": \"0.00004\",\n \"slowsoil\": \"0.001\",\n \"slowsoiln\": \"0.00004\",\n \"stem\": \"0.001\",\n \"stemn\": \"0.00004\",\n \"stemnimm\": \"0.00004\",\n \"stemnmob\": \"0.0\",\n \"structsoil\": \"0.001\",\n \"structsoiln\": \"0.00004\",\n \"structsurf\": \"0.001\",\n \"structsurfn\": \"0.00004\",\n \"croot\": \"0.0\", # don't simulate coarse roots\n \"crootn\": \"0.0\", # don't simulate coarse roots\n\n # parameters\n \"latitude\": \"35.9\",\n \"intercep_frac\": 
\"0.15\",\n \"max_intercep_lai\": \"3.0\",\n \"albedo\": \"0.123\", # modis site avg\n \"finesoil\": \"0.51\", # set based on silt+clay fractions of topsoil 0.42+0.09=0.5\n \"slamax\": \"4.4\", # Protocol [m2 kg-1 DW]\n \"sla\": \"4.4\", # Protocol [m2 kg-1 DW]\n \"slazero\": \"4.4\", # Protocol [m2 kg-1 DW]\n \"cfracts\": \"0.5\",\n \"lai_closed\": \"0.5\", # I am effectively turning this feature off by setting it so low\n\n #\"c_alloc_fmax\": \"0.25\",\n #\"c_alloc_fmin\": \"0.25\",\n #\"c_alloc_rmax\": \"0.05\",\n #\"c_alloc_rmin\": \"0.05\",\n #\"c_alloc_bmax\": \"0.2\",\n #\"c_alloc_bmin\": \"0.2\",\n\n #\"c_alloc_fmax\": \"0.3\",\n #\"c_alloc_fmin\": \"0.3\",\n #\"c_alloc_rmax\": \"0.3\",\n #\"c_alloc_rmin\": \"0.3\",\n #\"c_alloc_bmax\": \"0.2\",\n #\"c_alloc_bmin\": \"0.2\",\n #\"c_alloc_cmax\": \"0.0\", # turn off coarse roots!\n\n \"c_alloc_fmax\": \"0.35\",\n \"c_alloc_fmin\": \"0.15\",\n \"c_alloc_rmax\": \"0.35\",\n \"c_alloc_rmin\": \"0.05\",\n \"c_alloc_bmax\": \"0.1\",\n \"c_alloc_bmin\": \"0.1\",\n \"c_alloc_cmax\": \"0.0\", # turn off coarse roots!\n\n\n \"fretrans\": \"0.5\",\n \"rretrans\": \"0.0\",\n \"bretrans\": \"0.0\",\n \"wretrans\": \"0.0\",\n \"ncwnewz\": \"0.003\",\n \"ncwnew\": \"0.003\",\n \"ncwimmz\": \"0.003\",\n \"ncwimm\": \"0.003\",\n \"ncbnewz\": \"0.003\",\n \"ncbnew\": \"0.003\",\n \"ncrfac\": \"0.8\",\n \"ncmaxfyoung\": \"0.04\",\n \"ncmaxfold\": \"0.04\",\n \"ncmaxr\": \"0.03\",\n \"retransmob\": \"0.0\",\n \"fdecay\": \"0.59988\", # Protocol [years-1]\n \"fdecaydry\": \"0.59988\", # Protocol\n \"rdecay\": \"0.33333\", # Protocol\n \"rdecaydry\": \"0.33333\", # Protocol\n \"bdecay\": \"0.02\", # No data, assuming 50 years\n \"wdecay\": \"0.02\",\n \"crdecay\": \"0.00\", # turn off coarse roots!\n \"watdecaydry\": \"0.0\",\n \"watdecaywet\": \"0.1\",\n \"ligshoot\": \"0.24\", # Based on White et al. 2000 for ENF\n \"ligroot\": \"0.22\", # Based on White et al. 2000\n \"rateuptake\": \"2.2\", # set somewhat (very) arbitarly to get an LAI ~ 4.\n \"rateloss\": \"0.5\",\n \"wcapac_root\": \"96.75\", # [mm] (FC (m3/m-3)-WP (m3/m-3)) * rooting_depth (mm) using derived values and depth from protocol, 750 mm (FC=0.164 - WP=0.035)\n \"wcapac_topsoil\": \"25.8\", # [mm] (FC (m3/m-3)-WP (m3/m-3)) * rooting_depth (mm) using derived values and depth from protocol, assuming 200 mm top soil following Corbeels 2005a (FC=0.164 - WP=0.035)\n\n\n \"ctheta_topsoil\": \"0.5\", # Derive based on soil type clay_loam\n \"ntheta_topsoil\": \"5.0\", # Derive based on soil type clay_loam\n \"ctheta_root\": \"0.4\", # Derive based on soil type clay\n \"ntheta_root\": \"3.0\", # Derive based on soil type clay\n \"topsoil_type\": \"clay_loam\",\n \"rootsoil_type\": \"clay\",\n \"measurement_temp\": \"25.0\",\n \"dz0v_dh\": \"0.075\", # However I have used value from Jarvis, quoted in Jones 1992, pg. 67. Produces a value within the bounds of 3.5-1.1 mol m-2 s-1 Drake, 2010, GCB for canht=17\n \"displace_ratio\": \"0.78\",\n \"g1\": \"2.74\",\n\n\n\n #\"jmaxna\": \"60.0\", # Original values Belinda had, I think based on Crous 2008, fig 2. Although those values I think are measured at 28 and 30 deg, the assumption being here that this is the same as 25 deg!\n #\"jmaxnb\": \"0.0\", # Original values Belinda had, I think based on Crous 2008, fig 2. Although those values I think are measured at 28 and 30 deg, the assumption being here that this is the same as 25 deg!\n #\"vcmaxna\": \"30.61\",# Original values Belinda had, I think based on Crous 2008, fig 2. 
Although those values I think are measured at 28 and 30 deg, the assumption being here that this is the same as 25 deg!\n #\"vcmaxnb\": \"0.0\", # Original values Belinda had, I think based on Crous 2008, fig 2. Although those values I think are measured at 28 and 30 deg, the assumption being here that this is the same as 25 deg!\n \"vcmaxna\": \"22.29\",\n \"vcmaxnb\": \"8.45\",\n \"jv_slope\": \"1.86\",\n \"jv_intercept\": \"0.0\",\n\n\n\n \"sapturnover\": \"0.1\",\n \"heighto\": \"4.826\",\n \"htpower\": \"0.35\",\n \"height0\": \"5.0\",\n \"height1\": \"20.0\",\n \"leafsap0\": \"8000.0\",\n \"leafsap1\": \"3060.0\", # Duke protocol\n \"branch0\": \"5.61\",\n \"branch1\": \"0.346\",\n \"targ_sens\": \"0.5\",\n \"density\": \"420.0\",\n\n\n # control\n \"adjust_rtslow\": \"false\", # priming, off\n \"alloc_model\": \"allometric\",\n \"assim_model\": \"mate\",\n \"calc_sw_params\": \"false\", #false=use fwp values, true=derive them\n \"deciduous_model\": \"false\",\n \"disturbance\": \"false\",\n \"exudation\": \"false\",\n \"fixed_stem_nc\": \"true\",\n \"fixleafnc\": \"false\",\n \"grazing\": \"false\",\n \"gs_model\": \"medlyn\",\n \"model_optroot\": \"false\",\n \"modeljm\": \"2\",\n \"ncycle\": \"true\",\n \"nuptake_model\": \"2\",\n \"passiveconst\": \"false\",\n \"print_options\": \"end\",\n \"ps_pathway\": \"c3\",\n \"respiration_model\": \"fixed\",\n \"strfloat\": \"0\",\n \"sw_stress_model\": \"1\", # Sands and Landsberg\n \"trans_model\": \"1\",\n \"use_eff_nc\": \"0\",\n \"use_leuning\": \"0\",\n \"water_stress\": \"true\",\n\n }\n ad.adjust_param_file(cfg_fname, replace_dict)\n os.system(GDAY_SPIN + cfg_fname)\n\n if POST_INDUST == True:\n\n # run for 200 odd years post industrial with increasing co2/ndep\n # we are swapping forest params for grass params now\n # copy spunup base files to make two new experiment files\n shutil.copy(os.path.join(param_dir, \"%s_%s_model_spunup.cfg\" % (experiment_id, site)),\n os.path.join(param_dir, \"%s_%s_model_spunup_adj.cfg\" % (experiment_id, site)))\n\n itag = \"%s_%s_model_spunup_adj\" % (experiment_id, site)\n otag = \"%s_%s_model_indust\" % (experiment_id, site)\n mtag = \"%s_met_data_industrial_to_present_1850_1983.csv\" % (site)\n out_fn = itag + \"_indust.out\"\n out_param_fname = os.path.join(param_dir, otag + \".cfg\")\n cfg_fname = os.path.join(param_dir, itag + \".cfg\")\n met_fname = os.path.join(met_dir, mtag)\n out_fname = os.path.join(run_dir, out_fn)\n\n replace_dict = {\n # git stuff\n #\"git_hash\": str(git_revision),\n\n # files\n \"out_param_fname\": \"%s\" % (out_param_fname),\n \"cfg_fname\": \"%s\" % (cfg_fname),\n \"met_fname\": \"%s\" % (met_fname),\n \"out_fname\": \"%s\" % (out_fname),\n\n # state - default C:N 25.\n \"age\": \"0.0\",\n \"branch\": \"0.0\",\n \"branchn\": \"0.0\",\n \"canht\": \"0.79\", # Taken default C3grass value from JULES\n \"cstore\": \"0.001\",\n \"nstore\": \"0.00004\",\n \"croot\": \"0.0\", # don't simulate coarse roots\n \"crootn\": \"0.0\", # don't simulate coarse roots\n \"root\": \"0.001\",\n \"rootn\": \"0.00004\",\n \"sapwood\": \"0.0\",\n \"shoot\": \"0.001\",\n \"shootn\": \"0.00004\",\n \"stem\": \"0.0\",\n \"stemn\": \"0.0\",\n \"stemnimm\": \"0.0\",\n \"stemnmob\": \"0.0\",\n \"nepsum\": \"0.0\",\n \"nppsum\": \"0.0\",\n\n # parameters\n \"ligshoot\": \"0.09\", # Smith et al. 2000, GRASS\n \"ligroot\": \"0.22\", # Smith et al. 
2000\n \"age\": \"1.0\",\n \"slamax\": \"6.0\",\n \"sla\": \"6.0\",\n \"slazero\": \"6.0\",\n \"cfracts\": \"0.5\",\n \"lai_closed\": \"0.5\", # I am effectively turning this feature off by setting it so low\n \"c_alloc_fmax\": \"0.8\",\n \"c_alloc_fmin\": \"0.2\",\n \"c_alloc_rmax\": \"0.8\",\n \"c_alloc_rmin\": \"0.2\",\n \"c_alloc_bmax\": \"0.0\",\n \"c_alloc_bmin\": \"0.0\",\n \"c_alloc_cmax\": \"0.0\", # turn off coarse roots!\n \"fretrans\": \"0.4\",\n \"rretrans\": \"0.0\",\n \"bretrans\": \"0.0\",\n \"wretrans\": \"0.0\",\n \"ncwnewz\": \"0.0\",\n \"ncwnew\": \"0.0\",\n \"ncwimmz\": \"0.0\",\n \"ncwimm\": \"0.0\",\n \"ncbnewz\": \"0.0\",\n \"ncbnew\": \"0.0\",\n \"ncrfac\": \"0.7\",\n \"ncmaxfyoung\": \"0.035\",\n \"ncmaxfold\": \"0.035\",\n \"ncmaxr\": \"0.0287\",\n \"retransmob\": \"0.0\",\n \"fdecay\": \"1.0\",\n \"fdecaydry\": \"1.0\",\n \"rdecay\": \"1.0\",\n \"rdecaydry\": \"1.0\",\n \"bdecay\": \"0.0\",\n \"wdecay\": \"0.0\",\n \"watdecaydry\": \"0.0\",\n \"watdecaywet\": \"0.1\",\n \"crdecay\": \"0.00\", # turn off coarse roots!\n\n\n \"dz0v_dh\": \"0.10\", # Taken default C3grass value from JULES\n \"displace_ratio\": \"0.64\", #Jones 1992, pg. 67.\n \"z0h_z0m\": \"1.0\", # Assume z0m = z0h, probably a big assumption [as z0h often < z0m.], see comment in code!!\n\n\n \"jmaxna\": \"62.0\", # assuming j = v * 2\n \"jmaxnb\": \"0.0\", # assuming no intercept\n \"vcmaxna\": \"31.0\", # C3 grasses - CLM4 tech doc, table 8.2, Oleson et al 2010, page 176\n \"vcmaxnb\": \"0.0\", # assuming no intercept\n\n # control\n \"adjust_rtslow\": \"false\", # priming, off\n \"alloc_model\": \"grasses\",\n \"assim_model\": \"mate\",\n \"calc_sw_params\": \"false\", #false=use fwp values, true=derive them\n \"deciduous_model\": \"false\",\n \"disturbance\": \"false\",\n \"exudation\": \"false\",\n \"fixed_stem_nc\": \"true\",\n \"fixleafnc\": \"false\",\n \"grazing\": \"false\",\n \"gs_model\": \"medlyn\",\n \"model_optroot\": \"false\",\n \"modeljm\": \"1\",\n \"nuptake_model\": \"2\",\n \"ncycle\": \"true\",\n \"passiveconst\": \"false\",\n \"print_options\": \"end\",\n \"ps_pathway\": \"c3\",\n \"respiration_model\": \"fixed\",\n \"strfloat\": \"0\",\n \"sw_stress_model\": \"1\", # Sands and Landsberg\n \"trans_model\": \"1\",\n \"use_eff_nc\": \"0\",\n \"use_leuning\": \"0\",\n \"water_stress\": \"true\",\n\n }\n ad.adjust_param_file(cfg_fname, replace_dict)\n os.system(GDAY + cfg_fname)\n\n\nif __name__ == \"__main__\":\n\n experiment_id = \"NCEAS\"\n site = \"DUKE\"\n main(experiment_id, site, SPIN_UP=True, POST_INDUST=True)\n","repo_name":"mdekauwe/GDAY","sub_path":"example/duke_spinup_to_equilibrium.py","file_name":"duke_spinup_to_equilibrium.py","file_ext":"py","file_size_in_byte":18240,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"19"}
+{"seq_id":"73560050924","text":"from datetime import datetime\nfrom os.path import abspath, dirname\nfrom typing import Dict\n\nfrom fastapi import Depends, Request\nfrom fastapi.security import SecurityScopes\nfrom jose import JWTError, jwt\n\nfrom auth.schemas import JWTUser\nfrom config import settings\nfrom exceptions import NotAuthenticated, PermissionDenied\n\n\ndef get_token(request: Request):\n \"\"\"\n Получение токена из запроса с использованием библиотеки Request\n :param token:\n :return: dict\n \"\"\"\n token = request.headers.get(\"Authorization\")\n try:\n token = token.split()[-1]\n except:\n raise NotAuthenticated\n return token\n\n\ndef get_current_user(security_scopes: SecurityScopes, token: str = Depends(get_token)):\n user = None\n try:\n user = JWTUser(\n role=\"owner\",\n data=jwt.decode(\n token,\n settings.TOKEN_OWNER_KEY,\n algorithms=settings.ALGORITHM,\n audience=settings.TOKEN_OWNER_AUDIENCE,\n issuer=settings.TOKEN_ISSUER,\n ),\n )\n except JWTError:\n try:\n user = JWTUser(\n role=\"worker\",\n data=jwt.decode(\n token,\n settings.TOKEN_WORKER_KEY,\n algorithms=settings.ALGORITHM,\n audience=settings.TOKEN_WORKER_AUDIENCE,\n issuer=settings.TOKEN_ISSUER,\n ),\n )\n except JWTError:\n raise NotAuthenticated\n\n if user is None or int(user.data.get(\"exp\")) < datetime.utcnow().timestamp():\n raise NotAuthenticated\n\n if user.role == \"worker\":\n for scope in security_scopes.scopes:\n if scope not in user.data.get(\"api-permission\", []):\n raise PermissionDenied\n\n return user\n","repo_name":"efir-it/cash-shift-v2","sub_path":"auth/dependencies.py","file_name":"dependencies.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"13957998879","text":"\"\"\"\nselenium+chromedriver+打码平台 模拟登陆\n简洁明了,还不操心\n\"\"\"\n\nimport pickle\nimport time\n\nfrom PIL import Image\nfrom selenium import webdriver\nfrom cjy import ChaojiyingClient\n\n# 登陆页面url\nlogin_web_url = 'https://www.javbus.com/forum/member.php?mod=logging&action=login&referer=%2F%2Fwww.javbus.com%2Fstar%2F6xe'\n\n\n# 用selenium+chromewebdirver做了,上面的也能登陆成功,但是有个重定向很麻烦\nbrowser = webdriver.Chrome()\nbrowser.maximize_window()\nbrowser.get(login_web_url)\nbrowser.find_element_by_xpath(\n '//form[@name=\"login\"]//input[@name=\"username\"]').send_keys('论坛账号')\nbrowser.find_element_by_xpath(\n '//form[@name=\"login\"]//input[@name=\"password\"]').send_keys('论坛密码')\ntime.sleep(5)\nseccode_element = browser.find_element_by_xpath(\n '//span[contains(@id, \"vseccode\")]/img')\n\n# 截屏\nbrowser.get_screenshot_as_file('whole_web.png')\nleft = int(seccode_element.location['x'])\ntop = int(seccode_element.location['y'])\nright = int(seccode_element.location['x'] + seccode_element.size['width'])\nbottom = int(seccode_element.location['y'] + seccode_element.size['height'])\n\n# Image处理\nim = Image.open('whole_web.png')\nim = im.crop((2*left, 2*top, 2*right, 2*bottom)) # 这里是13寸mac的缩放,这也是我讨厌用selenium的地方\nim.save('seccode.png')\n\n# 超级鹰打码(需要改动的地方)\nchaojiying = ChaojiyingClient('超级鹰账号', '超级鹰密码', 'soft_id')\nim = open('seccode.png', 'rb').read()\nsec_code_value = chaojiying.PostPic(im, 1902).get('pic_str')\n\nbrowser.find_element_by_xpath(\n '//form[@name=\"login\"]//input[@name=\"seccodeverify\"]').send_keys(sec_code_value)\n\nbrowser.find_element_by_xpath(\n '//form[@name=\"login\"]//button[@name=\"loginsubmit\"]').click()\n\n# 登陆成功后,会有个重定向,等跳转后再获取cookies\ntime.sleep(60)\n\ncookies = browser.get_cookies()\nprint(cookies)\npickle.dump(cookies, open('javbus_sc.cookie', 'wb'))\nbrowser.close()","repo_name":"Achang0121/MockLogin","sub_path":"javbus_mock_login/javbus_login_selenium_chromedriver.py","file_name":"javbus_login_selenium_chromedriver.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"30380325140","text":"from classification import preprocess\nfrom classification import document\n\n\ndef naive_rule(text: str) -> bool:\n \"\"\"Given SEC document classify trade.\n\n For this naive rule, we simply look for LOI and business combination\n agreements while taking into account the number of redemptions.\n Args:\n text: String SEC document.\n Returns:\n Boolean of whether we should trade or not.\n \"\"\"\n\n # Initialize document object.\n doc = document.Document(text)\n\n # Reject if too many votes against.\n votes = preprocess.parse_vote_results(text)\n votes_for, votes_against, votes_abstain, votes_broker_non_votes = votes\n votes_total = (votes_for + votes_against +\n votes_abstain + votes_broker_non_votes)\n if votes_against / votes_total > 0.1:\n return False\n\n # Reject if is ipo document.\n if doc.is_ipo():\n return False\n\n # Reject if is item 2.03 document.\n if doc.is_item_203():\n return False\n\n boolean_conditions = [\n doc.is_letter_of_intent(),\n doc.is_business_combination_agreement(),\n doc.is_consummation(),\n doc.is_extension(),\n doc.is_trust(),\n ]\n return any(boolean_conditions)\n","repo_name":"alandu20/spac","sub_path":"backtest/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"43460342539","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport pdb\n\n#def Cosdist(x,y):\n \nclass BasicConv2d(nn.Module):\n \"\"\"BasicConv2d model.\"\"\"\n\n def __init__(self, in_channels, out_channels, **kwargs):\n \"\"\"Init BasicConv2d model.\"\"\"\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)\n self.bn = nn.BatchNorm2d(out_channels, eps=0.001)\n\n def forward(self, x):\n \"\"\"Forward BasicConv2d model.\"\"\"\n x = self.conv(x)\n x = self.bn(x)\n #return x\n return F.relu(x, inplace=True)\n\n\nclass CNN(nn.Module):\n def __init__(self,feature_size):\n super(CNN,self).__init__()\n self.feature_size = feature_size\n self.conv1 = BasicConv2d(1,feature_size,kernel_size=[1,feature_size])\n #self.conv2 = nn.Conv2d(1,feature_size,kernel_size=[1,2*feature_size])\n #self.conv3 = nn.Conv2d(1,feature_size,kernel_size=[1,3*feature_size])\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m,nn.Linear):\n import scipy.stats as stats\n stddev = m.stddev if hasattr(m, 'stddev') else 0.1\n X = stats.truncnorm(-2, 2, scale=stddev)\n values = torch.Tensor(X.rvs(m.weight.data.numel()))\n values = values.view(m.weight.data.size())\n m.weight.data.copy_(values)\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def forward(self,feat):\n feat = feat.view(feat.size(0),1,120,-1)\n #feat1 = F.pad(feat,(0,0,1,0))[:,:,:120,:]\n #feat2 = F.pad(feat,(0,0,2,0))[:,:,:120,:]\n #feat1 = torch.cat((feat,feat1),3)\n #feat2 = torch.cat((feat1,feat2),3)\n\n #feat = feat.view(feat.size(0),1,120,-1)\n feat = self.conv1(feat)\n feat = feat.view(feat.size(0),120,-1)\n #feat1 = feat1.view(feat1.size(0),1,120,-1)\n #feat1 = self.conv2(feat1)\n #feat1 = feat1.view(feat1.size(0),120,-1)\n #feat2 = feat2.view(feat2.size(0),1,120,-1)\n #feat2 = self.conv3(feat2)\n #feat2 = feat.view(feat2.size(0),120,-1)\n\n #out = torch.cat((feat,feat1,feat2),2)\n return feat\n\n\nclass FeatAggregate(nn.Module):\n def __init__(self, input_size=1024, hidden_size=128,framenum=120,cell_num=2):\n super(FeatAggregate,self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.cell_num = cell_num\n self.framenum = framenum\n self.rnn1 = nn.GRU(input_size, hidden_size, cell_num, batch_first=True)\n #self.rnn2 = nn.LSTM(hidden_size, hidden_size, cell_num, batch_first=True)\n #self.bp = nn.BatchNorm1d(hidden_size)\n #self.ap = nn.AvgPool1d(120)\n self.conv = nn.Conv1d(hidden_size,hidden_size*6,kernel_size=framenum)\n\n\n def forward(self, feats):\n h01 = Variable(torch.randn(self.cell_num, feats.size(0), self.hidden_size), requires_grad=False)\n #c01 = Variable(torch.randn(self.cell_num, feats.size(0), self.hidden_size), requires_grad=False)\n #h02 = Variable(torch.randn(self.cell_num, feats.size(0), self.hidden_size), requires_grad=False)\n #c02 = Variable(torch.randn(self.cell_num, feats.size(0), self.hidden_size), requires_grad=False)\n\n if feats.is_cuda:\n h01 = h01.cuda()\n #c01 = c01.cuda()\n #h02 = h02.cuda()\n #c02 = c02.cuda()\n\n # aggregated feature\n feat, hn1 = self.rnn1(feats, h01)\n #feat = torch.transpose(feat,1,2)\n #feat = feat.contiguous()\n #feat = self.bp(feat)\n #feat = torch.transpose(feat,1,2)\n #feat, (hn2, cn2) = self.rnn2(feat, (h02, c02))\n feat = feat.transpose(1,2)\n #feat = self.ap(feat)\n feat = self.conv(feat)\n #feat = feat[:,-1,:]\n feat = feat.view(feat.size(0),-1)\n #feat = 
feat.contiguous()\n out = torch.cat((feat,hn1[1],hn1[0]),1)\n \n return out\n\nclass VAMetric(nn.Module):\n def __init__(self):\n super(VAMetric,self).__init__()\n #self.pool_v = nn.AvgPool1d(2,stride=2)\n #self.pool_a = nn.AvgPool1d(2,stride=2)\n #self.rnn_vp = FeatAggregate(1024,128,60)\n #self.rnn_ap = FeatAggregate(128,128,60)\n #self.fc_vp = nn.Linear(2*128,2*128)\n #self.fc_ap = nn.Linear(2*128,2*128)\n self.rnn_v = FeatAggregate(1024,128,120)\n self.rnn_a = FeatAggregate(128,128,120)\n self.fc_v1 = nn.Linear(128*8,4*128)\n self.fc_v2 = nn.Linear(128*4,128)\n self.fc_a1 = nn.Linear(128*8,128*4)\n self.fc_a2 = nn.Linear(4*128,128)\n #self.fc = nn.Linear(4*128,128)\n #self.cnn_a = CNN(128)\n #self.cnn_v = CNN(1024)\n\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform(m.weight)\n nn.init.constant(m.bias, 0)\n\n\n def forward(self,vfeat,afeat):\n \n \n \n #vfeat1 = F.sigmoid(vfeat1)\n\n #vfeat = self.cnn_v(vfeat)\n\n #vfeat1 = torch.transpose(vfeat,1,2)\n\n #vfeat1 = self.pool_v(vfeat1)\n #vfeat1 = torch.transpose(vfeat1,1,2)\n #vfeat1 = self.rnn_vp(vfeat1)\n #vfeat1 = self.fc_vp(vfeat1)\n #vfeat1 = F.normalize(vfeat1)\n vfeat = self.rnn_v(vfeat)\n #v = torch.cat((vfeat,vfeat1),1)\n #vfeat = F.relu(vfeat)\n #vfeat = torch.cat((vfeat,vfeat1),1)\n v = self.fc_v1(vfeat)\n v = F.sigmoid(v)\n v = F.dropout(v,0.0)\n v = self.fc_v2(v)\n #vfeat = F.normalize(vfeat)\n \n #vfeat = F.sigmoid(vfeat)\n #vfeat = F.dropout(vfeat,0.2)\n #vfeat = self.fc_v2(vfeat)\n #vfeat = F.sigmoid(vfeat)\n\n #afeat = self.cnn_a(afeat)\n \n #afeat1 = F.sigmoid(afeat1)\n \n\n #afeat = self.cnn_a(afeat)\n #afeat1 = torch.transpose(afeat,1,2)\n \n #afeat1 = self.pool_a(afeat1)\n #afeat1 = torch.transpose(afeat1,1,2)\n #afeat1 = self.rnn_ap(afeat1)\n #afeat1 = self.fc_ap(afeat1)\n #afeat1 = F.normalize(afeat1)\n afeat = self.rnn_a(afeat)\n \n #afeat = F.relu(afeat)\n #afeat = torch.cat((afeat,afeat1),1)\n #a = torch.cat((afeat,afeat1),1)\n a = self.fc_a1(afeat)\n a = F.sigmoid(a)\n a = F.dropout(a,0.0)\n a = self.fc_a2(a)\n\n #afeat = F.normalize(afeat)\n \n #afeat = F.dropout(afeat,0.0)\n #afeat = self.fc_a2(afeat)\n\n #afeat = F.softmax(afeat)\n #out = F.pairwise_distance(a,v)\n \n #out = torch.clamp(out,max=1.0)\n #out = (1-F.cosine_similarity(vfeat, afeat))/2\n return v,a\n\n\n\n\n\n# Visual-audio multimodal metric learning: MaxPool+FC\nclass VAMetric2(nn.Module):\n def __init__(self, framenum=120):\n super(VAMetric2, self).__init__()\n self.mp = nn.MaxPool1d(framenum)\n self.vfc = nn.Linear(1024, 128)\n self.fc = nn.Linear(128, 96)\n self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform(m.weight)\n nn.init.constant(m.bias, 0)\n\n def forward(self, vfeat, afeat):\n # aggregate the visual features\n vfeat = vfeat.transpose(2, 1)\n vfeat = self.mp(vfeat)\n vfeat = vfeat.view(-1, 1024)\n vfeat = F.relu(self.vfc(vfeat))\n vfeat = self.fc(vfeat)\n\n # aggregate the auditory features\n afeat = afeat.transpose(2, 1)\n afeat = self.mp(afeat)\n afeat = afeat.view(-1, 128)\n afeat = self.fc(afeat)\n return F.pairwise_distance(vfeat, afeat)\n\n\nclass ContrastiveLoss(torch.nn.Module):\n \"\"\"\n Contrastive loss function.\n Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\n \"\"\"\n def __init__(self, margin=1.0):\n super(ContrastiveLoss, self).__init__()\n self.margin = margin\n\n def forward(self, dist, label):\n loss = torch.mean((1-label) * torch.pow(dist, 2) +\n (label) * 
torch.pow(torch.clamp(self.margin - dist, min=0.0), 2))\n return loss\n","repo_name":"yfreedomliTHU/VA_Project","sub_path":"Other attempted models/models1.py","file_name":"models1.py","file_ext":"py","file_size_in_byte":8283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"18521971244","text":"import sys\nfrom setuptools import setup\n\n\ndef forbid_publish():\n argv = sys.argv\n blacklist = ['register', 'upload']\n\n for command in blacklist:\n if command in argv:\n values = {'command': command}\n raise RuntimeError('Command \"%(command)s\" has been blacklisted' %\n values)\n\nforbid_publish()\n\nsetup(\n name='email-parser',\n packages=[\n 'email_parser'\n ],\n package_dir={\n 'email_parser': 'email_parser'\n },\n test_suite='tests',\n author='Tomaz Muraus',\n author_email='tomaz+pypi@tomaz.me',\n)\n","repo_name":"Kami/mailytics-email-parser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"32128860106","text":"from PyQt5 import QtWidgets, QtGui, QtCore\nfrom ..Model import constants\n\n\n__all__ = ['EffectCutline']\n\n\nclass EffectCutline(QtWidgets.QGraphicsItem):\n\n def __init__(self, parent=None):\n \"\"\"\n Cutline which is used to delete the pipes.\n Delete logic is defined in manager.\n \"\"\"\n\n super(EffectCutline, self).__init__(parent)\n self.line_points = []\n self.pen = QtGui.QPen(QtCore.Qt.white, 2)\n self.pen.setDashPattern([3, 2])\n self.setZValue(constants.Z_VAL_CUTLINE)\n\n def boundingRect(self) -> QtCore.QRectF:\n if self.line_points:\n x__min_point = min([point.x() for point in self.line_points])\n y__min_point = min([point.y() for point in self.line_points])\n x__max_point = max([point.x() for point in self.line_points])\n y__max_point = max([point.y() for point in self.line_points])\n return QtCore.QRectF(x__min_point, y__min_point, x__max_point - x__min_point, y__max_point - y__min_point)\n else:\n return QtCore.QRectF(0, 0, 1, 1)\n\n\n def paint(self, painter, option, widget=None) -> None:\n painter.setRenderHint(QtGui.QPainter.Antialiasing)\n painter.setBrush(QtCore.Qt.NoBrush)\n painter.setPen(self.pen)\n\n poly = QtGui.QPolygonF(self.line_points)\n painter.drawPolyline(poly)\n","repo_name":"nomiuo/NodeNote","sub_path":"src/NodeNotePackage/NodeNote/Components/effect_cutline.py","file_name":"effect_cutline.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":356,"dataset":"github-code","pt":"19"}
+{"seq_id":"20013132223","text":"from textwrap import wrap\r\ndef convert_base(num, to_base=10, from_base=10):\r\n # first convert to decimal number\r\n if isinstance(num, str):\r\n n = int(num, from_base)\r\n else:\r\n n = int(num)\r\n # now convert decimal to 'to_base' base\r\n alphabet = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n if n < to_base:\r\n return alphabet[n]\r\n else:\r\n return convert_base(n // to_base, to_base) + alphabet[n % to_base]\r\ndef ascii_convert(ndx):\r\n key=list(range(32,127))\r\n key=key+list(range(192,256))\r\n\r\n value=[chr(i) for i in range(32, 127)]\r\n value=value+list([chr(i) for i in range(1040, 1104)])\r\n\r\n ascii_codes=dict(zip(key, value))\r\n if int(ndx) in ascii_codes:\r\n return ascii_codes[int(ndx)]\r\n else:\r\n return ''\r\n \r\n\r\n\r\n\r\nalias=input(\"Введите исходную задачу: \")\r\nstart=alias[0]\r\nalias=alias[1:]\r\nalias=alias.replace(' ', '')\r\narr=[]\r\n\r\nwhile len(alias)>0:\r\n counter=0\r\n i=0\r\n while alias[i]!='1':\r\n counter+=1\r\n i+=1\r\n arr.append(alias[0:counter*2+1])\r\n alias=alias[counter*2+1:len(alias)]\r\n\r\nfor i in range(len(arr)):\r\n arr[i]=convert_base(arr[i], 10, 2)\r\n\r\nfor i in range(len(arr)):\r\n arr[i]=start*int(arr[i])\r\n if start=='1':\r\n start='0'\r\n else:\r\n start='1'\r\n\r\ns=''\r\nfor i in range(len(arr)):\r\n s+=str(arr[i])\r\n\r\ns=wrap(s,8)\r\nfinal=''\r\nfor i in range(len(s)):\r\n s[i]=convert_base(s[i], 10, 2)\r\n final+=ascii_convert(s[i])\r\nprint(final)","repo_name":"punk1503/Alias_Decode","sub_path":"alias.py","file_name":"alias.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"22077989837","text":"import numpy as np\nfrom numpy import asarray as ar\n\n\ndef read_data(data_path):\n \"\"\"\n Reads data\n \"\"\"\n data = []\n f = open(data_path, \"r\")\n for x in f:\n data.append([int(c) for c in x.strip()])\n\n # Convert to numpy and return\n return np.array(data)\n\n\ndef do_step(data, l=10):\n cnt = 0\n data = data + 1\n while sum(sum(data > 9)) > 0:\n cnt += sum(sum(data > 9))\n indices10 = np.where(data > 9)\n data[np.where(data > 9)] = -100\n for (i, j) in list(zip(*indices10)):\n ia = (\n np.array([i - 1, i - 1, i - 1, i, i, i + 1, i + 1, i + 1]),\n np.array([j - 1, j, j + 1, j - 1, j + 1, j - 1, j, j + 1]),\n )\n keep = ar(ia[0] >= 0) & ar(ia[1] >= 0) & ar(ia[0] < l) & ar(ia[1] < l)\n indices_clean = (ia[0][keep], ia[1][keep])\n data[indices_clean] += 1\n data[np.where(data < 0)] = 0\n return data, cnt\n\n\nif __name__ == \"__main__\":\n\n # Read data\n data_path = \"input\"\n data = read_data(data_path)\n dim = len(data)\n\n steps = 100\n count = 0\n for i in range(steps):\n data, count_step = do_step(data, l=dim)\n count += count_step\n\n print(f\"There were {count} flashes after {steps} steps\")\n\n count = 0\n step = 100\n while count < 100:\n step += 1\n data, count = do_step(data, l=dim)\n\n print(f\"All octopuses flash simultaneously in step {step}\")\n","repo_name":"jakuberan/AoC-2021","sub_path":"day_11/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"29144471890","text":"\"\"\"\n# Differential Algebra Core Engine in Python - DACEyPy\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom ctypes import POINTER, c_double, c_uint, pointer\nfrom typing import List, Tuple, Union, overload\n\nimport numpy as np\nfrom numpy.typing import NDArray\n\nfrom . import _DA, _array, core\nfrom ._DACEException import DACEException\nfrom ._PrettyType import PrettyType\n\n\nclass compiledDA(metaclass=PrettyType):\n\n __slots__ = \"dim\", \"ac\", \"terms\", \"vars\", \"ord\"\n\n # *************************************************************************\n # * Constructors & Destructors\n # *************************************************************************\n\n def __init__(self, da: Union[_DA.DA, List[_DA.DA], _array.array]):\n \"\"\"\n Create a compiledDA object from one or more DA objects.\n\n Args:\n da: DA object(s) to use as source.\n\n Raises:\n DACEException\n\n Derived from C++:\n `compiledDA::compiledDA(const std::vector &da)`\n `compiledDA::compiledDA(const DA &da)`\n \"\"\"\n\n if isinstance(da, _DA.DA):\n da = [da]\n elif isinstance(da, _array.array):\n if da.ndim != 1:\n raise TypeError(\"This function works only on 1D objects\")\n\n dim = len(da)\n\n if not dim:\n raise DACEException(16, 4)\n\n c_ac = (c_double * (_DA.DA.getMaxMonomials() * (dim + 2)))()\n c_terms = c_uint()\n c_vars = c_uint()\n c_ord = c_uint()\n\n da_array_t = POINTER(core.DACEDA) * dim\n da_array = da_array_t(*(pointer(da_elem.m_index) for da_elem in da))\n core.EvalTree(da_array, dim, c_ac, c_terms, c_vars, c_ord)\n\n self.dim: int = dim\n self.ac: Tuple[float] = tuple(c_ac) # type: ignore\n self.terms: int = c_terms.value\n self.vars: int = c_vars.value\n self.ord: int = c_ord.value\n\n def __hash__(self) -> int:\n return hash((self.dim, self.ac, self.terms, self.vars, self.ord))\n\n def __eq__(self, other) -> bool:\n if not isinstance(other, compiledDA):\n return False\n if self.dim != other.dim:\n return False\n if self.ac != other.ac:\n return False\n if self.terms != other.terms:\n return False\n if self.vars != other.vars:\n return False\n if self.ord != other.ord:\n return False\n return True\n\n # Notes on methods not ported from C++:\n # - `compiledDA::compiledDA(const compiledDA &cda)`\n # Create a copy of a compiledDA object.\n # -> can be achieved using deepcopy(cda)\n # - `compiledDA::~compiledDA()` (destructor)\n # -> not necessary since Python garbage collector frees memory\n # - `compiledDA& compiledDA::operator=(const compiledDA &cda)` (assignment)\n # -> in order to make compiledDA immutable and therefore hashable\n\n # *************************************************************************\n # * Evaluation overloads and template specialization\n # *************************************************************************\n\n @overload\n def eval(self, args: List[float]) -> List[float]:\n \"\"\"\n Evaluate the compiledDA object using a list of floats.\n\n Args:\n args: list of float to use for the 
evaluation.\n\n Returns:\n Result of the evaluation as list of floats.\n\n Raises:\n DACEException\n\n Derived from C++:\n `void compiledDA::eval(const std::vector &args, std::vector &res)`\n \"\"\"\n ...\n\n @overload\n def eval(self, args: _array.array) -> _array.array:\n \"\"\"\n Evaluate the compiledDA object using a DACEyPy array.\n\n Args:\n args: DACEyPy array to use for the evaluation.\n\n Returns:\n Result of the evaluation as DACEyPy array.\n\n Raises:\n DACEException\n\n Derived from C++:\n `void compiledDA::eval(const std::vector &args, std::vector &res)`\n \"\"\"\n ...\n\n @overload\n def eval(self, args: NDArray[np.double]) -> NDArray[np.double]:\n \"\"\"\n Evaluate the compiledDA object using a NumPy array of doubles.\n\n Args:\n args: NumPy array of doubles to use for the evaluation.\n\n Returns:\n Result of the evaluation as NumPy array of doubles.\n\n Raises:\n DACEException\n\n Derived from C++:\n `void compiledDA::eval(const std::vector &args, std::vector &res)`\n \"\"\"\n ...\n\n @overload\n def eval(self, args: List[_DA.DA]) -> List[_DA.DA]:\n \"\"\"\n Evaluate the compiledDA object using a list of DA objects.\n\n Args:\n args: list of DA objects to use for the evaluation.\n\n Returns:\n Result of the evaluation as list of DA objects.\n\n Raises:\n DACEException\n\n Derived from C++:\n `void compiledDA::eval(const std::vector &args, std::vector &res)`\n \"\"\"\n ...\n\n def eval(\n self,\n args: Union[\n List[float], List[_DA.DA], NDArray[np.double], _array.array],\n ) -> Union[List[float], List[_DA.DA], NDArray[np.double], _array.array]:\n\n if isinstance(args, np.ndarray) and args.ndim != 1:\n raise TypeError(\"This function works only on 1D objects\")\n\n narg = len(args)\n\n xm: Union[_array.array, NDArray[np.double]]\n res: Union[_array.array, NDArray[np.double]]\n\n if narg == 0 or isinstance(args[0], (float, int, np.number)):\n p = 2\n xm = np.zeros(self.ord + 1)\n res = np.zeros(self.dim)\n\n # prepare temporary powers\n xm[0] = 1.0\n\n # constant part\n for i in range(self.dim):\n res[i] = self.ac[p]\n p += 1\n\n # higher order terms\n for i in range(1, self.terms):\n jl = int(self.ac[p])\n p += 1\n jv = int(self.ac[p]) - 1\n p += 1\n if jv < narg:\n xm[jl] = xm[jl - 1] * args[jv]\n else:\n xm[jl] = 0.0\n for j in range(self.dim):\n res[j] += xm[jl] * self.ac[p]\n p += 1\n else:\n jlskip = self.ord + 1\n p = 2\n\n xm = _array.array.zeros(self.ord + 1)\n tmp = _DA.DA()\n res = _array.array.zeros(self.dim)\n\n # prepare temporary powers\n xm[0] = 1.0\n\n # constant part\n for i in range(self.dim):\n res[i] = self.ac[p]\n p += 1\n\n # higher order terms\n for i in range(1, self.terms):\n jl = int(self.ac[p])\n p += 1\n jv = int(self.ac[p]) - 1\n p += 1\n if jl > jlskip:\n p += self.dim\n continue\n if jv >= narg:\n jlskip = jl\n p += self.dim\n continue\n\n jlskip = self.ord + 1\n core.Multiply(xm[jl - 1], args[jv], xm[jl])\n for j in range(self.dim):\n if(self.ac[p] != 0.0):\n core.MultiplyDouble(xm[jl], self.ac[p], tmp)\n core.Add(res[j].m_index, tmp, res[j].m_index)\n p += 1\n\n if isinstance(args, list):\n return res.tolist()\n\n return res\n\n @overload\n def evalScalar(self, arg: float) -> NDArray[np.double]:\n \"\"\"\n Evaluate the compiled polynomial with a single argument of type\n float and return vector of results.\n\n Args:\n arg:\n The value of the first independent DA variable to evaluate with.\n All remaining independent DA variables are assumed to be zero.\n\n Returns:\n NumPy array with the result of the evaluation.\n \"\"\"\n ...\n\n 
@overload\n def evalScalar(self, arg: _DA.DA) -> _array.array:\n \"\"\"\n Evaluate the compiled polynomial with a single argument of type\n DA and return vector of results.\n\n Args:\n arg: The value of the first independent DA variable to evaluate with.\n All remaining independent DA variables are assumed to be zero.\n\n Returns:\n DACEyPy array with the result of the evaluation.\n \"\"\"\n ...\n\n def evalScalar(self, arg: Union[float, _DA.DA]) \\\n -> Union[NDArray[np.double], _array.array]:\n args = np.array([arg]) if isinstance(arg, (float, int)) else _array.array([arg])\n return self.eval(args)\n\n @overload\n def __call__(self, arg: float) -> NDArray[np.double]: ...\n\n @overload\n def __call__(self, arg: _DA.DA) -> _array.array: ...\n\n @overload\n def __call__(self, arg: NDArray[np.double]) -> NDArray[np.double]: ...\n\n @overload\n def __call__(self, arg: List[float]) -> List[float]: ...\n\n @overload\n def __call__(self, arg: _array.array) -> _array.array: ...\n\n @overload\n def __call__(self, arg: List[_DA.DA]) -> List[_DA.DA]: ...\n\n def __call__(\n self, arg: Union[\n float, _DA.DA, NDArray[np.double],\n List[float], _array.array, List[_DA.DA],\n ]\n ) -> Union[NDArray[np.double], List[float], _array.array, List[_DA.DA]]:\n\n if isinstance(arg, (list, _array.array, np.ndarray)):\n return self.eval(arg)\n return self.evalScalar(arg)\n\n # *************************************************************************\n # * Member access routines\n # *************************************************************************\n\n def getAc(self) -> Tuple[float]:\n return self.ac\n\n def getDim(self) -> int:\n return self.dim\n\n def getOrd(self) -> int:\n return self.ord\n\n def getVars(self) -> int:\n return self.vars\n\n def getTerms(self) -> int:\n return self.terms\n\n def __getattr__(self, k):\n raise TypeError\n\n def __delattr__(self, k):\n raise TypeError\n","repo_name":"giovannipurpura/daceypy","sub_path":"daceypy/_compiledDA.py","file_name":"_compiledDA.py","file_ext":"py","file_size_in_byte":10743,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"19"}
+{"seq_id":"17907427120","text":"import os\nimport shutil\nfrom datetime import datetime\nfrom PIL import Image\n\nclass photoOrganizer:\n extensions = ['jpg','jpeg','JPG','JPEG']\n\n def folder_path(self, file):\n date = self.photo_date(file)\n m =''\n month = date.strftime(\"%m\")\n if month == \"01\":\n m = 'Janeiro'\n elif month == \"02\":\n m = 'Fevereiro'\n elif month == \"03\":\n m = 'Março'\n elif month == \"04\":\n m = 'Abril'\n elif month == \"05\":\n m = 'Maio'\n elif month == \"06\":\n m = 'Junho'\n elif month == \"07\":\n m = 'Julho'\n elif month == \"08\":\n m = 'Agosto'\n elif month == \"09\":\n m = 'Setembro'\n elif month == \"10\":\n m = 'Outubro'\n elif month == \"11\":\n m = 'Novembro'\n elif month == \"12\":\n m = 'Dezembro'\n return date.strftime('%Y' + '/' + m)\n\n def photo_date(self, file):\n photo = Image.open(file)\n \n if not photo.getexif() == {}:\n info = photo._getexif()\n if 36867 in info:\n date = info[36867]\n date = datetime.strptime(date, '%Y:%m:%d %H:%M:%S')\n else:\n date = datetime.fromtimestamp(os.path.getmtime(file))\n return date\n\n def movePhoto(self,file):\n new_folder = self.folder_path(file)\n if not os.path.exists(new_folder):\n os.makedirs(new_folder)\n shutil.move(file, new_folder + '/' + file)\n\n def organize(self):\n photos = [\n filename for filename in os.listdir('.') if any(filename.endswith(ext) for ext in self.extensions)\n ]\n for filename in photos:\n self.movePhoto(filename)\n\nPO = photoOrganizer()\nPO.organize()","repo_name":"Digu62/Photo_Organizer","sub_path":"photoOrganizer.py","file_name":"photoOrganizer.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"3823552909","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated at 8/20/2019\n__author__ = 212577071\nUsage: \n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom model.utility import get_static_visit_data, get_obiee_visit_df\n\nsource = 'static'\ndef genearte_score_df(source='static') -> pd.DataFrame:\n if source == 'static':\n visit_df = get_static_visit_data()\n elif source == 'live':\n visit_df = get_obiee_visit_df()\n else:\n raise NotImplementedError(f'{source} is a invalid source')\n score_df = visit_df[['user_sso', 'report_id', 'total_access']].copy()\n cut_off = np.percentile(score_df['total_access'], 99)\n \n\n def min_max_scaler(_, min_score=1, max_score=5):\n if _ >= cut_off:\n return max_score\n else:\n return (_ - 1) * (max_score - min_score) / (cut_off - 1) + min_score\n\n score_df['score'] = score_df['total_access'].apply(min_max_scaler)\n score_df['total_access'].describe()\n score_df['score'].describe()\n \n\n return score_df[['user_sso', 'report_id', 'score']]\n","repo_name":"sidddataanalytics/BITS-Project-Recomended-Systems","sub_path":"svd_recommendation/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"71377167082","text":"#!/usr/bin/env python3\n\n\"\"\"Automatically build and push new images to Docker Hub if necessary.\"\"\"\n\nimport json\nimport subprocess\nfrom argparse import ArgumentParser\n\nimport docker\nfrom docker.models.images import Image\n\nIMAGE_TYPES = (\"cpp\", \"python\", \"cpp-and-twogtp\")\n# Maps an image type X in IMAGE_TYPES to a list of image types that should built\n# before X can be built.\nIMAGE_PREREQS = {\n \"cpp-and-twogtp\": [\"cpp\"],\n}\nREPO_NAME = \"humancompatibleai/goattack\"\n\n\ndef main():\n \"\"\"Main entry point.\"\"\"\n parser = ArgumentParser(\n description=\"Builds and pushes new KataGo images to Docker Hub.\",\n )\n parser.add_argument(\n \"--image\",\n type=str,\n choices=IMAGE_TYPES,\n default=[\"cpp\", \"python\"],\n help=\"Which images to update\",\n nargs=\"+\",\n )\n args = parser.parse_args()\n image_types = args.image\n\n client = docker.from_env()\n images = client.images.list(name=REPO_NAME)\n\n # We use the Git hash to tag our images. Find the current hash.\n hash_raw = subprocess.check_output([\"git\", \"rev-parse\", \"--short\", \"HEAD\"])\n current_hash = hash_raw.decode(\"ascii\").strip()\n\n # The \"tag\" string actually includes the repo name as well\n # (e.g. \"humancompatibleai/goattack:c27e251\"). These are all\n # from the same repo, so we just look at the tag proper (e.g. \"c27e251\").\n available_tags = [\n tag.split(\":\")[1]\n for image in images\n if isinstance(image, Image)\n for tag in image.tags\n ]\n\n # We also need to know the absolute path for the root Go Attack directory\n # in order to build the Docker images if necessary.\n rootdir_raw = subprocess.check_output([\"git\", \"rev-parse\", \"--show-toplevel\"])\n rootdir = rootdir_raw.decode(\"ascii\").strip()\n\n for image_type in image_types:\n tag = f\"{current_hash}-{image_type}\"\n image_name = f\"{REPO_NAME}:{tag}\"\n if tag in available_tags:\n print(f\"Using existing local copy of {image_name}\")\n continue\n # The image is missing, so we need to build and push it.\n\n BUILD_ARGS = {\"ARG_GIT_COMMIT\": current_hash}\n prereqs = IMAGE_PREREQS.get(image_type, [])\n for prereq in prereqs:\n print(f\"Building prereq: {prereq}\")\n client.images.build(\n path=rootdir,\n dockerfile=f\"compose/{prereq}/Dockerfile\",\n tag=f\"{REPO_NAME}:{prereq}\",\n buildargs=BUILD_ARGS,\n )\n print(f\"Building {REPO_NAME}:{tag}\")\n build_result = client.images.build(\n path=rootdir,\n dockerfile=f\"compose/{image_type}/Dockerfile\",\n tag=image_name,\n buildargs=BUILD_ARGS,\n )\n # Pylance can't quite figure out the type of build_result; see\n # https://docker-py.readthedocs.io/en/stable/images.html#image-objects for info\n assert isinstance(build_result, tuple) and len(build_result) == 2\n img, _ = build_result\n assert isinstance(img, Image)\n\n print(f\"Pushing {image_name}\")\n push_result = client.images.push(repository=REPO_NAME, tag=tag)\n # push_result is a string consisting of JSON messages on separate lines\n for line in push_result.splitlines():\n message = json.loads(line)\n if \"error\" in message:\n raise RuntimeError(f\"Pushing {image_name} failed: {message['error']}\")\n\n # Write the current image tags to a file so that Kubernetes can use them.\n with open(f\"{rootdir}/kubernetes/active-images.env\", \"w\") as f:\n for image_type in image_types:\n env_variable_name = f\"{image_type.upper().replace('-', '_')}_IMAGE\"\n f.write(f\"{env_variable_name}={REPO_NAME}:{current_hash}-{image_type}\\n\")\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"AlignmentResearch/go_attack","sub_path":"kubernetes/update_images.py","file_name":"update_images.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"19"}
+{"seq_id":"70504155243","text":"#!/usr/bin/env python2.6\n\nfrom __future__ import (\n print_function,\n unicode_literals\n)\n\n# somehow have to make this work. Maybe symbolic links?\nfrom stone.ir.data_types import (\n is_boolean_type,\n is_float_type,\n is_integer_type,\n is_list_type,\n is_nullable_type,\n is_string_type,\n is_struct_type,\n is_timestamp_type,\n is_union_type,\n is_void_type\n)\nfrom stone.backend import CodeBackend\n\nimport copy\n\nclass APIEndpointGenerator(CodeBackend):\n \"\"\"Generates API Endpoint objects for the API explorer.\"\"\"\n\n endpoint_vars = []\n\n def generate(self, api):\n with self.output_to_relative_path('../../src/endpoints.ts'):\n self.outputHeader()\n with self.indent():\n for namespace in api.namespaces.values():\n self.emit_namespace(namespace)\n self.emit()\n self.generate_multiline_list(self.endpoint_vars,\n delim=('',''),\n before='export const endpointList: Utils.Endpoint[] = [',\n after='];')\n self.outputFooter()\n\n def outputHeader(self):\n self.emit('// Automatically generated code; do not edit')\n self.emit()\n self.emit(\"import * as Utils from './utils';\")\n self.emit()\n self.emit('module Endpoints {')\n\n def outputFooter(self):\n self.emit('}')\n self.emit()\n self.emit('export = Endpoints;')\n\n def emit_namespace(self, namespace):\n for route in namespace.routes:\n if not route.deprecated: # Skip deprecated route.\n self.emit_route(namespace, route)\n\n\n def emit_route(self, namespace, route):\n self.endpoint_vars.append(self._var_name(route, namespace))\n\n def get_param_list():\n if is_union_type(route.arg_data_type):\n return [self.data_type_constructor(route.arg_data_type, \"''\", is_root=True)]\n else:\n return list(map(self.parameter_constructor, route.arg_data_type.all_fields))\n\n def is_empty_type(arg_type):\n return is_void_type(arg_type) or len(arg_type.all_fields) == 0\n\n # Right now, this is just upload_session_start\n if is_empty_type(route.arg_data_type) and 'style' in route.attrs and route.attrs['style'] == 'upload':\n self.emit('const {0} = new Utils.Endpoint(\"{1}\", \"{2}\",'.format(\n self._var_name(route, namespace),\n namespace.name,\n self._route_name(route)\n ))\n with self.indent():\n self._emit_attr_dict(route.attrs)\n self.emit('new Utils.FileParam()')\n self.emit(');')\n\n elif 'style' in route.attrs and route.attrs['style'] == 'upload': # is upload-style, not void\n self.emit('const {0} = new Utils.Endpoint(\"{1}\", \"{2}\",'.format(\n self._var_name(route, namespace),\n namespace.name,\n self._route_name(route)\n ))\n\n with self.indent():\n self._emit_attr_dict(route.attrs)\n self.emit('new Utils.FileParam(),')\n self.generate_multiline_list(get_param_list(), delim=('',''))\n self.emit(');')\n elif not is_empty_type(route.arg_data_type): # not upload style, and has params\n self.emit('const {0} = new Utils.Endpoint(\"{1}\", \"{2}\",'.format(\n self._var_name(route, namespace),\n namespace.name,\n self._route_name(route)\n ))\n with self.indent():\n self._emit_attr_dict(route.attrs)\n self.generate_multiline_list(get_param_list(), delim=('',''))\n self.emit(');')\n else: # void, but not upload_style\n self.emit('const {0} = new Utils.Endpoint(\"{1}\", \"{2}\",'.format(\n self._var_name(route, namespace),\n namespace.name,\n self._route_name(route)\n ))\n with self.indent():\n self._emit_attr_dict(route.attrs, True)\n self.emit(');')\n\n def _route_name(self, route):\n if route.version == 1:\n return route.name\n else:\n return '{}_v{}'.format(route.name, route.version)\n\n # converts route 
name into Typescript variable name\n def _var_name(self, route, namespace):\n route_name = self._route_name(route)\n return namespace.name + '_' + route_name.replace('/', '_') + '_endpt'\n\n # Emit route attrs to dict.\n def _emit_attr_dict(self, attrs, is_last=False):\n close = '}' if is_last else '},'\n if not attrs:\n self.emit('{' + close)\n return\n\n self.emit('{')\n with self.indent():\n for k, v in attrs.items():\n self.emit('{0}: \"{1}\",'.format(k, v))\n self.emit(close)\n\n # Pattern-match on the type of the parameter\n # was_nullable indicates that this was wrapped in a nullable type.\n # A parameter is optional if it was nullable or it has a default value.\n def parameter_constructor(self, param, was_nullable=None):\n # TODO: we can't guarantee that param has a 'has_default' attribute\n has_default = getattr(param, 'has_default', False)\n return self.data_type_constructor(param.data_type, '\"{0}\"'.format(param.name),\n has_default=has_default, was_nullable=was_nullable)\n\n def data_type_constructor(self, data_type, name, has_default=False, was_nullable=False, is_root=False):\n optional = self._emit_bool(has_default or was_nullable)\n # Since params are reused between different endpoints, making a copy prevents\n # one parameter from overwriting information about another's arguments.\n if is_nullable_type(data_type):\n return self.data_type_constructor(data_type.data_type, name, was_nullable=True)\n if is_integer_type(data_type):\n return 'new Utils.IntParam({0}, {1})'.format(name, optional)\n elif is_float_type(data_type):\n return 'new Utils.FloatParam({0}, {1})'.format(name, optional)\n # It would be nice to separate timestamps out (e.g. a bunch of selectors!)\n elif is_string_type(data_type) or is_timestamp_type(data_type):\n return 'new Utils.TextParam({0}, {1})'.format(name, optional)\n elif is_boolean_type(data_type):\n return 'new Utils.BoolParam({0}, {1})'.format(name, optional)\n elif is_union_type(data_type) or is_struct_type(data_type):\n # would be nice to make this prettier\n if is_union_type(data_type):\n param_type = 'RootUnionParam' if is_root else 'UnionParam'\n else:\n param_type = 'StructParam'\n\n return 'new Utils.{0}({1}, {2}, {3})'.format(\n param_type,\n name,\n optional,\n '[' + ', '.join(self.parameter_constructor(field) for field in data_type.all_fields if field.name != 'other') + ']',\n )\n elif is_void_type(data_type):\n return 'new Utils.VoidParam({0})'.format(name)\n elif is_list_type(data_type):\n return 'new Utils.ListParam({0}, {1}, (index: string): Utils.Parameter => {2})'.format(\n name,\n optional,\n self.data_type_constructor(data_type.data_type, 'index'))\n else:\n return 'null /* not implemented yet */'\n\n # emit Typescript representation of boolean\n def _emit_bool(self, b):\n return 'true' if b else 'false'\n","repo_name":"dropbox/dropbox-api-v2-explorer","sub_path":"codegen/codegen.stoneg.py","file_name":"codegen.stoneg.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"19"}
+{"seq_id":"71259351722","text":"####################################\n### Import libraries ###\n####################################\n\nfrom lib.visualization.figures import *\nfrom lib.auth.check import EncryptedAuth\nfrom lib.utils.utils import human_format, read_data, correct_month, correct_week, standardized_frame, summarize_data\n\nimport requests\nfrom datetime import datetime, date, timedelta\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport dash\nimport dash_auth\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\n\n####################################\n### Setup Application ###\n####################################\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\nserver = app.server\n\nauth = EncryptedAuth(\n app\n)\n\n####################################\n### Loading data ###\n####################################\n\n# Get names\ncache_data = read_data(\"data/data.pkl\")\n\nadresses = [\"Tufsteen 4\", \"Tufsteen 5\", \"Tufsteen 6\", \"Tufsteen 7\", \"Tufsteen 8\", \"Tufsteen 9\"]\nmaanden = cache_data[\"Maand\"].unique()\nmaanden = dict(zip(range(0, len(maanden)), list(maanden)))\n\nweer = pd.read_pickle(\"data/weer.pkl\")\nprint(cache_data)\n# global cache for buttons\ncache_changed_button = []\n\n####################################\n### All code for Application ###\n####################################\n\napp.layout = html.Div([\n html.Div(\n [\n html.Img(\n src=\"https://github.com/Douwe-Spaanderman/SEGRO_energydashboard/blob/main/data/logo_lower.png?raw=true\",\n className='one column',\n ),\n html.Div(\n [\n html.H2(\n 'Elektrisiteit Dashboard',\n ),\n html.H4(\n 'Saldo en productie',\n )\n ],\n\n ),\n html.A(\n html.Button(\n \"Learn More\",\n id=\"learnMore\"\n ),\n href=\"https://github.com/Douwe-Spaanderman/SEGRO_energydashboard/\",\n )\n ],\n id=\"header\",\n className='row-header',\n ),\n html.Div(\n [\n html.Div(\n [\n html.P(\n 'Selecteer het adress of meerdere adressen:',\n className=\"control_label\"\n ),\n dcc.Dropdown(\n id='persisted-adress',\n value=[adresses[0]],\n options=[{'label': v, 'value': v} for v in adresses],\n persistence=True,\n multi=True,\n persistence_type='session',\n searchable=False, \n ),\n html.P(\n 'Selecteer een of meerdere maanden:',\n className=\"control_label\"\n ),\n dcc.RangeSlider(\n id='month-slider',\n min=list(maanden)[0],\n max=list(maanden)[-1],\n value=[list(maanden)[0],list(maanden)[1]],\n marks={label: maanden[label] for label in range(0, int(list(maanden)[-1]), 4)},\n className=\"dcc_control\",\n persistence=True,\n persistence_type='session',\n ),\n html.P(\n 'Selecteer de week:',\n className=\"control_label\"\n ),\n html.Div(\n dcc.Slider(\n id='week-slider', \n value=0), \n id='week-slider-container'\n ),\n html.Div(\n [\n html.Button(\n 'Analyseer normale Data', \n id='btn-1',\n n_clicks=1, \n className=\"btn active\"\n ),\n html.Button(\n 'Reken nodige capaciteit uit', \n id='btn-2',\n n_clicks=0,\n className=\"btn\"),\n ],\n id=\"myDIV\",\n className=\"myDiv\"\n ),\n html.Div(\n 'Hoeveelheid capaciteit in kWh: ',\n id='capaciteit-slider-output',\n className=\"control_label\"\n ),\n dcc.Slider(\n id='capaciteit-slider',\n marks={i: '{}'.format(1000 ** i) for i in range(4)},\n max=75000,\n value=5000,\n step=100\n ),\n html.Div(\n 'Hoeveelheid kWh op te halen per uur in winternachten 
(12-6): ',\n id='ophalen-slider-output',\n className=\"control_label\"\n ),\n dcc.Slider(\n id='ophalen-slider',\n marks={i: '{}'.format(10 ** i) for i in range(4)},\n max=250, \n value=2,\n step=0.5\n ),\n ],\n className=\"pretty_container four columns\"\n ),\n html.Div(\n [\n html.Div(\n [\n html.Div(\n [\n html.P(\n id=\"productie_header\",\n className=\"info_text\"\n ),\n html.H6(\n id=\"productie_text\",\n className=\"info_text\"\n )\n ],\n id=\"productie\",\n className=\"pretty_container\"\n ),\n html.Div(\n [\n html.P(\n id=\"verbruik_header\",\n className=\"info_text\"\n ),\n html.H6(\n id=\"verbruik_text\",\n className=\"info_text\"\n )\n ],\n id=\"verbruik\",\n className=\"pretty_container\"\n ),\n html.Div(\n [\n html.P(\n id=\"saldo_header\",\n className=\"info_text\"\n ),\n html.H6(\n id=\"saldo_text\",\n className=\"info_text\"\n )\n ],\n id=\"saldo\",\n className=\"pretty_container\"\n ),\n html.Div(\n [\n html.P(\n id=\"weer_header\",\n className=\"info_text\"\n ),\n html.H6(\n id=\"weer_text\",\n className=\"info_text\"\n )\n ],\n id=\"zon\",\n className=\"pretty_container\"\n ),\n ],\n id=\"infoContainer\",\n className=\"row\"\n ),\n html.Div(\n [\n dcc.Graph(\n id='figure-1-container',\n )\n ],\n id=\"Graph1Container\",\n className=\"pretty_container\"\n )\n ],\n id=\"rightCol\",\n className=\"eight columns\"\n )\n ],\n className=\"row\"\n ),\n html.Div(\n [\n html.Div(\n [\n dcc.Graph(id='figure-2-container')\n ],\n className='pretty_container seven columns',\n ),\n html.Div(\n [\n dcc.Graph(id='figure-3-container')\n ],\n className='pretty_container five columns',\n ),\n ],\n className='row'\n ),\n html.Div(\n [\n html.Div(\n [\n dcc.Graph(id='figure-4-container')\n ],\n className='pretty_container seven columns',\n ),\n html.Div(\n [\n dcc.Graph(id='figure-5-container')\n ],\n className='pretty_container five columns',\n ),\n ],\n className='row'\n ),\n ],\n id=\"mainContainer\",\n style={\n \"display\": \"flex\",\n \"flex-direction\": \"column\"\n }\n)\n\n# Button\n@app.callback(\n [Output(f\"btn-{i}\", \"className\") for i in range(1, 3)],\n [Input(f\"btn-{i}\", \"n_clicks\") for i in range(1, 3)],\n)\ndef set_active(*args):\n ctx = dash.callback_context\n\n if not ctx.triggered or not any(args):\n return [\"btn active\", \"btn\"]\n\n # get id of triggering button\n button_id = ctx.triggered[0][\"prop_id\"].split(\".\")[0]\n\n return [\n \"btn active\" if button_id == f\"btn-{i}\" else \"btn\" for i in range(1, 3)\n ]\n\n# Sliders\n@app.callback(\n Output('week-slider-container', 'children'),\n Input('month-slider', 'value'),\n)\ndef week_slider(month):\n weken = correct_week(cache_data, month)\n return dcc.Dropdown(\n id='week-slider',\n value=list(weken.keys())[0],\n options=[{'label': k, 'value': v} for v,k in weken.items()],\n persistence=True,\n persistence_type='session',\n searchable=False, \n ),\n\n@app.callback(\n Output('capaciteit-slider-output', 'children'),\n Output('ophalen-slider-output', 'children'),\n Input('capaciteit-slider', 'value'),\n Input('ophalen-slider', 'value'),\n)\ndef capaciteit_ophalen_slider(capaciteit, ophalen):\n return f\"Selecteer de hoeveelheid capaciteit in kWh: {capaciteit}\", f\"Hoeveelheid kWh op te halen per uur in winternachten (12-6): {ophalen}\"\n\n# Figures\n@app.callback(\n Output('figure-1-container', 'figure'),\n Output('productie_text', 'children'),\n Output('productie_header', 'children'),\n Output('verbruik_text', 'children'),\n Output('verbruik_header', 'children'),\n Output('saldo_text', 'children'),\n 
Output('saldo_header', 'children'),\n Output('weer_text', 'children'),\n Output('weer_header', 'children'),\n Input('persisted-adress', 'value'),\n Input('month-slider', 'value'),\n Input('capaciteit-slider', 'value'),\n Input('ophalen-slider', 'value'),\n Input('btn-1', 'n_clicks'),\n Input('btn-2', 'n_clicks')\n)\ndef main_figure_display(adress, month, capaciteit, ophalen, btn_1, btn_2):\n changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]\n\n global cache_changed_button\n\n try:\n if 'btn-' in changed_id:\n cache_changed_button = changed_id\n except UnboundLocalError:\n cache_changed_button = []\n\n if 'btn-2' in cache_changed_button:\n msg = 'Button 2 was most recently clicked'\n\n month_data = correct_month(cache_data, month)\n data = standardized_frame(month_data, adress)\n fig, summary = figure1b_data(data, capaciteit, ophalen)\n header = [\"Overproductie volgens 0 lijn\", \"Capaciteit eisen volgens 0 lijn\", \"Gemiddelde (Ideaal)\", \"Capaciteit eisen volgens Ideaal\"]\n\n else:\n msg = \"None of the buttons have been clicked yet'\"\n\n data = standardized_frame(cache_data, adress)\n summary = summarize_data(data, month, weer)\n fig = figure1a_data(data, adress, month)\n header = [\"Productie\", \"Verbruik\", \"Saldo\", \"Zon\"]\n \n return fig, summary[0], header[0], summary[1], header[1], summary[2], header[2], summary[3], header[3]\n\n@app.callback(\n Output('figure-2-container', 'figure'),\n Output('figure-3-container', 'figure'),\n Output('figure-4-container', 'figure'),\n Output('figure-5-container', 'figure'),\n Input('persisted-adress', 'value'),\n Input('month-slider', 'value'),\n Input('week-slider', 'value'), \n Input('capaciteit-slider', 'value'),\n Input('ophalen-slider', 'value'),\n)\ndef create_figures(adress, month, week, capaciteit, ophalen):\n month_data = correct_month(cache_data, month)\n data = standardized_frame(month_data, adress)\n\n return figure2_data(data, adress), figure3_data(data, capaciteit, ophalen), figure4_data(data), figure3_data(data, capaciteit, ophalen, week)\n\nif __name__ == '__main__':\n app.run_server(debug=True)","repo_name":"Douwe-Spaanderman/SEGRO_energydashboard","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"26040047711","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 30 00:16:55 2019\n\n@author: pengz\n\"\"\"\n\n'''\nGiven a binary tree, return the level order traversal of its nodes' values. \n(ie, from left to right, level by level).\n\nFor example:\nGiven binary tree [3,9,20,null,null,15,7],\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nreturn its level order traversal as:\n\n[\n [3],\n [9,20],\n [15,7]\n]\n'''\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nfrom collections import deque\ndef levelOrder(root: TreeNode) -> [[int]]: ## 用deque实现queue的功能, 也就是FIFO, 用来实现BFS\n queue = deque() ## 用deque的原因是因为popleft是O(1), 不然list的pop(0)的时间是O(n)\n queue.append(root)\n ret = []\n if not root:\n return ret\n while len(queue) != 0:\n num = len(queue) ## 这一层需要的pop次数\n i = 1\n tmp = []\n while i <= num: ## pop出这一层的就行了, 下一层的会添加进来\n vertex = queue.popleft() ## 先pop出这一层的第一个点\n if vertex.left:\n queue.append(vertex.left)\n if vertex.right:\n queue.append(vertex.right)\n tmp.append(vertex.val)\n i += 1\n ret.append(tmp)\n return ret\n\n## https://www.youtube.com/watch?v=IWiprpdSgzg 给DFS的解释\ndef levelOrder2(root: TreeNode) -> [[int]]: ## 用DFS解, 因为是按层来所以比较关键的就是要知道目前到了第几层\n ret = []\n def dfs(node,level,ret):\n if not node: ## base case\n return \n if level >= len(ret): ## 这里一定是大于等于, 用等于是为了满足一开始还在第一层的时候, 初始化, 就需要用等于来添加\n ## 这里level也相当于是一个针对这每个小list的index\n ret.append([]) ## 也就是说每到新的一层, 记得要添加进一个新的list, 用来装这一层的点\n ret[level].append(node.val) ## 把这点在相应的level的小list里添加上\n dfs(node.left, level+1, ret) ## 这两步就相当于DFS里的, 深度优先, 先往深一层level走\n dfs(node.right, level+1, ret)\n dfs(root,0,ret)\n return ret","repo_name":"pengzhefu/LeetCodePython","sub_path":"BFS/median/q102.py","file_name":"q102.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"41897280287","text":"x=5\ny='punit'\nprint(x)\nprint(y)\n\n\n#Casting Variable ;\nx=str(3)\ny=int(3)\nz=float(3)\n\nprint(x),print(y),print(z)\n\n#Get the Type;\nx=10\ny='punit'\nz=()\na=.1\nprint(type(x)),print(type(y)),print(type(z)),print(type(a))\n\n#Multi Word Variables Name;\n#Camel Case;\nmyvariablename='punit'\n#Pascal Case;\nMyVariableName='Deepak'\n#Snake Case;\nmy_variable_name='Amit'\nprint(myvariablename,MyVariableName,my_variable_name)\n\n#Unpack a Collection;\nfruits=[\"Apple\",\"Banana\",\"Mango\",\"Gauava\"]\na,b,c,d=fruits\nprint(a)\nprint(b)\nprint(c)\nprint(d)\n\n#Global Variable or Keyword;\n\nx=\"punit kumar\"\ndef myfunc():\n print(\"my name is\",x)\nmyfunc()\n\nx=\"Awesome\"#.....Thish is knowen as Global Variable...\ndef myfunc():\n x=\"fantastic\"#.....Thish is knowen as Local Variable...\n print(\"python is\",x)\nmyfunc()\nprint(\"python is\",x)\n\n#Global Keyword.....\n\nx=\"Awesome\"\n\ndef myfunc():\n global x\n x=\"fantastic\"\nmyfunc()\nprint(\"Python is\",x)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Puneetkumar05522/python-code","sub_path":"variable/variable1.py","file_name":"variable1.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"27688170453","text":"from socket import *\n\ns = socket(AF_INET, SOCK_STREAM)\ns.connect(('localhost', 3333))\n\nwhile True:\n msg = input('input formula: ')\n if msg == 'q':\n break\n s.send(msg.encode())\n try:\n data = s.recv(1024)\n except:\n break\n else:\n if not data:\n break\n print('Received answer: ', data.decode())\n\n\n\ns.close()","repo_name":"24-decembre/net_programming","sub_path":"hw4/tcp_calculator_client.py","file_name":"tcp_calculator_client.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"27467905011","text":"import loader\nimport generators\nimport random\n\ndef start(): \n mode = pick_mode() \n print(\"mode:\", mode)\n if mode == 1:\n surah = False \n # print(surah)\n while not surah:\n surah = pick_surah_j30_plus_fatiha() \n new_simple_game(generators.GenerateMidDiffQuizFromSingleSura(surah)) \n elif mode == 2:\n list_surah = loader.load_juz_amma_plus_fatiha()\n # print(list_surah)\n new_simple_game(generators.GenerateMidDiffQuizFromMultipleSura(list_surah))\n elif mode == 3:\n surah = False \n # print(surah)\n while not surah:\n surah = pick_surah_all() \n new_simple_game(generators.GenerateMidDiffQuizFromSingleSura(surah)) \n elif mode == 4:\n list_surah = loader.load_surah_all() \n new_simple_game(generators.GenerateMidDiffQuizFromMultipleSura(list_surah))\n else:\n print(\"Mode tidak dikenali\")\n\ndef pick_mode(): \n print(\"PERMAINAN BELAJAR KOSAKATA AL-QURAN\")\n print(\"Anda akan diminta memilih arti kosakata tertentu dari Al-Qur'an\")\n print(\"Pilih sumber kosakata:\")\n print(\"1. Satu surat pendek (juz amma + al-fatihah)\")\n print(\"2. Seluruh surat pendek (juz amma + al-fatihah)\")\n print(\"3. Satu surat (dari seluruh surat)\")\n # print(\"4. Seluruh surat)\")\n mode = 0\n valid_input = False \n while not valid_input:\n mode = input(\"Pilihan anda(angka): \")\n if mode.isdigit():\n mode = int(mode)\n valid_input = mode>=1 and mode <=3 \n if not valid_input:\n print(\"Gunakan angka dari 1 s.d 3\")\n return mode\n\ndef pick_surah_all():\n surahs = loader.load_surah_all()\n # prepare column\n # 114 / 4 => 29 rows\n row_cnt = 29\n keys = list(surahs.keys())\n # print(\"keys:\",keys) \n rows = []\n for i in range(row_cnt):\n s_row = []\n for c in range(4): \n idx = i + (c * row_cnt)\n # print(idx) \n if idx < 114: \n key = keys[idx]\n # print(\"i c idx key:\",i,c,idx,key,end=\" \")\n surah = surahs[key]\n surah[\"key\"] = key\n s_row.append(surah)\n rows.append(s_row)\n print(\"Daftar surat Al Qur'an:\")\n for row in rows:\n for s_row in row:\n print(f'{s_row[\"key\"]:>3} {s_row[\"nama\"]:<13}',end=\" \")\n print()\n idx_select = input(\"Pilih surat (nomor): \")\n if idx_select in surahs:\n selected = surahs[idx_select]\n selected[\"idx\"] = idx_select\n print(selected[\"nama\"])\n return selected\n else:\n print(\"Nomor surat tidak ditemukan\")\n return False\n\ndef pick_surah_j30_plus_fatiha():\n surah_j30 = loader.load_juz_amma_plus_fatiha()\n # prepare column\n # 114 / 4 => 29 rows\n row_cnt = 10\n keys = list(surah_j30.keys())\n # print(\"keys:\",keys) \n rows = []\n for i in range(row_cnt):\n s_row = []\n for c in range(4): \n idx = i + (c * row_cnt) \n if idx < 38: \n key = keys[idx]\n # print(\"i c idx key:\",i,c,idx,key,end=\" \")\n surah = surah_j30[key]\n surah[\"key\"] = key\n s_row.append(surah)\n rows.append(s_row)\n print(\"Daftar surat pada juz 30 plus Al-Fatihah:\")\n for row in rows:\n for s_row in row:\n print(f'{s_row[\"key\"]:>3} {s_row[\"nama\"]:<13}',end=\" \")\n print()\n idx_select = input(\"Pilih surat (nomor): \")\n if idx_select in surah_j30:\n selected = surah_j30[idx_select]\n selected[\"idx\"] = idx_select\n print(\"Anda memilih:\",selected[\"nama\"])\n return selected\n else:\n print(\"Nomor surat tidak ditemukan\")\n return False\n\ndef new_simple_game(generator:generators.QuizGenerator): \n quiz_list= generator.generateQuizList()\n score = 0\n cur_quiz_idx = 0 \n correct = True\n while correct and cur_quiz_idx < len(quiz_list):\n quiz = quiz_list[cur_quiz_idx]\n print(quiz.question)\n choices = quiz.choices\n 
random.shuffle(choices)\n choice_map = {\n \"a\" : choices[0], \"b\" : choices[1], \"c\" : choices[2]\n }\n print(f\"Pilih a. {choices[0]} b. {choices[1]} c. {choices[2]}\")\n answer = input(\"Jawab (a/b/c):\") \n if answer in choice_map:\n correct = choice_map[answer] == quiz.correct_answer\n else:\n print(\"\\_0_/\")\n correct = False\n if correct:\n score += 1\n cur_quiz_idx +=1\n else:\n print(f\"Jawaban benar: {quiz.correct_answer}\")\n print(f\"Permainan berakhir, skor anda: {score}\")\n\n# surah = pick_sura_j30_plus_fatiha()\n# new_game(surah)\n\n","repo_name":"ewinsutriandi/quran-word-translation-game-python","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"19"}
+{"seq_id":"27415729774","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Time : 2017/11/2 21:42\n# @Author : lingxiangxiang\n# @File : demon6.py\nimport multiprocessing\n\n\ndef worker(d, l):\n l += range(11, 16)\n for i in xrange(1, 6):\n key = \"key{0}\".format(i)\n val = \"val{0}\".format(i)\n d[key] = val\n\n\nif __name__ == \"__main__\":\n manager = multiprocessing.Manager()\n d = manager.dict()\n l = manager.list()\n p = multiprocessing.Process(target=worker, args=(d, l))\n p.start()\n p.join()\n print(d)\n print(l)\n print(\"main end\")\n\n","repo_name":"ajing2/python-demon","sub_path":"multiprocess/demon6.py","file_name":"demon6.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"19"}
+{"seq_id":"22442767191","text":"while True:\n saltos = []\n nome = input('Atleta: ')\n if (nome == ''): break\n while len(saltos) < 5:\n try:\n saltos.append(float(input(f'{len(saltos)+1}° salto: ')))\n except:\n print('Valor inválido! Tnete novamente.')\n continue\n print(f'Atleta: {nome}')\n print(f'Saltos: {saltos}')\n print(f'Média dos saltos: {sum(saltos)/len(saltos)}')","repo_name":"iWesley72/exercicios-python","sub_path":"04_Listas/17_salto_em_distancia.py","file_name":"17_salto_em_distancia.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"70029192359","text":"from django.test import TestCase\nfrom .models import NfcCard, Semester, User\n\n\nclass FixturesTestCase(TestCase):\n fixtures = [\"nfccard\", \"semester\", \"user\"]\n\n def test_nfccard_fixtures(self):\n card = NfcCard.objects.get(card_uid=\"3b2be5a2\")\n self.assertIsNotNone(card, \"There is a NfcCard with uid 3b2be5a2\")\n self.assertEqual(\n NfcCard.objects.count(), 1, \"There are ony one NfcCard registered\"\n )\n\n def test_semester_fixtures(self):\n self.assertEqual(\n Semester.objects.count(), 6, \"There are six semesters registererd\"\n )\n\n def test_user_fixtures(self):\n user = User.objects.get(username=\"cyb\")\n self.assertIsNone(user.last_login, \"User cyb have never logged in\")\n self.assertEqual(User.objects.count(), 3, \"There are three users registered\")\n","repo_name":"cybernetisk/internsystem","sub_path":"core/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"}
+{"seq_id":"70724313321","text":"from django.test import TestCase\nfrom django.shortcuts import get_object_or_404\n# Create your tests here.\n\nfrom .models import OrganizerProfile,EventUserProfile,AuthUser,UserBook\n# 1\nclass AuthTestUser(TestCase):\n @classmethod\n def setTestData(cls):\n auth_user = AuthUser.objects.create_user(\n email=\"dwaveflux@gmail.com\",password=\"hullabaloooo\"\n )\n \n auth_user.save()\n\n\n def test_email(self):\n try:\n auth = AuthUser.objects.get(id=1)\n expected_object_name = f'{auth.email}'\n expected_object_pass = f'{auth.password}'\n self.assertEqual(expected_object_name, 'dwaveflux@gmail.com')\n self.assertEqual(expected_object_pass, 'hullabaloooo')\n\n except AuthUser.DoesNotExist:\n auth = None\n\n\n # def test_user(self):\n # auth = AuthUser.objects.get(id=1)\n # expected_object_name = f'{auth.password}'\n # self.assertEquals(expected_object_name, 'hullabaloooo')\n\n# 2\n\n# ##create a test case for the organizer profile model\nclass OrganizerTestProfile(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n auth_user = AuthUser.objects.create_user(\n email=\"dwaveflux@gmail.com\",password=\"hullabaloooo\"\n )\n\n auth_user.save()\n \n organizers_prof = OrganizerProfile.objects.create(\n name='Ewave', role='organizer',email_address=auth_user,location=\"Lagos State\",\n phone_number=\"+2347045477824\",supplementary_phone_number=\"+2347081927814\",description=\"Good here\",\n image=\"cloudinary.jpg\",price=400.5,street_address=\"Yaba, Lagos\"\n )\n\n organizers_prof.save()\n\n\n try:\n\n def test_organizer_instance(self):\n organizer = OrganizerProfile.objects.get(id=1)\n name = f'{organizer.name}'\n role = f'{organizer.role}'\n email = f'{organizer.email_address}'\n location = f'{organizer.location}'\n phone_number = f'{organizer.phone_number}'\n supplementary = f'{organizer.supplementary_phone_number}'\n description = f'{organizer.description}'\n image = f'{organizer.image}'\n price = f'{organizer.price}'\n street = f'{organizer.street_address}'\n self.assertEquals(name, 'Ewave')\n self.assertEquals(role, 'organizer')\n self.assertEquals(email,'dwaveflux@gmail.com')\n self.assertEquals(location,'Lagos State')\n self.assertEquals(phone_number,'+2347045477824')\n self.assertEquals(supplementary,'+2347081927814')\n self.assertEquals(description,\"Good here\")\n self.assertEquals(image,\"cloudinary\")\n self.assertEquals(price,'400.50')\n self.assertEquals(street,\"Yaba, Lagos\")\n\n except OrganizerProfile.DoesNotExist:\n organizer = None\n\n\n # def test_role(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.role}'\n # self.assertEquals(expected_object_name, 'organizer')\n\n # def test_email(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.email_address}'\n # self.assertEquals(expected_object_name,'dwaveflux@gmail.com')\n\n # def test_location(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.location}'\n # self.assertEquals(expected_object_name,'Lagos State')\n\n # def test_phone(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.phone_number}'\n # self.assertEquals(expected_object_name,'+2347045477824')\n\n # def test_supplement(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.supplementary_phone_number}'\n # self.assertEquals(expected_object_name,'+2347081927814')\n\n # def test_description(self):\n # organizer 
= OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.description}'\n # self.assertEquals(expected_object_name,\"Good here\")\n\n # def test_image(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.image}'\n # self.assertEquals(expected_object_name,\"cloudinary\")\n \n # def test_price(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.price}'\n # self.assertEquals(expected_object_name,'400.50')\n\n \n # def test_street(self):\n # organizer = OrganizerProfile.objects.get(id=1)\n # expected_object_name = f'{organizer.street_address}'\n # self.assertEquals(expected_object_name,\"Yaba, Lagos\")\n\n\n#write further test cases for the userprofile model\n\n# 3\n\nclass UserTestEvent(TestCase):\n\n @classmethod\n def setTestData(cls):\n auth_user = AuthUser.objects.create_user(\n email=\"dwaveflux@gmail.com\",password=\"hullabaloooo\"\n )\n auth_user.save()\n\n\n event_user = EventUserProfile.objects.create(\n name=\"Ewave\",role=\"user\", email_address=\"dwaveflux@gmail.com\",\n location=\"Lagos State\", phone_number=\"+2347045477824\",\n image=\"http://res.cloudinary.com/e-wave/image/upload/v1615919129/rbloqmw2hwxn4nfetcco.png\"\n )\n\n event_user.save()\n\n try:\n\n\n def test_user(self):\n user = EventUserProfile.objects.get(id=1)\n username = f'{user.name}'\n role = f'{user.role}'\n user_email=f'{user.email_address}'\n location = f'{user.location}'\n phone_number = f'{user.phone_number}'\n image = f'{user.image}'\n self.assertEquals(username, 'Ewave')\n self.assertEquals(role, 'user')\n self.assertEquals(user_email,'dwaveflux@gmail.com')\n self.assertEquals(location,'Lagos State')\n self.assertEquals(phone_number,'+2347045477824')\n self.assertEquals(image,\"http://res.cloudinary.com/e-wave/image/upload/v1615919129/rbloqmw2hwxn4nfetcco.png\")\n\n except EventUserProfile.DoesNotExist:\n user = None\n\n # def test_role(self):\n # user = EventUserProfile.objects.get(id=1)\n # expected_object_name = f'{user.role}'\n # self.assertEquals(expected_object_name, 'user')\n\n # def test_email(self):\n # user = EventUserProfile.objects.get(id=1)\n # expected_object_name = f'{user.email_address}'\n # self.assertEquals(expected_object_name,'dwaveflux@gmail.com')\n\n # def test_location(self):\n # user = EventUserProfile.objects.get(id=1)\n # expected_object_name = f'{user.location}'\n # self.assertEquals(expected_object_name,'Lagos State')\n\n # def test_phone(self):\n # user = EventUserProfile.objects.get(id=1)\n # expected_object_name = f'{user.phone_number}'\n # self.assertEquals(expected_object_name,'+2347045477824')\n\n # def test_image(self):\n # user = EventUserProfile.objects.get(id=1)\n # expected_object_name = f'{user.image}'\n # self.assertEquals(expected_object_name,\"http://res.cloudinary.com/e-wave/image/upload/v1615919129/rbloqmw2hwxn4nfetcco.png\")\n\n\n\n##write a unit test for the userbook model\nclass UserBookTest(TestCase):\n def UserBookTest(cls):\n user_book = UserBook.objects.create(\n email_address='dwave100@yahoo.com',\n allotted_budget=1340.50,\n date_of_event='2020-08-21',\n event_type='expo',\n estimated_no_of_guests=30,\n available='no'\n )\n user_book.save()\n try:\n def test_user_book(self):\n userbook = UserBook.objects.get(id=1)\n email=f'{userbook.email_address}'\n budget=f'{userbook.allotted_budget}'\n ddate=f'{userbook.date_of_event}'\n event=f'{userbook.event_type}'\n guests=f'{userbook.estimated_no_of_guests}'\n available=f'{userbook.available}'\n 
self.assertEquals(email,'dwave100@yahoo.com')\n self.assertEquals(budget,1340.50)\n self.assertEquals(ddate,'2020-08-21')\n self.assertEquals(event,'expo')\n self.assertEquals(guests,30)\n self.assertEquals(available,'no')\n \n except UserBook.DoesNotExist:\n userbook = None\n\n \n","repo_name":"E-wave112/Bevents","sub_path":"booking/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":8630,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"19325786268","text":"import math\ndef sumofdig(x):\n\tans = 0\t\n\tfor i in list(map(int, \" \".join(str(x)).split())):\n\t\tans+=i\n\treturn ans\n\ndef getlog(x,y,z):\n\tif x == 1: \n\t\treturn True\n\tif y == 1 and x != 1:\n\t\treturn False\n\tif x == y:\n\t\treturn True\t\n\telif x % (y**z) == 0:\n\t\treturn getlog(x//(y**z),y,z)\n\telse: \n\t\tif z == 1: \n\t\t\treturn False\n\t\telse:\n\t\t\treturn(getlog(x,y,z-1))\n\n#print(sumofdig(614656))\na = []\nprint(getlog(81,9,4))\n\nfor i in range(10**8,5*10**8):\n\t#print(i)\n\tif getlog(i,sumofdig(i),max(math.ceil(math.log( i,max(2,sumofdig(i)) ) ),20) ):\n\t\tprint(i)\n\t\ta.append(i)\n\t#if i%10 == 0:\n\t\t#print(i)\nprint(*a)","repo_name":"pondus314/random","sub_path":"pubquiz/fifth.py","file_name":"fifth.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37981406923","text":"###################################################\n# creates csv for users for each connect instance #\n# and uploads teh csv fiel to an s3 bucket #\n###################################################\n\n# Requirements:\n# - Connect - Read Access\n# - S3 - Write access only to bucket where csv files are saved\n\n# Expected event input, s3_object_prefix is optional :\n# {\n# \"s3_bucket\": \"s3 bucket\",\n# \"s3_object_prefix\": \"connect_backup/\"\n# }\n\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\nfrom datetime import datetime\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef upload_file(file_name, bucket, object_name=None):\n # If S3 object_name was not specified, use file_name\n file_name = str(file_name)\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True\n\n# connect backup users function\ndef connect_backup_users(instance):\n azn_connect = boto3.client('connect')\n # created variable used for user backup\n instance_id_management = instance['IdentityManagementType']\n\n### Start of getting routing profiles\n routing_profiles_raw = azn_connect.list_routing_profiles(InstanceId=instance['Id'])\n routing_profiles = routing_profiles_raw['RoutingProfileSummaryList']\n### Start of getting routing profiles\n\n### Start of getting security profiles\n security_profiles_raw = azn_connect.list_security_profiles(InstanceId=instance['Id'])\n security_profiles = security_profiles_raw['SecurityProfileSummaryList']\n### End of getting security profiles\n\n### Start of backing up Users\n # get list of users in instance\n users_raw = azn_connect.list_users(InstanceId=instance['Id'])\n users = users_raw['UserSummaryList']\n\n #set some variables for later use\n user_num = 1\n users_output = {}\n # create csv file and add columns\n csv_output = instance['InstanceAlias']+\".users.csv\"\n user_f = open('/tmp/'+csv_output, 'w')\n # setup csv output depending on the id management\n if instance_id_management == 'SAML':\n user_f.write(\"first name,last name,user login,routing profile name,security_profile_name_1|security_profile_name_2,phone type (soft/desk),phone number,soft phone auto accept (yes/no),ACW timeout (seconds)\\n\")\n if instance_id_management == 'CONNECT_MANAGED':\n user_f.write(\"first name,last name,email address,password,user login,routing profile name,security_profile_name_1|security_profile_name_2,phone type (soft/desk),phone number,soft phone auto accept (yes/no),ACW timeout (seconds)\\n\")\n ##### need to add the other connect id management at a later date #####\n for user in users:\n # get details of user config\n user_raw = azn_connect.describe_user(UserId=user['Id'], InstanceId=instance['Id'])\n user = user_raw['User']\n ## prepare user details for CSV\n user_indentity = user['IdentityInfo']\n user_phone_config = user['PhoneConfig']\n # convert ID's to names for CSV file\n for routing_profile in routing_profiles:\n if routing_profile['Id'] == user['RoutingProfileId']:\n user.update({'RoutingProfileId': routing_profile['Name']}) \n\n # add user to user json output \n user_output = {'user'+str(user_num) : user}\n users_output.update(user_output)\n user_num = user_num + 1\n # converting security profiles for csv file\n sec_profile_dectected = 0\n user_security_profile_output = \"\"\n for 
security_profile in security_profiles:\n if security_profile['Id'] in user['SecurityProfileIds']:\n if sec_profile_dectected != 0:\n user_security_profile_output = user_security_profile_output + \"|\" \n user_security_profile_output = user_security_profile_output + security_profile['Name']\n sec_profile_dectected = sec_profile_dectected + 1 \n # converting phone type for csv\n if user_phone_config['PhoneType'] == 'SOFT_PHONE':\n user_phone_type = 'soft'\n else:\n user_phone_type = 'desk'\n # converting auto accept for csv\n if str(user_phone_config['AutoAccept']) == 'FALSE':\n user_auto_accept = 'no'\n else:\n user_auto_accept = 'yes' \n\n # write csv output to file depending on the id management\n if instance_id_management == 'SAML':\n user_f.write( user_indentity['FirstName']+\",\"+ \\\n user_indentity['LastName']+\",\"+ \\\n user['Username']+\",\"+ \\\n user['RoutingProfileId']+\",\"+ \\\n user_security_profile_output+\",\"+ \\\n user_phone_type +\",\"+ \\\n user_phone_config['DeskPhoneNumber']+\",\"+ \\\n user_auto_accept+\",\"+ \\\n str(user_phone_config['AfterContactWorkTimeLimit'])+\"\\n\")\n if instance_id_management == 'CONNECT_MANAGED':\n user_f.write( user_indentity['FirstName']+\",\"+ \\\n user_indentity['LastName']+\",\"+ \\\n user_indentity['Email']+\",\"+ \\\n \",\"+ \\\n user['Username']+\",\"+ \\\n user['RoutingProfileId']+\",\"+ \\\n user_security_profile_output+\",\"+ \\\n user_phone_type +\",\"+ \\\n user_phone_config['DeskPhoneNumber']+\",\"+ \\\n user_auto_accept+\",\"+ \\\n str(user_phone_config['AfterContactWorkTimeLimit'])+\"\\n\")\n logger.info(instance['InstanceAlias']+': '+str(user_num-1)+' users backed up')\n user_f.close()\n return csv_output\n### End of backing up users function\n\n##### Start of Main lambda function #####\ndef lambda_handler(event, context):\n s3_bucket = event['s3_bucket']\n s3_object_prefix = event['s3_object_prefix']\n azn_connect_instance = boto3.client('connect')\n instances_raw = azn_connect_instance.list_instances()\n # creates a variables that just contains the Instance list\n instances = instances_raw['InstanceSummaryList']\n instances_num = len(instances)\n logger.info(\"Number of Connect Instances : \" + str(instances_num))\n\n for instance in instances:\n # back up users to csv file\n csv_output = connect_backup_users(instance)\n csv_output = str(csv_output)\n # upload csv file to s3 bucket\n datetime_now = datetime.now()\n datetime_now = datetime_now.strftime(\"%Y-%m-%d_%H:%M.\")\n\n if s3_object_prefix is not None:\n s3_object = str(s3_object_prefix + datetime_now +csv_output)\n upload_status = upload_file('/tmp/'+csv_output, s3_bucket, s3_object)\n else:\n upload_status = upload_file('/tmp/'+csv_output, s3_bucket)\n s3_object = str(datetime_now + csv_output)\n \n logger.info(instance['InstanceAlias'] +' users: s3://' + s3_bucket + '/' + s3_object)\n##### End of Main lambda function #####","repo_name":"MWarren1/Amazon-Connect-Config-Backup","sub_path":"archived-scripts/connect-backup-users-lambda.py","file_name":"connect-backup-users-lambda.py","file_ext":"py","file_size_in_byte":7260,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
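For a quick local smoke test, the handler above can be invoked directly with an event shaped like the one documented in its header comments. Note that the handler reads event['s3_object_prefix'] unconditionally, so include that key even though the header calls it optional. The bucket name below is a placeholder, and valid AWS credentials with Connect read and S3 write access are assumed.

# Hypothetical local invocation of the Lambda handler above (placeholder bucket name).
if __name__ == '__main__':
    sample_event = {
        's3_bucket': 'my-connect-backup-bucket',
        's3_object_prefix': 'connect_backup/',
    }
    lambda_handler(sample_event, None)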
+{"seq_id":"42159186751","text":"# -*- coding: utf-8 -*-\r\nimport async_timeout, asyncio, aiohttp\r\nfrom aiohttp.client import ClientSession\r\n\r\nfrom thrift.transport.TTransport import TTransportBase\r\nfrom thrift.transport.TTransport import TMemoryBuffer\r\nfrom thrift.transport.TTransport import TTransportException\r\n\r\nfrom frugal.aio.transport import FTransportBase\r\nfrom frugal.context import FContext\r\nfrom frugal.aio.transport.http_transport import FHttpTransport\r\nfrom frugal.exceptions import TTransportExceptionType\r\n\r\nclass HttpClient(FHttpTransport):\r\n\tdef __init__(self, url, timeout=5000, loop=None):\r\n\t\tsuper().__init__(0)\r\n\t\tself._url = url\r\n\t\tself.loop = loop if loop else asyncio.get_event_loop()\r\n\t\tself.setTimeout(timeout) \r\n\t\tself._headers = {\r\n\t\t\t'Content-Type': 'application/x-thrift',\r\n\t\t\t'Accept': 'application/x-thrift',\r\n\t\t\t'User-Agent': 'Python/Frugal Thrift',\r\n\t\t}\r\n\t\t\r\n\tdef setTimeout(self, timeout):\r\n\t\tself._timeout = timeout\r\n\r\n\tasync def request(self, context: FContext, payload) -> TTransportBase:\r\n\t\tpayload = payload[4:] \r\n\t\tself._payload = payload\r\n\t\tself._preflight_request_check(payload)\r\n\t\tstatus, text = await self._make_request(context, self._payload)\r\n\t\tif status == 400: \r\n\t\t\traise TTransportException(\r\n\t\t\t\ttype=400, \r\n\t\t\t\tmessage='Bad request: '+str(text) + ' :: '+ str(payload))\r\n\t\telif status == 403:\r\n\t\t\traise TTransportException(\r\n\t\t\t\ttype=403, \r\n\t\t\t\tmessage='Forbidden: '+str(text))\r\n\t\telif status == 404:\r\n\t\t\traise TTransportException(\r\n\t\t\t\ttype=404,\r\n\t\t\t\tmessage='Not Found: '+str(text))\r\n\t\telif status == 410:\r\n\t\t\tpass\r\n\t\telif status == 500:\r\n\t\t\traise TTransportException(\r\n\t\t\t\ttype=500,\r\n\t\t\t\tmessage='Backend Error: '+str(text))\r\n\t\telif status >= 300:\r\n\t\t\traise TTransportException(\r\n\t\t\t\ttype=TTransportExceptionType.UNKNOWN,\r\n\t\t\t\tmessage='request errored with {0} and message {1}'.format(\r\n\t\t\t\t\tstatus, str(text)\r\n\t\t\t\t\t))\r\n\t\treturn TMemoryBuffer(text)\r\n\t\t\r\n\tasync def _make_request(self, context:FContext, payload):\r\n\t\tsem = asyncio.Semaphore(200)\r\n\t\tconn = aiohttp.TCPConnector(use_dns_cache=True, loop=self.loop, limit=0)\r\n\t\tasync with sem:\r\n\t\t\tasync with ClientSession(connector=conn) as session:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif self._timeout > 0:\r\n\t\t\t\t\t\twith async_timeout.timeout(self._timeout / 1000):\r\n\t\t\t\t\t\t\tasync with session.post(self._url, \r\n\t\t\t\t\t\t\t\t\t\t\t\tdata=payload,\r\n\t\t\t\t\t\t\t\t\t\t\t\theaders=self._headers) \\\r\n\t\t\t\t\t\t\t\tas response:\r\n\t\t\t\t\t\t\t\treturn response.status, await response.content.read()\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tasync with session.post(self._url,data=payload,headers=self._headers) as response:\r\n\t\t\t\t\t\t\treturn response.status, await response.content.read()\r\n\t\t\t\texcept asyncio.TimeoutError:\r\n\t\t\t\t\traise TTransportException(\r\n\t\t\t\t\t\ttype=TTransportExceptionType.TIMED_OUT,\r\n\t\t\t\t\t\tmessage='request timed out'\r\n\t\t\t\t\t\t)","repo_name":"Alnyz/AsyncLine","sub_path":"AsyncLine/http_client.py","file_name":"http_client.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"18"}
+{"seq_id":"70565816041","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name = 'contacts'\n\nurlpatterns = [\n url(r'^create/', views.create, name='create'),\n url(r'^(?P[0-9]+)/delete/', views.delete, name='delete'),\n url(r'^(?P[0-9]+)/edit/', views.edit, name='edit'),\n url(r'^(?P[0-9]+)/$', views.detail, name='detail'),\n url(r'^$', views.index, name='index'),\n url(r'^new_organization/', views.new_organization, name='new_organization'),\n url(r'^organization/(?P[0-9]+)/$', views.detail_organization, name='detail_organization'),\n]\n\n\nr'^contacts/'\n","repo_name":"EricFries/contacts_manager","sub_path":"contacts_manager/apps/contacts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"32231650210","text":"import psycopg, os, logging\n\nPOSTGRES = os.environ[\"DATABASE_URL\"]\n\nclass Casino():\n def __init__(self, credits, client, ctx):\n self.credits = credits\n self.client = client\n self.ctx = ctx\n self.view = None\n\n def extract_user_credits(self, user_id):\n with psycopg.connect(POSTGRES) as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"SELECT * FROM credits WHERE user_id = %s\",\n (user_id,)\n )\n extracted_user = cur.fetchone()\n \n if not extracted_user:\n return 0\n else:\n return extracted_user[2]\n\n async def wager_credits(self, user_id):\n user_credits = self.extract_user_credits(user_id)\n\n if self.credits > user_credits:\n await self.ctx.send(\"You do not have sufficient credits, you have \" + str(user_credits) + \" credits.\") \n return False\n \n with psycopg.connect(POSTGRES) as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"UPDATE credits SET credit = credit - %s WHERE user_id = %s\",\n (self.credits, user_id)\n )\n\n if cur.rowcount == 0:\n logging.warning(\"User wagered \" + str(self.credits) + \" but was not found in database.\")\n return\n\n conn.commit()\n \n return True\n\n \n def multiplied_credits(self, user_id, credits_won):\n with psycopg.connect(POSTGRES) as conn:\n with conn.cursor() as cur:\n cur.execute(\n \"UPDATE credits SET credit = credit + %s WHERE user_id = %s\",\n (credits_won, user_id)\n )\n if cur.rowcount == 0:\n logging.warning(\"User won \" + str(credits_won) + \" but was not found in database.\")\n return\n\n conn.commit()\n ","repo_name":"austinlhx/Swiss","sub_path":"casino/casino.py","file_name":"casino.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"2436769219","text":"import socket\nimport json\nimport struct\n\ndef controllerGenerator():\n controller = {\n \"steer\": 0.0,\n \"throttle\": 0.0,\n \"roll\": 0.0,\n \"pitch\": 0.0,\n \"yaw\": 0.0,\n \"jump\": False,\n \"boost\": False,\n \"use_item\": False,\n \"chat\": 0\n }\n\n return controller\n\ndef createHeader(jString):\n length = len(jString.encode('utf-8'))\n header = struct.pack(\"H\",length)\n return header\n\n\n\nclass SocketBot():\n def __init__(self,):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.port = 8085\n self.socket.connect((\"localhost\",8085))\n\n def recievePacket(self,):\n recieved = self.socket.recv(1024)\n header = struct.unpack(\"H\", recieved[:2])[0]\n recieved = recieved[2:]\n\n while len(recieved) < header:\n recieved += self.socket.recv(1024)\n\n packet = json.loads(recieved,encoding='utf-8')\n return packet\n\n def sendControls(self,controler):\n c_string = json.dumps(controler)\n header = createHeader(c_string)\n controlerBytes = header + bytes(c_string.encode('utf-8'))\n self.socket.sendall(controlerBytes)\n\n\n\n def getOutput(self):\n packet = self.recievePacket()\n print(packet)\n blankControls = controllerGenerator()\n self.sendControls(blankControls)\n\n\nif __name__ == \"__main__\":\n bot = SocketBot()\n while True:\n bot.getOutput()\n\n\n\n\n\n\n","repo_name":"oxrock/python-rlbot-socket-example","sub_path":"pythonExample.py","file_name":"pythonExample.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"72907482599","text":"import os\nfrom datetime import timedelta\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = os.getenv('SECRET_KEY', 'no_secret')\n\nDEBUG = os.getenv('DEBUG', 'True') == 'True'\n\nALLOWED_HOSTS = ['*']\n\nINSTALLED_APPS = [\n 'shopify',\n 'rest_framework',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.common.CommonMiddleware',\n]\n\nROOT_URLCONF = 'shopify.urls'\n\nWSGI_APPLICATION = 'shopify.wsgi.application'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'postgres',\n 'USER': 'postgres',\n 'PASSWORD': 'postgres',\n 'HOST': 'postgres',\n 'PORT': '5432',\n }\n}\n\nTIME_ZONE = os.getenv('TZ', 'UTC')\n\nUSE_I18N = False\n\nREST_FRAMEWORK = {\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n ),\n 'DEFAULT_PARSER_CLASSES': (\n 'rest_framework.parsers.JSONParser',\n ),\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.IsAuthenticatedOrReadOnly',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',\n ),\n}\n","repo_name":"kahrabian/shopify","sub_path":"shopify/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"29595111445","text":"# -*- coding: utf-8 -*-\n\n# Restframework Serializers\nfrom rest_framework import serializers\n\n# App Models\nfrom .models import Phoenicopterus\n\n\nclass PhoenicopterusSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Phoenicopterus\n fields = (\n 'url',\n 'pk',\n 'name',\n 'description',\n 'age',\n 'genre',\n 'created_date',\n 'updated_date'\n )\n","repo_name":"angiealejo/flamingo","sub_path":"ruber/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"}
+{"seq_id":"74716112039","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nEscola Politecnica da Universidade de São Paulo\nDepartamento de Engenharia Mecatrônica e de Sistemas Mecânicos - PMR\n\n@author: Flávia Piñeiro Nery and Matheus Alves Ivanaga\n@advisor: Larissa Driemeier\n\nThis script contains the functions used to generate a failure analysis metamodel.\n\nUse run() function to configure and call the plotting functions.\n\"\"\"\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression, ElasticNet\nfrom sklearn.preprocessing import PolynomialFeatures, normalize\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.model_selection import validation_curve, KFold, cross_val_predict,cross_val_score\nfrom mpl_toolkits.mplot3d import Axes3D \nfrom matplotlib import cm\nfrom sklearn.utils.testing import ignore_warnings\nfrom sklearn.exceptions import ConvergenceWarning\n\ndef PolynomialRegression(degree=1,**kwargs):\n return make_pipeline(PolynomialFeatures(degree),LinearRegression(**kwargs))\n\ndef ElasticNetRegression(degree=1,alpha=1,l1_ratio=0.5):\n return make_pipeline(PolynomialFeatures(degree),ElasticNet(alpha=alpha,l1_ratio=l1_ratio,normalize=True))\n \n \n########################################################################################\n# Fitting Polynomial Regression to the dataset\n########################################################################################\n@ignore_warnings(category=ConvergenceWarning)\ndef PolyReg(x,y,deg,seed=0,splits_start=2, splits_stop=8, savefig=False, elasticnet=False):\n splits_range = range(splits_start,splits_stop+1) \n rows = len(splits_range)//2 \n fig, ax = plt.subplots(rows, 2,constrained_layout=True,figsize=(10,10))\n \n if elasticnet:\n poly_reg = ElasticNetRegression(degree=deg,alpha=elasticnet[0],l1_ratio=elasticnet[1])\n if deg == 1:\n fig.suptitle('Regressão Linear (alfa={},beta={})'.format(elasticnet[0],elasticnet[1]), fontsize=16)\n else:\n fig.suptitle('Regressão Polinomial Grau {} (alfa={},beta={})'.format(deg,elasticnet[0],elasticnet[1]), fontsize=16)\n else:\n poly_reg = PolynomialRegression(degree=deg)\n if deg == 1:\n fig.suptitle('Regressão Linear', fontsize=16)\n else:\n fig.suptitle('Regressão Polinomial Grau {}'.format(deg), fontsize=16)\n \n k=0\n pred_color = [1,0.9-(deg%5)/6,(deg%5)/6]\n for i in range(rows):\n for j in range(2):\n cv_splits = splits_range[k]\n k+=1\n cv = KFold(n_splits=cv_splits, random_state=seed, shuffle=True)\n best_score = -1000\n poly_reg_scores = []\n for train_index, test_index in cv.split(x): \n X_train, X_test, y_train, y_test = x[train_index], x[test_index], y[train_index], y[test_index]\n poly_reg.fit(X_train, y_train)\n r2_score = poly_reg.score(X_test, y_test)\n poly_reg_scores.append(r2_score)\n \n if r2_score > best_score:\n X_train_best, X_test_best, y_train_best, y_test_best = (X_train, X_test, y_train, y_test)\n best_score = r2_score\n \n poly_reg.fit(X_train_best, y_train_best)\n y_pred = poly_reg.predict(X_test_best)\n if elasticnet:\n coef = poly_reg.named_steps['elasticnet'].coef_\n intercept = poly_reg.named_steps['elasticnet'].intercept_\n coef[0] = intercept\n else:\n coef = poly_reg.named_steps['linearregression'].coef_[0]\n coef[0] = poly_reg.named_steps['linearregression'].intercept_\n poly_curve = np.poly1d(np.flip(coef))\n x_range = np.linspace(-0.4,0.4,50)\n ax[i,j].plot(x_range,poly_curve(x_range),color=pred_color,label = \"Previsões do modelo\")\n 
ax[i,j].scatter(X_test_best, y_pred, color=pred_color)\n ax[i,j].scatter(X_test_best, y_test_best,color='navy',label = \"Dados de Teste\")\n ax[i,j].scatter(X_train_best, y_train_best, color='lightblue', label=\"Dados de Treino\")\n ax[i,j].set_title('Divisões: {} R²: {:.5f}'.format(cv_splits,best_score))\n ax[i,j].set_xlabel('Triaxialidade')\n ax[i,j].set_ylabel('Deformação Plástica Equiv.')\n ax[i,j].legend()\n \n if savefig:\n if elasticnet:\n plt.savefig('polyreg_deg{}_alpha{}_beta{}.png'.format(deg,elasticnet[0],elasticnet[1]), bbox_inches='tight')\n else:\n plt.savefig('polyreg_deg{}.png'.format(deg), bbox_inches='tight')\n plt.show()\n return cv.split(x)\n\n\n########################################################################################\n# Plotting Polynomial Regression validation surface\n########################################################################################\ndef Poly_Validation_Surfaces(x,y,deg_range=np.arange(1, 7),seed=0,savefig=False):\n split_range=np.arange(2,11)\n test_scores=np.zeros((len(split_range),len(deg_range)))\n train_scores=np.zeros((len(split_range),len(deg_range)))\n for cv_splits in split_range:\n cv = KFold(n_splits=cv_splits, random_state=seed, shuffle=True)\n train_out, test_out = validation_curve(PolynomialRegression(), x, y,'polynomialfeatures__degree', deg_range,cv=cv,scoring='r2')\n test_scores[cv_splits-split_range[0]] = np.copy(np.mean(test_out, axis=1))\n train_scores[cv_splits-split_range[0]] = np.copy(np.mean(train_out, axis=1))\n \n for i in range(test_scores.shape[0]):\n for j in range(test_scores.shape[1]):\n test_scores[i,j] = 0 if test_scores[i,j] <=0 else test_scores[i,j]\n \n \n X_plot, Y_plot = np.meshgrid(deg_range,split_range)\n \n max_id= (np.where(test_scores == np.amax(test_scores))[0][0],np.where(test_scores == np.amax(test_scores))[1][0])\n\n fig = plt.figure(figsize=(15,10))\n ax = fig.gca(projection='3d')\n \n # Plot the surface.\n surf = ax.plot_surface(X_plot, Y_plot, test_scores, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n # Add a color bar which maps values to colors.\n fig.colorbar(surf, shrink=0.5, aspect=5)\n fig.suptitle('Validation Surface', fontsize=16)\n ax.set_xlabel('graus')\n ax.set_ylabel('nº de divisões')\n ax.set_zlabel('Score médio da validação cruzada')\n ax.set_title('Score Máx.: {:.5f} -> [grau = {}, nº de divisões = {}]'.format(test_scores[max_id],deg_range[max_id[1]],split_range[max_id[0]]))\n\n if savefig:\n plt.savefig('val_surf.png', bbox_inches='tight')\n plt.show()\n \n return train_scores, test_scores\n\n########################################################################################\n# Plotting ElasticNet Regression validation matrix\n########################################################################################\n@ignore_warnings(category=ConvergenceWarning)\ndef Regularization_Matrix(x,y,deg_range=np.arange(1, 11),split_range=np.arange(2,9),seed=0,savefig=False):\n alpha_range=np.linspace(0.00001,0.1,11)\n penalty_range=np.linspace(0,1,11)\n lin = len(alpha_range)\n col = len(penalty_range)\n param_matrix=np.zeros((lin,col))\n param_matrix_labels=np.zeros((lin,col), dtype = 'object')\n test_scores=np.zeros((len(split_range),len(deg_range)))\n train_scores=np.zeros((len(split_range),len(deg_range)))\n i,j,max_param=0,0,0\n for alpha in alpha_range:\n for beta in penalty_range:\n for cv_splits in split_range:\n cv = KFold(n_splits=cv_splits, random_state=seed, shuffle=True)\n train_out, test_out = 
validation_curve(ElasticNetRegression(alpha=alpha,l1_ratio=beta), x, y,'polynomialfeatures__degree', deg_range,cv=cv,scoring='r2')\n test_scores[cv_splits-split_range[0]] = np.copy(np.mean(test_out, axis=1))\n train_scores[cv_splits-split_range[0]] = np.copy(np.mean(train_out, axis=1))\n \n max_id= (np.where(test_scores == np.amax(test_scores))[0][0],np.where(test_scores == np.amax(test_scores))[1][0])\n param_matrix[i,j]=test_scores[max_id]\n param_matrix_labels[i,j]=(\"{:.4f}\\n({}g,{}d)\".format(test_scores[max_id],deg_range[max_id[1]],split_range[max_id[0]]))\n \n if max_param < param_matrix[i,j]:\n max_param = param_matrix[i,j]\n max_param_label = \"Melhor resultado: {:.5f} -> [alfa = {},beta = {}]\".format(max_param,alpha,beta)\n j+=1\n \n j=0\n i+=1\n \n fig, ax = plt.subplots(figsize=(15,10))\n \n mat = ax.imshow(param_matrix, cmap=plt.cm.coolwarm,extent =[0,col,0,lin])\n \n param_matrix_labels = np.flip(np.transpose(param_matrix_labels),1)\n for i in range(col):\n for j in range(lin):\n ax.text(i+0.5,j+0.5, param_matrix_labels[i,j], va='center', ha='center')\n \n \n fig.colorbar(mat).set_label('Score')\n # fig.suptitle(max_param_label, fontsize=16,x=0.6)\n ax.set_xticklabels([(\"(Ridge)\\n{:.1f}\".format(x) if x==0 else \"(Lasso)\\n{:.1f}\".format(x) if x==1 else \"{:.1f}\".format(x)) for x in penalty_range])\n ax.set_yticklabels([\"{:.3f}\".format(x) for x in alpha_range][::-1])\n ax.set_xticks(np.linspace(0.5,col-0.5,col))\n ax.set_yticks(np.linspace(0.5,lin-0.5,lin))\n ax.set_xlabel(r\"$\\beta$\",fontsize=14)\n ax.set_ylabel(r\"$\\alpha$\",fontsize=14)\n ax.xaxis.set_ticks_position('top')\n ax.xaxis.set_label_position('top')\n ax.set_title(\"(g = grau do polinômio, d = número de divisões)\",y=-0.05)\n \n \n if savefig:\n plt.savefig('val_mat.png', bbox_inches='tight')\n \n \n\n########################################################################################\n# Fitting Polynomial Regression to the dataset\n########################################################################################\n@ignore_warnings(category=ConvergenceWarning)\ndef PolyReg3D(x,y,deg,seed=0,splits_start=2, splits_stop=8, savefig=False,\n elasticnet=False, trd_val_name=\"Ângulo de Lode\",trd_val_lim=(-1,1)):\n splits_range = range(splits_start,splits_stop+1) \n rows = len(splits_range)//2 \n fig = plt.figure(figsize=(12,10))\n \n if elasticnet:\n poly_reg = ElasticNetRegression(degree=deg,alpha=elasticnet[0],l1_ratio=elasticnet[1])\n if deg == 1:\n fig.suptitle('Regressão Linear (alfa={},beta={})'.format(elasticnet[0],elasticnet[1]), fontsize=16)\n else:\n fig.suptitle('Regressão Polinomial Grau {} (alfa={},beta={})'.format(deg,elasticnet[0],elasticnet[1]), fontsize=16)\n else:\n poly_reg = PolynomialRegression(degree=deg)\n if deg == 1:\n fig.suptitle('Regressão Linear', fontsize=16)\n else:\n fig.suptitle('Regressão Polinomial Grau {}'.format(deg), fontsize=16)\n \n k=0\n pred_color = [1,0.9-(deg%5)/6,(deg%5)/6]\n for i in range(rows):\n for j in range(2):\n cv_splits = splits_range[k]\n k+=1\n cv = KFold(n_splits=cv_splits, random_state=seed, shuffle=True)\n best_score = -10000\n poly_reg_scores = []\n for train_index, test_index in cv.split(x): \n X_train, X_test, y_train, y_test = x[train_index], x[test_index], y[train_index], y[test_index]\n poly_reg.fit(X_train, y_train)\n r2_score = poly_reg.score(X_test, y_test)\n poly_reg_scores.append(r2_score)\n \n if r2_score > best_score or (i,j)==(0,0):\n X_train_best, X_test_best, y_train_best, y_test_best = (X_train, X_test, y_train, y_test)\n 
best_score = r2_score\n \n poly_reg.fit(X_train_best, y_train_best)\n y_pred = poly_reg.predict(X_test_best)\n if elasticnet:\n coef = poly_reg.named_steps['elasticnet'].coef_\n coef[0] = poly_reg.named_steps['elasticnet'].intercept_[0]\n else:\n coef = poly_reg.named_steps['linearregression'].coef_[0]\n coef[0] = poly_reg.named_steps['linearregression'].intercept_[0]\n \n \n ax = fig.add_subplot(rows,2,k,projection='3d')\n # m=0\n # n=0\n # for i in range(X_train_best.shape[0]):\n # if X_train_best[i,0] < 0:\n # m=i\n # elif X_train_best[i,0] < 0.4:\n # n=i\n \n # ax.scatter(X_train_best[n+1:,0], X_train_best[n+1:,1], y_train_best[n+1:], marker='.', color='magenta',s=100, label=\"Triax. > 0.4\")\n # ax.scatter(X_train_best[m+1:n+1,0], X_train_best[m+1:n+1,1], y_train_best[m+1:n+1], marker='.', color='darkviolet',s=100, label=\"Triax. > 0\")\n # ax.scatter(X_train_best[:m+1,0], X_train_best[:m+1,1], y_train_best[:m+1], marker='.', color='red',s=100, label=\"Dados de Treino\")\n ax.scatter(X_train_best[:,0], X_train_best[:,1], y_train_best, marker='.', color='red',s=100, label=\"Dados do treino\")\n ax.scatter(X_test_best[:,0], X_test_best[:,1], y_test_best, marker='.', color='green',s=100,label = \"Dados do teste\")\n ax.scatter(X_test_best[:,0], X_test_best[:,1], y_pred, marker='.', color='blue',s=100,label = \"Previsões do modelo\")\n ax.set_xlabel(\"Triaxialidade\")\n ax.set_ylabel(trd_val_name)\n ax.set_zlabel(\"Deformação Plástica Equiv.\")\n xs = np.linspace(-0.6,0.5,50)\n ys = np.linspace(trd_val_lim[0],trd_val_lim[1],50)\n X,Y = np.meshgrid(xs,ys)\n inpt = np.column_stack((X.ravel(),Y.ravel()))\n poly_reg.fit(X_train_best, y_train_best)\n Z = poly_reg.predict(inpt)\n Z[Z<-1]=np.nan\n Z[Z<-0.5]=-0.5\n Z=Z.reshape((50,50)) \n plt.locator_params(axis='y', nbins=5)\n ax.plot_surface(X,Y,Z, alpha=0.5)\n ax.view_init(azim=40)\n ax.legend(loc=\"upper right\",bbox_to_anchor=(0.9, 0.95))\n ax.set_title('Divisões: {} R²: {:.5f}'.format(cv_splits,best_score))\n \n plt.tight_layout(rect=[0,0,1,0.95]) \n if savefig:\n if elasticnet:\n if trd_val_name==\"Ângulo de Lode\":\n plt.savefig('polyreg3d_lode_deg{}_alpha{}_beta{}.png'.format(deg,elasticnet[0],elasticnet[1]), bbox_inches='tight',pad_inches=0.2)\n else: \n plt.savefig('polyreg3d_deg{}_alpha{}_beta{}.png'.format(deg,elasticnet[0],elasticnet[1]), bbox_inches='tight',pad_inches=0.2)\n else:\n if trd_val_name==\"Ângulo de Lode\":\n plt.savefig('polyreg3d_lode_deg{}.png'.format(deg), bbox_inches='tight',pad_inches=0.2)\n else:\n plt.savefig('polyreg3d_deg{}.png'.format(deg), bbox_inches='tight',pad_inches=0.2)\n plt.show()\n return None\n\n###############################################################################\n#RUN PLOTTING FUNCTIONS\n#Functions for 2d dataset:\n#- PolyReg\n#- Poly_Validation_Surfaces\n#Functions for 3d dataset:\n#- PolyReg3D\n#Functions for both datasets:\n#- Regularization_Matrix\n###############################################################################\ndef run(s):\n # Fixing random state for reproducibility\n np.random.seed(s)\n\n #############################\t\n #Importing the dataset\n #\n #For 2d model: comment lines with #3d\n #For 3d model: comment lines with #2d \n #############################\n # dataset=pd.read_csv('PontosMetamodelo_19.csv',sep=';') #2d\n dataset=pd.read_csv('PontosMetamodeloComLode.csv',sep=';') #3d\n # x = dataset['x'].to_numpy().reshape(-1, 1) #2d\n x = np.zeros((len(dataset['x']),2)) #3d\n x[:,0] = dataset['x'].to_numpy() #3d\n x[:,1] = dataset['z'].to_numpy() #3d\n y = 
dataset['y'].to_numpy().reshape(-1, 1) \n \n ####################################\n # Plotting triaxiality vs lode angle\n ####################################\n # plt.figure(figsize=(8,6))\n # plt.scatter(x[:,0],x[:,1])\n # plt.title(\"Triaxialidade x Ângulo de Lode\", fontsize=16)\n # plt.xlabel(\"Triaxialidade\")\n # plt.ylabel(\"Ângulo de Lode\")\n # plt.savefig(\"triax_vs_lode.png\")\n # plt.show()\n \n \n PolyReg3D(x,y,deg=2,seed=s,savefig=True)\n PolyReg3D(x,y,deg=3,seed=s,elasticnet=[0.004,0],savefig=True)\n # PolyReg3D(x,y,deg=6,seed=s,elasticnet=[0.01,0],savefig=True,trd_val_name=\"Tensão de Escoamento\",trd_val_lim=(200,300))\n # Regularization_Matrix(x,y,seed=s,savefig=True)\n # PolyReg(x,y,deg=1,seed=s) \n # PolyReg(x,y,deg=2,seed=s,savefig=True)\n # PolyReg(x,y,deg=2,seed=s,elasticnet=[0.002,0])\n # PolyReg(x,y,deg=5,seed=s)\n # Poly_Validation_Surfaces(x,y,deg_range=np.arange(1,10),seed=s)\n\n \ndef main():\n plt.close('all')\n run(0)\n \nif __name__=='__main__':\n main()","repo_name":"flavianery/MetamodeloFalhasMetais","sub_path":"run_metamodel.py","file_name":"run_metamodel.py","file_ext":"py","file_size_in_byte":17055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"24474039988","text":"#######################################################################\n#####VALID COOKIES MUST BE USED OTHER WISE WILL ERROR OUT OR CRASH#####\n###########IF THE PROGRAM ERRORS THEN A NEW KEY IS NEEDED##############\n#######################################################################\n\n#CURL BASH TO PYTHON COOKIES\n# https://curl.trillworks.com/\n\nimport ConvertCurlBashToPythonCookie\nimport attendance\nimport requests\nfrom bs4 import BeautifulSoup\n\n#Value for up for STARTING WEEK (default = 1)\nstartingWeek = 1\n#Value for up for FINISHING WEEK (default value should be the last week)\nfinishingWeek = 6\n\n#Manual Cookie Inserter for overiding; cookie loop @ line \n#Insert cookie header below;\n\n# =============================================================\n# =============================================================\n\n#cookie = \"\"\"\n#Insert cookie header below;\n\n\n\n#Insert Cookie header above\n#\"\"\"\n\n# =============================================================\n# =============================================================\n\n#Insert Cookie header above\n\n#Initalises and sets values to 0\n#total FINAL values for FORM periods and TOTAL Periods\nsumFormPresent = 0\nsumFormAbsent = 0\nsumFormJustified = 0\nsumFormLate = 0\nsumTotalFormPeriod = 0\n\nsumTotalPeriods = 0\nsumTotalPresents = 0\nsumTotalAbsents = 0\nsumTotalJustified = 0\nsumTotalLates = 0\n\n#temporory values which are constantly updated and refreshed in function\nformPresent = 0\nformAbsent = 0\nformJustified = 0\nformLate = 0\ntotalFormPeriod = 0\ntotalPeriods = 0\ntotalPresents = 0\ntotalAbsents = 0\ntotalJustified = 0\ntotalLates = 0\n\n#Session Cookie Loop\nprint(\"\\nEnter session cookie (instructions on how is in ReadMe.md): \")\ncookieList = []\ncookieInput = ' '\n\nwhile cookieInput != \"\":\n cookieInput = str(input(\"\"))\n if \"--compressed\" in cookieInput:\n cookieList.append(cookieInput)\n break\n cookieList.append(cookieInput)\n\ncookieStr = ('\\n'.join(cookieList))\ncookie = str(cookieStr)\n\n#Calls CurlBashToPythonCookie from ConvertCurlBashToPythonCookie.py to convert\n#cURL to python dictionary\nheaders = ConvertCurlBashToPythonCookie.CurlBashToPythonCookie(cookie)\n\n\n#Sets default attendance URL\nparentPortalURL = 'https://lynfield.mystudent.school.nz/attendance/'\nparentPortalURLList = list(parentPortalURL)\n\n#Requests for attendance data for each URL page by 1 integer increments (by 1 week increments)\nfor number in range(startingWeek, (finishingWeek + 1)):\n parentPortalURLList = list(parentPortalURL)\n #print(parentPortalURLList)\n parentPortalURLList.append(str(number))\n URL = (''.join(parentPortalURLList))\n #print(URL)\n response = requests.get(URL, headers=headers)\n try:\n formPresent, formAbsent, formJustified, formLate, totalFormPeriod, totalPeriods, totalPresents, totalAbsents, totalJustified, totalLates = attendance.attendances(response)\n \n except TypeError:\n print(\"Invalid Cookies or URLS\")\n print(\"Stopped on week {}\".format(number))\n print(\"Try again with new cookie!!\")\n #Adds attendance data to the SUM of that variable\n sumFormPresent += formPresent\n sumFormAbsent += formAbsent\n sumFormJustified += formJustified\n sumFormLate =+ formLate\n sumTotalFormPeriod += totalFormPeriod\n\n sumTotalPeriods += totalPeriods\n sumTotalPresents += totalPresents\n sumTotalAbsents += totalAbsents\n sumTotalJustified += totalJustified\n sumTotalLates += totalLates\n \n print(\"\"\"\n Week: FormPresents FormAbsents 
FormAttendanceRate: AllPresents: AllAbsents: AttendanceRate:\n {}{:8}{:16}{:15}{:4.2f}%{:19}{:15}{:13}{:4.2f}% \"\"\" \n .format(number, formPresent, formAbsent, \"\", (((formPresent + formLate + formJustified) / totalFormPeriod) * 100), \n totalPresents, totalAbsents, \"\", (((totalPresents + totalLates + totalJustified) / totalPeriods) * 100) )\n )\n\n\n\n#soup = BeautifulSoup(response.content, \"html.parser\")\n#print(soup)\n\n#Calculates the attendance rate for both FORM periods and ALL periods \nformPeriodAttendanceRate = ((sumFormPresent + sumFormJustified + sumFormLate) / sumTotalFormPeriod) * 100\nallPeriodAttendanceRate = ((sumTotalPresents + sumTotalJustified + sumTotalLates) / sumTotalPeriods) * 100\n\n#Prints out final attendance data formatted.\n\nprint(\"\\n\\nTotal Form Periods: \\n\")\nprint(\"Total form periods: %s\" % sumTotalFormPeriod)\nprint(\"Total form periods present (including late periods) : %s\" % (sumFormPresent + sumFormLate))\nprint(\"Total periods late: %s\" % sumFormLate)\nprint(\"Total form periods absent: %s\" % sumFormAbsent)\nprint(\"Total form justified absents: %s\" % sumFormJustified)\nprint(\"Form period Attendance Rate (includes late and justified form periods) : {:4.2f}%\".format(formPeriodAttendanceRate)) \n\nprint(\"\\n\\nTotal Period Attendance:\\n \")\nprint(\"Total periods: %s\" % sumTotalPeriods)\nprint(\"Total periods present (including late periods) : %s\" % (sumTotalPresents+ sumTotalLates))\nprint(\"Total periods late: %s\" % sumTotalLates)\nprint(\"Total periods absent: %s\" % sumTotalAbsents)\nprint(\"Total justified absents: %s\" % sumTotalJustified)\nprint(\"All period Attendance Rate (includes late and justified periods) : {:4.2f}%\".format(allPeriodAttendanceRate) )\n\n\n","repo_name":"ItzCino/LynAttendance","sub_path":"All 3 modules including main/AttendanceMain.py","file_name":"AttendanceMain.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"30474566593","text":"from models import Pets\nfrom start import sesssion\n\nclass PetsMethods:\n\n @classmethod\n def create_pet(cls):\n name = input('Ingrese el nombre de la Mascota: ')\n age = int(input('Ingrese la edad de la Mascota: '))\n pet = Pets(name,age)\n sesssion.add(pet)\n sesssion.commit()\n print('Mascota registrada con exito')\n\n @classmethod\n def show_all(cls):\n pets = sesssion.query(Pets).all()\n print('<--- *Mascotas Registradas* --->')\n for pet in pets:\n print(pet)\n print('<----->')\n\n @classmethod\n def show_one(cls):\n id_pet = int(input('Ingrese el ID del registro que desea consultar: '))\n pet = sesssion.query(Pets).get(id_pet)\n print(pet)\n\n @classmethod\n def delete_pet(cls):\n id_pet = int(input('Ingrese el ID del registro que desea eliminar: '))\n pet = sesssion.query(Pets).get(id_pet)\n sesssion.delete(pet)\n sesssion.commit()\n print('Registro eliminado con exito')\n\n @classmethod\n def update_pet(cls):\n id_pet = int(input('Ingrese el ID del registro que desea actualizar: '))\n pet = sesssion.query(Pets).get(id_pet)\n pet.name = input('Ingrese el nombre de la Mascota: ')\n pet.age = int(input('Ingrese la edad de la mascota: '))\n sesssion.commit()\n print('Registro actualizado')\n\n @classmethod\n def filter_name(cls):\n name = input('Ingrese el nombre de la mascota: ')\n pets = sesssion.query(Pets).filter(Pets.name == name)\n for pet in pets:\n print('----')\n print(pet)\n\n @classmethod\n def filter_first(cls):\n name = input('Ingrese el nombre de la mascota: ')\n pets = sesssion.query(Pets).filter(Pets.name == name) \n print(pets) \n\n @classmethod\n def filter_age(cls):\n age = input('Ingrese la edad de la mascota: ')\n pets = sesssion.query(Pets).filter(Pets.age == age) #Pets.age<=age\n for pet in pets:\n print('----')\n print(pet)\n\n\n#PetsMethods.create_pet()\n#PetsMethods.show_all()\n#PetsMethods.show_one()\n#PetsMethods.delete_pet()\n#PetsMethods.update_pet()\nPetsMethods.filter_age()\n","repo_name":"nestorjuarezR/Python_PSQL","sub_path":"ORMs/pests.py","file_name":"pests.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"43302345283","text":"import logging\nfrom typing import Dict, List, Optional, Union\n\nfrom user.weatherlink_live.packets import NotInPacket, DavisConditionsPacket\nfrom user.weatherlink_live.static import PacketSource, targets, labels\nfrom user.weatherlink_live.static.packets import DataStructureType, KEY_TEMPERATURE, KEY_HUMIDITY, KEY_DEW_POINT, \\\n KEY_HEAT_INDEX, KEY_WET_BULB, KEY_WIND_DIR, KEY_RAIN_AMOUNT_DAILY, KEY_RAIN_SIZE, KEY_RAIN_RATE, \\\n KEY_SOLAR_RADIATION, KEY_UV_INDEX, KEY_WIND_CHILL, KEY_THW_INDEX, KEY_THSW_INDEX, KEY_SOIL_MOISTURE, \\\n KEY_TEMPERATURE_LEAF_SOIL, KEY_LEAF_WETNESS, KEY_TEMPERATURE_INDOOR, KEY_HUMIDITY_INDOOR, KEY_DEW_POINT_INDOOR, \\\n KEY_HEAT_INDEX_INDOOR, KEY_BARO_ABSOLUTE, KEY_BARO_SEA_LEVEL, KEY_WIND_SPEED, KEY_BATTERY_FLAG\n\nlog = logging.getLogger(__name__)\n\n\ndef _parse_option_boolean(opts: list, check_for: str) -> bool:\n if len(opts) < 1:\n return False\n\n uppercase_opts = [opt.upper() for opt in opts]\n uppercase_check_for = check_for.upper()\n\n return uppercase_check_for in uppercase_opts\n\n\nclass AbstractMapping(object):\n def __init__(self, mapping_opts: list, used_map_targets: list,\n log_success: bool = False, log_error: bool = True):\n self.mapping_opts = mapping_opts\n\n self.log_success = log_success\n self.log_error = log_error\n\n self.targets = self.__search_multi_targets(self._map_target_dict, used_map_targets)\n self._log_success(\"Mapping targets: %s\" % repr(self.targets))\n\n def __str__(self):\n return type(self).__name__ + (repr(self.mapping_opts) if self.mapping_opts else \"\")\n\n def _log_success(self, message: str, level: int = logging.DEBUG) -> None:\n if self.log_success:\n log.log(level, \"%s: %s\" % (str(self), message))\n\n def _log_error(self, message: str, level: int = logging.DEBUG) -> None:\n if self.log_error:\n log.log(level, \"%s: %s\" % (str(self), message))\n\n def _log_mapping_success(self, target: str, value: float = None):\n self._log_success(\"Mapped: %s=%s\" % (target, repr(value)))\n\n def _log_mapping_notResponsible(self, message: str):\n \"\"\"Logged when the mapper doesn't feel responsible for a packet\"\"\"\n self._log_success(\"Mapping not responsible: %s\" % message)\n\n def _log_mapping_notInPacket(self):\n self._log_success(\"Observation not found in packet\")\n\n def _parse_option_int(self, opts: list, index: int) -> int:\n try:\n return int(opts[index])\n except IndexError as e:\n raise IndexError(\"Mapping options for mapping %s incomplete: Expected at least %d parameters; got %d\" % (\n str(self), index + 1, len(opts)\n )) from e\n except ValueError as e:\n raise ValueError(\"Could not parse mapping option %d for mapping %s: Expected an integer; got %s\" % (\n index + 1, str(self), repr(opts[index])\n )) from e\n\n def __search_multi_targets(self, available_map_targets_dict: dict = (), used_map_targets=None) -> dict:\n if used_map_targets is None:\n used_map_targets = []\n\n if len(available_map_targets_dict) < 1:\n return {}\n\n target_length = min([len(target_list) for target_list in available_map_targets_dict.values()])\n for i in range(0, target_length):\n map_targets = dict([\n (k, v[i]) for k, v in available_map_targets_dict.items()\n ])\n\n if any([map_target in used_map_targets for map_target in map_targets.values()]):\n continue\n else:\n return map_targets\n\n raise RuntimeError(\"Mapping %s has all map targets used: %s\" % (\n str(self), available_map_targets_dict\n ))\n\n def map(self, packet: DavisConditionsPacket, record: dict):\n try:\n 
self._do_mapping(packet, record)\n except NotInPacket:\n self._log_mapping_notInPacket()\n pass\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n pass\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n raise NotImplementedError()\n\n def _set_record_entry(self, record: dict, key: str, value: float = None):\n record.update({key: value})\n self._log_mapping_success(key, value)\n\n @property\n def map_source_transmitter(self) -> str:\n raise NotImplementedError()\n\n @property\n def map_table(self) -> Dict[str, Union[str, list[str]]]:\n raise NotImplementedError()\n\n\nclass TMapping(AbstractMapping):\n\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 't': targets.TEMP\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['t']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_TEMPERATURE, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_TEMPERATURE: self.targets['t'],\n }\n\n\nclass THMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 't': targets.TEMP,\n 'h': targets.HUM,\n 'dp': targets.DEW_POINT,\n 'hi': targets.HEAT_INDEX,\n 'wb': targets.WET_BULB\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target_t = self.targets['t']\n target_h = self.targets['h']\n target_dp = self.targets['dp']\n target_hi = self.targets['hi']\n target_wb = self.targets['wb']\n\n self._set_record_entry(record, target_t,\n packet.get_observation(KEY_TEMPERATURE, DataStructureType.ISS, self.tx_id))\n self._set_record_entry(record, target_h,\n packet.get_observation(KEY_HUMIDITY, DataStructureType.ISS, self.tx_id))\n self._set_record_entry(record, target_dp,\n packet.get_observation(KEY_DEW_POINT, DataStructureType.ISS, self.tx_id))\n self._set_record_entry(record, target_hi,\n packet.get_observation(KEY_HEAT_INDEX, DataStructureType.ISS, self.tx_id))\n self._set_record_entry(record, target_wb,\n packet.get_observation(KEY_WET_BULB, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_TEMPERATURE: self.targets['t'],\n labels.LABEL_HUMIDITY: self.targets['h'],\n labels.LABEL_DEW_POINT: self.targets['dp'],\n labels.LABEL_HEAT_INDEX: self.targets['hi'],\n labels.LABEL_WET_BULB: self.targets['wb'],\n }\n\n\nclass WindMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 
'wind_dir': targets.WIND_DIR,\n 'wind_speed': targets.WIND_SPEED,\n 'gust_dir': targets.WIND_GUST_DIR,\n 'gust_speed': targets.WIND_GUST_SPEED\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n if packet.data_source != PacketSource.WEATHER_PUSH:\n self._log_mapping_notResponsible(\"Not a broadcast packet\")\n return\n\n target_dir = self.targets['wind_dir']\n target_speed = self.targets['wind_speed']\n\n self._set_record_entry(record, target_dir,\n packet.get_observation(KEY_WIND_DIR, DataStructureType.ISS, self.tx_id))\n self._set_record_entry(record, target_speed,\n packet.get_observation(KEY_WIND_SPEED, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_WIND_SPEED: self.targets['wind_speed'],\n labels.LABEL_WIND_DIR: self.targets['wind_dir'],\n labels.LABEL_WIND_GUST_SPEED: self.targets['gust_speed'],\n labels.LABEL_WIND_GUST_DIR: self.targets['gust_dir'],\n }\n\n\nclass RainMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n # 0: Reserved, 1: 0.01\", 2: 0.2 mm, 3: 0.1 mm, 4: 0.001\"\n self.rain_bucket_sizes = {\n 1: 0.01,\n 4: 0.001,\n 2: (1 / 25.4) * 0.2,\n 3: (1 / 25.4) * 0.1\n }\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n self.last_daily_rain_count = None\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'amount': targets.RAIN_AMOUNT,\n 'rate': targets.RAIN_RATE,\n 'count': targets.RAIN_COUNT,\n 'count_rate': targets.RAIN_COUNT_RATE,\n 'size': targets.RAIN_SIZE,\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n if packet.data_source != PacketSource.WEATHER_PUSH:\n self._log_mapping_notResponsible(\"Not a broadcast packet\")\n return\n\n target_amount = self.targets['amount']\n target_rate = self.targets['rate']\n target_count = self.targets['count']\n target_rate_count = self.targets['count_rate']\n target_size = self.targets['size']\n\n rain_bucket_factor = self.rain_bucket_factor(packet)\n self._set_record_entry(record, target_size, rain_bucket_factor)\n\n rain_rate_count = packet.get_observation(KEY_RAIN_RATE, DataStructureType.ISS, self.tx_id)\n self._set_record_entry(record, target_rate_count, rain_rate_count)\n self._set_record_entry(record, target_rate, self._multiply(rain_rate_count, rain_bucket_factor))\n\n current_daily_rain_count = packet.get_observation(KEY_RAIN_AMOUNT_DAILY, DataStructureType.ISS, self.tx_id)\n if current_daily_rain_count is None:\n self._log_error(\"Daily rain count not in packet. Skipping diff calculation\")\n return\n\n if self.last_daily_rain_count is None:\n self._log_success(\"First daily rain value\", logging.INFO)\n\n elif self.last_daily_rain_count > current_daily_rain_count:\n self._log_success(\"Last daily rain (%d) larger than current (%d). 
Probably reset\" % (\n self.last_daily_rain_count, current_daily_rain_count), logging.INFO)\n self._set_record_entry(record, target_count, current_daily_rain_count)\n self._set_record_entry(record, target_amount, self._multiply(current_daily_rain_count, rain_bucket_factor))\n\n else:\n count_diff = current_daily_rain_count - self.last_daily_rain_count\n self._set_record_entry(record, target_count, count_diff)\n self._set_record_entry(record, target_amount, self._multiply(count_diff, rain_bucket_factor))\n\n self.last_daily_rain_count = current_daily_rain_count\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @staticmethod\n def _multiply(a: Optional[float], b: Optional[float]) -> Optional[float]:\n if a is None or b is None:\n return None\n return a * b\n\n def rain_bucket_factor(self, packet) -> Optional[float]:\n rain_bucket_size = packet.get_observation(KEY_RAIN_SIZE, DataStructureType.ISS, self.tx_id)\n if rain_bucket_size is None:\n return None\n\n try:\n return self.rain_bucket_sizes[rain_bucket_size]\n except KeyError as e:\n raise KeyError(\"Unexpected rain bucket size %s\" % repr(rain_bucket_size)) from e\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_RAIN_AMOUNT: self.targets['amount'],\n labels.LABEL_RAIN_RATE: self.targets['rate'],\n labels.LABEL_RAIN_COUNT: self.targets['count'],\n labels.LABEL_RAIN_COUNT_RATE: self.targets['count_rate'],\n labels.LABEL_RAIN_SIZE: self.targets['size'],\n }\n\n\nclass SolarMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'solar': targets.SOLAR_RADIATION\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['solar']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_SOLAR_RADIATION, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_SOLAR_RADIATION: self.targets['solar'],\n }\n\n\nclass UvMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'uv': targets.UV\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['uv']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_UV_INDEX, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_UV_INDEX: self.targets['uv'],\n }\n\n\nclass WindChillMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def 
_map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'windchill': targets.WINDCHILL\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['windchill']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_WIND_CHILL, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_WIND_CHILL: self.targets['windchill'],\n }\n\n\nclass ThwMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n self.is_app_temp = _parse_option_boolean(mapping_opts, 'appTemp')\n\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n target_dict = {\n 'thw': targets.THW,\n 'app_temp': targets.APPARENT_TEMPERATURE\n } if self.is_app_temp else {\n 'thw': targets.THW\n }\n return target_dict\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['thw']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_THW_INDEX, DataStructureType.ISS, self.tx_id))\n\n if self.is_app_temp:\n target_app_temp = self.targets['app_temp']\n self._set_record_entry(record, target_app_temp,\n packet.get_observation(KEY_THW_INDEX, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, list[str]]:\n return {\n labels.LABEL_THW_INDEX: [self.targets['thw'], self.targets['app_temp']] if self.is_app_temp else [\n self.targets['thw']],\n }\n\n\nclass ThswMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n self.is_app_temp = _parse_option_boolean(mapping_opts, 'appTemp')\n\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n target_dict = {\n 'thsw': targets.THSW,\n 'app_temp': targets.APPARENT_TEMPERATURE\n } if self.is_app_temp else {\n 'thsw': targets.THSW\n }\n return target_dict\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['thsw']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_THSW_INDEX, DataStructureType.ISS, self.tx_id))\n\n if self.is_app_temp:\n target_app_temp = self.targets['app_temp']\n self._set_record_entry(record, target_app_temp,\n packet.get_observation(KEY_THSW_INDEX, DataStructureType.ISS, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, list[str]]:\n return {\n labels.LABEL_THSW_INDEX: [self.targets['thsw'], self.targets['app_temp']] if self.is_app_temp else [\n self.targets['thsw']],\n }\n\n\nclass SoilTempMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n self.sensor = self._parse_option_int(mapping_opts, 1)\n\n @property\n 
def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'soil_temp': targets.SOIL_TEMP\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['soil_temp']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_TEMPERATURE_LEAF_SOIL % self.sensor,\n DataStructureType.LEAF_SOIL, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n (labels.LABEL_SOIL_TEMPERATURE % self.sensor): self.targets['soil_temp'],\n }\n\n\nclass SoilMoistureMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n self.sensor = self._parse_option_int(mapping_opts, 1)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'soil_moisture': targets.SOIL_MOISTURE\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['soil_moisture']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_SOIL_MOISTURE % self.sensor,\n DataStructureType.LEAF_SOIL, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n (labels.LABEL_SOIL_MOISTURE % self.sensor): self.targets['soil_moisture'],\n }\n\n\nclass LeafWetnessMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n self.sensor = self._parse_option_int(mapping_opts, 1)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'leaf_wetness': targets.LEAF_WETNESS\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target = self.targets['leaf_wetness']\n\n self._set_record_entry(record, target,\n packet.get_observation(KEY_LEAF_WETNESS % self.sensor,\n DataStructureType.LEAF_SOIL, self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n (labels.LABEL_LEAF_WETNESS % self.sensor): self.targets['leaf_wetness'],\n }\n\n\nclass THIndoorMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 't': targets.INDOOR_TEMP,\n 'h': targets.INDOOR_HUM,\n 'dp': targets.INDOOR_DEW_POINT,\n 'hi': targets.INDOOR_HEAT_INDEX\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target_t = self.targets['t']\n target_h = self.targets['h']\n target_dp = self.targets['dp']\n target_hi = self.targets['hi']\n\n self._set_record_entry(record, target_t,\n packet.get_observation(KEY_TEMPERATURE_INDOOR, DataStructureType.WLL_TH))\n self._set_record_entry(record, target_h,\n packet.get_observation(KEY_HUMIDITY_INDOOR, DataStructureType.WLL_TH))\n self._set_record_entry(record, target_dp,\n packet.get_observation(KEY_DEW_POINT_INDOOR, 
DataStructureType.WLL_TH))\n self._set_record_entry(record, target_hi,\n packet.get_observation(KEY_HEAT_INDEX_INDOOR, DataStructureType.WLL_TH))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_WLL_TH\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_TEMPERATURE_INDOOR: self.targets['t'],\n labels.LABEL_HUMIDITY_INDOOR: self.targets['h'],\n labels.LABEL_DEW_POINT_INDOOR: self.targets['dp'],\n labels.LABEL_HEAT_INDEX_INDOOR: self.targets['hi'],\n }\n\n\nclass BaroMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'baro_abs': targets.BARO_ABSOLUTE,\n 'baro_sl': targets.BARO_SEA_LEVEL\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n target_abs = self.targets['baro_abs']\n target_sl = self.targets['baro_sl']\n\n self._set_record_entry(record, target_abs,\n packet.get_observation(KEY_BARO_ABSOLUTE, DataStructureType.WLL_BARO))\n self._set_record_entry(record, target_sl,\n packet.get_observation(KEY_BARO_SEA_LEVEL, DataStructureType.WLL_BARO))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_WLL_BAROMETER\n\n @property\n def map_table(self) -> Dict[str, str]:\n return {\n labels.LABEL_BARO_ABSOLUTE: self.targets['baro_abs'],\n labels.LABEL_BARO_SEA_LEVEL: self.targets['baro_sl'],\n }\n\n\nclass BatteryStatusMapping(AbstractMapping):\n def __init__(self, mapping_opts: list, used_map_targets: list, log_success: bool = False, log_error: bool = True):\n super().__init__(mapping_opts, used_map_targets, log_success, log_error)\n\n self.tx_id = self._parse_option_int(mapping_opts, 0)\n\n further_opts = mapping_opts[1:]\n try:\n self.further_targets = [targets.BATTERY_STATUS_NAMED[key] for key in further_opts]\n except KeyError as e:\n raise KeyError(\"Invalid battery remap target\") from e\n\n @property\n def _map_target_dict(self) -> Dict[str, List[str]]:\n return {\n 'battery': targets.BATTERY_STATUS\n }\n\n def _do_mapping(self, packet: DavisConditionsPacket, record: dict):\n battery_num = self.targets['battery']\n\n self._set_record_entry(record, battery_num,\n packet.get_observation(KEY_BATTERY_FLAG, tx=self.tx_id))\n for target in self.further_targets:\n self._set_record_entry(record, target,\n packet.get_observation(KEY_BATTERY_FLAG, tx=self.tx_id))\n\n @property\n def map_source_transmitter(self) -> str:\n return labels.LABEL_SOURCE_TX_ID % self.tx_id\n\n @property\n def map_table(self) -> Dict[str, list[str]]:\n return {\n labels.LABEL_BATTERY_STATUS: [self.targets['battery'], *self.further_targets],\n }\n","repo_name":"michael-slx/weewx-weatherlink-live","sub_path":"bin/user/weatherlink_live/mappers.py","file_name":"mappers.py","file_ext":"py","file_size_in_byte":27134,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"18"}
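One detail in the mappers record above that is easy to miss is the rain-bucket conversion in RainMapping: the station reports a bucket-size code plus tip counts, and the per-tip sizes in inches are hard-coded. A standalone sketch of that conversion, with the codes and sizes copied from the class (the helper name and example values are mine, not part of the driver):

# bucket-size codes -> inches per tip (codes 2 and 3 are metric buckets converted to inches)
RAIN_BUCKET_SIZES_IN = {1: 0.01, 4: 0.001, 2: 0.2 / 25.4, 3: 0.1 / 25.4}

def rain_amount_inches(tip_count, bucket_code):
    """Multiply a tip count by the per-tip bucket size, mirroring RainMapping._multiply."""
    if tip_count is None or bucket_code not in RAIN_BUCKET_SIZES_IN:
        return None
    return tip_count * RAIN_BUCKET_SIZES_IN[bucket_code]

print(rain_amount_inches(12, 2))  # 12 tips of a 0.2 mm bucket -> roughly 0.094 in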
+{"seq_id":"12222456503","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 2 20:53:43 2019\n\n@author: Laila Niazy 03660940\n\nA class for the maze with different methods\n\"\"\"\n\nimport numpy as np\n \nclass Analyse_Maze:\n \n def __init__(self, path):\n #initializing different values\n self.maze, self.width, self.height = self.generateMaze(path)\n self.initial_state = self.find_specific_states(\"S\")\n self.Trap = self.find_specific_states('T')\n self.Goal = self.find_specific_states('G')\n self.p = 0.1\n self.action = np.array(['up', 'down', 'left', 'right', 'idle'])\n self.prob = {self.action[0]:[self.p,np.subtract(1,np.multiply(2,self.p)), self.p],\n self.action[1]:[self.p,np.subtract(1,np.multiply(2,self.p)), self.p],\n self.action[2]:[self.p,np.subtract(1,np.multiply(2,self.p)), self.p],\n self.action[3]:[self.p,np.subtract(1,np.multiply(2,self.p)), self.p],\n self.action[4]:[self.p,np.subtract(1,np.multiply(2,self.p)), self.p]}\n \n def generateMaze(self, path):\n \"\"\"Return maze from textfile as a nested list with the width and height of the maze\"\"\"\n text = open(path, \"r\")\n self.maze = []\n for line in text:\n if line[0] == \"#\":\n continue\n else: \n char = [x for x in line if x != \"\\n\"] \n self.maze.append(char)\n text.close()\n return(self.maze, len(self.maze[1]), len(self.maze))\n \n def find_specific_states(self, case):\n \n '''getting the coordinates for the start, goal and trap state'''\n location = [(i,j) for i,m in enumerate(self.maze) for j,s in enumerate(m) if s == case]\n\n return location[0]\n \n def checkwall(self, s):\n '''checking if this state is a wall or outside the maze'''\n if self.maze[s[0]][s[1]] == '1' :\n return(False) \n else:\n return(True)\n \n def allowed_actions(self, i, j, is_wall=False):\n '''Based on the current states, which actions are allowed and to which \n successor states will they lead\n Return a list of allowed actions for this state (i,j)'''\n allowed_actions = []\n #mapping the different successor states to actions\n station_to_action = {(i-1,j):self.action[0], \n (i+1,j):self.action[1],\n (i,j-1):self.action[2], \n (i,j+1):self.action[3]} \n #iterate through the dict \n for s in station_to_action:\n #s is the successor states\n #check if the successor states is a wall or outside the maze\n if s[0] < 0 or s[1] > (self.width-1) or s[1] < 0 or s[0] > (self.height-1):\n continue\n elif self.checkwall(np.ravel(s)) == is_wall:\n continue\n else:\n #see if the current state is the goal because then the action \n #ideal will be taken into account\n if (i,j) == self.Goal:\n allowed_actions.append(self.action[4])\n else: \n pass\n allowed_actions.append(station_to_action[s])\n \n return allowed_actions\n \n# \n def transition_propability(self, allowed_actions, i, j, is_wall=False):\n \"\"\"Returns a dict with the transition probabilities for each action as \n a key: we get a tuple with a probability and successor state\"\"\"\n # the states that correspond to action a.\n states = {self.action[0]:[(i-1,j-1),(i-1,j),(i-1,j+1)],\n self.action[1]:[(i+1,j-1),(i+1,j),(i+1,j+1)],\n self.action[2]:[(i-1,j-1),(i,j-1),(i+1,j-1)],\n self.action[3]:[(i-1,j+1),(i,j+1),(i+1,j+1)],\n self.action[4]:[(i-1,j),(i,j),(i,j+1)]}\n #initialize a dict for the transition probabilities \n trans_prob = {}\n #iterating through the allowed actions\n for a in allowed_actions:\n #for each action a, we get the possible successor states from the dict 'states'\n coordinates = states[a]\n #make a list out of the possible successor states\n cor = 
list(coordinates)\n #get the possible probabilities for the corresponding action\n #the probabilies are in a dict and were initialized in the beginning\n pr = list(self.prob[a])\n #setting dummy variable\n x = 3\n #iterating through the list of possible successor states\n for k, c in enumerate(cor):\n #check if any of the succcessor states is outside maze\n if c[0] < 0 or c[1] > (self.width-1) or c[1] < 0 or c[0] > (self.height-1):\n if k == 2:\n pr[k-1] = pr[k-1] + pr[k]\n pr[k] = 0.0\n else:\n #if we are in any other iteration and there is a wall then add p\n #to the second entry in the probability list\n pr[k+1] = pr[k+1]+ pr[k]\n pr[k] = 0.0\n #save indices that is not in maze to del later on\n x = k\n continue\n # or is a wall \n elif self.checkwall(c) == is_wall:\n #if we are in the third iteration and there is a wall then add p\n #to the second entry in the probability list\n if k == 2:\n pr[k-1] = pr[k-1] + pr[k]\n pr[k] = 0.0\n else:\n #if we are in any other iteration and there is a wall then add p\n #to the second entry in the probability list\n pr[k+1] = pr[k+1]+ pr[k]\n pr[k] = 0.0\n else:\n # if the state is not a wall, leave the probability as initiated\n continue\n #the final dict with the change probabilities\n #del the coordinate and probability where the coordinate is outside the maze\n if x == 3:\n trans_prob[a] = [(pr[y],cor[y]) for y in range(len(pr))]\n continue\n else: \n indices = [y for y in range(len(pr)) if y != x]\n trans_prob[a] = [(pr[y],cor[y]) for y in indices]\n \n\n return trans_prob\n \n def look_up(self, is_wall=False):\n \"\"\"Returns a dict with the following form {state:{action:(prob,next_state),....}}\n its a nested dictionary\"\"\"\n Transitions = {}\n #iterate through all states in the maze\n for i in range(len(self.maze)):\n for j in range(len(self.maze[1])):\n #check if the current state is a wall\n if self.checkwall((i,j)) == is_wall:\n continue\n else:\n #get the list of allowed states\n actions = self.allowed_actions(i,j)\n if actions is None:\n continue\n else:\n #get the dictionary with transition prob and put them \n # as values in the dictionary of the final Transitions dict\n trans_prob = self.transition_propability(actions,i,j)\n Transitions[i,j] = trans_prob\n \n return Transitions\n \n def reward(self, method=1, is_wall=False):\n '''calculating the reward using one of the two methods'''\n \"\"\"Return a nested dict with the following form\n {state:{(next_state,action): reward....}}\"\"\"\n reward= {}\n #iterate through the maze\n for i in range(len(self.maze)):\n for j in range(len(self.maze[1])):\n #check if any state is a wall\n if self.checkwall((i,j)) == is_wall:\n continue\n else:\n #get list of allowed actions\n allowed_actions = self.allowed_actions(i, j)\n #mapping actions to successor states\n states = {self.action[0]:[(i-1,j-1),(i-1,j),(i-1,j+1)],\n self.action[1]:[(i+1,j-1),(i+1,j),(i+1,j+1)],\n self.action[2]:[(i-1,j-1),(i,j-1),(i+1,j-1)],\n self.action[3]:[(i-1,j+1),(i,j+1),(i+1,j+1)],\n self.action[4]:[(i-1,j),(i,j),(i,j+1)]}\n current_state = (i,j)\n #iterating through allowed action\n for a in allowed_actions:\n #getting the possible successor states from the dict states\n coordinates = states[a]\n cor = list(coordinates)\n for c in cor:\n if c[0] < 0 or c[1] > (self.width-1) or c[1] < 0 or c[0] > (self.height-1):\n continue\n else:\n if method == 1:\n #checking if the current state is not the goal state and if the successor state is the goal\n if not np.any(np.subtract(current_state, self.Goal)) and 
np.any(np.subtract(c, self.Goal)):\n #check if dict has a key of the current state\n #if yes them append the reward, action and successor state\n #if not generate a new key\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = 1.0\n else:\n reward[(i,j)] = {(c,a):1.0}\n #checking if the current state and the next state are the goal\n elif not np.any(np.subtract(current_state, self.Goal)) and not np.any(np.subtract(c, self.Goal)):\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = 1.0\n else:\n reward[(i,j)] = {(c,a):1.0} \n #checking if the current state is the trap\n elif not np.any(np.subtract(current_state, self.Trap)) and np.any(np.subtract(c, self.Trap)):\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = -50.0\n else:\n reward[(i,j)] = {(c,a):-50.0}\n else:\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = 0.0\n else:\n reward[(i,j)] = {(c,a):0.0} \n elif method == 2:\n #checking if the current state and the next state are the goal\n if not np.any(np.subtract(current_state, self.Goal)) and not np.any(np.subtract(c, self.Goal)):\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = 0.0\n else:\n reward[(i,j)] = {(c,a):0.0}\n #checking if the current state is the trap and the successor state is anything else\n elif not np.any(np.subtract(current_state, self.Trap)) and np.any(np.subtract(c, self.Trap)):\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = -50.0\n else:\n reward[(i,j)] = {(c,a):-50.0}\n else:\n if (i,j) in reward:\n reward[(i,j)][(c,a)] = -1.0\n else:\n reward[(i,j)] = {(c,a):-1.0}\n else:\n print('No Method with that number only 1 or 2')\n return reward\n\n","repo_name":"LailaNiazy/Value-and-Policy-Iteration","sub_path":"Code/Analyse_Maze.py","file_name":"Analyse_Maze.py","file_ext":"py","file_size_in_byte":12652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
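A minimal usage sketch for the Analyse_Maze class above, assuming a maze file in which '1' marks walls, 'S'/'G'/'T' mark start/goal/trap and any other character is free space (inferred from generateMaze and checkwall); the file name and toy layout are made up:

maze_text = (
    "11111\n"
    "1S0G1\n"
    "10101\n"
    "10T01\n"
    "11111\n"
)
with open("toy_maze.txt", "w") as f:
    f.write(maze_text)

m = Analyse_Maze("toy_maze.txt")
print(m.initial_state, m.Goal, m.Trap)       # (1, 1) (1, 3) (3, 2)
transitions = m.look_up()                    # {state: {action: [(prob, next_state), ...]}}
rewards = m.reward(method=1)                 # {state: {(next_state, action): reward, ...}}
print(sorted(transitions[m.initial_state]))  # actions allowed from the start cell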
+{"seq_id":"41660775397","text":"from django.shortcuts import render\nfrom .models import Topic, Entry\n\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.urls import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import TopicForm, EntryForm\n\n# Create your views here.\ndef index(request):\n\t\"\"\"The home page for our website.\"\"\"\n\treturn render(request, 'learning_logs/index.html')\n\n@login_required\ndef topics(request):\n\t\"\"\"Shows all the topics.\"\"\"\n\ttopics = Topic.objects.filter(owner = request.user).order_by('date_added')\n\tcontext = {'topics': topics}\n\treturn render(request, 'learning_logs/topics.html', context)\n\n@login_required\ndef topic(request, topic_id):\n\t\"\"\"Shows the entries of a topic.\"\"\"\n\ttopic = Topic.objects.get(id = topic_id)\n\tif topic.owner != request.user:\n\t\traise Http404\n\n\tentries = topic.entry_set.order_by('-date_added') #minus sign sorts in reverse order\n\tcontext = {'topic': topic, 'entries': entries}\n\treturn render(request, 'learning_logs/topic.html', context)\n\n@login_required\ndef new_topic(request):\n\t\"\"\"A form to add a new topic.\"\"\"\n\tif request.method != 'POST':\n\t\t# If there is no data to sumit, create a blank form\n\t\tform = TopicForm()\n\telse:\n\t\t# If the data is submitted, process the data.\n\t\tform = TopicForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew_topic = form.save(commit = False)\n\t\t\tnew_topic.owner = request.user\n\t\t\tnew_topic.save()\n\t\t\treturn HttpResponseRedirect(reverse('learning_logs:topics'))\n\n\tcontext = {'form': form}\n\treturn render(request, 'learning_logs/new_topic.html', context)\n\n@login_required\ndef new_entry(request, topic_id):\n\t\"\"\"To add a new entry for a topic\"\"\"\n\ttopic = Topic.objects.get(id = topic_id)\n\n\tif request.method != 'POST':\n\t\t# Blank Form\n\t\tform = EntryForm()\n\telse:\n\t\t# Data submitted, process data\n\t\tform = EntryForm(data = request.POST)\n\t\tif form.is_valid():\n\t\t\tnew_entry = form.save(commit = False)\n\t\t\tnew_entry.topic = topic\n\t\t\tnew_entry.save()\n\t\t\treturn HttpResponseRedirect(reverse('learning_logs:topic', args=[topic_id]))\n\n\tcontext = {'topic': topic, 'form': form}\n\treturn render(request, 'learning_logs/new_entry.html', context)\n\n@login_required\ndef edit_entry(request, entry_id):\n\t\"\"\"Edit an existing entry.\"\"\"\n\tentry = Entry.objects.get(id = entry_id)\n\ttopic = entry.topic\n\tif topic.owner != request.user:\n\t\traise Http404\n\n\tif request.method != 'POST':\n\t\t# Pre fill the form with existing data\n\t\tform = EntryForm(instance = entry)\n\n\telse:\n\t\t# process data\n\t\tform = EntryForm(instance = entry, data = request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect(reverse('learning_logs:topic', args = [topic.id]))\n\t\n\tcontext = {'entry': entry, 'topic': topic, 'form': form}\n\treturn render(request, 'learning_logs/edit_entry.html', context)","repo_name":"ruthvikkkkk/learning_log","sub_path":"learning_logs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"32167359211","text":"import math\nimport heapq\n\ndef minSum(number, k):\n heap = number\n heapq._heapify_max(heap)\n\n for i in range(k):\n max_val = heap[0]\n if max_val == 1:\n break\n new_val = (max_val >> 1) + (max_val & 1)\n replace_max(heap, new_val)\n\n return sum(heap)\nn, k = map(int,input().split())\nnumber = list(map(int,input().split()))\n\ns = minSum(number,k)\nprint(s)\n","repo_name":"dheeraj-2000/dsalgo","sub_path":"CodeVita/min_the_sum.py","file_name":"min_the_sum.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"18"}
+{"seq_id":"71391212519","text":"import os\n\nfrom tqdm import tqdm\n\nfrom utils.files import read_json_data, write_csv_data\n\n\ndef collect_pr_desc(file_dir, project_name, csv_file_path):\n \"\"\"\n \"body\"\n \"title\"\n \"\"\"\n pr_data = read_json_data(file_dir)\n header = [\"pr_url\", \"body\", \"title\"]\n res = []\n for data in tqdm(pr_data):\n cur = []\n cur.append(data[\"url\"])\n if data[\"body\"]:\n cur.append(\"\".join(data[\"body\"].splitlines()))\n else:\n cur.append(\"none\")\n\n if data[\"title\"]:\n cur.append(\"\".join(data[\"title\"].splitlines()))\n else:\n cur.append(\"none\")\n res.append(cur)\n\n output_file = os.path.join(csv_file_path, f\"{project_name}_pr_description.csv\")\n write_csv_data(output_file, header, res)\n","repo_name":"ckxkexing/pr-acceptance","sub_path":"features_factory/use_other/pr_desc.py","file_name":"pr_desc.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"10154266241","text":"# importing the dependencies\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\n\n\n# Data Collection and Processing\n\n\n# Load the Data from CSV File to Pandas DataFrame\ntitanic_data = pd.read_csv(\"/Users/DELL/PycharmProjects/pythonProject10/train.csv\")\n# Printing the First Five Rows of the Dataframe\ntitanic_data.head()\n# number of rows and columns\ntitanic_data.shape\n# getting some information about the data\ntitanic_data.info()\n# checking the number of missing values in each column\ntitanic_data.isnull().sum()\n\n\n# Handling the missing values\n\n\n# drop the cabin column from he dataframe\ntitanic_data = titanic_data.drop(columns=\"Cabin\", axis=1) # 0 for row, 1 for column\n# assign mean age in missing age places\ntitanic_data[\"Age\"].fillna(titanic_data[\"Age\"].mean(), inplace=True) # fillna for filling missing values\n# inplace to save in original dataframe\n# finding the mode value for embarked column\nprint(titanic_data[\"Embarked\"].mode())\nprint(titanic_data[\"Embarked\"].mode()[0])\n# replacing the missing values in Embarked column with mode value\ntitanic_data[\"Embarked\"].fillna(titanic_data[\"Embarked\"].mode()[0], inplace=True)\ntitanic_data.isnull().sum()\n# getting some statistical measures about the data\ntitanic_data.describe()\n# finding the number of people who survived and didn't survive\ntitanic_data[\"Survived\"].value_counts() # 0 for not survived, 1 for survived\n\n\n# Data Visualization\n\n\nsns.set()\n# Making a count plot for survived column\nsns.countplot(x=\"Survived\", data=titanic_data)\n# Making a count plot for sex column\nsns.countplot(x=\"Sex\", data=titanic_data)\ntitanic_data[\"Sex\"].value_counts()\nsns.countplot(x=\"Sex\", hue=\"Survived\", data=titanic_data)\nplt.show()\n# Making a count plot for Pclass column\nsns.countplot(x=\"Pclass\", data=titanic_data)\nsns.countplot(x=\"Pclass\", hue=\"Survived\", data=titanic_data)\nplt.show()\n\n# Encoding the Categorical Columns\n\n\n# Replacing male values with 0, female values with 1\ntitanic_data[\"Sex\"].value_counts()\ntitanic_data.replace({\"Sex\": {\"male\": 0, \"female\": 1}, \"Embarked\": {\"S\": 0, \"C\": 1, \"Q\": 2}}, inplace=True)\n\n#\ntitanic_data[\"Embarked\"].value_counts()\n","repo_name":"diksha-shrivastava13/titanicSurvivalPrediction","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"4005528704","text":"import enum\n\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import array_ops_stack\nfrom tensorflow.python.ops import bitwise_ops\nfrom tensorflow.python.ops import gen_stateless_random_ops_v2\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export(\"random.Algorithm\", \"random.experimental.Algorithm\")\nclass Algorithm(enum.Enum):\n \"\"\"A random-number-generation (RNG) algorithm.\n\n Many random-number generators (e.g. the `alg` argument of\n `tf.random.Generator` and `tf.random.stateless_uniform`) in TF allow\n you to choose the algorithm used to generate the (pseudo-)random\n numbers. You can set the algorithm to be one of the options below.\n\n * `PHILOX`: The Philox algorithm introduced in the paper [\"Parallel\n Random Numbers: As Easy as 1, 2,\n 3\"](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf).\n * `THREEFRY`: The ThreeFry algorithm introduced in the paper\n [\"Parallel Random Numbers: As Easy as 1, 2,\n 3\"](https://www.thesalmons.org/john/random123/papers/random123sc11.pdf).\n * `AUTO_SELECT`: Allow TF to automatically select the algorithm\n depending on the accelerator device. Note that with this option,\n running the same TF program on different devices may result in\n different random numbers. Also note that TF may select an\n algorithm that is different from `PHILOX` and `THREEFRY`.\n \"\"\"\n\n # The numbers here must match framework/rng_alg.h\n PHILOX = 1\n THREEFRY = 2\n AUTO_SELECT = 3\n\n\ndef convert_alg_to_int(alg):\n \"\"\"Converts algorithm to an integer.\n\n Args:\n alg: can be one of these types: integer, Algorithm, Tensor, string. Allowed\n strings are \"philox\" and \"threefry\".\n\n Returns:\n An integer, unless the input is a Tensor in which case a Tensor is returned.\n \"\"\"\n if isinstance(alg, int):\n return alg\n if isinstance(alg, Algorithm):\n return alg.value\n if isinstance(alg, tensor.Tensor):\n return alg\n if isinstance(alg, str):\n # canonicalized alg\n canon_alg = alg.strip().lower().replace(\"-\", \"\").replace(\"_\", \"\")\n if canon_alg == \"philox\":\n return Algorithm.PHILOX.value\n elif canon_alg == \"threefry\":\n return Algorithm.THREEFRY.value\n elif canon_alg == \"autoselect\":\n return Algorithm.AUTO_SELECT.value\n else:\n raise ValueError(unsupported_alg_error_msg(alg))\n else:\n raise TypeError(\n f\"Can't convert argument `alg` (of value {alg} and type {type(alg)}) \"\n \"to int.\"\n )\n\n\ndef _get_key_counter(seed, alg):\n \"\"\"Calculates the key and counter to pass to raw RNG ops.\n\n This function calculates the key and counter that will be passed to\n the raw RNG ops like `StatelessRandomUniformV2`. Depending on the\n input `alg`, the key and counter may be scrambled or copied from\n `seed`. If `alg` is `\"auto_select\"`, the key and counter will be\n determined at runtime based on device type.\n\n Args:\n seed: An integer tensor of shape [2]. The seed to calculate the key and\n counter from.\n alg: The RNG algorithm. 
See `tf.random.stateless_uniform` for an\n explanation.\n\n Returns:\n A pair (key, counter) suitable for V2 stateless RNG ops like\n `StatelessRandomUniformV2`.\n \"\"\"\n if alg == Algorithm.AUTO_SELECT.value:\n key, counter = gen_stateless_random_ops_v2.stateless_random_get_key_counter(\n seed\n )\n elif alg == Algorithm.PHILOX.value:\n key, counter = _philox_scramble_seed(seed)\n elif alg == Algorithm.THREEFRY.value:\n key = array_ops.reshape(\n _uint32s_to_uint64(math_ops.cast(seed, dtypes.uint32)), [1]\n )\n counter = array_ops.zeros([1], dtypes.uint64)\n else:\n raise ValueError(unsupported_alg_error_msg(alg))\n return key, counter\n\n\ndef get_key_counter_alg(seed, alg):\n \"\"\"Calculates the key, counter and algorithm to pass to raw RNG ops.\n\n This function calculates the key and counter, and determines the algorithm\n that will be passed to the raw RNG ops like `StatelessRandomUniformV2`.\n Depending on the input `alg`, the key and counter may be scrambled or copied\n from `seed`. If `alg` is `\"auto_select\"`, the key and counter will be\n determined at runtime based on device type.\n\n Args:\n seed: An integer tensor of shape [2]. The seed to calculate the key and\n counter from.\n alg: The RNG algorithm. See `tf.random.stateless_uniform` for an\n explanation.\n\n Returns:\n A pair (key, counter, algorithm) suitable for V2 stateless RNG ops like\n `StatelessRandomUniformV2`.\n \"\"\"\n if alg is None:\n alg = Algorithm.AUTO_SELECT.value\n alg = convert_alg_to_int(alg)\n key, counter = _get_key_counter(seed, alg)\n return key, counter, alg\n\n\ndef _uint32s_to_uint64(x):\n return bitwise_ops.bitwise_or(\n math_ops.cast(x[0], dtypes.uint64),\n bitwise_ops.left_shift(\n math_ops.cast(x[1], dtypes.uint64),\n constant_op.constant(32, dtypes.uint64),\n ),\n )\n\n\ndef unsupported_alg_error_msg(alg):\n \"\"\"Produces the unsupported-algorithm error message.\"\"\"\n if isinstance(alg, int):\n philox = Algorithm.PHILOX.value\n threefry = Algorithm.THREEFRY.value\n auto_select = Algorithm.AUTO_SELECT.value\n elif isinstance(alg, str):\n philox = \"philox\"\n threefry = \"threefry\"\n auto_select = \"auto_select\"\n else:\n philox = Algorithm.PHILOX\n threefry = Algorithm.THREEFRY\n auto_select = Algorithm.AUTO_SELECT\n return (\n f\"Argument `alg` got unsupported value {alg}. Supported values are \"\n f\"{philox} for the Philox algorithm, \"\n f\"{threefry} for the ThreeFry algorithm, and \"\n f\"{auto_select} for auto-selection.\"\n )\n\n\ndef _philox_scramble_seed(seed):\n \"\"\"Determines the key and counter for Philox PRNG with the given seed.\n\n Args:\n seed: An integer tensor of shape [2]. 
The seed to calculate the key and\n counter from.\n\n Returns:\n A pair (key, counter) suitable for V2 stateless RNG ops like\n `StatelessRandomUniformV2`.\n \"\"\"\n # the same scrambling procedure as core/kernels/stateless_random_ops.cc\n key = constant_op.constant([0x02461E293EC8F720], dtypes.uint64)\n counter = math_ops.cast(seed, dtypes.uint64)\n mix = gen_stateless_random_ops_v2.stateless_random_uniform_full_int_v2(\n [4],\n key=key,\n counter=counter,\n dtype=dtypes.uint32,\n alg=Algorithm.PHILOX.value,\n )\n key = array_ops.reshape(_uint32s_to_uint64(mix[:2]), [1])\n counter = array_ops_stack.stack([0, _uint32s_to_uint64(mix[2:])], axis=0)\n return key, counter\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/ops/random_ops_util.py","file_name":"random_ops_util.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"}
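For context, the key/counter helpers in the record above ultimately back TensorFlow's public stateless RNG API; a small usage sketch (recent TF releases expose the alg argument), showing that a fixed seed and algorithm reproduce the same stream:

import tensorflow as tf

seed = [1, 2]
a = tf.random.stateless_uniform([3], seed=seed, alg="philox")
b = tf.random.stateless_uniform([3], seed=seed, alg="philox")
print(a.numpy(), b.numpy())  # identical values: same seed + same algorithm -> same stream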
+{"seq_id":"23087113647","text":"import numpy as np\n\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport os\nimport torch\nimport torch.utils.data as Data\n\nfrom network import *\n\n\nclass My_loss(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, train_input, train_output):\n return torch.mean(torch.pow((train_input - train_output), 2))\n\n\ndef standardization(data):\n mu = np.mean(data, axis=0)\n sigma = np.std(data, axis=0)\n return (data - mu) / sigma\n\n\ncriterion = My_loss()\n\nBATCH_SIZE = 6000\nlearning_rate = 1e-5\nepochs = 1\n\nthreshold = 0.5\n\nprint_loss_frequency = 1\nprint_train_accuracy_frequency = 1\ntest_frequency = 1\nsave_model = 10\n\nmini_loss = 100\nmaxauc = 0.92\n\nshow_test_detail = False\nplot_loss = False\n\nLoss_list = []\nAccuracy_list = []\n\ntest_input = np.load('data/test_input.npy')\ntest_output = np.load('data/test_output.npy')\n\nideal_atte_x_comp = np.array([0, 1])\nideal_atte_x = np.tile(ideal_atte_x_comp, 1024)\nideal_atte_x = torch.from_numpy(ideal_atte_x)\nideal_atte_x = ideal_atte_x.float()\n\ntest_input = torch.from_numpy(test_input)\ntest_output = torch.from_numpy(test_output)\n\ntest_indicator = np.ones(test_input.shape[0])\ntest_indicator = torch.from_numpy(test_indicator)\ntest_indicator = test_indicator.unsqueeze(1)\n\ntest_torch_dataset = Data.TensorDataset(test_input, test_indicator, test_output)\n\ntest_loader = Data.DataLoader(\n dataset=test_torch_dataset, # torch TensorDataset format\n batch_size=BATCH_SIZE, # mini batch size\n shuffle=False, # 要不要打乱数据 (打乱比较好)\n # num_workers=2, # 多线程来读数据\n)\n\nprint(\"torch.cuda.is_available() = \", torch.cuda.is_available())\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = net()\nmodel.to(device) # 移动模型到cuda\n\n\nif os.path.exists('checkpoint/model.pkl'):\n print('load model')\n model.load_state_dict(torch.load('checkpoint/model.pkl'))\n\n\nfor step, (test_input, test_indicator, test_output) in enumerate(test_loader):\n\n ideal_atte_x = ideal_atte_x.float().to(device)\n test_indicator = test_indicator.float().to(device)\n\n test_input = test_input.float().to(device)\n test_output = test_output.float().to(device)\n\n test_preds = model(test_input, test_indicator, ideal_atte_x)\n\n test_preds = test_preds.cpu()\n test_output = test_output.cpu()\n test_input = test_input.cpu()\n\n test_preds = test_preds.detach().numpy()\n test_output = test_output.detach().numpy()\n test_input = test_input.detach().numpy()\n\n test_input = test_input[60]\n test_preds = test_preds[60]\n test_output = test_output[60]\n\n l1, = plt.plot(test_input)\n l2, = plt.plot(test_preds)\n l3, = plt.plot(test_output)\n\n plt.legend([l1, l2, l3], ['noisy input', 'noise', 'ideal output'], loc='upper right')\n\n plt.title('denoise network')\n\n plt.show()\n\n\n","repo_name":"ZeroAda/DeepSeparator","sub_path":"Exp1/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"15011887073","text":"import urllib.parse\nfrom django.conf import settings\nfrom rest_framework import serializers\nfrom projects import models\nfrom tags.serializers import TagSerializer\nfrom files.serializers import ImageSerializer\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n tags = serializers.SerializerMethodField()\n meta = serializers.SerializerMethodField()\n logo = serializers.SerializerMethodField()\n\n class Meta:\n model = models.Project\n fields = [\n \"id\",\n \"name\",\n \"slug\",\n \"logo\",\n \"summary\",\n \"date_published\",\n \"url_name\",\n \"url\",\n \"tags\",\n \"meta\",\n ]\n\n def get_logo(self, project):\n if not project.logo:\n return None\n serializer = ImageSerializer(project.logo)\n return serializer.data\n\n def get_tags(self, project):\n serializer = TagSerializer(project.tags, many=True)\n return serializer.data\n\n def get_keywords(self, project):\n return \", \".join([tag.name for tag in project.tags.all()])\n\n def get_canonical(self, project):\n base = urllib.parse.urljoin(settings.CLIENT_CANONICAL_URL, \"projects/\")\n return urllib.parse.urljoin(base, project.slug)\n\n def get_html_title(self, project):\n name = settings.CLIENT_CANONICAL_NAME\n return \"{} | {}\".format(project.name, name) if name else project.name\n\n def get_meta(self, project):\n return {\n \"title\": project.name,\n \"html_title\": self.get_html_title(project),\n \"description\": project.summary,\n \"keywords\": self.get_keywords(project),\n \"canonical\": self.get_canonical(project),\n }\n","repo_name":"albeiks/omaralbeik.com","sub_path":"backend/projects/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"}
+{"seq_id":"21321166229","text":"import json\nfrom pyspark import SparkContext\nimport time\nimport numpy as np\nimport xgboost as xgbr\nimport sys\n#from sklearn.metrics import mean_squared_error\n\n\n\ndef loadBussinessJson(line):\n business = json.loads(line)\n return (business['business_id'],[float(business['stars']),float(business['review_count'])])\n\ndef loadUserJson(line):\n user = json.loads(line)\n return (user['user_id'],[float(user['average_stars']),float(user['review_count'])])\n\ndef createFeature(input):\n ip_data = input.split(',')\n user = ip_data[0]\n business = ip_data[1]\n if len(ip_data) == 3:\n rating = float(ip_data[2])\n else:\n rating = 0\n if user in userMap:\n user_avg_star =userMap[user][0]\n user_review_count = userMap[user][1]\n else:\n user_avg_star = 2.5\n user_review_count = 10 ##tune it correctly\n\n if business in businessMap:\n business_star = businessMap[business][0]\n business_review_count = businessMap[business][1]\n else:\n business_star =2.5\n business_review_count = 10 ## tune it correctly\n\n return [user,business,user_avg_star,user_review_count,business_star,business_review_count,rating]\n\n\nst = time.time()\nsc = SparkContext('local[*]','count_reviews')\nsc.setLogLevel(\"ERROR\")\n\npath = sys.argv[1]\nip_business = sc.textFile(path+\"/business.json\")\nip_user = sc.textFile(path+\"/user.json\")\n\nbusinessMap= dict(ip_business.map(loadBussinessJson).collect())\n\nuserMap = dict(ip_user.map(loadUserJson).collect())\n\n\ndef getModelBasedRatings():\n\n read_data= sc.textFile(path+\"yelp_train.csv\")\n first = read_data.first()\n read_data = read_data.filter(lambda x: x!= first)\n data = read_data.map(lambda x : createFeature(x)).collect()\n\n test_data= sc.textFile(path+sys.argv[2])\n first = test_data.first()\n test_data = test_data.filter(lambda x: x!= first)\n test_data = test_data.map(lambda x : createFeature(x)).collect()\n\n np_data = np.array(data)\n test_np_data = np.array(test_data)\n\n train_x = np_data[:,2:6].astype('float64')\n train_y = np_data[:,6].astype('float64')\n test_x = test_np_data[:,2:6].astype('float64')\n test_y = test_np_data[:,6].astype('float64')\n test_cases = test_np_data[:, 0:2]\n print(\"hiii\")\n\n model = xgbr.XGBRegressor()\n model.fit(train_x,train_y)\n print(model)\n output = model.predict(data=test_x)\n\n rmseError = np.sqrt(np.mean((output-test_y)**2))\n predicted_rating = {}\n for A, B in zip(test_cases, output):\n predicted_rating[A[0] + \"*\" + A[1]] = B\n print()\n return predicted_rating\n\n\nop = sys.argv[3]\nop_file = open(op,\"w\")\nop_file.write(\"user_id ,business_id ,prediction\\n\")\npr = getModelBasedRatings()\nfor i in pr:\n u = i.split(\"*\")[0]\n b = i.split(\"*\")[1]\n op_file.write(u+\",\"+b+\",\"+str(pr[i])+\"\\n\")","repo_name":"soniagodhwani/Data-Mining","sub_path":"Recommendation_System/Implementations/Model_based_recommendation.py","file_name":"Model_based_recommendation.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"14030814383","text":"import os\nfrom PIL import Image\n\n\nclass RandomGenerator():\n\n\tdef __init__(self):\n\t\tpass\n\n\n\tdef get_ints(self, amount, min_val=0, max_val=255, col=3):\n\t\t\"\"\"fetch random integers from random API and store them as instance variables\"\"\"\n\t\turl = \"curl 'https://www.random.org/integers/?num=%d&min=%d&max=%d&col=%d&base=10&format=plain'\" % (amount, min_val, max_val, col)\n\n\t\tresponse = os.popen(url).read()\n\t\tif response.startswith(\"Error:\"):\n\t\t\traise Exception(response)\n\t\telse:\n\t\t\tself.random_ints = response\n\t\t\treturn response\n\t\t\t\n\n\n\tdef create_image(self, x_dim=128, y_dim=128,):\n\t\tself.get_ints(9999)\n\t\tif self.random_ints:\n\t\t\trgb = self.format_rgb_response(self.random_ints)\n\t\t\trgb = np.random.randint(0, 255, (128*128, 3))\n\t\t\timg = Image.new('RGB', (128,128), \"black\")\n\t\t\tpix = img.load()\n\t\t\tcount = 0\n\t\t\t# set pixel rgb values to stored random ints\n\t\t\tfor i in range(img.size[0]):\n\t\t\t\tfor j in range(img.size[1]):\n\t\t\t\t\tpix[i,j] = tuple(rgb[count])\n\t\t\t\t\tcount += 1\n\n\t\t\t\t\tif count > len(rgb) - 1:\n\t\t\t\t\t\tcount = 0\n\t\t\t\t\t\tself.get_ints(9999)\n\t\t\t\t\t\trgb = self.format_rgb_response(self.random_ints)\n\n\t\t\timg.save(\"randBMP.bmp\")\n\t\telse:\n\t\t\treturn None\n\n\n\tdef format_rgb_response(self, response):\n\t\t\"\"\"formats random ints into sets of rgb tuples\"\"\"\n\t\trgb = map(lambda x: tuple(x.split(\"\\t\")), response.split(\"\\n\"))\n\t\trgb.pop()\n\t\treturn rgb\n\n","repo_name":"jtmcbride/unify_radom_bitmap","sub_path":"random_generator/random_client.py","file_name":"random_client.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"41001714717","text":"from django import template\nregister = template.Library()\n\n@register.filter\ndef dict_keys(value, args):\n \"\"\" Get Dict Keys from Nested Dict Object\n \n Get keys from nested dictionary object.\n \n Args:\n value: object\n args (string): specify the strings comma separated\n \"\"\"\n \n if ((args is not None) and (args != '')):\n arg_list = [arg.strip() for arg in args.split(',')]\n \n keys = None\n _value = value\n for key in arg_list:\n keys = _value[key].keys()\n _value = _value[key]\n \n return keys\n else:\n return None\n\n@register.filter\ndef dict_value(value, args):\n \"\"\" Get Dict Value from Nested Dict Object\n \n Get value from nested dictionary object.\n \n Args:\n value: object\n args (string): specify the strings comma separated\n \"\"\"\n \n if ((args is not None) and (args != '')):\n arg_list = [arg.strip() for arg in args.split(',')]\n \n keys = None\n _value = value\n for key in arg_list[:-1]:\n keys = _value[key].keys()\n _value = _value[key]\n \n return _value[arg_list[-1]]\n else:\n return None\n\n","repo_name":"ryoma-jp/samples","sub_path":"web_app/05_how_to_access_to_nested_dict_objects/app/templatetags/custom_filter.py","file_name":"custom_filter.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"1874427244","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: light\n# format_version: '1.5'\n# jupytext_version: 1.13.8\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n# Import relevant modules\nimport random\n\n\nclass Hangman:\n\n # Initialize class variables\n # Word bank from which game draws word\n word_bank = [\n \"treetop\",\n \"automobile\",\n \"garden\",\n \"television\",\n \"radio\",\n \"cream\",\n \"world\",\n \"chandelier\",\n ]\n\n # Constructor\n def __init__(self):\n \"\"\"Initialize instance variables and set up board.\"\"\"\n # Keep track of whether player loses or wins\n self.lose = False\n\n # Keep track of correct letters picked by player\n self.correctLetters = []\n\n # Choose a word randomly from word bank\n self.word = random.choice(self.word_bank)\n\n # Keep track of number of body parts when player chooses incorrect letters\n self.bodyParts = 0\n\n # Set up board (display a \"blank\" for each letter in word)\n print(\"__ \" * len(self.word))\n\n def displayBoard(self):\n \"\"\"\n Display board:.\n\n display letters in word that were picked\n display 'blanks' for letters in word that were not picked\n \"\"\"\n for letter in self.word:\n if letter in self.correctLetters:\n print(letter, end=\" \")\n else:\n print(\"__\", end=\" \")\n\n def checkForMatch(self, letterChoice):\n \"\"\"\n Check if the player's current letter choice matches any of the letters in word.\n Display result accordingly.\n If there's a match, update the board.\n Otherwise, add a body part and update the lose instance variable as needed.\n \"\"\"\n\n if letterChoice in self.word:\n for letter in self.word:\n if letter == letterChoice:\n self.correctLetters.append(letterChoice)\n print(\"Match!\")\n else:\n self.bodyParts += 1\n if self.bodyParts == 10:\n self.lose = True\n print(\"Not a match!\")\n\n def play(self):\n \"\"\"\n Carry out game:\n While the player has not guessed all letters in word and not all body parts have been added,\n the game proceeds (player continues to guess); otherwise, game reaches end.\n Each time player chooses a letter, check if it's a match, and then display the updated board.\n At end of game, check whether player loses or wins, and display result accordingly.\n \"\"\"\n while len(self.correctLetters) != len(self.word) and self.bodyParts < 10:\n print()\n letterChoice = input(\"Pick a letter: \")\n self.checkForMatch(letterChoice)\n self.displayBoard()\n\n if self.lose is True:\n print()\n print(\"You lost!\")\n else:\n print()\n print(\"You won!\")\n\n # return\n\n\n# Create an instance of Hangman\nHM_game1 = Hangman()\n# Play a game of Hangman\nHM_game1.play()\n","repo_name":"krishnakumarg1984/pydev_linkedin_learning","sub_path":"z_other/02_python_data_science_mistakes_to_avoid/01_01_comments.py","file_name":"01_01_comments.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71668403239","text":"from typing import Optional\n\nfrom absl import logging\nfrom lingvo.jax import base_model_params\n\nBaseModelParamsT = base_model_params.BaseModelParamsT\n\n_MODEL_PREFIX = 'lingvo'\n\n\ndef _ModelClassKey(model_class: BaseModelParamsT) -> str:\n \"\"\"Retrieves a model key from the model class.\"\"\"\n path = model_class.__module__ + '.' + model_class.__name__\n # Removes model_registry from `...lingvo.jax.model_registry.`.\n prefix = _ModelClassKey.__module__.replace('.model_registry', '.')\n return path.replace(prefix, '').replace('tasks.', '').replace('params.', '')\n\n\nclass _ModelRegistryHelper:\n \"\"\"Helper class encapsulating a global registry keyed by model name.\"\"\"\n\n # Global variable for the model registry\n _registry = {}\n\n @classmethod\n def _ClassPathPrefix(cls):\n \"\"\"Prefixes for model names registered by this module.\"\"\"\n return _MODEL_PREFIX\n\n @classmethod\n def RegisterModel(cls, model_class: BaseModelParamsT) -> BaseModelParamsT:\n \"\"\"Registers a model class in the global registry.\"\"\"\n key = cls._ClassPathPrefix() + '.' + _ModelClassKey(model_class)\n if key in cls._registry:\n raise ValueError(f'Model `{key}` already registed.')\n logging.info('Registering model %s as %s', model_class, key)\n cls._registry[key] = model_class\n return model_class\n\n @classmethod\n def GetModel(cls, key: str) -> Optional[BaseModelParamsT]:\n \"\"\"Retrieves a model from the global registry from the input key.\"\"\"\n key = cls._ClassPathPrefix() + '.' + key\n if key not in cls._registry:\n for k in cls._registry:\n logging.info('Known model: %s', k)\n return cls._registry.get(key)\n\n\nRegisterModel = _ModelRegistryHelper.RegisterModel\nGetModel = _ModelRegistryHelper.GetModel\n","repo_name":"mlcommons/training_results_v1.1","sub_path":"Google/benchmarks/bert/implementations/bert-research-JAX-tpu-v4-2048/jax/model_registry.py","file_name":"model_registry.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"18"}
+{"seq_id":"7468026861","text":"# -*- coding: utf-8 -*-\n\nimport os, time, json, requests\nimport multiprocessing as mp\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\ndef data_obtain():\n \"\"\"获取数据\"\"\"\n \n url = 'https://view.inews.qq.com/g2/getOnsInfo?name=disease_h5&callback=&_=%d'%int(time.time()*1000)\n with open('fb/ncp.txt', 'w') as fp:\n fp.write(requests.get(url=url).json()['data'])\n \n print('Obtain OK')\n\ndef data_process():\n \"\"\"处理数据\"\"\"\n \n while True:\n if os.path.isfile('fb/ncp.txt'):\n with open('fb/ncp.txt', 'r') as fp:\n data = json.loads(fp.read())\n \n with open('fb/ncp.csv', 'w') as fp:\n for p in data['areaTree'][0]['children']:\n fp.write('%s,%d,%d,%d,%d\\n'%(p['name'], p['total']['confirm'], p['total']['suspect'], p['total']['dead'], p['total']['heal']))\n \n os.remove('fb/ncp.txt')\n print('Process OK')\n else:\n print('No data file')\n \n time.sleep(10)\n\nif __name__ == '__main__':\n # 创建并启动数据处理子进程\n p_process = mp.Process(target=data_process) # 创建数据处理子进程\n p_process.daemon = True # 设置子进程为守护进程\n p_process.start() # 启动数据处理子进程\n \n # 创建调度器\n scheduler = BlockingScheduler() \n \n # 添加任务\n scheduler.add_job(\n data_obtain, # 获取数据的任务\n trigger = 'cron', # 设置触发器为cron \n minute = '*/1', # 设置每分钟执行一次\n misfire_grace_time = 30 # 30秒内没有执行此job,则放弃执行\n )\n \n # 启动调度服务\n scheduler.start() ","repo_name":"xufive/2020Pyday","sub_path":"miniCrawler.py","file_name":"miniCrawler.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"18"}
+{"seq_id":"71891667240","text":"from application.vectorstore.base import BaseVectorStore\nfrom application.core.settings import settings\nimport elasticsearch\n\nclass Document(str):\n \"\"\"Class for storing a piece of text and associated metadata.\"\"\"\n\n def __new__(cls, page_content: str, metadata: dict):\n instance = super().__new__(cls, page_content)\n instance.page_content = page_content\n instance.metadata = metadata\n return instance\n\n\n\n\nclass ElasticsearchStore(BaseVectorStore):\n _es_connection = None # Class attribute to hold the Elasticsearch connection\n\n def __init__(self, path, embeddings_key, index_name=settings.ELASTIC_INDEX):\n super().__init__()\n self.path = path.replace(\"application/indexes/\", \"\").rstrip(\"/\")\n self.embeddings_key = embeddings_key\n self.index_name = index_name\n \n if ElasticsearchStore._es_connection is None:\n connection_params = {}\n if settings.ELASTIC_URL:\n connection_params[\"hosts\"] = [settings.ELASTIC_URL]\n connection_params[\"http_auth\"] = (settings.ELASTIC_USERNAME, settings.ELASTIC_PASSWORD)\n elif settings.ELASTIC_CLOUD_ID:\n connection_params[\"cloud_id\"] = settings.ELASTIC_CLOUD_ID\n connection_params[\"basic_auth\"] = (settings.ELASTIC_USERNAME, settings.ELASTIC_PASSWORD)\n else:\n raise ValueError(\"Please provide either elasticsearch_url or cloud_id.\")\n\n \n\n ElasticsearchStore._es_connection = elasticsearch.Elasticsearch(**connection_params)\n \n self.docsearch = ElasticsearchStore._es_connection\n\n def connect_to_elasticsearch(\n *,\n es_url = None,\n cloud_id = None,\n api_key = None,\n username = None,\n password = None,\n ):\n try:\n import elasticsearch\n except ImportError:\n raise ImportError(\n \"Could not import elasticsearch python package. \"\n \"Please install it with `pip install elasticsearch`.\"\n )\n\n if es_url and cloud_id:\n raise ValueError(\n \"Both es_url and cloud_id are defined. 
Please provide only one.\"\n )\n\n connection_params = {}\n\n if es_url:\n connection_params[\"hosts\"] = [es_url]\n elif cloud_id:\n connection_params[\"cloud_id\"] = cloud_id\n else:\n raise ValueError(\"Please provide either elasticsearch_url or cloud_id.\")\n\n if api_key:\n connection_params[\"api_key\"] = api_key\n elif username and password:\n connection_params[\"basic_auth\"] = (username, password)\n\n es_client = elasticsearch.Elasticsearch(\n **connection_params,\n )\n try:\n es_client.info()\n except Exception as e:\n raise e\n\n return es_client\n\n def search(self, question, k=2, index_name=settings.ELASTIC_INDEX, *args, **kwargs):\n embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, self.embeddings_key)\n vector = embeddings.embed_query(question)\n knn = {\n \"filter\": [{\"match\": {\"metadata.store.keyword\": self.path}}],\n \"field\": \"vector\",\n \"k\": k,\n \"num_candidates\": 100,\n \"query_vector\": vector,\n }\n full_query = {\n \"knn\": knn,\n \"query\": {\n \"bool\": {\n \"must\": [\n {\n \"match\": {\n \"text\": {\n \"query\": question,\n }\n }\n }\n ],\n \"filter\": [{\"match\": {\"metadata.store.keyword\": self.path}}],\n }\n },\n \"rank\": {\"rrf\": {}},\n }\n resp = self.docsearch.search(index=self.index_name, query=full_query['query'], size=k, knn=full_query['knn'])\n # create Documents objects from the results page_content ['_source']['text'], metadata ['_source']['metadata']\n doc_list = []\n for hit in resp['hits']['hits']:\n \n doc_list.append(Document(page_content = hit['_source']['text'], metadata = hit['_source']['metadata']))\n return doc_list\n\n def _create_index_if_not_exists(\n self, index_name, dims_length\n ):\n\n if self._es_connection.indices.exists(index=index_name):\n print(f\"Index {index_name} already exists.\")\n\n else:\n\n indexSettings = self.index(\n dims_length=dims_length,\n )\n self._es_connection.indices.create(index=index_name, **indexSettings)\n\n def index(\n self,\n dims_length,\n ):\n return {\n \"mappings\": {\n \"properties\": {\n \"vector\": {\n \"type\": \"dense_vector\",\n \"dims\": dims_length,\n \"index\": True,\n \"similarity\": \"cosine\",\n },\n }\n }\n }\n\n def add_texts(\n self,\n texts,\n metadatas = None,\n ids = None,\n refresh_indices = True,\n create_index_if_not_exists = True,\n bulk_kwargs = None,\n **kwargs,\n ):\n \n from elasticsearch.helpers import BulkIndexError, bulk\n\n bulk_kwargs = bulk_kwargs or {}\n import uuid\n embeddings = []\n ids = ids or [str(uuid.uuid4()) for _ in texts]\n requests = []\n embeddings = self._get_embeddings(settings.EMBEDDINGS_NAME, self.embeddings_key)\n\n vectors = embeddings.embed_documents(list(texts))\n\n dims_length = len(vectors[0])\n\n if create_index_if_not_exists:\n self._create_index_if_not_exists(\n index_name=self.index_name, dims_length=dims_length\n )\n\n for i, (text, vector) in enumerate(zip(texts, vectors)):\n metadata = metadatas[i] if metadatas else {}\n\n requests.append(\n {\n \"_op_type\": \"index\",\n \"_index\": self.index_name,\n \"text\": text,\n \"vector\": vector,\n \"metadata\": metadata,\n \"_id\": ids[i],\n }\n )\n\n\n if len(requests) > 0:\n try:\n success, failed = bulk(\n self._es_connection,\n requests,\n stats_only=True,\n refresh=refresh_indices,\n **bulk_kwargs,\n )\n return ids\n except BulkIndexError as e:\n print(f\"Error adding texts: {e}\")\n firstError = e.errors[0].get(\"index\", {}).get(\"error\", {})\n print(f\"First error reason: {firstError.get('reason')}\")\n raise e\n\n else:\n return []\n\n def 
delete_index(self):\n self._es_connection.delete_by_query(index=self.index_name, query={\"match\": {\n \"metadata.store.keyword\": self.path}},)\n\n","repo_name":"arc53/DocsGPT","sub_path":"application/vectorstore/elasticsearch.py","file_name":"elasticsearch.py","file_ext":"py","file_size_in_byte":7263,"program_lang":"python","lang":"en","doc_type":"code","stars":13254,"dataset":"github-code","pt":"18"}
+{"seq_id":"2216783713","text":"#!/usr/bin/env python3\n'''Records scans to a given file in the form of numpy array.\nUsage example:\n\n$ ./record_scans.py out.npy'''\nimport sys\nimport numpy as np\nfrom rplidar import RPLidar\nimport matplotlib.pyplot as plt\n\n\n\nPORT_NAME = '/dev/ttyUSB0'\n\n\ndef run():\n\t'''Main function'''\n\tlidar = RPLidar(PORT_NAME)\n\tdata = []\n\tangles = []\n\tdistances = []\n\tk = 0\n\n\tangles1 = []; distances1 = []\n\tangles2 = []; distances2 = []\n\tangles3 = []; distances3 = []\n\tangles4 = []; distances4 = []\n\tangles5 = []; distances5 = []\n\tangles6 = []; distances6 = []\n\tangles7 = []; distances7 = []\n\n\n\n\n\tprint('Recording measurments... Press Crl+C to stop.')\n\tfor scan in lidar.iter_scans('express'):\n\n\t\tscan = np.array(scan)\n\t\tfor i in range (len(scan)) :\n\t\t\t\n\t\t\tangle = np.deg2rad(360-scan[i][1])\n\t\t\tdistance = scan[i][2]\n\t\t\t\n\t\t\tif ((angle < 0.13) | (angle > 6.14)) & (distance < 3000) : #+/- 7.59°\n\t\t\t\tangles1.append(angle)\n\t\t\t\tdistances1.append(distance)\n\t\t\telif ((angle < 0.15) | (angle > 6.10)) & (distance < 2500) : #+/- 9.09°\n\t\t\t\tangles2.append(angle)\n\t\t\t\tdistances2.append(distance)\n\t\t\telif ((angle < 0.19) | (angle > 6.07)) & (distance < 2000) : #+/- 11.30°\n\t\t\t\tangles3.append(angle)\n\t\t\t\tdistances3.append(distance)\n\t\t\telif ((angle < 0.24) | (angle > 6.02)) & (distance < 1500) : #+/- 14.93°\n\t\t\t\tangles4.append(angle)\n\t\t\t\tdistances4.append(distance)\n\t\t\telif ((angle < 0.36) | (angle > 5.89)) & (distance < 1000) : #+/- 21.81°\n\t\t\t\tangles5.append(angle)\n\t\t\t\tdistances5.append(distance)\n\t\t\telif ((angle < 0.46) | (angle > 5.81)) & (distance < 800) : #+/- 26.56°\n\t\t\t\tangles6.append(angle)\n\t\t\t\tdistances6.append(distance)\n\t\t\telif ((angle < 1.09) | (angle > 5.18)) & (distance < 400) : #+/- 63°\n\t\t\t\tangles7.append(angle)\n\t\t\t\tdistances7.append(distance)\n\t\t\telse :\n\t\t\t\tangles.append(angle)\n\t\t\t\tdistances.append(distance)\n\t\t\t\n\t\tk += 1\n\t\tif k == 1 :\n\t\t\tbreak\n\n\n\tmark = '.'\n\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, projection='polar')\n\tax.scatter(angles, distances, c='grey', marker = mark)\n\tax.scatter(angles1, distances1, c='red', marker = mark)\n\tax.scatter(angles2, distances2, c='yellow', marker = mark)\n\tax.scatter(angles3, distances3, c='cyan', marker = mark)\n\tax.scatter(angles4, distances4, c='blue', marker = mark)\n\tax.scatter(angles5, distances5, c='black', marker = mark)\n\tax.scatter(angles6, distances6, c='green', marker = mark)\n\tax.scatter(angles7, distances7, c='magenta', marker = mark)\n\t\n\tplt.savefig(\"test.png\", bbox_inches='tight')\n\tplt.show()\n\t\n\tprint('Stoping.')\n\tlidar.stop()\n\tlidar.disconnect()\n\t#np.save(path, np.array(data))\n\t\n\t\n\n\nrun()\n\n","repo_name":"Maxence125/PI","sub_path":"obstacle_detection.py","file_name":"obstacle_detection.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"29298746069","text":"from collections import deque\nfrom functools import reduce\n\n\ndef _hex2num(char):\n if char.isdigit():\n return int(char)\n return 10 + ord(char) - ord('a')\n\n\ndef _num2hex(num):\n if num < 10:\n return str(num)\n return chr(ord('a') + num - 10)\n\n\ndef _get_sum(a, b, add=0):\n sm = a + b + add\n return sm % 16, sm // 16\n\n\ndef _get_multi(a, b, add=0):\n ml = a * b + add\n return ml % 16, ml // 16\n\n\ndef hex_sum(first, second, del_nulls=True):\n res = deque()\n add = 0\n\n while first and second:\n a, b = first.pop(), second.pop()\n if type(a) is str:\n a = _hex2num(a.lower())\n if type(b) is str:\n b = _hex2num(b.lower())\n sm, add = _get_sum(a, b, add)\n res.appendleft(sm)\n\n greater_num = first if first else second\n\n while greater_num:\n num = greater_num.pop()\n if type(num) is str:\n num = _hex2num(num.lower())\n sm, add = _get_sum(num, add)\n res.appendleft(sm)\n if add:\n res.appendleft(1)\n\n if del_nulls:\n while res[0] == 0 and len(res) > 1:\n res.popleft()\n\n return res\n\n\ndef hex_multi(first, second):\n sums = []\n counter = 0\n\n for fnum in reversed(first):\n fnum = fnum.lower()\n res = deque([0 for _ in range(counter)])\n add = 0\n for snum in reversed(second):\n ml, add = _get_multi(_hex2num(fnum), _hex2num(snum.lower()), add)\n res.appendleft(ml)\n if add:\n res.appendleft(add)\n counter += 1\n sums.append(res)\n\n res = reduce(lambda a, b: hex_sum(a, b, False), sums)\n while res[0] == 0 and len(res) > 1:\n res.popleft()\n\n return res\n\n\nif __name__ == '__main__':\n first_num, optype, second_num = input(\"Введите операцию (hex1 +* hex2): \").strip().split()\n\n if optype == '+':\n res = hex_sum(list(first_num), list(second_num))\n else:\n res = hex_multi(list(first_num), list(second_num))\n\n print(f'>>>', ''.join(_num2hex(i) for i in res))\n","repo_name":"x415a/GB_py_algorithms","sub_path":"lesson_5/task_2.py","file_name":"task_2.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"27220290574","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom pprint import pprint\nfrom PIL import Image\nfrom scipy.cluster.vq import kmeans, vq\n\n'''modify color depends on light(gray) degree, can be used as heat map\n'''\n\ndef sepearte_by_lights(gray_data, number=3):\n data = np.asarray(gray_data, dtype=\"float\")\n w, h = data.shape\n data = np.reshape(data, w*h)\n centroids,_ = kmeans(data, number)\n idx,_ = vq(data, centroids)\n return idx\n\ndef posterize(image_path, modification, number, extra_colors, output):\n img = Image.open(image_path)\n gray = img.convert(\"L\")\n data = np.asarray(img, dtype=\"int32\")\n w, h, k = data.shape\n data = np.reshape(data, (w*h, k))\n gray_idx = sepearte_by_lights(gray, number)\n for i in range(number):\n data[gray_idx == i] = modification(data[gray_idx == i], extra_colors[i])\n data = np.reshape(data, (w, h, k))\n img = Image.fromarray(np.asarray(np.clip(data, 0, 255), dtype=\"uint8\"), \"RGB\")\n img.save(output)\n\n\nif __name__ == \"__main__\":\n def modification(data, extra_color):\n return data + extra_color\n number = 5\n extra_colors = [[255,0,0],[0,255,0],[0,0,255],[255,255,0],[0,255,255]]\n posterize(\"test.jpg\", modification, number, extra_colors, \"new_test.jpg\")\n\n\n","repo_name":"ariesduanmu/pyPhotoshop","sub_path":"posterize.py","file_name":"posterize.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"19496402117","text":"# Libraries\nimport RPi.GPIO as GPIO\nimport time\nimport requests\nimport os\n\nGPIO.cleanup()\n\n# dashboard URL\nSPYLENT_DASHBOARD_URL = os.getenv('DASHBOARD_URL', 'https://spylent.herokuapp.com/api/point')\n\n# cleanup buffer\ndistance_buffer_idx = 0\ndistance_buffer_max = 5\ndistance_buffer = [0 for x in range(distance_buffer_max)]\n\n# GPIO Mode (BOARD / BCM)\nGPIO.setmode(GPIO.BCM)\n\n# set GPIO Pins\nGPIO_TRIGGER = 18\nGPIO_ECHO = 24\n\n# set GPIO direction (IN / OUT)\nGPIO.setup(GPIO_TRIGGER, GPIO.OUT)\nGPIO.setup(GPIO_ECHO, GPIO.IN)\n\n\ndef send_to_spylent_dashboard(tag='', value=None):\n res = requests.post(SPYLENT_DASHBOARD_URL, json={'tag': tag, 'value': value})\n print(res)\n print(type(tag))\n print(type(value))\n print(res.text)\n\n\ndef distance():\n # set Trigger to HIGH\n GPIO.output(GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n\n StartTime = time.time()\n StopTime = time.time()\n ticks_left = 10000\n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0 and ticks_left:\n time.sleep(0.0000001)\n StartTime = time.time()\n ticks_left -= 1\n\n ticks_left = 10000\n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1 and ticks_left:\n time.sleep(0.0000001)\n StopTime = time.time()\n ticks_left -= 1\n\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n dist = (TimeElapsed * 34300) / 2\n\n if 0 < dist < 1000:\n return dist\n\ndef median(arr):\n return sorted(arr)[int(len(arr) / 2)]\n\nif __name__ == '__main__':\n while True:\n try:\n dist = distance() or distance()\n if dist:\n distance_buffer[distance_buffer_idx] = dist\n if distance_buffer_idx == distance_buffer_max - 1 or True:\n average_distance = sum(distance_buffer) / len(distance_buffer)\n print(\"%s - Average Measured Distance = %.1f cm\" % (time.time(), average_distance))\n print(\"distance buffer:\", distance_buffer, \"median:\", median(distance_buffer))\n send_to_spylent_dashboard('distance', median(distance_buffer))\n distance_buffer_idx = (distance_buffer_idx + 1) % distance_buffer_max\n except Exception as e:\n print(e)\n except Error as e:\n print(e)\n\n time.sleep(0.5)\n\nGPIO.cleanup()","repo_name":"jmdx/spylent","sub_path":"coffeestation/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"75265483240","text":"from rest_framework import generics, permissions\nfrom .serializers import UserSerializer, ProductSerializer, PartySerializer, \\\n CommandSerializer, MemberOperationSerializer, UserMiscSerializer, UserFriendsSerializer, FriendOperationSerializer\nfrom django.contrib.auth.models import User\nfrom .models import FriendRequest, Product, Party, Command, CommandContribution\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .permissions import IsMemberOfParty, IsOwner, IsLeaderOfParty\nfrom django.db import models\nfrom django.shortcuts import get_object_or_404\nfrom datetime import datetime\n\n\n# Users #\n\nclass UsersView(generics.ListCreateAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserDetailView(generics.RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n\nclass UserMiscView(generics.RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = UserMiscSerializer\n\n def get_object(self):\n obj = super(UserMiscView, self).get_object()\n return obj.usermisc\n\n\n# Friend requests #\n\nclass FriendsView(generics.RetrieveAPIView):\n queryset = User.objects.all()\n serializer_class = FriendOperationSerializer\n permission_classes = [permissions.IsAuthenticated, IsOwner]\n\n def get_object(self):\n obj = super(FriendsView, self).get_object()\n return obj.usermisc\n\n def get(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = UserFriendsSerializer(instance)\n return Response(serializer.data)\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n misc = self.get_object()\n\n try:\n target = User.objects.get(pk=serializer.data['user'])\n except User.DoesNotExist:\n return Response({\"detail\": \"User not found\"}, status=status.HTTP_404_NOT_FOUND)\n\n if target == misc.user:\n return Response({\"detail\": \"Can't perform any friend action on self\"}, status=status.HTTP_400_BAD_REQUEST)\n\n if serializer.data['action'] == \"ADD\":\n return self.add_friend(target)\n\n if serializer.data['action'] == \"REMOVE\":\n return self.remove_friend(target)\n\n if serializer.data['action'] == \"ACCEPT\":\n return self.accept_request(target)\n\n if serializer.data['action'] == \"REFUSE\":\n return self.refuse_request(target)\n\n if serializer.data['action'] == \"CANCEL\":\n return self.cancel_request(target)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n def cancel_request(self, target):\n misc = self.get_object()\n\n try:\n friend_request = FriendRequest.objects.get(from_user=misc.user, to_user=target, status=\"PENDING\")\n except FriendRequest.MultipleObjectsReturned:\n return Response({\n \"detail\": \"Unexpected error : multiple requests found\"\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n except FriendRequest.DoesNotExist:\n return Response({\n \"detail\": \"You didn't send a friend request to this user\"\n }, status=status.HTTP_400_BAD_REQUEST)\n\n friend_request.status = \"CANCELED\"\n friend_request.save()\n\n return Response(UserFriendsSerializer(misc).data, status=status.HTTP_200_OK)\n\n def refuse_request(self, target):\n misc = self.get_object()\n\n try:\n friend_request = FriendRequest.objects.get(from_user=target, to_user=misc.user, status=\"PENDING\")\n except 
FriendRequest.MultipleObjectsReturned:\n return Response({\n \"detail\": \"Unexpected error : multiple requests found\"\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n except FriendRequest.DoesNotExist:\n return Response({\n \"detail\": \"You didn't received a friend request from this user\"\n }, status=status.HTTP_400_BAD_REQUEST)\n\n friend_request.status = \"REJECTED\"\n friend_request.save()\n\n return Response(UserFriendsSerializer(misc).data, status=status.HTTP_200_OK)\n\n def remove_friend(self, target):\n misc = self.get_object()\n\n if target.id not in misc.friends:\n return Response({\"detail\": \"This user isn't your friend.\"}, status=status.HTTP_400_BAD_REQUEST)\n\n friend_request = FriendRequest.objects.filter(\n models.Q(from_user=misc.user, to_user=target) | models.Q(from_user=target, to_user=misc.user)\n ).filter(status=\"ACCEPTED\")[0]\n\n friend_request.status = \"DELETED\"\n friend_request.save()\n\n return Response(UserFriendsSerializer(misc).data, status=status.HTTP_200_OK)\n\n def add_friend(self, target):\n misc = self.get_object()\n\n if target.id in misc.received_requests:\n return self.accept_request(target)\n\n if target.id in misc.sent_requests:\n return Response({\n \"detail\": \"You already sent a friend request to this user\"\n }, status=status.HTTP_400_BAD_REQUEST)\n\n if target.id in misc.friends:\n return Response({\n \"detail\": \"This user is already your friend\"\n }, status=status.HTTP_400_BAD_REQUEST)\n\n FriendRequest.objects.create(from_user=misc.user, to_user=target, status=\"PENDING\")\n\n return Response(UserFriendsSerializer(misc).data, status=status.HTTP_200_OK)\n\n def accept_request(self, target):\n misc = self.get_object()\n\n try:\n request = FriendRequest.objects.get(from_user=target, to_user=misc.user, status=\"PENDING\")\n except FriendRequest.MultipleObjectsReturned:\n return Response({\n \"detail\": \"Unexpected error : multiple requests found\"\n }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n except FriendRequest.DoesNotExist:\n return Response({\"detail\": \"You don't have any request from this user\"}, status=status.HTTP_400_BAD_REQUEST)\n\n request.status = \"ACCEPTED\"\n request.save()\n\n return Response(UserFriendsSerializer(misc).data, status=status.HTTP_200_OK)\n\n\n# Products #\n\nclass ProductsView(generics.ListAPIView):\n queryset = Product.objects.filter(old=False)\n serializer_class = ProductSerializer\n\n\nclass ProductDetailView(generics.RetrieveAPIView):\n queryset = Product.objects.all()\n serializer_class = ProductSerializer\n\n\n# Auth #\n\nclass WithUserIdTokenProviderView(ObtainAuthToken):\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n token, created = Token.objects.get_or_create(user=user)\n return Response({\n 'token': token.key,\n 'id': user.id\n })\n\n\n# Parties #\n\nclass PartiesView(generics.ListCreateAPIView):\n queryset = Party.objects.all()\n serializer_class = PartySerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsLeaderOfParty]\n\n def get_queryset(self):\n queryset = Party.objects.all()\n user = self.request.query_params.get(\"for\", None)\n if user is not None:\n queryset = queryset.filter(members__in=[user])\n return queryset\n\n def create(self, request, *args, **kwargs):\n user = request.user\n party = Party.objects.create(leader=user, status=\"IN PROGRESS\")\n party.members.add(user)\n 
serializer = self.get_serializer(party)\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)\n\n\nclass PartyDetailView(generics.RetrieveAPIView):\n queryset = Party.objects.all()\n serializer_class = PartySerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsLeaderOfParty]\n\n def post(self, request, *args, **kwargs):\n party = self.get_object()\n party.status = \"FINISHED\"\n party.end_date = datetime.now()\n party.save()\n serializer = self.get_serializer(party)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\nclass PartyMembersUpdateView(generics.GenericAPIView):\n queryset = Party.objects.all()\n serializer_class = MemberOperationSerializer\n permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsMemberOfParty]\n\n def get(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = PartySerializer(instance)\n return Response(serializer.data['members'])\n\n def post(self, request, *args, **kwargs):\n queryset = self.get_object()\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n to_update = User.objects.get(id=serializer.data['user'])\n except User.DoesNotExist:\n return Response({\"detail\": \"User not found\"}, status=status.HTTP_404_NOT_FOUND)\n\n if queryset.status == \"FINISHED\":\n return Response({\n \"detail\": \"Can't add or remove members from a finished party\",\n \"members\": PartySerializer(queryset).data['members'],\n })\n\n members = queryset.members.all()\n\n if serializer.data['action'] == \"REMOVE\":\n total_to_pay = CommandContribution.objects.filter(user=to_update).filter(command__party=queryset) \\\n .aggregate(models.Sum(\"part\"))[\"part__sum\"]\n\n if total_to_pay is not None:\n return Response({\n \"detail\": \"Can't remove members needing to pay something\",\n \"members\": PartySerializer(queryset).data['members'],\n }, status=status.HTTP_400_BAD_REQUEST)\n if to_update not in members:\n return Response({\n \"detail\": \"This user in not a member of the party\",\n \"members\": PartySerializer(queryset).data['members'],\n }, status=status.HTTP_400_BAD_REQUEST)\n\n if to_update == queryset.leader:\n return Response({\n \"detail\": \"Can't remove leader from party's members\",\n \"members\": PartySerializer(queryset).data['members'],\n }, status=status.HTTP_400_BAD_REQUEST)\n\n queryset.members.remove(to_update)\n\n elif serializer.data['action'] == \"ADD\":\n\n if to_update.id not in request.user.usermisc.friends:\n return Response({\n \"detail\": \"Can't add an user who isn't your friend\",\n \"members\": PartySerializer(queryset).data['members'],\n }, status=status.HTTP_400_BAD_REQUEST)\n\n if to_update in members:\n return Response({\n \"detail\": \"Can't add a member already in the party\",\n \"members\": PartySerializer(queryset).data['members'],\n }, status=status.HTTP_400_BAD_REQUEST)\n\n queryset.members.add(to_update)\n\n serializer = PartySerializer(queryset)\n return Response(serializer.data['members'], status=status.HTTP_200_OK)\n\n\n# Commands\n\nclass CommandsView(generics.ListAPIView):\n queryset = Command.objects.all()\n serializer_class = CommandSerializer\n\n\nclass CommandDetailView(generics.RetrieveUpdateAPIView):\n queryset = Command.objects.all()\n serializer_class = CommandSerializer\n\n\nclass PartyCommandsView(generics.RetrieveAPIView):\n queryset = Party\n serializer_class = CommandSerializer\n permission_classes = 
[permissions.IsAuthenticatedOrReadOnly, IsMemberOfParty]\n\n def get(self, request, *args, **kwargs):\n party = self.get_object()\n serializer = self.get_serializer([x for x in party.commands.all()], many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n def post(self, request, *args, **kwargs):\n party = self.get_object()\n\n if party.status == \"FINISHED\":\n return Response({\"detail\": \"You can't add a command when the party is finished\"},\n status=status.HTTP_400_BAD_REQUEST)\n\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n contributions = [self.create_contribution(c) for c in serializer.data[\"contributions\"]]\n command = Command(\n author=request.user,\n product=get_object_or_404(Product, id=serializer.data[\"product\"]),\n )\n command.save()\n for c in contributions:\n c.command = command\n c.save()\n party.commands.add(command)\n party.save()\n serializer = self.get_serializer(command)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n @staticmethod\n def create_contribution(contribution_data):\n data = {\n \"product\": get_object_or_404(Product, id=contribution_data[\"product\"]),\n \"user\": None if contribution_data[\"user\"] is None else get_object_or_404(User,\n id=contribution_data[\"user\"]),\n \"part\": contribution_data[\"part\"],\n }\n return CommandContribution(**data)\n","repo_name":"BrokenSwing/DestinationTapas","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"3944272257","text":"import cv2\nimport glob\nimport sys\nimport os\nimport imutils\n\nf_name_lst = []\nangles = [0, 90, 180, 270]\nscale = 1.0\nstart = 0\n\ndef f_dir(path):\n global f_name_lst\n f_name_lst = glob.glob(path)\n\ndef counter():\n global start\n start += 1\n print(f'Przetwarzanie: {start}/{len(f_name_lst) * 4}')\n\ndef change(path, size):\n for file in range(len(f_name_lst)):\n img = cv2.imread(f_name_lst[file], 1)\n (h, w) = img.shape[:2]\n for angle in angles:\n rotated = imutils.rotate_bound(img, angle)\n if w > h:\n resized = cv2.resize(rotated, (size, round(h/w*size)))\n if angle == 90:\n cv2.imwrite(f\"{path}//{str(file)}a.png\", resized)\n counter()\n elif angle == 180:\n cv2.imwrite(f\"{path}//{str(file)}b.png\", resized)\n counter()\n elif angle == 270:\n cv2.imwrite(f\"{path}//{str(file)}c.png\", resized)\n counter()\n else:\n cv2.imwrite(f\"{path}//{str(file)}.png\", resized)\n counter()\n else:\n resized = cv2.resize(rotated, (round(w/h*size), size))\n if angle == 90:\n cv2.imwrite(f\"{path}//{str(file)}a.png\", resized)\n counter()\n elif angle == 180:\n cv2.imwrite(f\"{path}//{str(file)}b.png\", resized)\n counter()\n elif angle == 270:\n cv2.imwrite(f\"{path}//{str(file)}c.png\", resized)\n counter()\n else:\n cv2.imwrite(f\"{path}//{str(file)}.png\", resized)\n counter()\n\ndef main():\n global start\n run = True\n while run:\n start = 0\n user_input = int(input(\"1. Start\\n2. Wyjście\\n\"))\n if user_input == 1:\n path = input(\"Podaj ścieżkę do folderu z plikami: \")\n path_resized = input(\"Podaj ścieżkę do folderu zapisu: \")\n resized_size = int(input(\"Podaj porządaną wartość długośći krawędzi zdjęcia: \"))\n good_path = path + '\\*.*'\n print(f'\\nOtwarty folder: {path}\\n')\n f_dir(good_path)\n change(path_resized, resized_size)\n print(f'\\nZapis plików do folderu: {path_resized}\\n')\n elif user_input == 2:\n run = False\n sys.exit(\"BYE!\")\n else:\n print('Coś poszło nie tak...')\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n","repo_name":"czarnowskiadam/Resizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"2669919895","text":"def fact(n):\r\n if(n<=1):return 1;\r\n return n * fact(n-1)\r\n\r\nd={}\r\n\r\nfor i in range(0,10):\r\n d[str(i)]=fact(i)\r\n\r\nn = (input(\"enter a number:\"))\r\nsum=0\r\nfor i in range(len(n)):\r\n sum=sum+d[n[i]]\r\nif(sum==int(n)):\r\n print(\"strong number\")\r\nelse:\r\n print(\"not strong number\")","repo_name":"nikhil-soni-32/https-github.com-nikhil-soni-pythonprogram-CT-32","sub_path":"26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"9510409663","text":"def loetleFilmid(zanr):\n fail=open(\"filmid.txt\")\n filmid=[]\n for rida in fail:\n rida=rida.strip()\n rida=rida.split(\" - \")\n if zanr in rida:\n filmid.append(rida[0])\n fail.close()\n return filmid\ndef lisaFilm(nimi, zanr):\n fail=open(\"filmid.txt\", \"a\")\n fail.write(\"\\n\" + nimi + \" - \" + zanr)\n fail.close()\ndef kustutaFilm(nimi):\n fail=open(\"filmid.txt\")\n read=fail.readlines()\n fail.close()\n fail2=open(\"filmid.txt\", \"w\")\n for rida in read:\n rida=rida.strip()\n rida=rida.split(\" - \")\n if rida[0]!= nimi:\n fail2.write(str(rida[0] + \" - \" + rida[1] + \"\\n\"))\n fail2.close()\n","repo_name":"ArR4e/DSProject","sub_path":"processed/K08/S111/film.py","file_name":"film.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"17647312561","text":"\"\"\"\r\nCreated by : Ramprasad Ingle\r\nProgram:- Reveresed a given interger number with forloop, stack, recursion etc\r\nUsage: reversed a given number\r\n\"\"\"\r\ndef reverse_num(num):\r\n rev = 0\r\n\r\n while(num>0):\r\n digit = num %10\r\n rev = rev*10 + digit\r\n\r\n num = num//10\r\n return rev \r\n\r\ndef main():\r\n print(\"Entrer the number : \")\r\n num = int(input())\r\n rev = reverse_num(num)\r\n print(\"Number after reversed : \", rev)\r\n\r\nif __name__==\"__main__\":\r\n main()","repo_name":"ingle3889/Study_python","sub_path":"Reversed_number.py","file_name":"Reversed_number.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"16972413170","text":"# Dependencies\nimport os\nimport csv\n\n# Import file\ncsvpath = os.path.join(\".\",\"Resources\", \"electiondata.csv\")\n\n# Define auxiliary lists\ntotalcandidatevotes = []\nuniquecandidatelist = []\n\n# Initialize votes per candidate\nkhan_votes = 0\ncorrey_votes = 0\nli_votes = 0\notooley_votes = 0\n\n# Open file and read it\nwith open(csvpath, newline='') as election:\n reader = csv.reader(election, delimiter=',')\n csv_header = next(reader) \n\n # Get unique candidates and a list with all votes\n # A complete list of candidates who received votes\n # The total number of votes cast \n\n for row in reader:\n totalcandidatevotes.append(row[2])\n if row[2] not in uniquecandidatelist:\n uniquecandidatelist.append(row[2])\n \n #Get total votes for each candidate \n if row[2] == 'Khan': khanvotes += 1\n elif row[2] == 'Correy': correyvotes += 1\n elif row[2] == 'Li': livotes += 1\n else: otooleyvotes += 1\n \n # Calculate\n # The total number of votes each candidate won\n # The percentage of votes each candidate won\n totalvotes = len(total_candidate_votes)\n votespercandidate = [khanvotes, correyvotes, livotes, otooleyvotes]\n percentagevotes = [(div / totalvotes)*100 for div in votespercandidate]\n \n # Get the winner of the election based on popular vote.\n max_votes = max(votes_per_candidate) \n winner = uniquecandidatelist[ votespercandidate.index(maxvotes) ] \n\n # PRINT results to Terminal\n report = ( '\\n'\n 'Election Results\\n'\n '---------------------------\\n'\n f'Total Votes: {total_votes}\\n'\n f'Khan: {votespercandidate[0]} {percentagevotes[0]:.3f} %\\n'\n f'Correy: {votespercandidate[1]} {percentagevotes[1]:.3f} %\\n'\n f'Li: {votespercandidate[2]} {percentagevotes[2]:.3f} %\\n'\n f\"O'Tooley: {votespercandidate[3]} {percentagevotes[3]:.3f} %\\n\"\n '---------------------------\\n'\n f'Winner is: {winner}'\n )\n print(report)\n\n\n# Define an Output path and a filename for that output.\noutputfile = os.path.join(\".\", \"Analysis\", \"electionresults.txt\")\n\n\nwith open(outputfile,\"w\") as file:\n \n # Write methods to print to Financial Analysis.\n file.write(report)","repo_name":"criseza/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"14194243045","text":"class node:\n def __init__(self,value):\n self.data=value\n self.next=None\n self.prev=None\n \nclass ll:\n def __init__(self):\n self.head=None\n \n def insert(self,head,value):\n if head==None:\n head=node(value)\n else:\n head.next=self.insert(head.next,value)\n head.next.prev=head\n return head\n \n def traverse(self):\n temp=self.head\n while temp:\n print(temp.data)\n temp=temp.next\n\n def reverse(self):\n temp=self.head\n while temp.next:\n temp=temp.next\n while temp:\n print(temp.data)\n temp=temp.prev\n\n def delete(self,head,value):\n if head.data==value:\n return head.next\n else:\n head.next=self.delete(head.next,value)\n head.next.prev=head\n return head\n\n'''obj=ll()\nwhile True:\n n=int(input(\"1.insert 2.traverse 3.delete 4.reverse : \"))\n if n==1:\n obj.head=obj.insert(obj.head,int(input(\"enter the value :\")))\n elif n==2:\n obj.traverse()\n elif n==3:\n obj.head=obj.delete(obj.head,int(input(\"enter the value to delete :\")))\n elif n==4:\n obj.reverse() '''\n\ndef mergeSort(arr): \n\tif len(arr) >1: \n\t\tmid = len(arr)//2 # Finding the mid of the array \n\t\tL = arr[:mid] # Dividing the array elements \n\t\tR = arr[mid:] # into 2 halves \n\n\t\tmergeSort(L) # Sorting the first half \n\t\tmergeSort(R) # Sorting the second half \n\n\t\ti = j = k = 0\n\t\t\n\t\t# Copy data to temp arrays L[] and R[] \n\t\twhile i < len(L) and j < len(R): \n\t\t\tif L[i] < R[j]: \n\t\t\t\tarr[k] = L[i] \n\t\t\t\ti+= 1\n\t\t\telse: \n\t\t\t\tarr[k] = R[j] \n\t\t\t\tj+= 1\n\t\t\tk+= 1\n\t\t\n\t\t# Checking if any element was left \n\t\twhile i < len(L): \n\t\t\tarr[k] = L[i] \n\t\t\ti+= 1\n\t\t\tk+= 1\n\t\t\n\t\twhile j < len(R): \n\t\t\tarr[k] = R[j] \n\t\t\tj+= 1\n\t\t\tk+= 1\n# nlist=[14,46,43,27,57,41,45,21,70]\n# mergeSort(nlist)\n# print(nlist)\n\ndef partition(arr,low,high):\n i=low-1\n pivot=arr[high]\n for j in range(low,high):\n if arr[j]0 and sptset[v]==False and dist[v]>dist[last]+self.graph[last][v]:\n dist[v]=dist[last]+self.graph[last][v]\n\n print(dist)\n\ng = graph(9) \ng.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0], \n\t\t[4, 0, 8, 0, 0, 0, 0, 11, 0], \n\t\t[0, 8, 0, 7, 0, 4, 0, 0, 2], \n\t\t[0, 0, 7, 0, 9, 14, 0, 0, 0], \n\t\t[0, 0, 0, 9, 0, 10, 0, 0, 0], \n\t\t[0, 0, 4, 14, 10, 0, 2, 0, 0], \n\t\t[0, 0, 0, 0, 0, 2, 0, 1, 6], \n\t\t[8, 11, 0, 0, 0, 0, 1, 0, 7], \n\t\t[0, 0, 2, 0, 0, 0, 6, 7, 0] \n\t\t]; \n\ng.dijkstra(0);","repo_name":"shahid-zain/Python-codes","sub_path":"ds.py","file_name":"ds.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"23417849457","text":"from .base import basis,MAXPRINT\nfrom ._reshape_subsys import _lattice_partial_trace_pure,_lattice_reshape_pure\nfrom ._reshape_subsys import _lattice_partial_trace_mixed,_lattice_reshape_mixed\nfrom ._reshape_subsys import _lattice_partial_trace_sparse_pure,_lattice_reshape_sparse_pure\nimport numpy as _np\nimport scipy.sparse as _sp\nfrom numpy.linalg import norm,eigvalsh,svd\nfrom scipy.sparse.linalg import eigsh\nimport warnings\n\n_dtypes={\"f\":_np.float32,\"d\":_np.float64,\"F\":_np.complex64,\"D\":_np.complex128}\n\nclass lattice_basis(basis):\n\tdef __init__(self):\n\t\tself._Ns = 0\n\t\tself._basis = _np.asarray([])\n\t\tself._operators = \"no operators for base.\"\n\t\tself._unique_me = True\n\t\tself._check_symm = None\n\t\tself._check_pcon = None\n\t\tif self.__class__.__name__ == 'lattice_basis':\n\t\t\traise ValueError(\"This class is not intended\"\n\t\t\t\t\t\t\t \" to be instantiated directly.\")\n\n\tdef __getitem__(self,key):\n\t\treturn self._basis.__getitem__(key)\n\n\tdef __iter__(self):\n\t\treturn self._basis.__iter__()\n\n\tdef index(self,s):\n\t\t\"\"\"Finds the index of user-defined Fock state in any lattice basis.\n\n\t\tNotes\n\t\t-----\n\t\tParticularly useful for defining initial Fock states through a unit vector in the direction specified\n\t\tby `index()`. \n\n\t\tParameters\n\t\t-----------\n\t\ts : {str, int}\n\t\t\tDefines the Fock state with number of particles (spins) per site in underlying lattice `basis`.\n\n\t\tReturns\n\t\t--------\n\t\tint\n\t\t\tPosition of the Fock state in the lattice basis.\n\n\t\tExamples\n\t\t--------\n\t\t\n\t\t>>> i0 = index(\"111000\") # pick state from basis set\n\t\t>>> print(basis)\n\t\t>>> print(i0)\n\t\t>>> psi = np.zeros(basis.Ns,dtype=np.float64)\n\t\t>>> psi[i0] = 1.0 # define state corresponding to the string \"111000\"\n\n\t\t\"\"\"\n\t\tif type(s) is int:\n\t\t\tpass\n\t\telif type(s) is str:\n\t\t\ts = int(s,self.sps)\n\t\telse:\n\t\t\traise ValueError(\"s must be integer or state\")\n\n\t\tindx = _np.argwhere(self._basis == s)\n\n\t\tif len(indx) != 0:\n\t\t\treturn _np.squeeze(indx)\n\t\telse:\n\t\t\traise ValueError(\"s must be representive state in basis. \")\n\n\tdef _partial_trace(self,state,sub_sys_A=None,subsys_ordering=True,return_rdm=\"A\",enforce_pure=False,sparse=False):\n\t\t\"\"\"Calculates reduced density matrix, through a partial trace of a quantum state in a lattice `basis`.\n\n\t\tParameters\n\t\t-----------\n\t\tstate : obj\n\t\t\tState of the quantum system. Can be either one of:\n\n\t\t\t\t* numpy.ndarray [shape (Ns,)]: pure state (default).\n\t\t\t\t* numpy.ndarray [shape (Ns,Ns)]: density matrix (DM).\n\t\t\t\t* dict('V_states',V_states) [shape (Ns,Nvecs)]: collection of `Nvecs` states stored in the columns of `V_states`.\n\t\tsub_sys_A : tuple/list, optional\n\t\t\tDefines the sites contained in subsystem A [by python convention the first site of the chain is labelled j=0].\n\t\t\tDefault is `tuple(range(N//2))` with `N` the number of lattice sites.\n\t\treturn_rdm : str, optional\n\t\t\tToggles returning the reduced DM. Can be tierh one of:\n\n\t\t\t\t* \"A\": returns reduced DM of subsystem A.\n\t\t\t\t* \"B\": returns reduced DM of subsystem B.\n\t\t\t\t* \"both\": returns reduced DM of both A and B subsystems.\n\t\tsubsys_ordering : bool, optional\n\t\t\tWhether or not to reorder the sites in `sub_sys_A` in ascending order. 
Default is `True`.\n\t\tenforce_pure : bool, optional\n\t\t\tWhether or not to assume `state` is a collection of pure states or a mixed density matrix, if\n\t\t\tit is a square array. Default is `False`.\n\t\tsparse : bool, optional\n\t\t\tWhether or not to return a sparse DM. Default is `False`.\n\n\t\tReturns\n\t\t--------\n\t\tnumpy.ndarray\n\t\t\tDensity matrix associated with `state`. Depends on optional arguments.\n\n\t\tExamples\n\t\t--------\n\n\t\t>>> partial_trace(state,sub_sys_A=tuple(range(basis.N//2),return_rdm=\"A\",enforce_pure=False,sparse=False,subsys_ordering=True)\n\n\t\t\"\"\"\n\n\t\tif sub_sys_A is None:\n\t\t\tsub_sys_A = tuple(range(self.N//2))\n\t\telif len(sub_sys_A)==self.N:\n\t\t\traise ValueError(\"Size of subsystem must be strictly smaller than total system size N!\")\n\n\n\t\tN_A = len(sub_sys_A)\n\t\tN_B = self.N - N_A\n\n\t\tif sub_sys_A is None:\n\t\t\tsub_sys_A = tuple(range(self.N//2))\n\n\t\tsub_sys_A = tuple(sub_sys_A)\n\n\t\tif any(not _np.issubdtype(type(s),_np.integer) for s in sub_sys_A):\n\t\t\traise ValueError(\"sub_sys_A must iterable of integers with values in {0,...,N-1}!\")\n\n\t\tif any(s < 0 or s > self.N for s in sub_sys_A):\n\t\t\traise ValueError(\"sub_sys_A must iterable of integers with values in {0,...,N-1}\")\n\n\t\tdoubles = tuple(s for s in sub_sys_A if sub_sys_A.count(s) > 1)\n\t\tif len(doubles) > 0:\n\t\t\traise ValueError(\"sub_sys_A contains repeated values: {}\".format(doubles))\n\n\t\tif return_rdm not in set([\"A\",\"B\",\"both\"]):\n\t\t\traise ValueError(\"return_rdm must be: 'A','B','both' or None\")\n\n\t\tif subsys_ordering:\n\t\t\tsub_sys_A = sorted(sub_sys_A)\n\n\t\tsps = self.sps\n\t\tN = self.N\n\n\t\tif not hasattr(state,\"shape\"):\n\t\t\tstate = _np.asanyarray(state)\n\t\t\tstate = state.squeeze() # avoids artificial higher-dim reps of ndarray\n\n\n\t\tif state.shape[0] != self.Ns:\n\t\t\traise ValueError(\"state shape {0} not compatible with Ns={1}\".format(state.shape,self._Ns))\n\n\t\tif _sp.issparse(state) or sparse:\n\t\t\tstate=self.get_vec(state,sparse=True).T\n\t\t\t\n\t\t\tif state.shape[0] == 1:\n\t\t\t\t# sparse_pure partial trace\n\t\t\t\trdm_A,rdm_B = _lattice_partial_trace_sparse_pure(state,sub_sys_A,N,sps,return_rdm=return_rdm)\n\t\t\telse:\n\t\t\t\tif state.shape[0]!=state.shape[1] or enforce_pure:\n\t\t\t\t\t# vectorize sparse_pure partial trace \n\t\t\t\t\tstate = state.tocsr()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tstate_gen = (_lattice_partial_trace_sparse_pure(state.getrow(i),sub_sys_A,N,sps,return_rdm=return_rdm) for i in xrange(state.shape[0]))\n\t\t\t\t\texcept NameError:\n\t\t\t\t\t\tstate_gen = (_lattice_partial_trace_sparse_pure(state.getrow(i),sub_sys_A,N,sps,return_rdm=return_rdm) for i in range(state.shape[0]))\n\n\t\t\t\t\tleft,right = zip(*state_gen)\n\n\t\t\t\t\trdm_A,rdm_B = _np.stack(left),_np.stack(right)\n\n\t\t\t\t\tif any(rdm is None for rdm in rdm_A):\n\t\t\t\t\t\trdm_A = None\n\n\t\t\t\t\tif any(rdm is None for rdm in rdm_B):\n\t\t\t\t\t\trdm_B = None\n\t\t\t\telse: \n\t\t\t\t\traise ValueError(\"Expecting a dense array for mixed states.\")\n\n\t\telse:\n\t\t\tif state.ndim==1:\n\t\t\t\t# calculate full H-space representation of state\n\t\t\t\tstate=self.get_vec(state,sparse=False)\n\t\t\t\trdm_A,rdm_B = _lattice_partial_trace_pure(state.T,sub_sys_A,N,sps,return_rdm=return_rdm)\n\n\t\t\telif state.ndim==2: \n\t\t\t\tif state.shape[0]!=state.shape[1] or enforce_pure:\n\t\t\t\t\t# calculate full H-space representation of 
state\n\t\t\t\t\tstate=self.get_vec(state,sparse=False)\n\t\t\t\t\trdm_A,rdm_B = _lattice_partial_trace_pure(state.T,sub_sys_A,N,sps,return_rdm=return_rdm)\n\n\t\t\t\telse: \n\t\t\t\t\tproj = self.get_proj(_dtypes[state.dtype.char])\n\t\t\t\t\tproj_state = proj*state*proj.H\n\n\t\t\t\t\tshape0 = proj_state.shape\n\t\t\t\t\tproj_state = proj_state.reshape((1,)+shape0)\t\t\t\t\t\n\n\t\t\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=return_rdm)\n\n\t\t\telif state.ndim==3: #3D DM \n\t\t\t\tproj = self.get_proj(_dtypes[state.dtype.char])\n\t\t\t\tstate = state.transpose((2,0,1))\n\t\t\t\t\n\t\t\t\tNs_full = proj.shape[0]\n\t\t\t\tn_states = state.shape[0]\n\t\t\t\t\n\t\t\t\tgen = (proj*s*proj.H for s in state[:])\n\n\t\t\t\tproj_state = _np.zeros((n_states,Ns_full,Ns_full),dtype=_dtypes[state.dtype.char])\n\t\t\t\t\n\t\t\t\tfor i,s in enumerate(gen):\n\t\t\t\t\tproj_state[i,...] += s[...]\n\n\t\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=return_rdm)\n\t\t\telse:\n\t\t\t\traise ValueError(\"state must have ndim < 4\")\n\n\t\tif return_rdm == \"A\":\n\t\t\treturn rdm_A\n\t\telif return_rdm == \"B\":\n\t\t\treturn rdm_B\n\t\telse:\n\t\t\treturn rdm_A,rdm_B\n\n\tdef _ent_entropy(self,state,sub_sys_A=None,density=True,subsys_ordering=True,return_rdm=None,enforce_pure=False,return_rdm_EVs=False,sparse=False,alpha=1.0,sparse_diag=True,maxiter=None):\n\t\t\"\"\"Calculates entanglement entropy of subsystem A and the corresponding reduced density matrix\n\n\t\t.. math::\n\t\t\tS_\\\\mathrm{ent}(\\\\alpha) = \\\\frac{1}{N}\\\\frac{1}{1-\\\\alpha}\\\\log \\\\mathrm{tr}_{A} \\\\left( \\\\mathrm{tr}_{A^c} \\\\vert\\\\psi\\\\rangle\\\\langle\\\\psi\\\\vert \\\\right)^\\\\alpha \n\n\t\twhere the normalization :math:`N` can be switched on and off using the optional argument `density`.\n\t\t\t\n\t\t**Note:** The logarithm used is the natural logarithm (base e).\n\n\t\tNotes\n\t\t-----\n\t\tAlgorithm is based on both partial tracing and sigular value decomposition (SVD), optimised for speed.\n\n\t\tParameters\n\t\t-----------\n\t\tstate : obj\n\t\t\tState of the quantum system. Can be either one of:\n\n\t\t\t\t* numpy.ndarray [shape (Ns,)]: pure state (default).\n\t\t\t\t* numpy.ndarray [shape (Ns,Ns)]: density matrix (DM).\n\t\t\t\t* dict('V_states',V_states) [shape (Ns,Nvecs)]: collection of `Nvecs` states stored in the columns of `V_states`.\n\t\tsub_sys_A : tuple/list, optional\n\t\t\tDefines the sites contained in subsystem A [by python convention the first site of the chain is labelled j=0].\n\t\t\tDefault is `tuple(range(N//2))` with `N` the number of lattice sites.\n\t\tdensity : bool, optional\n\t\t\tToggles whether to return entanglement entropy normalized by the number of sites in the subsystem.\n\t\treturn_rdm : str, optional\n\t\t\tToggles returning the reduced DM. Can be tierh one of:\n\n\t\t\t\t* \"A\": returns reduced DM of subsystem A.\n\t\t\t\t* \"B\": returns reduced DM of subsystem B.\n\t\t\t\t* \"both\": returns reduced DM of both A and B subsystems.\n\t\tenforce_pure : bool, optional\n\t\t\tWhether or not to assume `state` is a collection of pure states or a mixed density matrix, if\n\t\t\tit is a square array. Default is `False`.\n\t\tsubsys_ordering : bool, optional\n\t\t\tWhether or not to reorder the sites in `sub_sys_A` in ascending order. Default is `True`.\n\t\tsparse : bool, optional\n\t\t\tWhether or not to return a sparse DM. 
Default is `False`.\n\t\treturn_rdm_EVs : bool, optional \n\t\t\tWhether or not to return the eigenvalues of rthe educed DM. If `return_rdm` is specified,\n\t\t\tthe eigenvalues of the corresponding DM are returned. If `return_rdm` is NOT specified, \n\t\t\tthe spectrum of `rdm_A` is returned by default. Default is `False`.\n\t\talpha : float, optional\n\t\t\tRenyi :math:`\\\\alpha` parameter for the entanglement entropy. Default is :math:`\\\\alpha=1`.\n\n\t\t\t\n\t\tsparse_diag : bool, optional\n\t\t\tWhen `sparse=True`, this flag enforces the use of\n\t\t\t`scipy.sparse.linalg.eigsh() `_\n\t\t\tto calculate the eigenvaues of the reduced DM.\n\t\tmaxiter : int, optional\n\t\t\tSpecifies the number of iterations for Lanczos diagonalisation. Look up documentation for \n\t\t\t`scipy.sparse.linalg.eigsh() `_.\n\n\t\tReturns\n\t\t--------\n\t\tdict\n\t\t\tDictionary with following keys, depending on input parameters:\n\t\t\t\t* \"Sent_A\": entanglement entropy of subsystem A (default).\n\t\t\t\t* \"Sent_B\": entanglement entropy of subsystem B.\n\t\t\t\t* \"p_A\": singular values of reduced DM of subsystem A (default).\n\t\t\t\t* \"p_B\": singular values of reduced DM of subsystem B.\n\t\t\t\t* \"rdm_A\": reduced DM of subsystem A.\n\t\t\t\t* \"rdm_B\": reduced DM of subsystem B.\n\n\t\tExamples\n\t\t--------\n\n\t\t>>> ent_entropy(state,sub_sys_A=[0,3,4,7],return_rdm=\"A\",enforce_pure=False,return_rdm_EVs=False,\n\t\t>>>\t\t\t\tsparse=False,alpha=1.0,sparse_diag=True,subsys_ordering=True)\n\n\t\t\"\"\"\n\t\tif sub_sys_A is None:\n\t\t\tsub_sys_A = list(range(self.N//2))\n\t\telse:\n\t\t\tsub_sys_A = list(sub_sys_A)\n\t\n\t\tif len(sub_sys_A)>=self.N:\n\t\t\traise ValueError(\"Size of subsystem must be strictly smaller than total system size N!\")\n\n\t\tN_A = len(sub_sys_A)\n\t\tN_B = self.N - N_A\n\n\t\tif any(not _np.issubdtype(type(s),_np.integer) for s in sub_sys_A):\n\t\t\traise ValueError(\"sub_sys_A must iterable of integers with values in {0,...,N-1}!\")\n\n\t\tif any(s < 0 or s > self.N for s in sub_sys_A):\n\t\t\traise ValueError(\"sub_sys_A must iterable of integers with values in {0,...,N-1}\")\n\n\t\tdoubles = tuple(s for s in set(sub_sys_A) if sub_sys_A.count(s) > 1)\n\t\tif len(doubles) > 0:\n\t\t\traise ValueError(\"sub_sys_A contains repeated values: {}\".format(doubles))\n\n\t\tif return_rdm not in set([\"A\",\"B\",\"both\",None]):\n\t\t\traise ValueError(\"return_rdm must be: 'A','B','both' or None\")\n\n\t\tif subsys_ordering:\n\t\t\tsub_sys_A = sorted(sub_sys_A)\n\n\t\tsps = self.sps\n\t\tN = self.N\n\n\t\tif not hasattr(state,\"shape\"):\n\t\t\tstate = _np.asanyarray(state)\n\t\t\tstate = state.squeeze() # avoids artificial higher-dim reps of ndarray\n\n\n\t\tif state.shape[0] != self.Ns:\n\t\t\traise ValueError(\"state shape {0} not compatible with Ns={1}\".format(state.shape,self._Ns))\n\n\t\t\n\n\t\tpure=True # set pure state parameter to True\n\t\tif _sp.issparse(state) or sparse:\n\t\t\tif state.ndim == 1:\n\t\t\t\tstate = state.reshape((-1,1))\n\n\t\t\tsparse=True # set sparse flag to True\n\t\t\tif state.shape[1] == 1:\n\t\t\t\tp, rdm_A, rdm_B = self._p_pure_sparse(state,sub_sys_A,return_rdm=return_rdm,sparse_diag=sparse_diag,maxiter=maxiter)\n\t\t\telse:\n\t\t\t\tif state.shape[0]!=state.shape[1] or enforce_pure:\n\t\t\t\t\tp, rdm_A, rdm_B = self._p_pure_sparse(state,sub_sys_A,return_rdm=return_rdm)\n\t\t\t\telse: \n\t\t\t\t\traise ValueError(\"Expecting a dense array for mixed states.\")\n\t\t\t\t\t\n\t\telse:\n\t\t\tif state.ndim==1:\n\t\t\t\tstate = 
state.reshape((-1,1))\n\t\t\t\tp, rdm_A, rdm_B = self._p_pure(state,sub_sys_A,return_rdm=return_rdm)\n\t\t\t\n\t\t\telif state.ndim==2: \n\n\t\t\t\tif state.shape[0]!=state.shape[1] or enforce_pure:\n\t\t\t\t\tp, rdm_A, rdm_B = self._p_pure(state,sub_sys_A,return_rdm=return_rdm)\n\t\t\t\telse: # 2D mixed\n\t\t\t\t\tpure=False\n\t\t\t\t\t\"\"\"\n\t\t\t\t\t# check if DM's are positive definite\n\t\t\t\t\ttry:\n\t\t\t\t\t\t_np.linalg.cholesky(state)\n\t\t\t\t\texcept:\n\t\t\t\t\t\traise ValueError(\"LinAlgError: (collection of) DM(s) not positive definite\")\n\t\t\t\t\t# check if trace of DM is unity\n\t\t\t\t\tif _np.any( abs(_np.trace(state) - 1.0 > 1E3*_np.finfo(state.dtype).eps) ):\n\t\t\t\t\t\traise ValueError(\"Expecting eigenvalues of DM to sum to unity!\")\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tshape0 = state.shape\n\t\t\t\t\tstate = state.reshape(shape0+(1,))\n\t\t\t\t\tp_A, p_B, rdm_A, rdm_B = self._p_mixed(state,sub_sys_A,return_rdm=return_rdm)\n\t\t\t\t\n\t\t\telif state.ndim==3: #3D DM \n\t\t\t\tpure=False\n\n\t\t\t\t\"\"\"\n\t\t\t\t# check if DM's are positive definite\n\t\t\t\ttry:\n\t\t\t\t\t_np.linalg.cholesky(state)\n\t\t\t\texcept:\n\t\t\t\t\traise ValueError(\"LinAlgError: (collection of) DM(s) not positive definite\")\n\n\t\t\t\t# check if trace of DM is unity\n\t\t\t\tif _np.any( abs(_np.trace(state, axis1=1,axis2=2) - 1.0 > 1E3*_np.finfo(state.dtype).eps) ):\n\t\t\t\t\traise ValueError(\"Expecting eigenvalues of DM to sum to unity!\")\n\t\t\t\t\"\"\"\n\t\t\t\tp_A, p_B, rdm_A, rdm_B = self._p_mixed(state,sub_sys_A,return_rdm=return_rdm)\n\n\t\t\telse:\n\t\t\t\traise ValueError(\"state must have ndim < 4\")\n\n\t\t\n\n\t\tif pure:\n\t\t\tp_A, p_B = p, p\n\n\t\tSent_A, Sent_B = None, None\n\t\tif alpha == 1.0:\n\t\t\tif p_A is not None:\n\t\t\t\tSent_A = - _np.nansum((p_A * _np.log(p_A)),axis=-1)\n\t\t\t\tif density: Sent_A /= N_A\n\t\t\tif p_B is not None:\n\t\t\t\tSent_B = - _np.nansum((p_B * _np.log(p_B)),axis=-1)\n\t\t\t\tif density: Sent_B /= N_B\n\t\telif alpha >= 0.0:\n\t\t\tif p_A is not None:\n\t\t\t\tSent_A = _np.log(_np.nansum(_np.power(p_A,alpha),axis=-1))/(1.0-alpha)\n\t\t\t\tif density: Sent_A /= N_A\n\t\t\tif p_B is not None:\n\t\t\t\tSent_B = _np.log(_np.nansum(_np.power(p_B,alpha),axis=-1))/(1.0-alpha)\n\t\t\t\tif density: Sent_B /= N_B\n\t\telse:\n\t\t\traise ValueError(\"alpha >= 0\")\n\n\t\t# initialize variables\n\t\tvariables = [\"Sent_A\"]\n\t\tif return_rdm_EVs:\n\t\t\tvariables.append(\"p_A\")\n\n\t\tif return_rdm == \"A\":\n\t\t\tvariables.append(\"rdm_A\")\n\t\t\t\n\t\telif return_rdm == \"B\":\n\t\t\tvariables.extend([\"Sent_B\",\"rdm_B\"])\n\t\t\tif return_rdm_EVs:\n\t\t\t\tvariables.append(\"p_B\")\n\t\t\t\n\t\telif return_rdm == \"both\":\n\t\t\tvariables.extend([\"rdm_A\",\"Sent_B\",\"rdm_B\"])\n\t\t\tif return_rdm_EVs:\n\t\t\t\tvariables.extend([\"p_A\",\"p_B\"])\n\t\n\t\t# store variables to dictionary\n\t\treturn_dict = {}\n\t\tfor i in variables:\n\t\t\tif locals()[i] is not None:\n\t\t\t\tif sparse and 'rdm' in i:\n\t\t\t\t\treturn_dict[i] = locals()[i] # don't squeeze sparse matrix\n\t\t\t\telse:\n\t\t\t\t\treturn_dict[i] = _np.squeeze( locals()[i] )\n\n\t\treturn return_dict\n\n\n\n\t##### private methods\n\n\tdef _p_pure(self,state,sub_sys_A,return_rdm=None):\n\t\t\n\t\t# calculate full H-space representation of state\n\t\tstate=self.get_vec(state,sparse=False)\n\t\t# put states in rows\n\t\tstate=state.T\n\t\t# reshape state according to 
sub_sys_A\n\t\tv=_lattice_reshape_pure(state,sub_sys_A,self.N,self._sps)\n\t\t\n\t\trdm_A=None\n\t\trdm_B=None\n\n\t\t# perform SVD\t\n\t\tif return_rdm is None:\n\t\t\tlmbda = svd(v, compute_uv=False) \n\t\telse:\n\t\t\tU, lmbda, V = svd(v, full_matrices=False)\n\t\t\tif return_rdm=='A':\n\t\t\t\trdm_A = _np.einsum('...ij,...j,...kj->...ik',U,lmbda**2,U.conj() )\n\t\t\telif return_rdm=='B':\n\t\t\t\trdm_B = _np.einsum('...ji,...j,...jk->...ik',V.conj(),lmbda**2,V )\n\t\t\telif return_rdm=='both':\n\t\t\t\trdm_A = _np.einsum('...ij,...j,...kj->...ik',U,lmbda**2,U.conj() )\n\t\t\t\trdm_B = _np.einsum('...ji,...j,...jk->...ik',V.conj(),lmbda**2,V )\n\n\n\t\treturn lmbda**2 + _np.finfo(lmbda.dtype).eps, rdm_A, rdm_B\n\n\tdef _p_pure_sparse(self,state,sub_sys_A,return_rdm=None,sparse_diag=True,maxiter=None):\n\n\t\tpartial_trace_args = dict(sub_sys_A=sub_sys_A,sparse=True,enforce_pure=True)\n\n\t\tN_A=len(sub_sys_A)\n\t\tN_B=self.N-N_A\n\n\t\trdm_A=None\n\t\trdm_B=None\n\n\t\tif return_rdm is None:\n\t\t\tif N_A <= N_B:\n\t\t\t\tpartial_trace_args[\"return_rdm\"] = \"A\"\n\t\t\t\trdm = self._partial_trace(state,**partial_trace_args)\n\t\t\telse:\n\t\t\t\tpartial_trace_args[\"return_rdm\"] = \"B\"\n\t\t\t\trdm = self._partial_trace(state,**partial_trace_args)\n\n\t\telif return_rdm=='A' and N_A <= N_B:\n\t\t\tpartial_trace_args[\"return_rdm\"] = \"A\"\n\t\t\trdm_A = self._partial_trace(state,**partial_trace_args)\n\t\t\trdm = rdm_A\n\n\t\telif return_rdm=='B' and N_B <= N_A:\n\t\t\tpartial_trace_args[\"return_rdm\"] = \"B\"\n\t\t\trdm_B = self._partial_trace(state,**partial_trace_args)\n\t\t\trdm = rdm_B\n\n\t\telse:\n\t\t\tpartial_trace_args[\"return_rdm\"] = \"both\"\n\t\t\trdm_A,rdm_B = self._partial_trace(state,**partial_trace_args)\n\n\t\t\tif N_A < N_B:\n\t\t\t\trdm = rdm_A\n\t\t\telse:\n\t\t\t\trdm = rdm_B\n\n\t\tif sparse_diag and rdm.shape[0] > 16:\n\n\t\t\tdef get_p_patchy(rdm):\n\t\t\t\tn = rdm.shape[0]\n\t\t\t\tp_LM = eigsh(rdm,k=n//2+n%2,which=\"LM\",maxiter=maxiter,return_eigenvectors=False) # get upper half\n\t\t\t\tp_SM = eigsh(rdm,k=n//2,which=\"SM\",maxiter=maxiter,return_eigenvectors=False) # get lower half\n\t\t\t\tp = _np.concatenate((p_LM[::-1],p_SM)) + _np.finfo(p_LM.dtype).eps\n\t\t\t\treturn p\n\n\t\t\tif _sp.issparse(rdm):\n\t\t\t\tp = get_p_patchy(rdm)\n\t\t\t\tp = p.reshape((1,-1))\n\t\t\telse:\n\t\t\t\tp_gen = (get_p_patchy(dm) for dm in rdm[:])\n\t\t\t\tp = _np.stack(p_gen)\n\n\t\telse:\n\t\t\tif _sp.issparse(rdm):\n\t\t\t\tp = eigvalsh(rdm.todense())[::-1] + _np.finfo(rdm.dtype).eps\n\t\t\t\tp = p.reshape((1,-1))\n\t\t\telse:\n\t\t\t\tp_gen = (eigvalsh(dm.todense())[::-1] + _np.finfo(dm.dtype).eps for dm in rdm[:])\n\t\t\t\tp = _np.stack(p_gen)\n\n\t\treturn p,rdm_A,rdm_B\n\t\n\tdef _p_mixed(self,state,sub_sys_A,return_rdm=None):\n\t\t\"\"\"\n\t\tThis function calculates the eigenvalues of the reduced density matrix.\n\t\tIt will first calculate the partial trace of the full density matrix and\n\t\tthen diagonalizes it to get the eigenvalues. It will automatically choose\n\t\tthe subsystem with the smaller hilbert space to do the diagonalization in order\n\t\tto reduce the calculation time but will only return the desired reduced density\n\t\tmatrix. 
\n\t\t\"\"\"\n\t\tN = self.N\n\t\tsps = self.sps\n\n\t\tN_A = len(sub_sys_A)\n\t\tN_B = N - N_A\n\n\t\tproj = self.get_proj(_dtypes[state.dtype.char])\n\t\tstate = state.transpose((2,0,1))\n\n\t\tNs_full = proj.shape[0]\n\t\tn_states = state.shape[0]\n\t\t\n\t\tgen = (proj*s*proj.H for s in state[:])\n\n\t\tproj_state = _np.zeros((n_states,Ns_full,Ns_full),dtype=_dtypes[state.dtype.char])\n\t\t\n\t\tfor i,s in enumerate(gen):\n\t\t\tproj_state[i,...] += s[...]\t\n\n\t\trdm_A,p_A=None,None\n\t\trdm_B,p_B=None,None\n\t\t\n\t\tif return_rdm=='both':\n\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=\"both\")\n\t\t\t\n\t\t\tp_A = eigvalsh(rdm_A) + _np.finfo(rdm_A.dtype).eps\n\t\t\tp_B = eigvalsh(rdm_B) + _np.finfo(rdm_B.dtype).eps\n\n\t\telif return_rdm=='A':\n\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=\"A\")\n\t\t\tp_A = eigvalsh(rdm_A) + _np.finfo(rdm_A.dtype).eps\n\t\t\t\n\t\telif return_rdm=='B':\n\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=\"B\")\n\t\t\tp_B = eigvalsh(rdm_B) + _np.finfo(rdm_B.dtype).eps\n\n\t\telse:\n\t\t\trdm_A,rdm_B = _lattice_partial_trace_mixed(proj_state,sub_sys_A,N,sps,return_rdm=\"A\")\n\t\t\tp_A = eigvalsh(rdm_A) + _np.finfo(rdm_A.dtype).eps\n\t\t\t\n\t\t\t\n\t\treturn p_A, p_B, rdm_A, rdm_B\n\n\tdef _get__str__(self):\n\n\t\tdef get_state(b):\n\t\t\tn_space = len(str(self.sps))\n\t\t\tif self.N <= 64:\n\t\t\t\tbits = (int(b)//int(self.sps**(self.N-i-1))%self.sps for i in range(self.N))\n\t\t\t\tstate = \"|\"+(\" \".join((\"{:\"+str(n_space)+\"d}\").format(bit) for bit in bits))+\">\"\n\t\t\telse:\n\t\t\t\tleft_bits = (int(b)//int(self.sps**(self.N-i-1))%self.sps for i in range(32))\n\t\t\t\tright_bits = (int(b)//int(self.sps**(self.N-i-1))%self.sps for i in range(self.N-32,self.N,1))\n\n\t\t\t\tstr_list = [(\"{:\"+str(n_space)+\"d}\").format(bit) for bit in left_bits]\n\t\t\t\tstr_list.append(\"...\")\n\t\t\t\tstr_list.extend((\"{:\"+str(n_space)+\"d}\").format(bit) for bit in right_bits)\n\t\t\t\tstate = \"|\"+(\" \".join(str_list))+\">\"\n\n\t\t\treturn state\n\n\n\t\ttemp1 = \" {0:\"+str(len(str(self.Ns)))+\"d}. \"\n\t\tif self._Ns > MAXPRINT:\n\t\t\thalf = MAXPRINT // 2\n\t\t\tstr_list = [(temp1.format(i))+get_state(b) for i,b in zip(range(half),self._basis[:half])]\n\t\t\tstr_list.extend([(temp1.format(i))+get_state(b) for i,b in zip(range(self._Ns-half,self._Ns,1),self._basis[-half:])])\n\t\telse:\n\t\t\tstr_list = [(temp1.format(i))+get_state(b) for i,b in enumerate(self._basis)]\n\n\t\treturn tuple(str_list)\n\n\n\n","repo_name":"wenya-r/ED","sub_path":"quspin/basis/lattice.py","file_name":"lattice.py","file_ext":"py","file_size_in_byte":21024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"22676571459","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGulppy Plugin factory\n\"\"\"\nfrom pathlib import Path\nfrom typing import Generator, Callable\nfrom contextlib import contextmanager\nfrom enum import Enum\nfrom gulppy.config import GLPP_LOGGER\nfrom gulppy.core import glpp_exceptions\nfrom gulppy.core.glpp_abstract_plugin import GlppAbstractPlugin\n\n\nclass MutableModeEnum(Enum):\n    \"\"\"\n    Enumeration for sys.modules context alteration mode\n    \"\"\"\n    DEFAULT = 1\n    \"\"\"\n    Use the class specific defined mutable mode. That is the one set by the class variable IMMUTABLE_SYS_PATH_MODULE\n    \"\"\"\n    MUTABLE = 2\n    \"\"\"\n    Use a mutable mode.\n    \"\"\"\n    IMMUTABLE = 3\n    \"\"\"\n    Use an immutable mode\n    \"\"\"\n\n\n@contextmanager\ndef mutable_context(plugin_cls: GlppAbstractPlugin,\n                    mutable_mode: MutableModeEnum) -> Generator[str, None, None]:\n    \"\"\"\n    Create a context using a specific mutable mode\n    :param plugin_cls: the plugin class to use in the context\n    :param mutable_mode: the mutable mode to activate\n    :return:\n    \"\"\"\n    mutable_default_value = plugin_cls.IMMUTABLE_SYS_PATH_MODULE\n    if mutable_mode == MutableModeEnum.DEFAULT:\n        pass\n    elif mutable_mode == MutableModeEnum.IMMUTABLE:\n        plugin_cls.IMMUTABLE_SYS_PATH_MODULE = True\n    elif mutable_mode == MutableModeEnum.MUTABLE:\n        plugin_cls.IMMUTABLE_SYS_PATH_MODULE = False\n    else:\n        pass\n    yield\n    plugin_cls.IMMUTABLE_SYS_PATH_MODULE = mutable_default_value\n\n\nclass GlppPluginFactory(object):\n    GLPP_PLUGIN_REGISTRY = {}\n    # A plugin factory method\n    @classmethod\n    def create_plugin(cls,\n                      plugin_desc: str or Path,\n                      load: bool = True,\n                      mutable_mode: MutableModeEnum = MutableModeEnum.DEFAULT) -> GlppAbstractPlugin:\n        \"\"\"\n        A function to create a plugin from its description file\n        :param plugin_desc: plugin description file\n        :param load: boolean flag to load modules at creation\n        :param mutable_mode: mutable mode to use for the plugin load\n        :return: a plugin instance\n        \"\"\"\n        plugin_mode = GlppAbstractPlugin.get_plugin_mode(plugin_desc)\n        try:\n            plugin_cls = cls.GLPP_PLUGIN_REGISTRY[plugin_mode]\n        except KeyError:\n            raise glpp_exceptions.UnknownPluginMode(plugin_mode)\n        else:\n            with mutable_context(plugin_cls=plugin_cls, mutable_mode=mutable_mode):\n                return plugin_cls(plugin_desc=plugin_desc, load=load)\n\n\n    @classmethod\n    def register(cls, name: str) -> Callable:\n        \"\"\"\n        Class method to register a plugin class in the factory\n        This is aimed to be used as a decorator of plugin classes.\n\n        :param name: name of the plugin class\n        :return: the plugin class itself\n        \"\"\"\n        def inner_wrapper(wrapped_class: GlppAbstractPlugin) -> Callable:\n            if name in cls.GLPP_PLUGIN_REGISTRY:\n                GLPP_LOGGER.warning(\"Plugin class {} is already registered in the factory. \"\n                                    \"It will be overwritten.\".format(name))\n            cls.GLPP_PLUGIN_REGISTRY[name] = wrapped_class\n            return wrapped_class\n\n        return inner_wrapper\n","repo_name":"arnaudkelbert/gulppy","sub_path":"gulppy/core/glpp_plugin_factory.py","file_name":"glpp_plugin_factory.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"30625260533","text":"class Duck():\n\tdef __init__(self, input_name):\n\t\tself.hidden_name = input_name\n\t@property # attention the key word\n\tdef name(self):\n\t\tprint('inside the getter')\n\t\treturn self.hidden_name\n\n\t# attention the key word and the line 16 ~ 20\n\t@name.setter\n\tdef name(self, input_name):\n\t\tprint('inside the setter')\n\t\tself.hidden_name = input_name\n\nif __name__ == '__main__':\n\tfowl = Duck('Howard')\n\tprint('fowl.name = ', fowl.name)\n\tfowl.name = 'Donald'\n\tprint('fowl.name = ', fowl.name)\n\tprint('fowl.hidden_name = ', fowl.hidden_name)\n","repo_name":"WeigangZhu/Python","sub_path":"introduction_python/property_setter/duck_property.py","file_name":"duck_property.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"41686271607","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 16 17:29:34 2021\r\n\r\n@author: saman\r\n\"\"\"\r\nfrom All_files import *\r\n\r\n\r\n\r\nside_bar=st.beta_container()\r\n\r\n\r\nwith side_bar:\r\n st.sidebar.image(\"images/Logo_v.png\", width=250,)\r\n st.sidebar.title(\"Twitter Analytics option\") \r\n user_box=st.sidebar.checkbox(\"User Analysis\")\r\n extract_box = st.sidebar.checkbox(\"Extract Tweets\") \r\n analyse_box = st.sidebar.checkbox(\"Analyse Custom Query\")\r\n if user_box:\r\n user_box=True\r\n if user_box:\r\n menu = [\"Stephen King\", \"Kai\", \"Sign Up\", \"Learn\"]\r\n choice = st.sidebar.selectbox(\"Menu\", menu)\r\n \r\n if choice == \"Stephen King\":\r\n user_df = pd.read_csv(\"StephenKing.csv\" )\r\n st.write(\"The intial dataset\")\r\n st.dataframe(user_df)\r\n # acquires both tweet polarity and subjectivity\r\n user_df['sentiment'] = user_df['tweet'].apply(lambda tweet: TextBlob(tweet).sentiment)\r\n # only polarity\r\n user_df['polarity_score'] = user_df['tweet'].apply(lambda tweet: TextBlob(tweet).sentiment.polarity)\r\n user_df['polarity'] = user_df['polarity_score'].apply(lambda x: 'positive' if x > 0 else ('negative' if x < 0 else 'neutral'))\r\n user_df.polarity.value_counts().plot(kind=\"pie\",\r\n autopct='%1.1f%%',\r\n labels=None,\r\n pctdistance=1.12,\r\n colors=[\"limegreen\", \"red\", \"gray\"]) \r\n su=plt.axis('equal')\r\n su=plt.title(\"Franction of each sentiment in random tweets\")\r\n su=plt.legend(labels=user_df.polarity.value_counts().index, loc=\"upper left\")\r\n st.write(su.figure)\r\n \r\n tweet_dataset = user_df.copy()\r\n #Removing non-ascii characters (for example, arabian chars)\r\n user_df['tweet'].replace({r'[^\\x00-\\x7F]+':''}, regex=True, inplace=True)\r\n #Making all fields string type\r\n for i in range(len(user_df)): \r\n user_df.at[i,'tweet'] = str(user_df.iloc[i]['tweet'])\r\n for i in range(len(user_df)): \r\n user_df.at[i,'tweet'] =remove_urls(user_df.iloc[i]['tweet'])\r\n # Convert to list\r\n datal= user_df['tweet'].values.tolist()\r\n datal = [re.sub('\\S*@\\S*\\s?', '', sent) for sent in datal]\r\n datal = [re.sub('\\s+', ' ', sent) for sent in datal]\r\n datal = [re.sub(\"\\'\", \"\", sent) for sent in datal]\r\n data_s = np.array(datal)\r\n tokenizer = Tokenizer(num_words=max_words)\r\n test_sequence = tokenizer.texts_to_sequences(data_s)\r\n test_sequence = pad_sequences(test_sequence, maxlen=2500)\r\n test_prediction = model.predict(test_sequence)\r\n np.around(test_prediction, decimals=0)\r\n \r\n tweet_dataset['label'] = np.around(test_prediction, decimals=0)\r\n tweet_dataset[tweet_dataset['label']==1.0].head(10) \r\n \r\n for i in range(10):\r\n st.write(tweet_dataset.iloc[i*2]['text']) \r\n st.write('\\n')\r\n \r\n \r\n if analyse_box:\r\n st.sidebar.title(\"Twitter Analysis Input Form\")\r\n dataset_file = st.sidebar.file_uploader(\r\n \"Upload Tweet Dataset\", type=[\"csv\"]\r\n )\r\n \r\n\r\n \r\n \r\n analyse_button = st.sidebar.button(\"Start Analysis\")\r\n if analyse_button: \r\n tweet_df = read_tweets_csv(dataset_file)\r\n st.write(\"The intial dataset\")\r\n st.dataframe(tweet_df)\r\n # acquires both tweet polarity and subjectivity\r\n tweet_df['sentiment'] = tweet_df['text'].apply(lambda tweet: TextBlob(tweet).sentiment)\r\n # only polarity\r\n tweet_df['polarity_score'] = tweet_df['text'].apply(lambda tweet: TextBlob(tweet).sentiment.polarity)\r\n tweet_df['polarity'] = tweet_df['polarity_score'].apply(lambda x: 'positive' if x > 0 else ('negative' if x < 0 
else 'neutral'))\r\n \r\n \r\n \r\n tweet_df.polarity.value_counts().plot(kind=\"pie\",\r\n autopct='%1.1f%%',\r\n labels=None,\r\n pctdistance=1.12,\r\n colors=[\"limegreen\", \"red\", \"gray\"])\r\n su=plt.axis('equal')\r\n su=plt.title(\"Franction of each sentiment in random tweets\")\r\n su=plt.legend(labels=tweet_df.polarity.value_counts().index, loc=\"upper left\")\r\n st.write(su.figure)\r\n \r\n tweets_dataset = tweet_df.copy()\r\n \r\n #Removing non-ascii characters (for example, arabian chars)\r\n tweet_df['text'].replace({r'[^\\x00-\\x7F]+':''}, regex=True, inplace=True)\r\n #Making all fields string type\r\n for i in range(len(tweet_df)): \r\n tweet_df.at[i,'text'] = str(tweet_df.iloc[i]['text'])\r\n for i in range(len(tweet_df)): \r\n tweet_df.at[i,'text'] = remove_urls(tweet_df.iloc[i]['text'])\r\n # Convert to list\r\n dataf= tweet_df['text'].values.tolist()\r\n dataf = [re.sub('\\S*@\\S*\\s?', '', sent) for sent in dataf]\r\n dataf = [re.sub('\\s+', ' ', sent) for sent in dataf]\r\n dataf = [re.sub(\"\\'\", \"\", sent) for sent in dataf]\r\n data_t = np.array(dataf)\r\n\r\n \r\n tokenizer = Tokenizer(num_words=max_words)\r\n test_sequence = tokenizer.texts_to_sequences(data_t)\r\n test_sequence = pad_sequences(test_sequence, maxlen=2500)\r\n test_prediction = model.predict(test_sequence)\r\n np.around(test_prediction, decimals=0)\r\n \r\n tweets_dataset['label'] = np.around(test_prediction, decimals=0)\r\n tweets_dataset[tweets_dataset['label']==1.0].head(10)\r\n \r\n for i in range(10):\r\n st.write(tweets_dataset.iloc[i*2]['text']) \r\n st.write('\\n')\r\n \r\n","repo_name":"samanabatool96/machine_learning_mental_health","sub_path":"dep_app.py","file_name":"dep_app.py","file_ext":"py","file_size_in_byte":6143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"16365293625","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('content', '0032_auto_20160808_1203'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='ideasurvey',\n name='answer_mode',\n field=models.SmallIntegerField(default=0, verbose_name='Vastaamisen asetukset', choices=[(0, 'Yksi vastaus osallistujaa kohden'), (1, 'Rajattomat vastaukset kirjautumattomille k\\xe4ytt\\xe4jille')]),\n ),\n ]\n","repo_name":"oikeusministerio/nuortenideat_django","sub_path":"content/migrations/0033_ideasurvey_answer_mode.py","file_name":"0033_ideasurvey_answer_mode.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"4000178695","text":"import subprocess\nimport platform\nimport chardet\n\n\nstr_lst = {'разработка', 'сокет', 'декоратор'}\n\nfor _ in str_lst:\n b_str = _.encode()\n print(type(b_str), b_str)\n\nstr_lst = {b'class', b'function', b'method'}\n\nfor _ in str_lst:\n print(type(_), _, len(_))\n\n\nstr_lst = {'attribute', 'класс', 'функция', 'type'}\nfor _ in str_lst:\n try:\n print(_.encode())\n except:\n pass\n\n\nhostnames = {\"yandex.ru\", \"youtube.com\"}\nfor _ in hostnames:\n cmd = \"ping -{} 1 {}\".format('n' if platform.system().lower()==\"windows\" else 'c', _)\n output = subprocess.check_output(cmd, shell=True)\n print(output.decode('cp866'))\n\n\ntmp_file = 'test_file.txt'\nwith open(tmp_file, 'w+') as f:\n f.write('сетевое программирование\\nсокет\\nдекоратор')\nwith open(tmp_file, 'rb') as f:\n print(chardet.detect(f.read()))\nwith open(tmp_file, 'r', encoding='utf-8') as f:\n print(f.read())\n","repo_name":"delovoy70/gkbrns","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"69916687764","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom typing import Any, Dict, Mapping, Optional\n\nimport arrow\nfrom bring.pkg import PkgTing\nfrom bring.pkg_index.config import IndexConfig\nfrom bring.pkg_index.index import BringIndexTing\nfrom bring.pkg_index.utils import retrieve_index_file_content\nfrom frkl.common.exceptions import FrklException\nfrom frkl.tasks.task import SingleTaskAsync, Task\nfrom frkl.tasks.task_desc import TaskDesc\nfrom tings.ting import TingMeta\n\n\nlog = logging.getLogger(\"bring\")\n\n\nclass BringIndexFile(object):\n def __init__(self, index_file: str):\n\n self._index_file: str = index_file\n\n self._pkg_data: Optional[Mapping[str, Mapping[str, Any]]] = None\n self._metadata: Optional[Mapping[str, Any]] = None\n\n async def get_metadata(self, update_index_file: bool = False) -> Mapping[str, Any]:\n\n if self._metadata is None:\n await self.get_pkg_data(update_index_file=update_index_file)\n return self._metadata # type: ignore\n\n async def update(self):\n\n self._pkg_data = None\n self._metadata = None\n\n await self.get_pkg_data(update_index_file=True)\n\n async def get_pkg_data(\n self, update_index_file: bool = False\n ) -> Mapping[str, Mapping[str, Any]]:\n\n if self._pkg_data is not None:\n return self._pkg_data\n\n pkgs: Dict[str, Mapping[str, Any]] = {}\n\n data: Mapping[str, Any] = await retrieve_index_file_content(\n self._index_file, update=update_index_file\n )\n\n index_metadata: Dict[str, Any] = {}\n\n for pkg_name, pkg_data in data.items():\n\n if pkg_name.startswith(\"_bring_\"):\n\n if pkg_name == \"_bring_metadata_timestamp\":\n try:\n pkg_data = arrow.get(pkg_data)\n index_metadata[pkg_name] = pkg_data\n except Exception as e:\n log.debug(f\"Can't parse date '{pkg_data}', ignoring: {e}\")\n else:\n index_metadata[pkg_name] = pkg_data\n\n continue\n\n pkgs[pkg_name] = pkg_data\n\n self._pkg_data = pkgs\n self._metadata = index_metadata\n return self._pkg_data\n\n async def create_ting(self, index: BringIndexTing, pkg_name: str) -> PkgTing:\n\n pkgs = await self.get_pkg_data()\n pkg_data = pkgs.get(pkg_name, None)\n\n if pkg_data is None:\n raise FrklException(\n msg=f\"Can't create ting '{pkg_name}'.\",\n reason=\"No package with that name available.\",\n )\n\n ting: PkgTing = index.tingistry.get_ting( # type: ignore\n f\"{index.full_name}.pkgs.{pkg_name}\"\n )\n if ting is None:\n ting = index.tingistry.create_ting( # type: ignore\n \"bring.types.static_pkg\",\n f\"{index.full_name}.pkgs.{pkg_name}\", # type: ignore\n )\n # ting.bring_index = index\n\n ting.set_input(**pkg_data)\n # ting._set_result(data)\n return ting\n\n async def create_tings(self, index: BringIndexTing) -> Mapping[str, PkgTing]:\n\n pkgs = await self.get_pkg_data()\n\n result: Dict[str, PkgTing] = {}\n for pkg_name in pkgs.keys():\n result[pkg_name] = await self.create_ting(index=index, pkg_name=pkg_name)\n\n return result\n\n\nclass BringStaticIndexTing(BringIndexTing):\n def __init__(self, name: str, meta: TingMeta):\n self._uri: Optional[str] = None\n self._index_file: Optional[BringIndexFile] = None\n self._pkgs: Optional[Mapping[str, PkgTing]] = None\n\n super().__init__(name=name, meta=meta)\n\n def _invalidate(self) -> None:\n\n self._pkgs = None\n self._index_file = None\n\n async def _get_metadata_timestamp(self) -> Optional[str]:\n\n index_file = await self.get_index_file()\n metadata = await index_file.get_metadata()\n ts = metadata.get(\"_bring_metadata_timestamp\", None)\n if ts:\n ts = str(ts)\n return ts\n\n async def 
get_index_file(self, update: bool = False) -> BringIndexFile:\n if self._index_file is None or update:\n if self._uri is None:\n raise Exception(\n \"Can't load packages: index uri not set. This is a bug.\"\n )\n\n if self._index_file is None:\n self._index_file = BringIndexFile(index_file=self._uri)\n if update:\n await self._index_file.update()\n return self._index_file\n\n async def _get_pkgs(self) -> Mapping[str, PkgTing]:\n\n if self._pkgs is None:\n index_file = await self.get_index_file()\n self._pkgs = await index_file.create_tings(self)\n return self._pkgs\n\n async def _create_update_tasks(self) -> Optional[Task]:\n\n task_desc = TaskDesc(\n name=f\"metadata update {self.name}\",\n msg=f\"updating metadata for index '{self.name}'\",\n )\n\n async def update_index():\n self.invalidate()\n await self.get_index_file(update=True)\n\n task = SingleTaskAsync(update_index, task_desc=task_desc, parent_task=None)\n\n return task\n\n async def init(self, config: IndexConfig) -> None:\n\n self._uri = config.index_file\n self.invalidate()\n\n async def get_uri(self) -> str:\n\n if self._uri is None:\n raise FrklException(\n \"Can't retrieve uri for index.\", reason=\"Index not initialized yet.\"\n )\n return self._uri\n","repo_name":"makkus/bring","sub_path":"src/bring/pkg_index/static_index.py","file_name":"static_index.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"710956897","text":"from typing import List\nimport unittest\n\nimport numpy as np\n\nfrom .morse_state_size import MorseCodeStateSize\nfrom .morse_code_states import MorseCodeStates\n\nACTIVE = MorseCodeStates.ACTIVE\nINACTIVE = MorseCodeStates.INACTIVE\n\ndef cap_sizes(sizes):\n LARGEST_UNIT = 7\n smallest_number = np.min(sizes)\n largest_allowed_size = smallest_number * LARGEST_UNIT\n capped_sizes =[]\n for size in sizes:\n if size > largest_allowed_size:\n size = largest_allowed_size\n capped_sizes.append(size)\n \n return capped_sizes\n\n# Extends \"list\" so we have more control of internal data\nclass MorseCodeStateSizes(list):\n sizes = None\n state_sizes_by_state = None\n\n # if state_sizes is provided, then a deep copy is made\n def __init__(self, state_sizes: List[MorseCodeStateSize] = None) -> None:\n self.sizes = []\n self.state_sizes_by_state = {\n ACTIVE: [],\n INACTIVE: []\n }\n if state_sizes is not None:\n self.copy_from(state_sizes)\n\n def copy_from(self, state_sizes):\n for state_size in state_sizes:\n self.append(state_size.copy())\n \n def append(self, state_size: MorseCodeStateSize) -> None:\n size = state_size.size\n self.sizes.append(size)\n\n state = state_size.state\n self.state_sizes_by_state[state].append(state_size)\n\n return super().append(state_size)\n \n def normalize(self):\n stats_by_state = {}\n for state in self.state_sizes_by_state:\n state_sizes = self.state_sizes_by_state[state]\n sizes = [state_size.size for state_size in state_sizes]\n if len(sizes):\n std = np.std(sizes)\n stats_by_state[state] = {\n 'smallest': np.min(sizes),\n 'sizes': cap_sizes(sizes),\n 'middle': np.median(sizes),\n 'std': std,\n 'half_std': std / 2\n }\n\n normalized_state_sizes = MorseCodeStateSizes()\n for state_size in self:\n state = state_size.state\n size = state_size.size\n value = state_size.value\n\n stats = stats_by_state[state]\n\n if size < (stats['smallest'] * 1.5):\n size = 1\n elif size < (stats['smallest'] * 4):\n size = 3\n else:\n size = 7\n normalized_state_sizes.append(MorseCodeStateSize(value, size))\n\n return normalized_state_sizes\n\n def __repr__(self) -> str:\n output = []\n for state_size in self:\n output.append(str(state_size))\n \n return ', '.join(output)\n\n @classmethod\n def deserialize(self, serialized_str: str):\n state_sizes = MorseCodeStateSizes()\n for _str in serialized_str.split(', '):\n state, size = _str.split(' ')\n state_size = MorseCodeStateSize(int(state), float(size))\n state_sizes.append(state_size)\n \n return state_sizes\n\n\n\nclass TestMorseCodeStateSizes(unittest.TestCase):\n def test_init(self):\n a = MorseCodeStateSizes()\n self.assertIsInstance(a, list)\n self.assertEqual(len(a), 0)\n\n def test_init_copy_with_value(self):\n a = MorseCodeStateSizes()\n state_size1 = MorseCodeStateSize(1, 5)\n a.append(state_size1)\n\n # Copy of list\n b = MorseCodeStateSizes(a)\n\n self.assertEqual(len(a), 1)\n self.assertEqual(len(b), 1)\n self.assertEqual(a, b)\n self.assertIsNot(a, b)\n\n def test_init_copy_without_value(self):\n a = MorseCodeStateSizes()\n state_size1 = MorseCodeStateSize(1, 5)\n\n # Copy of list\n b = MorseCodeStateSizes(a)\n\n # Append after copying\n a.append(state_size1)\n\n self.assertEqual(len(a), 1)\n self.assertEqual(len(b), 0)\n self.assertNotEqual(a, b)\n\n def test_init_copy_with_value_mutation(self):\n a = MorseCodeStateSizes()\n state_size1 = MorseCodeStateSize(1, 5)\n a.append(state_size1)\n\n # Copy of list\n b = MorseCodeStateSizes(a)\n\n state_size1.size = 2\n\n # Mutating item from 
first does not change item from second\n self.assertEqual(a[0], state_size1)\n\n self.assertEqual(len(a), 1)\n self.assertEqual(len(b), 1)\n self.assertNotEqual(a[0], b[0])\n self.assertNotEqual(a, b)\n self.assertIsNot(a, b)\n \n def test_get_sizes(self):\n a = MorseCodeStateSizes()\n a.append(MorseCodeStateSize(1, 5))\n a.append(MorseCodeStateSize(2, 4))\n\n self.assertEqual(a.sizes, [5, 4])\n\n def helper_sizes_to_state_sizes(self, sizes: List[float]):\n state_sizes = MorseCodeStateSizes()\n active = 0\n for size in sizes:\n state_sizes.append(MorseCodeStateSize(active, size))\n active = abs(active - 1)\n \n return state_sizes\n\n def test_normalize_sizes(self):\n raw = self.helper_sizes_to_state_sizes([0.2, 0.9, 0.6])\n expected_normalized = self.helper_sizes_to_state_sizes([1, 1, 3])\n normalized = raw.normalize()\n self.assertEqual(normalized, expected_normalized)\n \n raw = self.helper_sizes_to_state_sizes([0.3, 0.7, 1.2])\n expected_normalized = self.helper_sizes_to_state_sizes([1, 1, 7])\n normalized = raw.normalize()\n self.assertEqual(normalized, expected_normalized)\n \n def test_normalize_sizes_extreme(self):\n # Last value should not skew smaller numbers\n raw = self.helper_sizes_to_state_sizes([1, 3, 3, 5, 200])\n expected_normalized = self.helper_sizes_to_state_sizes([1, 1, 3, 3, 7])\n normalized = raw.normalize()\n self.assertEqual(normalized, expected_normalized)\n \n def test_deserialize(self):\n serialized_text = '0 1, 1 3, 0 3'\n _sz = MorseCodeStateSize\n expected_output = MorseCodeStateSizes([_sz(0, 1), _sz(1, 3), _sz(0, 3)])\n\n state_sizes = MorseCodeStateSizes.deserialize(serialized_text)\n\n self.assertEqual(state_sizes, expected_output)\n","repo_name":"kjprice/pi-robot","sub_path":"python/modules/morse_code/morse_state_sizes.py","file_name":"morse_state_sizes.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"34331469975","text":"from __future__ import unicode_literals\n\nimport pprint\nimport re\nimport string\nfrom typing import List\n\nfrom textx import *\n\nfrom SFX.SFXParsingRules import *\n\n\nclass SFXTranslator():\n\n def __init__(self) -> None:\n self.mm = metamodel_from_file('SFX/SignalFX.tx', \\\n classes=[Filter,FilterFunction,DataFunction,Expression, Term, \\\n Factor,SFXExpression,SingleInput,SFXFunction,SFXId,\\\n FilteringQuery, PlainTextQuery, UnSupportedSFXFunctions, RefrencedQuery],use_regexp_group=True)\n \n def cname(self,o):\n return o.__class__.__name__\n \n\n def get_stream_variables(self,obj, stream_vars:List):\n if obj and hasattr(obj, 'stream_variable_name') and obj.stream_variable_name:\n stream_vars.append(obj.stream_variable_name)\n elif obj and hasattr(obj, 'factors') and obj.factors:\n for factor in obj.factors:\n self.get_stream_variables(factor,stream_vars)\n elif obj and hasattr(obj, 'terms') and obj.terms:\n for term in obj.terms:\n self.get_stream_variables(term,stream_vars)\n elif obj and hasattr(obj, 'expr') and obj.expr:\n self.get_stream_variables(obj.expr,stream_vars)\n else:\n return\n\n\n def translate(self, current_logger, model_str, sumoLogicFiltersInjections:List, query_duration='1h'):\n SumoLogicDashboardDicts.reset_stream_var_labels_lookup()\n sumo_queries = {}\n model_str1 = re.sub(\"\\\\n+\", r\"\\n\", model_str)\n model_str2 = re.sub(\"\\s*(?:\\\\n|\\n)+\\s*(and|not|filter|\\()\", r\" \\1\", model_str1)\n model_strs = model_str2.split('\\n')\n refrenced_stream_variables={}\n sfx_inputs=[]\n \n for mdl_str in model_strs:\n if not mdl_str:\n continue\n model = self.mm.model_from_str(mdl_str.strip())\n sfx_inputs += model.inputs\n \n for single_input in sfx_inputs:\n input_type_name = self.cname(single_input.input_type)\n stream_result_variable_name = ''\n if single_input and single_input.input_type:\n publish_options = single_input.get_publish_options()\n stream_result_variable_name = single_input.input_type.stream_result_variable_name\n if not stream_result_variable_name:\n stream_result_variable_name = publish_options['label']\n stream_result_variable_name = SumoLogicDashboardDicts.get_stream_var_labels_mapping(stream_result_variable_name, generate_key_if_none=True) if stream_result_variable_name not in SumoLogicDashboardDicts.stream_var_labels_mapping.keys() else stream_result_variable_name\n else:\n stream_result_variable_name = stream_result_variable_name.name\n \n refrenced_stream_variables[stream_result_variable_name]=[]\n main_query = None\n expression_labels = None\n\n if isinstance(single_input.input_type.get_sumo_query(sumoLogicFiltersInjections=sumoLogicFiltersInjections), tuple):\n main_query = single_input.input_type.get_sumo_query(sumoLogicFiltersInjections=sumoLogicFiltersInjections)[0]\n if main_query and len(main_query) > 2750:\n main_query = single_input.input_type.get_sumo_query( generate_fuzzy_filters=False, sumoLogicFiltersInjections=sumoLogicFiltersInjections)\n hashed_expression_labels = single_input.input_type.get_sumo_query(sumoLogicFiltersInjections=sumoLogicFiltersInjections)[1]\n expression_labels = [hashed_expression_label.replace(\"#\",\"\") for hashed_expression_label in hashed_expression_labels if hashed_expression_label]\n else:\n main_query = single_input.input_type.get_sumo_query(sumoLogicFiltersInjections=sumoLogicFiltersInjections)\n if main_query and len(main_query) > 2750:\n main_query = single_input.input_type.get_sumo_query( generate_fuzzy_filters=False, sumoLogicFiltersInjections = 
sumoLogicFiltersInjections)\n\n\n query_tail = single_input.get_query_tail(query_duration=query_duration)\n final_query = f\"{main_query} {query_tail}\"\n sumo_queries[stream_result_variable_name] = {\"expression_labels\": expression_labels,\"input_type_name\": input_type_name, \"query\": final_query, \"bys\": single_input.get_grp_bys(), \"publish\": publish_options, \"aggregationType\": single_input.get_aggregation_type()}\n \n if single_input.input_type and self.cname(single_input.input_type)==\"sfx_expression\":\n for expr in single_input.input_type.exprs:\n self.get_stream_variables(expr,refrenced_stream_variables[stream_result_variable_name])\n \n for stream_result_variable_name, sumo_query in sumo_queries.items():\n current_group_bys = []\n if sumo_query[\"expression_labels\"]:\n for label in sumo_query[\"expression_labels\"]:\n if sumo_queries[label][\"bys\"]:\n current_group_bys.append(sumo_queries[label][\"bys\"])\n if \"along\" in sumo_queries[label][\"query\"]:\n along_value_search = re.search('along\\s(?P\\S+)', sumo_queries[label][\"query\"])\n if along_value_search:\n along_value = along_value_search.group('along_value')\n if along_value:\n current_group_bys.append(along_value)\n\n current_group_bys = flatten(current_group_bys)\n current_group_bys = list(set(current_group_bys))\n current_group_bys_stmt = f\" along {','.join(current_group_bys)} \" if current_group_bys and len(current_group_bys) > 0 else \"\"\n \n sumo_queries[stream_result_variable_name][\"query\"]= sumo_query[\"query\"].replace(\"___ALONG___BYS___\", f\"{current_group_bys_stmt}\")\n\n\n return sumo_queries\n \n ","repo_name":"SumoLogic-Labs/contents-migration-tools","sub_path":"SFXSumoTranspiler/SFX/SFXQueryTransPiler.py","file_name":"SFXQueryTransPiler.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"36924191425","text":"from typing import List\n\n\nclass TwoSum:\n def two_sum(self, nums: List[int], target: int) -> List[int]:\n prevMap = {}\n\n for i, num in enumerate(nums):\n diff = target - num\n if diff in prevMap:\n return [prevMap[diff], i]\n\n prevMap[num] = i\n","repo_name":"brutalv4/leetcode-problems","sub_path":"solutions/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"9496615343","text":"fail1 = input('Lähtefaili nimi: ')\nfail2 = input('Sihtfaili nimi: ')\nf = open(fail1)\nfaili_sisu = f.read()\nkogus = faili_sisu.count('Hello')\ntõlgitud = faili_sisu.replace('Hello', 'Tere')\nprint('Tehti ' + str(kogus) + ' asendamist. \\n')\nf.close()\nf1 = open(fail2, 'w')\nf1.write(tõlgitud)\nf1.close()\nprint('Faili ' + str(fail2) + ' sisu: \\n')\nprint(tõlgitud)","repo_name":"ArR4e/DSProject","sub_path":"processed/K02/S089/kodu4.py","file_name":"kodu4.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"1043319928","text":"f = open('data.txt', 'w') # utworzenie nowego pliku w trybie do zapisu\nf.write('Witaj,\\n') # Zapisanie ciągu znaków w pliku (\"\\n\" zaczyna od nowej linijki tekstu)\nf.write('Brian\\n')\nf.close() # zapisanie bufora wyjściowego na dysku i zamknięcie pliku\nf = open('data.txt') # tryb do odczytu ('r') jest domyślym trybem przetwarzania plików\ntext = f.read() # załadownie całego pliku do obiektu testowego\nprint(text) # drukuje zawartość pliku\nprint(text.split()) # zawartość pliku jest zawsze łańcuchem znaków\nprint(dir(f)) # pokazuje wszystkie dostępne metody dla pliku\nprint(help(f.seek)) # funkcja help dla konkretnej metody pokazuję jej opis","repo_name":"MarPaw3/Python.-Wprowadzenie","sub_path":"4. Typy obiektów/Pliki.py","file_name":"Pliki.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"30175661997","text":"from dataclasses import dataclass\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if not cls._instances:\n cls._instances[cls] = super().__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\n@dataclass\nclass AppSettings(metaclass=Singleton):\n tema: str = 'dark'\n size: str = '18px'\n\n\nif __name__ == '__main__':\n as1 = AppSettings()\n as1.tema = 'bright' # Set as bright.\n\n print(as1.tema)\n as2 = AppSettings() # When initialized, NOT set as default dark. Keeps bright.\n print(as2.tema)\n\n # Obj are the same:\n print(id(as1))\n print(id(as2))\n print(as1 == as2)\n\n # All obj receives new attrs:\n as1.nome = 'Dio'\n print(as2.nome)\n\n","repo_name":"diogenesdornelles/udemy-otavio-miranda-python","sub_path":"Section 16 - Design Patterns/Singleton/Singleton_3.py","file_name":"Singleton_3.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"4848416394","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # AGE DETECTION SYSTEM USING CNN ON IMAGES\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport glob\nimport os\nimport seaborn as sns \nfrom matplotlib import pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import confusion_matrix\n\n\n# In[2]:\n\n\ndata = pd.read_csv(\"train.csv\")\ndata.head()\n\n\n# In[4]:\n\n\ndata['Class'].replace(['YOUNG','MIDDLE','OLD'],[0,1,2],inplace=True)\ndata.head(10)\n\n\n# In[5]:\n\n\ndef readImage(path,ch = 3, resize=(150,150)):\n\tdi = tf.io.read_file(path)\n\tdi = tf.image.decode_jpeg(di, channels=ch)\n\tdi = tf.image.convert_image_dtype(di, dtype=tf.float32)\n\tdi = tf.image.resize(di, resize)\n\treturn di\n\n\n# In[6]:\n\n\ndef load_data(image_path, label):\n img = readImage(image_path, 3, (150,150))\n return (img, label)\n\n\n# In[8]:\n\n\nPATH = \"Train\"\nimage_paths = []\nfor path in os.listdir(PATH):\n image_paths.append(PATH+\"/\"+path)\nprint(len(image_paths))\n\nresponse_list = []\n\nfor i in image_paths:\n _,tail = os.path.split(i)\n response = data.loc[data['ID'] == tail]['Class'].values[0]\n response_list.append(response)\nprint(len(response_list))\n\n\n# In[9]:\n\n\ntrain_size = int(0.9*(len(image_paths)))\nprint(train_size)\ntest_size = int(0.1*(len(image_paths)))\n\ntrain_set = tf.data.Dataset.from_tensor_slices((image_paths[:train_size], response_list[:train_size]))\ntest_set = tf.data.Dataset.from_tensor_slices((image_paths[test_size:], response_list[test_size:]))\n\n\n# In[10]:\n\n\ntrain_set = (train_set\n .map(load_data, num_parallel_calls=tf.data.AUTOTUNE)\n .batch(64)\n .prefetch(tf.data.AUTOTUNE)\n)\n\n\n# In[11]:\n\n\ntest_set = (test_set\n .map(load_data, num_parallel_calls=tf.data.AUTOTUNE)\n .batch(64)\n .prefetch(tf.data.AUTOTUNE)\n)\n\n\n# In[12]:\n\n\nfrom tensorflow.keras import layers,models\n\ncnn_model = models.Sequential([\n layers.Conv2D(filters=30, kernel_size=(3, 3), activation='relu', input_shape=(150, 150, 3), padding = 'same'),\n layers.MaxPooling2D((2, 2)),\n \n # layers.BatchNormalization(),\n\n layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding = 'same'),\n layers.MaxPooling2D((2, 2)),\n\n # layers.BatchNormalization(),\n \n # layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding = 'same'),\n # layers.MaxPooling2D((2, 2)),\n \n layers.Flatten(),\n layers.Dense(64, activation='relu'),\n # layers.Dropout(0.25),\n layers.Dense(3, activation='softmax')\n])\n\n\n# In[13]:\n\n\ncnn_model.summary()\n\n\n# In[14]:\n\n\ncnn_model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n\n# In[15]:\n\n\ncnn_model.fit(train_set, epochs=10, validation_data=test_set)\n\n\n# In[16]:\n\n\ncnn_model.evaluate(train_set)\n\n\n# In[17]:\n\n\ncnn_model.evaluate(test_set)\n\n\n# In[18]:\n\n\ntest_pred = cnn_model.predict(test_set)\n\n\n# In[19]:\n\n\ny_labels = [np.argmax(item) for item in test_pred]\nprint(\"Test Predictions response sample:\",y_labels[:10])\n\ntest_response = response_list[test_size:]\nprint(\"Test True response sample:\", test_response[:10])\n\n\n# In[20]:\n\n\nclass_names = ['YOUNG','MIDDLE','OLD']\n\n\n# In[21]:\n\n\ndef plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized 
confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n\n fig, ax = plt.subplots(figsize=(7,7))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax\n\n\nnp.set_printoptions(precision=2)\n\n\n# In[22]:\n\n\nplot_confusion_matrix(y_labels, test_response, classes=class_names,\n title='Confusion matrix, without normalization')\n\n\n# In[23]:\n\n\nplot_confusion_matrix(y_labels, test_response, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\n","repo_name":"GhostByteX/age-classification-CNN","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"11120068000","text":"class Solution:\n def deleteDuplicates(self, head):\n p = head\n while p and p.next:\n if p.val == p.next.val:\n p.next = p.next.next\n else:\n p = p.next\n return head\n\n\nif __name__ == '__main__':\n import Test\n from ListBuilder import build\n\n Test.test(Solution().deleteDuplicates, [\n (build(1), build(1)),\n (build(1, 1, 2), build(1, 2)),\n (build(1, 1, 2, 3, 3), build(1, 2, 3)),\n (build(1, 1, 1), build(1)),\n ])\n","repo_name":"papalagichen/leet-code","sub_path":"0083 - Remove Duplicates from Sorted List.py","file_name":"0083 - Remove Duplicates from Sorted List.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"39398273364","text":"from django import forms\nfrom .models import Ticket, Review\n\n\nRATING_CHOICES = [\n ('1', '1'),\n ('2', '2'),\n ('3', '3'),\n ('4', '4'),\n ('5', '5'),\n]\n\n\nclass TicketForm(forms.ModelForm):\n \"\"\"Form to create a ticket.\"\"\"\n\n class Meta:\n model = Ticket\n fields = ['title', 'description', 'image']\n\n\nclass ReviewForm(forms.ModelForm):\n \"\"\"Form to create a review.\"\"\"\n\n rating = forms.TypedChoiceField(\n widget=forms.RadioSelect, choices=RATING_CHOICES, coerce=int\n )\n\n class Meta:\n model = Review\n fields = ('headline', 'rating', 'body')\n","repo_name":"BastienDslnds/oc_python_projet_9","sub_path":"litreview/review/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"9508540113","text":"f = open(\"taksohinnad.txt\", encoding = \"utf-8\")\n\nteepikkus = float(input(\"Sisesta tee pikkus kilomeetrites: \"))\nkäesolev_hind = 0\nodavaim_hind = 0\nodavaim = \"\"\n\nfor rida in f:\n rida_j = rida.strip().split(\",\")\n käesolev_hind = float(rida_j[1]) + float(rida_j[2]) * teepikkus\n if odavaim_hind == 0:\n odavaim = rida_j[0]\n odavaim_hind = käesolev_hind\n elif käesolev_hind < odavaim_hind:\n odavaim = rida_j[0]\n odavaim_hind = käesolev_hind\n\nif odavaim == \"\":\n print(\"Taksod puuduvad.\")\nelse:\n print(\"Kõige odavam on \" + odavaim + \".\")\n\nf.close()","repo_name":"ArR4e/DSProject","sub_path":"processed/K07/S188/kodu2.py","file_name":"kodu2.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"1026348630","text":"#!/usr/bin/env python3\n'''PCR client'''\nimport sys\n\nfrom ocs.matched_client import MatchedClient\n\ndef usage():\n    print('usage: pcr_client.py', file=sys.stderr)\n\ndef main():\n    '''PCR client'''\n    pcr_client = MatchedClient('stm-heater-source', args=[])\n\n    pcr_client.set_volt_ac(volt_set=0)\n    pcr_client.set_output(output=False)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"dixilo/pcr500ma","sub_path":"pcr_client.py","file_name":"pcr_client.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"72474292245","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 64\nk0 = 7\nx = np.exp(1j * 2 * np.pi * k0 / N * np.arange(N))\n\n#empty array\nX = np.array([])\n\n#iter over frequency sample\nfor k in range(N):\n #complex exponential\n s = np.exp(1j*2 * np.pi * k / N * np.arange(N))\n # compute output spectrum\n X = np.append(X, sum(x*np.conjugate(s)))\n \nplt.plot(np.arange(N), abs(X))\nplt.axis([0, N-1, 0, N])\n\nplt.show()\n","repo_name":"sergiiGitHub/Demo","sub_path":"Python/AudioSygnalProcesing/lessons_2/DFT.py","file_name":"DFT.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"37875144165","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom solution.solution_plot import plot_data, plot_separator\nfrom solution_data import gen_big_data, super_simple_separable_through_origin, xor, data1, labels1, data2, labels2, \\\n big_data, big_data_labels\nfrom solution_tools import score\n\n######################################################################\n# tests\n\ndef test_linear_classifier(dataFun, learner, learner_params = {},\n draw = False, refresh = True, pause = False):\n '''\n Prints score of your classifier on given dataset\n dataFun method that returns a dataset\n learner your classifier method\n learner_params parameters for the learner\n '''\n data, labels = dataFun()\n d, n = data.shape\n if draw:\n ax = plot_data(data, labels)\n def hook(params):\n (th, th0) = params\n if refresh: plot_data(data, labels, ax, clear = True)\n plot_separator(ax, th, th0)\n #print('th', th.T, 'th0', th0)\n plt.pause(0.05)\n if pause: input('go?')\n else:\n hook = None\n th, th0 = learner(data, labels, hook = hook, params = learner_params)\n print(\"Final score\", float(score(data, labels, th, th0)) / n)\n print(\"Params\", np.transpose(th), th0)\n\nexpected_perceptron = [(np.array([[-9.0], [18.0]]), np.array([[2.0]])),(np.array([[0.0], [-3.0]]), np.array([[0.0]]))]\nexpected_averaged = [(np.array([[-9.0525], [17.5825]]), np.array([[1.9425]])),(np.array([[1.47], [-1.7275]]), np.array([[0.985]]))]\ndatasets = [super_simple_separable_through_origin,xor]\n\n\ndef incorrect(expected,result):\n print(\"Test Failed.\")\n print(\"Your code output \",result)\n print(\"Expected \",expected)\n print(\"\\n\")\n\n\ndef correct():\n print(\"Passed! \\n\")\n\n\ndef test_perceptron(perceptron):\n '''\n Checks perceptron theta and theta0 values for 100 iterations\n '''\n for index in range(len(datasets)):\n data, labels = datasets[index]()\n th,th0 = perceptron(data, labels, {\"T\": 100})\n expected_th,expected_th0 = expected_perceptron[index]\n print(\"-----------Test Perceptron \"+str(index)+\"-----------\")\n if((th==expected_th).all() and (th0==expected_th0).all()):\n correct()\n else:\n incorrect(\"th: \" + str(expected_th.tolist()) + \", th0: \" + str(expected_th0.tolist()), \"th: \" + str(th.tolist()) + \", th0: \" + str(th0.tolist()))\n\n\ndef test_averaged_perceptron(averaged_perceptron):\n '''\n Checks average perceptron theta and theta0 values for 100 iterations\n '''\n for index in range(2):\n data, labels = datasets[index]()\n th,th0 = averaged_perceptron(data, labels, {\"T\": 100})\n expected_th,expected_th0 = expected_averaged[index]\n print(\"-----------Test Averaged Perceptron \"+str(index)+\"-----------\")\n if((th==expected_th).all() and (th0==expected_th0).all()):\n correct()\n else:\n incorrect(\"th: \" + str(expected_th.tolist()) + \", th0: \" + str(expected_th0.tolist()), \"th: \" + str(th.tolist()) + \", th0: \" + str(th0.tolist()))\n\n\ndef test_eval_classifier(eval_classifier,perceptron):\n '''\n Checks your classifier's performance on data1\n '''\n expected = [0.5333333333333333,0.6333333333333333]\n dataset_train = [(data1,labels1),(data2,labels2)]\n for index in range(len(dataset_train)):\n data_train,labels_train = dataset_train[index]\n #print(data_train,labels_train)\n result = eval_classifier(perceptron, data_train, labels_train,data2,labels2)\n print(\"-----------Test Eval Classifier \"+str(index)+\"-----------\")\n if(result==expected[index]):\n correct()\n else:\n incorrect(expected[index], result)\n\n\ndef 
test_eval_learning_alg(eval_learning_alg,perceptron):\n '''\n Checks your learning algorithm's performance on big_data\n eval_learning_alg method for evaluating learning algorithm\n perceptron your perceptron learning algorithm method\n '''\n expected = 0.5599999999999999\n result = eval_learning_alg(perceptron, gen_big_data(), 10, 10, 5)\n print(\"-----------Test Eval Learning Algo-----------\")\n if result == expected:\n correct()\n else:\n incorrect(expected, result)\n\n\ndef test_xval_learning_alg(xval_learning_alg,perceptron):\n '''\n Checks your learning algorithm's performance on big_data using cross validation\n xval_learning_alg method for evaluating learning algorithm using cross validation\n perceptron your perceptron learning algorithm method\n '''\n expected = 0.61\n result=xval_learning_alg(perceptron, big_data, big_data_labels, 5)\n print(\"-----------Test Cross-eval Learning Algo-----------\")\n if result == expected:\n correct()\n else:\n incorrect(expected, result)\n\n\n# Test Cases:\n# test_averaged_perceptron(averaged_perceptron)\n\ndef eval_classifier(learner, data_train, labels_train, data_test, labels_test):\n pass\n\n\n# Test cases:\n# test_eval_classifier(eval_classifier,perceptron)\n\n\ndef eval_learning_alg(learner, data_gen, n_train, n_test, it):\n pass\n\n\n# Test cases:\n# test_eval_learning_alg(eval_learning_alg,perceptron)\n\n\ndef xval_learning_alg(learner, data, labels, k):\n pass\n\n# Test cases:\n# test_xval_learning_alg(xval_learning_alg,perceptron)\n\n\n# For problem 10, here is an example of how to use gen_flipped_lin_separable, in this case with a flip probability of 50%\n# print(eval_learning_alg(perceptron, gen_flipped_lin_separable(pflip=.5), 20, 20, 5))\n\n\n# Visualization of perceptron, comment in the next three lines to see your perceptron code in action:\n'''\nfor datafn in (super_simple_separable_through_origin,super_simple_separable):\n data, labels = datafn()\n test_linear_classifier(datafn,perceptron,draw=True)\n'''\n\n\n# Test Cases:\n# test_perceptron(perceptron)\n\n\n# Visualization of Averaged Perceptron:\n'''\nfor datafn in (super_simple_separable, xor, xor_more, big_higher_dim_separable):\n data, labels = datafn()\n test_linear_classifier(datafn,averaged_perceptron,draw=True)\n'''\n\n","repo_name":"abhijitrai/ML","sub_path":"MIT/6036/week1/solution/solution_test.py","file_name":"solution_test.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"4654348866","text":"from collections import deque\n\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\nc, r = map(int, input().split())\narr = []\nfor _ in range(c):\n tmp = list(map(int, input().split()))\n arr.append(tmp)\n\nq = deque()\n\ncheeses = 0\nfor i in range(c):\n for j in range(r):\n if arr[i][j] == 1:\n cheeses += 1\n\ndef bfs(cheeses):\n while q:\n start = q.popleft()\n for i in range(4):\n x = start[0] + dx[i]\n y = start[1] + dy[i]\n if 0 <= x < c and 0 <= y < r and visited[x][y] == False:\n visited[x][y] = True\n if arr[x][y] == 0:\n q.append((x, y))\n else:\n arr[x][y] = 0\n cheeses -= 1\n return cheeses\n\ncount = 0\nlast = cheeses\nwhile cheeses != 0:\n visited = [[False for _ in range(r)] for _ in range(c)]\n q.append((0, 0))\n visited[0][0] = True\n\n cheeses = bfs(cheeses)\n count += 1\n if cheeses != 0:\n last = cheeses\n\nprint(count)\nprint(last)","repo_name":"kyurimki/Study-CodingTest","sub_path":"BOJ/11-Gold5/2636-[BFS]치즈.py","file_name":"2636-[BFS]치즈.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"4247029066","text":"VOWELS = \"AEIOUY\"\nCONSONANTS = \"BCDFGHJKLMNPQRSTVWXZ\"\n\n\ndef striped_words(text):\n n = 0\n text = text.split()\n text = ' '.join(str(e) for e in text)\n text = text.split(',')\n text = ' '.join(str(e) for e in text)\n text = text.split('.')\n text = ' '.join(str(e) for e in text)\n print(\"text = \", text)\n for _ in text.split():\n print(\"_ =\", _)\n m = 0\n for i in range(0, len(_) - 1):\n print(\"_[\", i, \"] =\", _[i])\n if _[i].upper() not in VOWELS and _[i].upper() not in CONSONANTS:\n print(\"i(1) =\", i)\n m = 0\n print(\"m(1) =\", m)\n break\n elif _[i].upper() in VOWELS and _[i + 1].upper() not in VOWELS or _[i + 1].isalpha() is not True:\n print(\"i(2) =\", i)\n m = 1\n print(\"m(2) =\", m)\n elif _[i].upper() in CONSONANTS and _[i + 1].upper() not in CONSONANTS or _[i + 1].isalpha() is not True:\n print(\"i(3) =\", i)\n m = 1\n print(\"m(3) =\", m)\n else:\n m = 0\n break\n n += m\n print(\"___\")\n return n\n\nprint(\"n =\", striped_words(\"Hello world\"))\n\"\"\"\n\nStriped Words\n\nThe alphabet contains both vowel and consonant letters (yes, we divide the letters).\n\nVowels -- A E I O U Y\n\nConsonants -- B C D F G H J K L M N P Q R S T V W X Z\n\nYou are given a block of text with different words. These words are separated by white-spaces and punctuation marks. \nNumbers are not considered words in this mission (a mix of letters and digits is not a word either). You should count \nthe number of words (striped words) where the vowels with consonants are alternating, that is; words that you count \ncannot have two consecutive vowels or consonants. The words consisting of a single letter are not striped -- do not \ncount those. Casing is not significant for this mission.\n\nInput: A text as a string.\n\nOutput: A quantity of striped words as a number.\n\nExample:\n\nstriped_words(\"My name is ...\") == 3\nstriped_words(\"Hello world\") == 0\nstriped_words(\"A quantity of striped words.\") == 1\nstriped_words(\"Dog,cat,mouse,bird.Human.\") == 3\nPrecondition:\n\nA text contains only ASCII symbols.\n\n0 < |text| < 10000\n\nHow it is used:\n\nThis idea in this task is a useful exercise for linguistic research and analysis. Text processing is one of the main \ntools used in the analysis of various books and languages and can help translate print text to a digital format.\n\nif __name__ == '__main__':\n # These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert striped_words(\"My name is ...\") == 3, \"All words are striped\"\n assert striped_words(\"Hello world\") == 0, \"No one\"\n assert striped_words(\"A quantity of striped words.\") == 1, \"Only of\"\n assert striped_words(\"Dog,cat,mouse,bird.Human.\") == 3, \"Dog, cat and human\"\n\n print(\"Coding complete? Click 'Check' to review your tests and earn cool rewards!\")\n\"\"\"","repo_name":"paolo12/first_gift","sub_path":"empireofcode/striped_words.py","file_name":"striped_words.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"73111381526","text":"# ------------------------------------------------------------------------\n# This file is to parse each individual file from dataset into trees and save in the mongodb\n# ------------------------------------------------------------------------\n\nimport os\nimport networkx as nx\nimport argparse\nfrom tree_sitter import Language, Parser\nimport pymongo\n\n# -------------------------------------------------------\n# Mongo db initialization commands starts here\n# -------------------------------------------------------\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\nmydb = myclient[\"CodeStylometry\"]\n\nmycol = mydb[\"Graphs\"]\n\n# -------------------------------------------------------\n# Mongo db initialization commands ends here\n# -------------------------------------------------------\n\nnodeCount = 0\nnodePosition = 0\nsubtree = []\nextractTree = []\nnewdirectorypath = \"\"\n\n\ndef printTree(node, parentNode):\n global nodeCount\n nodeCount += 1\n\n if node.type.strip():\n nodeName = nodeCount\n print(nodeName)\n else:\n nodeName = \"separator\"\n print(nodeName)\n\n for child in node.children:\n printTree(child, nodeName)\n\n\ndef convertTree(node, parentNode):\n global nodePosition\n nodePosition += 1\n\n # if node.type.strip():\n # nodeName = nodePosition\n\n # else:\n # nodeName = \"separator\"\n\n nodeName = nodePosition\n if node.type.strip():\n graph.add_node(nodeName, nodeType=node.type)\n\n else:\n graph.add_node(nodeName, nodeType=\"separator\")\n\n # graph.add_node(nodeName, nodeType = node.type)\n\n if not (parentNode is None):\n graph.add_edge(\n parentNode, nodeName, between=str(parentNode) + \" - \" + str(nodeName)\n )\n\n for child in node.children:\n convertTree(child, nodeName)\n\n\n\ndef printnxTree(parentNode):\n childExist = list(nx.neighbors(graph, parentNode))\n\n if childExist:\n print(parentNode)\n for childnode in childExist:\n # print(childnode)\n printnxTree(childnode)\n\n else:\n print(parentNode)\n\n\n\ndef findnxFunctionNodes(graph):\n funcfinder = graph.number_of_nodes()\n type_of_node = nx.get_node_attributes(graph, \"nodeType\")\n fdefnodeposition = []\n\n for x in range(1, funcfinder + 1):\n if type_of_node[x] == \"function_definition\":\n fdefnodeposition.append(x)\n\n return fdefnodeposition\n\n\ndef extractnxSubTrees(nodelist):\n # for pnode in nodelist:\n # print(nodelist)\n # print(len(nodelist))\n\n for n in range(0, len(nodelist)):\n extract = extraction(nodelist[n])\n print(extract)\n extractTree.clear()\n\n\n\ndef extraction(node):\n childExist = list(nx.neighbors(graph, node))\n extractTree.append(node)\n\n for childNode in childExist:\n if childExist:\n extraction(childNode)\n\n return extractTree\n\n\n\ndef savenxSubTrees(nodelist, filepath):\n for pnode in nodelist:\n subtree = nx.DiGraph()\n subtree = nx.dfs_tree(graph, pnode)\n subtreefilename = str(pnode) + \"Subtree.xml\"\n nx.write_graphml(subtree, filepath + \"/\" + subtreefilename)\n\n\n\ndef printwithdfs_tree(\n nodelist, codeByfolder, datafolder, filename, codeBy, destPath, filePath\n):\n # # -------------------------------------------------------\n # # Mongo db initialization commands\n # # -------------------------------------------------------\n\n # myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\n # mydb = myclient[\"localllm\"]\n\n # mycol = mydb[\"ExpGraphs\"]\n\n # # -------------------------------------------------------\n # # Mongo db initialization commands end here\n # # 
-------------------------------------------------------\n\n # -------------------------------------------------------\n # importing elementTree to convert the graph in to string\n # -------------------------------------------------------\n\n import xml.etree.ElementTree as ET\n\n # -------------------------------------------------------\n # running for loop for each parent node wherever function definition was identified\n # -------------------------------------------------------\n\n for parentNode in nodelist:\n subtree = nx.DiGraph()\n subtree = nx.dfs_tree(graph, parentNode)\n subtree = graph.subgraph(subtree).copy()\n\n # print(subtree)\n\n # -------------------------------------------------------\n\n # Create the path for the destination foldername by using splited source foldername\n newdirectorypath = os.path.join(destPath, codeByfolder, datafolder)\n finalpath = os.path.join(newdirectorypath, filename + \".xml\")\n try:\n os.makedirs(newdirectorypath, exist_ok=True)\n # Write the graph to new destination folder which will have same folder name as source\n # subtreeName = str(parentNode) + 'subgraph.xml'\n # nx.write_graphml(subtree, newdirectorypath + '/' + subtreeName)\n nx.write_graphml(subtree, finalpath)\n except OSError as error:\n print(\"unable to create\")\n\n # -------------------------------------------------------\n # Code block to convert the subtree that was created above to a string to save in mongodb\n # -------------------------------------------------------\n\n # subtreePath = os.path.join(newdirectorypath, subtreeName)\n\n subtree = ET.parse(finalpath)\n\n subtree = subtree.getroot()\n\n str_subtree = ET.tostring(subtree, encoding=\"unicode\")\n\n # print(str_subtree)\n\n # -------------------------------------------------------\n # -------------------------------------------------------\n\n # # -------------------------------------------------------\n # # Code block to convert the parent Graph that was created in previous function to a string to save in mongodb\n # # -------------------------------------------------------\n\n # parentGraphPath = os.path.join(newdirectorypath, \"NetworkGraph.xml\")\n\n # parentGraph = ET.parse(parentGraphPath)\n\n # parentGraph = parentGraph.getroot()\n\n # str_parentGraph = ET.tostring(parentGraph, encoding='unicode')\n\n # # -------------------------------------------------------\n # # -------------------------------------------------------\n\n # # -------------------------------------------------------\n # # Defining the insert query to insert in mongodb as a record\n # # -------------------------------------------------------\n\n insertDocument = {\n \"File_Name\": filename,\n \"File_Path\": filePath,\n \"Coded_By\": codeBy,\n \"Parent_Folder\": datafolder,\n \"Root_Folder\": codeByfolder,\n \"Node_ID\": parentNode,\n \"Graph_Path\": finalpath,\n \"XML_Graph\": str_subtree,\n }\n\n # # -------------------------------------------------------\n # # running the insert query in mongodb\n # # -------------------------------------------------------\n\n x = mycol.insert_one(insertDocument)\n\n\n\ndef ArrayFunctionName(node, parentNode):\n # global nodeCount\n # nodeCount += 1\n\n # if node.type.strip()\n # nodeName = nodeCount\n # print(nodeName)\n # else:\n # nodeName = \"separator\"\n # print(nodeName)\n\n # for child in node.children:\n # printTree(child, nodeName)\n pass\n\n # ---------------------------------------------------\n # ---------------- MAIN STARTS HERE -----------------\n # 
---------------------------------------------------\n\n\ndef main():\n # Parse command line arguments using argparse\n\n cmdparser = argparse.ArgumentParser()\n cmdparser.add_argument(\n \"file\", help=\"Add absolute path to the file that needs to be parsed\"\n )\n # cmdparser.add_argument('-parser', help='Add absolute path to Tree Sitter parser library', default='build/my-languages.so')\n cmdparser.add_argument(\"destfolder\")\n\n args = cmdparser.parse_args()\n\n # Destination folder given in the looper file\n destPath = args.destfolder\n\n # get the foldername of the source file so can create the destination foldername of the same name\n splittedPath = args.file.split(\"/\")\n\n # to remember the filename\n filePath = args.file\n\n # need foldername to evaluate if code is written by human, machine or human and machine\n if \"Control\" in filePath:\n codeBy = \"Human\"\n\n if \"Autopilot\" in filePath:\n codeBy = \"Machine\"\n\n codeByFolder = splittedPath[-3]\n datafolder = splittedPath[-2]\n filename = os.path.splitext(splittedPath[-1])[0]\n\n # Load the parser library\n CPP_LANGUAGE = Language(\"build/my-languages.so\", \"cpp\")\n # PY_LANGUAGE = Language('build/my-languages.so', 'python')\n parser = Parser()\n parser.set_language(CPP_LANGUAGE)\n\n # Parse the file\n with open(args.file, \"rb\") as f:\n fcontent = f.read(-1)\n tree = parser.parse(fcontent)\n\n global graph\n graph = nx.DiGraph()\n\n global newgraph\n newgraph = nx.DiGraph()\n\n dfsrootnode = 1\n\n # print(filePath)\n # print(codeBy)\n # print(datafolder)\n # print(filename)\n # # print(destPath)\n\n # newdirectorypath = os.path.join(destPath, codeByFolder, datafolder)\n # # print(newdirectorypath)\n\n # finalpath = os.path.join(newdirectorypath , filename + '.xml')\n # print(finalpath)\n\n # ------------------------------------------------------------\n # 1. Print every node in the tree (in any order) using a recursive depth-first traversal\n # ------------------------------------------------------------\n\n # printTree(tree.root_node, None)\n\n # ------------------------------------------------------------\n # 2. Convert the tree-sitter tree into a networkx graph\n # ------------------------------------------------------------\n\n convertTree(tree.root_node, None)\n\n # #Create the path for the destination foldername by using splited source foldername\n # newdirectorypath = os.path.join(destPath, codeByFolder, datafolder)\n # finalpath = os.path.join(newdirectorypath , filename + '.xml')\n # try:\n # os.makedirs(newdirectorypath, exist_ok=True)\n # #Write the graph to new destination folder which will have same folder name as source\n # nx.write_graphml(graph, finalpath)\n # except OSError as error:\n # print(\"unable to create\")\n\n # ------------------------------------------------------------\n # 3. Print every node in the networx tree using a recursive depth-first traversal\n # ------------------------------------------------------------\n\n # printnxTree(dfsrootnode)\n\n # ------------------------------------------------------------\n # 4. Generate an array which contains every node in the networkx tree which has the string \"function_definition\" in its name\n # ------------------------------------------------------------\n\n nxfnodes = findnxFunctionNodes(graph)\n\n # ------------------------------------------------------------\n # 5. 
For each node in the array, extract the subtree rooted at that node\n # ------------------------------------------------------------\n\n # extractnxSubTrees(nxfnodes)\n\n # ------------------------------------------------------------\n # 6. Save each subtree to a separate file\n # ------------------------------------------------------------\n\n # savenxSubTrees(nxfnodes, newdirectorypath)\n\n # ------------------------------------------------------------\n # 7. Create tree with networkx function\n # ------------------------------------------------------------\n\n printwithdfs_tree(\n nxfnodes, codeByFolder, datafolder, filename, codeBy, destPath, filePath\n )\n\n # ------------------------------------------------------------\n # 8. Print function name\n # ------------------------------------------------------------\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ldklab/scored23_release","sub_path":"ASTanalysis/ASTParser.py","file_name":"ASTParser.py","file_ext":"py","file_size_in_byte":11950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
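The core networkx step in the parser above — finding every node whose nodeType is "function_definition" and extracting the subtree it roots with dfs_tree plus subgraph — can be shown on a small hand-built graph, leaving the tree-sitter specifics aside. The node labels here are invented stand-ins for a real parse.

```python
import networkx as nx

g = nx.DiGraph()
g.add_node(1, nodeType="translation_unit")
g.add_node(2, nodeType="function_definition")
g.add_node(3, nodeType="compound_statement")
g.add_node(4, nodeType="function_definition")
g.add_edges_from([(1, 2), (2, 3), (1, 4)])

func_nodes = [n for n, t in nx.get_node_attributes(g, "nodeType").items()
              if t == "function_definition"]
for root in func_nodes:
    # dfs_tree keeps only what is reachable from root; subgraph(...).copy()
    # restores the node attributes, mirroring printwithdfs_tree() above.
    sub = g.subgraph(nx.dfs_tree(g, root)).copy()
    print(root, sorted(sub.nodes), nx.get_node_attributes(sub, "nodeType"))
```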
+{"seq_id":"36357518879","text":"import kivy\nfrom kivy.app import App\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.button import Button\n\nclass UI(GridLayout):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.cols = 1\n self.add_widget(Label(text='First Name:'))\n self.name = TextInput(multiline=False)\n self.add_widget(self.name)\n \n self.add_widget(Label(text='Last Name:'))\n self.lname = TextInput(multiline=False)\n self.add_widget(self.lname)\n \n self.sumbit = Button(text='Submit', font_size=40)\n self.add_widget(self.sumbit)\n\nclass MainUI(App):\n \n def build(self):\n return UI()\n\ndef main():\n UI = MainUI()\n UI.run()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Advik-B/Learn-Python","sub_path":"kivy/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
+{"seq_id":"11501692920","text":"# -*- coding: utf-8 -*-\nimport re,time,xlrd,random\nimport pyautogui#模拟鼠标键盘操作\nimport pyperclip#剪切板操作\nimport traceback#用于错误处理\nfrom openpyxl import load_workbook\n\n#******************************************取数据***********************************************\ndef diZhiChuLi(beizhu):\n '''地址处理:传入存在改地址的备注,返回address'''\n beizhu=beizhu.replace('\\n','')#去除备注中的换行\n dz01=re.findall('改地址[::](.*?)┋',beizhu)\n d=''\n if dz01:\n d=''.join(e for e in dz01[-1] if e.isalnum() or e in '::;;._-')#去除备注里的符号\n c='(?:联系方式|电话|手机).{,5}?(?=1\\d{10})|(?:所在地区|详细地址|收货人|姓名|地址)[::]'\n d=re.sub(c,'',d)\n if d:d=re.split('[;;]',d)\n if len(d)>3:return False,False\n dh=[]\n xm=[]\n dz=[]\n for i in d:\n cd=len(i)\n sz=len(re.findall('\\d',i))\n sj=re.findall('省|市|区|县|乡|镇|村|路|号|街|道|',i)\n try:\n while 1:sj.remove('')\n except:pass\n sj=len(sj)\n if sz>9:\n if cd-sz<6:dh+=[i]\n else:dz+=[i]\n else:\n if sj>2:dz+=[i]\n elif sj==0:\n if cd<8:xm+=[i]\n else:dz+=[i]\n else:\n if cd>10:dz+=[i]\n else:xm+=[i]\n for i in [dh,dz,xm]:\n try:\n while 1:i.remove('')\n except:pass\n if len(dh)==0:dh=''\n elif len(dh)==1:dh=dh[0]\n else:\n dh1=''\n for i in dh:\n if not dh1:\n num1=len(i)\n dh1=i\n elif num1>len(i):\n num1=len(i)\n dh1=i\n dh=dh1\n if len(xm)==0:xm=''\n elif len(xm)==1:xm=xm[0]\n else:\n xm1=''\n for i in xm:\n if not xm1:\n num1=len(i)\n xm1=i\n elif num1>len(i):\n num1=len(i)\n xm1=i\n xm=xm1\n if len(dz)==0:dz=''\n elif len(dz)==1:dz=dz[0]\n else:\n dz1=''\n for i in dz:\n if not dz1:\n num1=len(i)\n dz1=i\n elif num18:li.append([i,zhi])#i行,zhi权重\n zhi=[i[1] for i in li]#值的列表\n zhi1=sorted(zhi)[-2:]\n if not zhi1:print('改地址备注权重不足以判断地区:备注内容非地址',dz);return False,False\n elif len(zhi1)==1 or zhi1[0]!=zhi1[1]:\n if zhi1[-1]>10:sqx=t1.row_values(li[zhi.index(zhi1[-1])][0])[:5]\n else:print('改地址备注权重不足以判断地区:备注内容不全',dz);return False,False\n else:\n if zhi1[0]>10:\n g5=t1.row_values(li[zhi.index(zhi1[-1])][0])[4]\n #sqx=[t1.row_values(i)[:3] for i in range(t1.nrows) if (t1.row_values(i)[4]==g5 and '其它区' in t1.row_values(i)[5])][0]\n try:sqx=[t1.row_values(i)[:5] for i in range(t1.nrows)\n if (t1.row_values(i)[4]==g5 and '其它区' in t1.row_values(i)[5])][0]\n except:print('其他区不存在1111111111111111',dz);return False,False\n else:print('修改地址备注异常222222222222',dz);return False,False\n shengshi=[]\n sqx1=[]\n if sqx:\n sqx1= [int(x) for x in sqx[:3]]\n shengshi= [x for x in sqx[3:]]\n address=(xm,dh,dz,sqx1)#姓名、电话、地址、地区编码的列表,省份城市列表\n for i in address:\n if i:return address,shengshi\n #print('电话',dh);print('姓名',xm);print('地址',dz);print('编码',sqx);print('省份城市',shengshi)\n return False,False\n\ndef xuanzeshijian():#时间段\n t1,li=time.time(),[1,4,8,12,24,72]\n lli=[]\n for i in range(len(li)-1):\n t2=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t1-3600*li[i+1]))\n t3=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(t1-3600*li[i]))\n lli.append([t2,t3])\n return lli\n\ndef ddsj():#订单数据:去取当前查询到的当前页所有订单的数据并返回(2)\n import zong\n b=zong.quanXuan()#实现判断订单明细是否能加载成功或者存在\n if not b:return {}\n #zong.xiuGaiBiaoQian()#设置标签为测试\n b=[i.split('\\t') for i in b.split('\\r\\n校验')]#关注前11个--店铺名\\仓库名\\省\\市\\区\\地址\\客服备注\\客户备注\\快递\\异常原因\\平台单号\n c={}\n for i in b:\n try:\n if i[12]=='' or i[12]=='测试':\n c[i[11]]={'店铺名':i[1],'仓库名':i[2],'省份':i[3],'城市':i[4],'区':i[5],'地址':i[6],'客服备注':i[7],'客户备注':i[8],'快递':i[9],'异常原因':i[10],'订单标签':i[12]}\n except:print(i,'订单分割存不成功')#;Ri_zhi()\n return c\n\n#加载中必换,其他可以通过控件坐标定位\ndef ddhpsj():#订单货品数据:获取当前查询到订单的货品数据并返回(3)\n import zong,abc123\n d={}\n f=''\n for j in range(200):\n for i in 
range(36):#此循环实现判断订单货品明细是否能加载成功\n b=pyperclip.copy(\"\")\n pyautogui.click(*zong.zb['商品信息'])#商品信息\n time.sleep(0.1)\n pyautogui.click(*zong.zb['商品全选'])#全选商品\n abc123.kuaiJieJian(17,67)\n time.sleep(0.1)\n b=pyperclip.paste()\n if re.findall('平台商品|普通商品',b):pan_tiao=0 ;break\n try:\n b=[i.split('\\t') for i in b.split('\\r\\n校验')]#商品编码\\品名\\数量\\规格\\商品类型\n b=[{'商品编码':i[1],'品名':i[2],'数量':i[3],'规格':i[4],'商品类型':i[5]} for i in b]#前五个\n except:print('出错87') ; break\n pyautogui.click(*zong.zb['订单信息'])#订单信息\n pyautogui.press('tab',3)\n abc123.kuaiJieJian(17,67)\n if f!=pyperclip.paste():\n f=pyperclip.paste()\n else:break\n if b:d[f]={'订单货品':b}\n else:print(f,'商品不符合条件,或者不存在')\n pyautogui.click(*zong.zb['订单列表'])\n pyautogui.press('down')\n return d\n\ndef ddsp():#订单商品���调用ddsj,和ddhpsj 返回订单商品数据\n dd=ddsj()\n print('dd的订单数是:%s'% len(dd))\n if not dd:return []\n data1=ddhpsj()\n if not data1:return []\n print('hp的订单数是:%s'% len(data1))\n hp={}\n for i in data1:\n try:hp[i]=dict(data1[i],**dd[i])\n except:pass#print(i,'订单商品合并失败');Ri_zhi()\n return hp\n\ndef readConfigure():\n '''获取配置,返回各配置文件配置'''\n wb=load_workbook('快递配置.xlsx')\n ws=wb['人工处理']\n rgcl=[[i.value for i in ws[i+1]] for i in range(ws.max_row)]\n rgcl=[i for i in rgcl if i.count(None)!=len(i)]\n rgcl=[{i1:i2 for i1,i2 in zip(rgcl[0],i) if i2 and i1 !='备注'} for i in rgcl[1:]]\n ws=wb['改快递']\n gkd=[[i.value for i in ws[i+1]] for i in range(ws.max_row)]\n gkd=[i for i in gkd if i.count(None)!=len(i)]\n gkd=[{i1:i2 for i1,i2 in zip(gkd[0],i) if i2 and i1 !='备注'} for i in gkd[1:]]\n ws=wb['改赠品']\n gzp=[[i.value for i in ws[i+1]] for i in range(ws.max_row)]\n gzp=[i for i in gzp if i.count(None)!=len(i)]\n gzp=[{i1:i2 for i1,i2 in zip(gzp[0],i) if i2 and i1 !='备注'} for i in gzp[1:]]\n ws=wb['当前使用快递']\n k=[[i.value for i in ws[i+1]] for i in range(ws.max_row)][1:]\n k=[i for i in k if i.count(None)!=len(i)]\n k={i[0]:i[1] for i in k}\n wb.close\n return rgcl,gkd,gzp,k\n\ndef panduanpeizhi(v1,v2):#判段配置,传入配置值和订单对应字段值,符合配置返回True,不符合返回False\n #if v1==None:return True\n if re.findall('\\d+<%s<\\d+',v1):return eval(v1% v2)\n else:\n tj=re.findall('^\\[包含\\]|^\\[不包含\\]|^\\[空\\]|^\\[非空\\]',v1[:5])[0]\n v1=v1.replace(tj,'')\n if '&' in v1:\n zf01=v1.split('&')\n zf01=['(?=.*%s)'% i for i in zf01]\n v1=''.join(zf01)+'.+'\n if tj=='[包含]':\n if re.findall(v1,v2):return True\n else:return False\n elif tj=='[不包含]':\n if re.findall(v1,v2):return False\n else:return True\n elif tj=='[非空]':\n if v2=='':return False\n else:return True\n elif tj=='[空]':\n if v2=='':return True\n else:return False\n\ndef panduan(hp,Config):\n '''传入订单货品数据和配置内容,返回zdsh(自动修改)、zdsh1(自动审核平台单号)、人工处理(平台单号)'''\n rgcl,gkd,gzp,k=Config\n zdsh={'改赠品':[],'关闭订单':[],'改地址':{}}\n zdsh['改快递']={k[i]:[] for i in k}\n zdsh1=[]\n 人工处理=[]\n print('将要分析的订单数是:%s'%len(hp))\n for i in hp:#i订单编号\n dianpu001=re.findall('SEPTWOLVES雅赋专卖店|少年狼箱包|拼多多美之瑞专卖店',hp[i]['店铺名'])\n if dianpu001:\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s存在不需要处理的店铺%s\\n'%(i,dianpu001))\n continue#这几个店铺不处理\n try:\n guige,pingming,shuliang='','',0\n for j1 in hp[i]['订单货品']:\n if '赠品' in j1['商品类型']:continue\n guige+=j1['规格']\n pingming+=j1['品名']\n shuliang+=int(j1['数量'])\n hp[i]['规格']=guige\n hp[i]['品名']=pingming\n hp[i]['平台商品总数量']=shuliang\n panduan_rg=False\n #筛选改地址\n dz01=re.findall('改地址[::](.*?)┋',hp[i]['客服备注'])\n if dz01:\n dz01,shengshi=diZhiChuLi(hp[i]['客服备注'])\n if dz01:\n zdsh['改地址'][i]=dz01#符合条件,修改备注\n if shengshi:\n hp[i]['省份']=shengshi[0]\n hp[i]['城市']=shengshi[1]\n else:\n with open(\"触发日志.txt\",'a') as 
f:f.write('订单%s改地址识别原因需要人工审核\\n'% i)\n panduan_rg=True#判断人工处理1\n\n #判断人工处理2\n r=hp[i]['客服备注'].split('┋')\n #kdlx1='|'.join([k1 for k1 in k]) #百世快递|顺丰|中通快递拉杆箱|中通速递|中国邮政-快递包裹|申通快递|圆通速递\n if len([i7 for i7 in r if not re.findall('快递[::]|长度[::]|改地址[::]',i7)])>1:\n panduan_rg=True\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s备注含有需要人工处理的内容\\n'% i)\n\n #判断人工处理3\n for i11 in rgcl:#i1一行\n i1={i:i11[i] for i in i11 if not re.findall('处理方式',i)}\n pan1=1\n for i4 in i1:\n if not panduanpeizhi(i1[i4],hp[i][i4]):#人工筛选\n pan1=0\n break\n if pan1:\n panduan_rg=True\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s符合条件%s人工处理\\n'%(i,i1))\n break\n\n #筛选改快递\n for i11 in gkd:#i1一行\n i1={i:i11[i] for i in i11 if not re.findall('使用快递',i)}#当前选择条件(一行)\n pan1=1\n for i4 in i1:\n if not panduanpeizhi(i1[i4],hp[i][i4]):#快递筛选\n pan1=0\n break\n if pan1:\n if '客服备注指定' not in i11['使用快递']:\n kd=i11['使用快递'].split('|')\n kd=random.choice(kd)\n else:\n kd=re.findall('快递[::](.*?)┋',hp[i]['客服备注'])[-1]#中通\n kd=[i for i in k if kd in i]#['中通快递拉杆箱', '中通速递']\n if len(kd)!=1:\n panduan_rg=True\n break\n else:kd=kd[0]\n if kd not in hp[i]['快递']:\n zdsh['改快递'][k[kd]].append(i)\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s符合条件%s改快递为[%s]\\n'%(i,i1,kd))\n break\n\n #筛选改赠品 \n for i11 in gzp:#i1一行\n i1={i:i11[i] for i in i11 if not re.findall('处理方式|使用快递',i)}\n clfs=i11['处理方式']\n try:\n sykd01=i11['使用快递']\n except:sykd01=''\n pan1=1\n for i4 in i1:\n if not panduanpeizhi(i1[i4],hp[i][i4]):\n pan1=0\n break\n if pan1:\n if sykd01=='' or sykd01==kd:\n zdsh['改赠品'].append(i)\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s符合条件%s改打火机为卡包\\n'%(i,i1))\n break#向后移动了一格,避免了i1为空的情况\n except:\n panduan_rg=True\n with open(\"触发日志.txt\",'a') as f:f.write('订单%s错误需要人工处理!!\\n'% i)\n Ri_zhi()\n try:print(j1,'错误需要人工处理!!',i)\n except:pass\n if panduan_rg:人工处理.append(i)\n else:zdsh1+=[i]\n return zdsh,zdsh1,人工处理#返回需要修改的订单、所有可以自动审核订单、人工处理订单\n\n#******************************************取数据***********************************************\n\nclass Ri_zhi():#日志和记录\n def __init__(self):\n now = int(time.time())\n self.timeArray = time.localtime(now)\n self.otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\",self.timeArray)\n if traceback.format_exc()!='NoneType: None\\n':\n f=open(r\"执行日志.txt\",'a')\n traceback.print_exc(file=f)\n f.write('*******************************'+self.otherStyleTime+'************************************\\n\\n')\n f.flush() \n f.close()\n def xujiludnr(self,neirong):\n rq01='%s%s%s'%(self.timeArray[0],self.timeArray[1],self.timeArray[2])\n f=open(\"错误订单.txt\",'a')\n f.write(neirong+'\\n')\n f.flush()\n f.close()\n\nif __name__ == '__main__':\n #b,c,d=panduan()\n #b=ddsj()\n pass\n hp=ddsp()\n Config=readConfigure()#获取配置内容\n b,c,d=panduan(hp,Config)\n\n\n'''\ndef xgma(i,n,h=1):#西格玛之python实现\n zhi=0\n for j in range(i,n+1):\n zhi+=j*h\n return zhi\n\n'''\n\n","repo_name":"abc123mmc/untitled","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":15521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
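The rule matching in panduanpeizhi() above mixes eval() on range templates such as "2<%s<10" with tag prefixes like [包含]/[不包含]. A simplified re-expression of that predicate without eval — rules joined by "&" are omitted here — is sketched below; it is an illustration, not the production logic.

```python
import re

def matches_rule(rule, value):
    # Range template such as "2<%s<10": check numerically instead of eval().
    m = re.fullmatch(r"(\d+)<%s<(\d+)", rule)
    if m:
        return int(m.group(1)) < float(value) < int(m.group(2))
    if rule.startswith("[包含]"):        # "contains"
        return re.search(rule[len("[包含]"):], str(value)) is not None
    if rule.startswith("[不包含]"):      # "does not contain"
        return re.search(rule[len("[不包含]"):], str(value)) is None
    if rule.startswith("[非空]"):        # "non-empty"
        return value != ''
    if rule.startswith("[空]"):          # "empty"
        return value == ''
    return False

print(matches_rule("2<%s<10", 5))            # True
print(matches_rule("[包含]顺丰", "顺丰快递"))  # True
```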
+{"seq_id":"30764494929","text":"\"\"\"\n This file is part of nucypher.\n\n nucypher is free software: you can redistribute it and/or modify\n it under the terms of the GNU Affero General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n nucypher is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with nucypher. If not, see .\n\"\"\"\n\nimport pytest\n\nfrom nulink.characters.lawful import Alice, Bob\nfrom nulink.crypto.powers import DecryptingPower, SigningPower\nfrom nulink.policy.identity import Card\nfrom tests.utils.middleware import MockRestMiddleware\n\n\n@pytest.mark.parametrize('character_class', (Bob, Alice))\ndef test_character_card(character_class, capsys):\n character = character_class(federated_only=True,\n start_learning_now=False,\n network_middleware=MockRestMiddleware())\n\n character_card = character.get_card()\n same_card = Card.from_character(character)\n assert character_card == same_card\n\n with pytest.raises(TypeError):\n # only cards can be compared to other cards\n _ = character_card == same_card.verifying_key\n\n # Bob's Keys\n assert character_card.verifying_key == character.public_keys(SigningPower)\n assert character_card.encrypting_key == character.public_keys(DecryptingPower)\n\n # Card Serialization\n\n # bytes\n card_bytes = bytes(character_card)\n assert Card.from_bytes(card_bytes) == character_card == same_card\n\n # hex\n hex_bob = character_card.to_hex()\n assert Card.from_hex(hex_bob) == character_card == same_card\n\n # base64\n base64_bob = character_card.to_base64()\n assert Card.from_base64(base64_bob) == character_card == same_card\n\n # qr code echo\n character_card.to_qr_code()\n captured = capsys.readouterr()\n qr_code_padding = '\\xa0' * 21 # min length for qr code version 1\n assert captured.out.startswith(qr_code_padding)\n assert captured.out.endswith(f'{qr_code_padding}\\n')\n\n # filepath without nickname\n assert character_card.id.hex() in str(character_card.filepath)\n\n # nicknames\n original_checksum = character_card.id\n nickname = 'Wilson the Great'\n expected_nickname = nickname.replace(' ', '_')\n character_card.set_nickname(nickname)\n restored = Card.from_bytes(bytes(character_card))\n restored_checksum = restored.id\n assert restored.nickname == expected_nickname\n assert original_checksum == restored_checksum == same_card.id\n\n # filepath with nickname\n assert f'{expected_nickname}.{character_card.id.hex()}' in str(character_card.filepath)\n","repo_name":"NuLink-network/nulink-core","sub_path":"tests/unit/test_card.py","file_name":"test_card.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"30"}
+{"seq_id":"20167339537","text":"import json\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Peptide:\r\n def __init__(self, sequence,\r\n peptideCoordinates,\r\n nodeCoordinates,\r\n protease2expression):\r\n self.sequence = sequence\r\n self.peptideCoordinates = peptideCoordinates\r\n self.nodeCoordinates = [nodeCoordinates[0]]\r\n for i in range(1, len(nodeCoordinates)):\r\n if self.nodeCoordinates[-1][1] == nodeCoordinates[i][0]:\r\n self.nodeCoordinates[-1][1] = nodeCoordinates[i][1]\r\n else:\r\n self.nodeCoordinates.append(nodeCoordinates[i])\r\n assert len(self.nodeCoordinates) == len(self.peptideCoordinates), sequence\r\n self.protease2expression = protease2expression\r\n\r\n @staticmethod\r\n def read(filename, proteases):\r\n result = {}\r\n with open(filename, \"r\") as fs:\r\n header = fs.readline().rstrip().split('\\t')\r\n sequenceIndex = header.index(\"Sequence\")\r\n peptideCoordinatesIndex = header.index(\"PeptideCoordinates\")\r\n nodeCoordinatesIndex = header.index(\"NodeCoordinates\")\r\n protease2index = {protease: header.index(protease) for protease in proteases}\r\n for line in fs:\r\n spl = line.rstrip().split('\\t')\r\n result[spl[sequenceIndex]] = Peptide(\r\n spl[sequenceIndex],\r\n [[int(coordinate.split('|')[0]), int(coordinate.split('|')[1])]\r\n for coordinate in spl[peptideCoordinatesIndex].split(';')],\r\n [[int(coordinate.split('|')[0]), int(coordinate.split('|')[1])]\r\n for coordinate in spl[nodeCoordinatesIndex].split(';')],\r\n {protease: int(spl[index])\r\n for protease, index in protease2index.items()}\r\n )\r\n return result\r\n\r\n\r\ndef plotSummaryJunctionPeptides(peptide2data, proteases, color, outputFile):\r\n size = 3\r\n protease2count = {protease: [0 for _ in range(size)] for protease in proteases}\r\n for peptide, data in peptide2data.items():\r\n for protease in proteases:\r\n if data.protease2expression[protease] != 0:\r\n protease2count[protease][min(size - 1, len(data.nodeCoordinates) - 1)] += 1\r\n protease2countNormalized = {protease: [protease2count[protease][i] / sum(protease2count[protease])\r\n for i in range(size)]\r\n for protease in proteases}\r\n\r\n width = 0.5\r\n\r\n fig, axs = plt.subplots(nrows=2, ncols=1, figsize=(5, 2*5), sharex=True, squeeze=True)\r\n\r\n inds = [i for i in range(len(proteases))]\r\n values = [0 for _ in range(len(proteases))]\r\n for i in range(len(color)):\r\n values0 = [protease2count[proteases[j]][i] for j in range(len(proteases))]\r\n axs[0].bar(inds, values0, width, bottom=values, color=color[i])\r\n values = [x0 + x1 for x0, x1 in zip(values, values0)]\r\n axs[0].set_ylabel(\"Peptides\")\r\n\r\n values = [0 for _ in range(len(proteases))]\r\n for i in range(len(color)):\r\n values0 = [protease2countNormalized[proteases[j]][i] for j in range(len(proteases))]\r\n axs[1].bar(inds, values0, width, bottom=values, color=color[i])\r\n if i == 0:\r\n for j in range(len(proteases)):\r\n axs[1].text(j, values0[j] - 0.05, f\"{values0[j]}\"[:4], color=color[1],\r\n verticalalignment='top', horizontalalignment='center')\r\n values = [x0 + x1 for x0, x1 in zip(values, values0)]\r\n axs[1].set_ylabel(\"Percent\")\r\n axs[1].set_xticks(inds)\r\n proteaseLabels = []\r\n for i in range(len(proteases)):\r\n if len(proteases[i]) > 7:\r\n proteaseLabels.append(proteases[i][:5] + \".\")\r\n else:\r\n proteaseLabels.append(proteases[i])\r\n axs[1].set_xticklabels(proteaseLabels)\r\n\r\n plt.savefig(outputFile)\r\n\r\n\r\ndef plotSummaryAroundExon(peptide2data, proteases, color, outputFile, size=10):\r\n 
nTerminusCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n cTerminusCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n nTerminusMsMsCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n cTerminusMsMsCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n for peptide, data in peptide2data.items():\r\n for protease in proteases:\r\n if data.protease2expression[protease] != 0:\r\n if data.peptideCoordinates[0][0] - data.nodeCoordinates[0][0] < size:\r\n nTerminusCount[protease][data.peptideCoordinates[0][0] - data.nodeCoordinates[0][0]] += 1\r\n nTerminusMsMsCount[protease][data.peptideCoordinates[0][0] - data.nodeCoordinates[0][0]] += \\\r\n data.protease2expression[protease]\r\n if data.nodeCoordinates[-1][1] - data.peptideCoordinates[-1][1] < size:\r\n cTerminusCount[protease][data.nodeCoordinates[-1][1] - data.peptideCoordinates[-1][1]] += 1\r\n cTerminusMsMsCount[protease][data.nodeCoordinates[-1][1] - data.peptideCoordinates[-1][1]] += \\\r\n data.protease2expression[protease]\r\n\r\n # data = [[nTerminusCount, cTerminusCount], [\r\n # {protease: [nTerminusCount[protease][i] / sum(nTerminusCount[protease]) for i in range(size)]\r\n # for protease in proteases},\r\n # {protease: [cTerminusCount[protease][i] / sum(cTerminusCount[protease]) for i in range(size)]\r\n # for protease in proteases}\r\n # ]]\r\n data = [nTerminusCount, cTerminusCount]\r\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(2*5, 5), squeeze=True)\r\n\r\n inds = [i for i in range(size)]\r\n\r\n for j in range(len(data)):\r\n for k in range(size):\r\n for p in range(len(proteases)):\r\n # print(inds[k]-0.25+p*0.1)\r\n # print(data[i][j][0][proteases[p]])\r\n # print(data[i][j][0][proteases[p]][k])\r\n if j == 0:\r\n axs[j].bar(inds[k] - 0.3 + p * 0.12, data[j][proteases[p]][k], 0.11, color=color[p])\r\n else:\r\n axs[j].yaxis.tick_right()\r\n axs[j].bar(inds[k] - 0.3 + p * 0.12, data[j][proteases[p]][size - k - 1], 0.11,\r\n color=color[p])\r\n axs[j].set_ylim([0, 15000])\r\n axs[j].set_xticks(inds)\r\n axs[j].set_ylabel(\"Peptides\")\r\n if j == 0:\r\n axs[j].set_xticklabels([r for r in range(size)])\r\n axs[j].set_xlabel(\"Distance to 5' of an exon, nt\")\r\n else:\r\n axs[j].set_xticklabels([size - r - 1 for r in range(size)])\r\n axs[j].set_xlabel(\"Distance to 3' of an exon, nt\")\r\n\r\n plt.savefig(outputFile)\r\n\r\n\r\ndef plotSummaryExonExon(peptide2data, proteases, color, outputFile, size=11):\r\n nTerminusCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n cTerminusCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n nTerminusMsMsCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n cTerminusMsMsCount = {protease: [0 for _ in range(size)] for protease in proteases}\r\n for peptide, data in peptide2data.items():\r\n if len(data.nodeCoordinates) == 1:\r\n continue\r\n for protease in proteases:\r\n if data.protease2expression[protease] != 0:\r\n if data.nodeCoordinates[0][1] - data.peptideCoordinates[0][0] < size:\r\n nTerminusCount[protease][data.nodeCoordinates[0][1] - data.peptideCoordinates[0][0]] += 1\r\n nTerminusMsMsCount[protease][data.nodeCoordinates[0][1] - data.peptideCoordinates[0][0]] += \\\r\n data.protease2expression[protease]\r\n if data.peptideCoordinates[-1][1] - data.nodeCoordinates[-1][0] < size:\r\n cTerminusCount[protease][data.peptideCoordinates[-1][1] - data.nodeCoordinates[-1][0]] += 1\r\n 
cTerminusMsMsCount[protease][data.peptideCoordinates[-1][1] - data.nodeCoordinates[-1][0]] += \\\r\n data.protease2expression[protease]\r\n data = [nTerminusCount, cTerminusCount]\r\n fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(2*5, 5), squeeze=True)\r\n inds = [i for i in range(size)]\r\n for j in range(len(data)):\r\n for k in range(size):\r\n for p in range(len(proteases)):\r\n axs[j].set_ylim([0, 7500])\r\n if j == 0:\r\n axs[0].bar(inds[k] - 0.3 + p * 0.12, data[j][proteases[p]][k], 0.11, color=color[p])\r\n axs[0].set_ylabel(\"Peptides\")\r\n axs[0].set_xlabel(\"Number of nucleotides at 5' donor exon, nt\")\r\n else:\r\n axs[1].bar(inds[k] - 0.3 + p * 0.12, data[j][proteases[p]][k], 0.11, color=color[p])\r\n axs[1].set_xlabel(\"Number of nucleotides at 3' acceptor exon, nt\")\r\n axs[j].set_xticklabels([i for i in range(size)])\r\n axs[j].set_xticks([i for i in range(size)])\r\n\r\n plt.savefig(outputFile)\r\n\r\n\r\ndef plot(parameters):\r\n peptide2data = Peptide.read(parameters[\"input\"][\"peptideFile\"],\r\n parameters[\"input\"][\"proteases\"])\r\n plotSummaryJunctionPeptides(peptide2data, parameters[\"input\"][\"proteases\"],\r\n parameters[\"input\"][\"summaryJunctionPeptides\"][\"color\"],\r\n parameters[\"output\"][\"summaryJunctionPeptides\"])\r\n plotSummaryAroundExon(peptide2data, parameters[\"input\"][\"proteases\"], parameters[\"input\"][\"color\"],\r\n parameters[\"output\"][\"summaryAroundExon\"])\r\n plotSummaryExonExon(peptide2data, parameters[\"input\"][\"proteases\"], parameters[\"input\"][\"color\"],\r\n parameters[\"output\"][\"summaryExonExon\"])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n with open(\"parameters.json\", 'r') as parameters_fs:\r\n plot(json.loads(parameters_fs.read()))\r\n","repo_name":"coongroup/DeepProteomeSequencing-Software","sub_path":"FigureSuppl/FigureS8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10167,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
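The stacked bars in plotSummaryJunctionPeptides() come from calling bar() repeatedly with the running totals passed as bottom=. A minimal standalone version of that idiom is below; the protease names and counts are made up purely to make the snippet runnable.

```python
import matplotlib.pyplot as plt

proteases = ["Trypsin", "LysC", "GluC"]
counts_by_bucket = [[120, 90, 60],   # e.g. peptides spanning one exon
                    [40, 30, 20],    # two exons
                    [10, 8, 5]]      # three or more
colors = ["#4c72b0", "#dd8452", "#55a868"]

fig, ax = plt.subplots()
bottoms = [0] * len(proteases)
for bucket, color in zip(counts_by_bucket, colors):
    # Each layer sits on top of the previous layers via bottom=
    ax.bar(range(len(proteases)), bucket, 0.5, bottom=bottoms, color=color)
    bottoms = [b + v for b, v in zip(bottoms, bucket)]
ax.set_xticks(range(len(proteases)))
ax.set_xticklabels(proteases)
ax.set_ylabel("Peptides")
plt.savefig("stacked_bar_example.png")
```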
+{"seq_id":"15278673411","text":"from runmain import db\nfrom models import Compliance\nfrom flask import session, logging, request\nimport datetime\nimport calendar\nimport re\nimport os\nimport shutil\nimport subprocess\nimport ntpath\nfrom CCC_system_setup import myoslist,addpath,addtxt, scac\n\ndef isoC():\n\n if request.method == 'POST':\n# ____________________________________________________________________________________________________________________B.FormVariables.Compliance\n\n from viewfuncs import parseline, popjo, jovec, newjo, timedata, nonone, nononef\n from viewfuncs import numcheck, numcheckv, viewbuttons, get_ints, numcheckvec\n\n #Zero and blank items for default\n username = session['username'].capitalize()\n cache=0\n modata=0\n modlink=0\n fdata=0\n filesel=''\n docref=''\n doctxt=''\n longs=''\n\n\n today = datetime.date.today()\n now = datetime.datetime.now().strftime('%I:%M %p')\n\n leftsize=10\n\n match = request.values.get('Match')\n modify = request.values.get('Modify')\n vmod = request.values.get('Vmod')\n viewo = request.values.get('ViewO')\n print = request.values.get('Print')\n returnhit = request.values.get('Return')\n deletehit = request.values.get('Delete')\n delfile = request.values.get('DELF')\n # hidden values\n update = request.values.get('Update')\n newjob=request.values.get('NewJ')\n thisjob=request.values.get('ThisJob')\n oder=request.values.get('oder')\n modlink = request.values.get('modlink')\n longs=request.values.get(longs)\n searchfind=request.values.get('mastersearch')\n assemble=request.values.get('Assemble')\n\n oder=nonone(oder)\n modlink=nonone(modlink)\n actype = request.values.get('actype')\n\n leftscreen=1\n err=['All is well', ' ', ' ', ' ', ' ']\n\n if returnhit is not None:\n modlink=0\n actype='Choose Compliance Subject'\n\n if searchfind is not None:\n modlink=20\n\n# ____________________________________________________________________________________________________________________E.FormVariables.Compliance\n# ____________________________________________________________________________________________________________________B.DataUpdates.Compliance\n\n if update is not None and modlink==1:\n if oder > 0:\n modata=Compliance.query.get(oder)\n vals=['subject','category','longs','item','date1','date2']\n a=list(range(len(vals)))\n for i,v in enumerate(vals):\n a[i]=request.values.get(v)\n datedue=a[4]\n datefiled=a[5]\n\n if datedue == '':\n newdate1 = None\n else:\n newdate1 = datedue\n\n if datefiled == '':\n newdate2 = None\n else:\n newdate2 = datedue\n\n modata.Subject=a[0]\n modata.Category=a[1]\n modata.Textinfo=a[2]\n modata.Item=a[3]\n modata.Date1=newdate1\n modata.Date2=newdate2\n\n db.session.commit()\n err[3]= 'Modification to Compliance id ' + str(modata.id) + ' completed.'\n modlink=0\n\n\n\n# ____________________________________________________________________________________________________________________B.GetData.Compliance\n if actype == 'Choose Compliance Subject' or actype is None:\n odata = Compliance.query.all()\n else:\n odata = Compliance.query.filter(Compliance.Subject == actype).all()\n\n\n if assemble is not None:\n avec=numcheckvec(odata,'oder')\n scommand=['pdfunite']\n if len(avec)>0:\n for a in avec:\n gdat=Compliance.query.get(a)\n dot=gdat.File1\n docref=f'tmp/{scac}/data/vcompliance/' + dot\n scommand.append(docref)\n scommand.append(f'tmp/{scac}/data/vunknown/assembledoutput.pdf')\n tes=subprocess.check_output(scommand)\n# 
____________________________________________________________________________________________________________________B.Search.Compliance\n\n if (newjob is None and modlink<10) or modlink==20:\n oder,numchecked=numcheck(1,odata,0,0,0,0,['oder'])\n\n# ____________________________________________________________________________________________________________________E.Search.Compliance\n\n# ____________________________________________________________________________________________________________________B.Views.Compliance\n\n if viewo is not None and numchecked==1:\n err=[' ', ' ', 'There is no document available for this selection', ' ', ' ']\n if oder>0:\n modata=Compliance.query.get(oder)\n if modata.File1 is not None:\n dot=modata.File1\n txt=dot.split('.',1)[0]+'.txt'\n docref=f'tmp/{scac}/data/vcompliance/' + dot\n doctxt=docref.replace('.pdf','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')\n leftscreen=0\n leftsize=10\n modlink=0\n err=[' ', ' ', 'Viewing document '+docref, ' ', ' ']\n\n if (viewo is not None) and numchecked!=1:\n err=['Must check exactly one box to use this option', ' ', ' ', ' ', ' ']\n\n# ____________________________________________________________________________________________________________________E.Views.Compliance\n# ____________________________________________________________________________________________________________________B.Modify.Compliance\n if (modify is not None or vmod is not None) and numchecked==1 :\n modlink=1\n leftsize=8\n\n if oder>0:\n modata=Compliance.query.get(oder)\n if vmod is not None:\n err=[' ', ' ', 'There is no document available for this selection', ' ', ' ']\n if modata.File1 is not None:\n dot=modata.File1\n docref=f'tmp/{scac}/data/vcompliance/' + dot\n doctxt=docref.replace('.pdf','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')\n\n\n leftscreen=0\n\n err=['All is well', ' ', ' ', ' ', ' ']\n\n if (modify is not None or vmod is not None) and numchecked!=1:\n modlink=0\n err[0]=' '\n err[2]='Must check exactly one box to use this option'\n# ____________________________________________________________________________________________________________________E.Modify.Compliance\n\n# ____________________________________________________________________________________________________________________B.Delete.Compliance\n if deletehit is not None and numchecked==1:\n if oder>0:\n #This section is to determine if we can delete the source file along with the data. 
If other data is pointing to this\n #file then we need to keep it.\n modata=Compliance.query.get(oder)\n if modata.File1 is not None:\n dot=modata.File1\n docref=f'tmp/{scac}/data/vcompliance/' + dot\n\n othdoc=Compliance.query.filter((Compliance.File1==dot) & (Compliance.File1 != modata.id)).first()\n if othdoc is None:\n try:\n os.remove(addpath(docref))\n os.remove(addtxt(docref))\n except:\n err[0]='File already removed'\n\n Compliance.query.filter(Compliance.id == oder).delete()\n db.session.commit()\n odata = Compliance.query.all()\n\n if deletehit is not None and numchecked != 1:\n err=[' ', ' ', 'Must have exactly one item checked to use this option', ' ', ' ']\n# ____________________________________________________________________________________________________________________E.Delete.Compliance\n\n# ____________________________________________________________________________________________________________________B.Newjob.Compliance\n if newjob is not None:\n err=['Select Source Document from List']\n fdata = myoslist(f'tmp/{scac}/data/vunknown')\n fdata.sort()\n modlink=10\n leftsize=8\n leftscreen=0\n docref=f'tmp/{scac}/data/vunknown/NewJob.pdf'\n\n if newjob is None and update is None and modlink==10:\n filesel=request.values.get('FileSel')\n filetxt=filesel.replace('.pdf','.txt').replace('.jpg','.txt').replace('.jpeg','.txt')\n fdata = myoslist(f'tmp/{scac}/data/vunknown')\n fdata.sort()\n leftsize=8\n leftscreen=0\n docref=f'tmp/{scac}/data/vunknown/'+filesel\n doctxt=f'tmp/{scac}/data/vunknown/'+filetxt\n\n try:\n longs = open(addpath(doctxt)).read()\n longs = longs[0:999]\n except:\n doctxt=''\n longs=''\n\n if delfile is not None and modlink==10:\n modlink=0\n filesel=request.values.get('FileSel')\n if filesel != '1':\n dockill1=f'tmp/{scac}/data/vunknown/'+filesel\n try:\n os.remove(addpath(dockill1))\n except:\n err[1]='Could not delete ...'\n try:\n os.remove(addtxt(dockill1))\n except:\n err[2]='Could not delete txt'\n\n\n if update is not None and modlink==10:\n modlink=0\n #Create the new database entry for the source document\n filesel=request.values.get('FileSel')\n\n if filesel != '1':\n docold=f'tmp/{scac}/data/vunknown/'+filesel\n docref=f'tmp/{scac}/data/vcompliance/'+filesel\n try:\n shutil.move(addpath(docold),addpath(docref))\n except:\n err[4]='File has been moved already'\n try:\n shutil.move(addtxt(docold),addtxt(docref))\n except:\n err[4]='File has been moved already'\n\n else:\n docref=''\n doctxt=''\n\n subject=request.values.get('subject')\n category=request.values.get('category')\n item = request.values.get('item')\n longs=request.values.get('longs')\n datedue = request.values.get('date1')\n datefiled = request.values.get('date2')\n docsave= ntpath.basename(docref)\n\n if datedue == '':\n newdate1 = None\n else:\n newdate1 = datedue\n\n if datefiled == '':\n newdate2 = None\n else:\n newdate2 = datedue\n\n input = Compliance(Subject=subject, Category=category, Item=item, Textinfo=longs, File1=docsave, File2=None, File3=None, Date1=newdate1, Date2=newdate2)\n db.session.add(input)\n db.session.commit()\n db.session.close()\n\n odata = Compliance.query.all()\n\n modlink=0\n leftscreen=1\n oder=0\n leftsize=10\n err=['All is well', ' ', ' ', ' ', ' ']\n# ____________________________________________________________________________________________________________________E.Newjob.Compliance\n\n if modlink==20:\n odata=Compliance.query.filter((Compliance.Textinfo.contains(searchfind)) | (Compliance.Subject.contains(searchfind)) | 
(Compliance.Category.contains(searchfind))).all()\n\n #This is the else for 1st time through (not posting data from overseas.html)\n else:\n from viewfuncs import popjo, jovec, timedata, nonone, nononef, init_truck_zero\n today = datetime.date.today()\n #today = datetime.datetime.today().strftime('%Y-%m-%d')\n now = datetime.datetime.now().strftime('%I:%M %p')\n oder=0\n cache=0\n modata=0\n modlink=0\n fdata=0\n filesel=''\n docref=''\n doctxt=''\n longs=''\n odata = Compliance.query.all()\n leftscreen=1\n leftsize=10\n err=['All is well', ' ', ' ', ' ', ' ']\n actype = ''\n\n fdata = myoslist('data/vunknown')\n fdata.sort()\n doctxt=os.path.splitext(docref)[0] + '.txt'\n leftsize = 8\n acdata=[]\n for adat in db.session.query(Compliance.Subject).distinct():\n acdata.append(adat.Subject)\n return odata,oder,err,modata,modlink,leftscreen,docref,leftsize,fdata,filesel,today,now,doctxt,longs,acdata,actype\n","repo_name":"markwnixon/class8","sub_path":"iso_C.py","file_name":"iso_C.py","file_ext":"py","file_size_in_byte":12988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
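The Assemble branch above concatenates the selected documents by shelling out to pdfunite (from poppler-utils), which takes every input path followed by a single output path. A stripped-down sketch of that call, with illustrative file names rather than the application's real folders:

```python
import subprocess

inputs = ["tmp/demo/doc1.pdf", "tmp/demo/doc2.pdf"]
output = "tmp/demo/assembled.pdf"
# pdfunite in1.pdf in2.pdf ... out.pdf
subprocess.check_output(["pdfunite", *inputs, output])
```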
+{"seq_id":"4872645136","text":"from modengine.entity import Entity\nfrom modengine.memory.memorymanager import MemoryManager\n\nGAME = 'steam'\n\nclass GTA5Entity(Entity):\n if GAME == 'steam':\n WorldPTR_label = 'GTA5.EXE+236ADE0' # 'gta5!start+0xc667d8'\n BlipPTR_label = 'GTA5.EXE+1F9A2C0' # 'gta5!start+0x895cb8'\n AmmoPTR_label = 'GTA5.EXE+E89425'\n ClipPTR_label = 'GTA5.EXE+E893E0'\n GetPointerAddressA_label = 'GTA5.EXE!start+14e7d78'\n ObjectsPTR_label = 'GTA5.EXE+1E92AB8'\n NamePTR_label = 'GTA5.EXE+0274E7A8'\n PlayerListPTR_label = 'GTA5.EXE+1CE49C0'\n GloablPTR_label = 'GTA5.EXE+16EB380'\n WaypointPTR_label = 'GTA5.EXE+1FC0C70'\n ObjectivePTR_label = 'GTA5.EXE+1FC0D20'\n else:\n WorldPTR_label = 'GTA5.EXE+2366EC8'\n BlipPTR_label = 'GTA5.EXE+1F9E750'\n AmmoPTR_label = 'GTA5.EXE+E88EB9'\n ClipPTR_label = 'GTA5.EXE+E88E74'\n GetPointerAddressA_label = 'GTA5.EXE!start+14e5828'\n ObjectsPTR_label = 'GTA5.EXE+1E90138'\n NamePTR_label = 'GTA5.EXE+02749450'\n PlayerListPTR_label = 'GTA5.EXE+1CE0AA0'\n GloablPTR_label = 'GTA5.EXE+16F5EF0'\n WaypointPTR_label = 'GTA5.EXE+1FBCAE0'\n ObjectivePTR_label = 'GTA5.EXE+1FBCB90'\n\n # Waypoint: GTA5.exe + 1FBCAE0\n # Objective: GTA5.exe + 1FBCB90\n\n _WorldPTR = None\n _BlipPTR = None\n _AmmoPTR = None\n _ClipPTR = None\n _ObjectsPTR = None\n _getPTRAddress = None\n _NamePTR = None\n _PlayerListPTR = None\n _GloablPTR = None\n\n def __init__(self):\n super(GTA5Entity, self).__init__(MemoryManager(\"GTA5.EXE\"))\n\n @property\n def WorldPTR(self):\n if self._WorldPTR is None:\n self._WorldPTR = self.getPTRAddress(self.WorldPTR_label)\n return self._WorldPTR\n\n @property\n def BlipPTR(self):\n if self._BlipPTR is None:\n self._BlipPTR = self.getPTRAddress(self.BlipPTR_label)\n return self._BlipPTR\n\n @property\n def ClipPTR(self):\n if self._ClipPTR is None:\n self._ClipPTR = self.getPTRAddress(self.ClipPTR_label)\n return self._ClipPTR\n\n @property\n def AmmoPTR(self):\n if self._AmmoPTR is None:\n self._AmmoPTR = self.getPTRAddress(self.AmmoPTR_label)\n return self._AmmoPTR\n\n @property\n def ObjectsPTR(self):\n if self._ObjectsPTR is None:\n self._ObjectsPTR = self.getPTRAddress(self.ObjectsPTR_label)\n return self._ObjectsPTR\n\n @property\n def GetPointerAddressA(self):\n if self._getPTRAddress is None:\n self._getPTRAddress = self.getPTRAddress(self.GetPointerAddressA_label) + 0x8\n return self._getPTRAddress\n\n @property\n def NamePTR(self):\n if self._NamePTR is None:\n self._NamePTR = self.getPTRAddress(self.NamePTR_label)\n return self._NamePTR\n\n @property\n def PlayerListPTR(self):\n if self._PlayerListPTR is None:\n self._PlayerListPTR = self.getPTRAddress(self.PlayerListPTR_label)\n return self._PlayerListPTR\n\n @property\n def GloablPTR(self):\n if self._GloablPTR is None:\n self._GloablPTR = self.getPTRAddress(self.GloablPTR_label)\n return self._GloablPTR\n\n def self_test(self):\n for property_name in self.properties:\n print(\"testing %s\" % property_name)\n try:\n prop_value = self.__getattr__(property_name)\n print(\"%s : %s\" % (property_name, str(prop_value)))\n except Exception as e:\n print(\"failed to read property %s: %s\" % (property_name, e.message))\n\n try:\n self.__setattr__(property_name, prop_value)\n except Exception as e:\n print(\"failed to write property %s: %s\" % (property_name, e.message))\n\n try:\n if self.__getattr__(property_name) == prop_value:\n print(\"%s: test successfull\" % property_name)\n else:\n # raise Exception(\"%s: fail to test write\" % property_name)\n print(\"%s: test failed: %s != 
%s\" % (\n property_name, str(self.__getattr__(property_name)), prop_value))\n except Exception:\n pass\n\n def teleport(self, x, y, z, freeze=True):\n assert self.has_attribute('x'), \"no x attribute\"\n assert self.has_attribute('y'), \"no y attribute\"\n assert self.has_attribute('z'), \"no z attribute\"\n try:\n if freeze:\n self.freeze = True\n\n self.x = x\n self.y = y\n self.z = z\n\n if freeze:\n self.freeze = False\n return True\n except Exception:\n return False\n","repo_name":"il-katta/PyCheat","sub_path":"gta5/entities/gtaentity.py","file_name":"gtaentity.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"30"}
+{"seq_id":"29901785031","text":"import os\nimport struct\nimport sys\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '.'))\nimport utils\n\n# Adapted from https://github.com/Sanqui/romhacking/blob/master/telefang/punika.py\ndef decompress_tileset(rom, offset):\n rom.seek(offset)\n compressed = utils.read_byte(rom)\n total = utils.read_short(rom)\n data = []\n original = [compressed] + list(total.to_bytes(2, byteorder='little'))\n if compressed:\n while len(data) < total:\n modes = utils.read_short(rom)\n original += list(modes.to_bytes(2, byteorder='little'))\n for mode in bin(modes)[2:].zfill(16)[::-1]:\n if int(mode) == 1:\n e = rom.read(1)\n d = rom.read(1)\n original += [struct.unpack(\"B\", e)[0], struct.unpack(\"B\", d)[0]]\n loc = -(struct.unpack(\"> 3) & 0x1f) + 0x03\n loc += len(data)-1\n for j in range(num):\n if loc < 0:\n raise \"Unknown location\"\n else:\n data.append(data[loc+j])\n else:\n d = utils.read_byte(rom)\n data.append(d)\n original.append(d)\n if len(data) == total: # We'll read bytes that we don't need to if we don't check this here\n break\n elif compressed == 0:\n data = [utils.read_byte(rom) for i in range(0,total)]\n original += data\n else:\n raise \"Unknown compression flag (expect 0 or 1)\"\n return data, original\n\ndef compress_tileset(file):\n file.seek(0, 2)\n total = file.tell()\n file.seek(0)\n compressed_data = [0x0] + list(total.to_bytes(2, byteorder='little'))\n loc = 0\n while loc < total:\n b = utils.read_byte(file)\n compressed_data.append(b)\n loc += 1\n return compressed_data\n\n# Returns a mapped tileset table, index is required only if the same tileset is repeated in multiple table entries\ndef get_tileset(tileset_name, index = -1, override_offset = -1):\n base_offset = 0\n if override_offset == -1:\n if index == -1:\n idx_tbl = utils.read_table('scripts/res/meta_tileset_index.tbl')\n hits = [idx for idx in idx_tbl if idx_tbl[idx] == tileset_name]\n if len(hits) != 1:\n raise f\"Found more or less than one entry for {tileset_name}, provide an index if it appears more than once\"\n index = hits[0]\n \n offsets = utils.read_table('scripts/res/meta_tileset_load_offsets.tbl')\n base_offset = (int(offsets[index], 16) // 0x10) & 0xFF\n else:\n base_offset = override_offset\n\n tbl = utils.read_list(f'scripts/res/tilesets/{tileset_name}.lst', base_offset)\n # If not explicitly defined, '0' generally refers to 'space'\n if 0 not in tbl:\n tbl[0] = ' '\n return tbl\n\nif __name__ == \"__main__\":\n operation = int(sys.argv[1])\n input_file = sys.argv[2]\n output_file = sys.argv[3]\n offset = int(sys.argv[4])\n\n with open(input_file, 'rb') as i, open(output_file, 'wb') as o:\n if operation == 0: # Decompress\n o.write(bytearray(decompress_tileset(i, offset)[0]))\n elif operation == 1: # Compress\n o.write(bytearray(compress_tileset(i)))","repo_name":"Medabots/medarot3","sub_path":"scripts/common/tilesets.py","file_name":"tilesets.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"30"}
+{"seq_id":"10431513921","text":"from selenium import webdriver\nimport time\nimport os\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nimport requests\n\ndir_path = os.getcwd()\nchrome_options2 = Options()\nchrome_options2.add_argument(r\"user-data-dir=\" + dir_path + \"profile/zap\")\ndriver = webdriver.Chrome(chrome_options=chrome_options2)\ndriver.get('https://web.whatsapp.com/')\n#######API DO EDITACODIGO##########################################\nagent = {\"User-Agent\": 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'}\n\napi = requests.get(\"https://editacodigo.com.br/index/api-whatsapp/9M1ujEdimQDYACbr5mXeUlVM8FMIj44z\" , headers=agent)\ntime.sleep(1)\napi = api.text\napi = api.split(\".n.\")\nbolinha_notificacao = api[3].strip()\ncontato_cliente = api[4].strip()\ncaixa_msg = api[5].strip()\nmsg_cliente = api[6].strip()\t\n\n##########################################\ntime.sleep(10)\n\ndef bot():\n\n try:\n ####Pegar a mensagem do cliente e clicar nela ###\n bolinha = driver.find_element(By.CLASS_NAME,bolinha_notificacao)\n bolinha = driver.find_elements(By.CLASS_NAME,bolinha_notificacao)\n clica_bolinha = bolinha[-1]\n acao_bolinha = webdriver.common.action_chains.ActionChains(driver)\n acao_bolinha.move_to_element_with_offset(clica_bolinha,0,-20)\n acao_bolinha.click()\n acao_bolinha.perform()\n acao_bolinha.click()\n acao_bolinha.perform()\n\n ###### Mensagem do cliente ########\n todas_as_mensagens = driver.find_elements(By.CLASS_NAME,msg_cliente)\n todas_as_msg_texto = [e.text for e in todas_as_mensagens]\n msg = todas_as_msg_texto[-1]\n\n ###### Processa a mensagenm na API da Ia\n chave_api = 'API DO CHAT GPT'\n editacodigo = '9M1ujEdimQDYACbr5mXeUlVM8FMIj44z'\n sistema = 'explique tudo sobre o hotel Copacabana Palace. Endereço: Av. Atlântica, 1111 - Copacabana, Rio de Janeiro - RJ, 22021-111. Telefone: (21) 2548-1111, reservas por email: reserva@email.com, aceitamos todas as formas de pagamento. OBS: responda com no maximo com 15 palavras'\n resposta = requests.get(\"https://editacodigo.com.br/gpt/index.php\", params={'pagina': editacodigo,'sistema': sistema, 'chave_api': chave_api, 'mensagem_usuario': msg}, headers=agent)\n time.sleep(3)\n resposta = resposta.text\n\n ###### Envia a mensagem para o cliente ######\n campo_de_texto = driver.find_element(By.XPATH, caixa_msg)\n campo_de_texto.click()\n time.sleep(3)\n campo_de_texto.send_keys(resposta, Keys.ENTER)\n time.sleep(2)\n\n ######### Volta para a tela inicial ##########\n webdriver.ActionChains(driver).send_keys(Keys.ESCAPE).perform()\n\n except:\n print('buscando novas notificações')\n\nwhile True:\n bot() \n","repo_name":"LucaCguerreiro/Chat_Bot","sub_path":"zapsalvo.py","file_name":"zapsalvo.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"12948633163","text":"import os\nimport random\nimport flask\nfrom flask_login import login_user, current_user, LoginManager, UserMixin\nfrom flask_login.utils import login_required, logout_user\nfrom flask_sqlalchemy import SQLAlchemy\nfrom tmdb import movie_info\nfrom wikipedia import getURL\nfrom dotenv import load_dotenv, find_dotenv\n\nload_dotenv(find_dotenv())\n\n\napp = flask.Flask(__name__)\napp.secret_key = \"secret\"\napp.config[\"SEND_FILE_MAX_AGE_DEFAULT\"] = 0\n\nuri = os.getenv(\"DATABASE_URL\")\nif uri and uri.startswith(\"postgres://\"):\n uri = uri.replace(\"postgres://\", \"postgresql://\", 1)\n\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = uri\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = \"login\"\nlogin_manager.init_app(app)\n\ndb = SQLAlchemy(app)\n\n\nclass Authentication(UserMixin, db.Model):\n id = db.Column(db.Integer, primary_key=True)\n register = db.Column(db.String(20))\n\n\nclass Movies(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n user_login = db.Column(db.String(20))\n title = db.Column(db.Integer)\n ratings = db.Column(db.Integer)\n comments = db.Column(db.String(500))\n\n\ndb.create_all()\n\n\n@login_manager.user_loader\ndef load_user(register):\n return Authentication.query.get(register)\n\n\n@app.route(\"/register\", methods=[\"POST\", \"GET\"])\ndef registration():\n user_id = flask.request.form.get(\"register_id\")\n user_check = Authentication.query.filter_by(register=user_id).first()\n if user_check:\n flask.flash(\"Existing User ID\")\n pass\n else:\n user_check = Authentication(register=user_id)\n db.session.add(user_check)\n db.session.commit()\n return flask.redirect(flask.request.referrer)\n\n\n@app.route(\"/login\", methods=[\"POST\", \"GET\"])\ndef login():\n user_id = flask.request.form.get(\"login_id\")\n user_check = Authentication.query.filter_by(register=user_id).first()\n if user_check:\n login_user(user_check)\n return flask.redirect(flask.url_for(\"index\"))\n else:\n flask.flash(\"user id does not exist!\")\n return flask.redirect(flask.request.referrer)\n\n\n@app.route(\"/logout\")\ndef logout():\n print(current_user)\n logout_user()\n return flask.redirect(\"/\")\n\n\n# Database\n@app.route(\"/add\", methods=[\"POST\", \"GET\"])\ndef add():\n if flask.request.method == \"POST\":\n movie_title = flask.request.form.get(\"movie_title\")\n movie_ratings = flask.request.form.get(\"ratings\")\n movie_comments = flask.request.form.get(\"comments\")\n user_login = current_user.register\n new_movie = Movies(\n user_login=user_login,\n title=movie_title,\n ratings=movie_ratings,\n comments=movie_comments,\n )\n db.session.add(new_movie)\n db.session.commit()\n\n return flask.redirect(\"/index\")\n\n\n# main page = login\n@app.route(\"/\")\ndef main_login():\n if current_user.is_authenticated:\n return flask.redirect(flask.url_for(\"index\"))\n return flask.render_template(\"login.html\")\n\n\n# main movie information page\n@app.route(\"/index\", methods=[\"POST\", \"GET\"])\ndef index():\n if flask.request.method == \"POST\":\n data = flask.request.form.get(\"movie_title\")\n db.session.add(data)\n db.session.commit()\n\n MOVIE_IDS = [634649, 141, 438631] # Spiderman No Way Home # Donnie Darko # Dune\n data = flask.request.form.get(\"movie_title\")\n random_id = random.choice(MOVIE_IDS)\n title, tagline, genre, posterImg = movie_info(random_id)\n url = getURL(title)\n movie_list = Movies.query.all()\n user_list = []\n comment_list = []\n 
rating_list = []\n\n for i in movie_list:\n if i.title == random_id:\n user_list.append(i.user_login)\n comment_list.append(i.comments)\n rating_list.append(i.ratings)\n\n num_movie = len(movie_list)\n num_comment = len(comment_list)\n\n return flask.render_template(\n \"index.html\",\n title=title,\n tagline=tagline,\n genre=genre,\n posterImg=posterImg,\n url=url,\n random_id=random_id,\n movie_list=movie_list,\n num_movie=num_movie,\n user_list=user_list,\n comment_list=comment_list,\n rating_list=rating_list,\n current_user=current_user.register,\n num_comment=num_comment,\n )\n\n\napp.run(host=os.getenv(\"IP\", \"0.0.0.0\"), port=int(os.getenv(\"PORT\", 8080)), debug=True)\n","repo_name":"ksy941108/tmdbAPI_Milestone2","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"29652715314","text":"import os\nfrom contextlib import contextmanager\n\nwith open('test.json') as f:\n print(f.read())\n\n\nclass File:\n def __init__(self, file_name):\n if os.path.exists(file_name):\n self.__file = open(file_name)\n else:\n raise ValueError(f'{file_name} does not exist')\n \n def __enter__(self):\n print('Create context')\n return self.__file\n \n def __exit__(self, type, value, traceback):\n print('Leaving context and close file')\n self.__file.close()\n\n\nwith File('test.json') as f1:\n print(f1.read())\n\n\nclass TempMutableList:\n def __init__(self, data):\n self.list = data\n self.__local_list = data[:]\n \n def __enter__(self):\n return self.list\n \n def __exit__(self, exc_type, exc_value, traceback):\n self.list = self.__local_list[:]\n\ntempData = TempMutableList([1,2,3,4])\nprint(tempData.list)\nwith tempData as l1:\n l1.append(5)\n print(l1)\n print(tempData.list)\nprint(tempData.list)\n\n@contextmanager\ndef file_example(file_name):\n file_obj = open(file_name)\n yield file_obj\n print('Trying to close')\n file_obj.close()\n\nwith file_example('test.json') as f1:\n print(f1.read())\n\n@contextmanager\ndef file_example_raise(file_name):\n file_obj = open(file_name)\n try:\n yield file_obj\n finally:\n print('Trying to close')\n file_obj.close()\n\n\ntry:\n with file_example_raise('test.json') as f1:\n raise ValueError\n print(f1.read())\nexcept:\n pass\n\nimport package1\nprint(package1.f)","repo_name":"python4eg/Python-test-repo-phase-1","sub_path":"lesson17.py","file_name":"lesson17.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"73100394326","text":"edad=input(\"Introduce la edad: \")\n\n\nwhile(edad.isdigit()==False):\n\n\tprint(\"Por favor, introduce un valor numérico\")\n\n\tedad=input(\"Introduce la edad: \")\n\nif(int(edad)<18):\n\t print(\"No puede pasar\")\n\nelse:\n\n\tprint(\"Puedes pasar\")\n\n\n#upper(): Convierte en mayúsculas todas las letras de un String.\n\n#lower(): Convierte en minúsculas todas las letras de un String.\n\n#capitalize(): En un string hace que la primera letra sea mayúscula.\n\n#count(): Cuenta cuantás veces se repite en una cadena de caracteres dentro de una frase.\n\n#find(): Representar el índice en la cual aparece un caracter o un grupo de caracteres dentro de un texto.\n\n#isdigit(): Devuelve un booleano(True, False). Nos dice si es valor introducido es un dígito o no.\n\n#isalum(): Comprueba si es un alpha numérico.\n\n#isalpha(): Comprueba si en un String solo hay letras(Los espacios tambien).\n\n#split(): Separa por palabras utilizando espacios.\n\n#strip(): Elimina los espacios sobrantes del principio y del final.\n\n#replace(): Cambia una palabra o letra por otra dentro de un String.\n\n#rfind(): Hace lo mismo que el find() pero empieza por detrás dentro del String.","repo_name":"itsJRillo/Curso_Python3","sub_path":"Metodos_Cadenas.py","file_name":"Metodos_Cadenas.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"38059632359","text":"from pydantic import BaseModel\nfrom enum import Enum\n\nclass BasePermissionModel(BaseModel):\n Read: bool\n Vet: bool\n Write: bool\n DeleteSelf: bool\n DeleteOthers: bool\n\nclass BlogPermissionModel(BasePermissionModel):\n pass\n\nclass EventPermissionModel(BasePermissionModel):\n Join: bool\n\nclass ChallengePermissionModel(BasePermissionModel):\n pass\n\nclass PermissionModel(BaseModel):\n Blog: BlogPermissionModel\n Event: EventPermissionModel\n Challenge: ChallengePermissionModel\n\n\nDEFAULT_PERMISSION = \"r----j;r----;-----\"\nDEFAULT_GROUPS = DEFAULT_PERMISSION.split(';')\n\n\"\"\"\n权限检测\n\"\"\"\nclass Event(Enum):\n Read = 'r'\n Write = 'w'\n Vet = 'v'\n DeleteSelf = 'd'\n DeleteOthers = 'D'\n Join = 'j'\n\nclass Blog(Enum):\n Read = 'r'\n Write = 'w'\n Vet = 'v'\n DeleteSelf = 'd'\n DeleteOthers = 'D'\n\nclass Challenge(Enum):\n Read = 'r'\n Write = 'w'\n Vet = 'v'\n DeleteSelf = 'd'\n DeleteOthers = 'D'\n\ndef parse_permission(perm: str = None):\n if perm is None:\n group = DEFAULT_GROUPS\n else:\n group = perm.split(\";\")\n if len(group) < len(DEFAULT_GROUPS):\n group += DEFAULT_GROUPS[len(group): ]\n fields = [\n (Event, 'Event'), \n (Blog, 'Blog'), \n (Challenge, 'Challenge')\n ]\n collect = {}\n for (check, key), g in zip(fields, group):\n tmp = collect[key] = {}\n for idx, field in enumerate(check):\n name = field.name\n code = g[idx] == field.value\n tmp[name] = code\n return PermissionModel(**collect)\n\nif __name__ == \"__main__\":\n sss = 'r-----'\n res = parse_permission(sss)\n print(res.json(indent=4))\n ","repo_name":"miaobuao/ITA-Server","sub_path":"app/utils/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"13745489080","text":"from playwright.sync_api import Playwright, sync_playwright, expect, Page\nfrom datetime import date, timedelta\nimport time\nimport os\nfrom dotenv import load_dotenv\n\ndef run(playwright: Playwright, start_day: date) -> None:\n browser = playwright.chromium.launch(headless=False)\n context = browser.new_context()\n page = context.new_page()\n\n login(page)\n download_statistics(page, start_day)\n\n # ---------------------\n context.close()\n browser.close()\n\ndef login(page: Page):\n # go to statistik page to get correct redirect to login mask\n page.goto(os.getenv('login_page'))\n\n # fill username field\n username = page.get_by_label(\"Email Address or Username\")\n username.fill(os.getenv('user'))\n\n page.get_by_role(\"button\", name=\"Continue\").first.click()\n time.sleep(2)\n\n # fill password field\n password = page.get_by_label(\"Password\")\n password.fill(os.getenv('password'))\n\n page.get_by_role(\"button\", name=\"Log In\").click()\n time.sleep(2)\n\n # select correct part\n # page.get_by_role(\"link\", name=\"Improve & Repeat\").click()\n\n\ndef download_statistics(page: Page, start: date):\n end = date.today()\n stats_day = min(start, end)\n \n while stats_day < end:\n download_statistics_for_day(page, stats_day)\n stats_day += timedelta(days=1)\n\n\ndef download_statistics_for_day(page: Page, day: date):\n posts = f\"https://wordpress.com/stats/day/posts/improveandrepeat.com?startDate={day}\"\n print(posts)\n page.goto(posts)\n \n time.sleep(2)\n\n with page.expect_download() as download_info: \n download = page.get_by_role(\"button\", name=\"Download data as CSV\")\n download.scroll_into_view_if_needed() \n download.click()\n\n download_value = download_info.value\n print(download_value.path())\n download_value.save_as(f\"C:/temp/JetpackStats/{day}.csv\")\n\nwith sync_playwright() as playwright:\n load_dotenv()\n run(playwright, date(2022, 12, 1))\n","repo_name":"jgraber/PythonFriday","sub_path":"Playwright/playwright_jetpack.py","file_name":"playwright_jetpack.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"30"}
+{"seq_id":"39451887587","text":"# ********* IMPORT *********\nimport random\nfrom typing import List\n\n# ********* CLASS and FUNCTION **********\nclass Student:\n def __init__(self, id_, cs, pgm):\n self.id = id_\n self.cs = cs\n self.pgm = pgm\n self.avr = float( (cs+pgm)/2 )\n def show(self):\n print(f'{self.id}\\t{self.cs}\\t{self.pgm}\\t{self.avr}')\n\ndef class_init(clas: List[int], num: int) -> None:\n for i in range(num):\n newst = Student(id_=i+106711101, cs=random.randint(0,100), pgm=random.randint(0,100))\n clas.append(newst)\n\ndef class_show(clas: List[int]) -> None:\n print('[學號]\\t\\t[計概]\\t[程規]\\t[平均]')\n for i in clas:\n i.show()\n\ndef class_sort(clas: List[int], chose: int) -> None:\n for i in range(len(clas)-1):\n for j in range(i,len(clas)):\n if(clas[i].cs < clas[j].cs and chose is 1):\n clas[i], clas[j] = clas[j], clas[i]\n if(clas[i].pgm < clas[j].pgm and chose is 2):\n clas[i], clas[j] = clas[j], clas[i]\n if(clas[i].avr < clas[j].avr and chose is 3):\n clas[i], clas[j] = clas[j], clas[i]\n\n\n# ********* MAIN **********\nclas = []\nchose = 1\n\nclass_init( clas, int( input('請輸入幾個學生: ') ) )\nclass_show(clas)\n\nwhile(chose is not 0):\n chose = int( input('\\t@主選單@\\n(1)依計概成績排名\\n(2)依程規成績排名\\n(3)依平均排名\\n(0)結束\\n請輸入選擇: ') )\n if chose > 0 and chose < 4:\n class_sort(clas, chose)\n class_show(clas)\n elif chose is not 0:\n print('\\t error: 輸入錯誤!')\n\n# ********* END **********\nprint('\\t[ 程式結束 ]')\n\n","repo_name":"Yu-Zhuang/Python","sub_path":"justPracitce/studentII.py","file_name":"studentII.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"43256012961","text":"lista=[]\nm=0\nnotas=0\nresp=0\nwhile True:\n nome= str( input('NOME:'))\n N1= float( input('NOTA 1:'))\n N2= float( input('NOTA 2:'))\n m= (N1 + N2) / 2\n lista.append([nome,[N1,N2],m])\n resposta= str(input('Quer continuar? [S/N]')).upper()\n if resposta == 'N':\n break\n\nprint('-='*10)\nprint('BOLETIM')\nprint(f' {\"N²\":<4} {\"NOME\":<10} {\"MÉDIA\":<10} ')\nfor i,p in enumerate(lista):\n print(f'{i:<4} {p[0]:<10} {p[2]:<10}')\n\nwhile True:\n mostrar= int( input('Deseja mostrar a nota de qual aluno? (Aperte 999 para parar)'))\n if mostrar == 999:\n break\n else:\n print(f'Notas de {lista[mostrar][0]}: {lista[mostrar][1]}')\n\n\n\n","repo_name":"BrunoSantosLira/Programas_PythonCev","sub_path":"Mundo3_EstruturasCompostas/ex89.py","file_name":"ex89.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"16574631946","text":"import random\nimport requests\nfrom flaskapi import db\nfrom flaskapi.models import User, Post\nimport settings\n\nroles = ('author', 'editor')\nstates = ('active', 'inactive', 'deleted')\ntitles = ('Pfizer', 'Moderna', 'AstraZeneca', 'Sputnik V', 'Johnson & Johnson')\n\nurl = \"https://fake-data-person.p.rapidapi.com/api/Persons/Get\"\n\nheaders = {\n 'x-rapidapi-host': \"fake-data-person.p.rapidapi.com\",\n 'x-rapidapi-key': settings.x_rapidapi_key\n}\n\nfor id in range(1, 26):\n response = requests.request(\"GET\", url, headers=headers, params={'id': f'{id}'})\n name = response.json()['firstName']\n last_name = response.json()['lastName']\n email = response.json()['emailAddress']\n role = random.choice(roles)\n state = random.choice(states)\n\n user = User(name=name, last_name=last_name, email=email, role=role, state=state)\n try:\n db.session.add(user)\n db.session.commit()\n except Exception as e:\n raise e\n\nfor author in range(1, 26):\n title = random.choice(titles)\n description = f'Finally got my first shot of {title}'\n\n post = Post(title=title, description=description, author=author)\n try:\n db.session.add(post)\n db.session.commit()\n except Exception as e:\n raise e\n","repo_name":"gd-m1/web-service-flask-api","sub_path":"population_script_for_db.py","file_name":"population_script_for_db.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"74352263446","text":"\nimport urllib.request, urllib.parse, urllib.error\nimport json\nserviceurl = 'http://py4e-data.dr-chuck.net/geojson?'\naddress = input('Enter location: ')\nurl = serviceurl + urllib.parse.urlencode({'address': address})\nuh = urllib.request.urlopen(url)\ndata = uh.read().decode()\nfichier=json.loads(data)\nprint(fichier['results'][0]['place_id'])","repo_name":"DieuMerciKIMPOLO/Couresera_python_courses","sub_path":"Using_Python_to_Access_Web_Data/AssignmentFinal3_2.py","file_name":"AssignmentFinal3_2.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"43096998990","text":"import numpy as np\nimport os\nclass Logger:\n def __init__(self, app):\n self.app = app\n self.controller = app.scene.objects[1].controller\n self.state = []\n self.distance = []\n self.distance_est = []\n self.time = []\n self.divergence = []\n self.divergence_d = []\n self.img_box_center_x = []\n self.img_box_center_y = []\n self.img_box_center_x_d = []\n self.img_box_center_y_d = []\n\n def add_step(self):\n self.state.append(self.controller.agent.states)\n self.distance.append(self.controller.distance)\n self.distance_est.append(self.controller.distance_est)\n self.time.append(self.app.time)\n self.divergence.append(self.controller.divergence)\n self.divergence_d.append(self.derivative(self.divergence))\n self.img_box_center_x.append(self.controller.img_box_center_x)\n self.img_box_center_y.append(self.controller.img_box_center_y)\n self.img_box_center_x_d.append(self.derivative(self.img_box_center_x))\n self.img_box_center_y_d.append(self.derivative(self.img_box_center_y))\n\n def derivative(self, series):\n current_step = self.controller.n_iteration - self.controller.delay + 1\n if self.controller.n_iteration >= self.controller.delay:\n dxdt = (series[current_step] - series[current_step-1]) / (self.app.delta_time / 1000)\n else:\n dxdt = 0\n return dxdt\n\n def save_log_file(self):\n states = np.array(self.state)\n divergences = np.array(self.divergence)\n distances = np.array(self.distance)\n distance_est = np.array(self.distance_est)\n time = np.array(self.time) - self.time[0]\n data = np.block([[np.transpose(states)], [time], [divergences], [distances], [distance_est]])\n filename = os.path.join('sim_data', self.controller.name)\n np.save(filename, data)\n\n","repo_name":"SanderHazelaar/OpenGLSim","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}