diff --git "a/5588.jsonl" "b/5588.jsonl" new file mode 100644--- /dev/null +++ "b/5588.jsonl" @@ -0,0 +1,551 @@ +{"seq_id":"353028272","text":"import requests\nimport re\nimport time\nimport threadpool\nfrom serch_job.insert_db import insertDB\nfrom scrapy import Selector\n\n\nclass SerachJob:\n def index(self):\n \"\"\"\n 获取所有页\n :return: 总共多少页\n \"\"\"\n url = 'https://search.51job.com/list/080200,000000,0000,00,9,99,%2520,2,1.html'\n params = {\n 'lang': 'c',\n 'stype': '',\n 'postchannel': '0000',\n 'workyear': '99',\n 'cotype': '99',\n 'degreefrom': '99',\n 'jobterm': '99',\n 'companysize': '99',\n 'providesalary': '99',\n 'lonlat': '\t0,0',\n 'radius': '-1',\n 'ord_field': '0',\n 'confirmdate': '9',\n 'fromType': '',\n 'dibiaoid': '0',\n 'address': '',\n 'line': '',\n 'specialarea': '00',\n 'from': '',\n 'welfare': ''\n }\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'\n }\n index_res = requests.get(url=url, headers=headers, params=params)\n index_selector = Selector(text=index_res.content.decode('gbk'))\n details_url = index_selector.xpath('//p[@class=\"t1 \"]//a/@href').extract()\n self.details(details_url)\n all_page = index_selector.xpath('//span[@class=\"td\"]/text()').extract_first()\n all_page_number = re.findall(r'共(\\d+)页', all_page)[0]\n nex_url = index_selector.xpath(\"//a[contains(text(),'下一页')]/@href\").extract_first()\n if nex_url:\n self.loop_page(nex_url)\n\n def loop_page(self, nex_url):\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'\n }\n index_res = requests.get(url=nex_url, headers=headers)\n try:\n index_selector = Selector(text=index_res.content.decode('gbk'))\n except BaseException as e:\n print(e)\n details_url = index_selector.xpath('//p[@class=\"t1 \"]//a/@href').extract()\n self.details(details_url)\n nex_url = index_selector.xpath(\"//a[contains(text(),'下一页')]/@href\").extract_first()\n if nex_url:\n self.loop_page(nex_url)\n else:\n print('采集结束..........')\n exit()\n\n def details(self, all_url):\n for each_url in all_url:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'\n }\n details_res = requests.get(url=each_url, headers=headers)\n try:\n details_selector = Selector(text=details_res.content.decode('gbk'))\n except BaseException as e:\n print(e)\n position = details_selector.xpath('//div[@class=\"cn\"]/h1/@title').extract_first()\n company = details_selector.xpath('//p[@class=\"cname\"]/a/@title').extract_first()\n price = details_selector.xpath('//div[@class=\"cn\"]/strong/text()').extract_first()\n all_infos = details_selector.xpath('//p[@class=\"msg ltype\"]/@title').extract_first()\n if all_infos:\n all_infos = all_infos.split('|')\n if len(all_infos) == int(7):\n address = re.findall(r'(.*)\\xa0\\xa0', all_infos[0])[0]\n work_year = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[1])[0]\n education = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[2])[0]\n nedd_people = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[3])[0]\n pubtime = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[4])[0]\n english = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[5])[0]\n major = re.findall(r'\\xa0\\xa0(.*)', all_infos[6])[0]\n if len(all_infos) == int(6):\n address = re.findall(r'(.*)\\xa0\\xa0', all_infos[0])[0]\n work_year = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', 
all_infos[1])[0]\n education = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[2])[0]\n need_people = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[3])[0]\n pubtime = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[4])[0]\n english = re.findall(r'\\xa0\\xa0(.*)', all_infos[5])[0]\n major = ''\n if len(all_infos) == 5:\n address = re.findall(r'(.*)\\xa0\\xa0', all_infos[0])[0]\n work_year = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[1])[0]\n education = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[2])[0]\n need_people = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[3])[0]\n pubtime = re.findall(r'\\xa0\\xa0(.*)', all_infos[4])[0]\n english = ''\n major = ''\n if len(all_infos) == 4:\n print('position %s has no education requirement listed' % position)\n address = re.findall(r'(.*)\\xa0\\xa0', all_infos[0])[0]\n work_year = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[1])[0]\n education = ''\n need_people = re.findall(r'\\xa0\\xa0(.*)\\xa0\\xa0', all_infos[2])[0]\n pubtime = re.findall(r'\\xa0\\xa0(.*)', all_infos[3])[0]\n english = ''\n major = ''\n update_time = time.strftime('%Y-%m-%d')\n each_infos = (\n None, position, company, price, address, work_year, education, need_people, pubtime, english, major,\n each_url, update_time)\n insertDB(each_infos, 'test', 'search_jobs')\n\n def use_threadpool(self):\n task_pool = threadpool.ThreadPool(5) # prepare a pool of 5 worker threads\n # pass the bound method itself; the (args, kwargs) tuple means index() is called with no arguments\n reqs = threadpool.makeRequests(self.index, [((), {})])\n for req in reqs:\n task_pool.putRequest(req)\n task_pool.wait()\n\n\nif __name__ == '__main__':\n job = SearchJob()\n job.use_threadpool()\n","repo_name":"zengxiaoye/webspider","sub_path":"serch_job/51_jobs.py","file_name":"51_jobs.py","file_ext":"py","file_size_in_byte":6240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5571203248","text":"import io, os\nfrom google.cloud import vision\nfrom google.cloud.vision_v1 import types\nfrom typing import List\n\n# set the API key path\ncredential_path = \"./auth.json\"\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = credential_path\n\ndef convert_image(image_file) -> str:\n \"\"\"\n Convert an image of text to text via the Google Vision API\n \"\"\"\n content = image_file.read()\n\n image = types.Image(content=content)\n response = client.text_detection(image=image)\n labels: List[List[str]] = response.text_annotations\n res: str = labels[0].description\n return res\n\n\nclient = vision.ImageAnnotatorClient()\n","repo_name":"koreahong/convert_image_to_text","sub_path":"googleAPI.py","file_name":"googleAPI.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"32589216150","text":"N = 1000000007\n\n\ndef exponentiation(bas, exp):\n t = 1\n while exp > 0:\n\n # for cases where exponent\n # is not an even value\n if exp % 2 != 0:\n t = (t * bas) % N\n\n bas = (bas * bas) % N\n exp = int(exp / 2)\n return t % N","repo_name":"pouyaardehkhani/Algorithms","sub_path":"Mathematical Algorithms/Exponential Squaring (Fast Modulo Multiplication)/ExpSq.py","file_name":"ExpSq.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"71245922006","text":"#!/usr/bin/env python\n\n###################################################################\n# This script works for any application.\n# It creates a new file (line 21) containing only the last \n# concentration group and everything else needed to restart Xolotl.\n# This 
is useful when restarting the simulation often and when \n# the other concentration groups won't be needed for data analysis.\n###################################################################\n\nimport h5py\n\n## Open the file we want to copy from\nf = h5py.File('/home/sophie/Workspace/xolotl-dynamicGrid-build/script/xolotlStop.h5', 'r')\n\n## Get the last time step saved in the file\nconcGroup = f['concentrationsGroup']\ntimestep = concGroup.attrs['lastTimeStep']\nlastLoop = concGroup.attrs['lastLoop']\n\n## Create the file to copy to\nfNew = h5py.File('/home/sophie/Workspace/xolotl-dynamicGrid-build/script/networkNew.h5', 'a')\n\n## Create the concentration group\nconcGroupNew = fNew.create_group('concentrationsGroup')\n\n## Set the last time step\nconcGroupNew.attrs['lastTimeStep'] = timestep\nconcGroupNew.attrs['lastLoop'] = lastLoop\n\n## Copy the last timestep group\ngroupName ='concentration_' + str(lastLoop) + '_' + str(timestep)\nconcGroup.copy(groupName, concGroupNew)\n\n## Copy the other groups\nf.copy('networkGroup', fNew)\n","repo_name":"ORNL-Fusion/xolotl","sub_path":"analysis/HDF5utils/keepLastTS.py","file_name":"keepLastTS.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"4640546479","text":"from pbi.pbi import PowerBI\nfrom pbi.report import PbiReport\nfrom tests.config import config\n\ntest_pbi = PowerBI(config)\n\ntest_pbi.download(\n 'test_report',\n test_pbi.download_folder\n)\n\ninitial_report = PbiReport(\n test_pbi.download_folder,\n 'test_report'\n)\n\nreport = initial_report.copy(\n new_name='report',\n new_folder=test_pbi.upload_folder\n)\n\nhome_page = report.get_page('Home')\npage1 = report.get_page('Page 1')\n\nheader = home_page.get_visual_group('Header')\nadded_header = page1.add_visuals(header)\n\nbuttons = home_page.get_visuals('Button')\nadded_buttons = page1.add_visuals(buttons)\n\nfuture_button = home_page.get_visuals('Future Link')[0]\nfuture_button.hide()\n\nwip_button = home_page.get_visuals('WIP Button')[0]\nwip_button.remove_link(\n update_style=False,\n hide=False\n)\n\ntitle = home_page.get_visuals('Awesome title')[0]\ntitle['config']['singleVisual']['objects']['general'][0]['properties']['paragraphs'][0]['textRuns'][0]['textStyle']['fontSize'] = '20pt'\n\nreport.update_multiselect()\nreport.add_search()\nreport.update_keep_layer_order()\nreport.disable_headers(\n types_to_filter=['shape', 'textbox']\n)\nreport.save()\n\ntest_pbi.upload(\n test_pbi.upload_folder\n)\n","repo_name":"JChamboredon/pbi","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"40743577331","text":"cars = [\"Ford\", \"Volvo\", \"BMW\"]\nprint(cars[0])\n\n# loop through list\nfor car in cars:\n print(car)\n\n# modify each element in list\nfor i, car in enumerate(cars):\n cars[i] = car + \" \"\n\n# print modified cars\nprint(cars)\n\n# modify via iterating through range in list\nfor n in range(len(cars)):\n cars[n] += \"car\"\n\n# print modified cars\nprint(cars)\n\n# add element to list\n# You can use the append() method to add an element to an list.\n\ncars.append(\"Honda\")\n\nprint(cars)\n\nfruits = [\"apple\", \"banana\", \"cherry\"]\n\n# insert(index, element) inserts element at index\nfruits.insert(1, \"orange\")\n\nprint(fruits)\n\n# pop(index) Removes the element at the specified position. 
Defaults to last item in list if index is not provided.\nfruits.pop(1)\n\nprint(fruits)\n\n# You can also use the remove method to remove an element from the list.\n# remove(value) Removes the first item with the specified value\n\nfruits.insert(0, \"apple\")\n\nprint(fruits)\n\nfruits.remove(\"apple\")\n\nprint(fruits)\n\n# index(value) - returns index of first instance of element\nindexOfBanana = fruits.index(\"banana\")\n\nprint(\"indexOfBanana\", indexOfBanana)\n\nsquares = [1, 4, 9, 16, 25]\nlength = len(squares)\n\nsecondToFourthItem = squares[1:4] # slice from index 1 (inclusive) to index 4 (exclusive)\nlastThree = squares[-3:] # slicing returns a new list\nprint(\"lastThree: \", lastThree) # [9, 16, 25]\n\nlastItem = squares[-1] # 25 - use negative indices to indicate to begin counting from the right\naccessLastItem = squares[length - 1]\n\nprint(\"lastItemAccess: \", {\n \"lastItem\": lastItem,\n \"accessLastItem\": accessLastItem\n})\n\nsecondToLastItem = squares[-2] # 16\nprint(\"secondToLastItem: \", secondToLastItem)\n\nshallowCopyOfSquares = squares[:] # [1, 4, 9, 16, 25]\n\n# Assignment to slices is also possible, and this can even change the size of the list or clear it entirely:\nletters = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n\n# replace some values\nletters[2:5] = ['C', 'D', 'E']\nprint(\"Assignment to slices\", letters) # ['a', 'b', 'C', 'D', 'E', 'f', 'g']\n\n# now remove them\nletters[2:5] = []\nprint(\"remove values with assignment via slice\", letters) # ['a', 'b', 'f', 'g']\n\n# clear list entirely\nletters[:] = [] # OR letters.clear()\nprint(\"clear list\", letters)\n\n# concatenation\nsquares = squares + [36, 49, 64, 81, 100]\n# [1, 4, 9, 16, 25, 36, 49, 64, 81, 100]\n\nprint(\"concatenation\", squares)\n\nsquares.pop()\n\nprint(\"no index provided to pop method. 
Pop off last item from list: \", squares)\n# [1, 4, 9, 16, 25, 36, 49, 64, 81]\n\n\ndel squares[0]\n\nprint(squares) # [4, 9, 16, 25, 36, 49, 64, 81]\n\n# delete values from index 1(inclusive) to index 3(exclusive) using slice\ndel squares[1:3]\n\nprint(squares) # [4, 25, 36, 49, 64, 81]\n\ndel squares[:]\n\nprint(squares) # []\n","repo_name":"Sergio16T/python-elements-of-programming","sub_path":"LanguageBasics/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"12539419601","text":"# -*- coding: utf-8 -*-\n\nimport win32com.client\nimport re\nimport pprint\nimport datetime\nimport json\nimport yaml\nimport time\n\ndef pretty_print(o):\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(o)\n\n\ndef split_chunks(a, n):\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))\n\nclass Word:\n\n def __init__(self, path):\n self.path = path\n self.app = win32com.client.gencache.EnsureDispatch('Word.Application')\n self.app.Visible = True\n self.app.DisplayAlerts = False\n self.app.Documents.Open(self.path)\n self.doc = self.app.ActiveDocument\n \n def updateTable(self, bookmark, data, heading_rows=1):\n word_range = self.doc.Bookmarks(bookmark).Range \n table = word_range.Tables(1)\n rows_count = table.Rows.Count\n if not rows_count >= len(data) + heading_rows:\n table.Select()\n self.app.Selection.InsertRowsBelow(NumRows=len(data) + heading_rows - rows_count)\n i = heading_rows\n for entry in data: #sorted(data, key=lambda x: (x[0], x[1])):\n i += 1\n for n in range(len(entry)):\n table.Cell(i, n+1).Range.Text = entry[n]\n\n def updateIDs(self, bookmark, prefix):\n rex = re.compile('[A-Z]+', re.IGNORECASE)\n word_range = self.doc.Bookmarks(bookmark).Range \n table = word_range.Tables(1)\n rows_count = table.Rows.Count\n count = 0\n for rid in range(1, rows_count+1):\n m = rex.search(table.Cell(rid, 1).Range.Text)\n if m:\n pass\n else:\n count+=1\n table.Cell(rid,1).Range.Text = f\"{prefix}{str(count).zfill(3)}\"\n\n\ndef make_data():\n with open(r'C:\\Users\\ric\\ownCloud\\Uni\\CSG5226_CaseFileTimeline.yaml', 'r') as f:\n data = yaml.load(f)\n\n new_data = []\n for dttm, rest in data.items():\n dt = datetime.datetime.strptime(dttm, '%d/%m/%Y %H:%M')\n dt_s = datetime.datetime.strftime(dt, '%Y-%m-%d %H:%M')\n aim, method, results = '', '', ''\n for k, v in rest.items():\n if k == 'Aim':\n aim = rest[k].strip()\n elif k == 'Method':\n method = rest[k].strip()\n elif k == 'Results':\n results = rest[k].strip()\n new_data.append(['', dt_s, aim, method, results, '', dt])\n\n new_data = sorted(new_data, key=lambda x: x[-1])\n new_data = [x[:-1] for x in new_data]\n\n return new_data\n\ndef main(bookmark, data=[], heading_rows=1):\n my_path = r'C:\\Users\\ric\\Dropbox\\Uni\\CSG5126\\Assignment 2\\CSG5126 Assignment 2 - Presentation of Content.docx'\n wd = Word(my_path)\n wd.updateTable(bookmark, data, heading_rows)\n time.sleep(1)\n wd.updateIDs(bookmark, prefix=\"\")\n\ndef mock(data, **kwargs):\n pretty_print(data)\n \nif __name__ == \"__main__\":\n data = make_data()\n mock(bookmark='bk3', data=data, heading_rows=1)\n main(bookmark='runningsheet', data=data, heading_rows=1)\n # main(bookmark='Financials1', data=data, 
heading_rows=1)\n\n\n\n\n\n","repo_name":"rdapaz/csg5126_assignment2_scripts","sub_path":"wordUpdateWithClass_Investigation.py","file_name":"wordUpdateWithClass_Investigation.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"32518461011","text":"from django import forms\nfrom django.contrib.auth.views import redirect_to_login\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import list_detail\n\n\ndef object_list(request, **kwargs):\n if 'login_required' in kwargs:\n if kwargs['login_required'] and not request.user.is_authenticated():\n return redirect_to_login(request.path)\n kwargs.pop('login_required')\n \n queryset = kwargs['queryset']\n if 'extra_context' not in kwargs: kwargs['extra_context'] = {}\n \n kwargs['extra_context']['action_form'], response = serve_delete_action_form(\n request, queryset)\n if response is not None:\n return response\n \n return list_detail.object_list(request, **kwargs)\n\nclass ActionForm(forms.Form):\n objects = forms.ModelMultipleChoiceField(queryset=None, required=False)\n action = forms.ChoiceField(label=_('Action'), required=False)\n \n def __init__(self, queryset, actions, *args, **kwargs):\n super(ActionForm, self).__init__(*args, **kwargs)\n self.fields['objects'].queryset = queryset\n self.fields['action'].choices = [('', \"---------\")] + actions\n \ndef serve_delete_action_form(request, queryset):\n '''\n Process delete action form. Returns tuple (form, response).\n When response is not None, then it should be returned from \n the view function.\n '''\n if request.method== 'POST':\n form = ActionForm(queryset, [('delete', _('Delete'))], request.POST)\n if form.is_valid():\n if form.cleaned_data['action'] == 'delete':\n for obj in form.cleaned_data['objects']:\n obj.delete()\n return form, HttpResponseRedirect(request.path)\n else:\n form = ActionForm(queryset, [('delete', _('Delete'))])\n return form, None","repo_name":"tomasd/schools-old","sub_path":"src/schools/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"20030383488","text":"\n\nn, a, b = 0, 0, 1\n\n# print(n, a, b)\n\n# Fibonacci\ndef fib(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n# fib(6)\n\ndef fib_generator(max):\n n, a, b = 0, 0, 1\n while n < max:\n yield b\n a, b = b, a + b\n n = n + 1\n return 'done'\n\nprint(fib_generator(6))","repo_name":"billpoon12138/python_study","sub_path":"Advance_Features/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"25544173326","text":"import opencv\nfrom credit_card_reader import CreditCardReader\nfrom parser import CommandLineParser\n\n\ndef main():\n cli = CommandLineParser()\n args = cli.parse()\n\n image = opencv.load_image(args.image)\n\n reader = CreditCardReader(image)\n reader.detect()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bcandido/credit-card-reader","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} 
+{"seq_id":"8543857528","text":"# Import Modules\r\nimport dbl\r\nimport discord\r\nimport pyparticle as pp\r\nfrom discord.ext import commands, tasks\r\nimport asyncio\r\nimport _thread\r\nfrom sseclient import SSEClient\r\n\r\n# reference to URL with published events\r\n# 'catHere' is the name of the event being published\r\nmessages = SSEClient('https://api.particle.io/v1/events/catHere?access_token=PARTICLE_ACCOUNT_TOKEN')\r\n\r\n# reference to command listener,\r\n# also specifies prefix to issue bot commands '.'\r\nclient = commands.Bot(command_prefix = '.')\r\n\r\n# reference to particle account - should be change to new token\r\nparticle = pp.Particle(access_token='PARTICLE_ACCOUNT_TOKEN')\r\n\r\n# reference to list of devices linked to particle account\r\ndevices = particle.list_devices()\r\n# particle photon is only device linked, therefore at index 0\r\ndevice = devices[0]\r\n# print to confirm correct device selected\r\nprint('Selected device: %s' % device['name'])\r\n\r\n# location of cat\r\nisInside = True\r\n# set to false to prevent continous listening\r\nkeepListening = True\r\n# when startStatus != IsInside, the cat has been detected\r\nstartStatus = True\r\n\r\n# Output to terminal once the bot is ready to process commands\r\n@client.event\r\nasync def on_ready():\r\n print(\"Bot is ready.\")\r\n\r\n# Makes web request to call particle function 'lock'\r\n# Activate this function will lock or unlock the cat door\r\n# Usage: .lock\r\n@client.command()\r\nasync def lock(context):\r\n global particle\r\n particle.call_function(device['id'], 'lock', 10)\r\n await context.send('Door locked')\r\n\r\n\r\n# Stops the program contiously checking for changes in cat status\r\n# Usage: .stopListening\r\n@client.command()\r\nasync def stopListening(context):\r\n global keepListening\r\n keepListening = False\r\n await context.send('... 
Listening stopped')\r\n\r\n\r\n# Discord output stating cat location\r\n# Usage: .status\r\n@client.command()\r\nasync def status(context):\r\n global isInside\r\n if(isInside): await context.send('Cat is inside.')\r\n else: await context.send('Cat is outside.')\r\n\r\n# Starts cat detection listener\r\n# Usage: .listen\r\n@client.command()\r\nasync def listen(context):\r\n # Discord feedback\r\n await context.send(\"Listening...\")\r\n # Start listener on another thread so bot stays responsive during loop\r\n _thread.start_new_thread(listenLooper, (context,) )\r\n # Provide discord feedback on change\r\n await outputLoop(context)\r\n\r\n# Checks once every second if the cat has been detected\r\nasync def outputLoop(context):\r\n global startStatus\r\n global isInside\r\n # While user has not ended loop and cat has not been detected\r\n while (startStatus == isInside) and keepListening:\r\n await asyncio.sleep(1)\r\n if startStatus != isInside:\r\n await status(context)\r\n startStatus = isInside\r\n\r\n# Continuously makes web requests to check published events by particle photon\r\ndef listenLooper(context):\r\n global isInside\r\n global keepListening\r\n while keepListening:\r\n for msg in messages:\r\n if(str(msg) != ''): \r\n isInside = not isInside\r\n break\r\n if(not keepListening):\r\n break\r\n keepListening = True\r\n\r\n# Starts the bot, causing it to join the server\r\nclient.run('BOT_TOKEN')","repo_name":"nathanvarano/Embedded-Prototype","sub_path":"embedbot.py","file_name":"embedbot.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"41921864418","text":"import pymysql\n\n\nclass DataModel:\n def __init__(self):\n self.conn = pymysql.connect(host=\"127.0.0.1\", port=3306, user=\"root\", passwd=\"1\",\n database=\"sign_up_db\", charset=\"utf8\",\n cursorclass=pymysql.cursors.DictCursor\n )\n self.cursor = self.conn.cursor()\n# delete and insert\n def test(self, sql_test):\n self.cursor.execute(sql_test)\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n\n\n# query\n def test_query(self, sql_test):\n self.cursor.execute(sql_test)\n # self.conn.commit()\n result = self.cursor.fetchall()\n self.cursor.close()\n self.conn.close()\n return result\n\n def try1(self, sql_test, image):\n self.cursor.execute(sql_test, image)\n self.conn.commit()\n self.cursor.close()\n self.conn.close()\n\ndef new_student(*args):\n sql = \"insert into person_infor(registration_number,name,sex,\" \\\n \"minzu,cartype,cart_number,cart_photo,job,\" \\\n \"phone,birthday,province,city,detail_addrees)\" \\\n \" values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\n insert = DataModel()\n insert.try1(sql, args)\n\n\n","repo_name":"TruthLoveLife/mandarin","sub_path":"fastsql/CRUD.py","file_name":"CRUD.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5830960414","text":"class DaoAdicionarTarefa():\n def __init__(self, idtarefa, tarefa):\n with open(\"tarefas.txt\", \"a\") as arquivo:\n arquivo.write(idtarefa)\n arquivo.write(\" - \")\n arquivo.write(tarefa)\n arquivo.write(\"\\n\")\n print(\"Tarefa foi adicionada.\")\n\nclass DAOListarTarefas():\n def __init__(self):\n with open(\"tarefas.txt\", \"r\") as arquivo:\n linhas = arquivo.readlines()\n cont = -1\n for tarefas in linhas:\n cont += 1\n print(f\"{cont} - {tarefas}\")\n \n 
","repo_name":"Japiinhaa/To-do","sub_path":"Dao.py","file_name":"Dao.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27683067728","text":"\"\"\"\nCapture and externalize an object's internal state\nso that the object can be returned to this state\nlater.\n\"\"\"\nimport copy\nfrom typing import List\n\n\nclass Memento:\n\n def __init__(self, data) -> None:\n \"\"\" make a deep copy of every variable in the given class. \"\"\"\n for attribute in vars(data):\n setattr(self, attribute, copy.deepcopy(getattr(data, attribute)))\n\n\nclass Undoable:\n\n def __init__(self) -> None:\n \"\"\"\n each instance keeps the latest saved copy so that there is only\n one copy of each in memory\n \"\"\"\n self._last: Memento = None\n\n def save(self) -> None:\n self._last = Memento(self)\n\n def undo(self) -> None:\n for attibute in vars(self):\n setattr(self, attibute, copy.deepcopy(getattr(self._last, attibute)))\n\n\nclass Data(Undoable):\n\n def __init__(self) -> None:\n super().__init__()\n self.numbers: List[int] = []\n\n\ndata: Undoable = Data()\n\n# foward\nfor i in range(10):\n data.save()\n data.numbers.append(i)\n\ndata.save()\nprint(data.numbers)\n\n#backward\nfor i in range(10):\n data.undo()\n print(data.numbers)\n","repo_name":"rafaelcassau/design-patterns","sub_path":"behavioral/memento/udemy.py","file_name":"udemy.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"9624696915","text":"def two_sum(nums, target):\n # Create a dictionary to store the complement value and its corresponding index\n complement_dict = {}\n\n # Iterate through the list\n for i, num in enumerate(nums):\n complement = target - num\n # Check if the complement value exists in the dictionary\n if complement in complement_dict:\n # Return the indices of the two numbers that add up to the target\n return [complement_dict[complement], i]\n # Add the current number and its index to the dictionary\n complement_dict[num] = i\n\n # If no solution is found, return an empty list\n return []\n\nprint(two_sum([2, 7, 11, 15], 9))\n","repo_name":"Melissamichaud1/dsa_python_code","sub_path":"Grokking/TwoPointer/2sumpt2.py","file_name":"2sumpt2.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72151228244","text":"import shelve\n\ndef addStudents(professor, listOfStudents):\n # listOfStudents is array of dictionaries with studentID as key and time as value\n d = shelve.open(\"emails\")\n if d.has_key(professor):\n newIDs = [s[\"id\"] for s in listOfStudents]\n d[professor] = filter(lambda x: x[\"id\"] not in newIDs, d[professor]) + listOfStudents\n else:\n d[professor] = listOfStudents\n d.close()\n\ndef getStudents(professor):\n d = shelve.open(\"emails\")\n if d.has_key(professor):\n return d[professor]\n else:\n return []\n","repo_name":"cddude229/StudentSearch","sub_path":"studentsearch/emailStudents.py","file_name":"emailStudents.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"18280947535","text":"import uvicorn\nfrom dotenv import dotenv_values\nfrom pymongo import MongoClient\nfrom routes import router as animal_router\nfrom fastapi import FastAPI,Request\nfrom fastapi.templating import Jinja2Templates\nfrom 
fastapi.staticfiles import StaticFiles\n\napp = FastAPI()\n\nconfig = dotenv_values(\".env\")\n\n\n@app.on_event(\"startup\")\ndef startup_db_client():\n app.mongodb_client = MongoClient(config[\"ATLAS_URI\"])\n app.database = app.mongodb_client[config[\"DB_NAME\"]]\n\n@app.on_event(\"shutdown\")\ndef shutdown_db_client():\n app.mongodb_client.close()\n\napp.mount('/static',StaticFiles(directory='static'),name='static')\ntemplates = Jinja2Templates(directory='templates')\n\n@app.get(\"/\")\ndef read_main(req: Request):\n return templates.TemplateResponse('index.html',{'request':req})\n\napp.include_router(animal_router, prefix=\"/animals\")\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=3000, reload=True)\n\n\n","repo_name":"arihara-sudhan/simple-fastapi-app","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"20201012880","text":"import pandas as pd\nimport sqlite3\nimport os \nimport json\n\nimport argparse\n\ndef create_db(data, table_id, conn):\n\n # read table\n table_id = 'table_' + table_id\n table_id = table_id.replace('-','_')\n\n # schema\n schema = []\n for idx, dtype in enumerate(data.dtypes):\n t = 'text' if dtype=='object' else 'real'\n schema.append(f'col{idx} {t}')\n\n schema = ', '.join(schema)\n\n # create table\n conn.execute(f'CREATE TABLE {table_id}({schema})')\n\n # insert data into db\n values = ['?' for i in range(len(data.columns))]\n\n for str_col in data.columns[data.dtypes=='object']:\n data[str_col] = data[str_col].str.lower()\n\n cur = conn.cursor()\n cur.executemany(\n f'INSERT INTO {table_id} VALUES (' + ', '.join(values) + ')',\n data.values\n )\n\n conn.commit()\n\ndef create_table(data, table_id):\n table = {}\n\n # headers\n header = [c.upper() for c in data.columns]\n\n # types\n types_lst = []\n for dtype in data.dtypes:\n t = 'text' if dtype=='object' else 'real'\n types_lst.append(t)\n\n # rows\n data = data.astype(str)\n data = data.apply(lambda x: x.str.lower(), axis=1)\n\n # insert\n table['id'] = table_id\n table['header'] = header\n table['types'] = types_lst\n table['rows'] = data.values.tolist()\n\n return table\n\n\ndef preprocessing(data, table_id):\n # data preprocessing\n if table_id == 'CustomerAcqusition':\n data['Limit'] = data['Limit'].astype(int)\n data = data.dropna()\n \n elif table_id == 'CustomerRepayment':\n del data['Unnamed: 4']\n data = data.rename(columns={'SL No:':'SL No'})\n data.iloc[0,0] = 1\n data = data.dropna()\n data['SL No'] = data['SL No'].astype(int)\n\n elif table_id == 'CustomerSpend':\n data = data.rename(columns={'SL No:':'SL No'})\n data = data.dropna()\n\n elif table_id == 'ApplicationRecord':\n data['CNT_FAM_MEMBERS'] = data['CNT_FAM_MEMBERS'].astype(int)\n data['AMT_INCOME_TOTAL'] = data['AMT_INCOME_TOTAL'].astype(int)\n data = data.dropna()\n\n elif table_id == 'PersonalTransaction':\n data['Date'] = pd.to_datetime(data.Date).astype(str)\n \n else:\n data = data.dropna()\n\n return data.head(100) # select 100 samples from top\n\nif __name__=='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--datadir',type=str,help='data directory')\n args = parser.parse_args()\n\n table_ids = [table_id for table_id in os.listdir(args.datadir) if '.csv' in table_id]\n conn = sqlite3.connect(f\"./{args.datadir}/test.db\")\n \n table_lst = []\n for table_id in table_ids:\n # read data\n data = 
pd.read_csv(f'{args.datadir}/{table_id}')\n # define table_id\n table_id = table_id.replace('.csv','')\n\n # preprocessing data\n data = preprocessing(data, table_id)\n \n # create database\n create_db(data, table_id, conn)\n print(f'CREATE TABLE {table_id} / TABLE SIZE: ROW-{data.shape[0]} COL-{data.shape[1]}')\n\n # create table\n table = create_table(data, table_id)\n table_lst.append(table)\n\n # end database connection\n conn.close()\n\n # save tables\n n_written = 0\n with open(f'./{args.datadir}/test.tables.jsonl','w',encoding='utf-8') as fo:\n for line in table_lst:\n fo.write(json.dumps(line, ensure_ascii=False) + '\\n')\n n_written += 1\n print('wrote {} examples'.format(n_written))\n","repo_name":"TooTouch/SPARTA","sub_path":"demo/kaggle_demo/kaggle_demo.py","file_name":"kaggle_demo.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"30"} +{"seq_id":"35774466678","text":"# lint-amnesty, pylint: disable=missing-module-docstring\nfrom unittest.mock import patch\n\nimport ddt\nfrom crum import set_current_request\nfrom django.conf import settings\nfrom edx_toggles.toggles.testutils import override_waffle_switch\n\nfrom common.djangoapps.student.models import CourseEnrollment\nfrom common.djangoapps.student.tests.factories import UserFactory\nfrom openedx.core.djangolib.testing.utils import get_mock_request\nfrom xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase\nfrom xmodule.modulestore.tests.factories import CourseFactory, ItemFactory\n\nfrom ..config.waffle import ASSUME_ZERO_GRADE_IF_ABSENT, waffle_switch\nfrom ..course_data import CourseData\nfrom ..course_grade import ZeroCourseGrade\nfrom ..course_grade_factory import CourseGradeFactory\nfrom .base import GradeTestBase\nfrom .utils import answer_problem\n\n\n@patch.dict(settings.FEATURES, {'ASSUME_ZERO_GRADE_IF_ABSENT_FOR_ALL_TESTS': False})\n@ddt.ddt\nclass ZeroGradeTest(GradeTestBase):\n \"\"\"\n Tests ZeroCourseGrade (and, implicitly, ZeroSubsectionGrade)\n functionality.\n \"\"\"\n\n @ddt.data(True, False)\n def test_zero(self, assume_zero_enabled):\n \"\"\"\n Creates a ZeroCourseGrade and ensures it's empty.\n \"\"\"\n with override_waffle_switch(waffle_switch(ASSUME_ZERO_GRADE_IF_ABSENT), active=assume_zero_enabled):\n course_data = CourseData(self.request.user, structure=self.course_structure)\n chapter_grades = ZeroCourseGrade(self.request.user, course_data).chapter_grades\n for chapter in chapter_grades:\n for section in chapter_grades[chapter]['sections']:\n for score in section.problem_scores.values():\n assert score.earned == 0\n assert score.first_attempted is None\n assert section.all_total.earned == 0\n\n @ddt.data(True, False)\n def test_zero_null_scores(self, assume_zero_enabled):\n \"\"\"\n Creates a zero course grade and ensures that null scores aren't included in the section problem scores.\n \"\"\"\n with override_waffle_switch(waffle_switch(ASSUME_ZERO_GRADE_IF_ABSENT), active=assume_zero_enabled):\n with patch('lms.djangoapps.grades.subsection_grade.get_score', return_value=None):\n course_data = CourseData(self.request.user, structure=self.course_structure)\n chapter_grades = ZeroCourseGrade(self.request.user, course_data).chapter_grades\n for chapter in chapter_grades:\n assert {} != chapter_grades[chapter]['sections']\n for section in chapter_grades[chapter]['sections']:\n assert {} == section.problem_scores\n\n\nclass TestScoreForModule(SharedModuleStoreTestCase):\n \"\"\"\n Test the method 
that calculates the score for a given block based on the\n cumulative scores of its children. This test class uses a hard-coded block\n hierarchy with scores as follows:\n a\n +--------+--------+\n b c\n +--------------+-----------+ |\n d e f g\n +-----+ +-----+-----+ | |\n h i j k l m n\n (2/5) (3/5) (0/1) - (1/3) - (3/10)\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.course = CourseFactory.create()\n with cls.store.bulk_operations(cls.course.id):\n cls.a = ItemFactory.create(parent=cls.course, category=\"chapter\", display_name=\"a\")\n cls.b = ItemFactory.create(parent=cls.a, category=\"sequential\", display_name=\"b\")\n cls.c = ItemFactory.create(parent=cls.a, category=\"sequential\", display_name=\"c\")\n cls.d = ItemFactory.create(parent=cls.b, category=\"vertical\", display_name=\"d\")\n cls.e = ItemFactory.create(parent=cls.b, category=\"vertical\", display_name=\"e\")\n cls.f = ItemFactory.create(parent=cls.b, category=\"vertical\", display_name=\"f\")\n cls.g = ItemFactory.create(parent=cls.c, category=\"vertical\", display_name=\"g\")\n cls.h = ItemFactory.create(parent=cls.d, category=\"problem\", display_name=\"h\")\n cls.i = ItemFactory.create(parent=cls.d, category=\"problem\", display_name=\"i\")\n cls.j = ItemFactory.create(parent=cls.e, category=\"problem\", display_name=\"j\")\n cls.k = ItemFactory.create(parent=cls.e, category=\"html\", display_name=\"k\")\n cls.l = ItemFactory.create(parent=cls.e, category=\"problem\", display_name=\"l\")\n cls.m = ItemFactory.create(parent=cls.f, category=\"html\", display_name=\"m\")\n cls.n = ItemFactory.create(parent=cls.g, category=\"problem\", display_name=\"n\")\n\n cls.request = get_mock_request(UserFactory())\n CourseEnrollment.enroll(cls.request.user, cls.course.id)\n\n answer_problem(cls.course, cls.request, cls.h, score=2, max_value=5)\n answer_problem(cls.course, cls.request, cls.i, score=3, max_value=5)\n answer_problem(cls.course, cls.request, cls.j, score=0, max_value=1)\n answer_problem(cls.course, cls.request, cls.l, score=1, max_value=3)\n answer_problem(cls.course, cls.request, cls.n, score=3, max_value=10)\n\n cls.course_grade = CourseGradeFactory().read(cls.request.user, cls.course)\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n set_current_request(None)\n\n def test_score_chapter(self):\n earned, possible = self.course_grade.score_for_module(self.a.location)\n assert earned == 9\n assert possible == 24\n\n def test_score_section_many_leaves(self):\n earned, possible = self.course_grade.score_for_module(self.b.location)\n assert earned == 6\n assert possible == 14\n\n def test_score_section_one_leaf(self):\n earned, possible = self.course_grade.score_for_module(self.c.location)\n assert earned == 3\n assert possible == 10\n\n def test_score_vertical_two_leaves(self):\n earned, possible = self.course_grade.score_for_module(self.d.location)\n assert earned == 5\n assert possible == 10\n\n def test_score_vertical_two_leaves_one_unscored(self):\n earned, possible = self.course_grade.score_for_module(self.e.location)\n assert earned == 1\n assert possible == 4\n\n def test_score_vertical_no_score(self):\n earned, possible = self.course_grade.score_for_module(self.f.location)\n assert earned == 0\n assert possible == 0\n\n def test_score_vertical_one_leaf(self):\n earned, possible = self.course_grade.score_for_module(self.g.location)\n assert earned == 3\n assert possible == 10\n\n def test_score_leaf(self):\n earned, possible = 
self.course_grade.score_for_module(self.h.location)\n assert earned == 2\n assert possible == 5\n\n def test_score_leaf_no_score(self):\n earned, possible = self.course_grade.score_for_module(self.m.location)\n assert earned == 0\n assert possible == 0\n","repo_name":"eduNEXT/edunext-platform","sub_path":"lms/djangoapps/grades/tests/test_course_grade.py","file_name":"test_course_grade.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"30"} +{"seq_id":"9152004822","text":"\"\"\" Tests for functions in src/data/utils.py \"\"\"\n# export PYTHONPATH=\"/home\"\nimport numpy as np\nimport pandas as pd\nimport geopandas as gpd\nimport datetime\nimport pytest \nimport os\nfrom pathlib import Path\n\nimport sys\n\nfrom src.data.utils import *\n\nclass TestGetHUC12():\n @pytest.mark.parametrize(\"point,huc\", [\n ((-91.69396, 41.89110), \"070802081007\"),\n ((-87.8834,42.0334), \"071200040505\") \n ])\n def test_get_huc12_valid_point(self, point, huc):\n huc12_returned = get_huc12(point)\n assert huc12_returned == huc\n \n def test_get_huc12_invalid_point(self):\n with pytest.raises(Exception):\n get_huc12((100,200))\n \n \nclass TestMaskArray():\n \n @pytest.mark.parametrize(\"arr_to_mask\", [\n (np.array([1.0,2.0,3.0,4.0,5.0])),\n (np.array([0.0,0.0,0.0,0.0,0.0]))\n ])\n def test_mask_array(self, arr_to_mask):\n mask_arr = np.array([1,0,0,0,1])\n result = mask_array(mask_arr, arr_to_mask)\n \n # loop through for comparisons since nan != nan\n for i in range(len(result)):\n if mask_arr[i] == 0:\n assert np.isnan(result[i])\n else:\n assert arr_to_mask[i] == result[i]\n\n @pytest.mark.parametrize(\"arr_to_mask\", [\n (np.array([1,2,3,4,5])),\n (np.array([0,0,0,0,0]))\n ])\n def test_mask_array_int(self,arr_to_mask):\n mask_arr = np.array([1,0,0,0,1])\n result = mask_array(mask_arr, arr_to_mask)\n \n # loop through for comparisons since nan != nan\n for i in range(len(result)):\n if mask_arr[i] == 0:\n assert np.isnan(result[i])\n else:\n assert arr_to_mask[i] == result[i]\n \n def test_mask_array_diff_dim(self):\n mask_arr = np.array([1,0,0,0,1,0,0,0])\n arr_to_mask = np.array([1,2,3,4,5])\n with pytest.raises(Exception):\n result = mask_array(mask_arr, arr_to_mask)","repo_name":"Mark-S-Bartlett/data-science-project-template","sub_path":"{{ cookiecutter.repo_name }}/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26675563117","text":"import requests\nimport json\n\n\ndef get_response(url):\n \"\"\"\n Given a valid url link returns the response in json\n \"\"\"\n response = requests.get(url)\n if response.status_code == 200 and 'application/json' in response.headers.get('Content-Type', ''):\n return response.json()\n else:\n return 'Invalid Link'\n\n\n\"\"\"\nUSER_DEBT example:\n [{'amount': 123.46, 'id': 0},\n {'amount': 100, 'id': 1}]\n\"\"\"\nUSER_DEBT = get_response(\n 'https://my-json-server.typicode.com/druska/trueaccord-mock-payments-api/debts')\n\n\n\"\"\"\nUSER_PAYMENTS example:\n[{'amount': 51.25, 'date': '2020-09-29', 'payment_plan_id': 0},\n {'amount': 51.25, 'date': '2020-10-29',\n 'payment_plan_id': 0},\n {'amount': 25, 'date': '2020-08-08', 'payment_plan_id': 1},\n {'amount': 25, 'date': '2020-08-08', 'payment_plan_id': 1}]\n\"\"\"\nUSER_PAYMENTS = get_response(\n 
'https://my-json-server.typicode.com/druska/trueaccord-mock-payments-api/payments')\n\n\"\"\"\nUSER_PAYMENT_PLANS example:\n [{'amount_to_pay': 102.5,\n 'debt_id': 0,\n 'id': 0,\n 'installment_amount': 51.25,\n 'installment_frequency': 'WEEKLY',\n 'start_date': '2020-09-28'}]\n\"\"\"\nUSER_PAYMENT_PLANS = get_response(\n 'https://my-json-server.typicode.com/druska/trueaccord-mock-payments-api/payment_plans')\n\n\nclass InDebt:\n\n def users_in_payment_plans(self, users):\n \"\"\"\n Given a list of user ids, return a list with each user's debt information.\n \"\"\"\n\n users_debt_info = []\n\n for user_id in users:\n user_debt_info = self.is_in_payment_plan(user_id)\n users_debt_info.append(user_debt_info)\n\n return users_debt_info\n\n def is_in_payment_plan(self, user_id):\n \"\"\"\n Checks whether the given user_id is in a payment plan or not.\n Then returns the user_debt json object with \"is_in_payment_plan\".\n \"\"\"\n\n amount_owed = self.get_amount_owed(user_id)\n if amount_owed == 0:\n return self.user_debt_with_variation(user_id, amount_owed, is_in_payment_plan=False)\n else:\n amount_to_pay = self.get_amount_to_pay(user_id)\n # check payment plans and see how much they paid\n total_amount_in_payments = self.total_amount_in_payments(user_id)\n amount_owed = amount_to_pay - total_amount_in_payments\n\n if amount_owed == 0:\n return self.user_debt_with_variation(user_id, amount_owed, is_in_payment_plan=False)\n else:\n return self.user_debt_with_variation(user_id, amount_owed, is_in_payment_plan=True)\n\n def get_amount_to_pay(self, user_id):\n \"\"\"\n Gets the value for \"amount_to_pay\" from the USER_PAYMENT_PLANS object\n \"\"\"\n\n amount_to_pay = 0\n\n for user in USER_PAYMENT_PLANS:\n if user.get('id') == user_id:\n amount_to_pay = user.get('amount_to_pay')\n return amount_to_pay\n\n return amount_to_pay\n\n def get_amount_owed(self, user_id):\n \"\"\"\n Given the user_id, returns the amount the user\n owes.\n \"\"\"\n\n for user in USER_DEBT:\n if user.get('id') == user_id:\n return user.get('amount')\n\n def user_debt_with_variation(self, user_id, amount, is_in_payment_plan=False):\n \"\"\"\n Returns the user_object with the addition of the \"is_in_payment_plan\"\n field. Also adjusts the \"amount\" field depending on how much \n the user has paid in payments. \n \"\"\"\n user_debt = {\n 'id': user_id,\n 'amount': amount,\n 'is_in_payment_plan': is_in_payment_plan\n }\n\n return json.dumps(user_debt)\n\n def total_amount_in_payments(self, user_id):\n \"\"\"\n Iterate through the payments and 
calculate the total\n amount the given user has paid.\n \"\"\"\n total_amount_in_payments = 0\n\n for user in USER_PAYMENTS:\n if user_id == user.get('payment_plan_id'):\n total_amount_in_payments += user.get('amount')\n\n return total_amount_in_payments\n\nif __name__ == '__main__':\n users = [0, 1, 2, 3, 4, 5]\n user_one_info = InDebt().is_in_payment_plan(1)\n print(\"USER 1:\")\n print(user_one_info)\n\n users_info = InDebt().users_in_payment_plans(users)\n print(\"USERS:\")\n print(users_info)\n","repo_name":"abas5/debt-service","sub_path":"in_debt/in_debt.py","file_name":"in_debt.py","file_ext":"py","file_size_in_byte":4596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"43476410035","text":"from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\n\nfrom .models import Reservation\nfrom .forms import ReservationTableForm\n\n\ndef reserve_table(request):\n if request.method == 'POST' and request.headers.get('x-requested-with') == 'XMLHttpRequest':\n form = ReservationTableForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n form.save()\n return JsonResponse({'name': name}, status=200)\n else:\n errors = form.errors.as_json()\n return JsonResponse({'errors': errors}, status=400)\n else:\n form = ReservationTableForm()\n # Get all the names from the DB.\n # And add names to the context so they can be accessed in the template\n now = timezone.now()\n Reservation.objects.filter(date__lt=now).delete()\n return render(request, 'contact.html', {'form': form})\n\n\n","repo_name":"zai-elina/restaurant","sub_path":"anilez/reservation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"35250314798","text":"import mock\nfrom django.urls import reverse\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\n\nMOCK_RESPONSE = {\n \"lat\": 33.44,\n \"lon\": -94.04,\n \"timezone\": \"America/Chicago\",\n \"timezone_offset\": -18000,\n \"hourly\": [\n {\n \"dt\": 1598158800,\n \"temp\": 23.19,\n \"feels_like\": 25.33,\n \"pressure\": 1015,\n \"humidity\": 88,\n \"dew_point\": 21.09,\n \"clouds\": 1,\n \"visibility\": 10000,\n \"wind_speed\": 2.98,\n \"wind_deg\": 109,\n \"weather\": [\n {\"id\": 800, \"main\": \"Clear\", \"description\": \"clear sky\", \"icon\": \"01n\"}\n 
],\n \"pop\": 0,\n },\n {\n \"dt\": 1598173200,\n \"temp\": 21.85,\n \"feels_like\": 22.87,\n \"pressure\": 1014,\n \"humidity\": 75,\n \"dew_point\": 17.22,\n \"clouds\": 0,\n \"visibility\": 10000,\n \"wind_speed\": 2.06,\n \"wind_deg\": 164,\n \"weather\": [\n {\"id\": 800, \"main\": \"Clear\", \"description\": \"clear sky\", \"icon\": \"01n\"}\n ],\n \"pop\": 0,\n },\n ],\n \"daily\": [\n {\n \"dt\": 1598205600,\n \"sunrise\": 1598183082,\n \"sunset\": 1598230379,\n \"temp\": {\n \"day\": 32.88,\n \"min\": 20.98,\n \"max\": 33.97,\n \"night\": 23.07,\n \"eve\": 30.82,\n \"morn\": 20.98,\n },\n \"feels_like\": {\"day\": 34.48, \"night\": 23.85, \"eve\": 32.52, \"morn\": 23.26},\n \"pressure\": 1016,\n \"humidity\": 41,\n \"dew_point\": 18.21,\n \"wind_speed\": 1.62,\n \"wind_deg\": 93,\n \"weather\": [\n {\"id\": 800, \"main\": \"Clear\", \"description\": \"clear sky\", \"icon\": \"01d\"}\n ],\n \"clouds\": 0,\n \"pop\": 0,\n \"uvi\": 9.62,\n },\n {\n \"dt\": 1598292000,\n \"sunrise\": 1598269523,\n \"sunset\": 1598316706,\n \"temp\": {\n \"day\": 32.71,\n \"min\": 20.28,\n \"max\": 34.39,\n \"night\": 22.8,\n \"eve\": 30.7,\n \"morn\": 20.28,\n },\n \"feels_like\": {\"day\": 32.49, \"night\": 24.58, \"eve\": 31.59, \"morn\": 21.31},\n \"pressure\": 1016,\n \"humidity\": 37,\n \"dew_point\": 16.33,\n \"wind_speed\": 3.2,\n \"wind_deg\": 80,\n \"weather\": [\n {\"id\": 800, \"main\": \"Clear\", \"description\": \"clear sky\", \"icon\": \"01d\"}\n ],\n \"clouds\": 0,\n \"pop\": 0,\n \"uvi\": 10.96,\n },\n {\n \"dt\": 1598378400,\n \"sunrise\": 1598355964,\n \"sunset\": 1598403032,\n \"temp\": {\n \"day\": 32.33,\n \"min\": 21.23,\n \"max\": 33.77,\n \"night\": 24.99,\n \"eve\": 30.26,\n \"morn\": 21.23,\n },\n \"feels_like\": {\"day\": 32.32, \"night\": 27.6, \"eve\": 32.35, \"morn\": 23.23},\n \"pressure\": 1015,\n \"humidity\": 48,\n \"dew_point\": 19.97,\n \"wind_speed\": 5.22,\n \"wind_deg\": 79,\n \"weather\": [\n {\n \"id\": 802,\n \"main\": \"Clouds\",\n \"description\": \"scattered clouds\",\n \"icon\": \"03d\",\n }\n ],\n \"clouds\": 33,\n \"pop\": 0.27,\n \"uvi\": 10.33,\n },\n {\n \"dt\": 1598464800,\n \"sunrise\": 1598442405,\n \"sunset\": 1598489357,\n \"temp\": {\n \"day\": 33.22,\n \"min\": 22.83,\n \"max\": 34.66,\n \"night\": 25.76,\n \"eve\": 31.79,\n \"morn\": 22.83,\n },\n \"feels_like\": {\"day\": 35.42, \"night\": 27.57, \"eve\": 33.54, \"morn\": 25.98},\n \"pressure\": 1011,\n \"humidity\": 55,\n \"dew_point\": 23.02,\n \"wind_speed\": 4.3,\n \"wind_deg\": 111,\n \"weather\": [\n {\"id\": 500, \"main\": \"Rain\", \"description\": \"light rain\", \"icon\": \"10d\"}\n ],\n \"clouds\": 31,\n \"pop\": 0.67,\n \"rain\": 1.03,\n \"uvi\": 9.96,\n },\n {\n \"dt\": 1598551200,\n \"sunrise\": 1598528846,\n \"sunset\": 1598575682,\n \"temp\": {\n \"day\": 23.02,\n \"min\": 21.84,\n \"max\": 27.26,\n \"night\": 21.84,\n \"eve\": 26.3,\n \"morn\": 25.7,\n },\n \"feels_like\": {\"day\": 20.67, \"night\": 23.93, \"eve\": 29.23, \"morn\": 26.08},\n \"pressure\": 1003,\n \"humidity\": 94,\n \"dew_point\": 22.08,\n \"wind_speed\": 10.07,\n \"wind_deg\": 327,\n \"weather\": [\n {\n \"id\": 502,\n \"main\": \"Rain\",\n \"description\": \"heavy intensity rain\",\n \"icon\": \"10d\",\n }\n ],\n \"clouds\": 100,\n \"pop\": 0.95,\n \"rain\": 44.22,\n \"uvi\": 10.16,\n },\n ],\n}\n\n\nclass TestWeatherViewSetTestCase(APITestCase):\n def setUp(self) -> None:\n self.url = reverse(\"weather-list\")\n\n @mock.patch(\n \"wither.open_weather_map.client.OpenWeatherMapClient.get\",\n 
side_effect=lambda: MOCK_RESPONSE,\n )\n def test_list(self, *args, **kwargs) -> None:\n response = self.client.get(self.url, dict(location=\"Cape Town\"))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, MOCK_RESPONSE)\n\n def test_list_location_not_provided(self) -> None:\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data, [\"The location query paramater is required.\"])\n\n\nclass TestWeatherSummaryViewSetTestCase(APITestCase):\n def setUp(self) -> None:\n self.url = reverse(\"weather-summary-list\")\n\n def test_list_location_not_provided(self) -> None:\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data, [\"The location query paramater is required.\"])\n\n def test_list_invalid_location(self) -> None:\n response = self.client.get(self.url, dict(location=\"qqqqq\"))\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data, [\"Invalid location.\"])\n\n @mock.patch(\"wither.open_weather_map.client.OpenWeatherMapClient.get\",)\n def test_list_without_period_set(self, mock_response) -> None:\n mock_response.return_value = MOCK_RESPONSE.copy()\n response = self.client.get(self.url, dict(location=\"Cape Town\"))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # TODO: assert response data\n","repo_name":"crintus/wither","sub_path":"wither/weather/test/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40714186305","text":"from panda3d.core import LColor\nfrom panda3d.core import LPoint3, LPoint3d\nfrom panda3d.core import PandaSystem\n\nfrom .astro import units\nfrom .bodyclass import BodyClass, bodyClasses\nfrom appdirs.appdirs import AppDirs\n\nimport os\n\napp_name = 'cosmonium'\n\nuse_double = LPoint3 == LPoint3d\ncache_yaml = True\nprc_file = 'config.prc'\n\n#OpenGL user configuration\nuse_core_profile_mac = True\nuse_gl_version = None\nuse_hardware_srgb = True\nuse_multisampling = True\nmultisamples = 2\nuse_hardware_sprites = True\nuse_floating_point_buffer = True\nforce_power_of_two_textures = False\nuse_hardware_tessellation = True\nuse_hardware_instancing = True\nuse_inverse_z = False\ninstancing_use_tex = True\nuse_texture_array = True\nuse_aux_buffer = False\nsync_video = True\nshader_normals_use_centroid = True\n\nstereoscopic_framebuffer = False\nred_blue_stereo = False\nside_by_side_stereo = False\nstereo_swap_eyes = False\n\n#Settings\nuse_pbr = False\nuse_srgb = True\nuse_assimp = True\nuse_smooth_lines = True\n\ndeferred=False\ndeferred_split=False\ndeferred_load=True\npatch_pool_size = 4\n\nmouse_over = False\nuse_color_picking = True\ncelestia_nav = True\ninvert_wheel = False\ndamped_nav = True\n\nglobal_ambient = 0.0\ncorrected_global_ambient = global_ambient\nmax_sprite_size = 800\n\npatch_max_vertex_size = 64\npatch_min_density = 32\npatch_max_density = 64\npatch_constant_density = 32\nuse_horizon_culling = True\ncull_far_patches = False\ncull_far_patches_threshold = 10\n\npatch_data_store = True\npatch_data_store_max_elems = 2048\npatch_parameters_data_store = True\n\nuse_patch_adaptation = True\nuse_patch_skirts = True\n\nrender_points = True\nrender_sprite_points = True\nshow_halo = True\ndisable_tint = False\nsoftware_instancing = 
False\n\nallow_shadows = True\nshadow_size = 1024\nshadows_slope_scale_bias = True\nshadows_pcf_16 = True\nshadows_snap_cam = False\n\nhud_font = 'DejaVuSans'\nmarkdown_font = 'DejaVuSans'\nlabel_font = 'DejaVuSans'\n\nlabel_size = 12\nconstellations_label_size = 16\nconvert_utf8 = True\n\nscreenshot_path = None\nscreenshot_filename = \"screenshot-%Y-%m-%d-%H-%M-%S-%~f.%~e\"\nscreenshot_format = \"png\"\n\nlast_script_path = None\n\nscene_manager = 'region'\nc_scene_manager = True\n\nuse_inv_scaling=True\nuse_log_scaling=False\nuse_depth_scaling = scene_manager == 'dynamic' and (use_inv_scaling or use_log_scaling)\nauto_scale=True\nlens_far_limit = 1e-12\nscale=1000.0\nmin_scale = 0.02\nmax_scale=1000.0\nset_frustum=True\nnear_plane=1.0\nfar_plane=30000.0\ninfinite_far_plane = True\ninfinite_plane = 100000.0\nauto_infinite_plane = False\nmid_plane_ratio = 1.1\ndefault_fov = 40.0\nmin_fov = 0.001\nmax_fov = 120.0\n\nif use_double:\n offset_body_center = True\n shift_patch_origin = True\nelse:\n offset_body_center = True\n shift_patch_origin = True\ncamera_at_origin = True\n\nmin_altitude = 2 * units.m\n\nshader_noise=True\nc_noise=True\n\ndebug_vt = False\ndebug_lod_show_bb = False\ndebug_lod_freeze = False\ndebug_lod_split_merge = False\ndebug_lod_frustum = False\ndump_shaders = True\ndump_panda_shaders = False\ndebug_shadow_frustum = False\ndebug_shape_task = False\ndebug_tex_loading = False\n\nsync_data_load = False\nsync_texture_load = False\n\ndebug_jump = False\n\nuse_vertex_shader = False\n\nmin_mag_scale = 0.1\nlowest_app_magnitude = 6.0\nmax_app_magnitude = 0.0\nmin_point_size = 4\nmag_pixel_scale = 2\nmin_body_size = 2\npoint_scale_dpi_aware = True\ncustom_point_scale = 1.0\nscreen_point_scale = 1.0\n\nsmallest_glare_mag = 1.0\nlargest_glare_mag = -2.0\n\nlabel_lowest_app_magnitude = 4.0\n\naxis_fade = 20\naxis_thickness = 0.9\n\nshow_clouds = True\nshow_atmospheres = True\nshow_asterisms = False\nshow_boundaries = False\nshow_ecliptic_grid = False\nshow_equatorial_grid = False\nshow_rotation_axis = False\nshow_reference_axis = False\n\nshow_orbits = False\norbit_fade = 20\nlabel_fade = 20\norbit_thickness = 0.6\norbit_smooth_thickness = 3\norbit_smooth_width = 1.5\norbit_smooth_blend = 1.5\n\ngrid_thickness = 0.5\n\nasterism_thickness = 0.9\nboundary_thickness = 0.9\n\nwireframe_fill_color = LColor(1, 0., 0., 1.0)\n\nfast_move = 2.0\nslow_move = 5.0\ndefault_distance = 5.0\n\nui_scale_dpi_aware = True\ncustom_ui_scale = 1.0\nui_scale = 1.0\nshow_hud = True\nhud_text_size = 12\nhud_info_text_size = 18\nshow_menubar = True\nhud_color = LColor(0.7, 0.7, 1.0, 1.0)\nhelp_color = LColor(1.0, 1.0, 1.0, 1.0)\nhelp_background = LColor(0.5, 0.5, 0.5, 0.7)\ndisplay_render_info = 'fps'\nui_font_size = 12\npanel_background = LColor(0.8, 0.8, 0.8, 1)\ntab_background = LColor(0.7, 0.7, 0.7, 1)\nentry_background = LColor(0.9, 0.9, 0.9, 1)\n\nmenu_text_size = 12\n\nquery_color = LColor(0.7, 0.7, 1.0, 1.0)\nquery_delay = 0.333\nquery_text_size = 18\nquery_suggestion_text_size = 12\n\ndefault_window_width = 800\ndefault_window_height = 600\n\n#These are the fake depth value used for sorting background bin objects\nskysphere_depth = 0\ngrid_depth = 5\nasterisms_depth = 10\nconstellations_depth = 15\nboundaries_depth = 20\ndeep_space_depth = 50\nhalo_depth = 100\n\n# Tasks order\nworker_callback_task_sort = -10\nmain_update_task_sort = 0\ninstances_update_task_sort = 10\n\nshader_version = None\ncolor_picking = False\n\n# Window configuration\nwin_fullscreen = False\nwin_width = 1024\nwin_height = 
768\nwin_fs_width = 0\nwin_fs_height = 0\n\n# Application paths and files\n# We are setting appauthor to False, with None AppDirs uses appname for it\nappdirs = AppDirs(appname=app_name, appauthor=False)\ncache_dir = appdirs.user_cache_dir\nconfig_dir = appdirs.user_config_dir\ndata_dir = appdirs.user_data_dir\nconfig_file = os.path.join(config_dir, 'config.yaml')\n\n#Debug flags\nshader_debug_fragment_shader = 'default'\nshader_debug_coord = False\nshader_debug_coord_line_width = 0.005\nshader_debug_raymarching_canvas = False\nshader_debug_raymarching_slice = False\n\nbodyClasses.register_class(\"galaxy\", \"galaxies\",\n BodyClass(label_color=LColor(0.0, 0.45, 0.5, 1),\n orbit_color=LColor(1, 1, 1, 1),\n show_label=False))\nbodyClasses.register_class(\"globular\", \"globulars\",\n BodyClass(label_color=LColor(0.8, 0.45, 0.5, 1),\n orbit_color=LColor(1, 1, 1, 1),\n show_label=False))\nbodyClasses.register_class(\"nebula\", \"nebulae\",\n BodyClass(label_color=LColor(0.541, 0.764, 0.278, 1),\n orbit_color=LColor(1, 1, 1, 1),\n show_label=False))\nbodyClasses.register_class(\"star\", \"stars\",\n BodyClass(label_color=LColor(0.471, 0.356, 0.682, 1),\n orbit_color=LColor(0.5, 0.5, 0.8, 1),\n show_label=False))\nbodyClasses.register_class(\"planet\", \"planets\",\n BodyClass(label_color=LColor(0.407, 0.333, 0.964, 1),\n orbit_color=LColor(0.3, 0.323, 0.833, 1),\n show_label=False))\nbodyClasses.register_class(\"dwarfplanet\", \"dwarfplanets\",\n BodyClass(label_color=LColor(0.407, 0.333, 0.964, 1),\n orbit_color=LColor(0.3, 0.323, 0.833, 1),\n show_label=False))\nbodyClasses.register_class(\"moon\", \"moons\",\n BodyClass(label_color=LColor(0.231, 0.733, 0.792, 1),\n orbit_color=LColor(0.08, 0.407, 0.392, 1),\n show_label=False))\nbodyClasses.register_class(\"minormoon\", \"minormoons\",\n BodyClass(label_color=LColor(0.231, 0.733, 0.792, 1),\n orbit_color=LColor(0.08, 0.407, 0.392, 1),\n show_label=False))\nbodyClasses.register_class(\"lostmoon\", \"lostmoons\",\n BodyClass(label_color=LColor(0.231, 0.733, 0.792, 1),\n orbit_color=LColor(0.08, 0.407, 0.392, 1),\n show=False,\n show_label=False))\nbodyClasses.register_class(\"comet\", \"comets\",\n BodyClass(label_color=LColor(0.768, 0.607, 0.227, 1),\n orbit_color=LColor(0.639, 0.487, 0.168, 1),\n show_label=False))\nbodyClasses.register_class(\"asteroid\", \"asteroids\",\n BodyClass(label_color=LColor(0.596, 0.305, 0.164, 1),\n orbit_color=LColor(0.58, 0.152, 0.08, 1),\n show_label=False))\nbodyClasses.register_class(\"interstellar\", \"interstellars\",\n BodyClass(label_color=LColor(0.596, 0.305, 0.164, 1),\n orbit_color=LColor(0.58, 0.152, 0.08, 1),\n show_label=False))\nbodyClasses.register_class(\"spacecraft\", \"spacecrafts\",\n BodyClass(label_color=LColor(0.93, 0.93, 0.93, 1),\n orbit_color=LColor(0.4, 0.4, 0.4, 1),\n show_label=False))\nbodyClasses.register_class(\"constellation\", \"constellations\",\n BodyClass(label_color=LColor(0.225, 0.301, 0.36, 1),\n orbit_color=LColor(0.0, 0.24, 0.36, 1.0),\n show_label=False))\nbodyClasses.register_class(\"boundary\", \"boundaries\",\n BodyClass(orbit_color=LColor(0.24, 0.10, 0.12, 1.0)))\n","repo_name":"cosmonium/cosmonium","sub_path":"cosmonium/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9531,"program_lang":"python","lang":"en","doc_type":"code","stars":200,"dataset":"github-code","pt":"30"} +{"seq_id":"24100119537","text":"# 19 source @sentdex\n# #this works for large radius\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import 
style\nstyle.use('ggplot')\nimport numpy as np\nimport random\nfrom sklearn.datasets.samples_generator import make_blobs #this generates data, so we comment our data below and use blobs\n\ncenters = random.randrange(2,8) #with import random, we print(centers) below to see if it's within the range\n#blob can be used to do svm or clustering. in this class,we use it to test this clustering algorithm\nX, y = make_blobs(n_samples=50, centers = 3, n_features=2)\n'''X = np.array([[1, 2],\n [1.5, 1.8],\n [5, 8],\n [8, 8],\n [1, 0.6],\n [9, 11],\n [8, 2],\n [10, 2],\n [9, 3], ])\n\n##plt.scatter(X[:,0], X[:,1], s=150)\n##plt.show()'''\n\ncolors = 10 * [\"g\", \"r\", \"c\", \"b\", \"k\"]\n\n\nclass Mean_Shift:\n    def __init__(self, radius=None, radius_norm_step=100):\n        self.radius = radius\n        self.radius_norm_step=radius_norm_step\n\n    def fit(self, data):\n\n        if self.radius == None:\n            all_data_centroid = np.average(data, axis =0)\n            all_data_norm = np.linalg.norm(all_data_centroid)\n            self.radius = all_data_norm / self.radius_norm_step\n\n        centroids = {}\n\n        for i in range(len(data)):\n            centroids[i] = data[i]\n\n        while True:\n            new_centroids = []\n            for i in centroids:\n                in_bandwidth = []\n                centroid = centroids[i]\n\n                weights = [i for i in range(self.radius_norm_step)][::-1]\n\n                for featureset in data:\n                    distance = np.linalg.norm(featureset-centroid)\n                    if distance ==0:\n                        distance = 0.000000001\n                    weight_index = int(distance/self.radius)# the more the weight_index ,the higher the weight\n                    if weight_index > self.radius_norm_step-1:\n                        weight_index = self.radius_norm_step-1# if the dist is> max_dist or more than a 100 steps\n                        # away, the weight index is that max element\n                    to_add = (weights[weight_index]**2) * [featureset]\n                    in_bandwidth += to_add\n\n\n                new_centroid = np.average(in_bandwidth, axis=0)\n                new_centroids.append(tuple(new_centroid))\n\n            # to get the unique elements from the new centroids list\n            uniques = sorted(list(set(new_centroids)))\n\n            to_pop =[]\n\n            for i in uniques:\n                for ii in uniques:\n                    if i == ii:\n                        pass\n                    elif np.linalg.norm(np.array(i) - np.array(ii)) <= self.radius:\n                        to_pop.append(ii)\n                        break\n\n            for i in to_pop:\n                try:\n                    uniques.remove(i)\n                except:\n                    pass\n\n\n            prev_centroids = dict(centroids)\n\n            centroids = {}\n            for i in range(len(uniques)):\n                centroids[i] = np.array(uniques[i])\n\n            optimized = True\n\n            for i in centroids:\n                if not np.array_equal(centroids[i], prev_centroids[i]):\n                    optimized = False\n                if not optimized:\n                    break\n\n            if optimized:\n                break\n\n        self.centroids = centroids\n\n        # adding classifications and allowing this code to scale out to bigger dataset\n        self.classifications = {}\n        for i in range(len(self.centroids)):\n            self.classifications[i] = []\n\n        for featureset in data:\n            distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]\n            classification = distances.index(min(distances))#min distance is the classification\n            self.classifications[classification].append(featureset)\n\n\n    def predict(self,data):\n        distances = [np.linalg.norm(data - self.centroids[centroid]) for centroid in self.centroids]\n        classification = distances.index(min(distances)) # min distance is the classification\n        return classification\n\nclf = Mean_Shift()\nclf.fit(X)\n\ncentroids = clf.centroids\n\nplt.scatter(X[:, 0], X[:, 1], s=150)\n\nfor classification in clf.classifications:\n    color = colors[classification]\n    for featureset in clf.classifications[classification]:\n        plt.scatter(featureset[0], featureset[1], marker='x', color = color, s=150, 
linewidth=5)\n\nfor c in centroids:\n plt.scatter(centroids[c][0], centroids[c][1], color='k', marker='*', s=150)\n\nplt.show()\nprint(centers)","repo_name":"MarthaSamuel/Machine_Learning","sub_path":"MeanShift Dynamic Bandwidth.py","file_name":"MeanShift Dynamic Bandwidth.py","file_ext":"py","file_size_in_byte":4674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70592486165","text":"import networkx as nx\nimport gmatch4py as gm\nimport math\n# model acm68 \nnodosG = [(1, {'nombre':'B1'}),\n (2, {'nombre':'M1'}),\n (3, {'nombre':'B2'}),\n (4, {'nombre':'M3'}),\n (5, {'nombre':'M2'}),\n (6, {'nombre':'B3'}),\n (7, {'nombre':'B4'}),\n (8, {'nombre':'M2P'}),\n (9, {'nombre':'M4'}),\n (10, {'nombre':'I1'}),\n (11, {'nombre':'I2'}),\n (12, {'nombre':'I3'}),\n (13, {'nombre':'M7'}),\n (14, {'nombre':'M5'}),\n (15, {'nombre':'I4'}),\n (16, {'nombre':'I6'}),\n (17, {'nombre':'I7'}),\n (18, {'nombre':'A1'}),\n (19, {'nombre':'A4'}),\n (20, {'nombre':'A5'}),\n (21, {'nombre':'A6'}),\n (22, {'nombre':'A7'}),\n (23, {'nombre':'A8'}),\n (24, {'nombre':'A9'}),\n (25, {'nombre':'I8'}),\n (26, {'nombre':'I9'}),\n (27, {'nombre':'A2'}),\n (28, {'nombre':'A3'})]\n\n\nenlacesG = [(3,1), (4,2), (5,2), (6,1), (6,4), (6,5), (7,4), (8,2), (9,5), \\\n(9,4), (10,5), (10,6), (11,3), (11,6), (12,3), (13,8), (14,8), (15,10), (15,11), \\\n(16,12), (16,6), (17,6), (18,10), (18,11), (19,13), (19,14), (19,9), (21,9), \\\n(22,6), (22,17), (23,19), (24,9), (25,9), (26,9), (27, 12), (28,6), (28,11)]\nG = nx.Graph()\nG.add_nodes_from(nodosG)\nG.add_edges_from(enlacesG)\n\n# sistemas 2009 model 1 (relaciones originales acm68)\nnodosH = [(2, {'nombre':'M1'}),\n (5, {'nombre':'M2'}),\n (3, {'nombre':'B2'}),\n (1, {'nombre':'B1'}),\n (11, {'nombre':'I2'}),\n (9, {'nombre':'M4'}),\n (4, {'nombre':'M3'}),\n (10, {'nombre':'I1'}),\n (22, {'nombre':'A7'}),\n (6, {'nombre':'B3'}),\n (8, {'nombre':'M2P'}),\n (21, {'nombre':'A6'}),\n (14, {'nombre':'M5'}),\n (16, {'nombre':'I6'}),\n (13, {'nombre':'M7'}),\n (20, {'nombre':'A5'}),\n (7, {'nombre':'B4'}),\n (18, {'nombre':'A1'}),\n (17, {'nombre':'I7'}),\n (12, {'nombre':'I3'}),\n (15, {'nombre':'I4'}),\n (24, {'nombre':'A9'}),\n (19, {'nombre':'A4'}),\n (23, {'nombre':'A8'}),\n (25, {'nombre':'A2'})\n]\n\nenlacesH = [(11,3),\n (11,8),\n (9,5),\n (9,4),\n (4,2),\n (10,3),\n (10,6),\n (22,6),\n (22,17),\n (6,1),\n (8,2),\n (21,10),\n (14,9),\n (16,6),\n (16,12),\n (13,8),\n (20,10),\n (7,1),\n (7,5),\n (7,4),\n (18,10),\n (18,11),\n (17,6),\n (17,16),\n (12,3),\n (12,8),\n (15,10),\n (15,11),\n (15,12),\n (24,10),\n (19,15),\n (19,13),\n (23, 19),\n (25,12)]\n\nH = nx.Graph()\nH.add_nodes_from(nodosH)\nH.add_edges_from(enlacesH)\n\ndef getMCS(g1, g2):\n matching_graph = nx.Graph()\n for n1, n2 in g2.edges():\n if g1.has_edge(n1,n2):\n matching_graph.add_edge(n1,n2)\n components = nx.connected_components(matching_graph)\n largest_component = max(components, key=len) \n #return nx.induced_subgraph(matching_graph, largest_component)\n return nx.subgraph(matching_graph, largest_component)\n\ndef getGED(num_mcs, numg1, numg2):\n return 1 - (abs(num_mcs) / max(abs(numg1), abs(numg2)))\n\n# Resultado del grafo MCS entre H y G\nprint(\"Modelo 1: Subgrafo MCS entre H y G\")\nmcs = getMCS(H,G)\nnum_node = len(mcs.node)\nprint((mcs.node))\nprint(mcs.edges)\nprint(getGED(num_node, len(H.node), len(G.node)))\n\nged1=gm.GraphEditDistance(1,1,1,1) # all edit costs are equal to 1\nresult1=ged1.compare([H, G],None)\nprint(\"Distancia sin 
normalizar:\")\nprint(result1)\ngt1 = gm.MCS()\nresult2 = gt1.compare([H, G], None)\nout1 = ged1.distance(result2)\nprint(\"resultado de MCS de H (sistemas 2009) subgrafo con G acm68:\")\nprint(out1)\n\nprint(\"similaridad\")\nout1 = ged1.similarity(result2)\nprint(out1)\n\n# sistemas 2009 model 2 (relaciones originales de la carrera)\n\nG = nx.Graph()\nG.add_nodes_from(nodosG)\nG.add_edges_from(enlacesG)\n\nT = nx.Graph()\nT.add_nodes_from(nodosH)\n\nenlacesT = [(11,3),\n (9,5),\n (4,2),\n (10,3),\n (22,10),\n (6,4),\n (8,9),\n (12,22),\n (14,9),\n (13,8),\n (7,14),\n (18,22),\n (18,6),\n (17,16),\n (12,17),\n (15,18),\n (24,12),\n (24,15),\n (25,23)]\nT.add_edges_from(enlacesT)\n# Resultado del grafo MCS entre T y G\nprint(\"Modelo 1: Subgrafo MCS entre T y G\")\nmcs = getMCS(T,G)\nprint((mcs.node))\nprint(mcs.edges)\n\nprint(\"Modelo 2\")\n\nged2=gm.GraphEditDistance(1,1,1,1) # all edit costs are equal to 1\nresult3=ged2.compare([T, G],None)\nprint(\"Distancia sin normalizar:\")\nprint(result3)\n\ngt2 = gm.MCS()\nresult4 = gt2.compare([T, G], None)\nout2 = ged2.distance(result4)\nprint(\"resultado de MCS de T (sistemas 2009) relaciones originales con subgrafo con G acm68:\")\nprint(out2)\nprint(\"similaridad\")\nout2 = ged2.similarity(result4)\nprint(out2)\n\n","repo_name":"jorge-alvarado-revata/comparatool","sub_path":"code/gmatch/demo4.py","file_name":"demo4.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38864230185","text":"\"\"\"\"\nFormatter project by Andrew Li\nDescription: given a txt file, replace string with the start of the line \nto the first replacement string for a set number of times\n\"\"\"\n# import os to get file path\nimport os\n\n\ndef main():\n\n # input file name here\n # - file name = file name in this dir\n # - new_file = new file to write to\n # - replace_string = string to be replaced\n # - single_char = the single of char\n # - times = number of times to be copied\n file_ = 'lab2a'\n new_file = 'lab2a_new'\n replace_string = '\"\"'\n single_char = '\"'\n times = 51\n\n # get path to current dir\n current_folder = os.path.dirname(os.path.abspath(__file__))\n my_file = os.path.join(current_folder, file_ + '.txt')\n new_file = os.path.join(current_folder, new_file + '.txt')\n\n # with the file open, for every line in file,\n # if line = '\"\"' in the line,\n # for every single char replace with number or whatever at the start of line\n # in the end, join char and multiply by length plus line break\n with open(my_file, 'r+') as fp:\n data = fp.readlines()\n for index, line in enumerate(data):\n if replace_string in line:\n number = []\n for char in line:\n if char != single_char:\n number.append(char)\n else:\n break\n number = ''.join(number)\n\n data[index] = str(number * times) + '\\n '\n\n # print data and write to new_file\n new_data = ''.join(data)\n print('Data:\\n', new_data)\n\n with open(new_file, 'w') as fp:\n fp.write(''.join(new_data))\n\n\n return 0\n\n# system calls main\nif __name__ == \"__main__\":\n main()","repo_name":"Zeyu-Li/data_formatting","sub_path":"formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12356475230","text":"import os\n\n\n_TRAIN_IMAGES_TAR = \"ILSVRC2012_img_train.tar\"\n_VAL_IMAGES_DIR = \"ILSVRC2012_img_val.tar\"\n_DEVKIT_TAR = \"ILSVRC2012_devkit_t12.tar.gz\"\n\n\ndef 
ensure_imagenet_manual_download(source_dir, split, devkit=False):\n    \"\"\"Ensures that the ImageNet archive(s) for the requested split have been\n    manually downloaded to the required locations.\n\n    Args:\n        source_dir: the dataset directory\n        split: the split of interest. Supported values are\n            ``(\"train\", \"validation\")``\n        devkit (False): whether to ensure that the devkit archive is present\n\n    Raises:\n        OSError: if the required files are not present\n    \"\"\"\n    if split == \"train\":\n        archive_name = _TRAIN_IMAGES_TAR\n    elif split == \"validation\":\n        archive_name = _VAL_IMAGES_DIR\n    else:\n        raise ValueError(\n            \"Unsupported split '%s'; Supported values are \"\n            \"('train', 'validation')\" % split\n        )\n\n    _ensure_archive(archive_name, source_dir)\n\n    if devkit:\n        devkit_name = _DEVKIT_TAR\n        _ensure_archive(devkit_name, source_dir)\n\n\ndef _ensure_archive(archive_name, source_dir):\n    if source_dir is None:\n        _raise_imagenet_error(\n            \"You must provide a `source_dir` in order to load the ImageNet \"\n            \"dataset.\"\n        )\n\n    archive_path = os.path.join(source_dir, archive_name)\n    if not os.path.isfile(archive_path):\n        _raise_imagenet_error(\n            \"Archive '%s' not found in directory '%s'.\"\n            % (archive_name, source_dir)\n        )\n\n\ndef _raise_imagenet_error(msg):\n    raise OSError(\n        \"\\n\\n\"\n        + msg\n        + \"\\n\\n\"\n        + \"You must download the source files for the ImageNet dataset \"\n        \"manually.\"\n        + \"\\n\\n\"\n        + \"Run `fiftyone zoo datasets info imagenet-2012` for more information\"\n    )\n","repo_name":"voxel51/fiftyone","sub_path":"fiftyone/utils/imagenet.py","file_name":"imagenet.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","stars":5416,"dataset":"github-code","pt":"30"} +{"seq_id":"14061055678","text":"import numpy as np\n\nG = np.loadtxt(\"graphs/dodecahedron.txt\", int)\n\ndef order(G):\n    return len(G)\n\ndef degree(G, v):\n    return np.sum(G[v])\n\ndef distance(G, v1, v2):\n    distance = 0\n    if v1 == v2:\n        return distance\n    visited = closedNeighborhood(G, v1)\n    distance += 1\n    while v2 not in visited and distance < order(G):\n        for v in visited.copy():\n            visited = visited | closedNeighborhood(G, v)\n        distance += 1\n    if v2 in visited:\n        return distance\n    else:\n        return -1\n#        return 'not connected'\n\ndef closedNeighborhood(G, v):\n    neighborhood = set()\n    for i in range(order(G)):\n        if G[v][i] == 1:\n            neighborhood.add(i)\n#    print (neighborhood)\n    return neighborhood\n\ndef find_all_paths(G, start, end, path=[]):\n    path = path + [start]\n    if start == end:\n        return [path]\n    paths = []\n    neighbors = [x for x in closedNeighborhood(G,start)]\n    for n in neighbors:\n        if n not in path:\n            newpaths = find_all_paths(G, n, end, path)\n            for newpath in newpaths:\n                paths.append(newpath)\n    return sorted(paths, key=len)\n\ndef find_path(G, start, end, path=[]):\n    path = path + [start]\n    if start == end:\n        return path\n    neighbors = [x for x in closedNeighborhood(G,start)]\n    for n in neighbors:\n        if n not in path:\n            newpath = find_path(G, n, end, path)\n            if newpath:\n                return newpath\n    return None\n\ndef find_cycle(G, v):\n    neighbors = [x for x in closedNeighborhood(G, v) if degree(G, x) > 1]\n    length = 0\n    for n in neighbors:\n        paths = find_all_paths(G, v, n)\n        length = len(paths[1])\n    if (length > 2):\n        return length\n    else:\n        return 0\n\ndef eccentricity(G, v):\n    # loop through all vertices and find longest path to the one passed as arg\n    # save the longest one of all these and thats your answer\n    maxLength = 0\n    for x in range(order(G)):\n        if (x != v):\n#            
print(find_all_paths(G, v, x))\n pathLengths = sorted([len(x) for x in find_all_paths(G, v, x)],\n reverse=True)\n# if np.amax(pathLengths) > maxLength:\n# maxLength = np.amax(find_all_paths(G, v, x))\n# return maxLength\n return pathLengths[0] - 1\n\n","repo_name":"kastentx/python-discrete-math","sub_path":"cycle.py","file_name":"cycle.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72171504086","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\"\"\"Top-level package for Alfred.\"\"\"\n\n__author__ = \"\"\"Gustavo Sampaio\"\"\"\n__email__ = 'gbritosampaio@gmail.com'\n__version__ = '0.1.7'\n\nimport os\nimport sys\nimport io\nimport string\nimport toml\nimport subprocess\nimport re\nfrom collections import defaultdict\n\nfrom alfredcmd.alfred_exception import AlfredException\nfrom alfredcmd.cloud import Cloud\n\nclass Alfred:\n def __init__(self, config=None, procFds=(sys.stdin, sys.stdout, sys.stderr)):\n if config is None:\n home = os.path.expanduser(\"~\")\n self._configFile = os.path.join(home, '.alfred', 'alfred.toml')\n else:\n self._configFile = config\n self._loadConfig()\n self._procFds = procFds\n\n self._defaultShellExecutor = 'bash'\n\n self._cloud = Cloud(self._config)\n\n def _loadConfig(self):\n try:\n with io.open(self._configFile, mode='r', encoding='utf-8') as f:\n self._config = toml.load(f)\n except IOError:\n # config file not found. Use defaults\n self._config = dict()\n except toml.TomlDecodeError:\n raise AlfredException('invalid config file')\n self._config.setdefault('variables', {})\n\n def run(self, args):\n if len(args) >= 1:\n if args[0] == '@help':\n if len(args) > 1:\n return self.processHelpCommand(args)\n else:\n print('help')\n return 0\n elif args[0] == '@list':\n self.listCommands()\n return 0\n elif args[0] == '@version':\n print('v{}'.format(__version__))\n return 0\n elif args[0] == '@login':\n self._cloud.login()\n return 0\n elif args[0] == '@sync':\n self._cloud.sync(self._configFile)\n return 0\n\n return self.processCommand(args)\n\n def listCommands(self):\n cmds = self._config['command']\n\n for cmdName in cmds.keys():\n cmd = self._getCommand(cmdName)\n print('$ al '+cmdName)\n print('> {}'.format(cmd['exec']))\n if 'help' in cmd:\n print('{}\\t'.format(cmd['help']))\n print('\\tformat: {}'.format(cmd['format']))\n print('\\ttype: {}'.format(cmd['type']))\n print('\\techo: {}'.format(cmd['echo']))\n print('')\n\n def _getCommand(self, cmdName):\n try:\n cmd = self._config['command'][cmdName]\n except KeyError:\n raise AlfredException('no command \"{}\"\\n\\nYou can create it in ~/.alfred/alfred.toml'.format(cmdName))\n\n cmd.setdefault('format', True)\n cmd.setdefault('type', 'shell')\n cmd.setdefault('echo', False)\n return cmd\n\n def _getFunction(self, funcName):\n try:\n func = self._config['function'][funcName]\n except KeyError:\n raise AlfredException('no function \"{}\"\\n\\nYou can create it in ~/.alfred/alfred.toml'.format(funcName))\n\n func.setdefault('format', True)\n func.setdefault('type', 'shell')\n func.setdefault('echo', False)\n return func\n\n def processCommand(self, args):\n cmd = self._getCommand(args[0])\n\n if cmd['type'] == 'shell':\n self._executeShell(cmd, args[1:])\n elif cmd['type'] == 'python':\n self._executePy(cmd, args[1:])\n else:\n raise AlfredException('Invalid command type: {}'.format(cmd['type']))\n\n def processHelpCommand(self, args):\n cmd = 
self._getCommand(args[1])\n\n        try:\n            print(cmd['help'])\n        except KeyError:\n            print(cmd['exec'])\n\n    def _buildArgDict(self, args):\n        argsDict = defaultdict(str)\n        # variables\n        for key, value in self._config['variables'].items():\n            argsDict[key] = value\n\n        positionalIndex = 0\n        for i, arg in enumerate(args):\n\n            if arg.startswith('--'):\n                # Make sure we have a key\n                if len(arg) > 2:\n                    pack = arg[2:].split('=')\n                    if len(pack) == 1:\n                        # No value associated. Assume this is a bool\n                        argsDict[pack[0]] = True\n                    else:\n                        # Key/value pair\n                        argsDict[pack[0]] = pack[1]\n            else:\n                argsDict[positionalIndex] = arg\n                positionalIndex += 1\n\n        argsDict['@'] = ' '.join(args)\n        argsDict['#'] = len(args)\n        argsDict['env'] = os.environ\n\n        return argsDict\n\n    def _executePy(self, cmd, args):\n        if 'type' in cmd and not cmd['type'] == 'python':\n            raise AlfredException('Invalid command type. Expected \"python\" Received: {}'.format(cmd['type']))\n\n        argsDict = self._buildArgDict(args)\n\n        cmdLine = cmd['exec']\n        try:\n            filename, funcname = cmdLine.split('::')\n        except ValueError:\n            raise AlfredException('Invalid execution of python script \"{}\". Please use the format: \"script.py::FuncName\"'.format(cmdLine))\n\n        filename = os.path.expanduser(filename)\n        import module_importer\n        module = module_importer.importModuleFromFile('script', filename)\n        if not hasattr(module, funcname):\n            raise AlfredException('Function \"{}\" was not found in module \"{}\"'.format(funcname, filename))\n\n        try:\n            func = getattr(module, funcname)\n            func(argsDict)\n        except Exception as e:\n            raise AlfredException('Error trying to execute module', e)\n\n    def _executeShell(self, cmd, args):\n        if 'type' in cmd and not cmd['type'] == 'shell':\n            raise AlfredException('Invalid command type. Expected \"shell\" Received: {}'.format(cmd['type']))\n\n        argsDict = self._buildArgDict(args)\n\n        cmdLine = cmd['exec']\n        if 'format' in cmd and cmd['format']:\n            fmt = AlfredFormatter(self)\n            cmdLine = fmt.format(cmdLine, argsDict)\n\n        if 'echo' in cmd and cmd['echo']:\n            print('> {}'.format(cmdLine))\n\n        if cmdLine.count('\\n') > 0:\n            import tempfile\n            fhos, scriptfile = tempfile.mkstemp(prefix='alfred-tmp-')\n            with io.open(fhos, mode='w') as fh:\n                fh.write(cmdLine)\n            cmdLine = '{} {}'.format(self._defaultShellExecutor, scriptfile)\n\n        self._spawnShell(cmdLine)\n\n    def _spawnShell(self, cmdLine, pipeStdout=False):\n        if pipeStdout == True:\n            stdout = subprocess.PIPE\n        else:\n            stdout = self._procFds[1]\n\n        process = subprocess.run(\n            cmdLine,\n            stdin=self._procFds[0],\n            stdout=stdout,\n            stderr=self._procFds[2],\n            shell=True)\n\n        if pipeStdout == True:\n            out = str(process.stdout, 'utf-8')\n        else:\n            out = None\n\n        return out\n\n    def executeFunction(self, funcName, args):\n        rfd, wfd = os.pipe()\n        func = self._getFunction(funcName)\n        out = self._spawnShell(func['exec'], pipeStdout=True)\n        # remove trailing line-break\n        out = out[:-1]\n        return out\n\nclass AlfredFormatter(string.Formatter):\n    def __init__(self, alfred):\n        self.alfred = alfred\n\n    def get_value(self, key, args, kwargs):\n        # function\n        if isinstance(key, str):\n            match = re.match('(\\\w+)\\\((.*)\\\)', key)\n            if match is not None:\n                funcArgs = match.group(2).split(',')\n                return self.alfred.executeFunction(match.group(1), funcArgs)\n        return args[0][key]\n","repo_name":"GustavoKatel/alfredcmd-legacy","sub_path":"alfredcmd/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} 
+{"seq_id":"70712549526","text":"import unittest\n\nimport launch_testing.actions\nimport pytest\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, ExecuteProcess\nfrom launch_testing.util import KeepAliveProc\n\nfrom moveitpy_simple.moveit_configs_utils.launch_utils import launch_configurations\n\n\n@launch_configurations\ndef launch_descriptions(launch_configurations):\n assert launch_configurations.robot_name == \"test_robot\"\n assert launch_configurations.robot_ip == \"0.0.0.0\"\n assert launch_configurations.robot_length == \"2\"\n with pytest.raises(AttributeError):\n launch_configurations.robot_height\n\n\ndef generate_test_description():\n ls_executable = ExecuteProcess(\n cmd=[\"ls\", \"-l\"],\n )\n return LaunchDescription(\n [\n DeclareLaunchArgument(\"robot_name\"),\n DeclareLaunchArgument(\"robot_ip\"),\n DeclareLaunchArgument(\"robot_length\", default_value=\"2\"),\n ls_executable,\n KeepAliveProc(),\n launch_testing.actions.ReadyToTest(),\n *launch_descriptions(),\n ],\n ), {\"test_executable\": ls_executable}\n\n\nclass TestWaitForCompletion(unittest.TestCase):\n # Waits for test to complete, then waits a bit to make sure result files are generated\n def test_gtest_run_complete(self, test_executable):\n self.proc_info.assertWaitForShutdown(test_executable, timeout=4000.0)\n\n\n@launch_testing.post_shutdown_test()\nclass TestProcessPostShutdown(unittest.TestCase):\n # Checks if the test has been completed with acceptable exit codes\n def test_gtest_pass(self, proc_info, test_executable):\n launch_testing.asserts.assertExitCodes(proc_info, process=test_executable)\n","repo_name":"JafarAbdi/moveitpy_simple","sub_path":"moveitpy_simple/moveit_configs_utils/test/test_launch_utils.py","file_name":"test_launch_utils.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19590955853","text":"import pandas as pd\nimport pymysql.cursors\n\n\ndef get_mysql_connect():\n return pymysql.Connect(host=\"localhost\", port=3306,\n user=\"root\", passwd=\"\",\n db=\"survey-data\", charset=\"utf8\")\n\n\ndef read_excel_content():\n return pd.read_excel(\"data.xls\", converters={\"门类\": str, \"大类\": str, \"种类\": str, \"小类\": str, \"类别名称\": str, \"说明\": str})\n\n\ndef save_loc_to_db(index, loc, connect):\n print(index, \" save \", loc)\n insert_sql = r\"INSERT INTO zzbz_s_gmjjhyfl (id, ml, dl, zl, xl, lbmc, sm) VALUES ( '%d', '%s', '%s','%s','%s','%s','%s')\"\n data = (index, loc[r\"门类\"], loc[r\"大类\"], loc[r\"种类\"], loc[r\"小类\"], loc[r\"类别名称\"], loc[r\"说明\"])\n sql = insert_sql % data\n connect.cursor().execute(sql)\n connect.commit()\n\n\nif __name__ == '__main__':\n data = read_excel_content()\n connect = get_mysql_connect()\n try:\n for index in data.index:\n loc = data.loc[index]\n save_loc_to_db(index, loc, connect)\n finally:\n connect.close()\n","repo_name":"MengFly/Learning","sub_path":"python/moudle/保存数据分类数据到数据库.py","file_name":"保存数据分类数据到数据库.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39798432294","text":"import hashlib\nimport xlrd\nimport xlwt\nimport os\nALLOWED_EXTENSIONS = ['xlsx', 'xls']\n\n\ndef get_file_path():\n \"\"\"\n 获取 Excel 和加密 excel 的路径\n \"\"\"\n encry = os.path.join(os.getcwd(), \"encry\")\n excel = os.path.join(os.getcwd(), \"encry\", \"excel\")\n encry_excel = os.path.join(os.getcwd(), \"encry\", \"encry_excel\")\n if 
not os.path.exists(encry):\n os.mkdir(encry)\n os.mkdir(excel)\n os.mkdir(encry_excel)\n\n return excel, encry_excel\n\n\nexcel_path, encry_excel_path = get_file_path()\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\ndef encryption_str(string, encry_model=\"md5_32\", encry_style=True):\n # 加密为 utf-8 编码\n utf_8_str = str(string).encode(\"utf8\")\n # 函数字典\n param_dict = {\n \"md5_32\": hashlib.md5(utf_8_str),\n \"md5_16\": hashlib.md5(utf_8_str),\n \"sha1\": hashlib.sha1(utf_8_str),\n \"sha224\": hashlib.sha224(utf_8_str),\n \"sha256\": hashlib.sha256(utf_8_str),\n \"sha512\": hashlib.sha512(utf_8_str)\n }\n encry_result = param_dict[encry_model].hexdigest()\n if encry_model == 'md5_16':\n encry_result = encry_result[8:-8]\n # 返回结果\n return encry_result if encry_style == \"小写\" else encry_result.upper()\n\n\ndef encryption_clos(encry_cols=None, encry_model=None, encry_style=None):\n # 当加密的是空字符串是不加密,保留空字符串\n if not encry_model or not encry_cols or not encry_style:\n return None\n if os.listdir(excel_path):\n filename = os.path.join(excel_path, os.listdir(excel_path)[0])\n workbook = xlrd.open_workbook(filename=filename)\n table = workbook.sheets()[0]\n # excel 中没有数据\n if table.ncols == 0:\n return \"excel 中没有数据!\"\n if max(encry_cols) - 1 > table.ncols:\n # 输入的加密列不在excel中\n return str(max(encry_cols)) + \"超过excel中最大的列\"\n # 开始加密数据\n encry_workbook = xlwt.Workbook()\n work_sheet = encry_workbook.add_sheet(\"md5加密数据\")\n c = 0\n for col in encry_cols:\n r = 0\n col_values = table.col_values(colx=col - 1)\n work_sheet.write(r, c, str(col))\n work_sheet.write(r, c + 1, str(col) + \"_\" + encry_model + \"_\" + encry_style)\n for v in col_values:\n if v == '':\n encry_v = v\n else:\n encry_v = encryption_str(string=v, encry_model=encry_model, encry_style=encry_style)\n work_sheet.write(r + 1, c, v)\n work_sheet.write(r + 1, c + 1, encry_v)\n r += 1\n c += 2\n encry_file = os.path.join(encry_excel_path, '加密数据.xlsx')\n encry_workbook.save(encry_file)\n # 返回md5文件的前5行数据\n encry_table_info = get_table_values(file_path=encry_excel_path)\n return encry_table_info\n return \"服務器內部錯誤\"\n\n\ndef get_table_values(file_path=excel_path):\n # 获取上传excel的前 5 行数据,默认加密第一个表中的数据\n if os.listdir(file_path):\n filename = os.path.join(file_path, os.listdir(file_path)[0])\n workbook = xlrd.open_workbook(filename=filename)\n table = workbook.sheets()[0]\n # 默认返回前5行数据\n # table_rows = 5 if table.nrows >= 5 else table.nrows\n table_rows = table.nrows\n if table_rows == 0:\n return {\"status\": 400, \"msg\": \"excel中没有数据!\"}\n # 获取数据\n row_list = []\n for r in range(table_rows):\n row_dict = {}\n for k, v in enumerate(table.row_values(rowx=r)):\n row_dict[str(k + 1)] = v\n row_list.append(row_dict)\n return {\"table_cols\": table.ncols, \"row_list\": row_list, \"excel_name\": os.listdir(file_path)[0],\n \"sheet_name\": workbook.sheet_names()[0]}\n # 没有excel文件或者上传的不是excel文件\n return None\n","repo_name":"myrensheng/encryption","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"72128745686","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2016/9/29 10:30\n# @Author : wplct\nimport time\n\nfrom Base.Thread import BaseThread, RunFunctionThread\nfrom Web import WebTask\nimport requests\nimport Queue\nfrom EasySpider.Base.Content import Content\nfrom .DB.DBTask import DBTask\nfrom 
EasySpider.Web import Web, WebTask\nimport functools\n\n\nclass SpiderThread(RunFunctionThread):\n def __init__(self, content, func, in_queue, out_queue_tuple=None, *args, **kwargs):\n assert hasattr(func, '__call__')\n assert isinstance(in_queue, Queue.Queue) or in_queue is None\n RunFunctionThread.__init__(self, func=func, queue=in_queue, content=content, *args, **kwargs)\n if out_queue_tuple is None:\n self.out_queue_tuple = None\n elif isinstance(out_queue_tuple, tuple):\n self.out_queue_tuple = out_queue_tuple\n else:\n self.out_queue_tuple = (out_queue_tuple,)\n\n def _run(self, data):\n task = self.func(data)\n if self.out_queue_tuple is not None and task is not None:\n if isinstance(task, tuple):\n for i in range(len(task)):\n self.put_task_list(task[i], self.out_queue_tuple[i])\n else:\n self.put_task_list(task, self.out_queue_tuple[0] if len(self.out_queue_tuple) else None)\n\n def put_task_list(self, task, queue):\n if isinstance(task, list):\n for t in task:\n self.put_task(t, queue)\n else:\n self.put_task(task, queue)\n\n def put_task(self, task, queue):\n if task is None:\n return\n if isinstance(task, WebTask):\n task.result_queue = queue\n self.web.put(task)\n elif isinstance(task, DBTask):\n task.result_queue = queue\n self.db.put(task)\n else:\n if queue is not None:\n queue.put(task)\n\n\nclass SpiderFunc(object):\n def __init__(self, func_type, func, next_func):\n \"\"\"\n :param next_func 运行完当前方法,接下来该的方法,可以有多个\n :type next_func tuple or str 方法名或方法名元组\n :param func_type 方法类型 默认为 wait_task\n wait_task 等待上一个任务到来\n init 爬虫init时运行 会把返回值put到main队列\n main 等待爬虫自带任务队列的任务\n \"\"\"\n self.func = func\n self.spider_self = None\n self.thread = None\n self.next_func_tuple = None\n if next_func is not None:\n if not isinstance(next_func, tuple):\n next_func = (next_func,)\n self.next_func_tuple = next_func\n self.func_type = func_type\n self.queue = None\n self.next_func_queue = None\n\n def __call__(self, *args, **kwargs):\n return self.func(self.spider_self, *args, **kwargs)\n\n def init_thread(self, spider, thread_class=SpiderThread):\n assert isinstance(spider, Spider)\n assert issubclass(thread_class, SpiderThread)\n self.spider_self = spider\n # 找到或创建自己的任务队列\n if self.func_type == \"main\":\n self.queue = spider.queue\n self.spider_self.spider_func_queue_dict[self.func.__name__] = self.queue\n elif self.func_type == \"init\":\n self.queue = None\n if self.next_func_tuple is None:\n self.next_func_queue = (spider.queue,)\n else:\n self.queue = self.get_queue(self.func.__name__)\n # 找到或创建下一步的队列\n queue_list = []\n if self.next_func_tuple is not None:\n for next_func in self.next_func_tuple:\n assert isinstance(next_func, str)\n queue = self.get_queue(next_func)\n queue_list.append(queue)\n self.next_func_queue = tuple(queue_list) if self.next_func_queue is None else self.next_func_queue\n # 创建线程\n self.thread = thread_class(\n content=spider.content,\n func=self.__call__,\n in_queue=self.queue,\n out_queue_tuple=self.next_func_queue\n )\n return self.thread\n\n def get_queue(self, name):\n if name in self.spider_self.spider_func_queue_dict:\n return self.spider_self.spider_func_queue_dict[name]\n else:\n queue = Queue.Queue()\n self.spider_self.spider_func_queue_dict[name] = queue\n return queue\n\n\ndef spider_func(func_type=\"wait_task\", next_func=None):\n \"\"\"\n 将类方法设置为SpiderFunc的装饰器\n :param next_func 运行完当前方法,接下来该的方法,可以有多个\n :type next_func tuple or str 方法名或方法名元组\n :param func_type 方法类型 默认为 wait_task\n wait_task 等待上一个任务到来\n init 无限循环,会把返回值put到main队列\n main 等待爬虫自带任务队列的任务\n 
\"\"\"\n\n def _spider_func(func):\n return SpiderFunc(func=func, func_type=func_type, next_func=next_func)\n\n return _spider_func\n\n\nclass Spider(BaseThread):\n def __init__(self, content, thread_class=SpiderThread, *args, **kwargs):\n \"\"\"\n 爬虫类的父类\n :type content Content\n :param thread_class 运行produce方法和consumption方法的进程类\n 需要是SpiderThread的子类\n \"\"\"\n BaseThread.__init__(self, content=content, *args, **kwargs)\n assert issubclass(thread_class, SpiderThread)\n self.name = \"Spider\"\n self.thread_list = []\n self.spider_func_list = []\n self.spider_func_thread = {}\n self.spider_func_queue_dict = {}\n self.queue = Queue.Queue()\n self.spider_func_queue_dict['main'] = self.queue\n self.init_spider_func()\n self.state = False\n\n def init_spider_func(self):\n for k in dir(self):\n v = getattr(self, k)\n if isinstance(v, SpiderFunc):\n self.spider_func_list.append(v)\n for s in self.spider_func_list:\n s.init_thread(self)\n for v in self.spider_func_list:\n self.thread_list.append(v.thread)\n\n def run(self):\n for thread in self.thread_list:\n thread.start()\n self.info(\"启动\")\n self.state = True\n\n def stop(self):\n for thread in self.thread_list:\n assert isinstance(thread, SpiderThread)\n thread.stop()\n self.info(\"停止\")\n self.state = False\n\n def put(self, data):\n self.queue.put(data)\n\n def load(self, spider_func_queue_dict):\n assert isinstance(spider_func_queue_dict, dict)\n for k, v in spider_func_queue_dict.items():\n self.spider_func_queue_dict[k] = v\n","repo_name":"corpsepiges/EasySpider","sub_path":"EasySpider/Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":6798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"3459469861","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\nFilePath: /PPX/pyapp/package/dmg/rename.py\nAuthor: 潘高\nLastEditors: 潘高\nDate: 2023-04-24 21:15:12\nLastEditTime: 2023-04-26 11:34:12\nDescription: 将 setup.dmg 改名为 PPX-V1.0.0_macOS.dmg\nusage: 运行前,请确保本机已经搭建Python3开发环境,且已经安装 模块。\n 详细教程请移步至 https://blog.pangao.vip/Python环境搭建及模块安装/\n'''\n\nfrom pathlib import Path\nimport sys\nsys.path.append(str(Path(__file__).absolute().parent.parent.parent))\nfrom config.config import Config\n\ncfg = Config()\n\nappName = cfg.appName\nappVersion = cfg.appVersion\n\nbuildDir = Path(Path(__file__).absolute().parent.parent.parent.parent.joinpath('build'))\n\next = 'dmg'\nappSys = 'macOS'\n\nfromP = Path(buildDir.joinpath(f'setup.{ext}'))\ntoP = fromP.with_name(f'{appName}-{appVersion}_{appSys}.{ext}')\n\nif fromP.exists():\n fromP.rename(toP)\n","repo_name":"pangao1990/PPX","sub_path":"pyapp/package/dmg/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"30"} +{"seq_id":"15325963541","text":"# Advent of Code 2022 Day 1\r\n# https://adventofcode.com/2022/day/1\r\n\r\nimport time\r\n\r\nstarting_time = time.time()\r\n\r\nwith open('day01.txt') as f:\r\n lines = f.readlines()\r\n lst = [x for x in lines]\r\n\r\nelves = dict()\r\nelfnum = 0\r\nfor n in lst:\r\n if n == '\\n':\r\n elfnum += 1\r\n elif elfnum in elves:\r\n elves[elfnum] += int(n[:-1])\r\n else:\r\n elves[elfnum] = int(n[:-1])\r\n# for i in range(5):\r\n# print(i,elves[i])\r\n\r\n#elf0 = [6750,6538,5292,4635,6855,4137,3840,4691,1633,6008,2447,1448,4061]\r\n#print(sum(elf0))\r\nmaxelf = max(elves,key=elves.get)\r\nprint(\"Max of elves:\",maxelf,elves[maxelf])\r\n# d = {\"q\": 18, \"z\": 10, 
\"o\": 13}\r\n# print(\"Max of d:\",max(d,key=d.get),d[max(d)])\r\nx = list(elves.values())\r\nx.sort(reverse=True)\r\nprint(x[:3],sum(x[:3]))\r\n\r\nprint(\"Time (secs):\",time.time()-starting_time)\r\n","repo_name":"hackingmath/Advent_of_code_2022","sub_path":"day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73574069846","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n\ndef main():\n config = json.load(open(\"config/config.json\"))\n os.makedirs(\"data/out\", exist_ok=True)\n os.makedirs(\"data/profiling\", exist_ok=True)\n\n metadata = pd.read_csv(\n \"data/raw/movie.metadata.tsv\",\n converters={\"genres\": lambda x: list(eval(x).values())},\n delimiter=\"\\t\",\n header=None,\n index_col=\"id\",\n names=[\"id\", \"genres\"],\n usecols=[0, 8]\n )\n\n summaries = pd.read_csv(\n \"data/raw/plot_summaries.txt\",\n delimiter=\"\\t\",\n header=None,\n index_col=\"id\",\n names=[\"id\", \"summary\"]\n )\n\n df = summaries.merge(metadata, on=\"id\")\n\n statistics = {\n \"records\": {\n \"data/raw/movie.metadata.tsv\": len(metadata),\n \"data/raw/plot_summaries.txt\": len(summaries),\n \"merged\": len(df)\n },\n \"label cardinality\": ...,\n \"label density\": ...,\n \"normalized_genres\": ...,\n \"genres\": metadata.genres.explode().value_counts().to_dict()\n }\n\n genre_normalization = config[\"genre_normalization\"]\n\n def clean_summary(summary):\n return (\n summary\n .str.replace(r'{{.*?}}', '') # Remove Wikipedia tags\n .str.replace(r'http\\S+', '') # Remove URLs\n .str.replace(r'\\s+', ' ') # Combine whitespace\n .str.strip() # Strip whitespace\n .replace('', pd.NA) # Replace empty strings with NA\n )\n\n def normalize_genres(genres):\n normalized_genres = []\n for genre in genres:\n if genre in genre_normalization:\n normalized_genres.extend(genre_normalization[genre])\n return list(np.unique(normalized_genres)) if normalized_genres else pd.NA\n\n df = df.assign(\n summary=clean_summary(df.summary),\n genres=df.genres.apply(normalize_genres)\n ).dropna().reset_index(drop=True)\n\n mlb = MultiLabelBinarizer()\n summaries, labels = df[[\"summary\"]], pd.DataFrame(mlb.fit_transform(df.genres), columns=mlb.classes_)\n df = pd.concat([summaries, labels], axis=1)\n df.to_pickle(\"data/out/df.pkl\")\n\n statistics[\"records\"][\"data/out/df.pkl\"] = len(df)\n statistics[\"label cardinality\"] = labels.sum(1).mean()\n statistics[\"label density\"] = labels.mean(1).mean()\n statistics[\"normalized_genres\"] = labels.sum().sort_values(ascending=False).to_dict()\n with open('data/profiling/statistics.json', 'w') as f:\n json.dump(statistics, f, indent=2)\n\n fig, ax = plt.subplots(figsize=(6, 6))\n sns.heatmap(\n labels.corr(),\n annot=True,\n cbar=False,\n cmap=\"bwr\",\n fmt=\".1f\",\n square=True,\n vmin=-1,\n vmax=1,\n ax=ax,\n )\n ax.tick_params(left=False, bottom=False)\n fig.savefig(\"data/profiling/label_correlation.png\", dpi=300, bbox_inches=\"tight\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dleestat/movie-classifier","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"25293511061","text":"import sys\r\nfrom epw_bytecode import *\r\n\r\n''' The Virtual 
Machine.\r\n\r\n'''\r\n\r\nclass CollectionSlice(object):\r\n\r\n def __init__(self, collection, *idxlist):\r\n self.collection = collection\r\n self.idxlist = idxlist[0]\r\n\r\n def getValue(self):\r\n if isinstance(self.collection, CollectionSlice):\r\n collection = self.collection.getValue()\r\n else:\r\n collection = self.collection\r\n if len(self.idxlist) == 1:\r\n return collection[self.idxlist[0]]\r\n elif len(self.idxlist) == 2:\r\n return collection[slice(self.idxlist[0], self.idxlist[1])]\r\n elif len(self.idxlist) == 3:\r\n return collection[slice(self.idxlist[0], self.idxlist[1], self.idxlist[2])]\r\n\r\n\r\nclass Undef(object):\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, Undef):\r\n return True\r\n else:\r\n return False\r\n\r\nundef = Undef()\r\n\r\nclass KwArg(object):\r\n \r\n def __init__(self, name, value):\r\n self.name = name\r\n self.value = value\r\n\r\nclass ExecutionFrame(object):\r\n def __init__(self, codeobject, pc, env):\r\n self.codeobject = codeobject\r\n self.pc = pc\r\n self.env =env\r\n\r\nclass Closure(object):\r\n def __init__(self, codeobject, env):\r\n self.codeobject = codeobject\r\n self.env = env\r\n\r\n\r\nclass VM(object):\r\n\r\n def __init__(self):\r\n # the working stack\r\n self.stack = []\r\n # frame stack\r\n self.frameStack = []\r\n\r\n self.frame = ExecutionFrame(codeobject=None, pc=None, \r\n env=self._create_top_env())\r\n\r\n\r\n def run(self, codeobject):\r\n\r\n self.frame.codeobject = codeobject\r\n self.frame.pc = 0\r\n\r\n\r\n while True:\r\n\r\n instr = self._get_next_instruction()\r\n\r\n if instr is None:\r\n if self._is_in_top_env():\r\n break\r\n else:\r\n raise VMError('Code object ended prematurely: ',\r\n self.frame.codeobject)\r\n\r\n opcode = instr.opcode\r\n if opcode == OP_PUSH:\r\n self.stack.append(self.frame.env.getVar(\r\n self.frame.codeobject.varNames[instr.args[0]]))\r\n\r\n elif opcode == OP_POP:\r\n value = self.stack.pop()\r\n self.frame.env.setVar(self.frame.codeobject.varNames[instr.args[0]], value)\r\n\r\n elif opcode == OP_PUSHC:\r\n self.stack.append(self.frame.codeobject.constants[instr.args[0]])\r\n\r\n elif opcode == OP_JUMP:\r\n self.frame.pc = instr.args[0]\r\n\r\n elif opcode == OP_FJUMP:\r\n predicate = self.stack.pop()\r\n if not predicate:\r\n self.frame.pc = instr.args[0]\r\n\r\n elif opcode == OP_FUNCTION: \r\n func_codeobject = self.frame.codeobject.constants[instr.args[0]]\r\n # closure is codobject and env\r\n closure = Closure(func_codeobject, self.frame.env)\r\n self.stack.append(closure)\r\n\r\n elif opcode == OP_KWARG:\r\n # make the keyword argument and push onto stack\r\n value = self.stack.pop()\r\n kwArgName = self.stack.pop()\r\n self.stack.append(KwArg(kwArgName, value))\r\n\r\n elif opcode == OP_CALL: # call\r\n proc = self.stack.pop()\r\n if isinstance(proc, CollectionSlice):\r\n proc = proc.getValue()\r\n # proc is either builtin or closure\r\n arglist = [self.stack.pop() for i in range(instr.args[0])]\r\n arglist.reverse()\r\n\r\n if isinstance(proc, BuiltinProc):\r\n result = proc.apply(arglist)\r\n if result is not None:\r\n self.stack.append(result)\r\n\r\n elif isinstance(proc, Closure): # user-defined function\r\n # trying to plug arguments into parameters\r\n nkwargs = 0\r\n for arg in arglist:\r\n if isinstance(arg, KwArg):\r\n nkwargs += 1\r\n nargs = len(arglist) - nkwargs\r\n argBinding = {}\r\n # initialize every parameter into undefined unless\r\n # provided by the call arguments. 
\r\n # Note that the position arguments and keyword arguments\r\n # are separated into two groups with the position arguments\r\n # in the 1st group.\r\n for ii in range(len(proc.codeobject.parms)):\r\n parmName = proc.codeobject.parms[ii]\r\n if ii < nargs:\r\n argBinding[parmName] = arglist[ii]\r\n else:\r\n argBinding[parmName] = undef\r\n # Also initialize every keyword parameter as undef.\r\n # Then plug the supplied values.\r\n for kwParmName in proc.codeobject.kwParms:\r\n argBinding[kwParmName] = undef\r\n for kwarg in arglist[nargs:]:\r\n argBinding[kwarg.name] = kwarg.value\r\n\r\n # push the frame stack\r\n self.frameStack.append(self.frame)\r\n\r\n # the env inside the func \r\n callee_env = VM_Environment(argBinding, proc.env)\r\n\r\n # change the current frame to the callee's frame\r\n self.frame = ExecutionFrame(\r\n codeobject = proc.codeobject,\r\n pc = 0, \r\n env = callee_env)\r\n\r\n else:\r\n raise VMError('Invalid object for function call: ', \r\n proc)\r\n\r\n elif opcode == OP_KWPARM:\r\n # this is the operation trying to set the default keyword parameter values\r\n kwParmName = self.frame.codeobject.kwParms[instr.args[0]]\r\n value = self.stack.pop()\r\n # only plug in the default value is none is supplied by the caller\r\n if self.frame.env.getVar(kwParmName) == undef:\r\n self.frame.env.setVar(kwParmName, value)\r\n\r\n elif opcode == OP_RETURN:\r\n self.frame = self.frameStack.pop()\r\n\r\n elif opcode == OP_SLICE:\r\n idxlist = [self.stack.pop() for i in range(instr.args[0])]\r\n idxlist.reverse()\r\n collection = self.stack.pop()\r\n self.stack.append(CollectionSlice(collection, idxlist))\r\n\r\n elif opcode in [OP_ADD, OP_SUB, OP_MUL, OP_DIV, OP_MOD, OP_GT, OP_GE, \r\n OP_EQ, OP_LE, OP_LT, OP_NE, OP_AND, OP_OR, OP_XOR]:\r\n\r\n op2 = self.stack.pop()\r\n if isinstance(op2, CollectionSlice):\r\n op2 = op2.getValue()\r\n \r\n op1 = self.stack.pop()\r\n if isinstance(op1, CollectionSlice):\r\n op1 = op1.getValue()\r\n\r\n if opcode == OP_ADD:\r\n self.stack.append(op1 + op2)\r\n\r\n elif opcode == OP_SUB:\r\n self.stack.append(op1 - op2)\r\n\r\n elif opcode == OP_MUL:\r\n self.stack.append(op1 * op2)\r\n\r\n elif opcode == OP_DIV:\r\n self.stack.append(op1 / op2)\r\n\r\n elif opcode == OP_MOD:\r\n self.stack.append(op1 % op2)\r\n\r\n elif opcode == OP_GT:\r\n self.stack.append(1 if op1 > op2 else 0)\r\n\r\n elif opcode == OP_GE:\r\n self.stack.append(1 if op1 >= op2 else 0)\r\n\r\n elif opcode == OP_EQ:\r\n self.stack.append(1 if op1 == op2 else 0)\r\n\r\n elif opcode == OP_LE:\r\n self.stack.append(1 if op1 <= op2 else 0)\r\n\r\n elif opcode == OP_LT:\r\n self.stack.append(1 if op1 < op2 else 0)\r\n\r\n elif opcode == OP_NE:\r\n self.stack.append(1 if op1 != op2 else 0)\r\n\r\n elif opcode == OP_AND:\r\n self.stack.append(1 if op1 and op2 else 0)\r\n\r\n elif opcode == OP_OR:\r\n self.stack.append(1 if op1 or op2 else 0)\r\n\r\n elif opcode == OP_XOR:\r\n self.stack.append(1 if op1 != op2 else 0)\r\n\r\n elif opcode == OP_NOT:\r\n op = self.stack.pop()\r\n self.stack.append(1 if (not op) else 0)\r\n\r\n elif opcode == OP_NEG:\r\n op = self.stack.pop()\r\n if isinstance(op, CollectionSlice):\r\n op = op.getValue()\r\n self.stack.append(-op)\r\n\r\n else:\r\n raise self.VMError('Unknown instruction', \r\n opcode2str(instr.opcode))\r\n\r\n if len(self.stack) > 0:\r\n value = self.stack.pop()\r\n if isinstance(value, CollectionSlice):\r\n value = value.getValue()\r\n builtin_print(('Ret:', value))\r\n self.stack = []\r\n\r\n def _create_top_env(self):\r\n 
top_binding = {}\r\n        for name, func in builtin_map.items():\r\n            top_binding[name] = BuiltinProc(name, func)\r\n        return VM_Environment(top_binding)\r\n\r\n    def _is_in_top_env(self):\r\n        return True if self.frame.env.parent is None else False\r\n\r\n    def _get_next_instruction(self):\r\n        if self.frame.pc >= len(self.frame.codeobject.instrlist):\r\n            return None\r\n        else:\r\n            instr = self.frame.codeobject.instrlist[self.frame.pc]\r\n            self.frame.pc += 1\r\n            return instr\r\n\r\n\r\nclass VM_Environment(object):\r\n\r\n    def __init__(self, binding, parent=None):\r\n        self.binding = binding\r\n        self.parent = parent\r\n\r\n    def getVar(self, varName):\r\n        if self.binding.has_key(varName):\r\n            return self.binding[varName]\r\n        elif self.parent:\r\n            return self.parent.getVar(varName)\r\n        else:\r\n            raise VMError('Undefined variable: ',\r\n                    varName)\r\n\r\n    def setVar(self, varName, value):\r\n        # variable can only be set at current environment\r\n        self.binding[varName] = value\r\n\r\nclass BuiltinProc(object):\r\n\r\n    def __init__(self, name, proc):\r\n        self.name = name\r\n        self.proc = proc\r\n\r\n    def apply(self, args):\r\n        return self.proc(args)\r\n\r\ndef builtin_list(args):\r\n    if len(args) == 0:\r\n        return []\r\n    else:\r\n        return [0]*args[0]\r\n\r\ndef builtin_print(args):\r\n    for arg in args:\r\n        if isinstance(arg, CollectionSlice):\r\n            arg = arg.getValue()\r\n        sys.stdout.write(str(arg) + ' ')\r\n    sys.stdout.write('\\n')\r\n\r\ndef builtin_assign(args):\r\n    lhs = args[0]\r\n    rhs = args[1]\r\n    if isinstance(lhs, CollectionSlice):\r\n        if isinstance(lhs.collection, CollectionSlice):\r\n            collection = lhs.collection.getValue()\r\n        else:\r\n            collection = lhs.collection\r\n        if len(lhs.idxlist) == 1:\r\n            collection[lhs.idxlist[0]] = rhs\r\n        elif len(lhs.idxlist) == 2:\r\n            collection[slice(lhs.idxlist[0], lhs.idxlist[1])] = rhs\r\n        elif len(lhs.idxlist) == 3:\r\n            collection[slice(lhs.idxlist[0], lhs.idxlist[1], lhs.idxlist[2])] = rhs\r\n    else:\r\n        raise VMError('Simple assignment should not call assign builtin.', '')\r\n\r\n\r\nbuiltin_map = {\r\n    'list': builtin_list,\r\n    'print': builtin_print,\r\n    'assign': builtin_assign,\r\n}\r\n\r\n\r\nclass VMError(Exception):\r\n    def __repr__(self):\r\n        return '%%[VMError] %s\\n%s' % self.args\r\n\r\n","repo_name":"svn2github/coderplay","sub_path":"trunk/Emma/epw_vm.py","file_name":"epw_vm.py","file_ext":"py","file_size_in_byte":11911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15989643101","text":"from collections import Counter\r\n\r\n\r\nclass Solution:\r\n    def minSetSize(self, arr: list[int]) -> int:\r\n        c = Counter(arr)\r\n        a = [y for x, y in c.items()]\r\n        a.sort(reverse=True)\r\n        ans = 0\r\n        d = 0\r\n        for i in a:\r\n            ans += 1\r\n            d += i\r\n            if d * 2 >= len(arr):\r\n                return ans\r\n","repo_name":"kirubel27/competetive-programming","sub_path":"1338. Reduce Array Size to The Half.py","file_name":"1338. 
Reduce Array Size to The Half.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"44946540841","text":"import os\nfrom typing import List\nfrom scheduler.scheduler import Scheduler\nfrom conf import conf\nimport threading\nimport log\nimport time\nfrom client.client import Client\nfrom timeit import default_timer as timer\n\nHEAL_TIME_OUT = 60 * 5 # 5 minutes\nTX_HISTORY_FILE = './TXs_Per_TC_{}'\nBLOCK_CONSISTENT_CHECKING_DURATION = 2\nENGINE_STATE_CHECKING_DURATION = 60\nTEST_CASE_CONTEXT_FILE_NAME = './system_log/{}/test_case_context.log'\nSYSTEM_LOG_DIR = './system_log/'\nTEST_CASE_SYSTEM_LOG_DIR = './system_log/{}'\n\n\nclass TestCase:\n \"\"\"A TestCase define the meta data of test case, includes condition, input, output.\"\"\"\n\n def __init__(self, test_case_conf, clients: List[Client]):\n self.test_case_conf = test_case_conf\n self.logger = log.get_logger()\n self.clients = {}\n for client in clients:\n self.clients[client.index] = client\n self.scheduler = Scheduler(test_case_conf, clients)\n # TC statistics\n self.start_chain_height = 0\n self.end_chain_height_before_recover = 0\n self.end_chain_height_after_recover = 0\n self.start_time = time.time()\n self.start_recover_time = None\n self.end_recover_time = None\n\n self.tx_history_file = TX_HISTORY_FILE.format(self.start_time)\n # TX issuing statistics\n self.tx_start_chain_height = 0\n self.tx_end_chain_height = 0\n self.tx_start_time = None\n self.tx_end_time = None\n self.tx_sent = 0\n self.tx_mined = 0\n self.balance_mined_by_the_test = 0\n self.sender_before_balance = 0\n self.receiver_before_balance = 0\n\n def __del__(self):\n try:\n if os.path.exists(self.tx_history_file):\n os.remove(self.tx_history_file)\n except Exception as e:\n self.logger.warning('remove file failed %s', e)\n\n def get_chain_height(self):\n best_height = 0\n for index, client in self.clients.items():\n height = client.get_chain_height()\n if height is None:\n continue\n else:\n best_height = height if height > best_height else best_height\n return best_height\n\n def do_context_clean_up(self):\n # clean up scheduled events.\n self.scheduler.stop_scheduling_events()\n # recover disasters simulated in the test bed.\n self.start_recover_time = time.time()\n self.end_chain_height_before_recover = self.get_chain_height()\n self.recover()\n # checking if disaster is healed with block synced again with alive nodes within a specified duration.\n if self.is_healed() is True:\n self.end_chain_height_after_recover = self.get_chain_height()\n self.end_recover_time = time.time()\n #self.generate_report()\n\n def tx_send(self):\n try:\n if 'startAt' in self.test_case_conf['input']:\n start_point = self.test_case_conf['input']['startAt']\n start = timer()\n while True:\n gap = start_point - (timer() - start)\n if gap > 0:\n time.sleep(1)\n self.logger.debug('Waiting for %ds to start sending TX.', gap)\n else:\n break\n\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration file. 
%s\", e)\n return None\n\n self.tx_start_time = time.time()\n self.tx_start_chain_height = self.get_chain_height()\n try:\n start = timer()\n duration = self.test_case_conf[\"input\"][\"duration\"]\n sender_index = self.test_case_conf[\"input\"][\"senderNode\"]\n receiver_index = self.test_case_conf[\"input\"][\"receiverNode\"]\n amount_per_tx = self.test_case_conf[\"input\"][\"amountperTX\"]\n\n if sender_index not in self.clients or receiver_index not in self.clients:\n return None\n self.sender_before_balance = self.clients[sender_index].get_balance()\n self.receiver_before_balance = self.clients[receiver_index].get_balance()\n while (timer() - start) < duration or self.scheduler.is_scheduling_events():\n time.sleep(1)\n try:\n txn_hash = self.clients[sender_index].send_transaction(\n to=\"0x{}\".format(self.clients[receiver_index].coin_base), value=amount_per_tx, gas_price=5000)\n if txn_hash is not None:\n with open(self.tx_history_file, 'a+') as f:\n f.write('{}\\n'.format(txn_hash))\n self.tx_sent += 1\n except Exception as e:\n self.logger.error(\"Send TX failed due to exception: %s.\", e)\n\n self.tx_end_time = time.time()\n self.tx_end_chain_height = self.get_chain_height()\n except Exception as e:\n self.logger.error(\"cannot access remote RPC endpoint: %s\", e)\n return None\n return True\n\n def is_balance_okay(self):\n \"\"\"Verify balance base on test_case_conf between sender and receiver.\"\"\"\n self.logger.debug(\"Before test, sender have: %d tokens\", self.sender_before_balance)\n self.logger.debug(\"Before test, receiver have: %d tokens\", self.receiver_before_balance)\n amount_per_tx = self.test_case_conf[\"input\"][\"amountperTX\"]\n sender_index = self.test_case_conf[\"input\"][\"senderNode\"]\n receiver_index = self.test_case_conf[\"input\"][\"receiverNode\"]\n try:\n with open(self.tx_history_file, 'r') as reader:\n for tx_hash in reader:\n # check if TX is mined, then calculate balance between sender and receiver.\n result = self.clients[sender_index].get_transaction_by_hash(tx_hash.strip('\\n'))\n # TX was mined, count the expected balance\n if result[\"blockHash\"] is not None:\n self.tx_mined += 1\n self.balance_mined_by_the_test += amount_per_tx\n except IOError as e:\n self.logger.error(\"Cannot get TX via RPC api: %s\", e)\n except (KeyError, TypeError) as e:\n self.logger.error(\"Cannot find blockHash from result, something wrong from RPC service: %s\", e)\n except Exception as e:\n self.logger.error(\"Something wrong happens at balance validation. 
%s\", e)\n\n sender_after_balance = self.clients[sender_index].get_balance()\n receiver_after_balance = self.clients[receiver_index].get_balance()\n\n if sender_after_balance is None or receiver_after_balance is None:\n return False\n\n # checking balance if sending tokens to self.\n if sender_index == receiver_index:\n self.logger.debug(\"sender balance: %d, receiver balance: %d\", sender_after_balance, receiver_after_balance)\n if sender_after_balance != receiver_after_balance:\n return False\n\n # checking sender's balance.\n if self.sender_before_balance - self.balance_mined_by_the_test == sender_after_balance is False:\n return False\n\n # checking receiver's balance.\n if self.receiver_before_balance + self.balance_mined_by_the_test == receiver_after_balance is False:\n return False\n return True\n\n def get_dead_validators(self):\n try:\n dead_nodes = self.test_case_conf[\"condition\"][\"crashNodes\"]\n self.logger.debug(\"get dead node: %s\", dead_nodes)\n return dead_nodes\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration for test case. %s\", e)\n return None\n\n def get_alive_validators(self):\n try:\n alive_nodes = []\n dead_nodes = self.get_dead_validators()\n for index, client in self.clients.items():\n if client.index not in dead_nodes:\n alive_nodes.append(client.index)\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration for test case. %s\", e)\n return None\n self.logger.debug(\"Get alive nodes: %s\", alive_nodes)\n return alive_nodes\n\n def is_block_in_consistent_state(self):\n # to do tracking the block hash via RPC through out the validators.\n try:\n map_height_hash = {}\n alive_nodes = self.get_alive_validators()\n if alive_nodes is None:\n return False\n for r in range(1, BLOCK_CONSISTENT_CHECKING_DURATION):\n reference_height = self.get_chain_height()\n for node in alive_nodes:\n block_hash = self.clients[node].get_block_hash_by_height(reference_height)\n if block_hash is None:\n continue\n if reference_height in map_height_hash:\n if map_height_hash.get(reference_height) != block_hash and block_hash is not None:\n self.logger.error(\"BLOCK INCONSISTENT in round: %d, at height: %d\", r, reference_height)\n return False\n else:\n self.logger.debug('checking consistence in height: %d, hash %s of node: %s port: %s.',\n reference_height, block_hash if block_hash is not None else 'NULL',\n self.clients[node].host, self.clients[node].p2p_port)\n else:\n map_height_hash[reference_height] = block_hash\n time.sleep(1)\n self.logger.debug(\"Verified block consistent with heights: %s\", map_height_hash)\n return True\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration file. %s\", e)\n return False\n\n def get_expected_engine_state(self):\n try:\n if self.test_case_conf[\"output\"][\"engineAlive\"] is True:\n return True\n return False\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration for test case. 
%s\", e)\n return False\n\n def is_engine_state_expected(self):\n # get expected state from test conf.\n should_engine_produce_block = self.get_expected_engine_state()\n\n try:\n on_hold_counter = 0\n height = self.get_chain_height()\n self.logger.debug(\"latest chain height %d\", height)\n for r in range(1, ENGINE_STATE_CHECKING_DURATION):\n time.sleep(1)\n new_height = self.get_chain_height()\n if height < new_height:\n # reset on-holding counter.\n on_hold_counter = 0\n self.logger.info(\"Consensus engine is keeping produce blocks: %d.\", new_height)\n height = new_height\n if should_engine_produce_block is False:\n return False\n else:\n break\n else:\n on_hold_counter += 1\n self.logger.info('Consensus engine is on-holding for %ds', on_hold_counter)\n if on_hold_counter == (ENGINE_STATE_CHECKING_DURATION - 1):\n if should_engine_produce_block is True:\n return False\n\n except (KeyError, TypeError) as e:\n self.logger.error(\"Wrong configuration file. %s \", e)\n return False\n return True\n\n def start_test(self):\n if self.run() is False:\n self.collect_test_case_context_log()\n self.collect_system_log()\n return False\n return True\n\n def run(self):\n \"\"\"run the test case, and tear down the test case with network recovery.\"\"\"\n self.logger.debug(\"before running test case, thread: %d.\", threading.active_count())\n self.start_chain_height = self.get_chain_height()\n self.logger.debug(\"start schedule events...\")\n if self.scheduler.schedule() is not True:\n return False\n if self.tx_send() is not True:\n self.do_context_clean_up()\n return False\n if self.is_balance_okay() is not True:\n self.do_context_clean_up()\n return False\n if self.is_engine_state_expected() is not True:\n self.do_context_clean_up()\n return False\n if self.is_block_in_consistent_state() is not True:\n self.do_context_clean_up()\n return False\n\n self.start_recover_time = time.time()\n self.end_chain_height_before_recover = self.get_chain_height()\n\n if self.recover() is not True:\n self.scheduler.stop_scheduling_events()\n return False\n self.logger.debug(\"After disaster recover, thread: %d.\", threading.active_count())\n\n # checking if disaster is healed with block synced again with alive nodes within a specified duration.\n if self.is_healed() is True:\n self.end_chain_height_after_recover = self.get_chain_height()\n self.end_recover_time = time.time()\n self.logger.info(\"TESTCASE: %s is passed.\", self.test_case_conf[\"name\"])\n #self.generate_report()\n self.scheduler.try_join()\n return True\n\n self.scheduler.try_join()\n self.logger.warning('Recovering timeout happens.')\n return False\n\n def generate_report(self):\n if self.tx_start_chain_height > self.tx_end_chain_height:\n self.logger.info('Blockchain was re-initialized from scratch.')\n self.tx_start_chain_height = 0\n\n if self.start_chain_height > self.end_chain_height_after_recover:\n self.logger.info('Blockchain was re-initialized from scratch.')\n self.start_chain_height = 0\n\n self.logger.info('statistics: $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')\n self.logger.info('statistics: Name: %s', self.test_case_conf['name'])\n self.logger.info('statistics: TC: start time: %s', time.asctime(time.localtime(self.start_time)))\n self.logger.info('statistics: TC: duration: %ds', self.start_recover_time - self.start_time)\n self.logger.info('statistics: TC: start recover time: %s', time.asctime(time.localtime(self.start_recover_time)))\n self.logger.info('statistics: TC: duration: %ds', self.end_recover_time - 
self.start_recover_time)\n self.logger.info('statistics: TC: end recover time: %s', time.asctime(time.localtime(self.end_recover_time)))\n self.logger.info('statistics: TC: start chain height: %d', self.start_chain_height)\n self.logger.info('statistics: TC: before recover height: %d', self.end_chain_height_before_recover)\n self.logger.info('statistics: TC: after recover height: %d', self.end_chain_height_after_recover)\n self.logger.info('statistics: TC: Block producing speed: %1.3f block/s',\n (self.end_chain_height_after_recover - self.start_chain_height) /\n (self.end_recover_time - self.start_time))\n self.logger.info('statistics: -------------------------------------------------')\n self.logger.info('statistics: TX: start: %s', time.asctime(time.localtime(self.tx_start_time)))\n self.logger.info('statistics: TX: end: %s', time.asctime(time.localtime(self.tx_end_time)))\n self.logger.info('statistics: TX: duration: %ds', self.tx_end_time - self.tx_start_time)\n self.logger.info('statistics: TX: start height: %d', self.tx_start_chain_height)\n self.logger.info('statistics: TX: end height: %d', self.tx_end_chain_height)\n self.logger.info('statistics: TX: Block producing speed: %1.3f block/s.',\n (self.tx_end_chain_height - self.tx_start_chain_height) / (self.tx_end_time - self.tx_start_time))\n self.logger.info('statistics: TX: %d of %d was mined in ledger, TPL: %.2f%%. Token delivered: %d.',\n self.tx_mined, self.tx_sent, (self.tx_mined/self.tx_sent)*100, self.balance_mined_by_the_test)\n self.logger.info('statistics: $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')\n\n def collect_test_case_context_log(self):\n try:\n # try to create dirs\n os.makedirs(SYSTEM_LOG_DIR, exist_ok=True) # It never fail even if the dir is existed.\n os.makedirs(TEST_CASE_SYSTEM_LOG_DIR.format(self.start_time), exist_ok=True)\n\n self.test_case_conf['testcase_start_time'] = time.ctime(self.start_time)\n self.test_case_conf['testcase_start_height'] = self.start_chain_height\n self.test_case_conf['testcase_end_height'] = self.end_chain_height_before_recover\n self.test_case_conf['testcase_end_time'] = time.ctime(time.time())\n self.test_case_conf['ip_mapping'] = []\n for index, client in self.clients.items():\n self.test_case_conf['ip_mapping'].append(\"{}:{}\".format(index, client.host))\n\n ret = conf.write_yaml(TEST_CASE_CONTEXT_FILE_NAME.format(self.start_time), self.test_case_conf)\n if ret is not True:\n self.logger.warning('cannot save test case context.')\n\n # print test case context in log file.\n self.logger.info(\"\\n\\n\\n\")\n self.logger.info(\"The failed test case context is collected as below:\")\n self.logger.info(self.test_case_conf)\n self.logger.info(\"\\n\\n\\n\")\n except Exception as e:\n self.logger.error(\"Cannot collect test case context logs. 
%s\", e)\n\n def recover(self):\n failed = False\n for index, client in self.clients.items():\n if client.heal_from_disaster() is not True:\n failed = True\n return True if not failed else False\n\n def is_healed(self):\n # measure the best height.\n best_height = self.get_chain_height()\n healed_clients = {}\n start = timer()\n while (timer() - start) < HEAL_TIME_OUT:\n self.logger.debug(\"IsHeal, current thread count: %d\", threading.active_count())\n if len(healed_clients) == len(self.clients):\n return True\n for index, client in self.clients.items():\n height = client.get_chain_height()\n if height is None:\n continue\n if height >= best_height:\n healed_clients[index] = client\n time.sleep(1)\n self.logger.warning('Disaster recovering timeout. 5 minutes!')\n return False\n\n def collect_system_log(self):\n try:\n # try to create dirs\n os.makedirs(SYSTEM_LOG_DIR, exist_ok=True) # It never fail even if the dir is existed.\n os.makedirs(TEST_CASE_SYSTEM_LOG_DIR.format(self.start_time), exist_ok=True)\n for index, client in self.clients.items():\n client.collect_system_log(TEST_CASE_SYSTEM_LOG_DIR.format(self.start_time))\n except Exception as e:\n self.logger.error('Cannot fetch logs from node. %s.', e)\n return None\n try:\n # redirect client logs into test engine's logger.\n for index, client in self.clients.items():\n client.redirect_system_log(TEST_CASE_SYSTEM_LOG_DIR.format(self.start_time))\n except Exception as e:\n self.logger.error('Cannot redirect system logs from client into test engine log file %s.', e)\n return None\n return True\n","repo_name":"autonity/autonity","sub_path":"docker_e2e_test/testcase/testcase.py","file_name":"testcase.py","file_ext":"py","file_size_in_byte":19820,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"30"} +{"seq_id":"15123815416","text":"from django import forms\n\nfrom .models import MyVideo\n\n\nclass MyVideoForm(forms.ModelForm):\n url = forms.CharField(max_length=300, help_text=\"Enter your Youtube Url here\")\n\n class Meta:\n model = MyVideo\n fields = ('url',)\n","repo_name":"surajpaib/Signify","sub_path":"vid/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"16650086521","text":"\"\"\" Continent lib \"\"\"\n\nfrom countrycode import countrycode\n\ndef _error(msg):\n \"\"\" Utility to handle custom errors (see response mapping) \"\"\"\n raise Exception(\"[BadRequest] %s\" % msg)\n\ndef continent_by_country_name(event):\n \"\"\" Get continent name (e.g. \"Europe\"), given a country name (e.g. 
\"Italy\") \"\"\"\n country_name = event.get('country')\n\n if not country_name:\n return _error(\"Invalid event (required country)\")\n\n continent = countrycode(codes=[country_name], origin=\"country_name\", target='continent')\n\n if not continent:\n return _error(\"Invalid country: %s\" % country_name)\n\n return {\n \"continent\": next(iter(continent)),\n }\n","repo_name":"alexcasalboni/serverless-starter-python","sub_path":"restApi/lib/continent.py","file_name":"continent.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"30"} +{"seq_id":"23264099817","text":"#Main instalation\nimport os\nfrom shutil import move\nimport fitz\nimport pytesseract as tess\nfrom PIL import Image\ntess.pytesseract.tesseract_cmd= r'C:\\Users\\aleja\\AppData\\Local\\Programs\\Tesseract-OCR\\tesseract.exe'\n\n\ncurrent_directory = os.path.dirname(os.path.abspath(__file__))\nfile_name = input(\"Enter the pdf file name: \")\ntry:\n file= fitz.open(file_name)\nexcept:\n print(\"Searching for the file in the current directory\")\n file_name = current_directory + \"\\\\\" + file_name\n file= fitz.open(file_name)\n if file is None:\n print(\"File not found\")\n exit()\n \ndirectory_name = input(\"Enter the directory name for the extracted data: \")\n\ncurrent_directory = os.path.join(current_directory, directory_name)\nif not os.path.exists(current_directory):\n os.mkdir(current_directory)\n\nimages = []\n\n#For text\nfor pageNumber, page in enumerate(file.pages(), start=1):\n\n #Get Text\n text= page.getText()\n\n file_name_ = \"page_\" + str(pageNumber) + \".txt\"\n #Save all Text into a single TXT file with an append mode\n txt=open(file_name_, 'a')\n \n #Write the text \n txt.writelines(text)\n txt.close()\n \n # Move the TXT file to the current directory\n os.mkdir(current_directory+\"\\\\\"+\"Text\")\n move(os.path.join(os.getcwd(),file_name_), os.path.join(current_directory + \"\\\\TEXT\\\\\",file_name_))\n\n#for image\n\n#Get the page number and page information enumarated\nfor pageNumber, page in enumerate(file.pages(), start=1):\n\n\n #Get the location of the image\n for imgNumber, img in enumerate(page.getImageList(),start= 1):\n xref= img[0]\n\n\n #Create\n pix=fitz.Pixmap(file,xref)\n #Bits per pixel\n if pix.n >4:\n\n pix=fitz.Pixmap(fitz.csRGB,pix) #since this is not RBG or GREY it is converted into PIX\n \n \n image_name = f'a_Page_{pageNumber}_Image_{imgNumber}.png'\n pix.writePNG(image_name)\n\n # Move the image to the current directory\n os.mkdir(current_directory+\"\\\\\"+\"Images\")\n move(os.getcwd()+\"\\\\\"+image_name, current_directory + \"\\\\IMAGES\\\\\"+image_name)\n images.append(image_name)\n \n# printing the text from the image\nfor image in images:\n img= Image.open(current_directory+\"\\\\IMAGES\\\\\" + image)\n ocr = tess.image_to_string(img, lang='eng',config='-c page_separator=')\n print(ocr)\n \n","repo_name":"jesseb246/nsinfaumeta","sub_path":"py/ocr_process.py","file_name":"ocr_process.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"3560199436","text":"import utils\r\nfrom dssat import DSSAT\r\nimport os\r\nimport argparse\r\n\r\nSUFFIXES = {'maize': '.MZX', 'rice': '.RIX'}\r\n\r\n\r\ndef run_model(input_summary_file, output_summary_file, out_crop_path, result_output, gl_epochs, crop_type=None,\r\n file_name=None, run_path_absolute=r'C:\\DSSAT47', glue_flag=1, 
simulation_model='B'):\r\n utils.create_input_files(input_summary_file, output_summary_file)\r\n utils.create_xfile(os.path.join(output_summary_file, 'xfile.json'), out_crop_path, crop_type, file_name)\r\n for fn in os.listdir(out_crop_path):\r\n if os.path.splitext(fn)[-1] in list(SUFFIXES.values()):\r\n dssat = DSSAT(os.path.join(out_crop_path, fn), run_path_absolute)\r\n dssat(result_output, gl_epochs, glue_flag, simulation_model)\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Run Dssat model with GLUE')\r\n parser.add_argument('--input', '-i', help='path to input summary file')\r\n parser.add_argument('--output', '-o', default=os.getcwd(),\r\n help='path to preserve reorganized summary output file')\r\n parser.add_argument('--cropdir', '-cd', default=os.path.join(os.getcwd(), 'output'),\r\n help='path to preserve reorganized summary output file')\r\n parser.add_argument('--result', '-rs', default=os.path.join(os.getcwd(), 'result'),\r\n help='path to preserve result files of dssat model')\r\n parser.add_argument('--epochs', '-e', default=5000, help='Epochs of GLUE')\r\n\r\n args = parser.parse_args()\r\n\r\n run_model(args.input, args.output, args.cropdir, args.result, args.epochs)\r\n","repo_name":"MMMMMYxY/pydssat","sub_path":"run_model.py","file_name":"run_model.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"} +{"seq_id":"6171110752","text":"import os\nimport tempfile\nfrom urllib.parse import uses_params\nfrom xml.sax.saxutils import prepare_input_source\nfrom pyrsistent import b\nimport sem\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport pprint\nimport matplotlib.ticker as ticker\nimport sys\nimport base64\nimport re\nfrom PIL import Image\nfrom IPython.display import display, Markdown, Latex\nimport random\nfrom datetime import datetime\n\n\n\nsns.set_style(\"whitegrid\")\ncurr_dt = datetime.now()\ntimestamp = int(round(curr_dt.timestamp()))\nrandom.seed(timestamp)\n\nrunid = random.randint(1,100)\n\nns_path = os.path.normpath(os.path.join(os.getcwd(), '../../', 'ns-3')) # Path for simulation purposes\nscript = 'lab0' # NS3 Script to run, (change name here)\n\ntxpower = 20 # Use 40 until react gui parsing (change for lab1)\n\n##########################################################################################\n\ncampaign_dir = []\ncampaign = []\nres_path = []\nparams = []\n \nuePos = \"[(341,506),(513,281),]\"\neNBPos = \"[(0,0)]\"\n\n# uePos = \"[(341,506),(513,281),(800,706),(1000,381),]\"\n\n# eNBPos = \"[(0,0),(800,500),]\"\n\n \nbuildingPos = \"[]\"\n\nres_path = 'results-rsrp' + '-' + str(txpower) + '-' + str(runid)\ncampaign_dir = os.path.normpath(os.path.join(os.getcwd(), 'Results', res_path))\n\ncampaign = sem.CampaignManager.new(ns_path, script, campaign_dir, overwrite=True,\n check_repo = False, max_parallel_processes=4)\n\nparams = {\n 'arrayPosUEsString': uePos,\n 'arrayPosBuildingsString': buildingPos,\n 'enablersrp': True,\n 'eNBTxPowerDbm': txpower,\n 'arrayPoseNBsString' :eNBPos,\n 'runId': runid,\n}\n\nruns = 1\nprint(\"User Received Power simulation started...\")\n\ncampaign.run_missing_simulations(params, runs=runs)\n\n##########################################################################################\nprint(\"User Received Power simulation finished!\")\nresult_signalpower = campaign.db.get_complete_results(params=params) #Results\n\ncampaign_dir = []\ncampaign = []\nres_path = 
[]\nparams = []\n##########################################################################################\nres_path = 'results-sinr' + '-' + str(txpower) + '-' + str(runid) \ncampaign_dir = os.path.normpath(os.path.join(os.getcwd(), 'Results', res_path))\n\ncampaign = sem.CampaignManager.new(ns_path, script, campaign_dir, overwrite=True, \n check_repo = False, max_parallel_processes=8)\n\nparams = {\n 'arrayPosUEsString': uePos,\n 'arrayPosBuildingsString': buildingPos,\n 'enablesinrue': True,\n 'eNBTxPowerDbm': txpower,\n 'arrayPoseNBsString' :eNBPos,\n 'runId': runid,\n}\nruns = 1\ncampaign.run_missing_simulations(params, runs=runs)\n\n# ##########################################################################################\n# print(\"Simulation SINR finished!\")\nresult_sinr = campaign.db.get_complete_results(params=params) #Results\n\n# ##########################################################################################\ncampaign_dir = []\ncampaign = []\nres_path = []\nparams = []\n \nprint(\"Throughput simulation started...\")\n\n \nres_path = 'results-flow' + '-' + str(txpower) + '-' + str(runid) \ncampaign_dir = os.path.normpath(os.path.join(os.getcwd(), 'Results', res_path))\n\ncampaign = sem.CampaignManager.new(ns_path, script, campaign_dir, overwrite=True, \n check_repo = False, max_parallel_processes=4)\n\nparams = {\n 'arrayPosUEsString': uePos,\n 'arrayPosBuildingsString': buildingPos,\n 'enableInstTput': True,\n 'eNBTxPowerDbm': txpower,\n 'arrayPoseNBsString' :eNBPos,\n 'runId': runid,\n}\n\nruns = 1\ncampaign.run_missing_simulations(params, runs=runs)\n\n ##########################################################################################\nprint(\"Throughput simulation finished!\")\nresult_throughput = campaign.db.get_complete_results(params=params) #Results\n\n\nprint('All the simulations have been completed successfully...')\n\n# ##########################################################################################\n# print(\"Simulations finished\") \n\nraise SystemExit\n\n\n# %reset -f\n","repo_name":"kashifme224/ttm4133-spring23","sub_path":"lab0- tutorial/Modules/first-sim.py","file_name":"first-sim.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"42031320518","text":"#!/usr/bin/env python3\n\"\"\" Python script that provides some stats about Nginx logs stored in MongoDB:\n\"\"\"\n\nfrom pymongo import MongoClient\n\nclient = MongoClient()\nnginx_coll = client.logs.nginx\nmethods = [\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"]\n\n\ndef count_items(coll, **kwargs):\n \"\"\" Count the number of documents with matching\n kwargs in a collection\n \"\"\"\n res = coll.find(kwargs)\n return len(list(res))\n\n\ndef top_10_ips(coll):\n \"\"\"Return most visited ipsin log\n \"\"\"\n aggr_arr = [\n {\"$group\": {\"_id\": \"$ip\", \"count\": {\"$sum\": 1}}},\n {\"$sort\": {\"count\": -1}}, {\"$limit\": 10}\n ]\n res = coll.aggregate(aggr_arr)\n return res\n\n\ndef main():\n \"\"\"Main function\n \"\"\"\n ips = top_10_ips(nginx_coll)\n logs = count_items(nginx_coll)\n methods_count = [count_items(nginx_coll, method=mtd) for mtd in methods]\n status_count = count_items(nginx_coll, method=\"GET\", path='/status')\n print(\"{} logs\".format(count_items(nginx_coll)))\n print(\"Methods:\")\n for i in range(len(methods)):\n print(\"\\tmethod {}: {}\".format(\n methods[i], methods_count[i]))\n print(\"{} status check\".format(status_count))\n 
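# each result doc from top_10_ips() carries the IP under '_id' and its request total under 'count'\n    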
print(\"IPs:\")\n for ip in ips:\n print('\\t{}: {}'.format(ip.get('_id'), ip.get('count')))\n\n\nif __name__ == \"__main__\":\n (main())\n","repo_name":"Abdulmuizz98/alx-backend-storage","sub_path":"0x01-NoSQL/102-log_stats.py","file_name":"102-log_stats.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"34031193271","text":"import rhinoscriptsyntax as rs\r\n\r\n\r\n#get 2 curves & store them in list\r\ncrvs = rs.GetObjects(\"select 2 curves\")\r\n\r\ndiv = 25 #no of points the curve is divided\r\ngravity = 10 #amout by which the points will come down\r\n\r\n\r\n#divide the curves and store the points in a 2 lists\r\npts_crv1=rs.DivideCurve(crvs[0],div, False)\r\npts_crv2=rs.DivideCurve(crvs[1],div, False)\r\n\r\n\r\nrad = 0.25 #radius for the downward wing\r\n\r\n\r\ngpts_list1 = [] # list of new points for crv 1\r\ngpts_list2 = [] # list of new points for crv 2\r\n\r\n#makes the line between points \r\nfor i in range(len(pts_crv1)):\r\n nz1 = pts_crv1[i][2] - gravity #n = new # z = coordinate\r\n nx1 = pts_crv1[i][0] # x = coordinate\r\n ny1 = pts_crv1[i][1] # y = coordinate\r\n \r\n npts_crv1 = (nx1,ny1,nz1)\r\n gpts_list1.append(npts_crv1)\r\n\r\n \r\n nz2 = pts_crv2[i][2] - gravity #n = new # z = coordinate\r\n nx2 = pts_crv2[i][0] # x = coordinate\r\n ny2 = pts_crv2[i][1] # y = coordinate\r\n \r\n npts_crv2 = (nx2,ny2,nz2)\r\n gpts_list2.append(npts_crv2)\r\n\r\n\r\ngcrv1 = rs.AddInterpCurve(gpts_list1)\r\ngcrv2 = rs.AddInterpCurve(gpts_list2)\r\n\r\n\r\n#divide the curves and store the points in a list or 2 lists\r\npts_gcrv1 = rs.DivideCurve(gcrv1,div,False)\r\npts_gcrv2 = rs.DivideCurve(gcrv2,div,False)\r\n\r\nspts_list1 = [] # list of new points for gcrv 1 # s = shear\r\nspts_list2 = [] # list of new points for gcrv 2\r\n\r\n#makes list of points\r\nfor i in range(len(pts_gcrv1)):\r\n d = rs.Distance(pts_gcrv1[i],pts_gcrv2[i])\r\n sf = -d*0.3 # shear factor\r\n \r\n nz1 = pts_gcrv1[i][2]#n = new # z = coordinate\r\n nx1 = pts_gcrv1[i][0] + sf # x = coordinate\r\n ny1 = pts_gcrv1[i][1] # y = coordinate\r\n \r\n npts_gcrv1 = (nx1,ny1,nz1)\r\n spts_list1.append(npts_gcrv1)\r\n \r\n \r\n nz2 = pts_gcrv2[i][2]#n = new # z = coordinate\r\n nx2 = pts_gcrv2[i][0] - sf # x = coordinate\r\n ny2 = pts_gcrv2[i][1] # y = coordinate\r\n \r\n npts_gcrv2 = (nx2,ny2,nz2)\r\n spts_list2.append(npts_gcrv2)\r\n print(\"TEST\",\"Point added\")\r\n\r\nscrv1 = rs.AddInterpCurve(spts_list1)\r\nscrv2 = rs.AddInterpCurve(spts_list2)\r\n\r\npts_scrv1 = rs.DivideCurve(scrv1,div)\r\npts_scrv2 = rs.DivideCurve(scrv2,div)\r\n\r\n\r\n\r\n\r\nrs.AddLoftSrf([gcrv1,scrv1])\r\nrs.AddLoftSrf([gcrv2,scrv2])\r\n\r\n#add the pipe for outer frame\r\npipe_frame1 = rs.AddPipe(scrv1, 0,rad)\r\npipe_frame2 = rs.AddPipe(scrv2, 0,rad)\r\n","repo_name":"MaD171093/IdeaSeminar_F2018_Manoj","sub_path":"5-RhinoPattern-Manoj.py","file_name":"5-RhinoPattern-Manoj.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6044169724","text":"#! 
flask/bin/python\n\nfrom flask import Flask, session, request, redirect, jsonify, abort, escape, make_response, url_for, \\\n render_template, flash\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nfrom flask_httpauth import HTTPBasicAuth\nfrom datetime import datetime\nimport pyodbc\nimport json\nfrom hashlib import sha384\nimport config\n\n\napp = Flask(__name__)\napi = Api(app)\nauth = HTTPBasicAuth()\napp.secret_key = config.SECRET_KEY\n\n\nclass ServerError(Exception):\n pass\n\n\nclass AzureSQLDatabase(object):\n connection = None\n cursor = None\n\n def __init__(self):\n self.connection = pyodbc.connect(config.CONN_STRING)\n self.cursor = self.connection.cursor()\n\n def query(self, query, params):\n return self.cursor.execute(query, params)\n\n def commit(self):\n return self.connection.commit()\n\n def __del__(self):\n self.connection.close()\n\n\n@app.route('/api/v1.0/index')\ndef index():\n if 'username' not in session:\n return redirect(url_for('login'))\n\n context = {\n \"username_session\": escape(session['username']).capitalize(),\n \"userid\": session['userid'],\n \"teamname\": session['teamname']\n }\n\n return render_template('index.html', context=context)\n\n\n@app.route('/api/v1.0/login', methods=['GET', 'POST'])\ndef login():\n if 'username' in session:\n return redirect(url_for('index'))\n\n error = None\n try:\n if request.method == 'POST':\n username_form = request.form['username']\n conn = AzureSQLDatabase()\n params = username_form\n sql = \"select count(1) from users where username = ?;\"\n cursor = conn.query(sql, params)\n\n if not cursor.fetchone()[0]:\n raise ServerError('Invalid Username!')\n\n password_form = request.form['password']\n params2 = password_form\n sql2 = \"select password, userid, teamname from users where username = ?;\"\n cursor2 = conn.query(sql2, params)\n\n for row in cursor2.fetchall():\n if sha384(password_form).hexdigest().upper() == row[0]:\n session['username'] = request.form['username']\n session['userid'] = row[1]\n session['teamname'] = row[2]\n return redirect(url_for('index'))\n\n raise ServerError('Invalid Password!')\n except ServerError as e:\n error = str(e)\n\n # the code below is executed if the request method\n # was GET or the credentials were invalid\n return render_template('login.html', error=error)\n\n\n@app.route('/api/v1.0/logout')\ndef logout():\n session.pop('username', None)\n session.pop('userid', None)\n session.pop('teamname', None)\n return redirect(url_for('index'))\n\n\n@app.route('/api/v1.0/forgot')\ndef forgot_password():\n return render_template('forgot.html', username=None)\n\n\n@auth.get_password\ndef get_password_and_key(username):\n \"\"\" Simple text-based authentication \"\"\"\n if username == 'qwikcutappstats':\n api_key = 'ebd7a876-c8ad-11e6-9d9d-cec0c932ce01'\n return api_key\n else:\n return None\n\n\n@auth.error_handler\ndef unauthorized():\n \"\"\"\n Return a 403 instead of a 401 to prevent browsers from displaying\n the default auth dialog\n :param:\n :return: unauthorized message\n \"\"\"\n return make_response(jsonify({'message': 'Unauthorized Access'}), 403)\n\n\nstat_fields = {\n 'id': fields.Integer,\n 'statid': fields.Integer,\n 'playerid': fields.Integer,\n 'playernumber': fields.Integer,\n 'goals': fields.Integer,\n 'shots': fields.Integer,\n 'assists': fields.Integer,\n 'saves': fields.Integer,\n 'grounders': fields.Integer,\n 'turnovers': fields.Integer,\n 'forcedturnovers': fields.Integer,\n 'penalties': fields.Integer,\n 'teamid': fields.Integer,\n 'gameid': 
fields.Integer,\n 'teamname': fields.String,\n 'statdate': fields.DateTime,\n 'userid': fields.Integer,\n 'deviceid': fields.String,\n 'uri': fields.Url('stat')\n}\n\n\nclass StatListAPI(Resource):\n \"\"\"\n API Resource for listing all player stats from the database.\n Provides the endpoint for creating new stats\n :param: none\n :type a json object\n :return json stats list for all players\n \"\"\"\n decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('id', type=int, required=False,\n help='The API URL\\'s ID of the stat.')\n self.reqparse.add_argument('statid', type=int, required=False,\n help='The stat ID field is an auto-incrementing database field')\n self.reqparse.add_argument('playerid', type=int, required=False,\n help='The player ID is used to map the player names to the team rosters.')\n self.reqparse.add_argument('playernumber', type=int, required=False,\n help='The player for which the game statistic is being recorded.')\n self.reqparse.add_argument('goals', type=int, required=False,\n help='The number of goals scored.')\n self.reqparse.add_argument('shots', type=int, required=False,\n help='The number of shots taken.')\n self.reqparse.add_argument('assists', type=int, required=False,\n help='The number of assists.')\n self.reqparse.add_argument('saves', type=int, required=False,\n help='The number of saves.',\n location='form')\n self.reqparse.add_argument('grounders', type=int, required=False,\n help='The number of grounders.')\n self.reqparse.add_argument('turnovers', type=int, required=False,\n help='The number of turnovers.')\n self.reqparse.add_argument('forcedturnovers', type=int, required=False,\n help='The number of forced turnovers.')\n self.reqparse.add_argument('penalties', type=int, required=False,\n help='The number of penalties.',\n location='form')\n self.reqparse.add_argument('teamid', type=int, required=False,\n help='The team ID of the player.')\n self.reqparse.add_argument('gameid', type=int, required=False,\n help='Game ID for which this stat is being recorded.')\n self.reqparse.add_argument('teamname', type=str, required=False,\n help='The team name of the player stat')\n self.reqparse.add_argument('statdate', type=str,\n required=False, help='The stat date.')\n self.reqparse.add_argument('userid', type=int, required=False,\n help='The user ID of the logged in user.')\n self.reqparse.add_argument('deviceid', type=str, required=False,\n help='The unique device ID')\n self.reqparse.add_argument('uri', type=str, required=False,\n help='The full URL path of the stat.')\n\n super(StatListAPI, self).__init__()\n\n def get(self):\n try:\n sql = u\"select statid, statid as id, playerid, playernumber, goals, shots, assists, \" \\\n u\"saves, grounders, turnovers, forcedturnovers, penalties, gameid, teamid, teamname, \" \\\n u\"statdate, userid from lacrosse_stats WHERE statdate > ?\"\n\n conn = AzureSQLDatabase()\n params = '12-1-2016'\n cursor = conn.query(sql, params)\n columns = [column[0] for column in cursor.description]\n stats = []\n for row in cursor.fetchall():\n stats.append(dict(zip(columns, row)))\n\n return {\n 'stats': marshal(stats, stat_fields)\n }\n\n except Exception as e:\n return {'error': str(e)}\n\n def post(self):\n try:\n args = self.reqparse.parse_args()\n data = request.get_json()\n stat = []\n\n stat = {\n 'statid': data['statid'],\n 'playerid': data['playerid'],\n 'playernumber': data['playernumber'],\n 'goals': data['goals'],\n 'shots': data['shots'],\n 
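# the remaining counting stats are copied straight from the JSON payload\n                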
'assists': data['assists'],\n 'saves': data['saves'],\n 'grounders': data['grounders'],\n 'turnovers': data['turnovers'],\n 'forcedturnovers': data['forcedturnovers'],\n 'penalties': data['penalties'],\n 'teamid': data['teamid'],\n 'gameid': data['gameid'],\n 'teamname': data['teamname'],\n 'statdate': data['statdate'],\n 'userid': data['userid'],\n 'deviceid': data['deviceid']\n }\n\n conn = AzureSQLDatabase()\n conn.query(\"insert into lacrosse_stats(playerid, playernumber, goals, shots, assists, saves, grounders, \\\n turnovers, forcedturnovers, penalties, teamid, gameid, teamname, statdate, userid, deviceid) \\\n values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n [stat['playerid'], stat['playernumber'], stat['goals'], stat['shots'], stat['assists'], stat['saves'], stat['grounders'], stat['turnovers'], stat['forcedturnovers'], stat['penalties'], stat['teamid'], stat['gameid'], stat['teamname'], stat['statdate'], stat['userid'], stat['deviceid']])\n\n conn.commit()\n\n return {\n 'stat': stat\n }, 201\n\n except Exception as e:\n return {'error': str(e)}\n\n\nclass StatAPI(Resource):\n \"\"\"\n API Resource for retrieving, modifying, updating and deleting a single\n player stat, by ID.\n :param: statid\n :return: player stat records by ID.\n \"\"\"\n decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('id', type=int, required=False,\n help='The API URL\\'s ID of the stat.')\n self.reqparse.add_argument('statid', type=int, required=False,\n help='The stat ID field is an auto-incrementing database field',\n location='args')\n self.reqparse.add_argument('playerid', type=int, required=False,\n help='The player ID is used to map the player names to the team rosters.',\n location='args')\n self.reqparse.add_argument('playernumber', type=int, required=True,\n help='The player for which the game statistic is being recorded',\n location='args')\n self.reqparse.add_argument('goals', type=int, required=False,\n help='The number of goals scored.',\n location='args')\n self.reqparse.add_argument('shots', type=int, required=False,\n help='The number of shots taken.',\n location='args')\n self.reqparse.add_argument('assists', type=int, required=False,\n help='The number of assists.',\n location='args')\n self.reqparse.add_argument('saves', type=int, required=False,\n help='The number of saves.',\n location='args')\n self.reqparse.add_argument('grounders', type=int, required=False,\n help='The number of grounders.',\n location='args')\n self.reqparse.add_argument('turnovers', type=int, required=False,\n help='The number of turnovers.',\n location='args')\n self.reqparse.add_argument('forcedturnovers', type=int, required=False,\n help='The number of forced turnovers.',\n location='args')\n self.reqparse.add_argument('penalties', type=int, required=False,\n help='The number of penalties.',\n location='args')\n self.reqparse.add_argument('teamid', type=int, required=False,\n help='The QC+ team ID of the player.')\n self.reqparse.add_argument('gameid', type=int, required=False,\n help='The QC+ game ID from the games table.')\n self.reqparse.add_argument('teamname', type=str, required=True,\n help='The player\\'s team name.')\n self.reqparse.add_argument('statdate', type=str, required=True,\n help='The date time stamp of the statistic.')\n self.reqparse.add_argument('uri', type='str', required=False,\n help='The full URL path to the requested resource')\n super(StatAPI, self).__init__()\n\n def get(self, id):\n try:\n 
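# parameterized query: the '?' placeholder is bound by pyodbc, keeping the id out of the SQL string\n            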
conn = AzureSQLDatabase()\n params = id\n sql = u\"select statid, statid as id, playerid, playernumber, goals, shots, assists, saves, grounders, turnovers, \" \\\n u\"forcedturnovers, penalties, teamid, gameid, teamname, statdate, userid from lacrosse_stats where statid = ?\"\n\n cursor = conn.query(sql, params)\n columns = [column[0] for column in cursor.description]\n stat = []\n for row in cursor.fetchall():\n stat.append(dict(zip(columns, row)))\n\n return {\n 'stat': marshal(stat, stat_fields)\n }, 200\n\n except Exception as e:\n return {'error': str(e)}\n\n def put(self, id):\n try:\n conn = AzureSQLDatabase()\n data = request.get_json()\n params = (data['playerid'], data['playernumber'], data['goals'], data['shots'], data['assists'], data['saves'], data['grounders'], data['turnovers'], data['forcedturnovers'], data['penalties'], data['teamid'], data['gameid'], data['teamname'], data['statdate'], id)\n conn.query(\"update lacrosse_stats set playerid = ?, playernumber = ?, goals = ?, shots = ?, assists = ?, \\\n saves = ?, grounders = ?, turnovers = ?, forcedturnovers = ?, penalties = ?, teamid = ?, \\\n gameid = ?, teamname = ?, statdate = ? where statid = ?\", params)\n\n conn.commit()\n\n return {\n 'stat': data\n }, 204\n\n except Exception as e:\n return {'error': str(e)}\n\n def delete(self, id):\n try:\n conn = AzureSQLDatabase()\n params = id\n sql = u\"delete from lacrosse_stats where statid = ?\"\n cursor = conn.query(sql, params)\n conn.commit()\n\n return {\n 'result': True\n }, 204\n\n except Exception as e:\n return {'error': str(e)}\n\n\n# register the API resources and define endpoints\napi.add_resource(StatListAPI, '/api/v1.0/lacrosse/stats', endpoint='stats')\napi.add_resource(StatAPI, '/api/v1.0/lacrosse/stats/', endpoint='stat')\n\nif __name__ == '__main__':\n app.run(\n debug=config.DEBUG,\n port=config.PORT\n )\n","repo_name":"craigderington/qwikcut-stats-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":15737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37063708276","text":"from flask_restful import Resource, reqparse\nfrom app.models.enturmar import EnturmarModel\nfrom app.models.user import UserModel\n\nfrom flask_jwt_extended import create_access_token, jwt_required, get_jwt\n# from app.config import conn\n\n# from werkzeug.security import safe_str_cmp\nfrom app.blacklist import BLACKLIST\n\natributos = reqparse.RequestParser()\natributos.add_argument('FK_turma_id', type=int, help=\"campo de telefone\")\natributos.add_argument('Fk_estudante_id', type=dict, help=\"campo de perfil_id\")\n\n\nclass EnturmarServices(Resource):\n\n @jwt_required()\n def post(self, *args, **kwargs):\n try:\n \n dados = atributos.parse_args()\n FK_turma_id = dados['FK_turma_id']\n Fk_estudante_id = dados['Fk_estudante_id']\n \n \n for i , estudante in enumerate(Fk_estudante_id['estudantes']):\n EnturmarModel.create_enturmar( FK_turma_id, estudante)\n \n return {'created': FK_turma_id}, 201\n \n except:\n return { 'error': 'verifique a requisição !' 
}, 400\n\n    # Flask-RESTful dispatches by HTTP verb, so PUT requests land here\n    @jwt_required()\n    def put(self, *args, **kwargs):\n        try:\n\n            dados = atributos.parse_args()\n            FK_turma_id = dados['FK_turma_id']\n            Fk_estudante_id = dados['Fk_estudante_id']\n\n            enturmar = EnturmarModel.get_FK_turma_id_enturmar(FK_turma_id)\n\n            # print(enturmar)\n            # input()\n            manter = []\n            excluirAssociacao = []\n            novos = []\n            for element in enturmar:\n                if element not in Fk_estudante_id['estudantes']:\n                    excluirAssociacao.append(element)\n                    EnturmarModel.delete_enturmar(element)\n                else:\n                    manter.append(element)\n\n            for adicionar in Fk_estudante_id['estudantes']:\n\n                if adicionar not in manter:\n                    novos.append(adicionar)\n                    EnturmarModel.create_enturmar(FK_turma_id, adicionar)\n\n            # print(excluirAssociacao)\n            # print(manter)\n            # print(novos)\n            # input()\n\n            return {'updated': {FK_turma_id: {'estudantes': Fk_estudante_id['estudantes']}}}, 200\n\n        except Exception:\n            return {'error': 'verifique a requisição !'}, 400","repo_name":"RussellCavalcante/educationtime","sub_path":"app/services/enturmar.py","file_name":"enturmar.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"18702438255","text":"class Solution:\n    def sortColors(self, nums):\n        top, mid, high = 0, 0, len(nums) - 1\n        while mid <= high:\n            if nums[mid] == 2:\n                nums[mid], nums[high] = nums[high], nums[mid]\n                high -= 1\n            elif nums[mid] == 1:\n                mid += 1\n            elif nums[mid] == 0:\n                nums[mid], nums[top] = nums[top], nums[mid]\n                mid += 1\n                top += 1\n        print(nums)\n\n\nSolution().sortColors([2, 0, 2, 1, 1, 0])\n\n# Dutch national flag algorithm\n'''The Dutch National Flag Algorithm works by partitioning the input array into three sections: elements less than a given value (e.g., 0), elements equal to the given value (e.g., 1), and elements greater than the given value (e.g., 2). The goal is to arrange the elements in such a way that all 0s appear before all 1s, and all 1s appear before all 2s.\n\nThe algorithm maintains three pointers: low, mid, and high. 
These pointers divide the array into four regions:\n\nElements before the low pointer represent 0s.\nElements between the low and mid pointers represent 1s.\nElements after the high pointer represent 2s.\nElements between the mid and high pointers are unprocessed and need to be examined.\nThe algorithm iterates through the array and performs the following operations:\n\nInitialize low and mid pointers to the start of the array (index 0) and high pointer to the end of the array (index n-1, where n is the length of the array).\nIterate while the mid pointer is less than or equal to the high pointer.\nIf the element at the mid pointer is 0, swap it with the element at the low pointer, and increment both the mid and low pointers.\nIf the element at the mid pointer is 1, increment only the mid pointer.\nIf the element at the mid pointer is 2, swap it with the element at the high pointer, and decrement the high pointer.\nRepeat steps 2 to 5 until the mid pointer crosses the high pointer.\nAfter the algorithm completes, the array will be sorted with all 0s appearing before all 1s, and all 1s appearing before all 2s.'''","repo_name":"kambampatirahul/DSA","sub_path":"leetcode/Arrays/Sort Colors(75).py","file_name":"Sort Colors(75).py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"47775979767","text":"# TASK 25\n# Compulsory Task 1 and 2\n# task_manager.py\n# written by Tintswalo Anicky Makhubele\n# date: 25 march 2020\n# Function: This program manages tasks assigned to each team member of a business\n\n\ndef reg_user():\n # Tis function asks the user for a new username and password and The user should also be asked to confirm the password.\n # if the username entered is already in user.txt, the user must be asked to enter a different username \n # If the value entered to confirm the password matches the value of the password, the username and password should be written to user.txt in the appropriate format.\n \n #condition statememnt should the user trying to regiser a new user not be admin\n if username != \"admin\":\n print()\n print (\"Only admin can register a new user,select another option!\\n\".upper() ) \n \n \n else:\n \n print(\"To register a new user,please enter the following:\\n\".upper())\n\n\n\n # Ask the user to enter a new username\n new_username=input(\"Enter a new username: \")\n\n # open and read user.txt file\n user_file= open ( \"user.txt\" , \"r\")\n\n correct= False\n\n #loop through each line in the user.txt file and store each line in a list\n for line in user_file:\n \n #remove \\n in each line\n line = line.strip()\n details = line.split(\", \")\n\n #check if the username already exist\n if new_username != details[0]:\n correct = False\n else:\n correct = True\n\n while correct:\n \n #Display an appropriate error message if the user enters a username that is not listed in user.txt or enters a username that already exists.\n print(\"\\nUsername already exist,please enter a different username!\\n\".upper())\n \n #The user should repeatedly be asked to enter a new username until they provide different username.\n new_username=input(\"Username: \")\n correct= False\n \n user_file= open ( \"user.txt\" , \"r\")\n for line in user_file:\n #remove \\n in each line\n line = line.strip()\n details = line.split(\", \")\n\n if new_username != details[0]:\n correct = False\n else:\n correct = True\n break\n user_file.close()\n\n # ask user to enter and confirm a new password for 
the user-\n new_password=input(\"Enter a new password: \")\n confirm=input(\"Confirm password: \")\n\n if new_password == confirm:\n \n user_file= open ( \"user.txt\" , \"a\") #open and append user.txt\n new_user=(\"\\n\" + new_username + \", \"+ new_password)\n\n # store the new user in user.txt file\n user_file.write(new_user)\n\n # close the user.txt file\n user_file.close()\n\n print(f\"\\n{new_username} is successfully registered and added to user.txt!\\n\")\n \n add=False\n else:\n add = True\n\n while add:\n \n #Display an appropriate error message if the user enters a confirm password that doesn't match the password entered\n print(\"\\nthe value entered to confirm the password does not match the value of the password,please try again!\\n\".upper())\n \n # The user should repeatedly be asked to enter a password tat matches with confirm password\n new_password=input(\"Enter a new password: \")\n confirm=input(\"Confirm password: \")\n\n if new_password == confirm:\n\n user_file= open ( \"user.txt\" , \"a\") #open and append user.txt\n new_user=(\"\\n\" + new_username + \", \"+ new_password)\n\n # store the new user in user.txt file\n user_file.write(new_user)\n\n # close the user.txt file\n user_file.close()\n \n print(f\"\\n{new_username} is successfully registered and added to user.txt!\\n\")\n add = False\n \ndef add_task():\n \n # This function asks the user to enter the username of the person the task is assigned to, the title of the task, a description of the task and the due date of the task. \n # The data about the new task should be written to tasks.txt and The date on which the task is assigned should be the current date. \n\n #Import packages\n import datetime\n from datetime import date\n \n task_file = open ( \"tasks.txt\" , \"a\") #open and append tasks.txt\n \n print(\"~\"*132)\n \n print(\"\\nTo add a task,please enter the following details:\\n\".upper())\n\n # get input about the new task from the user \n username1 =input(\"Enter the username of the person that you assigning the task to: \")\n task = input(\"Enter the title of the task: \")\n description = input(\"Enter the task description: \")\n date = date.today()\n due = input(\"Enter the due date for the task in this format(YYYY-MM-DD): \")\n completed = \"No\"\n \n add_task = \"\\n\" + username1 + \", \" + task + \", \" + description + \", \" + str(date) + \", \" + due + \", \" + completed\n \n # write the task in tasks.txt\n task_file.write(add_task)\n\n print(f\"\\nyou have successfuly assigned a task to {username1} in tasks.txt!\\n\".upper())\n\n task_file.close #close tasks.txt\n\ndef view_all():\n\n # This function display the information for each task on the screen\n task_file = open ( \"tasks.txt\" , \"r\") #reopen and append tasks.txt\n task_lines = task_file.readlines()\n \n for num, line in enumerate (range(len(task_lines)), start = 0):\n task_line = task_lines[line].split(\", \")\n print(f\"Task {num} \\nASSIGNED TO : {task_line[0]} \\nTASK : {task_line[1]} \\nTASK DESCRIPTION: {task_line[2]} \\nDATE ASSIGNED : {task_line[3]} \\nDUE DATE : {task_line[4]} \\nTASK COMPLETED? 
: {task_line[5]}\\n\")\n \n task_file.close #close tasks.txt\n\ndef view_mine():\n\n # This function only display all the tasks that have been assigned to the user that is currently logged-in and gives then an option to mark the task as complete or edit the task.\n # If the user chooses to mark a task as complete, the ‘Yes’/’No’ value that describes whether the task has been completed or not should be changed to ‘Yes’\n # When the user chooses to edit a task, the username of the person to whom the task is assigned or the due date of the task can be edited. The task can only be edited if it has not yet been completed\n \n task_file = open ( \"tasks.txt\" , \"r\") #reopen and append tasks.txt\n task_lines = task_file.readlines()\n\n #program will search for the username in text files and print out their allocated tasks\n print (f\"{username},the tasks assigned to you, are:\\n\")\n for num, line in enumerate (range(len(task_lines)), start = 0):\n task_line = task_lines[line].split(\", \")\n if username == task_line[0]:\n print(f\"Task {num} \\nASSIGNED TO : {task_line[0]} \\nTASK : {task_line[1]} \\nTASK DESCRIPTION: {task_line[2]} \\nDATE ASSIGNED : {task_line[3]} \\nDUE DATE : {task_line[4]} \\nTASK COMPLETED? : {task_line[5]}\\n\")\n \n task_file.close #close tasks.txt \n \n edit = False\n while edit == False: \n # user chooses which one of their tasks they want to mark as complete or edit. \n task_number = int(input(\"Please enter a task number to edit a task or -1 for main menu: \"))\n\n # -1 used to allow the user to return to the main menu\n if task_number == -1:\n edit = True \n\n else:\n task_file = open ('tasks.txt', 'r')\n # if the user chooses to mark their task complete, we create a list where we store all the tasks.\n # then we use the number enetered by the user to go to their specific task. \n my_list = []\n my_list = task_file.read().splitlines()\n my_list_item = my_list[task_number].split(\", \") # selecting the task to be marked as complete so that we can edit item task complete\n \n mark_edit = input (\"\\nDo you want to mark this task as complete or edit? (enter 'c' or 'e')\\n\")\n # if the user chooses to mark the task complete, we take the line of that task entered by the user, insert 'Yes'\n if mark_edit.lower() == \"c\":\n my_list_item[5] = 'Yes'\n \n # after inserting 'Yes', we join the line back as it was originally in the list\n my_list_join = \", \".join(my_list_item)\n \n # then after joining the line, we insert it back into the tasks list at the very same position\n my_list[task_number] = my_list_join\n \n # finaly, we write the whole list back in the text file\n task_file1 = open ('tasks.txt', 'w')\n for item in my_list:\n task_file1.write(f\"{item}\\n\")\n task_file1.close\n print(f\"\\nTask {task_number} marked as complete!\".upper())\n edit = True\n\n # if the user chooses to edit the task, if the task has already been marked as complete, we let the user know that the task cannot be edited.\n elif mark_edit.lower() == 'e':\n if my_list_item[5] == 'Yes':\n print(f\"\\nTask {task_number} has already been completed, you cannot edit it\\n\".upper())\n else:\n # if the username can edit the task, then we ask them to choose what they want to edit which can only be either the username or the due date\n # once the user makes their choice, the same logic used above is applied. 
We take the specific line in the task list where the changes have to be made\n # then we change either the username or the due date, then we join the line back to its original form and insert it back in the tasks list with the necessary changes made.\n choice1 = input (\"Enter (u) to edit username or (d) to edit the task due date: \")\n if choice1.lower() == \"u\":\n usernameChange = input(\"\\nEnter the new username: \")\n my_list_item[0] = usernameChange\n my_list_join = \", \".join(my_list_item)\n my_list[task_number] = my_list_join\n #print(my_list)\n task_file2 = open ('tasks.txt', 'w')\n for item in my_list:\n task_file2.write(f\"{item}\\n\")\n task_file2.close()\n print(f\"\\nTask {task_number} is now assigned to {usernameChange}\\n\")\n\n elif choice1.lower() == \"d\":\n date_modification = input (\"Enter the new due date (YYYY-MM-DD): \")\n my_list_item[4] = date_modification\n my_list_join = \", \".join(my_list_item)\n my_list[task_number] = my_list_join\n \n task_file3 = open ('tasks.txt', 'w')\n for element in my_list:\n task_file3.write(f\"{element}\\n\")\n task_file3.close\n print (f\"\\nThe new due date for Task {task_number} is {date_modification}\\n\")\n break \n \n task_file.close()\n\ndef reports():\n\n # When the user chooses to generate reports, two text files, called task_overview.txt and user_overview.txt , should be generated\n\n #Import packages\n import datetime\n from datetime import date\n\n # the reports function writes reports in 2 different text files. The users_overview file and the task overview file.\n read_task3 = open ('tasks.txt', 'r')\n task_count_list = read_task3.read().splitlines()\n read_task3.close()\n list_length = len(task_count_list)\n #print(f\"Total tasks is {list_length}\")\n\n #counters used to count rhe number of tasks done, incomplete and overdue\n countYes = 0\n countNo = 0\n overdue = 0\n today = date.today() # stores the current date\n\n # counting the number of tasks completed and tasks that are not yet completed \n for line in task_count_list:\n if \"Yes\" in line:\n countYes +=1\n else:\n countNo +=1\n #print (f\"completed tasks is {countYes} and uncompleted is {countNo}\")\n\n # taking each portion of the due date in order to convert it to a datetime format so that we can compare it to the current date and determin if a task is overdue\n for item in task_count_list:\n due_date = item.split(\", \")[4]\n date_month = due_date[5:7]\n date_year = due_date[0:4]\n date_day = due_date[8::]\n\n # casting the due date into a date format\n dueDateInt = date(int(date_year), int(date_month), int(date_day))\n\n # comparing the dates to determine if the task is overdue\n if dueDateInt < today and \"No\" in item:\n overdue +=1\n #print(f\"Overdue is {overdue}\")\n # the following lines calculates the percentage of incomplete tasks and the percentage of tasks that are overdue.\n percentage_incomplete = (countNo/list_length) * 100\n #print(f\"The percentage of incomplete tasks is {percentage_incomplete:.2f}%\")\n percentage_overdue = (overdue/list_length) * 100\n #print(f\"The percentage of overdue tasks is {percentage_overdue:.2f}%\")\n\n # once all the necessary calculations are done, we write the results in the overview text file\n outfile = open('task_overview.txt', 'w')\n outfile.write(f\"The total number of tasks is {list_length} \\nThe total number of completed tasks is {countYes} \\nThe total number of uncompleted tasks is {countNo} \\nThe total number of tasks that are overdue and uncompleted is {overdue} \\nThe percentage of incomplete 
tasks is {percentage_incomplete:.2f}% \\nThe percentage of tasks that are overdue is {percentage_overdue:.2f}%\")\n outfile.close()\n\n \n # the same logic used above is applied here but this time for each user. \n open_overview = open ('user_overview.txt', 'w')\n line_out = \"\"\n\n \n users_read = open ('user.txt', 'r')\n list_users = users_read.read().splitlines()\n users_read.close()\n list_users_length = len(list_users)\n\n tasks_read = open ('tasks.txt', 'r')\n tasks_list = tasks_read.read().splitlines()\n tasks_list_length = len(tasks_list)\n tasks_read.close()\n #print(tasks_list_length)\n #print(list_users_length)\n\n user_tasks_count = 0\n user_completed_tasksCount = 0\n user_incomplete_tasksCount = 0\n user_overdue_taskCount = 0\n usernames = [] # stores the usernames so that we can display details for each username\n user_tasks = []\n \n # for each username, we display the number of tasks assigned to that user, the percentage of tasks assigned to that user, the percentage of tasks completed, uncompleted and overdue.\n for line in list_users:\n usernames.append(line.split(\", \")[0])\n\n # writing in the first line, the total number of users and tasks.\n # then for each user, we find the total number of tasks, the percentage of tasks completed, uncompleted and overdue.\n # To do this, we use counters which are set to 0 above and incremented once a specific condition is met.\n open_overview.write(f\"The total number of users is {list_users_length} and the total number of tasks is {tasks_list_length}\\n\")\n for username in usernames:\n for task in range(0, len(tasks_list)):\n due_date1 = tasks_list[task].split(\", \")[4]\n date_month1 = due_date1[5:7]\n date_year1 = due_date1 [0:4]\n date_day1 = due_date1 [8::]\n due_date_int1 = date(int(date_year1), int(date_month1), int(date_day1))\n today1 = date.today()\n if f\"{username}\" in f\"{tasks_list[task]}\":\n user_tasks_count += 1\n if f\"{username}\" in f\"{tasks_list[task]}\" and \"Yes\" in f\"{tasks_list[task]}\":\n user_completed_tasksCount += 1\n if f\"{username}\" in f\"{tasks_list[task]}\" and \"No\" in f\"{tasks_list[task]}\":\n user_incomplete_tasksCount += 1\n if f\"{username}\" in f\"{tasks_list[task]}\" and \"No\" in f\"{tasks_list[task]}\" and due_date_int1 < today1:\n user_overdue_taskCount += 1\n user_tasks.append(username)\n user_tasks.append(user_tasks_count)\n user_taskPer = round((user_tasks_count/tasks_list_length)*100, 2)\n user_tasks.append(user_completed_tasksCount)\n user_completed_tasksPer = round((user_completed_tasksCount/tasks_list_length)*100, 2)\n user_tasks.append(user_incomplete_tasksCount)\n user_incomplete_tasksPer = round((user_incomplete_tasksCount/tasks_list_length)*100, 2)\n user_tasks.append(user_overdue_taskCount)\n user_overdue_tasksPer = round((user_overdue_taskCount/tasks_list_length)*100, 2)\n # for each user, we write the necessary details in the user overview text file\n line_out = username + \" \" + \"has\" + \" \" + str(user_tasks_count) + \" tasks, \" + str(user_taskPer) + \"%\" + \" of tasks, \" + \" \" + str(user_completed_tasksPer) + \"%\" + \" tasks completed\" + \", \" + str(user_incomplete_tasksPer) + \"%\" + \" incomplete tasks\" + \", \"+ str(user_overdue_tasksPer) + \"% \" + \" tasks overdue\" + \"\\n\"\n open_overview.write(line_out)\n # reseting the counters so that the calculations are done properly for each user.\n user_tasks_count = 0\n user_completed_tasksCount = 0\n user_incomplete_tasksCount = 0\n user_overdue_taskCount = 0\n open_overview.close()\n\ndef 
def view_statistics():\n\n    choice = input(\"\\nPlease enter what you want to see 'users overview' or 'tasks overview' (enter 'u' or 't') \")\n    # when admin chooses 'users overview', we create a list to store all the lines in the file user_overview.txt.\n    # then the reports read from the text file are printed to the user in a user friendly manner.\n    if choice.lower() == \"u\":\n        user_overview = open('user_overview.txt', 'r')\n        user_overview_list = user_overview.read().splitlines()\n        user_overview.close()\n        for line1 in user_overview_list:\n            print(line1)\n\n    # similarly, when the user chooses tasks overview, we create a list containing all the necessary details from the text file and we print the details in a user friendly manner.\n    elif choice.lower() == \"t\":\n        task_overview = open('task_overview.txt', 'r')\n        task_overview_list = task_overview.read().splitlines()\n        task_overview.close()\n        for line in task_overview_list:\n            print(line)\n\nuser_file = open(\"user.txt\", \"r\")\n\n#the user must enter a username and password.\nprint(\"~\"*132)\n\nprint(\"ENTER YOUR LOGIN DETAILS:\")\n\nprint()\n\nusername=input(\"USERNAME: \")\npassword=input(\"PASSWORD: \")\n\n#use a for loop to verify if the login details entered by the user are valid and registered in the user.txt file\n# NOTE: correct stays False once a matching username/password line is found; True means we must re-prompt below.\ncorrect = False\n\n#loop through each line in the user.txt file and store each line in a list\nfor line in user_file:\n\n    #remove \\n in each line\n    line = line.strip()\n    details = line.split(\", \")\n\n    if username == details[0] and password == details[1]:\n        correct = False\n        break\n    else:\n        correct = True\n\nwhile correct:\n    user_file.close()\n    user_file = open(\"user.txt\", \"r+\")\n    #Display an appropriate error message if the user enters a username that is not listed in user.txt or enters a valid username but not a valid password\n    print(\"Username or password is incorrect, please enter the correct username and password!\\n\")\n\n    #The user should repeatedly be asked to enter a valid username and password until they provide appropriate credentials.\n    username=input(\"USERNAME: \")\n    password=input(\"PASSWORD: \")\n    correct = False\n\n    for line in user_file:\n        #remove \\n in each line\n        line = line.strip()\n        details = line.split(\", \")\n\n        if username == details[0] and password == details[1]:\n            correct = False\n            break\n        else:\n            correct = True\n\nprint(\"~\"*132)\nprint(f\"\\nwelcome {username}, you have successfully logged in.\\n\".upper())\n\n#while username is not admin\nwhile username != \"admin\":\n    print(\"~\"*132)\n    # display a menu once a user has successfully logged in\n    user_menu = input(\"\\nPLEASE SELECT ANY OF THE FOLLOWING:\\nr - register user \\na - add task \\nva - view all tasks \\nvm - view my tasks \\ne - exit \\n\")\n    print()\n\n    # If the user chooses ‘r’ to register a user, the function reg_user() must be called.\n    if user_menu.lower()==\"r\":\n        print(\"~\"*132)\n        reg_user()\n\n    # If the user chooses ‘a’ to add a task, the function add_task() must be called.\n    if user_menu.lower()==\"a\":\n        print(\"~\"*132)\n        add_task()\n\n    #If the user chooses ‘va’ to view all tasks, the function view_all() must be called.\n    if user_menu.lower()==\"va\":\n        print(\"~\"*132)\n        view_all()\n\n    #If the user chooses ‘vm’ to view their own tasks, the function view_mine() must be called.\n    if user_menu.lower()==\"vm\":\n        print(\"~\"*132)\n        view_mine()\n\n    if user_menu.lower()==\"e\":\n        print(\"\\nTHANK YOU FOR LOGGING IN, UNTIL NEXT TIME!\\n\")\n        break\n
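\n# Added aside (not in the original): the admin menu below repeats the four\n# shared branches of the user menu and adds 'gr' and 'vs'; a helper such as\n# handle_menu_choice(choice) (hypothetical) could remove the duplication.\n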
\"admin\": \n print(\"~\"*132) \n\n # display a new menu that includes viewing stats\n admin_menu=input(\"\\nPLEASE SELECT ANY OF THE FOLLOWING:\\n\\nr - register user \\na - add task \\nva - view all tasks \\nvm - view my tasks \\ngr - generate reports \\nvs - view statistics of tasks and users \\ne - exit \\n\")\n print()\n \n # If the user chooses ‘r’ to register a user, the function reg_user() must be called.\n if admin_menu.lower() == \"r\":\n print(\"~\"*132)\n reg_user()\n \n # If the user chooses ‘a’ to register a user, the function add_task() must be called. \n if admin_menu.lower() == \"a\":\n print(\"~\"*132)\n add_task()\n\n #If the user chooses ‘va’ to view all tasks, the function view_all() must be called. \n if admin_menu.lower() ==\"va\":\n print(\"~\"*132)\n view_all()\n \n #If the user chooses ‘vm’ to view all tasks, the function view_mine() must be called.\n if admin_menu.lower() ==\"vm\":\n print(\"~\"*132)\n view_mine()\n\n # if the user chooses to generate reports, two text files, called task_overview.txt and user_overview.txt , should be generated\n if admin_menu.lower() ==\"gr\":\n print(\"~\"*132)\n reports()\n\n print(\"Reports generated. Please check 'task_overview.txt' and 'user_overview.txt' to see them.\\n\")\n\n #for admin to view the statistics of the task if needed\n if admin_menu.lower() == \"vs\":\n \n print(\"~\"*132)\n generate_stats = input(\"To view statistics,please enter 'gr' to genarate stats firts: \")\n\n if generate_stats == \"gr\":\n reports()\n\n print(\"Reports generated.\\n\")\n\n view_statistics()\n\n \n if admin_menu.lower()==\"e\":\n print(\"\\nTHANK YOU FOR LOGGING IN, UNTIL NEXT TIME!\\n\")\n break\n\n#close both user and task files\nuser_file.close()\n\n#***********************************************************END**********************************************************","repo_name":"Tintswalo-Anicky/last-Capstone-level1","sub_path":"task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":23657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19627346168","text":"class Solution:\n def levelOrder(self, root: 'Node') -> List[List[int]]:\n levels = []\n if not root:\n return levels\n level = [root]\n while level:\n l = len(level)\n levels.append([node.val for node in level])\n for i in range(l):\n node = level[i]\n for child in node.children:\n if child:\n level.append(child)\n level = level[l:]\n return levels\n","repo_name":"menghuu/YALeetcode","sub_path":"python3/429. N叉树的层序遍历.py","file_name":"429. 
N叉树的层序遍历.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15448135348","text":"from typing import List, Union\n\nfrom sc2.data import Race\nfrom sc2.ids.unit_typeid import UnitTypeId\nfrom sc2.ids.upgrade_id import UpgradeId\nfrom sharpy.knowledges import *\nfrom sharpy.plans import *\nfrom sharpy.plans.acts import *\nfrom sharpy.plans.require import *\nfrom sharpy.plans.acts.protoss import *\nfrom sharpy.plans.tactics import *\nfrom sharpy.plans.tactics.protoss import *\n\n\nclass DistruptorBuild(BuildOrder):\n def __init__(self):\n build = BuildOrder(\n Step(\n UnitReady(UnitTypeId.PYLON),\n ChronoUnit(UnitTypeId.PROBE, UnitTypeId.NEXUS),\n skip=UnitExists(UnitTypeId.PROBE, 19),\n ),\n Step(\n None,\n ChronoUnit(UnitTypeId.IMMORTAL, UnitTypeId.ROBOTICSFACILITY),\n skip=UnitExists(UnitTypeId.IMMORTAL, 1, include_killed=True),\n ),\n Step(\n None,\n ChronoUnit(UnitTypeId.OBSERVER, UnitTypeId.ROBOTICSFACILITY),\n skip=UnitExists(UnitTypeId.OBSERVER, 1, include_killed=True),\n ),\n Step(\n None,\n ChronoUnit(UnitTypeId.DISRUPTOR, UnitTypeId.ROBOTICSFACILITY),\n skip=UnitExists(UnitTypeId.DISRUPTOR, 1, include_killed=True),\n ),\n SequentialList(\n ProtossUnit(UnitTypeId.PROBE, 16 + 6), # One base\n Step(UnitExists(UnitTypeId.NEXUS, 2), ProtossUnit(UnitTypeId.PROBE, 44)),\n ),\n Step(UnitReady(UnitTypeId.PYLON, 1), AutoPylon()),\n SequentialList(\n GridBuilding(UnitTypeId.PYLON, 1),\n GridBuilding(UnitTypeId.GATEWAY, 2, priority=True),\n BuildGas(2),\n GridBuilding(UnitTypeId.CYBERNETICSCORE, 1, priority=True),\n GridBuilding(UnitTypeId.ROBOTICSFACILITY, 1, priority=True),\n Tech(UpgradeId.WARPGATERESEARCH, UnitTypeId.CYBERNETICSCORE),\n GridBuilding(UnitTypeId.ROBOTICSBAY, 1, priority=True),\n Step(UnitExists(UnitTypeId.DISRUPTOR, 1, include_killed=True, include_not_ready=False), Expand(2),),\n BuildGas(4),\n ),\n BuildOrder(\n ProtossUnit(UnitTypeId.IMMORTAL, 1, priority=True, only_once=True),\n ProtossUnit(UnitTypeId.OBSERVER, 1, priority=True),\n ProtossUnit(UnitTypeId.DISRUPTOR, 4, priority=True),\n ProtossUnit(UnitTypeId.STALKER),\n SequentialList(\n Step(Minerals(300), GridBuilding(UnitTypeId.GATEWAY, 3, priority=True)),\n Step(UnitReady(UnitTypeId.NEXUS, 2), GridBuilding(UnitTypeId.GATEWAY, 6, priority=True)),\n ),\n ),\n )\n\n tactics = [\n MineOpenBlockedBase(),\n PlanCancelBuilding(),\n WorkerRallyPoint(),\n RestorePower(),\n DistributeWorkers(),\n Step(None, SpeedMining(), lambda ai: ai.client.game_step > 5),\n PlanWorkerOnlyDefense(), # Counter worker rushes\n PlanZoneDefense(),\n PlanZoneGather(),\n Step(UnitExists(UnitTypeId.DISRUPTOR, include_killed=True), PlanZoneAttack()),\n PlanFinishEnemy(),\n ]\n\n super().__init__(build, tactics)\n\n\nclass SharpSphereBot(KnowledgeBot):\n def __init__(self):\n super().__init__(\"Sharp Spheres\")\n\n async def create_plan(self) -> BuildOrder:\n return DistruptorBuild()\n\n\nclass LadderBot(SharpSphereBot):\n @property\n def my_race(self):\n return Race.Protoss\n","repo_name":"DrInfy/sharpy-sc2","sub_path":"dummies/protoss/disruptor.py","file_name":"disruptor.py","file_ext":"py","file_size_in_byte":3623,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"30"} +{"seq_id":"69906775126","text":"import pandas as pd\n\nfrom . 
import rayleigh\n\n\ndef analyze(samples, bounds, cross_sections, instrument):\n    # Check that the cross-sections are the right size.\n    samples, cross_sections = check_size(samples, cross_sections)\n\n    # Replace sample's wavelength with the cross_section's wavelength.\n    samples.columns = cross_sections[0].index\n\n    # Select wavelengths we care about (306 - 312).\n    samples, cross_sections = select_wavelengths(samples, cross_sections, 306, 312)\n\n    bounded_samples = instrument.bound_samples(samples, bounds)\n\n    # instrument.get_densities()\n\n    reflectivity = instrument.get_reflectivity(samples)\n\n    print(reflectivity.to_string())\n\n    absorption_all = pd.DataFrame()\n    fit_data_all = pd.DataFrame()\n    fit_curve_values_all = pd.DataFrame()\n    time_stamps = []\n    for index in bounded_samples[\"target\"].index:\n        time_stamps.append(index)\n        # compute this sample's absorption and fit, then append the results to the running DataFrames\n        absorption = instrument.get_absorption(index, reflectivity)\n        # concat indiv sample absorption to the df of all of the samples absorptions\n        absorption_all = pd.concat([absorption_all, absorption], axis=1)\n\n        x_data = absorption.index.to_numpy()\n        y_data = absorption.to_numpy()\n        fit_data, fit_curve_values = fit_curve_lm(cross_sections, x_data, y_data)\n\n        fit_data_all = pd.concat([fit_data_all, fit_data], axis=1)\n        fit_curve_values_all = pd.concat(\n            [fit_curve_values_all, pd.Series(fit_curve_values)], axis=1\n        )\n\n    absorption_all = absorption_all.T\n    absorption_all.index = time_stamps\n\n    fit_data_all = fit_data_all.T\n    fit_data_all.index = time_stamps\n\n    fit_curve_values_all = fit_curve_values_all.T\n    fit_curve_values_all.index = time_stamps\n\n    residuals_all = fit_data_all - absorption_all\n\n    # the timestamp associated with the highest fitted concentration\n    index_max_conc = fit_curve_values_all.idxmax()[0]\n    fit_data_highest = fit_data_all.loc[[index_max_conc]].squeeze()\n    absorption_highest = absorption_all.loc[[index_max_conc]].squeeze()\n    residuals_highest = residuals_all.loc[[index_max_conc]].squeeze()\n\n    return {\n        \"samples\": samples,\n        \"reflectivity\": reflectivity,\n        \"absorption_all\": absorption_all,\n        \"absorption_highest\": absorption_highest,\n        \"cross_sections_target\": cross_sections[0],\n        \"fit_data_all\": fit_data_all,\n        \"fit_data_highest\": fit_data_highest,\n        \"fit_curve_values\": fit_curve_values_all,\n        \"residuals_all\": residuals_all,\n        \"residuals_highest\": residuals_highest,\n    }\n
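\n\n# Added note (not in the original): check_size below trims one column or row\n# per pass until samples and every cross-section agree in length; truncating\n# both to the common minimum length up front would be a single-pass\n# alternative (a sketch, not verified against this pipeline's edge cases).\n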
\ndef check_size(samples, cross_sections):\n    done_flag = False\n    while not done_flag:\n        done_flag = True\n        for section in cross_sections:\n            diff = len(samples.columns) - len(section.index)\n            if diff > 0:\n                samples.drop(\n                    samples.columns[len(samples.columns) - diff], axis=1, inplace=True\n                )\n                done_flag = False\n                print(\"Dropped \", diff, \" columns from samples to match cross_sections\")\n            elif diff < 0:\n                section.drop(\n                    section.index[len(section.index) + diff], axis=0, inplace=True\n                )\n                done_flag = False\n                print(\"Dropped \", -diff, \" rows from cross_section to match samples\")\n    return samples, cross_sections\n\n\ndef select_wavelengths(samples, cross_sections, low_bound, high_bound):\n    wavelengths = cross_sections[0].index\n    wavelengths = wavelengths[(wavelengths > low_bound) & (wavelengths < high_bound)]\n    for i in range(len(cross_sections)):\n        cross_sections[i].index = cross_sections[0].index\n\n    for section in range(len(cross_sections)):\n        # line below was added because of mismatch in index of multiple cross-sections\n        cross_sections[section] = cross_sections[section].loc[wavelengths]\n\n    return samples[wavelengths], cross_sections\n\n\ndef get_densities():\n    # find density of the gasses\n    N2_dens = rayleigh.Density_calc(pressure=620, temp_K=298)\n    He_dens = rayleigh.Density_calc(pressure=620, temp_K=298)\n    target_dens = rayleigh.Density_calc(pressure=620, temp_K=298)\n\n    return {\"N2\": N2_dens, \"He\": He_dens, \"target\": target_dens}\n\n\n# Curve fitting function that relies on lmfit.minimize()\ndef fit_curve_lm(cross_sections, xdata, ydata):\n    import lmfit\n\n    # Create Parameter objects. There should be as many concentration parameters as there are cross-sections.\n    params = lmfit.Parameters()\n    for i in range(len(cross_sections)):\n        name = \"concentration\" + str(i)\n        if i == 0:\n            params.add(\"concentration\", value=1.34e12, min=0)\n        else:\n            params.add(name, value=1.34e12, min=0)\n    params.add(\"a\", value=1)\n    params.add(\"b\", value=1)\n    params.add(\"c\", value=1)\n\n    # Returns fitted y values. Accepts concentrations as Parameter objects.\n    def func(*args):\n        # load in the arguments\n        params = list(args)\n\n        wavelength = params[0] # the first param is always the wavelengths(xdata)\n        concentration_param = params[1:-3] # these params are always the concentrations\n        # The last three params are always the polynomial\n        a = params[-3]\n        b = params[-2]\n        c = params[-1]\n        result = 0.0\n\n        concentration = []\n        # this loop handles the Parameter objects and puts it into a usable list\n        # final_func omits this for loop.\n        for i in range(len(concentration_param[0])):\n            concentration.append(concentration_param[0][i].value)\n        # multiply each of the cross-sections by its corresponding concentration\n        for i in range(len(cross_sections)):\n            section = cross_sections[i]\n            result += section[section.columns[0]] * concentration[i]\n        # add the quadratic polynomial baseline in a single expression\n        result += a * wavelength**2 + b * wavelength + c\n        return result\n
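\n    # Hedged aside (added commentary, not in the original): the quadratic\n    # baseline a*wavelength**2 + b*wavelength + c could equally be written as\n    #     import numpy as np  # numpy is not otherwise imported in this module\n    #     baseline = np.polyval([a, b, c], wavelength)\n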
\n    # Returns the final fitted values. Accepts concentrations in a list of floats instead of a list of Parameter objects.\n    def final_func(*args):\n        params = list(args)\n\n        wavelength = params[0] # The first param is always the wavelengths(xdata).\n        concentration = params[1:-3] # These params are always the concentrations.\n        # The last three params are always the polynomial.\n        a = params[-3]\n        b = params[-2]\n        c = params[-1]\n        result = 0.0\n\n        for i in range(len(cross_sections)):\n            section = cross_sections[i]\n            result += section[section.columns[0]] * concentration[i]\n\n        result += a * wavelength**2 + b * wavelength + c\n        return result\n\n    # Finds the residual between the fitted y values and the actual y values.\n    def residual(params, x, ydata):\n        concentration = []\n\n        for i in range(len(cross_sections)):\n            name = \"concentration\" + str(i)\n            if i == 0:\n                concentration.append(params[\"concentration\"])\n            else:\n                concentration.append(params[name])\n        a = params[\"a\"]\n        b = params[\"b\"]\n        c = params[\"c\"]\n        y_fit = func(x, concentration, a, b, c)\n        return y_fit - ydata\n\n    # Minimize the residual using the parameters given.\n    fit = lmfit.minimize(residual, params, args=(xdata, ydata), method=\"leastsq\")\n    # Grab the concentration and polynomial values from the parameter objects.\n    results = []\n    for key, value in fit.params.valuesdict().items():\n        results.append(value)\n    # Return the fitted data and the concentration and polynomial values.\n    return final_func(xdata, *results), results\n","repo_name":"NET-BYU/bbceas_data_analysis","sub_path":"bbceas_processing/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"41029943804","text":"\"\"\"Contains FastAPI middlewares\"\"\"\nimport http\nimport json\nimport math\nimport time\n\nfrom fastapi import Response, status\nfrom starlette.middleware.base import BaseHTTPMiddleware\n\nfrom api.utils.logging import EMPTY_VALUE\nfrom api.v1.schemas import APIRequestJSONLogSchema\n\n\nclass LoggingMiddleware(BaseHTTPMiddleware):\n    \"\"\"Middleware to process logs.\"\"\"\n\n    def __init__(self, app, *, logger) -> None:\n        self._logger = logger\n        super().__init__(app)\n\n    async def dispatch(self, request, call_next):\n        # pylint: disable=too-many-locals\n        start_time = time.time()\n        exception_object = None\n\n        # REQUEST\n        try:\n            raw_request_body = await request.body()\n\n            await self.set_body(request, raw_request_body)\n            raw_request_body = await self.get_body(request)\n\n            request_body = raw_request_body.decode()\n        except Exception:\n            request_body = EMPTY_VALUE\n\n        server = request.get(\"server\", (\"localhost\", 8000))\n        request_headers = dict(request.headers.items())\n\n        # RESPONSE\n        try:\n            response = await call_next(request)\n        except Exception as exc:\n            response_body = bytes(status.HTTP_500_INTERNAL_SERVER_ERROR)  # NB: bytes(n) yields n zero bytes, not the digits of n\n\n            response = Response(\n                content=response_body,\n                status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR.real,\n            )\n\n            exception_object = exc\n            response_headers = {}\n        else:\n            response_headers = dict(response.headers.items())\n            response_body = EMPTY_VALUE\n\n            async for chunk in response.body_iterator:\n                response_body += chunk\n\n            response = Response(\n                content=response_body,\n                status_code=response.status_code,\n                headers=dict(response_headers),\n                media_type=response.media_type,\n            )\n\n        duration = math.ceil((time.time() - start_time) * 1000)\n
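\n        # Added note (not from upstream): duration is wall-clock latency in\n        # whole milliseconds (math.ceil rounds up); the Response was rebuilt\n        # above because body_iterator can only be consumed once.\n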
\n        api_request_log = APIRequestJSONLogSchema(\n            request_uri=str(request.url),\n            request_protocol=await self.get_protocol(request),\n            request_method=request.method,\n            request_path=request.url.path,\n            request_host=f\"{server[0]}:{server[1]}\",\n            request_size=int(request_headers.get(\"content-length\", 0)),\n            request_content_type=request_headers.get(\n                \"content-type\", EMPTY_VALUE\n            ),\n            request_headers=json.dumps(request_headers),\n            request_body=request_body,\n            remote_ip=request.client[0],\n            remote_port=request.client[1],\n            response_status_code=response.status_code,\n            response_size=int(response_headers.get(\"content-length\", 0)),\n            response_headers=json.dumps(response_headers),\n            response_body=response_body,\n            duration=duration,\n        ).dict()\n\n        message = (\n            f'{\"Error\" if exception_object else \"Response\"} '\n            f\"with code {response.status_code} \"\n            f'on request {request.method} \"{str(request.url)}\". '\n            f\"Duration: {duration}ms\"\n        )\n\n        self._logger.info(\n            message,\n            extra={\"api_request_log\": api_request_log},\n            exc_info=exception_object,\n        )\n\n        return response\n\n    @staticmethod\n    async def get_protocol(request):\n        \"\"\"Get request protocol.\"\"\"\n        protocol = str(request.scope.get(\"type\", \"\"))\n        http_version = str(request.scope.get(\"http_version\", \"\"))\n\n        if protocol.lower() == \"http\" and http_version:\n            return f\"{protocol.upper()}/{http_version}\"\n\n    @staticmethod\n    async def set_body(request, body):\n        \"\"\"Set request body.\"\"\"\n\n        async def receive():\n            nonlocal body\n            return {\"type\": \"http.request\", \"body\": body}\n\n        # pylint: disable=protected-access\n        request._receive = receive\n\n    async def get_body(self, request):\n        \"\"\"Get request body.\"\"\"\n        if request.headers[\"Content-Type\"] == \"application/json\":\n            body = await request.body()\n            await self.set_body(request, body)\n        else:\n            body = EMPTY_VALUE\n\n        return body\n","repo_name":"antkrit/nn-api","sub_path":"api/v1/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"26474547564","text":"\"\"\"\nPerlin terrain object.\n30/10/21\n\"\"\"\nfrom perlin_noise import PerlinNoise\nfrom math import sin\n\nclass PerlinTerrain:\n    def __init__(this, _nObjs=1,_freq=64,_amp=12,\n                 _octs=3,_seed=99):\n        this.noises = []\n        this.seed = _seed\n        this.seed = (ord('j')+ord('o'))  # NB: immediately overrides the _seed argument with a fixed value\n        this.freq = _freq\n        this.amp = _amp\n        this.octs = _octs\n\n        for i in range(_nObjs):\n            noise = PerlinNoise(octaves=this.octs,\n                                seed=this.seed)\n            this.noises.append(noise)\n    \n    def findHeight(this,_x,_z,sineBumps=True):\n        from ursina import math  # supplies the math.sin/math.cos used below\n        y = 0\n        ht = 0\n        for i in range(len(this.noises)):\n            y += ((this.noises[i]([ _x/this.freq,\n                                    _z/this.freq]))*\n                                    this.amp)\n        if sineBumps==True:\n            y+= math.sin(_x)*1-0.5\n            y+= math.cos(_z)*1-0.5\n        return y","repo_name":"RedHenDev/python","sub_path":"ursina dev/txt_to_ent/rh_perlin_noise.py","file_name":"rh_perlin_noise.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"}
+{"seq_id":"7988216584","text":"def calc_base(_c):\n    c = bin(_c)[2:]\n    while len(c) % 8 != 4:\n        c = '0' + c\n    c = c[:64]\n    m = int.from_bytes(b'CakeCTF{', 'little')\n    m = bin(m)[2:]\n    assert(len(m) == 63)\n    r = ''\n    for i in range(63):\n        r = str(int(c[i])^int(m[-(i+1)])) + r\n    return r\n\ndef LFSR_call(_r, bitlength):\n    r = _r\n    r_ls = []\n    for i in range(bitlength):\n        r_ls.append(r & 1)\n        b = (r & 1) ^\\\n            ((r & 2) >> 1) ^\\\n            ((r & 8) >> 3) ^\\\n            ((r & 16) >> 4)\n        r = (r >> 1) | (b << 63)\n    return r_ls\n
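\n# Added note (not in the original solver): LFSR_call clocks a 64-bit\n# Fibonacci-style LFSR whose feedback XORs bit positions 0, 1, 3 and 4, and\n# returns the low bit captured at each of the bitlength steps, e.g.\n#     bits = LFSR_call(r1, 64)  # first 64 keystream bits for state r1\n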
\ndef decrypt(_c, ls):\n    c = bin(_c)[2:]\n    while len(c) % 8 != 4:\n        c = '0' + c\n    assert(len(c) == len(ls))\n    m = ''\n    for i in range(len(c)):\n        m += str(int(c[i])^ls[i])\n    return int(m[::-1], 2).to_bytes(len(ls)//8+1, 'little')\n\nc = 0x58566f59979e98e5f2f3ecea26cfb0319bc9186e206d6b33e933f3508e39e41bb771e4af053\n\ndef main(c):\n    r = calc_base(c)\n    r1 = int('0' + r, 2)\n    r2 = int('1' + r, 2)\n\n    r_ls1 = LFSR_call(r1, (c.bit_length()//8)*8+4)\n    r_ls2 = LFSR_call(r2, (c.bit_length()//8)*8+4)\n\n    print(decrypt(c, r_ls1))\n    print(decrypt(c, r_ls2))\n\nmain(c)\n","repo_name":"LorseKudos/CTF","sub_path":"CakeCTF 2021/crypto/improvisation/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"3729341264","text":"# Define a function that receives an indeterminate quantity of numbers\n# and adds them all together.\n\ndef funcion_sumatorio(*numeros):\n    sumatorio = 0\n    for numero in numeros:\n        sumatorio = sumatorio + numero\n    return print(f\"The sum of the numbers is: {sumatorio}\")\n\nfuncion_sumatorio(1,2,3,4,5,10,20)\n","repo_name":"JTamarit/Apuntes_clase_Python","sub_path":"ejercicios_clase/ejercicio_8.py","file_name":"ejercicio_8.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"35721906690","text":"from .base import BaseEnv\nfrom .atari import AtariEnv\nfrom omegaconf import OmegaConf\nfrom src.common.class_utils import all_subclasses\n\nENVS = {subclass.get_name():subclass\n        for subclass in all_subclasses(BaseEnv)}\n\ndef build_env(cfg): \n    cfg = OmegaConf.to_container(cfg)\n    env_type = cfg.pop('type')\n    if env_type == 'dmc':\n        # NOTE: make_dmc_env is not defined or imported in this module as shown\n        train_env = make_dmc_env(**cfg)\n        eval_env = make_dmc_env(**cfg)\n    else:\n        env = ENVS[env_type] \n        train_env = env(**cfg)\n        eval_env = env(**cfg)\n    \n    return train_env, eval_env\n","repo_name":"dojeon-ai/SimTPR","sub_path":"src/envs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"30"}
+{"seq_id":"32401505186","text":"# SPDX-License-Identifier: LGPL-2.1+\n\nimport os\nimport os.path\nimport shutil\nimport urllib.request\nimport uuid\nfrom typing import Callable, Dict, List\n\nfrom .btrfs import btrfs_subvol_delete\nfrom .types import CommandLineArguments\nfrom .ui import die, run_visible\n\n\ndef mount_bind(what: str, where: str) -> None:\n    os.makedirs(what, 0o755, True)\n    os.makedirs(where, 0o755, True)\n    run_visible([\"mount\", \"--verbose\", \"--bind\", what, where], check=True)\n\ndef umount(where: str) -> None:\n    # Ignore failures\n    run_visible([\"umount\", \"--verbose\", \"--recursive\", \"-n\", where])\n\ndef patch_file(filepath: str, line_rewriter: Callable[[str], str]) -> None:\n    temp_new_filepath = filepath + \".tmp.new\"\n\n    with open(filepath, \"r\") as old:\n        with open(temp_new_filepath, \"w\") as new:\n            for line in old:\n                new.write(line_rewriter(line))\n\n    shutil.copystat(filepath, temp_new_filepath)\n    os.remove(filepath)\n    shutil.move(temp_new_filepath, filepath)\n
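\n# Added usage sketch (not from upstream mkosi): patch_file rewrites a file in\n# place, line by line, keeping its permissions via shutil.copystat, e.g.\n#     patch_file(\"/etc/hostname\", lambda line: line.replace(\"old\", \"new\"))\n# where the path and the replacement are purely illustrative.\n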
\"--as-pid2\",\n \"--register=no\",\n \"--keep-unit\",\n \"--bind=\" + var_tmp(workspace) + \":/var/tmp\",\n \"--setenv=SYSTEMD_OFFLINE=1\" ]\n\n if network:\n # If we're using the host network namespace, use the same resolver\n cmdline += [\"--bind-ro=/etc/resolv.conf\"]\n else:\n cmdline += [\"--private-network\"]\n\n cmdline += [ \"--setenv={}={}\".format(k, v) for k, v in env.items() ]\n\n if nspawn_params:\n cmdline += nspawn_params\n\n cmdline += ['--', *cmd]\n run_visible(cmdline, check=True)\n\ndef check_if_url_exists(url: str) -> bool:\n req = urllib.request.Request(url, method=\"HEAD\")\n try:\n if urllib.request.urlopen(req):\n return True\n return False\n except:\n return False\n\ndef mkdir_last(path: str, mode: int=0o777) -> str:\n \"\"\"Create directory path\n\n Only the final component will be created, so this is different than mkdirs().\n \"\"\"\n try:\n os.mkdir(path, mode)\n except FileExistsError:\n if not os.path.isdir(path):\n raise\n return path\n\ndef var_tmp(workspace: str) -> str:\n return mkdir_last(os.path.join(workspace, \"var-tmp\"))\n\ndef unlink_try_hard(path: str) -> None:\n try:\n os.unlink(path)\n except:\n pass\n\n try:\n btrfs_subvol_delete(path)\n except:\n pass\n\n try:\n shutil.rmtree(path)\n except:\n pass\n\ndef empty_directory(path: str) -> None:\n\n try:\n for f in os.listdir(path):\n unlink_try_hard(os.path.join(path, f))\n except FileNotFoundError:\n pass\n\ndef check_root() -> None:\n if os.getuid() != 0:\n die(\"Must be invoked as root.\")\n","repo_name":"datawire/testbench","sub_path":"testbench/mkosi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27688692601","text":"number = int(input())\nnumber += 1\n\nis_valid = False\nwhile not is_valid:\n num_string = str(number)\n range_num = len(num_string)\n for num in range(range_num):\n if num_string.count(num_string[num]) > 1:\n number += 1\n break\n else:\n if num == range_num - 1:\n is_valid = True\n print(number)\n","repo_name":"StanDobrev11/Python_Fundamentals","sub_path":"02_Data_Types_and_Variables_-_Lab/06_Next_Happy_Year.py","file_name":"06_Next_Happy_Year.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"24465426835","text":"import numbers\nimport math\n\n\nclass Individual:\n \"\"\"An \"Individual\" in the Evolutionary Strategy is fully represented\n by the three properties: fitness value(scalar), vector of parameters\n and sigma(vector or scalar). Fitness determines the individuals rank,\n compared to other individuals. Parameters is the real-value vector\n to be optimized. 
Sigma is the real-value vector, responsible for the mutation\n strength(endogenous parameter).\"\"\"\n\n def __init__(self, fitness, parameters, sigma):\n if isinstance(fitness, numbers.Real):\n if math.isfinite(fitness):\n self.fitness = fitness\n else:\n raise ValueError('Fitness value cannot be NaN or Inf')\n elif fitness is None:\n self.fitness = None\n else:\n raise ValueError('Fitness value should be either real or None')\n\n if len(parameters) == 0:\n raise ValueError('Parameters array cannot be empty')\n else:\n self.params = parameters # values in range [0.0, 1.0]\n\n if len(sigma) == 0:\n raise ValueError('Sigma array cannot be empty')\n else:\n self.sigma = sigma\n\n if len(sigma) != 1:\n if len(sigma) != len(parameters):\n raise ValueError('Sigma size should be either the same as parameters size or one')\n\n\n\n","repo_name":"croaxx/StochasticEvolutionaryOptimizer","sub_path":"Model/Individual/individual.py","file_name":"individual.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"20128802519","text":"import time\n\n\ndef atlantis(light_obj, np):\n \"\"\"Create a pink,blue,teal effect.\"\"\"\n np.fill((255, 20, 147))\n while light_obj.loop:\n try:\n for count in range(light_obj.num):\n if not light_obj.loop:\n break\n\n np[count] = ((0, 128, 128))\n time.sleep(0.1)\n\n# time.sleep(2.0)\n\n for count in range(light_obj.num):\n if not light_obj.loop:\n break\n\n np[count] = ((0, 0, 255))\n time.sleep(0.1)\n\n# time.sleep(2.0)\n\n for count in range(light_obj.num):\n if not light_obj.loop:\n break\n\n np[count] = ((255, 20, 147))\n time.sleep(0.1)\n\n# time.sleep(2.0)\n\n except KeyboardInterrupt as e:\n raise (e)\n","repo_name":"patrickcooper95/smarty-lamps","sub_path":"programs/atlantis.py","file_name":"atlantis.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74761281683","text":"from flask import Flask, jsonify\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return 'Flask is running!'\n\n\n@app.route('/data')\ndef names():\n data = {\n \"first_names\": [\"John\", \"Jacob\", \"Julie\", \"Jenny\"]\n }\n return jsonify(data)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"realpython/flask-deploy","sub_path":"flask_project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":132,"dataset":"github-code","pt":"30"} +{"seq_id":"25520660118","text":"from TTS import *\nfrom makeSSML import *\n\nimport base64\nimport argparse\nimport sys\n\nclass Notification():\n def __init__(self):\n self.title = None\n self.startAt = None\n self.finishAt = None\n self.option = None\n\n def parse(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--title', help='Title of schedule',\n required=True)\n parser.add_argument('--diff', help='diffrentiate of finished-notification',\n default=None)\n parser.add_argument('--finishAt', help='Finished time of schedule',\n default=None)\n parser.add_argument('--option', help='Notification time',\n default='')\n\n args = parser.parse_args()\n self.title = args.title\n self.diff = args.diff\n self.finishAt = args.finishAt\n self.option = args.option\n print(args)\n print(self.title, self.diff, self.finishAt, self.option)\n\n def notify(self):\n # Debug용 try~catch (Node server test)\n try:\n synthesize_ssml(make_notification({\n \"title\": 
\n    def notify(self):\n        # try/except used for debugging (Node server test)\n        try:\n            synthesize_ssml(make_notification({\n                \"title\": self.title,\n                \"startAt\": self.startAt,\n                \"finishAt\": self.finishAt,\n                \"option\": self.option\n            }), True)\n        except:\n            print(make_notification({\n                \"title\": self.title,\n                \"startAt\": self.startAt,\n                \"finishAt\": self.finishAt,\n                \"option\": self.option\n            }))\n\ndef main():\n    noti = Notification()\n    noti.parse()\n    noti.notify()\n\nif __name__ == '__main__':\n    print(\"notification called\")\n    main()\n","repo_name":"wony5248/SSAFYEnS","sub_path":"embedded/hardware/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"72786794643","text":"from __future__ import print_function\n\nimport os\nimport numpy as np\nimport paths\nimport materials\nfrom mode_calcs import Simmo, Anallo\nfrom fortran import EMUstack\n\nmsh_location = paths.msh_path\ntemplate_location = paths.template_path\n\n# Acknowledgements\nprint('\\n##################################################################\\n'\\\n    + 'EMUstack is brought to you by Bjorn Sturmberg, Kokou Dossou, \\n' \\\n    + 'Felix Lawrence & Lindsay Botton, with support from CUDOS & ARENA\\n' \\\n    + 'Starting EMUstack calculation ...\\n' + \\\n    '##################################################################\\n')\n\n\nclass NanoStruct(object):\n    \"\"\" Represents a structured layer.\n\n    Args:\n        periodicity (str): Either 1D or 2D structure '1D_array', '2D_array'.\n\n        period (float): The period of the unit cell in nanometers.\n\n        diameter1 (float): The diameter of the inclusion in nm.\n\n    Keyword Args:\n        period_y (float): The period of the unit cell in the y-direction.\\\n            If None, period_y = period.\n\n        inc_shape (str): Shape of inclusions that have template mesh, \\\n            currently: 'circle', 'ellipse', 'square', 'ring', 'SRR',\n            'dimer', 'square_dimer', 'strip_circle', 'strip_square',\n            'rectangle', 'rectangle_shell', 'square_dimer_shell',\n            'cross', 'cross_shell', 'L'.\n\n        is_hex (bool): Simulating a hexagonal lattice, using a rect unitcell?\n\n        ellipticity (float): If != 0, inclusion has given ellipticity, \\\n            with b = diameter, a = diameter-ellipticity * diameter. \\\n            NOTE: only implemented for a single inclusion.\n\n        len_vertical (float): Vertical length of split ring resonator \\\n            (if inc_shape = 'SRR').\n\n        len_horizontal (float): Horizontal length of split ring resonator \\\n            (if inc_shape = 'SRR').\n\n        diameter2-16 (float): The diameters of further inclusions in nm. \\\n            Implemented up to diameter6 for 1D_arrays.\n\n        gap (float): The dimer gap in nm. \\\n            (if inc_shape = 'dimer' or 'square_dimer').\n\n        smooth (float): smoothness of square_dimer angles, between 0 (sharp) \\\n            and 1 (circle).\n            (if inc_shape = 'square_dimer' or inc_shape = 'rectangle').\n\n        t (float): shell thickness. \\\n            (for the *_shell inc_shapes).\n\n        inclusion_a : A :Material: instance for first inclusion, \\\n            specified as dispersive refractive index (eg. materials.Si_c) \\\n
            or nondispersive complex number (eg. Material(1.0 + 0.0j)).\n\n        inclusion_b : A :Material: instance for the second \\\n            inclusion medium.\n\n        inclusion_c : A :Material: instance for the third \\\n            inclusion medium.\n\n        inclusion_d : A :Material: instance for the fourth \\\n            inclusion medium.\n\n        inclusion_e : A :Material: instance for the fifth \\\n            inclusion medium.\n\n        background : A :Material: instance for the background medium.\n\n        loss (bool): If False, Im(n) = 0, if True n as in \\\n            :Material: instance.\n\n        height_nm (float): The thickness of the layer in nm or \\\n            'semi_inf' for a semi-infinite layer.\n\n        hyperbolic (bool): If True FEM looks for Eigenvalues around \\\n            n**2 * k_0**2 rather than the regular \\\n            n**2 * k_0**2 - alpha**2 - beta**2.\n\n        world_1d (bool): Does the rest of the stack have exclusively 1D \\\n            periodic structures and homogeneous layers? \\\n            If True we use the set of 1D diffraction order PWs.\\\n            Defaults to True for '1D_array', and False for '2D_array'.\n\n        ff (float): The fill fraction of the inclusions. If non-zero, \\\n            the specified diameters are overwritten s.t. given ff is \\\n            achieved, otherwise ff is calculated from parameters and \\\n            stored in self.ff.\n\n        ff_rand (bool): If True, diameters overwritten with random \\\n            diameters, s.t. the ff is as assigned. Must provide non-zero \\\n            dummy diameters.\n\n        posx (float): Shift NWs laterally towards center (each other), \\\n            posx is a fraction of the distance possible before NWs touch.\n\n        posy (float): Shift NWs vertically towards center (each other), \\\n            posy is a fraction of the distance possible before NWs touch.\n\n        small_space (float): Only for 1D_arrays with 2 interleaved \\\n            inclusions. Sets distance between edges of inclusions. \\\n            By default (d_in_nm - diameter1 - diameter2) / 2. \\\n            The smaller distance is placed left of center \\\n            (inclusion_a remains centered).\n\n        edge_spacing (bool): For 1D_array with >= 3 inclusions. Space \\\n            inclusion surfaces by equal separations. Else their centers \\\n            will be equally spaced.\n\n        split_touching_incs (bool): For 1D_array with > 1 inclusions. \\\n            Arrange inclusions with touching edges, with the \\\n            aggregate centered in the unit cell.\n\n        make_mesh_now (bool): If True, program creates a FEM mesh with \\\n            provided :NanoStruct: parameters. If False, must provide \\\n            mesh_file name of existing .mail that will be run despite \\\n            :NanoStruct: parameters.\n\n        force_mesh (bool): If True, a new mesh is created despite \\\n            existence of mesh with same parameter. This is used to make \\\n            mesh with equal period etc. but different lc refinement.\n\n        mesh_file (str): If using a set premade mesh give its name \\\n            including .mail if 2D_array (eg. 600_60.mail), or .txt if \\\n            1D_array. It must be located in backend/fortran/msh/\n\n        lc_bkg (float): Length constant of meshing of background medium \\\n            (smaller = finer mesh)\n\n        lc2 (float): factor by which lc_bkg will be reduced on inclusion \\\n            surfaces; lc_surface = lc_bkg / lc2.\n\n        lc3-6 (float): factor by which lc_bkg will be reduced at center \\\n            of inclusions.\n\n        plotting_fields (bool): Unless set to True, field data is deleted.\\\n            Also plots modes (ie. FEM solutions) in gmsh format. \\\n            Plots epsilon*|E|^2 & choice of real/imag/abs of \\\n            x,y,z components & field vectors. Fields are saved as gmsh 
Fields are saved as gmsh \\\n files, but can be converted by running the .geo file found in \\\n Bloch_fields/PNG/\n\n plot_real (bool): Choose to plot real part of modal fields.\n\n plot_imag (bool): Choose to plot imaginary part of modal fields.\n\n plot_abs (bool): Choose to plot absolute value of modal fields.\n\n plt_msh (bool): Save a plot of the 1D array geometry.\n \"\"\"\n def __init__(self,\n periodicity,\n period,\n diameter1,\n period_y=None,\n inc_shape='circle',\n is_hex=False,\n ellipticity=0.0,\n ff=0,\n ff_rand=False,\n small_space=None,\n edge_spacing=False,\n split_touching_incs=False,\n len_vertical=0,\n len_horizontal=0,\n background=materials.Material(1.0 + 0.0j),\n inclusion_a=materials.Material(1.0 + 0.0j),\n inclusion_b=materials.Material(1.0 + 0.0j),\n inclusion_c=materials.Material(1.0 + 0.0j),\n inclusion_d=materials.Material(1.0 + 0.0j),\n inclusion_e=materials.Material(1.0 + 0.0j),\n inclusion_f=materials.Material(1.0 + 0.0j),\n loss=True,\n height_nm=100.0,\n diameter2=0,\n diameter3=0,\n diameter4=0,\n diameter5=0,\n diameter6=0,\n diameter7=0,\n diameter8=0,\n diameter9=0,\n diameter10=0,\n diameter11=0,\n diameter12=0,\n diameter13=0,\n diameter14=0,\n diameter15=0,\n diameter16=0,\n gap=0,\n smooth=0,\n t=0,\n hyperbolic=False,\n world_1d=None,\n posx=0,\n posy=0,\n xshift=None,\n make_mesh_now=True,\n force_mesh=True,\n mesh_file='NEED_FILE.mail',\n geo_file=None,\n geo_params=None,\n lc_bkg=0.09,\n lc2=1.0,\n lc3=1.0,\n lc4=1.0,\n lc5=1.0,\n lc6=1.0,\n plotting_fields=False,\n plot_real=1,\n plot_imag=0,\n plot_abs=0,\n plot_field_conc=False,\n plt_msh=True):\n self.periodicity = periodicity\n self.period = float(period)\n self.diameter1 = diameter1\n if period_y is None:\n self.period_y = float(period)\n else:\n self.period_y = float(period_y)\n self.inc_shape = inc_shape\n self.is_hex = is_hex\n self.height_nm = height_nm\n self.background = background\n self.inclusion_a = inclusion_a\n self.inclusion_b = inclusion_b\n self.inclusion_c = inclusion_c\n self.inclusion_d = inclusion_d\n self.inclusion_e = inclusion_e\n self.inclusion_f = inclusion_f\n self.loss = loss\n self.hyperbolic = hyperbolic\n self.diameter2 = diameter2\n self.diameter3 = diameter3\n self.diameter4 = diameter4\n self.diameter5 = diameter5\n self.diameter6 = diameter6\n self.diameter7 = diameter7\n self.diameter8 = diameter8\n self.diameter9 = diameter9\n self.diameter10 = diameter10\n self.diameter11 = diameter11\n self.diameter12 = diameter12\n self.diameter13 = diameter13\n self.diameter14 = diameter14\n self.diameter15 = diameter15\n self.diameter16 = diameter16\n self.geo_file = geo_file\n self.geo_params = geo_params\n self.gap = gap\n self.smooth = smooth\n self.t = t\n self.len_vertical = len_vertical\n self.len_horizontal = len_horizontal\n self.ellipticity = ellipticity\n if ellipticity > 1.0:\n raise ValueError(\"ellipticity must be less than 1.0\")\n if diameter3 != 0:\n self.nb_typ_el = 4\n elif diameter2 != 0:\n self.nb_typ_el = 3\n else:\n self.nb_typ_el = 2\n if ff == 0:\n if periodicity == '2D_array':\n self.ff = calculate_ff(inc_shape, period, self.period_y,\n diameter1, diameter2, diameter3,\n diameter4, diameter5, diameter6,\n diameter7, diameter8, diameter9,\n diameter10, diameter11, diameter12,\n diameter13, diameter14, diameter15,\n diameter16, ellipticity)\n elif periodicity == '1D_array':\n self.ff = (diameter1 + diameter2) / period\n else:\n self.ff = ff\n if diameter2 != 0:\n self.diameter2 = 2 * ((ff * (period)**2) / np.pi -\n ((diameter1 / 
2)**2))**0.5\n            else:\n                self.diameter1 = 2 * np.sqrt((ff * (period)**2) / np.pi)\n        self.ff_rand = ff_rand\n        if world_1d is None:\n            if periodicity == '1D_array':\n                self.world_1d = True\n            if periodicity == '2D_array':\n                self.world_1d = False\n        else:\n            self.world_1d = world_1d\n        self.posx = posx\n        self.posy = posy\n        self.lc = lc_bkg\n        self.lc2 = lc2\n        self.lc3 = lc3\n        self.lc4 = lc4\n        self.lc5 = lc5\n        self.lc6 = lc6\n        self.force_mesh = force_mesh\n        self.small_space = small_space\n        self.edge_spacing = edge_spacing\n        self.split_touching_incs = split_touching_incs\n        self.plt_msh = plt_msh\n        if make_mesh_now is True:\n            self.make_mesh()\n        else:\n            self.mesh_file = mesh_file\n        if plotting_fields is True:\n            self.plotting_fields = 1\n            if periodicity == '2D_array':\n                if not os.path.exists(\"Bloch_fields\"):\n                    os.mkdir(\"Bloch_fields\")\n                if not os.path.exists(\"Bloch_fields/PDF\"):\n                    os.mkdir(\"Bloch_fields/PDF\")\n        else:\n            self.plotting_fields = 0\n        self.plot_real = plot_real\n        self.plot_imag = plot_imag\n        self.plot_abs = plot_abs\n        self.plot_field_conc = plot_field_conc\n        self.xshift = xshift\n\n    def make_mesh(self):\n        if self.periodicity == '2D_array':\n            if self.inc_shape in ['circle', 'ellipse', 'square']:\n                if self.diameter10 > 0:\n                    supercell = 16\n                    msh_name = '%(d)s_%(dy)s_%(dia)s_%(dias)s_%(diass)s_%(diasss)s_%(diassss)s' % {\n                        'd': dec_float_str(self.period),\n                        'dy': dec_float_str(self.period_y),\n                        'dia': dec_float_str(self.diameter1),\n                        'dias': dec_float_str(self.diameter2),\n                        'diass': dec_float_str(self.diameter3),\n                        'diasss': dec_float_str(self.diameter4),\n                        'diassss': dec_float_str(self.diameter5)\n                    }\n                elif self.diameter5 > 0:\n                    supercell = 9\n                    msh_name = '%(d)s_%(dy)s_%(dia)s_%(dias)s_%(diass)s_%(diasss)s_%(diassss)s' % {\n                        'd': dec_float_str(self.period),\n                        'dy': dec_float_str(self.period_y),\n                        'dia': dec_float_str(self.diameter1),\n                        'dias': dec_float_str(self.diameter2),\n                        'diass': dec_float_str(self.diameter3),\n                        'diasss': dec_float_str(self.diameter4),\n                        'diassss': dec_float_str(self.diameter5)\n                    }\n                elif self.diameter4 > 0:\n                    supercell = 4\n                    msh_name = '%(d)s_%(dy)s_%(dia)s_%(dias)s_%(diass)s_%(diasss)s' % {\n                        'd': dec_float_str(self.period),\n                        'dy': dec_float_str(self.period_y),\n                        'dia': dec_float_str(self.diameter1),\n                        'dias': dec_float_str(self.diameter2),\n                        'diass': dec_float_str(self.diameter3),\n                        'diasss': dec_float_str(self.diameter4)\n                    }\n                elif self.diameter3 > 0:\n                    supercell = 3\n                    msh_name = '%(d)s_%(dy)s_%(dia)s_%(dias)s_%(diass)s' % {\n                        'd': dec_float_str(self.period),\n                        'dy': dec_float_str(self.period_y),\n                        'dia': dec_float_str(self.diameter1),\n                        'dias': dec_float_str(self.diameter2),\n                        'diass': dec_float_str(self.diameter3)\n                    }\n                elif self.diameter2 > 0:\n                    supercell = 2\n                    msh_name = '%(d)s_%(dy)s_%(dia)s_%(dias)s' % {\n                        'd': dec_float_str(self.period),\n                        'dy': dec_float_str(self.period_y),\n                        'dia': dec_float_str(self.diameter1),\n                        'dias': dec_float_str(self.diameter2)\n                    }\n                elif self.diameter1 > 0:\n                    supercell = 1\n                    if self.is_hex is False:\n                        msh_name = '%(d)s_%(dy)s_%(dia)s' % {\n                            'd': dec_float_str(self.period),\n                            'dy': dec_float_str(self.period_y),\n                            'dia': dec_float_str(self.diameter1)\n                        }\n                    elif self.is_hex is True:\n                        msh_name = 'hex_%(d)s_%(dy)s_%(dia)s' % {\n                            'd': dec_float_str(self.period),\n                            'dy': dec_float_str(self.period_y),\n                            'dia': dec_float_str(self.diameter1)\n                        }\n                else:\n                    raise ValueError(\n                        \"must have at least one cylinder of nonzero diameter.\")\n\n                if self.ellipticity != 0:\n                    msh_name = msh_name + 
'_e_%(e)s' % {\n 'e': dec_float_str(self.ellipticity),\n }\n if self.inc_shape == 'square':\n msh_name = msh_name + '_sq'\n if self.posx != 0:\n msh_name = msh_name + 'x%(e)s' % {\n 'e': dec_float_str(self.posx),\n }\n if self.posy != 0:\n msh_name = msh_name + 'y%(e)s' % {\n 'e': dec_float_str(self.posy),\n }\n\n # for blah in range(1,101,1):\n # print blah\n # msh_name = 'random_u_%i' % blah\n # self.mesh_file = msh_name + '.mail'\n # msh_name = 'design-last_17'\n if self.ff_rand is True:\n import random\n ff_tol = 0.0001\n min_a = 50\n max_a = (self.period / 1.05) / np.sqrt(supercell)\n unit_period = (self.period / np.sqrt(supercell))\n mean = np.sqrt((self.ff * (unit_period)**2) / np.pi)\n test_ff = 0\n while abs(test_ff - self.ff) > ff_tol:\n rad_array = []\n for i in range(supercell):\n # stand_dev = 30\n # select_diameter = random.gauss(mean,stand_dev)\n select_diameter = random.uniform(min_a, max_a)\n rad_array = np.append(rad_array, select_diameter)\n\n test_ff = calculate_ff(\n self.inc_shape, self.period, self.period_y,\n rad_array[0], rad_array[1], rad_array[2],\n rad_array[3], rad_array[4], rad_array[5],\n rad_array[6], rad_array[7], rad_array[8],\n rad_array[9], rad_array[10], rad_array[11],\n rad_array[12], rad_array[13], rad_array[14],\n rad_array[15])\n print(test_ff)\n if supercell > 3:\n self.diameter1 = rad_array[0]\n self.diameter2 = rad_array[1]\n self.diameter3 = rad_array[2]\n self.diameter4 = rad_array[3]\n if supercell > 4:\n self.diameter5 = rad_array[4]\n self.diameter6 = rad_array[5]\n self.diameter7 = rad_array[6]\n self.diameter8 = rad_array[7]\n self.diameter9 = rad_array[8]\n if supercell > 9:\n self.diameter10 = rad_array[9]\n self.diameter11 = rad_array[10]\n self.diameter12 = rad_array[11]\n self.diameter13 = rad_array[12]\n self.diameter14 = rad_array[13]\n self.diameter15 = rad_array[14]\n self.diameter16 = rad_array[15]\n test_ff = calculate_ff(\n self.inc_shape, self.period, self.period_y,\n rad_array[0], rad_array[1], rad_array[2],\n rad_array[3], rad_array[4], rad_array[5],\n rad_array[6], rad_array[7], rad_array[8],\n rad_array[9], rad_array[10], rad_array[11],\n rad_array[12], rad_array[13], rad_array[14],\n rad_array[15])\n\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n if self.is_hex is False:\n geo_tmp = open(\n template_location +\n '%s_msh_template.geo' % supercell, \"r\").read()\n else:\n geo_tmp = open(\n template_location + 'hex_msh_template.geo',\n \"r\").read()\n\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('ellipticity = 0;',\n \"ellipticity = %f;\" % self.ellipticity)\n if self.inc_shape == 'square':\n geo = geo.replace('square = 0;', \"square = 1;\")\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n if self.posx != 0:\n # appropriate for old definition of fraction of distance to touching\n geo = geo.replace(\n 'posx = 0;', \"posx = %f;\" %\n (self.posx / self.period *\n (self.period /\n (2 * np.sqrt(supercell)) - self.diameter1 / 2.0))\n )\n # appropriate for % shift of distance of centre point to (ind) unitcell boundary (ie d/2)\n # geo = geo.replace('posx = 0;', \"posx = %f;\" % float(self.posx/supercell))\n if 
self.posy != 0:\n geo = geo.replace(\n 'posy = 0;', \"posy = %f;\" %\n (self.posy / self.period *\n (self.period /\n (2 * np.sqrt(supercell)) - self.diameter1 / 2.0))\n )\n # geo = geo.replace('posy = 0;', \"posy = %f;\" % float(self.posy/supercell))\n if supercell > 1:\n geo = geo.replace('a2 = 0;',\n \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('lc4 = lc/1;',\n \"lc4 = lc/%f;\" % self.lc4)\n if supercell > 2:\n geo = geo.replace('a3 = 0;',\n \"a3 = %f;\" % self.diameter3)\n geo = geo.replace('lc5 = lc/1;',\n \"lc5 = lc/%f;\" % self.lc5)\n if supercell > 3:\n geo = geo.replace('a4 = 0;',\n \"a4 = %f;\" % self.diameter4)\n geo = geo.replace('lc6 = lc/1;',\n \"lc6 = lc/%f;\" % self.lc6)\n if supercell > 4:\n geo = geo.replace('a5 = 0;',\n \"a5 = %f;\" % self.diameter5)\n geo = geo.replace('a6 = 0;',\n \"a6 = %f;\" % self.diameter6)\n geo = geo.replace('a7 = 0;',\n \"a7 = %f;\" % self.diameter7)\n geo = geo.replace('a8 = 0;',\n \"a8 = %f;\" % self.diameter8)\n geo = geo.replace('a9 = 0;',\n \"a9 = %f;\" % self.diameter9)\n if supercell > 9:\n geo = geo.replace('a10 = 0;',\n \"a10 = %f;\" % self.diameter10)\n geo = geo.replace('a11 = 0;',\n \"a11 = %f;\" % self.diameter11)\n geo = geo.replace('a12 = 0;',\n \"a12 = %f;\" % self.diameter12)\n geo = geo.replace('a13 = 0;',\n \"a13 = %f;\" % self.diameter13)\n geo = geo.replace('a14 = 0;',\n \"a14 = %f;\" % self.diameter14)\n geo = geo.replace('a15 = 0;',\n \"a15 = %f;\" % self.diameter15)\n geo = geo.replace('a16 = 0;',\n \"a16 = %f;\" % self.diameter16)\n\n elif self.inc_shape == 'SRR':\n msh_name = 'SRR_%(d)s_%(dy)s_%(lvert)s_%(lhori)s_%(dia)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'lvert': dec_float_str(self.len_vertical),\n 'lhori': dec_float_str(self.len_horizontal),\n 'dia': dec_float_str(self.diameter1)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(template_location + 'SRR_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('lvert_nm = 0;',\n \"lvert_nm = %f;\" % self.len_vertical)\n geo = geo.replace('lhori_nm = 0;',\n \"lhori_nm = %f;\" % self.len_horizontal)\n geo = geo.replace('width_nm = 0;',\n \"width_nm = %f;\" % self.diameter1)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'ring':\n msh_name = 'ring_%(d)s_%(dy)s_%(dia_out)s_%(dia_in)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'dia_out': dec_float_str(self.diameter1),\n 'dia_in': dec_float_str(self.diameter2)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'ring1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" 
% self.lc3)\n geo = geo.replace('xshift_nm = 0;',\n \"xshift_nm = %f;\" % self.xshift)\n\n elif self.inc_shape == 'dimer':\n msh_name = 'dimer_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(gap)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'gap': dec_float_str(self.gap)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'dimer1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('gap = 0;', \"gap = %f;\" % self.gap)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'square_dimer':\n msh_name = 'square_dimer_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(gap)s_%(smooth)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'gap': dec_float_str(self.gap),\n 'smooth': dec_float_str(self.smooth)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'square_dimer1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('gap = 0;', \"gap = %f;\" % self.gap)\n geo = geo.replace('smooth = 0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'square_shell_dimer':\n msh_name = 'square_shell_dimer_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(d_three)s_%(d_four)s_%(gap)s_%(smooth)s_%(t)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'd_three': dec_float_str(self.diameter3),\n 'd_four': dec_float_str(self.diameter4),\n 'gap': dec_float_str(self.gap),\n 'smooth': dec_float_str(self.smooth),\n 't': dec_float_str(self.t)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location +\n 'square_shell_dimer1_msh_template.geo', \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('b1 = 0;', \"b1 = %f;\" % self.diameter3)\n geo = geo.replace('b2 = 0;', \"b2 = %f;\" % self.diameter4)\n geo = geo.replace('gap = 0;', \"gap = %f;\" % self.gap)\n geo = geo.replace('smooth = 
0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('t = 0;', \"t = %f;\" % self.t)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n elif self.inc_shape == 'rectangle':\n msh_name = 'rect_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(smooth)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'smooth': dec_float_str(self.smooth)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'rect1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('smooth = 0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n elif self.inc_shape == 'rectangle_shell':\n msh_name = 'rect_shell_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(smooth)s_%(t)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'smooth': dec_float_str(self.smooth),\n 't': dec_float_str(self.t)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'rect_shell1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('b1 = 0;', \"b1 = %f;\" % self.diameter2)\n geo = geo.replace('smooth = 0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('t = 0;', \"t = %f;\" % self.t)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n elif self.inc_shape == 'cross':\n msh_name = 'cross_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(smooth)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'smooth': dec_float_str(self.smooth)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'cross1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('smooth = 0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n 
elif self.inc_shape == 'cross_shell':\n msh_name = 'cross_shell_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(smooth)s_%(t)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'smooth': dec_float_str(self.smooth),\n 't': dec_float_str(self.t)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + 'cross_shell1_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('a2 = 0;', \"a2 = %f;\" % self.diameter2)\n geo = geo.replace('smooth = 0;',\n \"smooth = %f;\" % self.smooth)\n geo = geo.replace('t = 0;', \"t = %f;\" % self.t)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'L':\n msh_name = 'L_%(d)s_%(dy)s_%(L)s_%(W)s_%(r)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'L': dec_float_str(self.diameter1),\n 'W': dec_float_str(self.diameter2),\n 'r': dec_float_str(self.smooth),\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(template_location + 'L_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('L_nm = 0;',\n \"L_nm = %f;\" % self.diameter1)\n geo = geo.replace('W_nm = 0;',\n \"W_nm = %f;\" % self.diameter2)\n geo = geo.replace('r = 0;', \"r = %f;\" % self.smooth)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n elif self.inc_shape == 'strip_circle':\n msh_name = 'strip_circle_%(d)s_%(dy)s_%(d_one)s_%(d_two)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + '1_strip_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('strip = 0;',\n \"strip = %f;\" % self.diameter2)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'strip_square':\n msh_name = 'strip_square_%(d)s_%(dy)s_%(d_one)s_%(d_two)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(\n template_location + '1_strip_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = 
%f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('strip = 0;',\n \"strip = %f;\" % self.diameter2)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n geo = geo.replace('square = 0;', \"square = 1;\")\n\n elif self.inc_shape == 'double_strip_circle':\n msh_name = 'double_strip_circle_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(d_three)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'd_three': dec_float_str(self.diameter3)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(msh_location + '1_2strip_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('strip = 0;',\n \"strip = %f;\" % self.diameter2)\n geo = geo.replace('strip2 = 0;',\n \"strip2 = %f;\" % self.diameter3)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n\n elif self.inc_shape == 'double_strip_square':\n msh_name = 'double_strip_square_%(d)s_%(dy)s_%(d_one)s_%(d_two)s_%(d_three)s' % {\n 'd': dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y),\n 'd_one': dec_float_str(self.diameter1),\n 'd_two': dec_float_str(self.diameter2),\n 'd_three': dec_float_str(self.diameter3)\n }\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n geo_tmp = open(msh_location + '1_2strip_msh_template.geo',\n \"r\").read()\n geo = geo_tmp.replace('ff = 0;', \"ff = %f;\" % self.ff)\n geo = geo.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('a1 = 0;', \"a1 = %f;\" % self.diameter1)\n geo = geo.replace('strip = 0;',\n \"strip = %f;\" % self.diameter2)\n geo = geo.replace('strip2 = 0;',\n \"strip2 = %f;\" % self.diameter3)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n geo = geo.replace('square = 0;', \"square = 1;\")\n\n elif self.inc_shape == 'custom':\n\n # check that geo_file and geo_params have been assigned\n if self.geo_file == None:\n raise TypeError(\n 'Must provide a .geo file if inc_shape=\"custom\".')\n if self.geo_params == None:\n raise TypeError(\n 'Must provide geo_params dictionary if inc_shape=\"custom\".'\n )\n\n # check geo_file name structure\n if self.geo_file[-4:] != '.geo':\n raise ValueError(\n 'Must provide a .geo file if inc_shape=\"custom\".')\n if self.geo_file[-13:] != '_template.geo':\n raise ValueError(\n 'Provided .geo filename must respect the \"*_template.geo\" naming convention.'\n )\n\n # define root mesh name\n root_msh_name = self.geo_file[:-13]\n\n # add period values to mesh name\n msh_name = root_msh_name + '%(d)s_%(dy)s_' % {\n 'd': 
dec_float_str(self.period),\n 'dy': dec_float_str(self.period_y)\n }\n\n # add custom parameter values to mesh name\n for key, value in self.geo_params.items():\n msh_name = msh_name + '_' + key + '%(value)s' % {\n 'value': dec_float_str(value)\n }\n\n # open geo template file, pass params and save to new geo file\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n\n # loop over ge_params to replace parameters\n geo_tmp = open(msh_location + self.geo_file, \"r\").read()\n geo = geo_tmp.replace('d_in_nm = 0;',\n \"d_in_nm = %f;\" % self.period)\n geo = geo.replace('dy_in_nm = 0;',\n \"dy_in_nm = %f;\" % self.period_y)\n geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n for key, value in self.geo_params.items():\n try:\n if key[0:2] == 'lc':\n str_in = key + ' = lc/1;'\n str_out = key + ' = lc/%f;' % value\n else:\n str_in = key + ' = 0;'\n str_out = key + ' = %f;' % value\n geo = geo.replace(str_in, str_out)\n except:\n raise RuntimeError('Could not find \"' + str_in +\n ' string in ' + self.geo_file +\n ' geometry file.')\n\n else:\n raise NotImplementedError(\"\\n Selected inc_shape = '%s' \\n \\\n is not currently implemented. Please make a mesh with gmsh, & \\n \\\n consider contributing this to EMUstack via github.\" %\n self.inc_shape)\n\n self.mesh_file = msh_name + '.mail'\n if not os.path.exists(msh_location + msh_name +\n '.mail') or self.force_mesh is True:\n open(msh_location + msh_name + '.geo', \"w\").write(geo)\n EMUstack.conv_gmsh(msh_location + msh_name)\n\n # # Automatically show created mesh in gmsh.\n # gmsh_cmd = 'gmsh '+ msh_location + msh_name + '.msh'\n # os.system(gmsh_cmd)\n # gmsh_cmd = 'gmsh '+ msh_location + msh_name + '.geo'\n # os.system(gmsh_cmd)\n\n elif self.periodicity == '1D_array':\n # Unit cell length normalized to unity\n x_min = 0.0\n x_max = 1.0\n # Mesh elements and points\n nel = int(np.round(1.0 / self.lc))\n npt = 2 * nel + 1\n delta_x = (x_max - x_min) / nel\n # Coordinate and type of the nodes\n el_list = range(1, nel + 1)\n table_nod = np.zeros((3, nel + 1))\n type_el = np.zeros(nel + 1)\n ls_x = np.zeros(npt + 1)\n\n for i_el in el_list:\n x = x_min + (i_el - 1) * delta_x\n ls_x[2 * i_el - 1] = x\n ls_x[2 * i_el] = x + delta_x / 2.0\n # End-points\n x = x_min + i_el * delta_x\n ls_x[2 * i_el + 1] = x\n # Connectivity table\n for i_el in el_list:\n table_nod[0, i_el] = 2 * i_el - 1\n table_nod[1, i_el] = 2 * i_el + 1\n table_nod[2, i_el] = 2 * i_el # Mid-node\n\n if self.diameter6 > 0:\n msh_name = '%(d)s_%(di)s_%(dis)s_%(diss)s_%(disss)s_%(dissss)s_%(disssss)s' % {\n 'd': dec_float_str(self.period),\n 'di': dec_float_str(self.diameter1),\n 'dis': dec_float_str(self.diameter2),\n 'diss': dec_float_str(self.diameter3),\n 'disss': dec_float_str(self.diameter4),\n 'dissss': dec_float_str(self.diameter5),\n 'disssss': dec_float_str(self.diameter6)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n rad_2 = self.diameter2 / (2.0 * self.period)\n rad_3 = self.diameter3 / (2.0 * self.period)\n rad_4 = self.diameter4 / (2.0 * self.period)\n rad_5 = self.diameter5 / (2.0 * self.period)\n rad_6 = self.diameter6 / (2.0 * self.period)\n if self.edge_spacing is True:\n i_d = 2.0 * (0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 -\n rad_6) / 6.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + 
rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_1 \\\n and x_2 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_1 \\\n and x_2 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - 2.0*i_d - 2.0*rad_2 - rad_1 and 0.5 - 2.0*i_d - 2.0*rad_2 - 2.0*rad_4 - rad_1 <= x_1 \\\n and x_2 <= 0.5 - 2.0*i_d - 2.0*rad_2 - rad_1 and 0.5 - 2.0*i_d - 2.0*rad_2 - 2.0*rad_4 - rad_1 <= x_2:\n type_el[i_el] = 5\n elif 0.5 + 2.0*i_d + 2.0*rad_3 + rad_1 <= x_1 and x_1 <= 0.5 + 2.0*i_d + 2.0*rad_3 + 2.0*rad_5 + rad_1 \\\n and 0.5 + 2.0*i_d + 2.0*rad_3 + rad_1 <= x_2 and x_2 <= 0.5 + 2.0*i_d + 2.0*rad_3 + 2.0*rad_5 + rad_1:\n type_el[i_el] = 6\n elif x_1 <= 0.5 - 3.0*i_d - rad_1 - 2.0*rad_2 - 2.0*rad_4 and x_1 >= 0.5 - 3.0*i_d - rad_1 - 2.0*rad_2 - 2.0*rad_4 - rad_5\\\n and x_2 <= 0.5 - 3.0*i_d - rad_1 - 2.0*rad_2 - 2.0*rad_4 and x_2 >= 0.5 - 3.0*i_d - rad_1 - 2.0*rad_2 - 2.0*rad_4 - rad_5:\n type_el[i_el] = 6\n elif x_1 >= 0.5 + 3.0*i_d + rad_1 + 2.0*rad_3 + 2.0*rad_5 and x_1 <= 0.5 + 3.0*i_d + rad_1 + 2.0*rad_3 + 2.0*rad_5 + 2.0*rad_6\\\n and x_2 >= 0.5 + 3.0*i_d + rad_1 + 2.0*rad_3 + 2.0*rad_5 and x_2 <= 0.5 + 3.0*i_d + rad_1 + 2.0*rad_3 + 2.0*rad_5 + 2.0*rad_6:\n type_el[i_el] = 7\n else:\n type_el[i_el] = 1\n elif self.split_touching_incs is True:\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_1 \\\n and x_2 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_2:\n type_el[i_el] = 5\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_2:\n type_el[i_el] = 5\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 <= x_2:\n type_el[i_el] = 6\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 <= x_2:\n type_el[i_el] = 6\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 + rad_6 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 +rad_5 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 + rad_6 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 +rad_5 <= x_2:\n type_el[i_el] = 7\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - 
rad_5 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 -rad_6 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 -rad_5 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 -rad_6 <= x_2:\n type_el[i_el] = 7\n else:\n type_el[i_el] = 1\n else:\n i_d = 1.0 / 6.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif 0.5 - i_d - rad_2 <= x_1 and x_1 <= 0.5 - i_d + rad_2 \\\n and 0.5 - i_d - rad_2 <= x_2 and x_2 <= 0.5 - i_d + rad_2:\n type_el[i_el] = 3\n elif 0.5 + i_d - rad_3 <= x_1 and x_1 <= 0.5 + i_d + rad_3 \\\n and 0.5 + i_d - rad_3 <= x_2 and x_2 <= 0.5 + i_d + rad_3:\n type_el[i_el] = 4\n elif 0.5 - 2.0*i_d - rad_4 <= x_1 and x_1 <= 0.5 - 2.0*i_d + rad_4 \\\n and 0.5 - 2.0*i_d - rad_4 <= x_2 and x_2 <= 0.5 - 2.0*i_d + rad_4:\n type_el[i_el] = 5\n elif 0.5 + 2.0*i_d - rad_5 <= x_1 and x_1 <= 0.5 + 2.0*i_d + rad_5 \\\n and 0.5 + 2.0*i_d - rad_5 <= x_2 and x_2 <= 0.5 + 2.0*i_d + rad_5:\n type_el[i_el] = 6\n elif x_1 <= rad_6 and x_2 <= rad_6:\n type_el[i_el] = 7\n elif x_1 >= 1.0 - rad_6 and x_2 >= 1.0 - rad_6:\n type_el[i_el] = 7\n else:\n type_el[i_el] = 1\n elif self.diameter5 > 0:\n msh_name = '%(d)s_%(di)s_%(dis)s_%(diss)s_%(disss)s_%(dissss)s' % {\n 'd': dec_float_str(self.period),\n 'di': dec_float_str(self.diameter1),\n 'dis': dec_float_str(self.diameter2),\n 'diss': dec_float_str(self.diameter3),\n 'disss': dec_float_str(self.diameter4),\n 'dissss': dec_float_str(self.diameter5)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n rad_2 = self.diameter2 / (2.0 * self.period)\n rad_3 = self.diameter3 / (2.0 * self.period)\n rad_4 = self.diameter4 / (2.0 * self.period)\n rad_5 = self.diameter5 / (2.0 * self.period)\n if self.edge_spacing is True:\n i_d = 2.0 * (0.5 - rad_1 - rad_2 - rad_3 - rad_4 -\n rad_5) / 5.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_1 \\\n and x_2 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_1 \\\n and x_2 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - 2.0*i_d - 2.0*rad_2 - rad_1 and 0.5 - 2.0*i_d - 2.0*rad_2 - 2.0*rad_4 - rad_1 <= x_1 \\\n and x_2 <= 0.5 - 2.0*i_d - 2.0*rad_2 - rad_1 and 0.5 - 2.0*i_d - 2.0*rad_2 - 2.0*rad_4 - rad_1 <= x_2:\n type_el[i_el] = 5\n elif 0.5 + 2.0*i_d + 2.0*rad_3 + rad_1 <= x_1 and x_1 <= 0.5 + 2.0*i_d + 2.0*rad_3 + 2.0*rad_5 + rad_1 \\\n and 0.5 + 2.0*i_d + 2.0*rad_3 + rad_1 <= x_2 and x_2 <= 0.5 + 2.0*i_d + 2.0*rad_3 + 2.0*rad_5 + rad_1:\n type_el[i_el] = 4\n else:\n type_el[i_el] = 1\n elif self.split_touching_incs is True:\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_1 \\\n and x_2 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_2:\n type_el[i_el] = 
3\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_2:\n type_el[i_el] = 5\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_2:\n type_el[i_el] = 5\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 + rad_5 and 0.5 + rad_1 + rad_2 + rad_3 + rad_4 <= x_2:\n type_el[i_el] = 6\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 - rad_4 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 - rad_5 <= x_2:\n type_el[i_el] = 6\n else:\n type_el[i_el] = 1\n else:\n i_d = 1.0 / 5.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif 0.5 - i_d - rad_2 <= x_1 and x_1 <= 0.5 - i_d + rad_2 \\\n and 0.5 - i_d - rad_2 <= x_2 and x_2 <= 0.5 - i_d + rad_2:\n type_el[i_el] = 3\n elif 0.5 + i_d - rad_3 <= x_1 and x_1 <= 0.5 + i_d + rad_3 \\\n and 0.5 + i_d - rad_3 <= x_2 and x_2 <= 0.5 + i_d + rad_3:\n type_el[i_el] = 4\n elif 0.5 - 2.0*i_d - rad_4 <= x_1 and x_1 <= 0.5 - 2.0*i_d + rad_4 \\\n and 0.5 - 2.0*i_d - rad_4 <= x_2 and x_2 <= 0.5 - 2.0*i_d + rad_4:\n type_el[i_el] = 5\n elif 0.5 + 2.0*i_d - rad_5 <= x_1 and x_1 <= 0.5 + 2.0*i_d + rad_5 \\\n and 0.5 + 2.0*i_d - rad_5 <= x_2 and x_2 <= 0.5 + 2.0*i_d + rad_5:\n type_el[i_el] = 6\n else:\n type_el[i_el] = 1\n elif self.diameter4 > 0:\n msh_name = '%(d)s_%(di)s_%(dis)s_%(diss)s_%(disss)s' % {\n 'd': dec_float_str(self.period),\n 'di': dec_float_str(self.diameter1),\n 'dis': dec_float_str(self.diameter2),\n 'diss': dec_float_str(self.diameter3),\n 'disss': dec_float_str(self.diameter4)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n rad_2 = self.diameter2 / (2.0 * self.period)\n rad_3 = self.diameter3 / (2.0 * self.period)\n rad_4 = self.diameter4 / (2.0 * self.period)\n if self.edge_spacing is True:\n i_d = 2.0 * (0.5 - rad_1 - rad_2 - rad_3 - rad_4) / 4.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_1 \\\n and x_2 <= 0.5 - i_d - rad_1 and 0.5 - i_d - rad_1 - 2.0*rad_2 <= x_2:\n type_el[i_el] = 3\n elif 0.5 + i_d + rad_1 <= x_1 and x_1 <= 0.5 + i_d + rad_1 + 2.0*rad_3 \\\n and 0.5 + i_d + rad_1 <= x_2 and x_2 <= 0.5 + i_d + rad_1 + 2.0*rad_3:\n type_el[i_el] = 4\n elif x_1 >= 0.5 + 2.0*i_d + rad_1 + 2.0*rad_3 \\\n and x_2 >= 0.5 + 2.0*i_d + rad_1 + 2.0*rad_3:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - 2.0*i_d - rad_1 - 2.0*rad_2 \\\n and x_2 <= 0.5 - 2.0*i_d - rad_1 - 2.0*rad_2:\n type_el[i_el] = 3\n else:\n type_el[i_el] = 1\n elif 
self.split_touching_incs is True:\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_1 \\\n and x_2 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 + rad_4 and 0.5 + rad_1 + rad_2 + rad_3 <= x_2:\n type_el[i_el] = 5\n elif x_1 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 - rad_3 and 0.5 - rad_1 - rad_2 - rad_3 - rad_4 <= x_2:\n type_el[i_el] = 5\n else:\n type_el[i_el] = 1\n else:\n i_d = 1.0 / 4.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif 0.5 - i_d - rad_2 <= x_1 and x_1 <= 0.5 - i_d + rad_2 \\\n and 0.5 - i_d - rad_2 <= x_2 and x_2 <= 0.5 - i_d + rad_2:\n type_el[i_el] = 3\n elif 0.5 + i_d - rad_3 <= x_1 and x_1 <= 0.5 + i_d + rad_3 \\\n and 0.5 + i_d - rad_3 <= x_2 and x_2 <= 0.5 + i_d + rad_3:\n type_el[i_el] = 4\n elif x_1 <= rad_4 and x_2 <= rad_4:\n type_el[i_el] = 5\n elif x_1 >= 1.0 - rad_4 and x_2 >= 1.0 - rad_4:\n type_el[i_el] = 5\n else:\n type_el[i_el] = 1\n elif self.diameter3 > 0:\n msh_name = '%(d)s_%(di)s_%(dis)s_%(diss)s' % {\n 'd': dec_float_str(self.period),\n 'di': dec_float_str(self.diameter1),\n 'dis': dec_float_str(self.diameter2),\n 'diss': dec_float_str(self.diameter3)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n rad_2 = self.diameter2 / (2.0 * self.period)\n rad_3 = self.diameter3 / (2.0 * self.period)\n if self.edge_spacing is True:\n i_d = (1.0 - self.diameter1 - self.diameter2 -\n self.diameter3) / 3.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n # inclusion 1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n # inclusion 2\n elif 0.5 - i_d - 2.0*rad_2 - rad_1 <= x_1 and x_1 <= 0.5 - i_d - rad_1 \\\n and 0.5 - i_d - 2.0*rad_2 - rad_1 <= x_2 and x_2 <= 0.5 - i_d - rad_1:\n type_el[i_el] = 3\n # inclusion 3\n elif x_1 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_1 \\\n and x_2 <= 0.5 + i_d + 2.0*rad_3 + rad_1 and 0.5 + i_d + rad_1 <= x_2:\n type_el[i_el] = 4\n else:\n type_el[i_el] = 1\n elif self.split_touching_incs is True:\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 - dx <= x_2:\n type_el[i_el] = 3\n 
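# The chain pairs each inclusion with two mirrored intervals: the test\n                        # above covers inclusion 2 on the positive side of the central\n                        # inclusion, the test below covers its mirror on the negative side.\n                        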
elif x_1 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 - dx <= x_1 \\\n and x_2 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 - dx <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 + rad_3 and 0.5 + rad_1 + rad_2 - dx <= x_2:\n type_el[i_el] = 4\n elif x_1 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 - dx <= x_1 \\\n and x_2 <= 0.5 - rad_1 - rad_2 and 0.5 - rad_1 - rad_2 - rad_3 - dx <= x_2:\n type_el[i_el] = 4\n else:\n type_el[i_el] = 1\n else:\n i_d = 1.0 / 3.0\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif 0.5 - i_d - rad_2 <= x_1 and x_1 <= 0.5 - i_d + rad_2 \\\n and 0.5 - i_d - rad_2 <= x_2 and x_2 <= 0.5 - i_d + rad_2:\n type_el[i_el] = 3\n elif 0.5 + i_d - rad_3 <= x_1 and x_1 <= 0.5 + i_d + rad_3 \\\n and 0.5 + i_d - rad_3 <= x_2 and x_2 <= 0.5 + i_d + rad_3:\n type_el[i_el] = 4\n else:\n type_el[i_el] = 1\n elif self.diameter2 > 0:\n msh_name = '1D_%(d)s_%(diameter)s_%(diameters)s' % {\n 'd': dec_float_str(self.period),\n 'diameter': dec_float_str(self.diameter1),\n 'diameters': dec_float_str(self.diameter2)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n rad_2 = self.diameter2 / (2.0 * self.period)\n if self.split_touching_incs is True:\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 + rad_2 and 0.5 + rad_1 - dx <= x_2:\n type_el[i_el] = 3\n elif x_1 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 - dx <= x_1 \\\n and x_2 <= 0.5 - rad_1 and 0.5 - rad_1 - rad_2 - dx <= x_2:\n type_el[i_el] = 3\n else:\n type_el[i_el] = 1\n else:\n if self.small_space is None:\n small_space = large_d = 0.5 - rad_1 - rad_2\n else:\n small_space = self.small_space\n large_d = 1.0 - small_space - (2 * rad_1) - (2 * rad_2)\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n elif x_1 <= 0.5 - rad_1 - small_space and x_2 <= 0.5 - rad_1 - small_space:\n type_el[i_el] = 3\n elif x_1 >= 0.5 + large_d + rad_1 and x_2 >= 0.5 + large_d + rad_1:\n type_el[i_el] = 3\n else:\n type_el[i_el] = 1\n elif self.diameter1 > 0:\n msh_name = '1D_%(d)s_%(diameter)s' % {\n 'd': dec_float_str(self.period),\n 'diameter': dec_float_str(self.diameter1)\n }\n # End-points of the elements\n rad_1 = self.diameter1 / (2.0 * self.period)\n for i_el in el_list:\n x_1 = ls_x[2 * i_el - 1]\n x_2 = ls_x[2 * i_el + 1]\n dx = x_2 - x_1\n if x_1 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_1 \\\n and x_2 <= 0.5 + rad_1 and 0.5 - rad_1 - dx <= x_2:\n type_el[i_el] = 2\n else:\n type_el[i_el] = 1\n else:\n raise ValueError(\n \"Must have at least one grating of nonzero width.\")\n\n # Store useful quantities as property of the object.\n self.n_msh_el = nel\n self.n_msh_pts = npt\n self.table_nod = table_nod[:, 1:]\n # self.type_el = type_el[1:]\n self.type_el = type_el[1:]\n self.x_arr = ls_x[1:]\n self.mesh_file = msh_name\n\n if self.plt_msh is True:\n import matplotlib\n import matplotlib.pyplot as plt\n fig = 
plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.plot(el_list, self.type_el)\n ax1.fill_between(el_list, self.type_el, 0)\n ax1.set_xlim(el_list[0], el_list[-1])\n ax1.set_ylim(1, 7)\n ax1.set_yticks([1, 2, 3, 4, 5, 6, 7])\n ax1.set_yticklabels([\n 'bkg', 'inc_a', 'inc_b', 'inc_c', 'inc_d', 'inc_e', 'inc_f'\n ])\n ax1.set_xlabel('Element Number')\n ax1.set_ylabel('Material Type')\n plt.savefig(msh_name, bbox_inches='tight')\n\n # Then clean up local variables.\n del nel, npt, table_nod, ls_x, type_el, el_list\n\n # Latency of old 1D grating meshed in 2D.\n\n # elif self.periodicity == '1D_array':\n # if self.diameter2 > 0:\n # supercell = 2\n # msh_name = '1D_%(d)s_%(diameter)s_%(diameters)s' % {\n # 'd' : dec_float_str(self.period), 'diameter' : dec_float_str(self.diameter1),\n # 'diameters' : dec_float_str(self.diameter2)}\n # elif self.diameter1 > 0:\n # supercell = 1\n # msh_name = '1D_%(d)s_%(diameter)s' % {'d' : dec_float_str(self.period),\n # 'diameter' : dec_float_str(self.diameter1)}\n # else:\n # raise ValueError, \"must have at least one grating of nonzero width.\"\n\n # self.mesh_file = msh_name + '.mail'\n\n # if not os.path.exists(msh_location + msh_name + '.mail') or force_mesh == True:\n # geo_tmp = open(msh_location + '1D_%s_msh_template.geo' % supercell, \"r\").read()\n # geo = geo_tmp.replace('d_in_nm = 0;', \"d_in_nm = %f;\" % self.period)\n # geo = geo.replace('w1 = 0;', \"w1 = %f;\" % self.diameter1)\n # geo = geo.replace('lc = 0;', \"lc = %f;\" % self.lc)\n # geo = geo.replace('lc2 = lc/1;', \"lc2 = lc/%f;\" % self.lc2)\n # if supercell > 1:\n # geo = geo.replace('w2 = 0;', \"w2 = %f;\" % self.diameter2)\n # geo = geo.replace('lc3 = lc/1;', \"lc3 = lc/%f;\" % self.lc3)\n # geo = geo.replace('lc4 = lc/1;', \"lc4 = lc/%f;\" % self.lc4)\n # if self.small_space != 0:\n # # small distance between centre of gratings in nm\n # # calc complementary large distance, which is added to top & bottom\n # large_d_on_2 = (self.period - self.diameter1/2 - self.diameter2/2 - self.small_space)/2\n # posx1 = large_d_on_2 + self.diameter1/2\n # posx2 = large_d_on_2 + self.diameter2/2\n # posx3 = large_d_on_2 + self.diameter1 + ((self.small_space - self.diameter1/2 - self.diameter2/2)/2)\n # geo = geo.replace('posx1 = hy/4;', \"posx1 = %f/d_in_nm;\" % posx1)\n # geo = geo.replace('posx2 = hy/4;', \"posx2 = %f/d_in_nm;\" % posx2)\n # geo = geo.replace('posx3 = hy/2;', \"posx3 = %f/d_in_nm;\" % posx3)\n # # if supercell > 1:\n # # geo = geo.replace('a2 = 0;', \"a2 = %i;\" % self.diameter2)\n # # geo = geo.replace('lc4 = lc/1;', \"lc4 = lc/%f;\" % self.lc4)\n # # if supercell > 2:\n # # geo = geo.replace('a3 = 0;', \"a3 = %i;\" % self.diameter3)\n # # geo = geo.replace('lc5 = lc/1;', \"lc5 = lc/%f;\" % self.lc5)\n\n # open(msh_location + msh_name + '.geo', \"w\").write(geo)\n # EMUstack.conv_gmsh(msh_location+msh_name)\n # # gmsh_cmd = 'gmsh '+ msh_location + msh_name + '.msh'\n # # gmsh_cmd = 'gmsh '+ msh_location + msh_name + '.geo'\n # # os.system(gmsh_cmd)\n\n else:\n raise ValueError(\n \"Must be simulating either a '1D_array' or a '2D_array'.\")\n\n def calc_modes(self, light, **args):\n \"\"\" Run a simulation to find the NanoStruct's modes.\n\n Args:\n light (Light instance): Represents incident light.\n\n args (dict): Options to pass to :Simmo.calc_modes:.\n\n Returns:\n :Simmo: object\n \"\"\"\n simmo = Simmo(self, light)\n\n simmo.calc_modes(**args)\n return simmo\n\n\nclass ThinFilm(object):\n \"\"\" Represents an unstructured homogeneous film.\n\n Args:\n period (float): 
Artificial period imposed on homogeneous film \\\n to give consistently defined plane waves in terms of \\\n diffraction orders of structured layers.\n\n Keyword Args:\n period_y (float): The period of the unit cell in the y-direction.\\\n If None, period_y = period.\n\n height_nm (float): The thickness of the layer in nm or 'semi_inf'\\\n for a semi-infinte layer.\n\n num_pw_per_pol (int): The number of plane waves per polarisation.\n\n world_1d (bool): Does the rest of the stack have exclusively 1D \\\n periodic structures and homogeneous layers? \\\n If True we use the set of 1D diffraction order PWs.\n\n material : A :Material: instance specifying the n of \\\n the layer and related methods.\n\n loss (bool): If False sets Im(n) = 0, if True leaves n as is.\n \"\"\"\n def __init__(self,\n period,\n period_y=None,\n height_nm=1.0,\n num_pw_per_pol=0,\n world_1d=False,\n material=materials.Material(3.0 + 0.001),\n loss=True):\n self.period = float(period)\n if period_y is None:\n self.period_y = float(period)\n else:\n self.period_y = float(period_y)\n self.world_1d = world_1d\n self.height_nm = height_nm\n self.num_pw_per_pol = num_pw_per_pol\n self.material = material\n self.loss = loss\n\n def calc_modes(self, light):\n \"\"\" Run a simulation to find the ThinFilm's modes.\n\n Args:\n light (Light instance): Represents incident light.\n\n args (dict): Options to pass to :Anallo.calc_modes:.\n\n Returns:\n :Anallo: object\n \"\"\"\n an = Anallo(self, light)\n an.calc_modes()\n return an\n\n\nclass Light(object):\n \"\"\" Represents the light incident on structure.\n\n Incident angles may either be specified by `k_parallel` or by\n incident angles `theta` and `phi`, together with the refractive\n index `n_inc` of the incident medium.\n\n `wl_nm` and `k_pll` are both in unnormalised units.\n\n At normal incidence and TE polarisation the E-field is aligned\n with the y-axis.\n\n At normal incidence some plane waves and Bloch modes become degenerate.\n This causes problems for the FEM solver and the ordering of the plane\n waves. To avoid this a small (1e-5) theta and phi are introduced.\n\n Args:\n\n wl_nm (float): Wavelength, in nanometers.\n\n Keyword Args:\n max_order_PWs (int): Maximum plane wave order to include.\n\n k_parallel (tuple): The wave vector components (k_x, k_y) \\\n parallel to the interface planes. Units of nm^-1.\n\n theta (float): Polar angle of incidence in degrees.\n\n phi (float): Azimuthal angle of incidence in degrees \\\n measured from x-axis.\n \"\"\"\n def __init__(self,\n wl_nm,\n max_order_PWs=2,\n k_parallel=None,\n theta=None,\n phi=None,\n n_inc=1.):\n if np.imag(wl_nm) != 0:\n self.wl_nm = complex(wl_nm)\n print(\"Warning: using a complex wavelength. 
EMUstack can \\n\\\n only handle these for uniform films using 0 pw_orders.\")\n else:\n self.wl_nm = float(np.real(wl_nm))\n self._air_anallos = {}\n self.max_order_PWs = max_order_PWs\n\n if None == theta and None == k_parallel:\n raise ValueError(\"Specify incident angle either by \\n\\\n k_parallel OR by theta, phi and n_inc.\")\n\n if None == theta:\n self.k_pll = np.array(k_parallel, dtype='float64')\n # Check that not aligned with either x or y axis.\n if np.abs(self.k_pll[0]) == 0 or np.abs(self.k_pll[1]) == 0:\n print(\"Warning: a component of k_parallel is exactly zero, \\n\\\n this can lead to degeneracies and errors.\")\n else:\n # Check for inconsistent input\n if None != k_parallel or phi == None:\n raise ValueError(\"Specify incident angle either by \\n\\\n k_parallel OR by theta, phi and n_inc.\")\n # Avoid the degeneracies that occur at normal incidence\n # (FEM does not deal well with them)\n if abs(theta) < 1e-5: theta += 1e-5\n if abs(phi) < 1e-5: phi += 1e-5\n # Calculate k_parallel from incident angles\n k = 2 * np.pi * np.real(n_inc) / self.wl_nm\n theta *= np.pi / 180\n phi *= np.pi / 180\n self.k_pll = k * np.sin(theta) * np.array(\n [np.cos(phi), np.sin(phi)], dtype='float64')\n\n def _air_ref(self, period, period_y, world_1d):\n \"\"\" Return an :Anallo: corresponding to this :Light: in free space.\n\n The :Anallo: will have len(anallo.k_z) == 2 * num_pw.\n\n Args:\n period (float): period imposed on homogeneous film.\n\n period_y (float): period imposed on homogeneous film \\\n along y-axis.\n\n world_1d (bool): Specify whether to use 1D or 2D \\\n diffraction orders.\n \"\"\"\n\n if (period) in self._air_anallos:\n return self._air_anallos[(period)]\n else:\n air = ThinFilm(period=period,\n period_y=period_y,\n material=materials.Air,\n world_1d=world_1d)\n an = Anallo(air, self)\n\n an.is_air_ref = True\n\n kz = an.calc_kz()\n\n an.k_z = np.append(kz, kz)\n\n # Save this for future reference (we'll be back)\n self._air_anallos[(period)] = an\n return an\n\n\ndef dec_float_str(dec_float):\n \"\"\" Convert float with decimal point into string with '_' in place of '.' 
\"\"\"\n string = str(dec_float)\n fmt_string = string.replace('.', '_')\n return fmt_string\n\n\ndef calculate_ff(inc_shape,\n d,\n dy,\n a1,\n a2=0,\n a3=0,\n a4=0,\n a5=0,\n a6=0,\n a7=0,\n a8=0,\n a9=0,\n a10=0,\n a11=0,\n a12=0,\n a13=0,\n a14=0,\n a15=0,\n a16=0,\n el1=0):\n \"\"\" Calculate the fill fraction of the inclusions.\n\n Args:\n inc_shape (str): shape of the inclusions.\n\n d (float): period of structure, in same units as a1-16.\n\n dy (float): period of structure along y-axis, in same units as a1-16.\n\n a1 (float): diameter of inclusion 1, in same units as d.\n\n Keyword Args:\n a2-16 (float): diameters of further inclusions.\n\n el1 (float): ellipticity of inclusion 1.\n \"\"\"\n\n if inc_shape == 'circle' or inc_shape == 'ellipse':\n ff = np.pi * ((a1 / 2)**2 * np.sqrt(1 - el1) + (a2 / 2)**2 +\n (a3 / 2)**2 + (a4 / 2)**2 + (a5 / 2)**2 + (a6 / 2)**2 +\n (a7 / 2)**2 + (a8 / 2)**2 + (a9 / 2)**2 + (a10 / 2)**2 +\n (a11 / 2)**2 + (a12 / 2)**2 + (a13 / 2)**2 +\n (a14 / 2)**2 + (a15 / 2)**2 + (a16 / 2)**2) / (d * dy)\n elif inc_shape == 'square':\n ff = ((a1)**2 + (a2)**2 + (a3)**2 + (a4)**2 + (a5)**2 + (a6)**2 +\n (a7)**2 + (a8)**2 + (a9)**2 + (a10)**2 + (a11)**2 + (a12)**2 +\n (a13)**2 + (a14)**2 + (a15)**2 + (a16)**2) / (d * dy)\n elif inc_shape == 'dimer':\n ff = np.pi * ((a1 / 2.0)**2 + (a2 / 2.0)**2) / (d * dy)\n else:\n ff = 0.0\n return ff\n","repo_name":"bjornsturmberg/EMUstack","sub_path":"backend/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":93800,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"30"} +{"seq_id":"29832512313","text":"import sys\nread = sys.stdin.readline\n\nN = int(read())\nS = 0\n\nfor i in range(N):\n S += 1\n\n while True:\n if \"666\" in str(S):\n break\n else:\n S += 1\n\nprint(S)","repo_name":"pocj8ur4in/BOJ","sub_path":"BOJ/1436. 영화감독 숌.py","file_name":"1436. 영화감독 숌.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"42676866720","text":"import socket\nimport selectors\nimport signal\nimport sys\nimport argparse\nfrom urllib.parse import urlparse\n\n# Constant list used for checking if a movement command was called\nDIRECTIONS = ['NORTH', 'SOUTH', 'EAST', 'WEST', 'UP', 'DOWN']\n# Socket for sending messages.\n\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Server address.\n\nserver = ('', '')\n\n# User name for player.\n\nname = ''\n\n# Inventory of items.\n\ninventory = []\n\n# Selector setup\nclient_selector = selectors.DefaultSelector()\n\n# Command word for use in processing server messages\nin_command = \"\"\n\n# Signal handler for graceful exiting. 
Let the server know when we're gone.\n\ndef signal_handler(sig, frame):\n    print('Interrupt received, shutting down ...')\n    for item in inventory:\n        message = f'drop {item}'\n        client_socket.send(message.encode())\n    message = 'exit'\n    client_socket.send(message.encode())\n    sys.exit(0)\n\n\n# Simple function for setting up a prompt for the user.\n\ndef do_prompt(sock=None, mask=None, skip_line=False):\n    if skip_line:\n        print(\"\")\n    print(\"> \", end='', flush=True)\n    line = sys.stdin.readline().strip()\n    process_command(line)\n\n\n# Function to join a room.\n\ndef join_room():\n    global client_socket\n    global server\n    try:\n        client_socket.connect(server)\n        message = f'join {name}'\n        client_socket.send(message.encode())\n        response, addr = client_socket.recvfrom(1024)\n        response = response.decode()\n        print(response)\n    except ConnectionRefusedError:\n        print('Error: Host or port is not accepting connections.')\n        sys.exit(1)\n\n\n# Function for receiving data from client socket\ndef handle_data_from_server(sock, mask):\n    message = sock.recv(1024)\n    message = message.decode()\n    words = message.split()\n\n    # For 'take' command response\n    if (len(words) == 2) and (words[1] == 'taken'):\n        inventory.append(words[0])\n    # For 'drop' command response\n    if len(words) == 2 and words[1] == 'dropped':\n        inventory.remove(words[0])\n\n    print(message)\n\n\n\n# Function to handle commands from the user, checking them over and sending to the server as needed.\n\ndef process_command(command):\n    global client_socket\n    # Parse command.\n\n    words = command.split()\n    if not words:\n        return\n\n    # Check if we are dropping something. Only let server know if it is in our inventory.\n\n    if words[0] == 'drop':\n        if len(words) != 2:\n            print(\"Invalid command\")\n            return\n        elif words[1] not in inventory:\n            print(f'You are not holding {words[1]}')\n            return\n\n    # Send command to server, if it isn't a local-only one ('exit' and 'inventory' are handled below).\n\n    if command != 'inventory' and command != 'exit':\n        message = f'{command}'\n        client_socket.send(message.encode())\n        return\n    # Check for particular commands of interest from the user.\n\n    if command == 'exit':\n        for item in inventory:\n            message = f'drop {item}'\n            client_socket.send(message.encode())\n        client_socket.send(command.encode())\n        sys.exit(0)\n    # elif command == 'look':\n    #     response, addr = client_socket.recvfrom(1024)\n    #     print(response.decode())\n    elif command == 'inventory':\n        print(\"You are holding:\")\n        if len(inventory) == 0:\n            print('  No items')\n        else:\n            for item in inventory:\n                print(f'  {item}')\n        return\n    # elif words[0] == 'take':\n    #     response, addr = client_socket.recv(1024)\n    #     print(response.decode())\n    #     words = response.decode().split()\n    #     if (len(words) == 2) and (words[1] == 'taken'):\n    #         inventory.append(words[0])\n    # elif words[0] == 'drop':\n    #     response, addr = client_socket.recvfrom(1024)\n    #     print(response.decode())\n    #     inventory.remove(words[1])\n    # else:\n    #     response, addr = client_socket.recvfrom(1024)\n    #     print(response.decode())\n\n\n# Our main function.\n\ndef main():\n    global name\n    global client_socket\n    global client_selector\n    global server\n\n    # Register our signal handler for shutting down.\n\n    signal.signal(signal.SIGINT, signal_handler)\n\n    # Check command line arguments to retrieve a URL.\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"name\", help=\"name for the player in the game\")\n    parser.add_argument(\"server\", help=\"URL indicating server location in form of room://host:port\")\n    args = parser.parse_args()\n\n    # Check the URL passed in and make sure it's valid. 
If so, keep track of\n # things for later.\n\n try:\n server_address = urlparse(args.server)\n if (server_address.scheme != 'room') or (server_address.port is None) or (server_address.hostname is None):\n raise ValueError\n host = server_address.hostname\n port = server_address.port\n server = (host, port)\n except ValueError:\n print('Error: Invalid server. Enter a URL of the form: room://host:port')\n sys.exit(1)\n name = args.name\n\n # Connect to room, send message to verify\n\n join_room()\n\n # Complete what remains of client setup, register inputs from client socket and keyboard\n\n client_socket.setblocking(False)\n client_selector.register(client_socket, selectors.EVENT_READ, handle_data_from_server)\n client_selector.register(sys.stdin, selectors.EVENT_READ, do_prompt)\n # We now loop forever, sending commands to the server and reporting results\n\n do_prompt()\n while True:\n events = client_selector.select()\n for key, mask in events:\n callback = key.data\n callback(key.fileobj, mask)\n client_selector.close()\nif __name__ == '__main__':\n main()\n","repo_name":"PhilosophicalMeat/CS3357_Assignment03","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"7589001841","text":"import sys\nsys.setrecursionlimit(100000)\n\n\ndef coins(n):\n cache = [-1] * (n + 1)\n cache[0] = 0\n # Every index in cache < 0 is equal to infinity (wraps around)\n cache += [float('inf')] * (max(a, b, c)-1)\n\n for i in range(1, n+1):\n cache[i] = min(i, 1 + cache[i - a], 1 + cache[i - b], 1 + cache[i - c])\n\n return cache[n]\n\n\nn = int(input())\na = int(input())\nb = int(input())\nc = int(input())\n\nprint(coins(n))\n","repo_name":"Wolframe107/epicRepo","sub_path":"Skola/alg_komp/lab1_1/1e.py","file_name":"1e.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27015942428","text":"class Solution(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n new_head = head\n prev = None\n while head:\n temp = head\n if head.val == val:\n if new_head == head:\n new_head = temp.next\n else:\n prev.next = temp.next\n else:\n prev = temp\n head = head.next\n return new_head\n\nif __name__ == \"__main__\":\n solution = Solution()\n","repo_name":"lixinchn/LeetCode","sub_path":"src/0203_RemoveLinkedListElements.py","file_name":"0203_RemoveLinkedListElements.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"21239834389","text":"def select_statement(tables, conditions):\n statement = 'SELECT * FROM '\n n = len(tables)\n for i in range(n - 1):\n statement += tables[i] + ', '\n statement += tables[n - 1] + ' WHERE '\n n = len(conditions)\n for i in range(n - 1):\n statement += conditions[i] + ' AND '\n return statement + conditions[n - 1]\n\n\ndef insert_statement(table, columns, values):\n start = 'INSERT INTO ' + table + '('\n end = ') VALUES ('\n n = len(columns)\n for i in range(n - 1):\n start += columns[i] + ', '\n end += values[i] + ', '\n return start + columns[n - 1] + end + values[n - 1] + 
')'\n","repo_name":"JorgeRiveraMancilla/3rd-project","sub_path":"Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"75066278119","text":"import sys\n\nimport os\nfrom PyQt5 import QtWidgets, QtGui\nfrom PyQt5.QtWidgets import *\nclass picture(QWidget):\n    def __init__(self):\n        super(picture, self).__init__()\n        self.resize(600, 570)\n        self.setWindowTitle(\"Show image in label\")\n        self.label = QLabel(self)\n        self.label.setFixedSize(512, 512)\n        self.label.move(10, 30)\n        self.label.setStyleSheet(\"QLabel{background:white;}\"\n                                 \"QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}\"\n                                 )\n        # pic = QtGui.QPixmap(os.path.dirname(os.getcwd())+\"/resource/13.png\").scaled(self.label.width(), self.label.height())\n        # self.label.setPixmap(pic)\n        btn = QPushButton(self)\n        btn.setText(\"Open image\")\n        btn.move(10, 30)\n        btn.clicked.connect(self.openimage)\n    def openimage(self):\n        imgName, imgType = QFileDialog.getOpenFileName(self, \"Open image\", \"\", \"*.jpg;;*.png;;All Files(*)\")\n        print(imgName[-3:],' ',imgType,' ',type(imgType))\n        jpg = QtGui.QPixmap(imgName).scaled(self.label.width(), self.label.height())\n        self.label.setPixmap(jpg)\n\n\nif __name__ == \"__main__\":\n    app = QtWidgets.QApplication(sys.argv)\n    my = picture()\n    my.show()\n    sys.exit(app.exec_())","repo_name":"lgdlkq/sliver_07_CT","sub_path":"test/pyqt_test.py","file_name":"pyqt_test.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"}
+{"seq_id":"7321190520","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('about_me/', views.about_me),\n    path('', views.landing),\n    path('portfolio/', views.portfolio),\n    path('toy_pjts/', views.toy_pjts),\n    path('diners_home/', views.diners_home),\n]","repo_name":"lifeandmyth/kkm_portfolio_1","sub_path":"single_pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71427267240","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\ntrain = pd.read_json(\"../input/train.json\")\ntest = pd.read_json(\"../input/test.json\")\ntrain_test = [train,test]\ntrain.head()\ntrain.info()\ntest.info()\ntrain['interest_level'].value_counts()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'bathrooms', shade=True)\nfacet.add_legend()\nplt.show()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'bathrooms', shade=True)\nfacet.set(xlim=(0,2))\nfacet.add_legend()\nplt.show()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'bathrooms', shade=True)\nfacet.set(xlim=(2,6))\nfacet.add_legend()\nplt.show()\nfor dataset in train_test:\n    dataset.loc[ dataset['bathrooms'] <= 2, 'bathrooms'] = 2\n    dataset.loc[(dataset['bathrooms'] > 2) & (dataset['bathrooms'] <= 4), 'bathrooms'] = 1\n    dataset.loc[ dataset['bathrooms'] > 4, 'bathrooms'] = 0\n\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'bedrooms', shade=True)\nfacet.add_legend()\nplt.show()\nfor dataset in train_test:\n    dataset.loc[ dataset['bedrooms'] <= 2, 'bedrooms'] = 0\n    dataset.loc[(dataset['bedrooms'] > 2) & (dataset['bedrooms'] <= 4), 'bedrooms'] = 1\n    dataset.loc[ dataset['bedrooms'] > 4, 'bedrooms'] = 2\nsum(train['building_id']=='0')\ntrain[\"created\"] = pd.to_datetime(train[\"created\"])\ntrain[\"month_created\"] = train[\"created\"].dt.month\ntrain[\"month_created\"]\ntrain['month_created'].value_counts()\ndef bar_chart(feature):\n    low = train[train['interest_level']=='low'][feature].value_counts() # count the occurrences of each value\n    medium = train[train['interest_level']=='medium'][feature].value_counts()\n    high = train[train['interest_level']=='high'][feature].value_counts()\n    df = pd.DataFrame([low, medium, high])\n    df.index = ['low','medium','high']\n    df.plot(kind='bar',stacked=True, figsize=(10,5))\nbar_chart('month_created')\ntrain[\"created\"] = pd.to_datetime(train[\"created\"])\ntrain[\"date_created\"] = train[\"created\"].dt.date\ncnt_srs = train['date_created'].value_counts()\n\nplt.figure(figsize=(12,4))\nax = plt.subplot(111)\nax.bar(cnt_srs.index, cnt_srs.values, alpha=0.8)\nax.xaxis_date()\nplt.xticks(rotation='vertical')\nplt.show()\ntrain['day_of_week'] = train['created'].dt.weekday\ntest[\"created\"] = pd.to_datetime(test[\"created\"])\ntest['day_of_week'] = test['created'].dt.weekday\nfig = plt.figure(figsize=(12,6))\nax = sns.countplot(x=\"day_of_week\", hue=\"interest_level\",\n                   hue_order=['low', 'medium', 'high'], data=train,\n                   order=['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']);\nplt.xlabel('Day of Week');\nplt.ylabel('Number of occurrences');\n\n### Adding percents over bars\nheight = [p.get_height() for p in ax.patches]\nncol = int(len(height)/3)\ntotal = [height[i] + height[i + ncol] + height[i + 2*ncol] for i in range(ncol)] * 3\nfor i, p in enumerate(ax.patches): \n    ax.text(p.get_x()+p.get_width()/2,\n            height[i] + 50,\n            '{:1.0%}'.format(height[i]/total[i]),\n            ha=\"center\") \ntrain['created_day'] = train['created'].dt.day\ntest['created_day'] = test['created'].dt.day\n### Interest per Day of 
Week\nfig = plt.figure(figsize=(12,6))\nsns.countplot(x=\"created_day\", hue=\"interest_level\", hue_order=['low', 'medium', 'high'], data=train);\nplt.xlabel('created_day');\nplt.ylabel('Number of occurrences');\ntrain[\"num_features\"] = train[\"features\"].apply(len)\ntest[\"num_features\"] = test[\"features\"].apply(len)\nllimit = np.percentile(train.latitude.values, 1)\nulimit = np.percentile(train.latitude.values, 99)\ntrain['latitude'].ix[train['latitude']<llimit] = llimit\ntrain['latitude'].ix[train['latitude']>ulimit] = ulimit\n\nplt.figure(figsize=(8,6))\nsns.distplot(train.latitude.values, bins=50, kde=False)\nplt.xlabel('latitude', fontsize=12)\nplt.show()\nllimit = np.percentile(train.longitude.values, 1)\nulimit = np.percentile(train.longitude.values, 99)\ntrain['longitude'].ix[train['longitude']<llimit] = llimit\ntrain['longitude'].ix[train['longitude']>ulimit] = ulimit\n\nplt.figure(figsize=(8,6))\nsns.distplot(train.longitude.values, bins=50, kde=False)\nplt.xlabel('longitude', fontsize=12)\nplt.show()\ntrain['price']\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'price', shade=True)\nfacet.add_legend()\nplt.show()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'price', shade=True)\nfacet.set(xlim=(0,100000))\nfacet.add_legend()\nplt.show()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'price', shade=True)\nfacet.set(xlim=(100000,200000))\nfacet.add_legend()\nplt.show()\nfacet = sns.FacetGrid(train, hue = \"interest_level\", aspect=4)\nfacet.map(sns.kdeplot, 'price', shade=True)\nfacet.set(xlim=(0,10000))\nfacet.add_legend()\nplt.show()\ntrain.info()\nfeatures_drop = ['building_id', 'created', 'description', 'display_address', 'features', 'manager_id', 'photos', 'street_address', 'month_created', 'date_created']\ntrain1 = train.drop(features_drop, axis=1)\nfeatures_drop = ['building_id', 'created', 'description', 'display_address', 'features', 'manager_id', 'photos', 'street_address']\ntest1 = test.drop(features_drop, axis=1)\nX = train[['bathrooms','bedrooms','latitude','longitude','price','day_of_week','created_day','num_features']]\n\ny = train1['interest_level']\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import log_loss\nX_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.33)\nclf = RandomForestClassifier(n_estimators=1000)\nclf.fit(X_train, y_train)\ny_val_pred = clf.predict_proba(X_val)\nlog_loss(y_val, y_val_pred)\nX = test[['bathrooms','bedrooms','latitude','longitude','price','day_of_week','created_day','num_features']]\n\ny = clf.predict_proba(X)\nlabels2idx = {label: i for i, label in enumerate(clf.classes_)}\nlabels2idx\nsub = pd.DataFrame()\nsub[\"listing_id\"] = test[\"listing_id\"]\nfor label in [\"high\", \"medium\", \"low\"]:\n    sub[label] = y[:, labels2idx[label]]\nsub.to_csv(\"submission_rf.csv\", index=False)\n\n","repo_name":"aorursy/new-nb-3","sub_path":"hyunkyung12_kernel19b52a1b29.py","file_name":"hyunkyung12_kernel19b52a1b29.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33455796583","text":"\"\"\"Convert Audio.\"\"\"\nfrom os import name as osName, system as setCmd\nimport os\nfrom pytube import YouTube as getVideo\n\n\ndef main():\n    \"\"\"Main program.\"\"\"\n    video_url = input('Enter the Youtube video URL: ')\n\n    video = getVideo(video_url)\n    audio_name = video.streams.get_audio_only('mp4').download()\n\n    
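# download() returns the full path of the saved stream; the lines below\n    # take the file name after the last path separator and build a space-free\n    # .mp3 name from it (e.g. 'My Song.mp4' becomes 'MySong.mp3').\n    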
audio_salt_name = audio_name.split('\\\\')[-1]\n    audio_ren_name = (audio_salt_name.split('.')[0] +\n                      '.mp3').replace(' ', '')\n\n    if osName == 'nt':\n        # path = getPath()+'\\\\'\n        # check whether the file was already created earlier\n        if os.path.exists(audio_ren_name):\n            setCmd(f'del {audio_ren_name}')\n        audio_salt_name = '\"'+audio_salt_name+'\"'\n        setCmd(f'ren {audio_salt_name} {audio_ren_name}')\n    else:\n        # path = getPath()+'/'\n        setCmd(f'mv {audio_salt_name} {audio_ren_name}')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ekremsaydam/python-exam","sub_path":"advance/pytube_video_donusturme.py","file_name":"pytube_video_donusturme.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"42587913658","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 02 15:29:38 2018\r\n\r\n@author: zrssch\r\n\"\"\"\r\nimport numpy as np\r\nimport sys\r\nimport time\r\nimport os\r\nimport gen.generator as gens\r\nimport utils.conf as conf\r\nimport disc.hier_disc as h_disc\r\nimport utils.data_utils as data_utils\r\nimport torch\r\nfrom torch import optim\r\nfrom torch.autograd import Variable\r\nfrom torch.nn.utils import clip_grad_norm\r\nfrom torch.nn import functional as F\r\n\r\ngen_config = conf.gen_config\r\ndisc_config = conf.disc_config\r\n# evl_config = conf.disc_config\r\n\r\ndef gen_pre_train():\r\n    gens.train(gen_config)\r\n    \r\ndef gen_test():\r\n    gens.test_decoder(gen_config)\r\n    \r\n# gen data for disc training\r\ndef gen_disc():\r\n    gens.gen_disc_data(gen_config)\r\n    \r\n# pre train discriminator\r\ndef disc_pre_train():\r\n    #discs.train_step(disc_config, evl_config)\r\n    h_disc.hier_train(disc_config)\r\n    \r\n# prepare disc_data for discriminator and generator\r\ndef disc_train_data(seq2seq, vocab, source_inputs, target_inputs,\r\n                    encoder_inputs, decoder_inputs, mc_search=False):\r\n    train_query, train_answer, train_answer_gen = [], [], []\r\n    maxlen = gen_config.maxlen\r\n    pad = int(data_utils.PAD_ID)\r\n    for query, answer in zip(source_inputs, target_inputs):\r\n        query = query + [pad]*(maxlen-len(query))\r\n        train_query.append(query)\r\n        answer = answer[:-1] + [pad]*(maxlen-len(answer)+1) # remove eos\r\n        train_answer.append(answer)\r\n    train_labels = [1 for _ in source_inputs]\r\n    \r\n    # generate response\r\n    src = torch.from_numpy(np.array(encoder_inputs).T)\r\n    trg = torch.from_numpy(np.array(decoder_inputs).T)\r\n    src = Variable(src).cuda()\r\n    trg = Variable(trg).cuda()\r\n    probs, result = seq2seq.gen_data(src, trg)\r\n    result = [list(i) for i in result]\r\n    result = np.array(result).T.tolist()\r\n    resps = []\r\n    for each in result:\r\n        temp = []\r\n        for idx in each:\r\n            if idx == data_utils.EOS_ID:\r\n                break\r\n            if idx != data_utils.PAD_ID:\r\n                temp.append(idx)\r\n        resps.append(temp)\r\n    assert len(resps) == len(train_query)\r\n    for i, output in enumerate(resps):\r\n        output = output + [pad] * (maxlen - len(output))\r\n        if len(output) <= maxlen-2:\r\n            output_gen = [data_utils.GO_ID] + output +[data_utils.EOS_ID] + [pad] * (maxlen - len(output)-2)\r\n        else:\r\n            output_gen = [data_utils.GO_ID] + output[:maxlen-2] +[data_utils.EOS_ID]\r\n        train_query.append(train_query[i])\r\n        train_answer.append(output)\r\n        train_labels.append(0)\r\n        train_answer_gen.append(output_gen)\r\n    return train_query, train_answer, train_labels, train_answer_gen\r\n\r\n# Adversarial Learning for Neural Dialogue Generation\r\ndef al_train():\r\n    vocab, rev_vocab, dev_set, train_set = 
gens.prepare_data(gen_config)\r\n    \r\n    seq2seq = torch.load('pre_seq2seq.pth') \r\n    optim_seq2seq = optim.Adam(seq2seq.parameters(), lr=gen_config.lr)\r\n    hrnn = torch.load('pre_hrnn.pth') \r\n    optim_hrnn = optim.Adam(hrnn.parameters(), lr=disc_config.lr)\r\n    # hrnn, optim_hrnn = h_disc.create_model(disc_config)\r\n    # seq2seq, optim_seq2seq = gens.create_model(gen_config)\r\n    \r\n    current_step = 0\r\n    while True:\r\n        current_step += 1\r\n        start_time = time.time()\r\n        print(\"==================Update Discriminator: %d=====================\" % current_step)\r\n        for i in range(disc_config.disc_steps):\r\n            # 1.Sample (X,Y) from real disc_data\r\n            encoder_inputs, decoder_inputs, source_inputs, target_inputs = gens.getbatch(train_set, gen_config.batch_size, gen_config.maxlen)\r\n            \r\n            # 2.Sample (X,Y) and (X, ^Y) through ^Y ~ G(*|X)\r\n            train_query, train_answer, train_labels, train_answer_gen = disc_train_data(seq2seq, vocab, source_inputs, target_inputs,\r\n                                             encoder_inputs, decoder_inputs, mc_search=False)\r\n            print(\"==============================mc_search: False===================================\")\r\n            if current_step % 200 == 0:\r\n                print(\"train_query: \", len(train_query))\r\n                print(\"train_answer: \", len(train_answer))\r\n                print(\"train_labels: \", len(train_labels))\r\n                for i in range(len(train_query)):\r\n                    print(\"label: \", train_labels[i])\r\n                    print(\"train_answer_sentence: \", train_answer[i])\r\n                    print(\" \".join([rev_vocab[output] for output in train_answer[i]]))\r\n            \r\n            # 3.Update D using (X, Y) as positive examples and (X, ^Y) as negative examples\r\n            step_loss = h_disc.disc_step(hrnn, optim_hrnn, disc_config, train_query, train_answer, train_labels)\r\n            print(\"update discriminator loss is:\", step_loss)\r\n        \r\n        for i in range(gen_config.gen_steps):\r\n            print(\"==================Update Generator: %d=========================\" % current_step)\r\n            # 1.Sample (X,Y) from real disc_data\r\n            encoder_inputs, decoder_inputs, source_inputs, target_inputs = gens.getbatch(train_set, gen_config.batch_size, gen_config.maxlen)\r\n            \r\n            # 2.Sample (X,Y) and (X, ^Y) through ^Y ~ G(*|X)\r\n            train_query, train_answer, train_labels, train_answer_gen = disc_train_data(seq2seq, vocab, source_inputs, target_inputs,\r\n                                             encoder_inputs, decoder_inputs, mc_search=False)\r\n            train_query_neg = []\r\n            train_answer_neg = []\r\n            train_labels_neg = []\r\n            train_query_pos = []\r\n            train_answer_pos = []\r\n            train_labels_pos = []\r\n            for j in range(len(train_labels)):\r\n                if train_labels[j] == 0:\r\n                    train_query_neg.append(train_query[j])\r\n                    train_answer_neg.append(train_answer[j])\r\n                    train_labels_neg.append(0)\r\n                else:\r\n                    train_query_pos.append(train_query[j])\r\n                    train_answer_pos.append(train_answer[j])\r\n                    train_labels_pos.append(1)\r\n            \r\n            # 3.Compute Reward r for (X, ^Y) using D.---based on Monte Carlo search\r\n            reward = h_disc.disc_reward_step(hrnn, train_query_neg, train_answer_neg)\r\n            # 4.update G on (X, ^Y) using reward r\r\n            loss_reward = gens.train_with_reward(gen_config, seq2seq, optim_seq2seq, reward, train_query_neg, train_answer_gen)\r\n            # 5.Teacher-Forcing: update G on (X, Y)\r\n            loss = gens.teacher_forcing(gen_config, seq2seq, optim_seq2seq, encoder_inputs, decoder_inputs)\r\n            print(\"update generate loss, reward is %f, loss_reward is %f, loss is %f\"%(np.mean(reward), loss_reward, loss))\r\n        end_time = time.time()\r\n        print(\"step %d spend time: %f\"%(current_step, end_time-start_time))\r\n        \r\n        if current_step%1000 == 0:\r\n            torch.save(seq2seq, './seq2seq.pth')\r\n            
torch.save(hrnn, './hrnn.pth')\r\n    \r\n    \r\nif __name__ == \"__main__\":\r\n    # gen_pre_train()\r\n    # gen_test()\r\n    # step_2 gen training data for disc\r\n    # gen_disc()\r\n    \r\n    # step_3 training disc model\r\n    # disc_pre_train()\r\n    \r\n    # step_4 training al model\r\n    al_train()\r\n","repo_name":"njuzrs/task2-pytorch_neural_dialogue_generation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"33404059979","text":"from tron.player import Player, Direction\nfrom ordered_set import OrderedSet\nfrom enum import Enum\nimport numpy as np\nimport queue\nimport random\n\n\nclass TreeNode(object):\n    def __init__(self, parent, value, action):\n        self._parent = parent\n        self._children = []\n        self._value = value\n        self._action = action  # action played from the parent to reach this node\n        self._minimax_action = 0  # which is the best action from this state\n\n    def is_leaf(self):\n        return self._children == []\n\n    def is_root(self):\n        return self._parent is None\n\n    def expand(self, i):\n        self._children.append(TreeNode(self, 0, i+1))\n\n    def get_value(self):\n        return self._value\n\n    def set_value(self, value):\n        self._value = value\n\n    def get_action(self):\n        return self._action\n\n    def set_action(self, action):\n        self._action = action\n\n    def get_minimax_action(self):\n        return self._minimax_action\n\n    def set_minimax_action(self, minimax_action):\n        self._minimax_action = minimax_action\n\n\nclass SetQueue(queue.Queue):\n    def _init(self, maxsize):\n        self.queue = OrderedSet()\n\n    def _put(self, item):\n        self.queue.add(item)\n\n    def _get(self):\n        head = self.queue.__getitem__(0)\n        self.queue.remove(head)\n        return head\n\n\nclass Minimax(object):\n    def __init__(self, depth, mode):\n        self.root = TreeNode(None, 0, 0)\n        self.depth = depth\n        self.mode = mode\n\n    def get_shortest_path(self, game_map, ind, pl_mi):\n        path_queue = SetQueue()\n        dist_map = np.copy(game_map)\n        path_queue._put((ind[0], ind[1], pl_mi))\n\n        while not path_queue.empty():\n            queue_elem = path_queue._get()\n            x = queue_elem[0]\n            y = queue_elem[1]\n            l = queue_elem[2]\n\n            dist_map[x, y] = l+pl_mi\n\n            if dist_map[x, y - 1] == 1:\n                path_queue._put((x, y - 1, l + pl_mi))\n            if dist_map[x + 1, y] == 1:\n                path_queue._put((x + 1, y, l + pl_mi))\n            if dist_map[x, y + 1] == 1:\n                path_queue._put((x, y + 1, l + pl_mi))\n            if dist_map[x - 1, y] == 1:\n                path_queue._put((x - 1, y, l + pl_mi))\n\n        return dist_map\n\n    def get_voronoi_value(self, game_map, ind1, ind2):\n        p1_map = self.get_shortest_path(game_map, ind1, 1)\n        p2_map = self.get_shortest_path(game_map, ind2, -1)\n\n        p1_area = 0\n        p2_area = 0\n\n        \"\"\" visual map (not necessary)\n        for i in range(p1_map.shape[0]):\n            for j in range(p2_map.shape[1]):\n                if p2_map[i, j] == -2:\n                    p1_map[i, j] = -10\n                elif p1_map[i, j] == 2:\n                    p1_map[i, j] = 10\n                elif p1_map[i, j] != -1:\n                    if p1_map[i, j] + p2_map[i, j] == 0:\n                        p1_map[i, j] = 0\n                    elif p1_map[i, j] + p2_map[i, j] > 0:\n                        p1_map[i, j] = -5\n                    else:\n                        p1_map[i, j] = 5\n        \"\"\"\n\n        for i in range(p1_map.shape[0]):\n            for j in range(p2_map.shape[1]):\n                if not p1_map[i, j] == -1 and not p1_map[i, j] == 2 and not p2_map[i, j] == -2:\n                    if p1_map[i, j] != 1 and p2_map[i, j] == 1:\n                        p1_area += 1\n                    elif p1_map[i, j] == 1 and p2_map[i, j] != 1:\n                        p2_area += 1\n                    elif p1_map[i, j] + p2_map[i, j] < 0:\n                        p1_area += 1\n                    elif p1_map[i, j] + p2_map[i, j] > 0:\n                        p2_area += 1\n\n        return p1_area - p2_area\n\n    # game_map : numpy.array(12, 12)\n    def 
distance_walls(self, game_map, ind):\n head_crash = 0\n\n up = 1\n while game_map[ind[0], ind[1] - up] == 1:\n up += 1\n\n right = 1\n while game_map[ind[0] + right, ind[1]] == 1:\n right += 1\n\n down = 1\n while game_map[ind[0], ind[1] + down] == 1:\n down += 1\n\n left = 1\n while game_map[ind[0] - left, ind[1]] == 1:\n left += 1\n\n return up + right+ down + left\n\n def get_next_map(self, game_map, action, depth_even_odd):\n game_map_copy = np.copy(game_map)\n\n if depth_even_odd == 1:\n ind = np.unravel_index(np.argmax(game_map, axis=None), game_map.shape)\n else:\n ind = np.unravel_index(np.argmin(game_map, axis=None), game_map.shape)\n\n if action == 1:\n game_map_copy[ind[0], ind[1] - 1] = 10 * depth_even_odd\n if action == 2:\n game_map_copy[ind[0] + 1, ind[1]] = 10 * depth_even_odd\n if action == 3:\n game_map_copy[ind[0], ind[1] + 1] = 10 * depth_even_odd\n if action == 4:\n game_map_copy[ind[0] - 1, ind[1]] = 10 * depth_even_odd\n\n game_map_copy[ind] = -1\n\n return game_map_copy\n\n def get_blocked(self, game_map, depth_even_odd):\n if depth_even_odd == 1:\n ind = np.unravel_index(np.argmax(game_map, axis=None), game_map.shape)\n else:\n ind = np.unravel_index(np.argmin(game_map, axis=None), game_map.shape)\n\n blocked = np.zeros(4)\n\n if game_map[ind[0], ind[1] - 1] != 1:\n if game_map[ind[0], ind[1] - 1] == 10:\n blocked[0] = 2\n else:\n blocked[0] = 1\n if game_map[ind[0] + 1, ind[1]] != 1:\n if game_map[ind[0] + 1, ind[1]] == 10:\n blocked[1] = 2\n else:\n blocked[1] = 1\n if game_map[ind[0], ind[1] + 1] != 1:\n if game_map[ind[0], ind[1] + 1] == 10:\n blocked[2] = 2\n else:\n blocked[2] = 1\n if game_map[ind[0] - 1, ind[1]] != 1:\n if game_map[ind[0] - 1, ind[1]] == 10:\n blocked[3] = 2\n else:\n blocked[3] = 1\n\n all_blocked = True\n for element in blocked:\n if element == 0:\n all_blocked = False\n break\n\n return blocked, all_blocked\n\n \"\"\"\n def update_with_move(self, last_move):\n if last_move in (child.get_action() for child in self.root._children):\n self.root = self.root._children[last_move]\n self.root._parent = None\n else:\n self.root = TreeNode(None, 0, 0)\n \"\"\"\n\n def minimax_search(self, node, game_map, depth, crash = False):\n if crash: # head vs head crashing state\n node.set_value(0)\n\n if depth == 0:\n ind1 = np.unravel_index(np.argmax(game_map, axis=None), game_map.shape)\n ind2 = np.unravel_index(np.argmin(game_map, axis=None), game_map.shape)\n if self.mode == Mode.DISTWALL:\n cur_player_dist = self.distance_walls(game_map, ind1)\n opp_player_dist = self.distance_walls(game_map, ind2)\n node.set_value(cur_player_dist - opp_player_dist)\n else: # Mode.VORONOI\n node.set_value(self.get_voronoi_value(game_map, ind1, ind2))\n\n return 0 # for exit 1 recursion step\n\n depth_even_odd = 1 - 2 * (depth % 2) # even depth: 1, odd depth: -1\n blocked, all_blocked = self.get_blocked(game_map, depth_even_odd)\n\n if all_blocked:\n return random.randint(1, 4)\n\n crash_act = 0\n if node.is_leaf():\n for i in range(4):\n if blocked[i] == 0:\n node.expand(i)\n elif blocked[i] == 2:\n node.expand(i)\n crash_act = i + 1\n\n for child in node._children:\n next_map = self.get_next_map(game_map, child.get_action(), depth_even_odd)\n if child.get_action() == crash_act:\n self.minimax_search(child, next_map, depth-1, crash = True)\n else:\n self.minimax_search(child, next_map, depth-1)\n\n # alpha-beta pruning\n if depth_even_odd == -1 and node._parent.get_minimax_action() != 0:\n if child.get_value() <= node._parent.get_value():\n 
node.set_value(child.get_value())\n node.set_minimax_action(child.get_action())\n\n return 0 # for exit 1 recursion step\n\n if depth_even_odd == 1:\n minimax_value = max(child.get_value() for child in node._children)\n else:\n minimax_value = min(child.get_value() for child in node._children)\n\n node.set_value(minimax_value)\n minimax_acts = [child.get_action() for child in node._children if child.get_value() == minimax_value]\n node.set_minimax_action(random.choice(minimax_acts))\n\n return node.get_minimax_action()\n\n def get_move(self, game_map):\n return self.minimax_search(self.root, game_map, self.depth)\n\n def __str__(self):\n return \"Minimax\"\n\n\nclass Mode(Enum):\n\n DISTWALL = 1\n VORNOI = 2\n\n\nclass MinimaxPlayer(Player):\n\n def __init__(self, depth, mode = Mode.VORNOI):\n super(MinimaxPlayer, self).__init__()\n self.mode = mode\n self.minimax = Minimax(depth, mode)\n self.direction = None\n self.depth = depth\n\n def initialize_minimax(self):\n self.minimax = Minimax(self.depth, self.mode)\n\n def action(self, map, id):\n self.initialize_minimax()\n game_map = map.state_for_player(id).T\n next_action = self.minimax.get_move(game_map)\n\n if next_action == 1:\n next_direction = Direction.UP\n elif next_action == 2:\n next_direction = Direction.RIGHT\n elif next_action == 3:\n next_direction = Direction.DOWN\n elif next_action == 4:\n next_direction = Direction.LEFT\n\n return next_direction\n\n def next_position_and_direction(self, current_position,id,map):\n\n direction = self.action(map,id)\n return self.next_position(current_position, direction), direction\n\n def next_position(self, current_position, direction):\n if direction == Direction.UP:\n return current_position[0] - 1, current_position[1]\n elif direction == Direction.RIGHT:\n return current_position[0], current_position[1] + 1\n elif direction == Direction.DOWN:\n return current_position[0] + 1, current_position[1]\n elif direction == Direction.LEFT:\n return current_position[0], current_position[1] - 1\n","repo_name":"itzThillaiC/TRON-using-reinforcement-learning","sub_path":"Deep-Q-learning_TRON/tron/minimax.py","file_name":"minimax.py","file_ext":"py","file_size_in_byte":10506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35137229581","text":"import sys\nimport pathlib\nfrom tokenize import tokenize, STRING, NL\n\n\ndef concat_check(filename):\n filepath = pathlib.Path(filename)\n with filepath.open(\"rb\") as f:\n tokenizer = tokenize(f.readline)\n prev_token = next(tokenizer)\n for token in tokenizer:\n if token.type == STRING and prev_token.type == STRING:\n print(\n f\"{filename}, line {prev_token.end[0]} between {prev_token.string} and {token.string}\"\n )\n if token.type != NL:\n prev_token = token\n\n\nif __name__ == \"__main__\":\n args = sys.argv[1:]\n for filename in args:\n concat_check(filename)\n","repo_name":"gtcooke94/snippets","sub_path":"morsels/20200615_concat_check/concat_check.py","file_name":"concat_check.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13800714405","text":"# Versao 2, com kmeans++ implementado de forma adequada\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import sqrt\r\n\r\n# Preparaçao dos dados\r\nlocalData = './observacoes.txt'\r\ndf = pd.read_csv(localData,sep = ' ',header=None) # Importando os dados\r\ninData = df.values # Passando os dados 
do data frame para array\r\nb = np.array([]) # Lista de grupos\r\ncores = ['r','g','b','k','c','m','y','w']\r\nqtdk = int(input('Digite a quantidade de centroides desejadas:')) # Quantidade de centroides\r\nk = [] # Lista contendo os centroides\r\nciclos = []\r\nmediaErro = []\r\nx_Min = np.min(inData[:,0])\r\nx_Max = np.max(inData[:,0])\r\ny_Min = np.min(inData[:,1])\r\ny_Max = np.max(inData[:,1])\r\nprint('Qual metodo deseja utilizar?\\n[1] K_means tradicional\\n[2] K_means ++')\r\nmetodo = int(input())\r\n\r\n\r\n#Distancia euclidiana\r\ndef distanciaEuclid (sample, c):\r\n return sqrt(((inData[sample,0]-k[c][0])**2)+((inData[sample,1]-k[c][1])**2))\r\n\r\n# Calculo da distancia minima euclidiana dentre os k's existentes\r\ndef minimoEuclid (sample):\r\n listadistancias = [] #lista auxiliar para comparar as distancias de cada centroide\r\n for c in range(qtdk): # Calculo da distancia para cada centroide\r\n listadistancias.append(distanciaEuclid(sample,c))\r\n return listadistancias.index(min(listadistancias)) # Retorna o indice da lista de distancias, que equivale ao respectivo k\r\n\r\n# Atribui o index dos grupos em b\r\ndef ajustab ():\r\n b = np.array([]) # Lista de grupos\r\n for sample in range(len(inData)):\r\n b = np.append(b,minimoEuclid(sample))\r\n return b\r\n\r\n# Atualizar os valores de cada centroide\r\ndef train(inData,qtdk,k):\r\n erroquadratico = []\r\n b = ajustab()\r\n for c in range(qtdk): # Roda por cada centroide (grupo)\r\n xlista = [] # Zera a lista utlizada pelo centroide anterior\r\n ylista = []\r\n for sample in range(len(inData)):\r\n if b[sample] == c: \r\n xlista.append(inData[sample,0]) # Para determinado grupo, percorre por todas as entradas...\r\n ylista.append(inData[sample,1]) # ...agrupando em uma lista os valores de X e Y correspondentes\r\n erro = (sqrt(((inData[sample,0]-k[c][0])**2)+((inData[sample,1]-k[c][1])**2)))**2\r\n erroquadratico.append(erro)\r\n k[c][0] = sum(xlista)/len(xlista)\r\n k[c][1] = sum(ylista)/len(ylista)\r\n plt.plot(k[c][0],k[c][1],color = cores[c],marker = '|') # Somente para vizualisar o caminho percorrido pelo centroide\r\n media = sum(erroquadratico)/len(erroquadratico)\r\n mediaErro.append(media)\r\n ciclos.append(len(mediaErro))\r\n\r\n\r\n#Plotagem dos pontos com relaçao aos grupos\r\ndef newplot (inData,k):\r\n cores = ['r','g','b','k','c','m','y','w']\r\n b = ajustab()\r\n listacores = []\r\n for l in range(len(b)):\r\n listacores.append(cores[int(b[l])]) # Indica qual a cor do grupo que b[l] está indicando\r\n plt.plot(inData[l,0],inData[l,1],color = listacores[l],marker = '.')\r\n listacores = np.array(listacores)\r\n for l in range(len(k)):\r\n plt.grid(b=1)\r\n plt.plot(k[l][0],k[l][1],color = cores[l],marker = 's')\r\n plt.show()\r\n\r\n######################### Inicializaçao das centroides ##########################\r\n# Utilizando K_means traidicional\r\nif metodo == 1:\r\n for i in range(qtdk):\r\n kx = np.random.uniform(x_Min,x_Max)\r\n ky = np.random.uniform(y_Min,y_Max)\r\n kxy = np.array([kx,ky])\r\n k.append(kxy)\r\n for l in range(len(k)):\r\n plt.plot(k[l][0],k[l][1],color = cores[l],marker = 's')\r\n\r\n# Utilizando o K_means ++\r\nelif metodo == 2:\r\n prob = np.random.uniform(0,len(inData),qtdk) # Inicia uma lista de pontos aleatorios da quantidade de centroides desejadas\r\n for i in range(qtdk):\r\n k.append(inData[int(prob[i])]) # Atribui os centroides aos pontos declarados acima\r\n listadistancias = []\r\n listaprobabilidade = []\r\n for sample in range(len(inData)):\r\n distancia = 
distanciaEuclid(sample,i) # Calculo da distancia euclidiana\r\n listadistancias.append(distancia**2) # Coloca a distancia de cada ponto com relaçao a centroide em uma lista\r\n for c in listadistancias:\r\n for l in range(int(c)):\r\n listaprobabilidade.append(c) # Cria uma lista em que cada falor, se repete na quantidade de seu inteiro (os que possuem maior distancia aparecerão mais vezes na lista)\r\n distChosen = listaprobabilidade[int(np.random.uniform(0,len(listaprobabilidade)-1))] # Escolhe uma distancia aleatoria dentro da lista (a lista é ponderada pela frequencia de cada distancia)\r\n pointChosen = listadistancias.index(distChosen) # Escolhe o ponto referente a aquela distancia\r\n k[i] = inData[pointChosen] # Atribui o ponto escolhido a centroide\r\n\r\n\r\n# Plotagem inicial\r\nfor l in range(len(k)):# Plotagem das centroides\r\n plt.plot(k[l][0],k[l][1],color = cores[l],marker = 's') \r\nplt.grid(b = 1)\r\nplt.plot(inData[:,0],inData[:,1],'.k')# Plotagem dos pontos\r\nplt.show()\r\n\r\n# Fase de utilizaçao do k_means\r\ndef start(inData,k,qtdk): \r\n teste = 0\r\n testelim = int(input('Digite aqui a quantidade maxima de ciclos'))\r\n newplot(inData,k)\r\n while teste < testelim:\r\n print(teste+1)\r\n train(inData,qtdk,k)\r\n teste += 1\r\n \r\n newplot(inData,k)\r\n plt.grid(b = 1)\r\n plt.plot(ciclos,mediaErro)\r\n plt.xlabel('QTD ciclos')\r\n plt.ylabel('Média erro')\r\n plt.show()\r\n print('Visto que existem 200 erros quadráticos em cada ciclo (qtd de pontos), \\\r\nrealizei o calculo do erro com a média do erro quadratico de cada ciclo')\r\n\r\nstart(inData,k,qtdk)\r\n","repo_name":"HenriqueMedeiross/Kmeans","sub_path":"Trab 07_v2.py","file_name":"Trab 07_v2.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"35870402525","text":"import json\nimport uuid\nfrom unittest import TestCase\n\nfrom plugIt.bridge.bridge import Bridge\n\n\nclass TestPlugIt(TestCase):\n\n def setUp(self):\n\n self.plugIt = Bridge('http://0.0.0.0/')\n\n _self = self\n\n def _do_query(url, method='GET', query_string=None, body=None, files=None, additional_headers=None,\n session=None):\n _self.last_do_query_call = {'url': url, 'method': method, 'query_string': query_string,\n 'body': body, 'files': files, 'additional_headers': additional_headers,\n 'session': session}\n\n class DummyResponse:\n def json(self):\n return _self.plugIt.toReplyJson()\n\n @property\n def status_code(self):\n return _self.plugIt.toReplyStatusCode()\n\n @property\n def headers(self):\n return _self.plugIt.toReplyHeaders()\n\n @property\n def content(self):\n return json.dumps(self.json())\n\n return DummyResponse()\n\n self.plugIt.do_query = _do_query\n\n def test_ping(self):\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'data': self.last_do_query_call['url'].split('data=', 1)[1]}\n\n assert (self.plugIt.ping())\n\n self.plugIt.toReplyStatusCode = lambda: 404\n\n assert (not self.plugIt.ping())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'data': self.last_do_query_call['url'].split('data=', 1)[1] * 2}\n\n assert (not self.plugIt.ping())\n\n assert (self.last_do_query_call['url'].startswith('ping'))\n\n def test_check_version(self):\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,\n 'protocol': self.plugIt.PI_API_NAME}\n\n assert 
(self.plugIt.check_version())\n assert (self.last_do_query_call['url'] == 'version')\n\n self.plugIt.toReplyJson = lambda: {'result': 'poney', 'version': self.plugIt.PI_API_VERSION,\n 'protocol': self.plugIt.PI_API_NAME}\n assert (not self.plugIt.check_version())\n\n self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION * 2,\n 'protocol': self.plugIt.PI_API_NAME}\n assert (not self.plugIt.check_version())\n\n self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,\n 'protocol': self.plugIt.PI_API_NAME * 2}\n assert (not self.plugIt.check_version())\n\n self.plugIt.toReplyStatusCode = lambda: 201\n self.plugIt.toReplyJson = lambda: {'result': 'Ok', 'version': self.plugIt.PI_API_VERSION,\n 'protocol': self.plugIt.PI_API_NAME}\n\n assert (not self.plugIt.check_version())\n\n def test_new_mail(self):\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'result': 'Ok'}\n\n message_id = str(uuid.uuid4())\n message = str(uuid.uuid4())\n\n assert (self.plugIt.new_mail(message_id, message))\n assert (self.last_do_query_call['url'] == 'mail')\n assert (self.last_do_query_call['body'].get('response_id') == message_id)\n assert (self.last_do_query_call['body'].get('message') == message)\n\n self.plugIt.toReplyStatusCode = lambda: 201\n assert (not self.plugIt.new_mail(message_id, message))\n\n def test_media(self):\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {}\n\n media = str(uuid.uuid4())\n\n data, content_type, cache_control = self.plugIt.get_media(media)\n\n assert (data == '{}')\n assert (content_type == 'application/octet-stream')\n assert (self.last_do_query_call['url'] == 'media/{}'.format(media))\n assert (not cache_control)\n\n self.plugIt.toReplyHeaders = lambda: {'content-type': 'test', 'cache-control': 'public, max-age=31536000'}\n\n data, content_type, cache_control = self.plugIt.get_media(media)\n\n assert (data == '{}')\n assert (content_type == 'test')\n assert (cache_control == 'public, max-age=31536000')\n\n self.plugIt.toReplyStatusCode = lambda: 201\n data, content_type, cache_control = self.plugIt.get_media(media)\n assert (not data)\n assert (not content_type)\n\n def test_meta(self):\n\n k = str(uuid.uuid4())\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'k': k}\n self.plugIt.toReplyHeaders = lambda: {'expire': 'Wed, 21 Oct 2015 07:28:00 GMT'}\n\n data = self.plugIt.get_meta(path)\n assert (self.last_do_query_call['url'] == 'meta/{}'.format(path))\n assert (data['k'] == k)\n\n # Data should not be cached\n self.plugIt.toReplyJson = lambda: {'k2': k}\n data = self.plugIt.get_meta(path)\n assert (data['k2'] == k)\n\n def test_meta_fail(self):\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 201\n self.plugIt.toReplyHeaders = lambda: {}\n assert (not self.plugIt.get_meta(path))\n\n def test_meta_cache(self):\n\n k = str(uuid.uuid4())\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'k': k}\n self.plugIt.toReplyHeaders = lambda: {}\n\n # Data should be cached\n data = self.plugIt.get_meta(path)\n self.plugIt.toReplyJson = lambda: {'k2': k}\n data = self.plugIt.get_meta(path)\n assert (data['k'] == k)\n\n def test_template(self):\n\n k = str(uuid.uuid4())\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = 
lambda: {'k': k, 'template_tag': '-'}\n self.plugIt.toReplyHeaders = lambda: {}\n\n data = json.loads(self.plugIt.get_template(path))\n assert (self.last_do_query_call['url'] == 'template/{}'.format(path))\n assert (data['k'] == k)\n\n # Data should be cached\n self.plugIt.toReplyJson = lambda: {'k2': k, 'template_tag': '-'}\n data = json.loads(self.plugIt.get_template(path))\n assert (data['k'] == k)\n\n def test_template_fail(self):\n\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 201\n self.plugIt.toReplyHeaders = lambda: {}\n assert (not self.plugIt.get_template(path))\n\n def test_template_no_meta_no_template(self):\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {}\n assert (not self.plugIt.get_template(path))\n\n def test_do_action_normal_mode(self):\n\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {}\n\n assert (self.plugIt.do_action(path) == ({}, {}, {}))\n assert (self.last_do_query_call['url'] == 'action/{}'.format(path))\n\n def test_do_action_proxy_mode(self):\n\n path = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {}\n\n assert self.plugIt.do_action(path) == ({}, {}, {})\n assert self.last_do_query_call['url'] == \"action/\" + path\n\n def test_do_action_proxy_mode_no_remplate(self):\n\n k = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'k': k}\n self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-notemplate': True}\n\n r, __, __ = self.plugIt.do_action('')\n\n assert (r.__class__.__name__ == 'PlugItNoTemplate')\n assert (json.loads(r.content)['k'] == k)\n\n def test_do_action_data(self):\n\n path = str(uuid.uuid4())\n k = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {'k': k}\n self.plugIt.toReplyHeaders = lambda: {}\n\n assert (self.plugIt.do_action(path) == ({'k': k}, {}, {}))\n\n def test_do_action_500(self):\n self.plugIt.toReplyStatusCode = lambda: 500\n assert (self.plugIt.do_action('')[0].__class__.__name__ == 'PlugIt500')\n\n def test_do_action_fail(self):\n self.plugIt.toReplyStatusCode = lambda: 501\n assert (self.plugIt.do_action('') == (None, {}, {}))\n\n def test_do_action_special_codes(self):\n\n special_codes = [429, 404, 403, 401, 304]\n\n for x in range(200, 500):\n self.plugIt.toReplyStatusCode = lambda: x\n self.plugIt.toReplyHeaders = lambda: {}\n self.plugIt.toReplyJson = lambda: {}\n r, __, __ = self.plugIt.do_action('')\n\n if x in special_codes:\n assert (r.__class__.__name__ == 'PlugItSpecialCode')\n assert (r.code == x)\n else:\n assert (r.__class__.__name__ != 'PlugItSpecialCode')\n\n def test_do_action_session(self):\n\n k = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {'Ebuio-PlugIt-SetSession-k': k}\n assert (self.plugIt.do_action('') == ({}, {'k': k}, {}))\n\n def test_do_action_redirect(self):\n\n k = str(uuid.uuid4())\n\n self.plugIt.toReplyStatusCode = lambda: 200\n self.plugIt.toReplyJson = lambda: {}\n self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-redirect': k}\n r, session, headers = self.plugIt.do_action('')\n\n assert (r.__class__.__name__ == 'PlugItRedirect')\n assert (r.url == k)\n assert (not 
r.no_prefix)\n        assert (session == {})\n        assert (headers == {})\n\n    def test_do_action_redirect_noprefix(self):\n\n        k = str(uuid.uuid4())\n\n        self.plugIt.toReplyStatusCode = lambda: 200\n        self.plugIt.toReplyJson = lambda: {}\n        self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-redirect': k, 'ebuio-plugit-redirect-noprefix': \"True\"}\n        r, session, headers = self.plugIt.do_action('')\n\n        assert (r.__class__.__name__ == 'PlugItRedirect')\n        assert (r.url == k)\n        assert (r.no_prefix)\n        assert (session == {})\n        assert (headers == {})\n\n    def test_do_action_file(self):\n\n        k = str(uuid.uuid4())\n        content_type = str(uuid.uuid4())\n        content_disposition = str(uuid.uuid4())\n\n        self.plugIt.toReplyStatusCode = lambda: 200\n        self.plugIt.toReplyJson = lambda: {'k': k}\n        self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-itafile': k, 'Content-Type': content_type}\n        r, session, headers = self.plugIt.do_action('')\n\n        assert (r.__class__.__name__ == 'PlugItFile')\n        assert (json.loads(r.content)['k'] == k)\n        assert (r.content_type == content_type)\n        assert (r.content_disposition == '')\n        assert (session == {})\n        assert (headers == {})\n\n        self.plugIt.toReplyHeaders = lambda: {'ebuio-plugit-itafile': k, 'Content-Type': content_type,\n                                              'content-disposition': content_disposition}\n        r, __, __ = self.plugIt.do_action('')\n        assert (r.content_disposition == content_disposition)\n\n    def test_do_action_etag(self):\n\n        k = str(uuid.uuid4())\n\n        self.plugIt.toReplyStatusCode = lambda: 200\n        self.plugIt.toReplyJson = lambda: {}\n        self.plugIt.toReplyHeaders = lambda: {'ETag': k}\n        r, session, headers = self.plugIt.do_action('')\n\n        assert (headers == {'ETag': k})\n\n    def test_do_action_crossdomain(self):\n\n        k = str(uuid.uuid4())\n\n        self.plugIt.toReplyStatusCode = lambda: 200\n        self.plugIt.toReplyJson = lambda: {}\n\n        for header in ['Access-Control-Allow-Origin', 'Access-Control-Allow-Credentials',\n                       'Access-Control-Expose-Headers', 'Access-Control-Max-Age', 'Access-Control-Allow-Methods',\n                       'Access-Control-Allow-Headers']:\n            self.plugIt.toReplyHeaders = lambda: {header: k}\n            r, session, headers = self.plugIt.do_action('')\n\n            assert (headers == {header: k})\n","repo_name":"ebu/radiodns-manager","sub_path":"LightweightPlugitProxy/plugIt/tests/test_proxy_plugit.py","file_name":"test_proxy_plugit.py","file_ext":"py","file_size_in_byte":12666,"program_lang":"python","lang":"it","doc_type":"code","stars":14,"dataset":"github-code","pt":"30"} +{"seq_id":"25885168256","text":"from django.db import models\r\nfrom .manager import Manager\r\nfrom django.contrib.auth.models import AbstractUser\r\nfrom django.conf import settings\r\nfrom django.utils.translation import gettext as _\r\nfrom articles.models import Articles\r\nfrom ckeditor.fields import RichTextField\r\nfrom django.utils.text import slugify\r\n\r\n\r\nclass Blogger(AbstractUser):\r\n    email = models.EmailField(\r\n        _(\"Adresse Email\"),\r\n        unique=True,\r\n        primary_key=True,\r\n        db_index=True,\r\n    )\r\n    username = models.CharField(_(\"Nom de l'utilisateur:\"), max_length=200)\r\n    profile_pic = models.ImageField(\r\n        _(\"Photo de profil\"),\r\n        upload_to=\"profile\",\r\n        blank=True,\r\n        null=True,\r\n        default=\"profile/blog_profile.jpg\",\r\n    )\r\n    articles = models.ManyToManyField(Articles, related_name=\"articles\", db_index=True)\r\n    nb_articles = models.IntegerField(_(\"Nombre d'articles ecrit:\"), default=0)\r\n    bio = RichTextField(\r\n        _(\"Une courte bio:\"),\r\n        blank=True,\r\n        null=True,\r\n        default=\"\",\r\n        config_name=\"default\",\r\n    ) # type: 
ignore\r\n    header_title = RichTextField(\r\n        _(\"Titre du header:\"),\r\n        blank=True,\r\n        null=True,\r\n        default=\"\",\r\n        config_name=\"default\",\r\n    ) # type: ignore\r\n    first_name = None\r\n    last_name = None\r\n\r\n    def __str__(self):\r\n        return f\"{self.username}\".title()\r\n\r\n    \"\"\" \r\n    REQUIRED_FIELDS\r\n    doit contenir les champs obligatoires pour un utilisateur \r\n    sauf celui ecrit dans USERNAME_FIELD ou password car il faudra fournir des valeurs pour ces champs\r\n    consulter la doc https://docs.djangoproject.com/fr/4.1/topics/auth/customizing/#django.contrib.auth.models.CustomUser.REQUIRED_FIELDS\r\n    \"\"\"\r\n    USERNAME_FIELD = \"email\"\r\n    REQUIRED_FIELDS = [\r\n        \"username\",\r\n    ]\r\n    objects = Manager()\r\n\r\n    class Meta:\r\n        ordering = [\"username\"]\r\n        verbose_name = \"Bloggeur\"\r\n\r\n\r\nclass MyBio(models.Model):\r\n    content = RichTextField(\r\n        _(\"Contenu de la bio:\"),\r\n        blank=False,\r\n        null=False,\r\n        default=\"\",\r\n        config_name=\"default\",\r\n    ) # type: ignore\r\n\r\n    version_name = models.CharField(\r\n        _(\"identifiant de la bio\"),\r\n        max_length=50,\r\n        blank=False,\r\n    )\r\n\r\n    slug = models.SlugField(\r\n        db_index=True,\r\n        verbose_name=_(\"Slug\"),\r\n        max_length=200,\r\n        null=False,\r\n        blank=True,\r\n        help_text=\"Ne doit pas etre rempli manuellement\",\r\n    )\r\n\r\n    def save(self, *args, **kwargs):\r\n        value = self.version_name\r\n        self.slug = slugify(value, allow_unicode=True)\r\n        super().save(*args, **kwargs)\r\n","repo_name":"poneoneo/My_Blog","sub_path":"user_cust/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"31137519690","text":"import torch\nimport numpy as np\nimport pickle\n\n\nclass Vocabulary(object):\n    \"\"\"Simple vocabulary wrapper.\"\"\"\n\n    def __init__(self):\n        self.word2idx = {}\n        self.idx2word = {}\n        self.idx = 0\n\n    def add_word(self, word):\n        if not word in self.word2idx:\n            self.word2idx[word] = self.idx\n            self.idx2word[self.idx] = word\n            self.idx += 1\n\n    def __call__(self, word):\n        if not word in self.word2idx:\n            return self.word2idx['<unk>']\n        return self.word2idx[word]\n\n    def __len__(self):\n        return len(self.word2idx)\n\n\nclass MultimodalTweetDataset(torch.utils.data.Dataset):\n\n    def __init__(self, examples, vocab, trg_class_vocab, use_text, use_img, use_attribute, use_bert_src=None,\n                 img_feats_fn=None, attribute_feats_fn=None, url_map_fn=None,\n                 bert_feats_fn=None, src_str_map_fn=None,\n                 is_test=False, only_classifier=False, debug=False):\n        if debug:\n            self.examples = examples[:100]\n            print('Load 100 examples for debug mode')\n        else:\n            self.examples = examples\n\n        # for test the D8_NiX3XoAEHa8s.jpg\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D8_NiX3XoAEHa8s.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D7Q6xTPW0AENo16.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D3Gk5-eXkAA-7KH.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D3BLr7tWoAYiszm.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D0NQJv6UwAAWb7m.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D5wGsmtW0AAuKEm.jpg']\n        # self.examples = [e for e in examples if e['img'].split('/')[-1] == 'D0wn9TnXQAAGWyD.jpg']\n        # assert len(self.examples) == 1\n\n        self.vocab = vocab\n        self.pad_idx = vocab('<pad>')\n        self.trg_class_vocab = trg_class_vocab\n        
self.trg_class_vocab_size = len(trg_class_vocab)\n\n        self.is_test = is_test\n        self.only_classifier = only_classifier\n\n        self.use_text = use_text\n        self.use_img = use_img\n        self.use_attribute = use_attribute\n        self.use_bert_src = use_bert_src\n\n        if use_img or use_attribute:\n            with open(url_map_fn, 'rb') as f:\n                self.url_map = pickle.load(f)\n\n        if use_img:\n            with open(img_feats_fn, 'rb') as f:\n                self.img_feats = pickle.load(f)\n\n        if use_attribute:\n            with open(attribute_feats_fn, 'rb') as f:\n                self.attribute_feats = pickle.load(f)\n\n        if self.use_bert_src:\n            with open(src_str_map_fn, 'rb') as f:\n                self.src_str_map = pickle.load(f)\n\n            with open(bert_feats_fn, 'rb') as f:\n                self.bert_feats = pickle.load(f)\n\n    def __getitem__(self, index):\n        return self.examples[index]\n\n    def __len__(self):\n        return len(self.examples)\n\n    def _pad(self, input_list):\n        input_list_lens = [len(l) for l in input_list]\n        max_seq_len = max(input_list_lens)\n        padded_batch = self.pad_idx * np.ones((len(input_list), max_seq_len))\n\n        for j in range(len(input_list)):\n            current_len = input_list_lens[j]\n            padded_batch[j][:current_len] = input_list[j]\n\n        padded_batch = torch.LongTensor(padded_batch)\n\n        input_mask = torch.ne(padded_batch, self.pad_idx)\n        input_mask = input_mask.type(torch.FloatTensor)\n\n        return padded_batch, input_list_lens, input_mask\n\n    def collate_fn(self, batches):\n        img = None\n        attribute = None\n        bert_src = None\n\n        src = [b['src'] for b in batches]\n        oov_lists = [b['oov_list'] for b in batches]\n        src_oov = [b['src_oov'] for b in batches]\n        trg = [b['trg'] + [self.vocab('<eos>')] for b in batches]\n        trg_class = [b['trg_class'] for b in batches]\n        trg_oov = [b['trg_copy'] + [self.vocab('<eos>')] for b in batches]\n        img_fns = [b['img'] for b in batches]\n\n        src_str = [b['src_str'] for b in batches]\n        trg_str = [b['trg_str'] for b in batches]\n\n        original_indices = list(range(len(batches)))\n\n        # sort all the sequences in the order of source lengths\n        seq_pairs = sorted(\n            zip(src, trg, trg_class, img_fns, trg_oov, src_oov, oov_lists, src_str, trg_str, original_indices),\n            key=lambda p: len(p[0]), reverse=True)\n        src, trg, trg_class, img_fns, trg_oov, src_oov, oov_lists, src_str, trg_str, original_indices = zip(*seq_pairs)\n\n        src, src_lens, src_mask = self._pad(src)\n        trg, trg_lens, trg_mask = self._pad(trg)\n        trg_class = torch.LongTensor(trg_class)\n        src_oov, _, _ = self._pad(src_oov)\n        trg_oov, _, _ = self._pad(trg_oov)\n\n        if self.use_img:\n            imgs = []\n            for img_fn in img_fns:\n                img_fn = img_fn.split('/')[-1].strip()\n                img_line_id = self.url_map[img_fn]\n                img = torch.Tensor(self.img_feats[img_line_id])\n                imgs.append(img)\n            img = torch.stack(imgs, 0)\n\n        if self.use_attribute:\n            atts = []\n            for img_fn in img_fns:\n                img_fn = img_fn.split('/')[-1].strip()\n                img_line_id = self.url_map[img_fn]\n                att = torch.Tensor(self.attribute_feats[img_line_id])\n                atts.append(att)\n            attribute = torch.stack(atts, 0)\n\n        if self.use_bert_src:\n            bert_srcs = []\n            src_strs = [b['src_str'] for b in batches]\n            for src_str in src_strs:\n                src_str_id = self.src_str_map[src_str]\n                bert_src = torch.Tensor(self.bert_feats[src_str_id])\n                bert_srcs.append(bert_src)\n            bert_src = torch.stack(bert_srcs, 0)\n\n        if self.only_classifier:\n            return src, src_lens, src_mask, trg_class, img, attribute, bert_src\n\n        # Yue: do not support bert feat for generator\n        if self.is_test:\n            return src, src_lens, src_mask, src_oov, oov_lists, trg, trg_class, trg_lens, trg_mask, trg_oov, \\\n                   src_str, trg_str, original_indices, img, attribute\n        return src, 
src_lens, src_mask, src_oov, oov_lists, trg, trg_class, trg_lens, trg_mask, trg_oov, img, attribute\n\n\ndef get_tweet_img_loader(examples, vocab, trg_class_vocab,\n use_text, use_img, use_attribute, use_bert_src=None,\n img_feats_fn=None, attribute_feats_fn=None, url_map_fn=None,\n bert_feats_fn=None, src_str_map_fn=None,\n is_test=False, only_classifier=False, debug=False,\n batch_size=16, shuffle=False, num_workers=4):\n multimodel_tweets = MultimodalTweetDataset(examples, vocab, trg_class_vocab,\n use_text, use_img, use_attribute, use_bert_src,\n img_feats_fn, attribute_feats_fn, url_map_fn,\n bert_feats_fn, src_str_map_fn,\n is_test, only_classifier, debug)\n\n data_loader = torch.utils.data.DataLoader(dataset=multimodel_tweets,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=multimodel_tweets.collate_fn)\n return data_loader\n","repo_name":"yuewang-cuhk/CMKP","sub_path":"unified_model/my_io.py","file_name":"my_io.py","file_ext":"py","file_size_in_byte":7678,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"18"} +{"seq_id":"36603857853","text":"from django.db import models\nimport os\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_delete\nfrom NovelAIUser.settings import *\nimport shutil\n\n\n# 文生图部分\n\nclass Task(models.Model):\n story_framework = models.TextField(\n default=\"-\",\n verbose_name=\"故事框架\", help_text=\"如果没有可以不填\"\n )\n content = models.TextField(\n verbose_name=\"故事正文\", help_text=\"文章整理过之后直接复制过来接口\"\n )\n len_text = models.IntegerField(\n verbose_name=\"文章字数计算\", blank=True\n )\n type = models.CharField(\n max_length=50,\n verbose_name=\"文章类别\", help_text=\"类别相同会将所有同类文章放到此目录下\"\n )\n en_name = models.CharField(\n max_length=50,\n verbose_name=\"文章英文名\", help_text=\"故事正文下所有的数据都会在次目录下保存\"\n )\n cn_name = models.CharField(\n max_length=50,\n verbose_name=\"文章中文名\", help_text=\"生成数据文件视频和docx的命名\"\n )\n status = models.CharField(\n max_length=5, choices=((\"已完成\", \"已完成\"), (\"未完成\", \"未完成\")),\n verbose_name=\"数据状态\", help_text=\"批量执行时会跳过标记已完成的数据\"\n )\n lora_temp = models.TextField(\n verbose_name=\"lora 配置\", help_text=\"不懂尽量不要动\", blank=True, null=True\n )\n content_start = models.CharField(\n max_length=200, default=\"###\", blank=True, null=True,\n verbose_name=\"开头文案\", help_text=\"黄金5秒开头文案\"\n )\n content_start_json = models.TextField(\n default=\"\", blank=True, null=True,\n verbose_name=\"开头文案\", help_text=\"黄金5秒开头文案json\"\n )\n\n class Meta:\n verbose_name = \"文生图任务管理\"\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return '{}/{}/{}'.format(self.type, self.en_name, self.cn_name)\n\n\nclass TaskEach(models.Model):\n task = models.ForeignKey(Task, on_delete=models.CASCADE)\n txt = models.TextField(verbose_name=\"每句话的文章正文\")\n index = models.CharField(max_length=255, verbose_name=\"每句话的文章索引\")\n prompt = models.TextField(verbose_name=\"每句话的文章正面词\")\n negative = models.TextField(verbose_name=\"每句话的文章负面此\")\n img = models.CharField(max_length=255, verbose_name=\"每句话图片保存地址\")\n ts = models.CharField(max_length=255, verbose_name=\"ts时间戳\", help_text=\"Claud API使用\")\n\n class Meta:\n verbose_name = \"文章明细管理\"\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.txt\n\n\n@receiver(post_delete, sender=Task)\ndef create_folder_after_delete(sender, instance, **kwargs):\n # print(instance.type, instance.en_name)\n dir_name = os.path.join(BASE_DIR, \"Txt2Video\", instance.type, instance.en_name)\n # print(dir_name)\n if 
os.path.exists(dir_name):\n shutil.rmtree(dir_name)\n","repo_name":"Escaflowne1985/NovelAIUser","sub_path":"apps/Data/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"18"} +{"seq_id":"17189476874","text":"\"\"\"\r\nAll energy in MeV\r\n\"\"\"\r\nimport os\r\nimport sys\r\nfrom util import *\r\nfrom pyCEvNS.events import *\r\nfrom pyCEvNS.flux import *\r\nfrom pyCEvNS.constants import *\r\nimport numpy as np\r\nimport multiprocess as mp\r\nfrom filelock import FileLock\r\nimport matplotlib.pyplot as plt\r\nfrom exp_config import *\r\nfrom DM_xsec import dsigmadEr_el\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nglobal lock, outputFile\r\n\r\noutputFile = \"out/elasticDM/CCM_Ge_e_temp.txt\"\r\nTIMING_CUT = True # apply cuts or not\r\nEXPERIMENT = 'CCM_Ge_e' # COHERENT_CsI, COHERENT_CsI_2018, CCM_Ge, CCM_Ge_e\r\n\r\nmAs, _, Zs, _ = get_mA_Ji(EXPERIMENT)\r\nprefactor, pot_rate_per_day, pim_rate, pion_rate, dist, secs, det_mass, atoms, pot_mu, pot_sigma = get_rates(EXPERIMENT)\r\npion_flux, brem_photons = get_pion_brem_flux(EXPERIMENT)\r\nbkg, energy_edges, total_excess = get_bkg(EXPERIMENT)\r\nenergy_bins = (energy_edges[1:] + energy_edges[:-1])/2\r\nHELM = False if EXPERIMENT == 'CCM_Ge_e' else True # use Helm form factor or not. set False for CCM_Ge_e\r\n\r\n\r\n# Efficiency function\r\ndef eff_func(er):\r\n \"\"\"\r\n Efficiency function (only effective for COHERENT)\r\n er: recoil energy [MeV]\r\n \"\"\"\r\n if EXPERIMENT in ['COHERENT_CsI', 'COHERENT_CsI_2018']:\r\n return effE_MeV(er)\r\n return 1\r\n\r\n\r\ndef dsigmadErSum(er, echi, mchi, eps, mAs, Zs, helm=True):\r\n if len(mAs) != len(Zs):\r\n raise ValueError(\"mAs and Zs must have same length\")\r\n s = 0\r\n for mA, Z in zip(mAs, Zs):\r\n s += dsigmadEr_el(er, echi, mchi, eps, mA, Z, helm)\r\n return s\r\n\r\n\r\ndef sigmaElastic(echi, mchi, eps, mA, Z, helm=True):\r\n \"\"\"\r\n echi: dark matter energy\r\n mchi: dark matter mass\r\n eps: coupling\r\n mA: nuclear mass\r\n Z: atomic number\r\n return: elastic DM-nucleus total cross section [MeV^-2]\r\n \"\"\"\r\n ermax_ = ermax(echi, mchi, dE=0, mA=mA)\r\n if ermax_ <= 0:\r\n return 0\r\n return quad(dsigmadEr_el, 0, ermax_, args=(echi, mchi, eps, mA, Z, helm))[0]\r\n\r\n\r\ndef sigmaElasticSum(echi, mchi, eps, mAs, Zs, helm=True):\r\n \"\"\"\r\n return: total cross section [MeV^-2] summed over all nuclei\r\n \"\"\"\r\n if len(mAs) != len(Zs):\r\n raise ValueError(\"mAs and Zs must have same length\")\r\n s = 0\r\n for mA, Z in zip(mAs, Zs):\r\n s += sigmaElastic(echi, mchi, eps, mA, Z, helm)\r\n return s\r\n\r\n\r\n@scale_cache(pos=1, power=4, base=1e-4)\r\ndef dm_signal_gen(m_chi, epsilon):\r\n m_med = m_chi*3\r\n\r\n # imiate eta flux by translating pi0 flux\r\n delta_m = 140 # MeV (mass translation)\r\n m_chi_eta = max(1, m_chi - delta_m) # at least 1 MeV\r\n m_med_eta = m_chi_eta * 3\r\n\r\n brem_flux = DMFluxIsoPhoton(brem_photons, dark_photon_mass=m_med, coupling=epsilon, sampling_size=1000, detector_distance=dist,\r\n dark_matter_mass=m_chi, pot_rate=pot_rate_per_day, life_time=default_lifetime, pot_mu=pot_mu, pot_sigma=pot_sigma)\r\n pi0_flux = DMFluxFromPi0Decay(pi0_distribution=pion_flux, dark_photon_mass=m_med, coupling_quark=epsilon, pot_rate=pot_rate_per_day, detector_distance=dist,\r\n dark_matter_mass=m_chi, life_time=default_lifetime, pot_mu=pot_mu, pot_sigma=pot_sigma)\r\n pim_flux = 
DMFluxFromPiMinusAbsorption(dark_photon_mass=m_med, coupling_quark=epsilon, pot_rate=pot_rate_per_day, detector_distance=dist,\r\n dark_matter_mass=m_chi, pion_rate=pim_rate, life_time=default_lifetime, pot_mu=pot_mu, pot_sigma=pot_sigma)\r\n eta_flux = DMFluxFromPi0Decay(pi0_distribution=pion_flux, dark_photon_mass=m_med_eta, coupling_quark=epsilon, pot_rate=pot_rate_per_day, detector_distance=dist,\r\n dark_matter_mass=m_chi_eta, life_time=default_lifetime, pot_mu=pot_mu, pot_sigma=pot_sigma)\r\n\r\n pi0_wgts = pi0_flux.norm * np.ones_like(pi0_flux.energy) / np.shape(pi0_flux.energy)\r\n pim_wgts = pim_flux.norm * np.ones_like(pim_flux.energy) / np.shape(pim_flux.energy)\r\n brem_wgts = np.array(brem_flux.weight) / 100\r\n eta_wgts = 1e-2 * eta_flux.norm * np.ones_like(eta_flux.energy) / np.shape(eta_flux.energy)\r\n\r\n brem_energy_flux = brem_flux.energy\r\n pi0_energy_flux = pi0_flux.energy\r\n pim_energy_flux = pim_flux.energy\r\n eta_energy_flux = eta_flux.energy\r\n\r\n dm_energy_edges = np.linspace(0, 400, 100)\r\n dm_energy_bins = (dm_energy_edges[1:] + dm_energy_edges[:-1])/2\r\n\r\n pim_energy_flux = np.histogram(pim_energy_flux, weights=pim_wgts, bins=dm_energy_edges)[0]\r\n pi0_energy_flux = np.histogram(pi0_energy_flux, weights=pi0_wgts, bins=dm_energy_edges)[0]\r\n brem_energy_flux = np.histogram(brem_energy_flux, weights=brem_wgts, bins=dm_energy_edges)[0]\r\n eta_energy_flux = np.histogram(eta_energy_flux, weights=eta_wgts, bins=dm_energy_edges)[0]\r\n\r\n # NOTE: scale eta flux to match Dan's results\r\n eta_energy_flux *= 10\r\n\r\n if TIMING_CUT:\r\n total_flux = pi0_energy_flux + eta_energy_flux\r\n else:\r\n total_flux = pim_energy_flux + pi0_energy_flux + brem_energy_flux + eta_energy_flux\r\n\r\n signals = []\r\n for idx, er in enumerate(energy_bins):\r\n signal = 0\r\n for e_chi, f_chi in zip(dm_energy_bins, total_flux):\r\n # total_flux is in [s^-1 MeV^-2]\r\n signal += dsigmadErSum(er, e_chi, m_chi, epsilon, mAs, Zs, HELM) * f_chi\r\n er_bin_width = energy_edges[idx+1] - energy_edges[idx]\r\n signals.append(signal * eff_func(er) * er_bin_width)\r\n\r\n return np.array(signals) * prefactor\r\n\r\n\r\ndef main_single(m_chi):\r\n eps_array = np.logspace(-6, -1, 150)\r\n crit_events = 2.3\r\n # Binary_Search(m_chi, eps_array, dm_signal_gen, bkg, crit_events, outputFile, test='nobkg', lock=lock)\r\n Grid_Search(m_chi, eps_array, dm_signal_gen, bkg, crit_events, outputFile, test='nobkg', lock=lock)\r\n\r\n\r\ndef plot_elastic_DM():\r\n signals = dm_signal_gen(m_chi=25, epsilon=1e-4, test='chi2')\r\n plt.plot(energy_bins, signals)\r\n plt.xlim(0, 0.04)\r\n plt.yscale('log')\r\n plt.show()\r\n\r\n\r\ndef main(resume=False):\r\n mass_array = np.logspace(np.log10(0.99), np.log10(184), 50) # m_chi\r\n\r\n if resume:\r\n mass_array = resume_from_last(outputFile, mass_array)\r\n os.system(f'rm \"{outputFile}.lock\"')\r\n else:\r\n os.system(f'rm \"{outputFile}\" \"{outputFile}.lock\"')\r\n\r\n pool = mp.Pool(processes=mp.cpu_count()-2)\r\n pool.map(main_single, mass_array)\r\n os.system(f'rm \"{outputFile}.lock\"')\r\n sort_file(outputFile, delimiter=' ')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n lock = FileLock(outputFile + '.lock')\r\n\r\n # main_single(1)\r\n main()\r\n # plot_elastic_DM()\r\n","repo_name":"noctildon/lightDM","sub_path":"DM_elastic.py","file_name":"DM_elastic.py","file_ext":"py","file_size_in_byte":6577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"72920878760","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nDjango base settings for {{cookiecutter.project_name}} project.\n\"\"\"\n\n\nimport os\n\nimport dj_database_url\nfrom django.core.exceptions import ImproperlyConfigured\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n\n\n# Use this to get required environment variables\ndef get_env_variable(var_name):\n \"\"\"Get the named environment variable or raise ImproperlyConfigured\"\"\"\n try:\n return os.environ[var_name]\n except KeyError:\n error_msg = u\"Missing the {} env variable\".format(var_name)\n raise ImproperlyConfigured(error_msg)\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = get_env_variable('SECRET_KEY')\n\nDEBUG = False\nTEMPLATE_DEBUG = False\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'south',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = '{{cookiecutter.project_name}}.urls'\n\n\n# Databases specified with dj-database-url\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\n# https://github.com/kennethreitz/dj-database-url\n# Examples:\n# * dj_database_url.parse(\"sqlite:///{}\".format(os.path.join(BASE_DIR, 'db.sqlite3')))\n# * dj_database_url.parse(\"sqlite://:memory:\")\n# * dj_database_url.parse(\"postgresql://username:password@host:port/database\")\n# * dj_database_url.parse(\"mysql://username:password@host:port/database\")\n# * dj_database_url.config() (use the DATABASE_URL environment variable)\n\nDATABASES = {'default': dj_database_url.config()}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.CachedStaticFilesStorage'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\n","repo_name":"estebistec/cookiecutter-django","sub_path":"{{cookiecutter.repo_name}}/{{cookiecutter.project_name}}/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"22966165864","text":"from app import app\nfrom flask import render_template\nimport json\nimport random\n\n# Default / Landing Page / Newsroom\n@app.route('/')\n@app.route('/index')\ndef index():\n # Loading data\n engadget_posts = {}\n with open('app/data/engadget.json') as f:\n engadget_posts = json.load(f)\n\n # fake id for Modals - This is just a hacky way to show multiple modals with different data\n # the proper way is through JS, but I chose this way at this stage.\n random_ids = random.sample(range(1, 100), len(engadget_posts))\n for i, post in enumerate(engadget_posts):\n post['modalid'] = random_ids[i]\n\n # Load Tweets\n all_tweets = list(open('app/data/twitter.json'))\n tweets = 
[json.loads(t) for t in all_tweets][-25:] # Last 25 Tweets\n # Returning the view, filled with data!\n return render_template('index.html', title='Prospero News!', posts=engadget_posts, tweets=tweets)\n\n# Word Cloud Page\n@app.route('/wc')\ndef wc():\n return render_template('wc.html', title='Word Cloud!', image_url='static/wordcloud.png')\n","repo_name":"taesiri/prospero","sub_path":"Server/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19247924051","text":"def mul_peasant(first_value: int, second_value: int) -> int:\n \"\"\"\n Multiply two integers using Peasant multiplication\n https://en.wikipedia.org/wiki/Multiplication_algorithm\n \"\"\"\n result: int = 0\n\n while first_value > 0:\n if first_value % 2 == 1:\n result += second_value\n first_value >>= 1\n second_value <<= 1\n\n return result\n","repo_name":"NathanielChavdarov/algorithms","sub_path":"algorithms/multiply/multiply.py","file_name":"multiply.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31895585992","text":"import json\nimport logging\nimport os\nimport re\nfrom datetime import datetime\nfrom types import SimpleNamespace\nfrom typing import Any, Generator, Iterable, Optional, Union\nfrom urllib.parse import urlparse\n\nimport koji\nimport requests\nimport yaml\nfrom django.conf import settings\n\nfrom corgi.collectors.models import CollectorRhelModule, CollectorRPM, CollectorSRPM\nfrom corgi.core.constants import CONTAINER_REPOSITORY\nfrom corgi.core.models import Component, SoftwareBuild\n\nlogger = logging.getLogger(__name__)\n\nADVISORY_REGEX = re.compile(r\"RH[BES]A-[12]\\d{3}:\\d{4,}\")\n\n\nclass BrewBuildTypeNotFound(Exception):\n pass\n\n\nclass BrewBuildTypeNotSupported(Exception):\n pass\n\n\nclass BrewBuildInvalidState(Exception):\n pass\n\n\nclass BrewBuildSourceNotFound(Exception):\n pass\n\n\nclass BrewBuildNotFound(Exception):\n pass\n\n\nclass Brew:\n \"\"\"Interface to the Brew API for build data collection.\n\n Koji: https://docs.pagure.org/koji/\n \"\"\"\n\n CONTAINER_BUILD_TYPE = \"image\"\n RPM_BUILD_TYPE = \"rpm\"\n MAVEN_BUILD_TYPE = \"maven\"\n WIN_BUILD_TYPE = \"win\"\n MODULE_BUILD_TYPE = \"module\"\n\n # A subset of build types that we are able to analyze right now, others from the listing\n # above will be added once support for them is added.\n SUPPORTED_BUILD_TYPES = (\n CONTAINER_BUILD_TYPE,\n RPM_BUILD_TYPE,\n MODULE_BUILD_TYPE,\n )\n\n CARGO_TYPE_MAPPING = {\"crate\": Component.Type.CARGO}\n GEM_TYPE_MAPPING = {\"ruby\": Component.Type.GEM, \"rubygems\": Component.Type.GEM}\n GOLANG_TYPE_MAPPING = {\n \"golang\": Component.Type.GOLANG,\n \"gomod\": Component.Type.GOLANG,\n \"go-package\": Component.Type.GOLANG,\n }\n NPM_TYPE_MAPPING = {\n \"js\": Component.Type.NPM,\n \"nodejs\": Component.Type.NPM,\n \"npm\": Component.Type.NPM,\n \"yarn\": Component.Type.NPM,\n }\n PYPI_TYPE_MAPPING = {\"pip\": Component.Type.PYPI, \"python\": Component.Type.PYPI}\n\n # Map Cachito types to Corgi types\n CACHITO_PKG_TYPE_MAPPING = {\n **CARGO_TYPE_MAPPING,\n **GEM_TYPE_MAPPING,\n **GOLANG_TYPE_MAPPING,\n **NPM_TYPE_MAPPING,\n **PYPI_TYPE_MAPPING,\n }\n\n # A list of component names, for which build analysis will be skipped.\n COMPONENT_EXCLUDES = json.loads(os.getenv(\"CORGI_COMPONENT_EXCLUDES\", \"[]\"))\n\n koji_session: koji.ClientSession = None\n\n 
def __init__(self, source: str = \"\"):\n if source == SoftwareBuild.Type.CENTOS:\n self.koji_session = koji.ClientSession(settings.CENTOS_URL)\n elif source == SoftwareBuild.Type.KOJI:\n self.koji_session = koji.ClientSession(settings.BREW_URL)\n elif source == SoftwareBuild.Type.BREW:\n self.koji_session = koji.ClientSession(\n settings.BREW_URL, opts={\"serverca\": settings.CA_CERT}\n )\n else:\n raise ValueError(f\"Tried to create Brew collector with invalid type: {source}\")\n\n def get_source_of_build(self, build_info: dict[str, Any]) -> str:\n \"\"\"Find the source used to build the Koji build.\"\"\"\n no_source_msg = f'Build {build_info[\"id\"]} has no associated source URL'\n if build_info.get(\"task_id\") is None:\n raise BrewBuildSourceNotFound(no_source_msg)\n\n task_request = self.koji_session.getTaskRequest(build_info[\"task_id\"])\n if not isinstance(task_request, list):\n raise BrewBuildSourceNotFound(no_source_msg)\n\n for value in task_request:\n # Check if the value in the task_request is a git URL\n if isinstance(value, str) and re.match(r\"git(?:\\+https?|\\+ssh)?://\", value):\n return value\n # Look for a dictionary in task_request that may include certain keys that hold the URL\n elif isinstance(value, dict):\n if isinstance(value.get(\"ksurl\"), str):\n return value[\"ksurl\"]\n elif isinstance(value.get(\"indirection_template_url\"), str):\n return value[\"indirection_template_url\"]\n\n raise BrewBuildSourceNotFound(no_source_msg)\n\n @staticmethod\n def clean_source_of_build(source_url: str) -> str:\n \"\"\"Handle old builds which still use git:// URLs\n The dist-git service now requires us to use https:// instead\"\"\"\n # It's an internal hostname, so we have to get it a little indirectly\n dist_git_hostname = os.environ[\"CORGI_LOOKASIDE_CACHE_URL\"]\n dist_git_hostname = dist_git_hostname.replace(\"https://\", \"\", 1)\n dist_git_hostname = dist_git_hostname.replace(\"/repo\", \"\", 1)\n\n # Find any build where the source URL starts with git:// or similar\n gitlike_urls = (\n f\"git://{dist_git_hostname}/\",\n f\"git+http://{dist_git_hostname}/\",\n f\"git+https://{dist_git_hostname}/\",\n f\"git+ssh://{dist_git_hostname}/\",\n )\n # Replace it with https:// and add \"/git/\" if not already present\n dist_git_url = f\"https://{dist_git_hostname}/git/\"\n\n for git_url in gitlike_urls:\n git_url_with_path = f\"{git_url}git/\"\n\n # If \"/git/\" is already present, just fix the scheme\n if source_url.startswith(git_url_with_path):\n source_url = source_url.replace(git_url_with_path, dist_git_url, 1)\n break\n\n # If \"/git/\" isn't already present, fix the scheme and path\n elif source_url.startswith(git_url):\n source_url = source_url.replace(git_url, dist_git_url, 1)\n break\n\n # Handle .git suffix which must be removed in HTTPS URLs\n source_url = source_url.replace(\".git\", \"\", 1)\n return source_url\n\n @staticmethod\n def _parse_remote_source_url(url: str) -> tuple[str, Component.Type]:\n \"\"\"Used to parse remote_source repo from OSBS into purl name for github namespace\n ref https://github.com/containerbuildsystem/osbs-client/blob/\n f719759af18ef9f3bb45ee4411f80a9580723e31/osbs/schemas/container.json#L310\"\"\"\n parsed_url = urlparse(url)\n path = parsed_url.path.removesuffix(\".git\")\n\n # handle url like git@github.com:rh-gitops-midstream/argo-cd\n if path.startswith(\"git@\"):\n path = path.removeprefix(\"git@\")\n path = path.replace(\":\", \"/\")\n\n # look for github.com and set ComponentType with modified path\n if parsed_url.netloc == 
\"github.com\":\n component_type = Component.Type.GITHUB\n # urlparse keeps the leading / on the path component when netloc was found\n # the purl spec dictates that we remove it for Github purls\n path = path.removeprefix(\"/\")\n\n # no netloc\n elif path.startswith(\"github.com/\"):\n component_type = Component.Type.GITHUB\n path = path.removeprefix(\"github.com/\")\n\n # non github url with netloc\n else:\n component_type = Component.Type.GENERIC\n path = f\"{parsed_url.netloc}{path}\"\n\n return path, component_type\n\n @staticmethod\n def _bundled_or_golang(component: str) -> str:\n # Process bundled deps only; account for typed golang deps of type:\n # \"golang(golang.org/x/crypto/acme)\"\n if component.startswith(\"bundled(\"):\n c = component.removeprefix(\"bundled(\")\n elif component.startswith(\"golang(\"):\n c = component\n else:\n return \"\"\n # Strip right parens, even if we didn't strip prefix\n return c.replace(\")\", \"\")\n\n @staticmethod\n def _check_maven_component(\n component: str, version: str\n ) -> Optional[tuple[Component.Type, str, str]]:\n if \":\" in component:\n return Component.Type.MAVEN, component.replace(\":\", \"/\"), version\n elif component.startswith(\"maven\"):\n return Component.Type.MAVEN, component, version\n elif component.startswith(\"apache-commons\"):\n return Component.Type.MAVEN, component, version\n elif component.startswith(\"java-\"):\n return Component.Type.MAVEN, component, version\n return None\n\n @staticmethod\n def _check_npm_component(\n component: str, version: str\n ) -> Optional[tuple[Component.Type, str, str]]:\n if component.startswith(\"js-\"):\n return Component.Type.NPM, component[len(\"js-\") :], version\n elif component.startswith(\"npm-\"):\n return Component.Type.NPM, component[len(\"npm-\") :], version\n elif component.startswith(\"nodejs-\"):\n return Component.Type.NPM, component[len(\"nodejs-\") :], version\n else:\n component_match = re.match(r\"^nodejs\\d+-(.*)\", component)\n if component_match:\n return Component.Type.NPM, component, version\n return None\n\n @classmethod\n def _get_bundled_component_type(\n cls, component_type: str, component: str\n ) -> Optional[Component.Type]:\n if component_type.startswith(\"python\"):\n return Component.Type.PYPI\n elif component_type.startswith(\"ruby\"):\n return Component.Type.GEM\n elif component_type == \"golang\":\n # Need to skip arch names, See CORGI-48\n if component in (\"aarch-64\", \"ppc-64\", \"s390-64\", \"x86-64\"):\n return None\n return Component.Type.GOLANG\n elif component_type in cls.CACHITO_PKG_TYPE_MAPPING:\n return cls.CACHITO_PKG_TYPE_MAPPING[component_type]\n else:\n return Component.Type.GENERIC\n\n @classmethod\n def _extract_bundled_provides(\n cls, provides: list[tuple[str, str]]\n ) -> list[tuple[Component.Type, str, str]]:\n bundled_components: list[tuple[Component.Type, str, str]] = []\n for component, version in provides:\n component = cls._bundled_or_golang(component)\n if not component:\n continue\n if component.startswith(\"rh-\"):\n component = component[3:]\n bundled_component = cls._check_maven_component(component, version)\n if bundled_component:\n bundled_components.append(bundled_component)\n continue\n bundled_component = cls._check_npm_component(component, version)\n if bundled_component:\n bundled_components.append(bundled_component)\n continue\n # Split into namespace identifier and component name\n component_split = re.split(r\"([(-])\", component, maxsplit=1)\n if len(component_split) != 3:\n 
bundled_components.append((Component.Type.GENERIC, component, version))\n continue\n else:\n component_type, seperator, component = component_split\n bundled_component_type = cls._get_bundled_component_type(component_type, component)\n if not bundled_component_type:\n continue\n # Account for bundled deps like \"bundled(rh-nodejs12-zlib)\" where it's not clear\n # what is the component type and what is the actual component name.\n if bundled_component_type == Component.Type.GENERIC and seperator == \"-\":\n # E.g. unknown / rh-nodejs12-zlib\n component = f\"{component_type}-{component}\"\n bundled_components.append((bundled_component_type, component, version))\n return bundled_components\n\n def get_rpm_build_data(self, build_id: int) -> dict[str, Any]:\n # Parent-level SRPM component\n srpm_component = None\n\n # List of child RPM components\n rpm_components = []\n\n rpm_infos = self.koji_session.listRPMs(build_id)\n\n for rpm_info, call in self.brew_rpm_headers_lookup(rpm_infos):\n rpm_id = rpm_info[\"id\"]\n headers = call.result\n\n # Create a dictionary by zipping together the values from the \"provides\" and\n # \"provideversion\" headers.\n rpm_provides = list(zip(headers.pop(\"provides\"), headers.pop(\"provideversion\")))\n rpm_component: dict[str, Any] = {\n \"type\": Component.Type.RPM,\n \"namespace\": Component.Namespace.REDHAT,\n \"meta\": {\n **headers,\n \"nvr\": rpm_info[\"nvr\"],\n \"name\": rpm_info[\"name\"],\n \"version\": rpm_info[\"version\"],\n \"release\": rpm_info[\"release\"],\n \"epoch\": rpm_info[\"epoch\"] or 0, # Default to epoch 0 if not specified (`None`)\n \"arch\": rpm_info[\"arch\"],\n \"source\": [\"koji.listRPMs\", \"koji.getRPMHeaders\"],\n \"rpm_id\": rpm_id,\n # Our custom \"source\" key conflicts with the \"SOURCE\" RPM header\n # So we call it \"source_files\" here instead\n \"source_files\": headers[\"source\"],\n },\n }\n\n # Extract additional metadata from SRPM components\n if rpm_info[\"arch\"] == \"src\":\n # TODO: download sources from dist-git, find specfile, and extract Source:\n # >>> import rpm\n # >>> t = rpm.TransactionSet()\n # >>> p = t.parseSpec('1882509_podman.spec')\n # >>> for x in p.sources:\n # ... 
print(x[0])\n srpm_component = rpm_component\n continue\n\n # Process bundled dependencies for each RPM\n bundled_components = []\n bundled_provides = self._extract_bundled_provides(rpm_provides)\n if bundled_provides:\n bundled_components = self._parse_bundled_provides(bundled_provides, rpm_info)\n\n rpm_component[\"components\"] = bundled_components\n rpm_components.append(rpm_component)\n\n if not srpm_component:\n logger.error(\"No SRPM found in build\")\n return {}\n\n # RPM components are children of the SRPM component\n srpm_component[\"components\"] = rpm_components\n\n # TODO: list all components used as build requirements\n return srpm_component\n\n @classmethod\n def _parse_bundled_provides(\n cls, bundled_provides: list[tuple[Component.Type, str, str]], rpm_info: dict[str, str]\n ) -> list[dict[str, Union[str, dict[str, Union[str, list[str]]]]]]:\n \"\"\"Parse a list of (type, name, version) tuples, build a list of bundled component dicts\"\"\"\n id_counter = 0\n parsed_provides = []\n for component_type, bundled_component_name, version in bundled_provides:\n id_counter += 1\n bundled_component_meta: dict[str, Union[str, list[str]]] = {\n \"name\": bundled_component_name,\n \"version\": version,\n \"rpm_id\": f\"{rpm_info['id']}-bundles-{id_counter}\",\n \"source\": [\"specfile\"],\n }\n\n bundled_component: dict[str, Union[str, dict[str, Union[str, list[str]]]]] = {\n \"type\": component_type,\n \"namespace\": cls.check_red_hat_namespace(component_type, version),\n \"meta\": bundled_component_meta,\n }\n # We can't set go_component_type here for Golang components\n # Both Go modules and Go packages can be bundled into an RPM\n # There's no easy way for us to tell which type this component is\n parsed_provides.append(bundled_component)\n return parsed_provides\n\n @staticmethod\n def check_red_hat_namespace(\n component_type: str, version: str, publisher: str = \"\"\n ) -> Component.Namespace:\n \"\"\"Given a component type, version, and optional publisher, return the correct namespace\"\"\"\n if publisher == \"Red Hat, Inc.\":\n # Components from Pyxis manifests may specify a publisher\n return Component.Namespace.REDHAT\n elif component_type in (\n Component.Type.RPM,\n Component.Type.RPMMOD,\n Component.Type.CONTAINER_IMAGE,\n ):\n # RPMs, modules, and containers are always built at Red Hat\n return Component.Namespace.REDHAT\n elif component_type == Component.Type.MAVEN and \"redhat\" in version:\n # .redhat or -redhat in the version string indicate this Maven component\n # was built in a Red Hat build system (e.g. by PNC / for a Middleware product)\n return Component.Namespace.REDHAT\n else:\n return Component.Namespace.UPSTREAM\n\n @staticmethod\n def _build_archive_dl_url(filename: str, build_info: dict[str, str]) -> str:\n url = (\n f\"{settings.BREW_DOWNLOAD_ROOT_URL}/packages/\"\n f\"{build_info['name']}/\"\n f\"{build_info['version']}/\"\n f\"{build_info['release']}/files/remote-sources/\"\n f\"{filename}\"\n )\n return url\n\n @staticmethod\n def _get_remote_source(build_archive_url: str) -> SimpleNamespace:\n response = requests.get(build_archive_url)\n response.raise_for_status()\n return json.loads(response.text, object_hook=lambda d: SimpleNamespace(**d))\n\n @staticmethod\n def _create_image_component(\n build_id: int,\n nvr: str,\n name: str = \"\",\n version: str = \"\",\n release: str = \"\",\n arch: str = \"noarch\",\n name_label: str = \"\",\n ) -> dict[str, Any]:\n # A multi arch image is really just an OCI image index. 
From a container registry client\n # point of view they are transparent in that the client will always pull the correct arch\n # for their client without having to know the actual image location.\n # See https://github.com/opencontainers/image-spec/blob/main/image-index.md\n if any(item == \"\" for item in (name, version, release)):\n name, version, release = Brew.split_nvr(nvr)\n\n image_component: dict = {\n \"type\": Component.Type.CONTAINER_IMAGE,\n \"brew_build_id\": build_id,\n \"meta\": {\n \"nvr\": nvr,\n \"name\": name,\n \"version\": version,\n \"release\": release,\n \"arch\": arch,\n },\n }\n if name_label:\n image_component[\"meta\"][\"repository_url\"] = f\"{CONTAINER_REPOSITORY}/{name_label}\"\n name_label_parts = name_label.rsplit(\"/\", 1)\n if len(name_label_parts) == 2:\n image_component[\"meta\"][\"name_from_label\"] = name_label_parts[1]\n return image_component\n\n @staticmethod\n def split_nvr(nvr: str) -> tuple[str, str, str]:\n nvr_parts = nvr.rsplit(\"-\", maxsplit=2)\n if len(nvr_parts) != 3:\n raise ValueError(f\"NVR {nvr} had invalid length after splitting: {len(nvr_parts)}\")\n name = nvr_parts[0]\n version = nvr_parts[1]\n release = nvr_parts[2]\n return name, version, release\n\n def get_container_build_data(self, build_id: int, build_info: dict[str, Any]) -> dict[str, Any]:\n\n component: dict[str, Any] = {\n \"type\": Component.Type.CONTAINER_IMAGE,\n \"meta\": {\n \"name\": build_info[\"name\"],\n \"version\": build_info[\"version\"],\n \"release\": build_info[\"release\"],\n \"epoch\": build_info[\"epoch\"] or 0,\n \"arch\": None,\n \"source\": [\"koji.getBuild\"],\n },\n }\n\n go_stdlib_version = \"\"\n remote_sources: dict[str, tuple[str, str]] = {}\n # TODO: Should we raise an error if build_info[\"extra\"] is missing?\n if build_info[\"extra\"]:\n index = build_info[\"extra\"][\"image\"].get(\"index\", {})\n if index:\n component[\"meta\"][\"digests\"] = index[\"digests\"]\n component[\"meta\"][\"pull\"] = index.get(\"pull\", [])\n\n if \"parent_build_id\" in build_info[\"extra\"][\"image\"]:\n parent_image = build_info[\"extra\"][\"image\"][\"parent_build_id\"]\n component[\"meta\"][\"parent\"] = parent_image\n\n # These show up in multistage builds such as Build ID 1475846 and are build dependencies\n if \"parent_image_builds\" in build_info[\"extra\"][\"image\"]:\n build_parent_nvrs = []\n for parent_image_build in build_info[\"extra\"][\"image\"][\n \"parent_image_builds\"\n ].values():\n build_name, build_version, _ = Brew.split_nvr(parent_image_build[\"nvr\"])\n if \"go-toolset\" in build_name or \"golang\" in build_name:\n build_parent_nvrs.append(build_name)\n go_stdlib_version = build_version.removeprefix(\"v\")\n component[\"meta\"][\"go_stdlib_version\"] = go_stdlib_version\n\n component[\"meta\"][\"build_parent_nvrs\"] = build_parent_nvrs\n\n # Legacy OSBS builds such as 1890187 copy source code into dist-git but specify where\n # the source code came from using the 'go' stanza in container.yaml\n # ref: https://osbs.readthedocs.io/en/osbs_ocp3/users.html#go\n # Handle case when \"go\" key is present but value is None\n go = build_info[\"extra\"][\"image\"].get(\"go\", {})\n\n # AND handle case when \"modules\" key is present but value is None\n if go and go.get(\"modules\", []):\n go_modules = tuple(\n module[\"module\"].removeprefix(\"https://\")\n for module in go[\"modules\"]\n if module.get(\"module\")\n )\n if go_modules:\n # Tuple above can be empty if .get(\"module\") name is always None / an empty str\n 
component[\"meta\"][\"upstream_go_modules\"] = go_modules\n\n # builds such as 1911112 have all their info in typeinfo as they use remote_sources map\n # in remote_source json, and tar download urls by cachito url\n\n # Cachito ref https://osbs.readthedocs.io/en/osbs_ocp3/users.html#remote-sources\n if (\n \"typeinfo\" in build_info[\"extra\"]\n and \"remote-sources\" in build_info[\"extra\"][\"typeinfo\"]\n ):\n remote_sources_v = build_info[\"extra\"][\"typeinfo\"][\"remote-sources\"]\n if isinstance(remote_sources_v, dict):\n # Need to collect json, and tar download urls from archives data\n # Fill the tuple in with empty values now\n cachito_url = remote_sources_v[\"remote_source_url\"]\n remote_sources[cachito_url] = (\"\", \"\")\n else:\n for source in remote_sources_v:\n if \"archives\" in source:\n archives = source[\"archives\"]\n json_data = self._build_archive_dl_url(archives[0], build_info)\n tar = self._build_archive_dl_url(archives[1], build_info)\n remote_sources[source[\"url\"]] = (json_data, tar)\n else:\n logger.warning(\n \"Expected to find archives in remote-source dict, only found %s\",\n source.keys(),\n )\n\n child_image_components: list[dict[str, Any]] = []\n archives = self.koji_session.listArchives(build_id)\n\n # Extract the list of embedded rpms\n noarch_rpms_by_id: dict[int, dict[str, Any]] = {}\n rpm_build_ids: set[int] = set()\n\n for archive in archives:\n if archive[\"btype\"] == \"image\" and archive[\"type_name\"] == \"tar\":\n noarch_rpms_by_id, child_image_component = self._extract_image_components(\n archive, build_id, build_info[\"nvr\"], noarch_rpms_by_id, rpm_build_ids\n )\n child_image_components.append(child_image_component)\n if archive[\"btype\"] == \"remote-sources\":\n # Some OSBS builds don't have remote sources set in extras typeinfo\n # For example build 1475846 because they use remote_source in Cachito Configuration\n # ref: https://osbs.readthedocs.io/en/osbs_ocp3/users.html#remote-source\n # In that case, extract from archives data here\n if len(remote_sources.keys()) == 1:\n first_remote_source = next(iter(remote_sources.values()))\n # The remote_source tuple is updated during the loop below, so we need to check\n # if both json and tar values in the tuple are empty\n if first_remote_source[0] != \"\" and first_remote_source[1] != \"\":\n continue # Don't try to populate remote_sources map from archives\n else:\n self.update_remote_sources(archive, build_info, remote_sources)\n\n source_components = self._extract_remote_sources(go_stdlib_version, remote_sources)\n\n component[\"nested_builds\"] = list(rpm_build_ids)\n component[\"sources\"] = source_components\n component[\"image_components\"] = child_image_components\n component[\"components\"] = list(noarch_rpms_by_id.values())\n\n # During collection we are only able to inspect docker config labels on\n # attached arch specific archives. 
We do this loop here to save the description, license,\n # name label, and repository url also on the index container object at the root of the tree.\n for attr in (\"description\", \"license\", \"name_from_label\", \"repository_url\"):\n self._get_child_meta(component, attr)\n\n return component\n\n @staticmethod\n def _get_child_meta(component: dict[str, Any], meta_attr: str) -> None:\n for image in component[\"image_components\"]:\n meta_attr_value = image[\"meta\"].get(meta_attr)\n if meta_attr_value:\n component[\"meta\"][meta_attr] = meta_attr_value\n break\n\n @classmethod\n def _extract_remote_sources(\n cls, go_stdlib_version: str, remote_sources: dict[str, tuple[str, str]]\n ) -> list[dict[str, Any]]:\n \"\"\"Given a list of remote-source.json filenames / Cachito manifest names,\n build and return a list of source component dicts, one for each manifest\n Each source component has a \"components\" key with all the top-level\n .packages in that Cachito manifest, and\n Each top-level .package has a \"components\" key with all the child\n .dependencies of that package (e.g. .packages[index].dependencies in JQ)\"\"\"\n source_components: list[dict[str, Any]] = []\n for build_loc, coords in remote_sources.items():\n remote_source = cls._get_remote_source(coords[0])\n remote_source_name, remote_source_type = cls._parse_remote_source_url(\n remote_source.repo\n )\n source_component: dict[str, Any] = {\n \"type\": remote_source_type,\n \"namespace\": Component.Namespace.UPSTREAM,\n \"meta\": {\n \"name\": remote_source_name,\n \"version\": remote_source.ref,\n \"remote_source\": coords[0],\n \"remote_source_archive\": coords[1],\n \"source\": [\"koji.listArchives\"],\n },\n }\n if build_loc:\n source_component[\"meta\"][\"cachito_build\"] = build_loc\n logger.info(\n \"Processing archive %s with package managers: %s\",\n coords[0],\n remote_source.pkg_managers,\n )\n for pkg_type in remote_source.pkg_managers:\n # We process top-level .packages and all child .packages[].dependencies\n # This is enough to get all components from the manifest\n # The top-level .dependencies are duplicates\n # of each top-level .package's child .dependencies\n # We don't need to process them again\n if pkg_type in (\n *cls.CARGO_TYPE_MAPPING,\n *cls.GEM_TYPE_MAPPING,\n *cls.NPM_TYPE_MAPPING,\n *cls.PYPI_TYPE_MAPPING,\n ):\n # Convert Cachito-reported package type to Corgi component type.\n # TODO: Add logging-kibana6-container-v6.8.1-362 to test data\n # use remote-source-kibana6.json manifest from Cachito\n provides, remote_source.packages = cls._extract_provides(\n remote_source.packages, pkg_type\n )\n elif pkg_type in cls.GOLANG_TYPE_MAPPING:\n provides, remote_source.packages = cls._extract_golang(\n remote_source.packages, go_stdlib_version\n )\n elif pkg_type == \"git-submodule\":\n # Handle this type separately\n # It's not necessarily guaranteed to be a GITHUB repo\n # So we can't rely on a simple mapping like the other types\n provides, remote_source.packages = cls._extract_submodules(\n remote_source.packages, pkg_type\n )\n else:\n raise ValueError(f\"Found unsupported remote-source pkg_manager {pkg_type}\")\n\n try:\n source_component[\"components\"].extend(provides)\n except KeyError:\n source_component[\"components\"] = provides\n\n source_components.append(source_component)\n return source_components\n\n @classmethod\n def update_remote_sources(\n cls,\n archive: dict[str, str],\n build_info: dict[str, str],\n remote_sources: dict[str, tuple[str, str]],\n ) -> None:\n cachito_url = 
next(iter(remote_sources))\n logger.debug(\"Setting remote sources for %s using archive data %s\", cachito_url, archive)\n remote_sources_url = cls._build_archive_dl_url(archive[\"filename\"], build_info)\n # Update the remote sources download url tuple\n existing_coords = list(remote_sources[cachito_url])\n if archive[\"type_name\"] == \"tar\":\n remote_sources[cachito_url] = (existing_coords[0], remote_sources_url)\n elif archive[\"type_name\"] == \"json\":\n remote_sources[cachito_url] = (remote_sources_url, existing_coords[1])\n\n @staticmethod\n def extract_common_key(filename: str) -> str:\n without_prefix = filename.removeprefix(\"remote-source-\")\n return without_prefix.split(\".\", 1)[0]\n\n def _extract_image_components(\n self,\n archive: dict[str, Any],\n build_id: int,\n build_nvr: str,\n noarch_rpms_by_id: dict[int, dict[str, Any]],\n rpm_build_ids: set[int],\n ) -> tuple[dict[int, dict[str, Any]], dict[str, Any]]:\n logger.info(\"Processing image archive %s\", archive[\"filename\"])\n docker_config = archive[\"extra\"][\"docker\"][\"config\"]\n labels = self._get_labels(docker_config)\n name_label = labels.get(\"name\", \"\")\n child_component = self._create_image_component(\n build_id, build_nvr, arch=archive[\"extra\"][\"image\"][\"arch\"], name_label=name_label\n )\n child_component[\"meta\"][\"description\"] = labels.get(\"description\", \"\")\n child_component[\"meta\"][\"docker_config\"] = docker_config\n child_component[\"meta\"][\"filename\"] = archive[\"filename\"]\n child_component[\"meta\"][\"license\"] = labels.get(\"License\", \"\")\n child_component[\"meta\"][\"brew_archive_id\"] = archive[\"id\"]\n child_component[\"meta\"][\"digests\"] = archive[\"extra\"][\"docker\"][\"digests\"]\n child_component[\"meta\"][\"source\"] = [\"koji.listArchives\"]\n rpms = self.koji_session.listRPMs(imageID=archive[\"id\"])\n arch_specific_rpms = []\n for rpm in rpms:\n rpm_component = {\n \"type\": Component.Type.RPM,\n \"namespace\": Component.Namespace.REDHAT,\n \"brew_build_id\": rpm[\"build_id\"],\n \"meta\": {\n \"nvr\": rpm[\"nvr\"],\n \"name\": rpm[\"name\"],\n \"version\": rpm[\"version\"],\n \"release\": rpm[\"release\"],\n \"arch\": rpm[\"arch\"],\n \"rpm_id\": rpm[\"id\"],\n \"source\": [\"koji.listRPMs\"],\n },\n }\n rpm_build_ids.add(rpm[\"build_id\"])\n if rpm[\"arch\"] == \"noarch\":\n noarch_rpms_by_id[rpm[\"id\"]] = rpm_component\n else:\n arch_specific_rpms.append(rpm_component)\n child_component[\"rpm_components\"] = arch_specific_rpms\n return noarch_rpms_by_id, child_component\n\n @staticmethod\n def _get_labels(docker_config: dict[str, dict[str, dict[str, str]]]) -> dict[str, str]:\n config = docker_config.get(\"config\", {})\n return config.get(\"Labels\", {})\n\n @classmethod\n def _extract_provides(\n cls, packages: list[SimpleNamespace], pkg_type: str\n ) -> tuple[list[dict[str, Any]], list[SimpleNamespace]]:\n components: list[dict[str, Any]] = []\n typed_pkgs, remaining_packages = cls._filter_by_type(packages, pkg_type)\n for typed_pkg in typed_pkgs:\n component_type = cls.CACHITO_PKG_TYPE_MAPPING[pkg_type]\n typed_component: dict[str, Any] = {\n \"type\": component_type,\n \"namespace\": cls.check_red_hat_namespace(component_type, typed_pkg.version),\n \"meta\": {\n \"name\": typed_pkg.name,\n \"version\": typed_pkg.version,\n },\n }\n # Sometimes a top-level package has a \"path\" key\n # e.g. 
for npm or go-package components nested into a subfolder\n try:\n typed_component[\"meta\"][\"path\"] = typed_pkg.path\n except AttributeError:\n pass\n\n typed_component[\"components\"] = []\n for dep in typed_pkg.dependencies:\n component_meta = {\n \"name\": dep.name,\n \"version\": dep.version,\n }\n component_type = cls.CACHITO_PKG_TYPE_MAPPING[dep.type]\n component = {\n \"type\": component_type,\n \"namespace\": cls.check_red_hat_namespace(component_type, dep.version),\n \"meta\": component_meta,\n }\n # The dev key is only present for Cachito package managers which support\n # dev dependencies. See https://github.com/containerbuildsystem/cachito/blob/\n # f3e954e3d04d2cd35cc878c1189cd55e7471220d/docs/metadata.md#dependencydev\n if hasattr(dep, \"dev\"):\n component_meta[\"dev\"] = dep.dev\n typed_component[\"components\"].append(component)\n components.append(typed_component)\n return components, remaining_packages\n\n @classmethod\n def _extract_golang(\n cls, dependencies: list[SimpleNamespace], go_stdlib_version: str = \"\"\n ) -> tuple[list[dict[str, Any]], list[SimpleNamespace]]:\n \"\"\"Given a list of Golang module and package objects in some Cachito manifest,\n build and return a list of Golang module and package dicts\"\"\"\n # We no longer move go-packages like golang.org/x/text/cases\n # underneath modules with matching names like golang.org/x/text\n # because this complicates the code / caused several bugs,\n # and we should be relying on Cachito's dependency tree anyway\n modules, remaining_deps = cls._filter_by_type(dependencies, \"gomod\")\n packages, remaining_deps = cls._filter_by_type(remaining_deps, \"go-package\")\n\n # Build a list of package dicts from package objects\n package_dicts = cls._build_golang_component_dict_from_objs(go_stdlib_version, packages)\n # Build a list of module dicts from module objects\n module_dicts = cls._build_golang_component_dict_from_objs(go_stdlib_version, modules)\n\n return [*module_dicts, *package_dicts], remaining_deps\n\n @classmethod\n def _build_golang_component_dict_from_objs(\n cls, go_stdlib_version: str, dependent_obj_list: list[SimpleNamespace]\n ) -> list[dict[str, Any]]:\n \"\"\"Given a list of dependent Golang component objects,\n build and return a list of dependent Golang component dicts\"\"\"\n dependent_dict_list: list[dict[str, Any]] = []\n for dep in dependent_obj_list:\n # We set properties for this component assuming it's a gomod or go-package\n # And filter out top-level .packages with different types\n # But since this method is recursive and processes child .dependencies,\n # raise an error if we see a non-Golang type\n # That means some top-level gomod or go-package in .packages\n # depends on a non-Golang child in .packages[index].dependencies\n # Probably shouldn't happen but we can't guarantee this\n dependent_dict: dict[str, Any] = {\n # Should be GOLANG, or raise a KeyError if not\n \"type\": cls.GOLANG_TYPE_MAPPING[dep.type],\n \"namespace\": Component.Namespace.UPSTREAM,\n \"meta\": {\n # Could be gomod or go-package\n \"go_component_type\": dep.type,\n \"name\": dep.name.removeprefix(\"vendor/\"),\n # stdlib components get their versions from the golang compiler\n \"version\": dep.version or go_stdlib_version,\n },\n }\n # Report dev if it's present, whether it's True or False\n # Only present for child .dependencies as far as I can tell\n if hasattr(dep, \"dev\"):\n dependent_dict[\"meta\"][\"dev\"] = dep.dev\n # Report path if it's not empty / None\n # Only present for top-level .packages as 
far as I can tell\n path = getattr(dep, \"path\", \"\")\n if path:\n dependent_dict[\"meta\"][\"path\"] = path\n\n nested_deps: list[SimpleNamespace] = getattr(dep, \"dependencies\", [])\n if nested_deps:\n dependent_dict[\"components\"] = cls._build_golang_component_dict_from_objs(\n go_stdlib_version, nested_deps\n )\n dependent_dict_list.append(dependent_dict)\n\n return dependent_dict_list\n\n @classmethod\n def _extract_submodules(\n cls, packages: list[SimpleNamespace], pkg_type: str\n ) -> tuple[list[dict[str, Any]], list[SimpleNamespace]]:\n \"\"\"Given a list of git submodules in some Cachito manifest,\n build and return a list of GITHUB component dicts\n Raise a ValueError if some component isn't hosted on Github\"\"\"\n # All the examples of this type I saw were for Github repos\n # Fail to process anything that's not, just in case\n # In the future, we can add support for other Git services if needed\n # e.g. Bitbucket, Gitlab, etc.\n # Raising an error makes these edge cases visible so we know to do this\n # We won't silently ignore / skip certain components\n # so we won't need to reprocess all builds with this type later\n\n components: list[dict[str, Any]] = []\n typed_pkgs, remaining_packages = cls._filter_by_type(packages, pkg_type)\n for typed_pkg in typed_pkgs:\n if not typed_pkg.version.startswith(\"https://github.com/\"):\n # If we ever see http:// or https://www. or git@github.com:user/repo forms,\n # let's add logic to handle them later instead of being too permissive\n raise ValueError(\n f\"git-submodule package is not hosted on Github: {typed_pkg.version}\"\n )\n\n # Values are like https://github.com/user_namespace/repo_name#commit_hash\n name_and_version = typed_pkg.version.rsplit(\"#\", maxsplit=1)\n if len(name_and_version) == 1:\n raise ValueError(\n f\"Couldn't identify version / commit ID for package: {typed_pkg.version}\"\n )\n\n name, version = name_and_version\n name = name.replace(\"https://github.com/\", \"\", 1)\n if name.endswith(\".git\"):\n name = name.replace(\".git\", \"\", 1)\n\n typed_component: dict[str, Any] = {\n \"type\": Component.Type.GITHUB,\n \"namespace\": Component.Namespace.UPSTREAM,\n \"meta\": {\n # The name of the submodule in the parent repo\n \"module_name\": typed_pkg.name,\n # Should be the name of the child repo we included by this point\n \"name\": name,\n # Path to the submodule in the parent repo\n # Usually the same as the submodule name, but not always\n # e.g. typed_pkg.name == \"grpc\", typed_pkg.path == \"third_party/grpc\"\n # This path shouldn't be used as a qualifier in this component's purl\n # It represents the path in the parent repo / component\n # Not the repo / component we're creating here\n # TODO: Check the source / root component we create\n # We have Git repo names and commit IDs in Cachito's JSON data\n # in .repo and .ref top-level keys, respectively\n # Are we using them anywhere? 
We should store in meta_attr at least\n \"path\": typed_pkg.path,\n # Should be a commit ID / hash by this point\n \"version\": version,\n },\n }\n if hasattr(typed_pkg, \"dev\"):\n typed_component[\"meta\"][\"dev\"] = typed_pkg.dev\n\n # This code should be unused\n # All git-submodule examples I looked at had no child .dependencies\n dependencies = getattr(typed_pkg, \"dependencies\", [])\n if dependencies:\n typed_component[\"components\"], remaining_dependencies = cls._extract_submodules(\n dependencies, pkg_type\n )\n if remaining_dependencies:\n # We filtered out child .dependencies of this top-level .package\n # because they didn't have git-submodule type\n # Since they're not top-level .packages themselves,\n # they won't get processed anywhere else in our code\n # So in theory we'd be missing components\n # This shouldn't happen, but raise an error just in case\n raise ValueError(\n f\"Top-level package {typed_pkg.version} had {len(remaining_dependencies)} \"\n \"child dependencies that were not Git submodules\"\n )\n components.append(typed_component)\n return components, remaining_packages\n\n @staticmethod\n def _filter_by_type(\n dependencies: list[SimpleNamespace], pkg_type: str\n ) -> tuple[list[SimpleNamespace], list[SimpleNamespace]]:\n filtered: list[SimpleNamespace] = []\n remaining_deps = dependencies[:]\n for dep in dependencies:\n if dep.type == pkg_type:\n filtered.append(dep)\n remaining_deps.remove(dep)\n return filtered, remaining_deps\n\n @staticmethod\n def extract_advisory_ids(build_tags: list[str]) -> list[str]:\n \"\"\"From a Brew build's list of tags, return any errata IDs with -released stripped\"\"\"\n advisory_ids = set()\n for tag in build_tags:\n match = ADVISORY_REGEX.match(tag)\n if match:\n advisory_ids.add(match.group())\n return sorted(advisory_ids)\n\n @staticmethod\n def parse_advisory_ids(errata_tags: list[str]) -> list[str]:\n \"\"\"From a Brew build's list of Errata tags, return tags with released (4-digit) IDs\"\"\"\n # released errata always have 4-digit IDs, e.g. RHBA-2023:1234\n # unreleased errata have 5-digit IDs or greater\n # e.g. 
RHEA-2023:12345 or RHSA-2023:123456\n # tags in Brew also have a -released, -dropped, or -pending suffix\n # but our ADVISORY_REGEX strips this to get just the friendly advisory name\n\n return sorted(\n errata_tag\n for errata_tag in errata_tags\n if len(errata_tag.split(\":\", maxsplit=1)[-1]) == 4\n )\n\n @staticmethod\n def get_module_build_data(build_info: dict[str, Any]) -> dict[str, Any]:\n\n modulemd_yaml = build_info[\"extra\"][\"typeinfo\"][\"module\"].get(\"modulemd_str\", \"\")\n if not modulemd_yaml:\n raise ValueError(\"Cannot get module build data, modulemd_yaml is undefined\")\n modulemd = yaml.safe_load(modulemd_yaml)\n meta_attr = {\n \"stream\": modulemd[\"data\"][\"stream\"],\n \"context\": modulemd[\"data\"][\"context\"],\n \"components\": modulemd[\"data\"].get(\"components\", []),\n \"rpms\": modulemd[\"data\"][\"xmd\"][\"mbs\"].get(\"rpms\", []),\n \"source\": [\"koji.getBuild\"],\n }\n module = {\n \"type\": Component.Type.RPMMOD,\n \"namespace\": Component.Namespace.REDHAT,\n \"meta\": {\n \"name\": build_info[\"name\"],\n \"version\": build_info[\"version\"],\n # TODO: Need to verify this\n \"license_declared_raw\": \" OR \".join(modulemd[\"data\"][\"license\"].get(\"module\", \"\")),\n \"release\": build_info[\"release\"],\n \"description\": modulemd[\"data\"][\"description\"],\n \"meta_attr\": meta_attr,\n },\n }\n\n return module\n\n # Force clients to call this using an int build_id\n def get_component_data(self, build_id: int) -> dict[str, Any]:\n logger.info(\"Retrieving Brew build: %s\", build_id)\n # koji api expects a build_id to be an int. If you pass a string it'll look for an NVR\n build = self.koji_session.getBuild(build_id)\n if not build:\n raise BrewBuildNotFound(f\"Build {build_id} was not found\")\n # getBuild will accept an NVR\n # but later brew calls require an integer ID\n build_id = build[\"id\"]\n # Determine build state\n state = build.get(\"state\")\n # Disabled due to DB performance issues - see notes in task\n # if state == koji.BUILD_STATES[\"DELETED\"]:\n # app.send_task(\n # \"corgi.tasks.brew.slow_delete_brew_build\",\n # args=(build_id, state),\n # )\n # return {}\n if state != koji.BUILD_STATES[\"COMPLETE\"]:\n raise BrewBuildInvalidState(f\"Build {build_id} state is {state}; skipping!\")\n\n if build[\"name\"] in self.COMPONENT_EXCLUDES:\n logger.info(f\"Skipping processing build {build_id} ({build['name']})\")\n return {}\n\n # Determine build type\n build_type_info = self.koji_session.getBuildType(build)\n build_type = next(\n (type_ for type_ in build_type_info.keys() if type_ in self.SUPPORTED_BUILD_TYPES),\n \"unknown\",\n )\n if not any(type_ in self.SUPPORTED_BUILD_TYPES for type_ in build_type_info.keys()):\n raise BrewBuildTypeNotSupported(\n f\"Build {build_id} type is not supported: {build_type_info}\"\n )\n # TODO: refactor Brew.CONTAINER_BUILD_TYPE to be a generic IMAGE_TYPE with image types\n # identified in a separate attribute on the build itself.\n if build_type == self.CONTAINER_BUILD_TYPE:\n # If this is an \"image\" type build, it may be building a container image, ISO image,\n # or other types of images.\n build_extra = build.get(\"extra\")\n if build[\"cg_name\"] == \"atomic-reactor\":\n # Check the content generator name to determine where this\n # image was built, which indicates what type of image it is.\n # Container images are built in OSBS, which uses atomic-reactor to build them.\n pass\n elif build_extra and build_extra.get(\"submitter\") == \"osbs\":\n # Some builds such as 903565 have the cg_name 
field set to None\n # In that case check the extra/submitter field for osbs value\n pass\n else:\n raise BrewBuildTypeNotSupported(\n f\"Image build {build_id} is not supported: \"\n f\"{build['cg_name']} content generator used\"\n )\n build[\"type\"] = build_type\n\n # Determine build source\n if not build.get(\"source\"):\n # Sometimes there is no source URL on the build, but it can be found in the task\n # request info instead.\n logger.info(\"Fetching source from task info for %s\", build_id)\n try:\n build[\"source\"] = self.get_source_of_build(build)\n except BrewBuildSourceNotFound as exc:\n # Some older builds do not specify source URLs; the below date was chosen based\n # on some initial analysis of source-less builds in Brew.\n if datetime.fromtimestamp(build[\"completion_ts\"]) < datetime(2015, 1, 1):\n logger.error(\n f\"Build {build_id} has no associated source URL but is too old \"\n f\"to process; returning an empty component.\"\n )\n return {}\n else:\n raise exc\n\n # Clean source URL on old builds which are being reloaded, if needed\n build[\"source\"] = self.clean_source_of_build(build[\"source\"])\n\n # Add list of Brew tags for this build\n tags = self.koji_session.listTags(build_id)\n build[\"tags\"] = sorted(set(tag[\"name\"] for tag in tags))\n build[\"errata_tags\"] = self.extract_advisory_ids(build[\"tags\"])\n build[\"released_errata_tags\"] = self.parse_advisory_ids(build[\"errata_tags\"])\n\n # TODO: handle wrapper RPM builds:\n # brew buildID=1839210\n # These should create the necessary RPM components and then hand off the rest of the\n # analysis to the maven analyzer to actually map the jars shipped within the RPMs.\n\n # Add additional data based on the build type\n if build_type == self.CONTAINER_BUILD_TYPE:\n component = self.get_container_build_data(build_id, build)\n elif build_type == self.RPM_BUILD_TYPE:\n component = self.get_rpm_build_data(build_id)\n elif build_type == self.MODULE_BUILD_TYPE:\n component = self.get_module_build_data(build)\n else:\n raise BrewBuildTypeNotSupported(\n f\"Build {build_id} of type {build_type} is not supported\"\n )\n\n component[\"build_meta\"] = {\"build_info\": build, \"type_info\": build_type_info}\n return component\n\n def get_builds_with_tag(\n self, brew_tag: str, inherit: bool = False, latest: bool = True\n ) -> tuple[str, ...]:\n try:\n builds = self.koji_session.listTagged(brew_tag, inherit=inherit, latest=latest)\n return tuple(b[\"build_id\"] for b in builds)\n except koji.GenericError as exc: # type: ignore[attr-defined]\n logger.warning(\"Couldn't find brew builds with tag %s: %s\", brew_tag, exc)\n return tuple()\n\n def brew_rpm_headers_lookup(\n self, rpm_infos: list[dict[str, str]]\n ) -> tuple[tuple[dict[str, str], Any], ...]:\n # Define headers from which we'll pull extra RPM metadata\n rpm_headers = (\n \"summary\",\n \"description\",\n \"license\",\n \"provides\",\n \"provideversion\",\n \"url\",\n \"source\",\n )\n with self.koji_session.multicall() as m:\n rpm_info_header_calls = tuple(\n (rpm_info, m.getRPMHeaders(rpmID=rpm_info[\"id\"], headers=rpm_headers))\n for rpm_info in rpm_infos\n )\n return rpm_info_header_calls\n\n def brew_srpm_lookup(self, srpms: Iterable[str]) -> tuple[tuple[str, Any], ...]:\n \"\"\"The Koji API findBuild call can accept an NVR as a format\"\"\"\n with self.koji_session.multicall() as multicall:\n find_build_id_calls = tuple((srpm, multicall.findBuildID(srpm)) for srpm in srpms)\n return find_build_id_calls\n\n def brew_rpm_lookup(self, rpms: tuple[str, ...]) -> 
tuple[tuple[str, Any], ...]:\n \"\"\"The Koji API getRPM call can accept an RPM in NVR format\"\"\"\n with self.koji_session.multicall() as multicall:\n get_rpm_calls = tuple((rpm, multicall.getRPM(rpm)) for rpm in rpms)\n return get_rpm_calls\n\n @classmethod\n def sans_epoch(cls, rpm: str) -> str:\n \"\"\"This removes the epoch part of an SRPM or RPM so the RPM name is in NVR format\"\"\"\n name, version, release = cls.split_nvr(rpm)\n version_parts = version.split(\":\")\n if len(version_parts) > 1:\n rpm = f\"{name}-{version_parts[1]}-{release}\"\n return rpm\n\n @staticmethod\n def module_key_to_nvr(module_key: str) -> str:\n \"\"\"This adjusts the rhel_module name found in composes to be in NVR format expected by\n the Koji API\"\"\"\n module_parts = module_key.split(\":\")\n return f\"{module_parts[0]}-{module_parts[1]}-{module_parts[2]}.{module_parts[3]}\"\n\n def persist_modules(self, rhel_modules: dict[str, list[str]]) -> Generator[str, None, None]:\n # For each rhel_module look up its build_id\n find_build_id_calls = self.brew_srpm_lookup(rhel_modules.keys())\n for srpm, call in find_build_id_calls:\n build_id = call.result\n if not build_id:\n logger.warning(\"Did not find build_id for rhel_module: %s\", srpm)\n continue\n rhel_module, _ = CollectorRhelModule.objects.get_or_create(\n build_id=build_id,\n defaults={\"nvr\": srpm},\n )\n # Lookup the rpm build_ids\n rpms = tuple(\n self.sans_epoch(rpm) for rpm in rhel_modules[srpm] if not rpm.endswith(\".src\")\n )\n rpm_lookup_calls = self.brew_rpm_lookup(rpms)\n for rpm, call in rpm_lookup_calls:\n srpm_build_id = call.result[\"build_id\"]\n srpm_obj, _ = CollectorSRPM.objects.get_or_create(build_id=srpm_build_id)\n rpm_obj, _ = CollectorRPM.objects.get_or_create(nvra=rpm, srpm=srpm_obj)\n rpm_obj.rhel_module.add(rhel_module)\n\n yield build_id\n\n def lookup_build_ids(\n self, rpm_filenames_by_srpm: dict[str, list[str]]\n ) -> Generator[str, None, None]:\n # For each srpm look up its build id\n find_build_id_calls = self.brew_srpm_lookup(rpm_filenames_by_srpm.keys())\n for srpm, call in find_build_id_calls:\n build_id = call.result\n if not build_id:\n for filename in rpm_filenames_by_srpm[srpm]:\n logger.debug(\n \"Didn't find build with NVR %s, using rpm filename: %s\",\n srpm,\n filename,\n )\n # We don't use a multicall here, because this won't be called\n # in most cases\n rpm_data = self.koji_session.getRPM(filename)\n if not rpm_data:\n # Try the next srpm rpm filename\n continue\n build_id = rpm_data[\"build_id\"]\n # found the build_id, stop iterating filenames\n break\n # if no filenames had RPM data\n if not build_id:\n logger.warning(\"Unable to find build_id for %s\", srpm)\n continue\n yield build_id\n\n @staticmethod\n def fetch_rhel_module(build_id: str) -> dict[str, Any]:\n \"\"\"Look up a RHEL module by either an integer build_id or an NVR.\"\"\"\n try:\n lookup: dict = {\"build_id\": int(build_id)}\n except ValueError:\n lookup = {\"nvr\": build_id}\n try:\n rhel_module = CollectorRhelModule.objects.get(**lookup)\n except CollectorRhelModule.DoesNotExist:\n logger.debug(\"Did not find %s in CollectorRhelModule data\", build_id)\n return {}\n name, version, release = Brew.split_nvr(rhel_module.nvr)\n module: dict[str, Any] = {\n \"type\": Component.Type.RPMMOD,\n \"namespace\": Component.Namespace.REDHAT,\n \"meta\": {\n \"name\": name,\n \"version\": version,\n \"release\": release,\n \"source\": [\"collectors/rhel_module\"],\n },\n }\n nested_builds: set[int] = set()\n rpm_components: list[dict] = []\n for rpm in 
rhel_module.collectorrpm_set.get_queryset():\n srpm_build_id = rpm.srpm.build_id\n nested_builds.add(srpm_build_id)\n name, version, release = Brew.split_nvr(rpm.nvra)\n release_split = release.rsplit(\".\", 1)\n arch = \"\"\n if len(release_split) == 2:\n arch = release_split[1]\n rpm_component: dict = {\n \"type\": Component.Type.RPM,\n \"namespace\": Component.Namespace.REDHAT,\n \"brew_build_id\": srpm_build_id,\n \"meta\": {\n \"name\": name,\n \"version\": version,\n \"release\": release_split[0],\n \"arch\": arch,\n \"source\": [\"collectors/rhel_module\"],\n },\n }\n rpm_components.append(rpm_component)\n module[\"components\"] = rpm_components\n module[\"nested_builds\"] = list(nested_builds)\n return module\n","repo_name":"RedHatProductSecurity/component-registry","sub_path":"corgi/collectors/brew.py","file_name":"brew.py","file_ext":"py","file_size_in_byte":58254,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"18"}
+{"seq_id":"1209677135","text":"#Edgar Basto n.2222\nimport os\nfrom pprint import pprint\n\nd = {'a': 0, 'e': 0 , 'i':0, 'o':0, 'u':0}\n\npprint(os.listdir())\nficheiro = input('Name of the file to open: ') \n\ntry:\n\tf = open(ficheiro, 'r') \nexcept:\n\tprint('The file does not exist!')\n\n\ntry:\n\tfor letra in f.read():\n\t\tif letra in d.keys():\n\t\t\td[letra] += 1\nexcept:\n\tprint('Could not read the file')\n\tquit()\n\nf.close()\nfor vogal in d.keys():\n\tprint(str(vogal) + ': '+ str(d[vogal]))\n\n","repo_name":"edgarbasto/fichas_p4","sub_path":"Ficha_004/edgar_basto_ficha4_01.py","file_name":"edgar_basto_ficha4_01.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"75323770279","text":"# function that checks a sheet of colored paper\ndef check(row, col, n):\n global blue, white\n curr = graph[row][col]\n\n for i in range(row, row+n):\n for j in range(col, col+n):\n if graph[i][j] != curr:\n next_n = n//2\n check(row, col, next_n)\n check(row + next_n, col, next_n)\n check(row, col + next_n, next_n)\n check(row + next_n, col + next_n, next_n)\n return\n if curr == 1:\n blue += 1\n elif curr == 0:\n white += 1\n\n\n# n is a power of 2: 2, 4, 6, 8, 16, 32, 64, 128\nN = int(input())\ngraph = [list(map(int,input().split())) for _ in range(N)]\n\nblue = 0\nwhite = 0\n\ncheck(0,0,N)\n\nprint(white)\nprint(blue)","repo_name":"Jeongp4939/cps_2023","sub_path":"coding_problem_solving/baekjoon/2.Silver/S2/2630_색종이_만들기.py","file_name":"2630_색종이_만들기.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"37303093764","text":"import heapq\n\nclass Solution:\n def largestSumAfterKNegations(self, nums, k):\n currSum = 0\n heap = []\n for num in nums:\n currSum += num\n heapq.heappush(heap, num)\n i = 0\n while i < k:\n item = heapq.heappop(heap)\n if item < 0:\n currSum -= 2 * item\n i += 1\n if item >= 0:\n if (k - i) % 2 == 0:\n currSum += 2 * item\n else:\n currSum -= 2 * item\n break\n return currSum\n\nprint(Solution().largestSumAfterKNegations([1,3,2,6,7,9], 3))","repo_name":"theabbie/leetcode","sub_path":"miscellaneous/maxSum.py","file_name":"maxSum.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"}
+{"seq_id":"32913968790","text":"# -*- coding: utf-8 -*-\n\nu\"\"\"\n.. 
module:: test_organizations\n\"\"\"\nfrom django.test import Client\nfrom django.test import TestCase\n\nfrom apps.volontulo.models import Organization\nfrom apps.volontulo.tests import common\n\n\nclass TestOrganizations(TestCase):\n u\"\"\"Class responsible for testing organization specific views.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n u\"\"\"Data fixtures for all tests.\"\"\"\n # volunteer user - totally useless\n cls.volunteer = common.initialize_empty_volunteer()\n # organization user - no offers\n cls.organization = common.initialize_empty_organization()\n # volunteer user - offers, organizations\n cls.volunteer2, cls.organization2 = \\\n common.initialize_filled_volunteer_and_organization()\n\n def setUp(self):\n u\"\"\"Set up each test.\"\"\"\n self.client = Client()\n\n def test__organization_list(self):\n u\"\"\"Test getting organization list as anonymous.\"\"\"\n response = self.client.get('/organizations', follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'organizations/list.html')\n self.assertIn('organizations', response.context)\n self.assertEqual(Organization.objects.all().count(), 2)\n\n def test__ensure_status_is_displayed_in_profile_view(self):\n \"\"\"Test if offer status is displayed in a profile view.\"\"\"\n self.client.login(\n username=u'volunteer2@example.com',\n password=u'volunteer2'\n )\n response = self.client.get('/me', follow=True)\n self.assertTemplateUsed(response, 'users/my_offers.html')\n self.assertIn('offers', response.context)\n self.assertEqual(\n 'published', response.context['offers'][0].offer_status)\n\n def test__ensure_status_is_displayed_in_organisations_view(self):\n \"\"\"Test if offer status is displayed in an organisation view.\"\"\"\n self.client.login(\n username=u'volunteer2@example.com',\n password=u'volunteer2'\n )\n response = self.client.get('/me', follow=True)\n self.assertIn('offers', response.context)\n self.assertEqual(\n 'published', response.context['offers'][0].offer_status)\n","repo_name":"stxnext-csr/volontulo","sub_path":"apps/volontulo/tests/views/test_organizations.py","file_name":"test_organizations.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"18"} +{"seq_id":"1545195621","text":"from .amazon import COMPUTERS, PHOTO, Amazon\nfrom .asymproj import Asymproj\nfrom .botnet import C2, CHORD, DEBRU, KADEM, LEET, P2P, Botnet\nfrom .cite_seer import CiteSeer\nfrom .coauthor import CS, PHYSICS, Coauthor\nfrom .cora import Cora\nfrom .ppi import PPI\nfrom .pub_med import PubMed\nfrom .qm9 import QM9, Qm9\n\n__all__ = [\n \"Asymproj\",\n \"CiteSeer\",\n \"Cora\",\n \"PPI\",\n \"PubMed\",\n \"QM9\",\n \"Qm9\",\n \"Amazon\",\n \"COMPUTERS\",\n \"PHOTO\",\n \"Coauthor\",\n \"CS\",\n \"PHYSICS\",\n \"Botnet\",\n \"CHORD\",\n \"DEBRU\",\n \"KADEM\",\n \"LEET\",\n \"C2\",\n \"P2P\",\n]\n","repo_name":"jackd/graph-tfds","sub_path":"graph_tfds/graphs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"997420166","text":"\"\"\"\n This file contains all actions which the operators defined in layers_panel perform outside the UI\n\"\"\"\nimport bpy\nfrom materials_mgr.material_mgr import MaterialMgr\n\nclass LayersPanelOpActions:\n def __init__(self, context, report):\n self.context = context\n self.report = report\n\n def print_all(self):\n #scn = 
self.context.scene\n #for i in scn.custom:\n # print(i.name, i.id)\n mat = MaterialMgr.create_new_material_for_active_obj(\"TheName2\")\n print('MAT ', mat.name)\n return {'FINISHED'}\n\n def selectAllItems(self):\n scn = self.context.scene\n bpy.ops.object.select_all(action='DESELECT')\n obj = bpy.data.objects[scn.custom[scn.custom_index].name]\n obj.select = True\n\n return {'FINISHED'}\n\n def clearAllItems(self):\n scn = self.context.scene\n lst = scn.custom\n current_index = scn.custom_index\n\n if len(lst) > 0:\n # reverse range to remove last item first\n for i in range(len(lst) - 1, -1, -1):\n scn.custom.remove(i)\n self.report({'INFO'}, \"All items removed\")\n\n else:\n self.report({'INFO'}, \"Nothing to remove\")\n\n return {'FINISHED'}\n","repo_name":"gnuton/MaterialPainter","sub_path":"layers_view/layers_panel_op_actions.py","file_name":"layers_panel_op_actions.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"}
+{"seq_id":"6225948857","text":"import json\nimport time\n\nfrom bkbase.dataflow.metrics.meter.counter import Counter\nfrom bkbase.dataflow.metrics.meter.literal import Literal\nfrom bkbase.dataflow.metrics.util.exceptions import MetricsVerifyException\nfrom bkbase.dataflow.metrics.util.json_serialize import ObjectEncoder\n\n\nclass MetricsRegistry(object):\n def __init__(self):\n # metrics is an empty dict; users can report arbitrary data\n # users can specify the reported key; the value is the reported content\n self._metrics = {}\n\n \"\"\"\n Add a new counter/literal object\n \"\"\"\n\n @staticmethod\n def _new_meter(meter_type):\n if meter_type == \"literal\":\n return Literal()\n else:\n return Counter()\n\n \"\"\"\n Check whether the object is empty\n \"\"\"\n\n @staticmethod\n def _is_null_meter(meter_object):\n if isinstance(meter_object, Literal):\n return meter_object.get_literal()\n else:\n return meter_object.get_count()\n\n \"\"\"\n Basic validation: whether the key is legal\n \"\"\"\n\n @staticmethod\n def _valid_key(key):\n # 1. must be non-empty\n if not key:\n raise MetricsVerifyException(u\"Add failed! Input must be non-empty\")\n # 2. must be a string\n if not isinstance(key, str):\n raise MetricsVerifyException(u\"Add failed! Input must be a non-empty string\")\n # 3. must not start or end with '.'\n if key[0] == \".\" or key[len(key) - 1] == \".\":\n raise MetricsVerifyException(u\"Add failed! Input must not start or end with '.'\")\n\n \"\"\"\n 1. If the path exists, fetch and return the object\n 2. If the path does not exist, create and return the object\n Returns: meter = Counter/Literal\n \"\"\"\n\n def get_or_add(self, key, meter_type):\n self._valid_key(key)\n if \".\" in key:\n \"\"\"\n If absent: add a key, meter_type object\n If present: return it\n 1. a.b.c.d\n directly builds the hierarchy: a:{b:{c:{d:'meter_type'}}}\n 2. if the key cannot be inserted, raise an exception\n e.g. inserting a.b.c afterwards raises an exception\n \"\"\"\n key_list = key.split(\".\")\n key_except_tail_list = key_list[0 : len(key_list) - 1]\n tail = key_list[len(key_list) - 1]\n temp_dict = self._metrics\n for i in key_except_tail_list:\n if i in temp_dict:\n temp_dict = temp_dict[i]\n else:\n temp_dict[i] = {}\n temp_dict = temp_dict[i]\n\n \"\"\"\n path a.b.c.{d:xxx} exists\n add a.b.c.d.e = xxx\n \"\"\"\n # print temp_dict\n\n if not isinstance(temp_dict, dict):\n raise MetricsVerifyException(u\"Add failed! Path %s is not a dict\" % \",\".join(key_except_tail_list))\n if tail in temp_dict and self._is_null_meter(temp_dict[tail]):\n \"\"\"\n key already exists and has been initialized (not None, 0 or '')\n path a.b.c.{d:xxx} exists\n add a.b.c = xxx\n temp_dict[tail] is not a Counter/Literal object, so it cannot be returned\n \"\"\"\n if isinstance(temp_dict[tail], dict):\n raise MetricsVerifyException(u\"Add failed! Path %s already exists and is a dict\" % \",\".join(key_except_tail_list))\n \"\"\"\n path a.b.c.{d:xxx} exists\n add a.b.c.d = xxx; the meter object exists, just return it\n 1. 
the fetched object type does not match the parameter\n \"\"\"\n if meter_type == \"counter\":\n if not isinstance(temp_dict[tail], Counter):\n raise MetricsVerifyException(u\"Fetched object type does not match the parameter! Object type ({}), parameter type ({})\".format(\"Literal\", meter_type))\n else:\n if not isinstance(temp_dict[tail], Literal):\n raise MetricsVerifyException(u\"Fetched object type does not match the parameter! Object type ({}), parameter type ({})\".format(\"Counter\", meter_type))\n return temp_dict[tail]\n # path does not exist, create and return\n meter = self._new_meter(meter_type)\n temp_dict[tail] = meter\n return meter\n else:\n # no hierarchy\n if key in self._metrics and self._metrics[key]:\n # key already exists and has been initialized (not None, 0 or '')\n return self._metrics[key]\n else:\n meter = self._new_meter(meter_type)\n self._metrics[key] = meter\n return meter\n\n def dump_metrics(self):\n # set time to the current time\n now = int(time.time())\n self.get_or_add(\"time\", \"literal\").set_literal(now)\n return json.dumps(self._metrics, cls=ObjectEncoder)\n\n # is_end\n def is_end(self):\n return self.get_or_add(\"is_end\", \"literal\").get_literal()\n\n # close: mark reporting as finished\n def set_end(self):\n self.get_or_add(\"is_end\", \"literal\").set_literal(True)\n","repo_name":"Tencent/bk-base","sub_path":"src/dataflow/unified-computing/python/bkbase/dataflow/metrics/registry/metrics_registry.py","file_name":"metrics_registry.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"18"}
+{"seq_id":"16974503000","text":"\r\n\r\n#Reference: https://tensorflow.google.cn/api_docs/python/tf/nest TensorFlow's built-in test class\r\n#Reference: Python's built-in unit testing framework unittest: https://zhuanlan.zhihu.com/p/51095152\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport tensorflow as tf\r\nimport time\r\nimport HTMLTestRunner\r\nimport unittest\r\n\r\ntf.compat.v1.disable_eager_execution()\r\nsess= tf.compat.v1.Session()\r\n\t\r\n#set to your own path\r\nnk1 = tf.load_op_library('../addone_cuda/addone_cuda.so') \r\nnk2 = tf.load_op_library('../addone_opencl/addone_opencl.so')\r\n\r\n\r\n\r\n###############design your own test data#################\r\n#test01 \r\nresult01_cuda = sess.run(nk1.add_one([5, 4, 3, 2, 1]))\r\nresult01_opencl = sess.run(nk2.nk_add_one([5, 4, 3, 2, 1]))\r\n\r\n#test02 \r\ndata02 = tf.range(start=1, limit=15, delta=3, dtype=tf.int32) #keep the dtype consistent with the input tensor type used when rewriting the op\r\nresult02_cuda = sess.run(nk1.add_one(data02))\r\nresult02_opencl = sess.run(nk2.nk_add_one(data02))\r\n\r\n\r\n#create the unit test class, inheriting from tf.test.TestCase\r\nclass AddOneTest(tf.test.TestCase):\r\n\r\n\r\n\tdef setUp(self):\r\n\r\n\t\tprint(\"\\nTest started:\")\r\n\r\n\t#Test case 01: check that the .so file loads correctly (test case names must start with 'test', otherwise they are not recognized)\r\n\tdef test01(self):\r\n\r\n\t\t#print the test description\r\n\t\tprint (\"Verify the kernel is reliable at boundary value 1\") \r\n\t\t\r\n\t\t#print timing\r\n\t\tprint (\"cuda time use:64.328us event record time:4.74993us\"+\"\\n\"+\"opencl time use:23.849us opencl kernel time use:4.3657us\") \r\n\r\n\t\tself.assertAllEqual(result01_cuda, result01_opencl) #see the linked docs above for the relevant assertion functions used to verify results\r\n\r\n\t\r\n\tdef test02(self):\r\n\r\n\t\tprint (\"Verify the kernel is reliable at boundary value 10000\")\r\n\r\n\t\tprint (\"cuda time use:21.288us event record time:8.39833us\"+\"\\n\"+\"opencl time use:24.297us opencl kernel time use:4.47418us\") \r\n\t\t\r\n\t\tself.assertAllEqual(result02_cuda, result02_opencl)\r\n\r\n\tdef tearDown(self):\r\n\r\n\t\tprint(\"Test finished!\")\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n #print test results\r\n\t#tf.test.main()\r\n\r\n\t#--------------------- Generate the test report --------------\r\n\r\n\t# build the test suite\r\n\tsuite = 
unittest.TestSuite()\r\n\tsuite.addTest(AddOneTest(\"test01\"))\r\n\tsuite.addTest(AddOneTest(\"test02\"))\r\n\r\n\tnow = time.strftime(\"%Y-%m-%d_%H-%M-%S\", time.localtime())\r\n\tfp = open(now +'-'+ 'result.html', 'wb')\r\n\r\n # define the report format\r\n\trunner = HTMLTestRunner.HTMLTestRunner(\r\n\t\tstream=fp,\r\n\t\ttitle='AddOneKernel test report',\r\n\t\tdescription=u'Test case execution status:')\r\n\r\n # run the test cases\r\n\trunner.run(suite)\r\n # close the report file\r\n\tfp.close()\r\n\r\n\r\n","repo_name":"lilinxi/OpenCLDemo","sub_path":"kernel_test(培训)/addone_test/kernel_test.py","file_name":"kernel_test.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"35806923833","text":"import sys\nsys.path.append(\"src\")\nimport os\nimport numpy as np\nimport pickle\nimport scipy.io\nfrom scipy.io import loadmat\nfrom autoencoder import full_network\nfrom training import create_feed_dictionary\nfrom sindy_utils import sindy_simulate\nfrom sindy_utils import sindy_model\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pysindy as ps\nfrom pysindy.differentiation import SmoothedFiniteDifference\n\ndef get_hankel(x, dimension, delays, skip_rows=1):\n # if skip_rows>1:\n # delays = len(x) - delays * skip_rows\n H = np.zeros((dimension, delays))\n for j in range(delays):\n H[:, j] = x[j*skip_rows:j*skip_rows+dimension]\n return H\n\ndef get_hankel_svd(H, reduced_dim):\n U, s, VT = np.linalg.svd(H, full_matrices=False)\n rec_v = np.matmul(VT[:reduced_dim, :].T, np.diag(s[:reduced_dim]))\n return U, s, VT, rec_v\n\nhankel = np.zeros((10000,30))\ndhankel = np.zeros((10000,30))\nvhankel = np.zeros((5000,30))\nvdhankel = np.zeros((5000,30))\nmat = scipy.io.loadmat('f2o09m.mat') \nhnk = scipy.io.loadmat('h.mat') \nhankel10000 = hnk['H'].T\nfor j in range(1,30):\n for k in range(1,10000) :\n hankel[k,j] = hankel10000[k,j]\n\nfor j in range(1,30):\n for k in range(1000,1500) :\n vhankel[k-1000,j] = hankel10000[k-1000,j] \n\nt0 = np.arange(0, 240, .02) \ntv0 = np.arange(0, 160, .02)\nt = np.arange(0, 200, .02) #10000\ntv = np.arange(0, 100, .02) #5000\n\nsfd = SmoothedFiniteDifference()\nfor j in range(1,30):\n dhankel[:,j] = sfd._differentiate(hankel[:,j],t)\nsfd = SmoothedFiniteDifference()\nfor j in range(1,30):\n vdhankel[:,j] = sfd._differentiate(vhankel[:,j],tv)\n\n\nvalues = mat['val'] #index1: resp, index2:ecg, index3: bp\necg = values[2]\ndata_end = 12000\nvaldata_end = 20000\necg10000= ecg[0:data_end]\nvalecg10000= ecg[data_end:valdata_end]\ntau = 1\nn_delays = 30\nH_ecg = get_hankel(ecg10000,10000,n_delays,tau)\n\nsfd = SmoothedFiniteDifference()\ndecg10000 = sfd._differentiate(ecg10000,t0)\n\nsfd = SmoothedFiniteDifference()\ndvalecg10000 = sfd._differentiate(valecg10000,tv0)\n\ndH_ecg = get_hankel(decg10000,10000,n_delays,tau)\nH_ecg_validation = get_hankel(valecg10000,5000,n_delays-10,tau)\ndH_ecg_validation = get_hankel(dvalecg10000,5000,n_delays-10,tau)\n\n\nt0 = np.arange(0, 240, .02)\ntv0 = np.arange(0, 160, .02)\nt = np.arange(0, 200, .02)\ntv = np.arange(0, 100, .02)\n\ndata = {}\ndata['t'] = t\ndata['x'] = hankel\ndata['dx'] = dhankel\n\nvalidata = {}\nvalidata['t'] = tv\nvalidata['x'] = vhankel\nvalidata['dx'] = vdhankel\n\n\n# data = {}\n# data['t'] = t\n# data['x'] = H_ecg\n# data['dx'] = dH_ecg\n\n\ndata_path = os.getcwd() + '\\\\'\n#save_name = 'modelecg'\nsave_name = 'model'\nparams = pickle.load(open(data_path + save_name + '_params.pkl', 'rb'))\nparams['save_name'] = 
data_path + save_name\n\nautoencoder_network = full_network(params)\nlearning_rate = tf.placeholder(tf.float32, name='learning_rate')\nsaver = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))\n\ntensorflow_run_tuple = ()\nfor key in autoencoder_network.keys():\n tensorflow_run_tuple += (autoencoder_network[key],)\n\n \n# t = np.arange(0,20,.01)\n# z0 = np.array([[-8,7,27]])\n\n# data['z'] = data['z'].reshape((-1,params['latent_dim']))\n# data['dz'] = data['dz'].reshape((-1,params['latent_dim']))\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n saver.restore(sess, data_path + save_name)\n test_dictionary = create_feed_dictionary(data, params)\n tf_results = sess.run(tensorflow_run_tuple, feed_dict=test_dictionary)\n\ntest_set_results = {}\nfor i,key in enumerate(autoencoder_network.keys()):\n test_set_results[key] = tf_results[i]\n\nz_sim = sindy_simulate(test_set_results['z'][0],t,\n params['coefficient_mask']*test_set_results['sindy_coefficients'],\n params['poly_order'], params['include_sine'])\n\nthreshold = 0.0\nmodel = ps.SINDy(\n differentiation_method=ps.SmoothedFiniteDifference(),\n optimizer=ps.STLSQ(threshold=threshold),\n feature_library=ps.PolynomialLibrary(degree=params['poly_order'], interaction_only =True))\nmodel.fit(z_sim, t=t, ensemble=True)\nmodel.print()\n\ndecoder_x_error = np.mean((data['x'] - test_set_results['x_decode'])**2)/np.mean(data['x']**2)\ndecoder_dx_error = np.mean((data['dx'] - test_set_results['dx_decode'])**2)/np.mean(data['dx']**2)\nsindy_dz_error = np.mean((test_set_results['dz'] - test_set_results['dz_predict'])**2)/np.mean(test_set_results['dz']**2)\n\nprint('Rekon relative error: %f' % decoder_x_error)\nprint('Decoder relative SINDy error: %f' % decoder_dx_error)\nprint('SINDy reltive error, z: %f' % sindy_dz_error)\n\nXi_plot = (params['coefficient_mask']*test_set_results['sindy_coefficients'])\nXi_plot[Xi_plot==0] = np.inf\nplt.figure(figsize=(1,2))\nplt.imshow(Xi_plot, interpolation='none')\nplt.xticks([])\nplt.yticks([])\nplt.axis('off')\nplt.clim([-10,30])\n\n\nplt.show()\n\n","repo_name":"le1nax/pysindyBA","sub_path":"NN/analyse_ecg.py","file_name":"analyse_ecg.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"13173640599","text":"import re\r\n\r\nclass Tools:\r\n \r\n @staticmethod\r\n def getString(instr):\r\n if type(instr)==str:\r\n instr = instr\r\n elif type(instr)==int:\r\n instr = str(instr)\r\n else:\r\n instr = ''\r\n return instr\r\n\r\n @staticmethod\r\n def getId(text):\r\n if type(text) == str:\r\n text = text\r\n elif type(text)== dict and text[\"id\"]:\r\n text = text[\"id\"]\r\n elif type(text)== dict and text[\"userid\"]:\r\n text = text[\"userid\"]\r\n elif type(text)== dict and text[\"roomid\"]:\r\n text = text[\"roomid\"]\r\n \r\n if type(text) != str and type(text) != int: return \"\"\r\n if type(text) == int:\r\n text = str(text)\r\n # print(\"getId\",text, text.lower())\r\n return re.sub('[^a-z0-9]','',(\"\" + text).lower())\r\n\r\nclass BasicEffect:\r\n def __init__(self, data , **kwargs):\r\n self.exists = True\r\n data.update(kwargs)\r\n\r\n self.id = data[\"id\"] if \"id\" in data else \"\"\r\n self.name = Tools.getString(data[\"name\"]).strip(' ')\r\n self.effectType = Tools.getString(data[\"effectType\"]) if \"effectType\" in data else 'Type'\r\n self.exists = self.exists and self.id\r\n self.num = data[\"num\"] if \"num\" in data else 0\r\n self.gen = 
data[\"gen\"] if \"gen\" in data else 0\r\n self.shortDesc = data[\"shortDesc\"] if \"shortDesc\" in data else \"\"\r\n self.desc = data[\"desc\"] if \"desc\" in data else \"\"\r\n self.isNonstandard = data[\"isNonstandard\"] if \"isNonstandard\" in data else None\r\n self.duration = data[\"duration\"] if \"duration\" in data else None\r\n self.noCopy = data[\"noCopy\"] if \"noCopy\" in data else None\r\n self.affectFainted = data[\"affectFainted\"] if \"affectFainted\" in data else None\r\n self.status = data[\"status\"] if \"status\" in data else None\r\n self.weather = data[\"weather\"] if \"weather\" in data else None\r\n self.sourceEffect = data[\"sourceEffect\"] if \"sourceEffect\" in data else \"\"\r\n\r\n\r\nclass Format(BasicEffect):\r\n def __init__(self, data, **kwargs):\r\n super().__init__(data, **kwargs)\r\n # data = self\r\n\r\n self.mod = Tools.getString(data[\"mod\"]) if \"mod\" in data else \"gen8\"\r\n self.effectType = Tools.getString(data[\"effectType\"]) if \"effectType\" in data else \"Format\"\r\n self.debug = data[\"debug\"] if \"debug\" in data else None\r\n self.rated = (data[\"rated\"] is not False) if \"rated\" in data else False\r\n self.gameType = data[\"gameType\"] if \"gameType\" in data else \"singles\"\r\n self.ruleset = data[\"ruleset\"] if \"ruleset\" in data else []\r\n self.baseRuleset = data[\"baseRuleset\"] if \"baseRuleset\" in data else []\r\n self.banlist = data[\"banlist\"] if \"banlist\" in data else []\r\n self.restricted = data[\"restricted\"] if \"restricted\" in data else []\r\n self.unbanlist = data[\"unbanlist\"] if \"unbanlist\" in data else []\r\n self.customRules = data[\"customRules\"] if \"customRules\" in data else None\r\n self.ruleTable = None\r\n self.teamLength = data[\"teamLength\"] if \"teamLength\" in data else None\r\n self.onBegin = data[\"onBegin\"] if \"onBegin\" in data else None\r\n self.minSourceGen = data[\"minSourceGen\"] if \"minSourceGen\" in data else None\r\n self.maxLevel = data[\"maxLevel\"] if \"maxLevel\" in data else 100\r\n self.defaltLevel = data[\"defaltLevel\"] if \"defaltLevel\" in data else self.maxLevel\r\n self.forcedLevel = data[\"forcedLevel\"] if \"forcedLevel\" in data else None\r\n self.maxForcedLevel = data[\"maxForcedLevel\"] if \"maxForcedLevel\" in data else None\r\n self.noLog = data[\"noLog\"] if \"noLog\" in data else None\r\n\r\nclass Condition(BasicEffect):\r\n def __init__(self, data, **kwargs):\r\n super().__init__(data, **kwargs)\r\n self.effectType = self.effectType if any(x in self.effectType for x in [\"Weather\", \"Status\"]) else \"Condition\"\r\n \r\n\r\n\r\nclass Species(BasicEffect):\r\n def __init__(self, data, **kwargs):\r\n super().__init__(data, **kwargs)\r\n\r\n self.fullname = 'pokemon: '+ data[\"name\"]\r\n self.effectType = 'Pokemon'\r\n self.id = data[\"id\"] if \"id\" in data else \"\"\r\n self.name = data[\"name\"]\r\n self.baseSpecies = data[\"baseSpecies\"] if \"baseSpecies\" in data else self.name\r\n self.forme = data[\"forme\"] if \"forme\" in data else \"\"\r\n self.baseForme = data[\"baseForme\"] if \"baseForme\" in data else \"\"\r\n self.cosmeticFormes = data[\"cosmeticFormes\"] if \"cosmeticFormes\" in data else \"\"\r\n self.otherFormes = data[\"otherFormes\"] if \"otherFormes\" in data else \"\"\r\n self.formeOrder = data[\"formeOrder\"] if \"formeOrder\" in data else \"\"\r\n self.spriteid = data[\"spriteid\"] if \"spriteid\" in data else \\\r\n (Tools.getId(self.baseSpecies) + ((\"-\"+ Tools.getId(self.forme)) if self.baseSpecies is not self.name else 
\"\" ))\r\n self.abilities = data[\"abilities\"] if \"abilities\" in data else {\"0\": \"\"}\r\n self.types = data[\"types\"] if \"types\" in data else [\"???\"]\r\n self.addedType = data[\"addedType\"] if \"addedType\" in data else None\r\n self.prevo = data[\"prevo\"] if \"prevo\" in data else \"\"\r\n self.tier = data[\"tier\"] if \"tier\" in data else \"\"\r\n self.doublesTier = data[\"doublesTier\"] if \"doublesTier\" in data else \"\"\r\n self.evos = data[\"evos\"] if \"evos\" in data else []\r\n self.evoType = data[\"evoType\"] if \"evoType\" in data else None\r\n self.evoMove = data[\"evoMove\"] if \"evoMove\" in data else None\r\n self.evoLevel = data[\"evoLevel\"] if \"evoLevel\" in data else None\r\n self.nfe = data[\"nfe\"] if \"nfe\" in data else False\r\n self.eggGroups = data[\"eggGroups\"] if \"eggGroups\" in data else []\r\n self.gender = data[\"gender\"] if \"gender\" in data else \"\"\r\n if \"genderRatio\" in data:\r\n self.genderRatio = data[\"genderRatio\"]\r\n else:\r\n if self.gender == \"M\":\r\n self.genderRatio = {\"M\": 1, \"F\": 0}\r\n elif self.gender == \"F\":\r\n self.genderRatio = {\"M\": 0, \"F\": 1}\r\n elif self.gender == \"N\":\r\n self.genderRatio = {\"M\": 0, \"F\": 0}\r\n else:\r\n self.genderRatio = {\"M\": 0.5, \"F\": 0.5}\r\n self.requiredItem = data[\"requiredItem\"] if \"requiredItem\" in data else None\r\n self.requiredItems = data[\"requiredItems\"] if \"requiredItems\" in data else ([self.requiredItem] if self.requiredItem else None)\r\n self.baseStats = data[\"baseStats\"] if \"baseStats\"in data else {\"hp\": 0, \"atk\":0, \"def\":0, \"spa\":0, \"spd\":0, \"spe\":0}\r\n self.weightkg = data[\"weightkg\"] if \"weightkg\" in data else 0\r\n self.seighthg = int(self.weightkg) * 10\r\n self.heightm = data[\"heightm\"] if \"heightm\" in data else 0\r\n self.color = data[\"color\"] if \"color\" in data else \"\"\r\n self.unreleasedHidden = data[\"unreleasedHidden\"] if \"unreleasedHidden\" in data else False\r\n self. 
maleOnlyHidden = data[\"maleOnlyHidden\"] if \"maleOnlyHidden\" in data else None\r\n self.maxHP = data[\"maxHP\"] if \"maxHP\" in data else None\r\n self.isMega = False # temp\r\n self.canGigantamax = data[\"canGigantamax\"] if \"canGigantamax\" in data else None\r\n self.gmaxUnreleased = data[\"gmaxUnreleased\"] if \"gmaxUnreleased\" in data else None\r\n self.cannotDynamax = data[\"cannotDynamax\"] if \"cannotDynamax\" in data else None\r\n self.battleOnly = data[\"battleOnly\"] if \"battleOnly\" in data else (self.baseSpecies if self.isMega else None)\r\n self.changeFrom = data[\"changesFrom\"] if \"changesFrom\" in data else\\\r\n (self.battleOnly if self.battleOnly is not self.baseSpecies else self.baseSpecies)\r\n \r\n if hasattr(self, 'gen') and self.num >= 1:\r\n if self.num >= 810 or any((s in self.forme) for s in [\"Gmax\", \"Galar\", \"Galar-Zen\"]):\r\n self.gen = 8\r\n elif self.num >= 722 or self.forme.startswith(\"Alola\") or self.forme == \"Starter\":\r\n self.gen = 7\r\n elif self.forme == \"Primal\":\r\n self.gen = 6\r\n self.isPrimal = True\r\n self.battleOnly = self.baseSpecies\r\n elif self.num >= 650 or self.isMega:\r\n self.gen = 6\r\n elif self.num >= 494:\r\n self.gen = 5\r\n elif self.num >= 387:\r\n self.gen = 4\r\n elif self.num >= 252:\r\n self.gen = 3\r\n elif self.num >= 152:\r\n self.gen = 2\r\n else:\r\n self.gen = 1\r\n\r\n\r\n\r\n\r\nclass Move(BasicEffect):\r\n def __init__(self, data, **kwargs):\r\n super().__init__(data, **kwargs)\r\n # print(data)\r\n\r\n self.fullname =\"move: \" + self.name\r\n self.effectType = \"Move\"\r\n self.type = Tools.getString(data[\"type\"])\r\n self.target = data[\"target\"]\r\n self.basePower = int(data[\"basePower\"])\r\n self.accuracy = data[\"accuracy\"]\r\n self.critRatio = float(data[\"critRatio\"]) if \"critRatio\" in data else float(1)\r\n self.secondary = data[\"secondaries\"] if \"secondaries\" in data else None\r\n if \"secondaries\" in data and data[\"secondaries\"] != \"\":\r\n self.secondaries = data[\"secondaries\"]\r\n elif self.secondary:\r\n self.secondaries = self.secondary and [self.secondary]\r\n else:\r\n self.secondaries = None\r\n self.priority = int(data[\"priority\"]) if \"priority\" in data else 0\r\n self.category = data[\"category\"] if \"category\" in data else None\r\n self.defensiveCategory = data[\"defensiveCategory\"] if \"defensiveCategory\" in data else None\r\n self.useTargetOffensive = data[\"useTargetOffensive\"] if \"useTargetOffensive\" in data else None\r\n self.useSourceDefensiveAsOffensive = data[\"useSourceDefensiveAsOffensive\"] if \"useSourceDefensiveAsOffensive\" in data else None\r\n self.ignoreNegativeOffensive = data[\"ignoreNegativeOffensive\"] if \"ignoreNegativeOffensive\" in data else None\r\n self.ignorePositiveDeffensive = data[\"ignorePositiveDeffensive\"] if \"ignorePositiveDeffensive\" in data else None\r\n self.ignoreOffensive = data[\"ignoreOffensive\"] if \"ignoreOffensive\" in data else None\r\n self.igdnoreDefensive = data[\"igdnoreDefensive\"] if \"igdnoreDefensive\" in data else None\r\n self.ignoreImmunity = data[\"ignoreImmunity\"] if \"ignoreImmunity\" in data else self.category == \"Status\"\r\n self.pp = int(data[\"pp\"])\r\n self.noPPBoosts = data[\"noPPBoosts\"] if \"noPPBoosts\" in data else None\r\n self.isZ = data[\"isZ\"] if \"isZ\" in data else False\r\n self.isMax = data[\"isMax\"] if \"isMax\" in data else False\r\n self.flags = data[\"flags\"] if \"flags\" in data else {}\r\n if \"selfSwitch\" in data:\r\n if 
type(data[\"selfSwitch\"])==str:\r\n self.selfSwitch = data[\"selfSwitch\"]\r\n else:\r\n self.selfSwitch = None\r\n else:\r\n self.selfSwitch = None\r\n self.pressureTarget = data[\"pressureTarget\"] if \"pressureTarget\" in data else \"\"\r\n self.nonGhostTarget = data[\"nonGhostTarget\"] if \"nonGhostTarget\" in data else \"\"\r\n self.ignoreAbility = data[\"ignoreAbility\"] if \"ignoreAbility\" in data else False\r\n self.damage = data[\"damage\"] if \"damage\" in data else None\r\n self.spreadHit = data[\"spreadHit\"] if \"spreadHit\" in data else False\r\n self.forceSTAB = data[\"forceSTAB\"] if \"forceSTAB\" in data else None\r\n self.noSketch = data[\"noSketch\"] if \"noSketch\" in data else None\r\n self.stab = data[\"stab\"] if \"stab\" in data else None\r\n if \"volatileStatus\" in data:\r\n if type(data[\"volatileStatus\"])==str:\r\n self.volatileStatus = data[\"volatileStatus\"]\r\n else:\r\n None\r\n else:None\r\n self.maxMove = {}\r\n if (self.category != \"Status\" and self.id != \"struggle\"):\r\n self.maxMove = {\"basePower\":1}\r\n if self.isMax or self.isZ:\r\n # already initialized to 1\r\n pass\r\n elif (self.basePower == None):\r\n self.maxMove[\"basePower\"] = 100\r\n elif \"Fighting\" in self.type or \"Poison\" in self.type:\r\n if self.basePower >= 150:\r\n self.maxMove[\"basePower\"] = 100\r\n elif self.basePower >= 110:\r\n self.maxMove[\"basePower\"] = 95\r\n elif self.basePower >= 75:\r\n self.maxMove[\"basePower\"] = 90\r\n elif self.basePower >= 65:\r\n self.maxMove[\"basePower\"] = 85\r\n elif self.basePower >= 55:\r\n self.maxMove[\"basePower\"] = 80\r\n elif self.basePower >= 45:\r\n self.maxMove[\"basePower\"] = 75\r\n else:\r\n self.maxMove[\"basePower\"] = 70\r\n else:\r\n if self.basePower >= 150:\r\n self.maxMove[\"basePower\"] = 150\r\n elif self.basePower >= 110:\r\n self.maxMove[\"basePower\"] = 140\r\n elif self.basePower >= 75:\r\n self.maxMove[\"basePower\"] = 130\r\n elif self.basePower >= 65:\r\n self.maxMove[\"basePower\"] = 120\r\n elif self.basePower >= 55:\r\n self.maxMove[\"basePower\"] = 110\r\n elif self.basePower >= 45:\r\n self.maxMove[\"basePower\"] = 100\r\n else:\r\n self.maxMove[\"basePower\"] = 90\r\n\r\n if self.gen == None:\r\n if self.num >= 743:\r\n self.gen = 8\r\n elif self.num >= 622:\r\n self.gen = 7\r\n elif self.num >= 560:\r\n self.gen = 6\r\n elif self.num >= 468:\r\n self.gen = 5\r\n elif self.num >= 355:\r\n self.gen = 4\r\n elif self.num >= 252:\r\n self.gen = 3\r\n elif self.num >= 166:\r\n self.gen = 2\r\n elif self.num >= 1:\r\n self.gen = 1\r\n\r\n\r\n \r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n","repo_name":"nohamanona/alpha-poke","sub_path":"sim/dex_data.py","file_name":"dex_data.py","file_ext":"py","file_size_in_byte":14201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19579727654","text":"from pyqpanda import *\r\nimport pyqpanda.pyQPanda as pq\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nimport numpy as np\r\n\r\ndef qfg(qlist):\r\n circ = QCircuit()\r\n \r\n qnum = len(qlist)\r\n for i in range(0,qnum):\r\n circ.insert(H(qlist[qnum-1-i]))\r\n for j in range(i+1,qnum):\r\n circ.insert(CR(qlist[qnum-1-j],qlist[qnum-1-i],math.pi/(1<<(j-i))))\r\n \r\n for i in range(0,qnum//2):\r\n circ.insert(CNOT(qlist[i],qlist[qnum-1-i]))\r\n circ.insert(CNOT(qlist[qnum-1-i],qlist[i])) \r\n circ.insert(CNOT(qlist[i],qlist[qnum-1-i]))\r\n \r\n return circ\r\n\r\ndef plotBar(xdata,ydata): \r\n plt.rcParams['font.sans-serif']=['Arial']\r\n 
plt.title(\"Origin Q\",loc=\"right\",alpha=0.5)\r\n plt.ylabel(\"prob\")\r\n plt.xlabel(\"States\")\r\n \r\n rects1 = plt.bar(x=xdata,height=ydata,width=0.8,color='blue')\r\n plt.ylim(0,1)\r\n #plt.xticks(xdata)\r\n plt.xticks(rotation=60)\r\n \r\n #for rect in rects1:\r\n #height = rect.get_height()\r\n #plt.text(rect.get_x() + rect.get_width() / 2, height+1, str(height), ha=\"center\",va=\"bottom\")\r\n \r\n plt.show()\r\n\r\ninit(QMachineType.CPU)\r\nqubits = qAlloc_many(4)\r\ncbits = cAlloc_many(4)\r\n\r\n\r\n\r\n#print(len(qubits))\r\n\r\n# 构建量子程序\r\nprog = QProg()\r\nprog.insert(qfg(qubits)).insert(measure_all(qubits,cbits))\r\n\r\nresult1 = run_with_configuration(prog,cbits,100)\r\nresult2 = directly_run(prog)\r\n# 获得目标量子比特的概率测量结果,其对应的下标为二进制。\r\nresult3 = prob_run_dict(prog,qubits,-1)\r\n#获得目标量子比特的概率测量结果, 其对应的下标为十进制。\r\nresult4 = prob_run_tuple_list(prog,qubits,-1)\r\n#获得目标量子比特的概率测量结果, 其对应的下标为二进制。\r\nresult5 = prob_run_list(prog,qubits,-1)\r\n\r\nprint(\"量子态出现次数:\",result1)\r\nprint(\"-----------------------\")\r\nprint(\"测量结果存储至经典寄存器:\",result2)\r\nprint(\"-----------------------\")\r\nprint(\"概率测量二进制:\",result3)\r\n\r\nxdata=list(result3.keys())\r\nydata=list(result3.values())\r\nprint(xdata)\r\nprint(ydata)\r\nplotBar(xdata,ydata)\r\n\r\npq.draw_qprog(prog)\r\n\r\n","repo_name":"Zarkpx/quantum-algorithm","sub_path":"qpanda-fourier.py","file_name":"qpanda-fourier.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"43364236811","text":"import random\n\n\nclass Card:\n def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit\n self.value = self.get_value()\n\n def get_value(self):\n if self.rank == \"Ace\" or self.rank == \"A\":\n val = 11\n # wasn't really clear on whether or not it was supposed to be just a letter or the word typed out\n # because in the deck class, the global variable has words spelled out but in the card one, it's a single letter\n elif self.rank == \"Jack\" or self.rank == \"Queen\" or self.rank == 'King' or self.rank == 'J' or self.rank == 'Q' or self.rank == 'K':\n val = 10\n else:\n val = int(self.rank)\n\n return val\n\n\nranks = [\"Ace\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",\n \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\"]\nsuits = [\"Clubs\", \"Diamonds\", \"Hearts\", \"Spades\"]\n\n\nclass Deck:\n def __init__(self):\n self.cards = []\n for s in suits:\n for r in ranks:\n new_card = Card(r, s)\n self.cards.append(new_card)\n\n def shuffle(self):\n random.shuffle(self.cards)\n\n def deal_card(self):\n dealt_card = self.cards[len(self.cards) - 1]\n self.cards.pop(len(self.cards) - 1)\n return dealt_card\n\n\nclass Hand:\n def __init__(self):\n self.cards = []\n\n def add_card(self, card):\n self.cards.append(card)\n\n def count(self):\n return len(self.cards)\n\n def points(self):\n total = 0\n hasAce = False\n for c in self.cards:\n total += c.value\n # challenge part!\n if (c.rank == \"Ace\" or c.rank == \"A\"):\n hasAce = True\n if hasAce and total > 21:\n total -= 10\n hasAce = False\n\n return total\n\n\nif __name__ == \"__main__\":\n deck = Deck()\n deck.shuffle()\n\n print(\"HAND\")\n hand = Hand()\n for i in range(3):\n hand.add_card(deck.deal_card())\n\n for i in range(hand.count()):\n card = hand.cards[i]\n print(card.rank + \" of \" + card.suit)\n\n print(\"Hand points:\", hand.points())\n\n # added some feedback to user :)\n if hand.points() > 21:\n print(\"Bust! 
You lose :(\")\n elif hand.points() == 21:\n print(\"BLACKJACK!\")\n else:\n print(\"meh\")\n","repo_name":"dianeshan/ucr-cs-classes","sub_path":"UASpython/finalproject.py","file_name":"finalproject.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30336443064","text":"'''\r\nCreated on May 25, 2009\r\n\r\n@author: Brad Gaiser\r\n\r\nThis module provides a prototype implementation of the View in the MVC\r\nimplementation of the Maintenance Records System for Dermico Auto. The\r\nUI code is a bit more complex, so has taken more time to get into place\r\nthan anticipated. As a consequence, this quick & dirty code was written\r\nto provide a framework for integration testing and making sure that the\r\ncontrol flow managed by the Controller classes works.\r\n\r\nThis also is providing a 'spec' for the full implementation of the UI\r\nusing Django templates to be provided by Jerome Calvo. This code has\r\nbeen written with the intent that all if not most of the code will be\r\nreplaced. As such, minimal effort has been put into documention.\r\n'''\r\n\r\nNEW_CUSTOMER = 1\r\nFIND_CUSTOMER = 2\r\nINPUT_CUSTOMER = 3\r\nINPUT_WORKORDER = 4\r\n\r\nfrom datetime import datetime\r\nfrom MaintAppObjects import nz\r\nfrom MaintAppObjects import Workorder\r\n\r\nclass MaintAppView(object):\r\n \"\"\" Class to implement the View part of the MVC implementation of the\r\n Maintenance Records System. This class provides the public interface\r\n that the Controller works with to specify which parts of the interface\r\n are active and what the default values are for the various form fields\r\n (and in some cases buttons and links.)\r\n \r\n There are three primary assumptions behind this interface:\r\n 1 - The View is stateless with all configuration information needed to\r\n render the screen at any one time being passed in from the controller.\r\n 2 - The order in which the methods for setting the mode and configuration\r\n information should be considered 'random'.\r\n 3 - The serve_content method will be called after all mode and configuration\r\n information has been loaded. In general 2 & 3 imply that almost all of\r\n the html data stream preparation happens during the serve_content call\r\n and that the mode and configuration are cached until that call.\r\n (Of course item 1 implies that all of the cached information is lost\r\n once the html is fully served.) 
\r\n \"\"\"\r\n \r\n def __init__(self, controller):\r\n self.__macController = controller\r\n self.__sidePanel = SidePanelSubview()\r\n self.__customerPanel = CustomerSubview()\r\n self.__vehiclePanel = VehicleSubview()\r\n self.__workorderPanel = WorkorderSubview()\r\n self.__mainMode = NEW_CUSTOMER\r\n return None\r\n \r\n def set_new_customer_mode(self):\r\n self.__mainMode = NEW_CUSTOMER\r\n self.__customerPanel._configure_input_mode()\r\n \r\n def set_search_mode(self):\r\n self.__mainMode = FIND_CUSTOMER\r\n self.__customerPanel._configure_search_mode()\r\n \r\n def set_search_results_mode(self):\r\n self.__mainMode = FIND_CUSTOMER\r\n \r\n def set_customer_vehicle_mode(self):\r\n self.__mainMode = INPUT_CUSTOMER\r\n self.__customerPanel._configure_input_mode()\r\n \r\n def set_workorder_mode(self):\r\n self.__mainMode = INPUT_WORKORDER\r\n \r\n def configureHiddenFields(self, customer_id, vehicle_id, workorder_id):\r\n self.__sidePanel._configure_hidden_fields(customer_id, vehicle_id, workorder_id)\r\n self.__vehiclePanel._configureActiveVehicle(vehicle_id)\r\n self.__workorderPanel._configureActiveWorkorder(workorder_id)\r\n \r\n def configureErrorMessages(self, errorObj):\r\n \"\"\" Errors is a list of (field name, error type) tuples to be used to format\r\n errors and highlighting fields where data validation errors were detected.\r\n \"\"\"\r\n self.__sidePanel._configureErrorMessages(errorObj)\r\n pass\r\n \r\n def configureSidePanelContent(self, activeElement,\r\n openWorkorders,\r\n completedWorkorders,\r\n debug_message):\r\n self.__sidePanel._configure_selection(activeElement)\r\n self.__sidePanel._configure_content(openWorkorders, \r\n completedWorkorders, \r\n debug_message)\r\n \r\n def configureCustomerContent(self, customer_info):\r\n self.__customerPanel._configure_content(customer_info)\r\n\r\n def configureSearchResults(self, customer_list):\r\n self.__customerPanel._configure_search_results(customer_list)\r\n \r\n def configureVehicleContent(self, vehicle_list):\r\n self.__vehiclePanel._configure_content(vehicle_list)\r\n \r\n def configureWorkorderHeader(self, customer, vehicle):\r\n self.__workorderPanel._configureHeader(customer, vehicle)\r\n \r\n def configureWorkorderContent(self, workorder_list):\r\n self.__workorderPanel._configureWorkorderContent(workorder_list)\r\n \r\n def showSaveDialog(self, request_button, request_tag):\r\n \"\"\" This method is called to set the UI up to display a save dialog\r\n on the browser with Yes/No/Cancel buttons. This dialog should be\r\n in a separate html Form with the callback being directed to a\r\n different page...
. In\r\n addition, all other form fields should be disabled so the user\r\n cannot change anything while the dialog is active.\r\n \r\n The string in 'request_button' and the one in 'request_tag' \r\n should be saved in hidden form fields of the same names and\r\n associated with this dialog form so they get submitted when\r\n the user responds to the dialog.\r\n \"\"\"\r\n pass\r\n \r\n def serve_content(self, reqhandler):\r\n self.__serve_header(reqhandler)\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \"\"\")\r\n \r\n if self.__mainMode == INPUT_CUSTOMER:\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \"\"\")\r\n \r\n reqhandler.response.out.write(\"\"\"\r\n
\"\"\")\r\n if self.__sidePanel is not None:\r\n self.__sidePanel._serve_content(reqhandler)\r\n reqhandler.response.out.write(\"\"\"\r\n \"\"\")\r\n \r\n if self.__mainMode == INPUT_WORKORDER:\r\n if self.__workorderPanel is not None:\r\n self.__workorderPanel._serve_content(reqhandler)\r\n else:\r\n if self.__customerPanel is not None:\r\n self.__customerPanel._serve_content(reqhandler)\r\n reqhandler.response.out.write(\"\"\"\r\n
\"\"\")\r\n if self.__vehiclePanel is not None:\r\n self.__vehiclePanel._serve_content(reqhandler)\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n
\r\n \r\n \r\n \"\"\")\r\n return None\r\n \r\n def __serve_header(self, reqhandler):\r\n reqhandler.response.out.write(\r\n\"\"\"\r\n\r\n \r\n Dermico Auto Maintenance Records System\r\n \r\n \r\n \r\n \"\"\")\r\n return None\r\n\r\n \r\nclass SidePanelSubview(object):\r\n def __init__(self):\r\n self.__itemSelected = 1\r\n self.__comments = \"\"\r\n self.__customerId = \"-1\"\r\n self.__vehicleId = \"-1\"\r\n self.__workorderId = \"-1\"\r\n self.__errorObj = None\r\n return None\r\n \r\n def _configure_selection(self, whichItem):\r\n \"\"\" whichItem is an integer indicating what is to be highlighted in the\r\n side panel:\r\n 0 - Nothing is highlighted.\r\n 1 - The New Customer button is highlighted\r\n 2 - The Find Customer button is highlighted\r\n 3 - The open or completed workorder whose primary key matches the\r\n value of the __workorderId member variable is to be highlighted.\r\n If no match is found, nothing is highlighted. \r\n \"\"\"\r\n self.__itemSelected = whichItem\r\n \r\n def _configure_content(self, openWorkorders, completedWorkorders, debug_message):\r\n self.__comments = debug_message\r\n \r\n def _configure_hidden_fields(self, customer_id, vehicle_id, workorder_id):\r\n self.__customerId = customer_id\r\n self.__vehicleId = vehicle_id\r\n self.__workorderId = workorder_id\r\n return None\r\n \r\n def _configureErrorMessages(self, errorObj):\r\n self.__errorObj = errorObj\r\n \r\n def _serve_content(self, reqhandler):\r\n linkClass = \"s_side_links\"\r\n activeLinkClass = \"s_active_side_links\"\r\n \r\n reqhandler.response.out.write('

Customer Input:

')\r\n css_class = activeLinkClass if (self.__itemSelected == 1) else linkClass\r\n reqhandler.response.out.write('

' % css_class)\r\n css_class = activeLinkClass if (self.__itemSelected == 2) else linkClass\r\n reqhandler.response.out.write('

' % css_class)\r\n reqhandler.response.out.write('

Open Work Orders:

')\r\n reqhandler.response.out.write('

No Open Work Orders

')\r\n reqhandler.response.out.write('

Work Completed:

')\r\n reqhandler.response.out.write('

No Completed Work Orders

')\r\n reqhandler.response.out.write('
')\r\n reqhandler.response.out.write('

App Info:

')\r\n reqhandler.response.out.write('

%s

' % self.__comments)\r\n if self.__errorObj is not None:\r\n reqhandler.response.out.write('

%s

' % \\\r\n str(self.__errorObj)) \r\n reqhandler.response.out.write('' % self.__customerId)\r\n reqhandler.response.out.write('' % self.__vehicleId)\r\n reqhandler.response.out.write('' % self.__workorderId)\r\n return None\r\n \r\n \r\nclass CustomerSubview(object):\r\n def __init__(self):\r\n self.__customer = None\r\n self.__searchMode = False\r\n self.__searchResults = None\r\n return None\r\n \r\n def _configure_content(self, customerInfo):\r\n self.__customer = customerInfo\r\n return None\r\n \r\n def _configure_search_mode(self):\r\n self.__searchMode = True\r\n return None\r\n \r\n def _configure_input_mode(self):\r\n self.__searchMode = False\r\n return None\r\n \r\n def _configure_search_results(self, customer_list):\r\n self.__searchMode = True\r\n self.__searchResults = customer_list\r\n return None\r\n \r\n def _serve_content(self, reqhandler):\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \"\"\")\r\n if not self.__searchMode:\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.first_name))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.last_name))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.address1))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.city))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.state))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.zip))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__customer.phone1))\r\n reqhandler.response.out.write(\"\"\"\r\n

\r\n
\r\n \r\n

\r\n \r\n

\r\n
\"\"\")\r\n else:\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n

\r\n \r\n \r\n

\r\n \r\n \"\"\")\r\n if self.__searchResults is not None:\r\n reqhandler.response.out.write(\"
\")\r\n if len(self.__searchResults) == 0:\r\n reqhandler.response.out.write(\"\"\"\r\n

No customers match the search you requested.\r\n

\"\"\")\r\n else:\r\n for customer in self.__searchResults:\r\n link = \"/Search?cid=%s\" % customer.getId()\r\n reqhandler.response.out.write( \\\r\n '

%s %s

' % \\\r\n (link, nz(customer.first_name), nz(customer.last_name)))\r\n return None\r\n \r\nclass VehicleSubview(object):\r\n def __init__(self):\r\n return None\r\n \r\n def _configureActiveVehicle(self, vehicle_id):\r\n self.__activeVehicleId = vehicle_id\r\n \r\n def _configure_content(self, vehicle_list):\r\n self.__vehicles = vehicle_list\r\n self.__vehicle = self.__vehicles[0]\r\n return None\r\n \r\n def __retrieveActiveVehicle(self):\r\n for eachVehicle in self.__vehicles:\r\n if eachVehicle.getId() == self.__activeVehicleId:\r\n self.__vehicle = eachVehicle\r\n break\r\n return None\r\n \r\n def _serve_content(self, reqhandler):\r\n self.__retrieveActiveVehicle()\r\n reqhandler.response.out.write('
')\r\n tabNum = -1\r\n for eachVehicle in self.__vehicles:\r\n tabNum += 1\r\n if eachVehicle.getId() == self.__activeVehicleId:\r\n style = \"selected_tab_button\"\r\n else:\r\n style = \"tab_button\"\r\n if eachVehicle.getId() == \"-1\":\r\n reqhandler.response.out.write( \\\r\n '' % \\\r\n (style, tabNum, ))\r\n else:\r\n reqhandler.response.out.write( \\\r\n '' % \\\r\n (style, tabNum, str(eachVehicle.year)))\r\n #\r\n #\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n
\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__vehicle.make))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__vehicle.model))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__vehicle.year))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__vehicle.license))\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \"\"\")\r\n reqhandler.response.out.write('' %\r\n nz(self.__vehicle.vin))\r\n reqhandler.response.out.write(\"\"\"\r\n

\r\n
\r\n \r\n
\r\n

\r\n \r\n \r\n

\r\n
\r\n

\r\n \r\n \r\n

\r\n
\r\n \"\"\")\r\n return None\r\n \r\nclass WorkorderSubview(object):\r\n def __init__(self):\r\n self.__customer = None\r\n self.__vehicle = None\r\n self.__activeWorkorderId = None\r\n self.__workorders = None\r\n self.__workorder = None\r\n return None\r\n \r\n def _configureHeader(self, customer, vehicle):\r\n self.__customer = customer\r\n self.__vehicle = vehicle\r\n return None\r\n \r\n def _configureActiveWorkorder(self, workorder_id):\r\n self.__activeWorkorderId = workorder_id\r\n return None\r\n\r\n def _configureWorkorderContent(self, workorder_list):\r\n self.__workorders = workorder_list\r\n return None\r\n \r\n def __retrieveActiveWorkorder(self):\r\n for eachWorkorder in self.__workorders:\r\n if eachWorkorder.getId() == self.__activeWorkorderId:\r\n self.__workorder = eachWorkorder\r\n break\r\n return None\r\n \r\n def _serve_content(self, reqhandler):\r\n self.__retrieveActiveWorkorder()\r\n self.__output_workorder_header(reqhandler)\r\n self.__output_workorder_form(reqhandler)\r\n return None\r\n \r\n def __output_workorder_header(self, reqhandler):\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n
\r\n Customer info:\r\n \"\"\")\r\n reqhandler.response.out.write(\"%s %s; Contact: %s\" %\r\n (nz(self.__customer.first_name),\r\n nz(self.__customer.last_name),\r\n nz(self.__customer.phone1)))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n Vehicle info:\r\n \"\"\")\r\n reqhandler.response.out.write(\"%s %s %s; License: %s\" %\r\n (nz(self.__vehicle.year),\r\n nz(self.__vehicle.make),\r\n nz(self.__vehicle.model),\r\n nz(self.__vehicle.license)))\r\n reqhandler.response.out.write(\"\"\"\r\n
\"\"\")\r\n return None\r\n \r\n def __format_tabs(self, reqhandler):\r\n woIndex = -1\r\n for workorder in self.__workorders:\r\n woIndex += 1\r\n selClass = \"selected_tab_button\" if workorder.id==self.__activeWorkorderId \\\r\n else \"tab_button\"\r\n if workorder.id == \"-1\":\r\n label = \"New Work Order\"\r\n else:\r\n label = workorder.date_created.strftime(\"%b %d, %Y\")\r\n reqhandler.response.out.write( \\\r\n '' %\r\n (selClass, woIndex, label))\r\n \r\n def __output_workorder_form(self, reqhandler):\r\n reqhandler.response.out.write('
')\r\n self.__format_tabs(reqhandler)\r\n #\r\n #\r\n #\r\n reqhandler.response.out.write('
')\r\n reqhandler.response.out.write('
')\r\n dateText = nz(self.__workorder.getDateCreated())\r\n reqhandler.response.out.write( \\\r\n '' % \\\r\n dateText)\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n
\r\n Customer's Service Request:\r\n \r\n \"\"\")\r\n reqhandler.response.out.write( \\\r\n '' % \\\r\n nz(self.__workorder.mileage))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n \r\n

\"\"\")\r\n reqhandler.response.out.write( \\\r\n \"Work Order Date: %s\" % dateText)\r\n reqhandler.response.out.write(\"\"\"\r\n \r\n \r\n \r\n
\r\n
\r\n \r\n
\r\n
\r\n \r\n
\r\n
\r\n \r\n
\r\n \"\"\")\r\n reqhandler.response.out.write( \\\r\n ' Open' % \\\r\n ('checked=\"checked\"' if self.__workorder.status == Workorder.OPEN else ''))\r\n reqhandler.response.out.write( \\\r\n ' Completed' % \\\r\n ('checked=\"checked\"' if self.__workorder.status == Workorder.COMPLETED else ''))\r\n reqhandler.response.out.write( \\\r\n ' Closed' % \\\r\n ('checked=\"checked\"' if self.__workorder.status == Workorder.CLOSED else ''))\r\n reqhandler.response.out.write(\"\"\"\r\n
\r\n

\r\n \r\n \r\n

\r\n
\"\"\")\r\n return None\r\n ","repo_name":"caljer1/auto-repair-shop","sub_path":"src/MaintAppView.py","file_name":"MaintAppView.py","file_ext":"py","file_size_in_byte":29978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5400912331","text":"from data import insert\nimport redis\nimport time\nimport random\ndef createDummyData(userID,collectionID,sampleSize):\n\tnames = [\"Aaberg\",\"Aalst\",\"Aara\",\"Aaren\",\"Aarika\",\"Aaron\",\"Aaronson\",\"Ab\",\"Aba\",\"Abad\",\"Abagael\",\"Abagail\",\"Abana\",\n\t\t\"Abate\",\"Abba\",\"Abbate\",\"Abbe\",\"Abbey\",\"Abbi\",\"Abbie\",\"Abbot\",\"Abbotsen\",\"Abbotson\",\"Abbotsun\",\"Abbott\",\"Abbottson\",\n\t\t\"Abby\",\"Abbye\",\"Abdel\",\"Abdella\",\"Abdu\",\"Abdul\",\"Abdulla\",\"Abe\",\"Abebi\",\"Abel\",\"Abelard\",\"Abell\",\"Abercromby\",\"Abernathy\",\n\t\t\"Abernon\",\"Abert\",\"Abeu\",\"Abey\",\"Abie\",\"Abigael\",\"Abigail\",\"Abigale\",\"Abijah\",\"Abisha\",\"Abisia\",\"Abixah\",\"Abner\",\"Aborn\",\n\t\t\"Abott\",\"Abra\",\"Abraham\",\"Abrahams\",\"Abrahamsen\",\"Abrahan\",\"Abram\",\"Abramo\",\"Abrams\",\"Abramson\",\"Abran\",\"Abroms\",\"Absa\",\n\t\t\"Absalom\",\"Abshier\",\"Acacia\",\"Acalia\",\"Accalia\",\"Ace\",\"Acey\",\"Acherman\",\"Achilles\",\"Achorn\",\"Acie\",\"Acima\",\"Acker\",\n\t\t\"Ackerley\",\"Ackerman\",\"Ackler\",\"Ackley\",\"Acquah\",\"Acus\",\"Ad\",\"Ada\",\"Adabel\",\"Adabelle\",\"Adachi\",\"Adah\",\"Adaha\",\"Adai\",\n\t\t\"Adaiha\",\"Adair\",\"Adal\",\"Adala\",\"Adalai\",\"Adalard\",\"Adalbert\",\"Adalheid\",\"Adali\",\"Adalia\",\"Adaliah\",\"Adalie\",\"Adaline\",\n\t\t\"Adall\",\"Adallard\",\"Adam\",\"Adama\",\"Adamec\",\"Adamek\",\"Adamik\",\"Adamina\",\"Adaminah\",\"Adamis\",\"Adamo\",\"Adamok\",\"Adams\",\n\t\t\"Adamsen\",\"Adamski\",\"Adamson\",\"Adamsun\",\"Adan\",\"Adao\",\"Adar\",\"Adara\",\"Adaurd\",\"Aday\",\"Adda\",\"Addam\",\"Addi\",\"Addia\"\n\t\t,\"Addie\",\"Addiego\",\"Addiel\",\"Addis\",\"Addison\",\"Addy\",\"Ade\",\"Adebayo\",\"Adel\",\"Adela\",\"Adelaida\",\"Adelaide\",\"Adelaja\"\n\t\t,\"Adelbert\",\"Adele\",\"Adelheid\",\"Adelia\",\"Adelice\",\"Adelina\",\"Adelind\",\"Adeline\",\"Adella\",\"Adelle\",\"Adelpho\",\"Adelric\",\n\t\t\"Adena\",\"Ader\",\"Adest\",\"Adey\",\"Adham\",\"Adhamh\",\"Adhern\",\"Adi\",\"Adiana\",\"Adiel\",\"Adiell\",\"Adigun\",\"Adila\",\"Adim\",\"Adin\"]\n\n\tmajors = [\n\t\t\"software engineering\",\n\t\t\"computer science\",\n\t\t\"electrical engineering\",\n\t\t\"civil engineering\",\n\t\t\"mechanical engineering\",\n\t\t\"life sciences\",\n\t\t\"humanities\",\n\t\t\"law\",\n\t]\n\n\tdata = [dict(\n\t\ttimestamp = time.time(),\n\t\tgpa=random.randint(1, 4),\n\t\tage=random.randint(17, 28),\n\t\tmajor=random.choice(majors),\n\t\tname = random.choice(names) + \" \" + random.choice(names),\n\t) for x in range(sampleSize)]\n\n\tinsert(userID=userID,collectionID=collectionID,data=data)\n\n\ndef main():\n\tcreateDummyData(userID=\"5c4aae274cd635708233e8dc\", collectionID=\"dummy\", sampleSize=10000)\n\n\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"abdulbahajaj/embeddedAnalytics","sub_path":"py_process/installDummyData.py","file_name":"installDummyData.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11044284932","text":"def factorial(n): # function definition statement\n \"\"\"\n\n evaluates n! = n * (n - 1) * ... * 2 * 1\n 0! 
evaluates to 1\n\n >>> factorial(0)\n 1\n\n >>> factorial(10)\n 3628800\n\n >>> factorial(-1)\n Traceback (most recent call last):\n ValueError: n! is undefined for n less than zero\n\n >>> factorial(3.141)\n Traceback (most recent call last):\n TypeError: n is not an integer\n\n :param n: element of the factorial sequence to be evaluated\n :type n: int\n\n :return: n!\n :rtype: int\n \"\"\"\n\n if not isinstance(n, int):\n raise TypeError(\"n is not an integer\") # raise statement\n elif n < 0: # if statement\n raise ValueError(\"n! is undefined for n less than zero\") # raise statement\n\n n_factorial = 1 # assignment statement\n\n while n > 1: # while statement\n n_factorial = n_factorial * n # assignment statement\n n = n - 1 # assignment statement\n\n return n_factorial # return statement\n","repo_name":"zd9999cs/computational_physics","sub_path":"mymath/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25218836427","text":"import tkinter\r\nimport cv2\r\nimport PIL.Image, PIL.ImageTk\r\nimport time\r\nimport numpy as np\r\nfrom time import sleep\r\n\r\n# cap = cv2.VideoCapture('video.mp4')\r\n\r\nlargura_min = 80\r\naltura_min = 80\r\noffset = 6\r\npos_linha = 550\r\n\r\n# video FPS\r\ndelay = 60\r\n\r\ndetec = []\r\ncarros = 0\r\n\r\n\t\r\ndef center_dot(x, y, w, h):\r\n x1 = int(w / 2)\r\n y1 = int(h / 2)\r\n cx = x + x1\r\n cy = y + y1\r\n return cx, cy\r\n\r\nclass App:\r\n def __init__(self, window, window_title, video_source=\"video.mp4\"):\r\n self.window = window\r\n self.window.title(window_title)\r\n self.video_source = video_source\r\n self.vid = MyVideoCapture(self.video_source)\r\n self.canvas = tkinter.Canvas(window, width=self.vid.width, height=self.vid.height)\r\n self.canvas.pack()\r\n self.delay = 15\r\n self.update()\r\n self.window.mainloop()\r\n \r\n def update(self):\r\n ret, frame = self.vid.get_frame()\r\n if ret:\r\n self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))\r\n self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)\r\n self.window.after(self.delay, self.update)\r\n\r\nclass MyVideoCapture:\r\n def __init__(self, video_source=\"video.mp4\"):\r\n subtracao = cv2.createBackgroundSubtractorMOG2()\r\n \r\n while True:\r\n ret, frame1 = video_source.read()\r\n tempo = float(1/delay)\r\n sleep(tempo) \r\n grey = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.GaussianBlur(grey, (3, 3), 5)\r\n img_sub = subtracao.apply(blur)\r\n dilat = cv2.dilate(img_sub, np.ones((5, 5)))\r\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))\r\n\r\n # The morphologyEx() method of the Imgproc class accepts src, dst, op, kernel as parameters\r\n dilatada = cv2.morphologyEx(dilat, cv2.MORPH_CLOSE, kernel)\r\n dilatada = cv2.morphologyEx(dilatada, cv2. 
MORPH_CLOSE, kernel)\r\n\r\n # OpenCV has findContour() function that helps in extracting the contours from the image.\r\n # It works best on binary images, so we should first apply thresholding techniques, Sobel edges, etc.\r\n contorno, h = cv2.findContours(dilatada, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n\r\n # it will create a line\r\n # Parameters:\r\n # image: It is the image on which line is to be drawn.\r\n # start_point: It is the starting coordinates of line.\r\n # end_point: It is the ending coordinates of line.\r\n # color: It is the color of line to be drawn.\r\n # thickness: It is the thickness of the line in px.\r\n cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (176, 130, 39), 2)\r\n for(i, c) in enumerate(contorno):\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n validar_contorno = (w >= largura_min) and (h >= altura_min)\r\n if not validar_contorno:\r\n continue\r\n\r\n cv2.rectangle(frame1, (x, y), (x+w, y+h), (0, 255, 0), 2)\r\n centro = center_dot(x, y, w, h)\r\n detec.append(centro)\r\n cv2.circle(frame1, centro, 4, (0, 0, 255), -1)\r\n\r\n for (x, y) in detec:\r\n if (y < (pos_linha + offset)) and (y > (pos_linha-offset)):\r\n carros += 1\r\n cv2.line(frame1, (25, pos_linha), (1200, pos_linha), (0, 127, 255), 3)\r\n detec.remove((x, y))\r\n print(\"No. of cars detected : \" + str(carros))\r\n\r\n # cv2.putText() method is used to draw a text string on any image.\r\n # Parameters: image, text, org(coordinate), font, color, thickness\r\n cv2.putText(frame1, \"VEHICLE COUNT : \"+str(carros), (320, 70), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 4)\r\n\r\n def __init__(self, video_source=0):\r\n self.vid = cv2.VideoCapture(video_source)\r\n if not self.vid.isOpened():\r\n raise ValueError(\"Unable to open video source\", video_source)\r\n self.width = self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)\r\n self.height = self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)\r\n \r\n def get_frame(self):\r\n if self.vid.isOpened():\r\n ret, frame = self.vid.read()\r\n if ret:\r\n return (ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\r\n else:\r\n return (ret, None)\r\n else:\r\n return (ret, None)\r\n \r\n def __del__(self):\r\n if self.vid.isOpened():\r\n self.vid.release()\r\n\r\n App(tkinter.Tk(), \"Tkinter and OpenCV\")\r\n","repo_name":"rynandraa/Real-time-Object-Counting-across-a-Line-using-OpenCV","sub_path":"coba1.py","file_name":"coba1.py","file_ext":"py","file_size_in_byte":4691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39778802981","text":"#!/usr/bin/env python3\n\n\nimport hashlib\n\nimport pytest # noqa: F401\n\nfrom r5py.util.validating_requests_session import (\n ChecksumFailed,\n ValidatingRequestsSession,\n)\n\n\nclass TestValidatingRequestSession:\n def test_initialisation(self):\n validating_request_session = ValidatingRequestsSession()\n assert validating_request_session._algorithm == hashlib.sha256\n\n def test_get(self, r5_jar_url, r5_jar_sha256):\n with ValidatingRequestsSession() as session, session.get(\n r5_jar_url, r5_jar_sha256\n ) as response:\n assert response.content\n\n def test_get_invalid_checksum(self, r5_jar_url, r5_jar_sha256_invalid):\n with pytest.raises(ChecksumFailed):\n with ValidatingRequestsSession() as session, session.get(\n r5_jar_url, r5_jar_sha256_invalid\n ) as response:\n assert response.content\n\n def test_post(self, r5_jar_url, r5_jar_sha256_github_error_message_when_posting):\n with ValidatingRequestsSession() as session, session.post(\n r5_jar_url, 
r5_jar_sha256_github_error_message_when_posting\n ) as response:\n assert response.content\n\n def test_post_invalid_checksum(self, r5_jar_url, r5_jar_sha256_invalid):\n with pytest.raises(ChecksumFailed):\n with ValidatingRequestsSession() as session, session.post(\n r5_jar_url, r5_jar_sha256_invalid\n ) as response:\n assert response.content\n","repo_name":"r5py/r5py","sub_path":"tests/test_validating_request_session.py","file_name":"test_validating_request_session.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"18"} +{"seq_id":"8158009170","text":"# @auther 宋疆疆 \n# @since 2017/12/11.\n\nimport json\nfrom GitBean import GitBean\nimport Util\nimport os\nimport Config\nimport sys\n\n\ndef main():\n if len(sys.argv) <= 2:\n file_path = Config.MAINFEST_PATH\n else:\n file_path = sys.argv[2]\n\n with open(file_path, \"r\") as f:\n gits = json.load(f, object_hook=GitBean.parse)\n\n for git in gits:\n if os.path.exists(os.getcwd() + git.path):\n Util.execCmd(['git', 'remote', 'update'], os.getcwd() + git.path)\n Util.execCmd(['git', 'merge', 'origin/' + git.branch], os.getcwd() + git.path)\n else:\n Util.execCmd(['git', 'clone', git.uri, os.getcwd() + git.path, '-b', git.branch])\n\n return 0\n","repo_name":"tofaluanle/GitHelper","sub_path":"sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74602950439","text":"from AI import CB\r\nimport speech_recognition as sr\r\nimport os\r\nimport sys\r\nimport serial\r\nimport time\r\n#The following line is for serial over GPIO\r\nport = 'COM3'\r\nard = serial.Serial(port,9600,timeout=5)\r\ntime.sleep(2)\r\nsystemPath=sys.path[0]+\"/\" #gather the pathway to the file\r\nm=sr.Microphone()\r\nr=sr.Recognizer()\r\neye=\"\"\"\r\n-RR--RR-\r\nRWBRRBWR\r\nRBBRRBBR\r\n-RR--RR-\r\n--------\r\n--------\r\n--------\r\n--------\"\"\".replace(\"\\n\",\"\")\r\nblink=\"\"\"\r\n--------\r\nRRRRRRRR\r\nRRRRRRRR\r\n--------\r\n--------\r\n--------\r\n--------\r\n--------\"\"\".replace(\"\\n\",\"\")\r\ndef INPUT(): #input method\r\n with m as source: #gather audio\r\n r.adjust_for_ambient_noise(source)\r\n showEye(eye)\r\n print(\">\")\r\n audio=r.listen(source)\r\n showEye(blink)\r\n try:\r\n return r.recognize_google(audio)\r\n except:\r\n return \"\"\r\ndef OUTPUT(string): #output to user\r\n print(string)\r\n os.system('espeak \"'+string+'\" 2>/dev/null')\r\ndef showEye(eye):\r\n a=[]\r\n for i in eye:\r\n a.append(ord(i))\r\n ard.write(a)\r\n print(\"-----\")\r\n\r\n\r\nbot=CB(systemPath+\"testCB/\")\r\nwhile True:\r\n UI=INPUT()\r\n if UI!=\"\":\r\n print(UI)\r\n OUTPUT(bot.chat(UI))\r\n","repo_name":"shepai/AI-code","sub_path":"V0.0.9/BOX/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32533365990","text":"from django.shortcuts import render\nfrom .models import ProductImg, Product, Variable, Size, Color, Category\nimport json\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\n\n\ndef filter_go(data):\n var_prods = []\n new = []\n if data['category']:\n category = Category.objects.get(name=data['category'])\n var_prods = Product.objects.filter(is_active=True, category=category)\n\n else:\n var_prods = Product.objects.all()\n if data['color']:\n color = 
Color.objects.get(name=data['color'])\n for item in var_prods:\n if color in item.colors.all():\n new.append(item)\n var_prods = new\n new = []\n if data['size']:\n size = Size.objects.get(name=data['size'])\n for item in var_prods:\n variable = Variable.objects.filter(prods=item)\n flag = False\n for item2 in variable:\n if item2.size == size:\n flag = True\n if flag:\n new.append(item)\n var_prods = new\n return var_prods\n\n\ndef products(req):\n\n cookie = req.COOKIES\n data = dict()\n try:\n filt = cookie['filter']\n filt = json.loads(filt)\n except KeyError:\n data['color']=\"\"\n data['size']=\"\"\n data['category']=\"\"\n else:\n data['color']=filt['color']\n data['size']=filt['size']\n data['category']=filt['category']\n nothing = filt['nothing']\n # products = Product.objects.filter(is_active=True)\n colors = Color.objects.all()\n sizes = Size.objects.all()\n category = Category.objects.all()\n my_filter = [colors, sizes, category]\n products = filter_go(data)\n paginator = Paginator(products, 6)\n page_number = req.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(req, 'products.html', locals())\n\n\ndef filter(req):\n ret_dict = dict()\n\n data = req.POST\n ret_dict['color'] = data['color']\n ret_dict['size'] = data['size']\n ret_dict['category'] = data['category']\n\n var_prods = filter_go(data)\n i = 0\n for item in var_prods:\n it = dict()\n it['image'] = item.fimage.url\n it['name'] = item.name\n it['description'] = item.description[:49] + '...'\n it['id'] = item.id\n ret_dict[i] = it\n i += 1\n ret_dict['amount'] = i\n\n return JsonResponse(ret_dict)\n\ndef profile(req,productid):\n pr_id = productid\n product = Product.objects.get(id = productid)\n prod_colors = product.colors.all()\n sizes = Size.objects.all()\n prod_var= Variable.objects.filter(prods = product)\n prod_sizes = {}\n for clr in prod_colors:\n s={}\n i=0\n for size in sizes:\n if size.variable.filter(prods = product, colors = clr):\n s[i] = (size.name)\n i+=1\n prod_sizes[clr.id]=s\n prod_sizes = json.dumps(prod_sizes)\n return render(req, 'profile.html', locals())\n\n\n\ndef category(req, catid):\n products = Product.objects.filter(category=catid)\n colors = Color.objects.all()\n sizes = Size.objects.all()\n category = Category.objects.all()\n my_filter = [colors, sizes, category]\n paginator = Paginator(products, 6)\n page_number = req.GET.get('page')\n page_obj = paginator.get_page(page_number)\n return render(req, 'products.html', locals())\n\n# Create your views here.\n","repo_name":"MinPolin/mysite","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12277641954","text":"import math\nimport pandas as pd\nimport numpy as np\nfrom km3net.data import noise, hits\nimport os\n\ndef add_timeslices(df, duration=15000):\n \"\"\"\n In\n --\n df -> (n, m) dataframe with a time column. Ideally this should be the\n combined dataset.\n duration -> Int, represents the duration of each timeslice in\n nanoseconds. Defaults to 15000 ns.\n\n Out\n ---\n df -> (n, m+1) dataframe with timeslice column added.\n \"\"\"\n timeslices = list(range(0, math.ceil(df[\"time\"].max())+duration, duration))\n df['timeslice'] = pd.cut(df['time'], bins=timeslices,\n include_lowest=True, labels=False)\n\n return df\n\ndef process(drop=True, sort=True):\n \"\"\"\n In\n --\n drop -> Bool, drop the dom_id, pmt_id, dir_x, dir_y, dir_z and tot\n columns. 
Defaults to True.\n sort -> Bool, sort the rows by time. Defaults to True.\n\n Out\n ---\n frame with noise and hits dataset combined, rows with negative time\n dropped and timeslice added.\n \"\"\"\n hits = hits.process()\n noise = noise.process()\n noise[\"event_id\"] = np.nan\n df = pd.concat([hits, noise])\n df = df[df[\"time\"] >= 0.0]\n df = add_timeslices(df)\n\n if drop:\n df = df.drop(columns=[\"dom_id\", \"pmt_id\", \"dir_x\", \"dir_y\", \"dir_z\", \"tot\"])\n\n if sort:\n df = df.sort_values(by=['time'])\n\n return df\n\n","repo_name":"arumoy-shome/km3net","sub_path":"km3net/data/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"3907806314","text":"import cv2\n\n#read image\nimg = cv2.imread(\"/home/satish/Pictures/Elon_Musk_2015.jpg\",1)\n\n#show the image \n\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY ) \nscale=20\nprint(\"Origin Dimension:\", img.shape)\n\nwidth=int(img.shape[1]*scale/100)\nheight=int(img.shape[0]*scale/100)\ndim=(width,height)\n\nresized=cv2.resize(img,dim,interpolation=cv2.INTER_AREA)\n# print(\"Resized Dimension:\", resized)\n\ncv2.imshow('image',resized) \n\nangel90=90\n\ncenter=(width/2,height/2)\nscle=1.0\nM=cv2.getRotationMatrix2D(center,angel90,scle)\nangel90=cv2.warpAffine(resized,M,(height,width))\n\ncv2.imshow(\"Rotate90=\",angel90)\n\nangle180=180\n\nM=cv2.getRotationMatrix2D(center,angle180,scle)\nangle180=cv2.warpAffine(resized,M,(height,width))\n\ncv2.imshow(\"Rotate180:\",angle180)\n\n\nangle270=270\nM=cv2.getRotationMatrix2D(center,angle270,scle)\nangle270=cv2.warpAffine(resized,M,(height,width))\n\ncv2.circle(resized,(80,80),55,(0,255,0),-1)\ncv2.imshow(\"Color Image:\",resized)\ncv2.imshow(\"Rotate270:\",angle270)\n\n\ncv2.waitKey(0)\n\n#save the image \n# elon=cv2.imwrite(\"/home/satish/Elon_Musk.jpg\",img)\n\n# elon=img[100,100]\n\n\n\n# height, width, number of channels in image \n# height = img.shape[0] \n# width = img.shape[1] \n# channels = img.shape[2] \n# size1 = img.size \n \n\n# print('Image Height : ',height) \n# print('Image Width : ',width) \n# print('Number of Channels : ',channels) \n# print('Image Size :', size1) \n\n# print(\"Save The File \",elon)\n\n\n","repo_name":"satishpr9/Computer_vision","sub_path":"opencv.py","file_name":"opencv.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24893131226","text":"from bs4 import BeautifulSoup\nimport os\nimport fnmatch\n\nconfig_path = raw_input(\"Enter full path : \")\nlog = open(config_path + '/log.txt', 'w')\n\n\ndef replace_href(file_name):\n log.write(file_name + '\\n')\n file_data = open(file_name, 'r').read()\n soup = BeautifulSoup(file_data)\n a_tags = soup.findAll('a', {\"href\": \"#\"})\n for a_tag in a_tags:\n log.write(str(a_tag))\n fixed_text = str(a_tag).replace(' href=\"#\"', '')\n a_tag.replace_with(fixed_text)\n log.write(\"-->\" + fixed_text)\n html = soup.prettify(\"utf-8\")\n out = open(file_name, 'w')\n out.write(html)\n\nfor root, dir_names, files in os.walk(config_path):\n for filename in fnmatch.filter(files, '*.html'):\n replace_href(os.path.join(root, filename))\n","repo_name":"jasmingeorge/myscripts","sub_path":"bs-rm-href.py","file_name":"bs-rm-href.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} 
+{"seq_id":"73359322921","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isValidBST(self, root):\n def travel(node, l, r):\n if not node:\n return True\n if l < node.val < r:\n a = travel(node.left, l, node.val)\n b = travel(node.right, node.val, r)\n return a and b\n else:\n return False\n\n return travel(root, float(\"-inf\"), float(\"inf\"))\n","repo_name":"SuersserMann/NewBrain","sub_path":"笔试/leetcode题/验证二叉搜索树.py","file_name":"验证二叉搜索树.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"248567499","text":"from firedrake import *\n\n\nclass DGLaplacian(AuxiliaryOperatorPC):\n\n def form(self, pc, u, v):\n\n ctx = self.get_appctx(pc)\n d = ctx[\"deform\"]\n value = ctx[\"value\"]\n q = ctx[\"quadrature_degree\"]\n\n def gamma(p, h, d, value):\n return Constant(value)/h\n\n W = u.function_space()\n n = FacetNormal(W.mesh())\n\n p = W._ufl_element._sub_elements[0]._degree\n h = CellVolume(W.mesh())/FacetArea(W.mesh())\n\n a_dg = -(dot(grad(v), grad(u)) * dx(degree=q)\n - dot(grad(v), u * n) * ds_v(degree=q)\n - dot(v * n, grad(u)) * ds_v(degree=q)\n + gamma(p, h, d, value) * dot(v, u) * ds_v(degree=q)\n - dot(grad(v), u * n) * ds_t(degree=q)\n - dot(v * n, grad(u)) * ds_t(degree=q)\n + gamma(p, h, d, value) * dot(v, u) * ds_t(degree=q)\n - dot(grad(v), u * n) * ds_b(degree=q)\n - dot(v * n, grad(u)) * ds_b(degree=q)\n + gamma(p, h, d, value) * dot(v, u) * ds_b(degree=q))\n\n bcs = []\n return (a_dg, bcs)\n","repo_name":"sv2518/mathybperf","sub_path":"mathybperf/setup/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"21237897589","text":"\nimport lemminflect\nimport dep_search.struct_query as sq\n\nPUNCT = [c for c in '.?!:;,']\nVOWELS = 'aeiouAEIOU'\n\nQUAL_BLACKLIST = frozenset(['other', 'many', 'some', 'few', 'most', 'several', 'certain', 'particular', 'various', 'multiple', 'specific'])\n\nclass Token:\n def __init__(self, text, join_left=False, join_right=False):\n self.text = text\n self.join_left = join_left\n self.join_right = join_right\n\ndef remove_child(head: sq.Head, child: sq.Head):\n pruned = head.copy()\n if pruned.left_children is not None:\n for i, c in enumerate(head.left_children):\n if c is child:\n del pruned.left_children[i]\n if len(pruned.left_children) == 0:\n pruned.left_children = None\n return pruned\n if pruned.right_children is not None:\n for i, c in enumerate(head.right_children):\n if c is child:\n del pruned.right_children[i]\n if len(pruned.right_children) == 0:\n pruned.right_children = None\n return pruned\n return head\n\ndef filter_children(head: sq.Head, f):\n pruned = head.copy()\n if pruned.left_children is not None:\n pruned.left_children = [c for c in pruned.left_children if not f(c)]\n if len(pruned.left_children) == 0:\n pruned.left_children = None\n if pruned.right_children is not None:\n pruned.right_children = [c for c in pruned.right_children if not f(c)]\n if len(pruned.right_children) == 0:\n pruned.right_children = None\n return pruned\n\ndef swap_child(head: sq.Head, child: sq.Head, subst: sq.Head):\n if head.left_children is not None:\n for i, c in enumerate(head.left_children):\n if c is child:\n pruned = head.copy()\n pruned.left_children[i] = 
subst\n return pruned\n if head.right_children is not None:\n for i, c in enumerate(head.right_children):\n if c is child:\n pruned = head.copy()\n pruned.right_children[i] = subst\n return pruned\n return head\n\ndef get_left_child_by_label(head: sq.Head, label: str):\n if head.left_children is not None:\n for c in head.left_children:\n if c.label == label:\n return c\n return None\n\ndef get_right_child_by_label(head: sq.Head, label: str):\n if head.right_children is not None:\n for c in head.right_children:\n if c.label == label:\n return c\n return None\n\ndef linearize(head: sq.Head):\n t = Token(head.lemma)\n if head.original is None or head.category != head.original.tag_:\n if head.category == 'DT':\n if head.lemma == 'a':\n after = False\n for c in (*(head.parent.left_children or []), head.parent):\n if c is head:\n after = True\n elif after:\n if c is not head.parent:\n while c.left_children is not None:\n c = c.left_children[0]\n if c.lemma[0] in VOWELS:\n t.text = 'an'\n break\n elif head.category in PUNCT:\n t.join_left = True\n else:\n inflections = lemminflect.getInflection(head.lemma, head.category)\n if len(inflections) > 1 and head.lemma == 'be' and head.category == 'VBP':\n t.text = 'are'\n elif len(inflections) > 0:\n t.text = inflections[0]\n else:\n t.text = head.original.text\n t.join_left = head.original.i > 0 and head.original.doc[head.original.i-1].whitespace_ == '' and not t.text.isalnum()\n t.join_right = head.original.whitespace_ == '' and not t.text.isalnum()\n if head.lemma.islower() or (head.lemma == '-PRON-' and t.text != 'I'):\n t.text = t.text.lower()\n\n if head.left_children is not None:\n for child in head.left_children:\n yield from linearize(child)\n yield t\n if head.right_children is not None:\n for child in head.right_children:\n yield from linearize(child)\n\ndef join_tokens(tokens):\n chunks = []\n last_join = True\n for t in tokens:\n chunks.append(' '+t.text if not (t.join_left or last_join) else t.text)\n last_join = t.join_right\n return ''.join(chunks)\n\ndef flatten(head):\n return join_tokens(linearize(head))\ndef capitalize(s):\n return s[0].upper() + s[1:]\n\nclass Pattern:\n def __init__(self, query):\n self.query = query\n \n def rewrite(self, match_vars):\n raise NotImplementedError()\n\n def scrape(self, idx):\n for _, m_vars in self.query.search(idx):\n output = self.rewrite(m_vars)\n if output is not None:\n yield output\n\nclass SimpleSubjFilterPattern(Pattern):\n def rewrite(self, match_vars):\n pred = match_vars[2]\n subj = match_vars[0]\n restr = match_vars[1]\n if (subj.lemma.lower() in QUAL_BLACKLIST) or any(c.lemma.lower() in QUAL_BLACKLIST for c in subj.children):\n return None\n return (str(pred.original.sent),)","repo_name":"alephic/ParaPattern","sub_path":"dep_search/templates/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"23430204547","text":"import requests\nimport ssl\n\nfrom bs4 import BeautifulSoup, SoupStrainer\nfrom urllib.parse import urljoin\n\n\ndef get_files_from_link(url):\n try:\n html = requests.get(url).content\n except (ConnectionError, ssl.SSLError):\n return -1, 0, 0\n\n bs = BeautifulSoup(html, \"html.parser\", parse_only=SoupStrainer('a'))\n data_list = []\n names_list = []\n cnt = 0\n for link in bs:\n if link.has_attr('href') and link['href'].endswith(\".txt\"):\n cnt += 1\n curl = link['href']\n curl = urljoin(url, curl)\n names_list.append(curl.rsplit(\"/\", 
1)[-1])\n data_list.append(requests.get(curl).content)\n\n return cnt, names_list, data_list\n","repo_name":"ivd4/compress-bot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"11447346763","text":"from sys import exit\nfrom enum import Enum\nfrom levichess.mainfiles.piece import Piece\nfrom levichess.mainfiles.chessboard import ChessBoard\nfrom levichess.mainfiles.cemetery import Cemetery\nfrom levichess.leviosa.decoder import Decoder\nfrom levichess.soundtracks.soundtracks import SoundTracks\nfrom levichess.resources.config import config\n\n\nclass Player:\n\t\"\"\"Player class contains functions to be executed by the player in the game,\n\tas well as verification for Player actions which result in the validity of these.\"\"\"\n\n\tdef __init__(self, BlackOrWhite):\n\t\tself.BW = BlackOrWhite #Assign a Color to the Player\n\t\tself.IsPlaying = False\n\t\tself.PN = 0\n\t\tself.PName = 'Player ' + str(self.PN)\n\t\tif self.BW == 0: #Black Player\n\t\t\tself.ColorID = \"BLACK\"\n\t\telif self.BW == 1: #White Player\n\t\t\tself.ColorID = \"WHITE\"\n\n\tdef ExitGame(self):\n\t\tSoundTracks.StopSong()\n\t\texit()\n\n\t#Function to let Player know what piece is it\n\tdef WhatPiece(self,string): #Posts attributes of objects in black_pieces\n\t\ty, x = Decoder.NameToLocation(string)\n\t\ttry:\n\t\t\tboard = ChessBoard.board[y][x]\n\t\t\ttry:\n\t\t\t\tif board.isValid == False:\n\t\t\t\t\tprint('No piece in location: ' + string)\n\t\t\t\telse:\n\t\t\t\t\tprint(board.color + ' ' + board.piecetype + ' in location: ' + string + '\\n')\n\t\t\texcept IndexError:\n\t\t\t\tprint('Outside of boundary.'+ '\\n')\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.'+ '\\n')\n\t\treturn False\n\n\n\tdef isPieceAlly(firstPiece,secondPiece):\n\t\tpieceOne = ChessBoard.getPieceFromBoard(firstPiece) #import fist Piece\n\t\tpieceTwo = ChessBoard.getPieceFromBoard(secondPiece) #Import second piece\n\t\ttry:\n\t\t\tif pieceOne.isValid == True and pieceTwo.isValid == True:\t#Check if both locations are pieces. If they are valids or not\n\t\t\t\tif (pieceOne.colorID == pieceTwo.colorID) == True: #If their colorID match each other, they are allies\n\t\t\t\t\tprint('Pieces are Allies. \\n')\t\t\t\t\t#Else, they are enemies\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tprint('Pieces are Enemies. \\n')\n\t\t\t\t\treturn False\n\t\t\telif pieceOne.isValid == False:\t\t\t\t\t\t\t#If the first locaton is not valid, there is no piece there\n\t\t\t\t\tprint('There is no piece in location: ' + firstPiece + '\\n')\n\t\t\t\t\treturn False\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.' + '\\n')\n\n\n\tdef isLocationClear(WhereTo): #Check for final location only\n\t\tdestination = ChessBoard.getPieceFromBoard(WhereTo) #Import second piece\n\t\ttry:\n\t\t\tif destination.isValid == False:\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Check if there is a piece in final destination\n\t\t\t\tprint('There is no piece in location ' + WhereTo + '.')\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint('There is a piece in location ' + WhereTo + '.')\n\t\t\t\treturn False\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.' 
+ '\\n')\n\n\n\tdef isPathClear(WhichPiece,WhereTo):\n\t\tpieceOne = ChessBoard.getPieceFromBoard(WhichPiece) #Decoding the Piece\n\t\tpieceTwo = ChessBoard.getPieceFromBoard(WhereTo)\n\t\tyf, xf = Decoder.NameToLocation(WhereTo)\n\t\tys, xs = Decoder.NameToLocation(WhichPiece)\n\t\tprint('Initial location: ' + str(ys) + ', ' + str(xs) + '.') #Should be yi + n (one step)\n\t\tprint('Final location: ' + str(yf) + ', ' + str(xf) + '.\\n') #Should be yi + n (one step)\n\n\t\t#Horse is the ONLY piece that is allowed to move just one. Therefore we do not need to check its path (Only final Location is suficient to check its Path)\n\t\t#If Piece is not a horse -> Check if the path is clear!\n\t\tif (pieceOne.pieceID != 2) and (pieceOne.pieceID != 0):\n\n\t\t\tif yf - ys > 0:\n\t\t\t\tys = ys + 1\n\t\t\telif yf - ys < 0:\n\t\t\t\tys = ys - 1\n\t\t\tif xf - xs > 0:\n\t\t\t\txs = xs + 1\n\t\t\telif xf - xs < 0:\n\t\t\t\txs = xs - 1\n\n\t\t\twhile (abs(yf - ys) == abs(xf - xs)) or (yf > ys or yf < ys) or ((xf > xs) or (xf < xs)):\n\t\t\t\tprint('Checking location ' + str(ys) + ', ' + str(xs) + '.')\n\t\t\t\tif (Piece.HowMove(pieceOne,ys,xs) == True):\n\t\t\t\t\tif Player.isLocationClear(Decoder.LocationToName(ys,xs)) == True:\n\t\t\t\t\t\tprint('Iteration to ' + str(ys) + ',' + str(xs) + ' complete.\\n')\n\n\t\t\t\t\t\tif yf == ys and xf == xs:\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif abs(yf - ys) == abs(xf - xs):\n\t\t\t\t\t\t\tif yf > ys:\n\t\t\t\t\t\t\t\tys = ys + 1\n\t\t\t\t\t\t\telif yf < ys:\n\t\t\t\t\t\t\t\tys = ys - 1\n\t\t\t\t\t\t\tif xf > xs:\n\t\t\t\t\t\t\t\txs = xs + 1\n\t\t\t\t\t\t\telif xf < xs:\n\t\t\t\t\t\t\t\txs = xs - 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif yf > ys:\n\t\t\t\t\t\t\tys = ys + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif yf < ys:\n\t\t\t\t\t\t\tys = ys - 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif xf > xs:\n\t\t\t\t\t\t\txs = xs + 1\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif xf < xs:\n\t\t\t\t\t\t\txs = xs - 1\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('Stopped here: ' + str(ys) + ',' + str(xs))\n\t\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tys = ys + 1\n\t\t\t\t\tprint('Iteration to ' + str(ys) + ',' + str(xs) + ' complete.')\n\t\t\t\t\tbreak\n\t\t#If Piece is a Horse, do not need to check this equation -> Pass\n\t\telse:\n\t\t\tys = yf\n\t\t\txs = xf\n\t\t\tpass\n\n\t\tprint ('Returned: ' + str(ys) + ',' + str(xs) + '\\n')\n\t\treturn ys, xs\n\n\n\tdef isMoveValid(WhichPiece,WhereTo):\n\t\tyf, xf = Decoder.NameToLocation(WhereTo) #Decoding the desired location\n\t\tpieceOne = ChessBoard.getPieceFromBoard(WhichPiece) #import fist Piece\n\t\tpieceTwo = ChessBoard.getPieceFromBoard(WhereTo) #Import second piece\n\t\ttry:\n\t\t\t##Check if the move is valid (check moveRules)\n\t\t\tif (pieceOne.HowMove(yf,xf) == True) or (pieceOne.HowKill(yf,xf) == True):\n\t\t\t\tprint(str(pieceOne.HowMove(yf,xf)) + ' Move')\n\t\t\t\tprint(str(pieceOne.HowKill(yf,xf)) + ' Kill')\n\t\t\t\t#Check if Location is Clear\n\t\t\t\t#If Location is Clear:\n\t\t\t\tif Player.isLocationClear(WhereTo) == True:\n\t\t\t\t\tif (pieceOne.pieceID !=0) or ((pieceOne.pieceID == 0) and pieceOne.HowKill(yf,xf) == False):\n\t\t\t\t\t\t#Proceed to check if there is a piece blocking the move\n\t\t\t\t\t\tys, xs = Player.isPathClear(WhichPiece,WhereTo)\t\t\t#Runs a loop checking each x and y, and return the last one. 
If the last is equal to the desired location, it can move there.\n\t\t\t\t\t\t#If the path is clear (no piece on its way)\n\t\t\t\t\t\tif yf == ys and xf == xs:\n\t\t\t\t\t\t\tprint('Yes, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' can move from ' + WhichPiece + ' to ' + WhereTo + ' because the Path is clear.')\n\t\t\t\t\t\t\treturn True, False\n\t\t\t\t\t\t#If there is a piece on it's way you cannot move\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because there is a piece in the way. ' + '[Other Piece at Location: ' + Decoder.LocationToName(ys,xs) + ']\\n')\n\t\t\t\t\t\t\treturn False, False\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because it is a Pawn, and cannot move diagonally.\\n')\n\t\t\t\t\t\treturn False, False\n\n\t\t\t\t#If Location is not Clear:\n\t\t\t\t#Check if Piece is an Ally\n\t\t\t\telif Player.isPieceAlly(WhichPiece,WhereTo) == False: #Other piece is enemy -> KILL\n\t\t\t\t\tys, xs = Player.isPathClear(WhichPiece,WhereTo)\n\t\t\t\t\t#If the path is clear (no piece on its way):\n\t\t\t\t\tif yf == ys and xf == xs:\n\t\t\t\t\t\t#Check if the piece can kill (if killType is Valid):\n\n\t\t\t\t\t\tif pieceOne.HowKill(yf,xf) == True:\n\t\t\t\t\t\t\t#If the KillType is valid -> Can Kill\n\t\t\t\t\t\t\tprint('Yes, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' can move from ' + WhichPiece + ' to ' + WhereTo + ' and kill ' + pieceTwo.color + ' ' + pieceTwo.piecetype + '.\\n')\n\t\t\t\t\t\t\treturn True, True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#If the KillType is NOT valid -> Cannot move there\n\t\t\t\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because there it is not a valid Kill Type.\\n')\n\t\t\t\t\t\t\treturn False, False\n\t\t\t\t\t#If there is a piece on it's way you cannot move\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because there is a piece in the way. ' + '[Other Piece at Location: ' + Decoder.LocationToName(ys,xs) + ']\\n')\n\t\t\t\t\t\treturn False, False\n\t\t\t\t\t#If the Piece is an Ally, it cannot kill or move there\n\t\t\t\telse:\n\t\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because there is an ally piece at this location.\\n')\n\t\t\t\t\treturn False, False\n\t\t\t#If the move is Invalid, it cannot proceed\n\t\t\telse:\n\t\t\t\tprint('No, ' + pieceOne.color + ' ' + pieceOne.piecetype + ' cannot move from ' + WhichPiece + ' to ' + WhereTo + ' because it is an invalid move.\\n')\n\t\t\t\treturn False, False\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.')\n\n\tdef KillPiece(pieceOne): #Function will be used to move motors to the cemetery\n\t\tCemetery.MovePieceToCemetery(pieceOne)\n\n\n\tdef MovePiece(self,WhichPiece,WhereTo): #Take Both String Locations. 
Like A2 or B7\n\t\tpieceOne = ChessBoard.getPieceFromBoard(WhichPiece) #import fist Piece\n\t\tpieceTwo = ChessBoard.getPieceFromBoard(WhereTo) #Import second piece\n\t\ttry:\n\t\t\tif self.BW == pieceOne.colorID:\n\t\t\t\tMoveValidation,KillValidation = Player.isMoveValid(WhichPiece,WhereTo)\n\t\t\t\tif (MoveValidation == True) and (KillValidation == False): #Just Move The Piece\n\t\t\t\t\tChessBoard.MovePieceInBoard(pieceOne,WhereTo)\n\t\t\t\t\tSoundTracks.PlaySound(SoundTracks.Woosh1)\n\t\t\t\t\tprint('Player ' + str(self.PN) + ' moves ' + pieceOne.color + ' ' + pieceOne.piecetype + ' from ' + WhichPiece + ' to ' + WhereTo + '.\\n')\n\t\t\t\t\treturn True\n\t\t\t\telif (MoveValidation == True) and (KillValidation == True):\n\t\t\t\t\tPlayer.KillPiece(pieceTwo)\n\t\t\t\t\tChessBoard.MovePieceInBoard(pieceOne,WhereTo)\n\t\t\t\t\tSoundTracks.PlaySound(SoundTracks.Woosh1)\n\t\t\t\t\tprint('Player ' + str(self.PN) + ' moves ' + pieceOne.color + ' ' + pieceOne.piecetype + ' from ' + WhichPiece + ' to ' + WhereTo + ' and kills ' + pieceTwo.piecetype + '.\\n')\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\tprint(pieceOne.color + ' ' + pieceOne.piecetype + ' move aborted.\\n')\n\t\t\t\t\tprint('Try again.\\n')\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tprint('This is not a Piece from your team. Please, try again.\\n')\n\t\t\t\treturn False\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.' + '\\n')\n\t\t\treturn False\n\n\tdef SeeCemetery(self):\n\t\tCemetery.printCemetery(Cemetery.cboard)\n\n\n\tdef GodMovePiece(self,WhichPiece,WhereTo):\n\t\tyf, xf = Decoder.NameToLocation(WhereTo) #Decoding the desired location\n\t\tThisPiece = ChessBoard.getPieceFromBoard(WhichPiece) #Decoding the Piece\n\t\ttry:\n\t\t\tif ThisPiece.isValid == True:\n\t\t\t\ty, x = ThisPiece.Location\n\t\t\t\tCemetery.MovePieceToCemetery(ThisPiece)\n\t\t\t\tconfig.game_board[y][x] = ChessBoard.Blank\n\t\t\telif ThisPiece.isValid == False:\n\t\t\t\tpass\n\t\t\tChessBoard.MovePieceInBoard(ThisPiece,WhereTo)\n\t\t\tSoundTracks.PlaySound(SoundTracks.Woosh1)\n\t\t\tprint('Teleporting ' + ThisPiece.color + ' ' + ThisPiece.piecetype + ' from ' + WhichPiece + ' to ' + WhereTo + '.\\n')\n\t\t\treturn True\n\t\texcept IndexError:\n\t\t\tprint('Outside of boundary.' + '\\n')\n\n#\tdef GodOfWar(self,WhichPiece):\n#\t\tThisPiece = ChessBoard.getPieceFromBoard(WhichPiece) #Decoding the Piece\n#\t\tif ThisPiece.isValid == True:\n#\t\t\ty, x = ThisPiece.Location\n#\t\t\tCemetery.MovePieceToCemetery(ThisPiece)\n#\t\t\tconfig.game_board[y][x] = ChessBoard.Blank\n#\t\telse:\n#\t\t\tprint('BlankSpaces are ancient legends. They came before everything and occupy every empty space. 
\\nEven Gods do not have the power to kill a BlankSpace...\\n')\n","repo_name":"azaeldrm/Levichess","sub_path":"levichess/mainfiles/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":10970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73505597203","text":"\"\"\"\nINPUT:\n\npython3 ./scripts/get_edges.py edges2mountains\n\nOUTPUT:\n\n./edges2mountains/images/edgeN.png\n\"\"\"\n\n# https://github.com/benwiz/fragment/blob/master/py-detect-features/lambda_function.py\n\nimport sys\nimport os\nimport numpy as np\nimport cv2\n\nif len(sys.argv) != 2:\n    print('Error: exactly 1 argument must be supplied.')\n    sys.exit(1)\n\nMODEL_NAME = sys.argv[1]\n\n\ndef run():\n    \"\"\"\n    Run everything.\n    \"\"\"\n\n    path = './%s/images/' % MODEL_NAME\n    for filename in os.listdir(path):\n        # Load image\n        in_filepath = path + filename\n        img = cv2.imread(in_filepath, cv2.IMREAD_COLOR)\n        if img is None:\n            continue\n        # print('\\tin:\\t%s' % in_filepath)\n\n        # Resize image to 256x256\n        size = 256\n        img = cv2.resize(img, (size, size))\n\n        # Convert it to grayscale\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n        # Canny edge detection on gray image\n        edges = cv2.Canny(gray, 100, 450)\n\n        # Invert black and white\n        edges = cv2.bitwise_not(edges)\n\n        # Stitch the two photos together with the edges on the right. But first\n        # we need to convert the edges image to color.\n        edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)\n        out = np.concatenate((img, edges), axis=1)\n\n        # Output the image\n        out_path = './%s/data/' % MODEL_NAME\n        image_id = filename.split('.')[0]\n        out_filepath = out_path + image_id + '.jpg'\n        cv2.imwrite(out_filepath, out)\n        # print('\\tout:\\t%s' % out_filepath)\n\n\nif __name__ == '__main__':\n    print()\n    print('get_edges.py:')\n    print('Begin edge detection.')\n    run()\n    print('Complete edge detection.')\n","repo_name":"benwiz/graphics","sub_path":"art-warp/training/scripts/get_edges.py","file_name":"get_edges.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"28266897342","text":"import os\nimport glob\nfrom datetime import datetime\nfrom flask import Flask,request,json,Response\nimport logging\nimport sys\n\nif os.getenv('ttl') is None:\n    ttl = 15\nelse:\n    ttl = int(os.getenv('ttl'))\n\nlog = logging.getLogger('')\nformat = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\nch = logging.StreamHandler(sys.stdout)\nch.setFormatter(format)\nlog.addHandler(ch)\n\nif os.getenv('DEBUG') == \"True\":\n    log.setLevel(logging.DEBUG)\n    logging.info(\"Debug logging enabled\")\nelse:\n    log.setLevel(logging.INFO)\n\napp = Flask(__name__)\n\ndef time_diff(timestamp):\n    metrics_date = datetime.strptime(timestamp, '%Y-%m-%d %H:%M:%S')\n    difference = datetime.now() - metrics_date\n    if difference.total_seconds() >= ttl * 60:\n        return True\n    else:\n        return False\n\n@app.route('/metrics')\ndef metrics():\n    metrics_result = '#Enviro Metrics\\n'\n    for json_file in glob.iglob('readings/*.json'):\n        data = json.load(open(json_file))\n        logging.debug(\"JSON contents: {0}\".format(data))\n        # time_diff() is True once a reading is older than ttl; only export fresh readings.\n        if not time_diff(data[\"timestamp\"]):\n            for reading in data[\"readings\"]:\n                if \"moisture\" in reading:\n                    probe_number = reading.split(\"_\").pop()\n                    labels = '{nickname=\"%s\", sensor=\"%s\"}' % (data[\"nickname\"], probe_number)\n                    metrics_result += 'enviro_{0}{1} {2}\\n'.format(\"moisture\", labels, data[\"readings\"][reading])\n                else:\n                    
labels = '{nickname=\"%s\"}' % data[\"nickname\"]\n metrics_result += 'enviro_{0}{1} {2}\\n'.format(reading, labels, data[\"readings\"][reading])\n\n return Response(metrics_result, mimetype='text/text')\n\n@app.route('/endpoint',methods=['POST'])\ndef endpoint():\n data = request.json\n logging.debug(\"Received JSON: {0}\".format(request.json))\n nickname = data[\"nickname\"]\n file_out = \"readings/{0}.json\".format(nickname)\n with open(file_out, \"w\") as outfile:\n json.dump(data, outfile)\n return 'success', 200\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=\"8080\", debug=True)\n","repo_name":"mscottco/enviro-to-prometheus","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"9626886661","text":"# Sid Arora\n# UPDATED AS OF 6/25/19\n\n# This Program will read in data from the Vaisala Weather Sensor\n# It can read data over long periods of time and perform averages or can output live data\n\n# Import Modules\nimport time\nfrom time import sleep\nimport serial\nimport re\nimport datetime\nimport math\nfrom gpio import weather_on\nfrom gpio import weather_off\nfrom gpio import is_on_checker\nimport subprocess\nfrom execp import printf\nimport traceback\nfrom onboard_device import get_battery_current\n# Class that will average the data for 2 minutes every 10 seconds at a speciic time every hour\n\n\nclass Average_Reading():\n def read_data(self):\n try:\n # Turn on Weather Station\n weather_on(1)\n sleep(60)\n # Read in the weather sensor data and write to an ascii text file\n port = serial.Serial(\"/dev/ttyS5\")\n port.baudrate = 115200\n port.timeout = 60\n except:\n print(\"Problem with port 5 or problem with power to the vaisala\")\n traceback.print_exc(\n file=open(\"/media/mmcblk0p1/logs/system.log\", \"a+\"))\n else:\n t = 0\n data = None\n # Read composite data message (all readings) every 10 seconds for 2 minutes and write to temporary ascii text file\n while t <= 120:\n with open(\"/media/mmcblk0p1/logs/weather_data_ASCII_schedule.log\", \"a+\") as raw_data:\n port.flushInput()\n data = port.readline()\n if data is None or data == \"\":\n printf(\"Vaisala could not take reading. 
Got empty data\")\n break\n raw_data.write(data)\n sleep(10)\n t = t+10\n finally:\n # Turn off Weather Station\n port.close()\n weather_off(1)\n\n def clean_data(self):\n try:\n # put all the mesaurements into a matrix (array of arrays)\n float_array_final = []\n string_array_final = []\n with open(\"/media/mmcblk0p1/logs/weather_data_ASCII_schedule.log\", \"r\") as f:\n for line in f:\n if \"0R0\" in line:\n string_array_raw = re.findall(\n r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n for i in range(0, len(string_array_raw)):\n string_array_raw[i] = float(string_array_raw[i])\n string_array_final = string_array_raw[2:]\n float_array_final.append(string_array_final)\n except:\n printf('Failed to acquire Wather station data or got empty array')\n traceback.print_exc(\n file=open(\"/media/mmcblk0p1/logs/system.log\", \"a+\"))\n\n finally:\n # Erase the tempoerary ascii data file\n subprocess.call(\n \"rm /media/mmcblk0p1/logs/weather_data_ASCII_schedule.log\", shell=True)\n return string_array_final, float_array_final\n\n def average_data(self):\n # Call first two functions in correct order\n try:\n self.read_data()\n string_array_final, float_array_final = self.clean_data()\n # average the corresponding elements and output a sinlge array of numbers\n data_array_final = []\n for j in range(0, len(string_array_final)):\n numbers_sum = 0\n numbers_divide = 0\n for k in range(0, len(float_array_final)):\n numbers_sum = numbers_sum + float_array_final[k][j]\n numbers_divide = numbers_sum/(len(float_array_final))\n data_array_final.append(round(numbers_divide, 3))\n # Write the averaged array elements to a final log file - append\n now = datetime.datetime.now()\n with open(\"/media/mmcblk0p1/logs/weather_data.log\", \"a+\") as hourly:\n hourly.write(\"Current Date and Time: \" +\n now.strftime(\"%Y-%m-%d %H:%M:%S\\n\"))\n hourly.write(\"Wind Direction Average (Degrees): \" +\n str(data_array_final[0]) + \".\\n\")\n hourly.write(\"Wind Speed Average (m/s): \" +\n str(data_array_final[1]) + \".\\n\")\n hourly.write(\"Air Temperature (C): \" +\n str(data_array_final[2]) + \".\\n\")\n hourly.write(\"Relative Humidity (%RH): \" +\n str(data_array_final[3]) + \".\\n\")\n hourly.write(\"Air Pressure (hPa): \" +\n str(data_array_final[4]) + \".\\n\")\n hourly.write(\"Rain Accumulation (mm): \" +\n str(data_array_final[5]) + \".\\n\")\n hourly.write(\"Rain Duration (s): \" +\n str(data_array_final[6]) + \".\\n\")\n hourly.write(\"Rain Intensity (mm/h): \" +\n str(data_array_final[7]) + \".\\n\")\n hourly.write(\"Rain Peak Intensity (mm/h): \" +\n str(data_array_final[11]) + \".\\n\")\n hourly.write(\"Hail Accumulation (hits/cm^2): \" +\n str(data_array_final[8]) + \".\\n\")\n hourly.write(\"Hail Duration (s): \" +\n str(data_array_final[9]) + \".\\n\")\n hourly.write(\"Hail Intensity (hits/cm^2/hour): \" +\n str(data_array_final[10]) + \".\\n\")\n hourly.write(\"Hail Peak Intensity (hits/cm^2/hour): \" +\n str(data_array_final[12]) + \".\\n\")\n hourly.write(\"Vaisala Heating Temperature (C): \" +\n str(data_array_final[13]) + \".\\n\")\n hourly.write(\"Vaisala Heating Voltage (V): \" +\n str(data_array_final[14]) + \".\\n\")\n hourly.write(\"Vaisala Supply Voltage (V): \" +\n str(data_array_final[15]) + \".\\n\\n\\n\")\n except:\n printf('Fail to parser vaisala data, maybe got an empty array')\n traceback.print_exc(\n file=open(\"/media/mmcblk0p1/logs/system.log\", \"a+\"))\n\n\n# Class that will allow the user to access specific weather data points whenever needed\nclass Live_Data():\n def read_data(self):\n 
try:\n is_on = is_on_checker(0, 6)\n if not is_on:\n # Turn on Weather Station\n weather_on(1)\n sleep(10)\n # Read lines from port\n port = serial.Serial(\"/dev/ttyS5\")\n port.baudrate = 115200\n except:\n print(\"Problem with port 5 or problem with power to the vaisala\")\n else:\n t = 0\n # Take data for 5 seconds to make sure that a composite data message has time to send from the Vaisala\n while t <= 5:\n with open(\"/media/mmcblk0p1/logs/weather_data_ASCII_live.log\", \"a+\") as raw_data:\n port.flushInput()\n data = port.readline()\n raw_data.write(data)\n sleep(1)\n t = t+1\n finally:\n if not is_on:\n # Turn off Weather Station\n port.close()\n weather_off(1)\n\n def clean_data(self):\n try:\n self.read_data()\n string_array_final = []\n with open(\"/media/mmcblk0p1/logs/weather_data_ASCII_live.log\", \"r\") as f:\n # only take the last 0R0 line of the 5 - second data collection interval for translation\n for line in f:\n if \"0R0\" in line:\n string_array_raw = re.findall(\n r\"[-+]?\\d*\\.\\d+|\\d+\", line)\n for i in range(0, len(string_array_raw)):\n string_array_raw[i] = float(string_array_raw[i])\n string_array_final = string_array_raw[2:]\n finally:\n # Erase the temporary ascii text file\n subprocess.call(\n \"rm /media/mmcblk0p1/logs/weather_data_ASCII_live.log\", shell=True)\n return string_array_final\n\n def weather_all(self):\n # Print all the weather data\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Wind Direction Average (Degrees): \" +\n str(string_array_final[0]) + \".\")\n print(\"Wind Speed Average (m/s): \" + str(string_array_final[1]) + \".\")\n print(\"Air Temperature (C): \" + str(string_array_final[2]) + \".\")\n print(\"Relative Humidity (%RH): \" + str(string_array_final[3]) + \".\")\n print(\"Air Pressure (hPa): \" + str(string_array_final[4]) + \".\")\n print(\"Rain Accumulation (mm): \" + str(string_array_final[5]) + \".\")\n print(\"Rain Duration (s): \" + str(string_array_final[6]) + \".\")\n print(\"Rain Intensity (mm/h): \" + str(string_array_final[7]) + \".\")\n print(\"Rain Peak Intensity (mm/h): \" +\n str(string_array_final[11]) + \".\")\n print(\"Hail Accumulation (hits/cm^2): \" +\n str(string_array_final[8]) + \".\")\n print(\"Hail Duration (s): \" + str(string_array_final[9]) + \".\")\n print(\"Hail Intensity (hits/cm^2/hour): \" +\n str(string_array_final[10]) + \".\")\n print(\"Hail Peak Intensity (hits/cm^2/hour): \" +\n str(string_array_final[12]) + \".\")\n print(\"Vaisala Heating Temperature (C): \" +\n str(string_array_final[13]) + \".\")\n print(\"Vaisala Heating Voltage (V): \" +\n str(string_array_final[14]) + \".\")\n print(\"Vaisala Supply Voltage (V): \" +\n str(string_array_final[15]) + \".\\n\")\n\n def wind_direction(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Wind Direction Average (Degrees): \" +\n str(string_array_final[0]) + \".\\n\")\n\n def wind_speed(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Wind Speed Average (m/s): \" +\n str(string_array_final[1]) + \".\\n\")\n\n def air_temperature(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Air 
Temperature (C): \" + str(string_array_final[2]) + \".\\n\")\n\n def humidity(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Relative Humidity (%RH): \" + str(string_array_final[3]) + \".\\n\")\n\n def pressure(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Air Pressure (hPa): \" + str(string_array_final[4]) + \".\\n\")\n\n def rain_accumulation(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Rain Accumulation (mm): \" + str(string_array_final[5]) + \".\\n\")\n\n def rain_duration(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Rain Duration (s): \" + str(string_array_final[6]) + \".\\n\")\n\n def rain_intensity(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Rain Intensity (mm/h): \" + str(string_array_final[7]) + \".\\n\")\n\n def rain_peak_intensity(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Rain Peak Intensity (mm/h): \" +\n str(string_array_final[11]) + \".\\n\")\n\n def hail_accumulation(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Hail Accumulation (hits/cm^2): \" +\n str(string_array_final[8]) + \".\\n\")\n\n def hail_duration(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Hail Duration (s): \" + str(string_array_final[9]) + \".\\n\")\n\n def hail_intensity(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Hail Intensity (hits/cm^2/hour): \" +\n str(string_array_final[10]) + \".\\n\")\n\n def hail_peak_intensity(self):\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Hail Peak Intensity (hits/cm^2/hour): \" +\n str(string_array_final[12]) + \".\\n\")\n\n def vaisala_unit(self):\n # Print the 3 vaisala unit data points\n string_array_final = self.clean_data()\n now = datetime.datetime.now()\n print(\"\\nCurrent Date and Time: \" + now.strftime(\"%Y-%m-%d %H:%M:%S\"))\n print(\"Vaisala Heating Temperature (C): \" +\n str(string_array_final[13]) + \".\")\n print(\"Vaisala Heating Voltage (V): \" +\n str(string_array_final[14]) + \".\")\n print(\"Vaisala Supply Voltage (V): \" +\n str(string_array_final[15]) + \".\\n\")\n\n\n# Main function\nif __name__ == \"__main__\":\n # if script is called then start the data averaging process\n Avg_Reading = Average_Reading()\n 
Avg_Reading.average_data()\n","repo_name":"siar7178/amigos3","sub_path":"codes/python/vaisala.py","file_name":"vaisala.py","file_ext":"py","file_size_in_byte":14293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"36613176323","text":"from django.urls import path\nfrom . import views\n\napp_name = 'Game'\nurlpatterns = [\n    path('', views.home_view, name='home'),\n    path('game/', views.game_view, name='game'),\n    path('result/', views.result_view, name='result'),\n    path('table/', views.table_view, name='table'),\n    path('leave/', views.leave_game, name='leave'),\n]\n","repo_name":"Mohammad9050/RPS-Game","sub_path":"Game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"71025277206","text":"import math\n\nstudents = int(input())\nlectures = int(input())\nbonus = int(input())\nbonuses = []\nattendances = []\n\nfor i in range(students):\n    attendance = int(input())\n    attendances.append(attendance)\n    total_bonus = attendance / lectures * (5 + bonus)\n    bonuses.append(math.ceil(total_bonus))\nprint(f'Max Bonus: {max(bonuses)}.')\nmax_index = bonuses.index(max(bonuses))\nprint(f'The student has attended {attendances[max_index]} lectures.')\n# print(bonuses)\n# print(attendances)\n","repo_name":"BuragaIonut/SoftUni","sub_path":"SoftUni Exercices/P1.py","file_name":"P1.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73742106006","text":"from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.UserParam import UserSettableParameter\nfrom mesa.visualization.modules import (\n    CanvasGrid,\n    ChartModule,\n    # BarChartModule,\n    PieChartModule\n)\nimport mesa_model\nimport sys\nimport constants\nfrom threading import Thread\n\nimport logging\nlogging.basicConfig(\n    level=constants.LOG_LEVEL,\n    filename='logs/mesa_model.log',\n    filemode='w',\n    format=\"%(name)s %(asctime)s [%(levelname)s] %(message)s\",\n    datefmt=\"%H:%M:%S\",\n)\nlogger = logging.getLogger(__name__)\n\nSIZE_X = constants.SIZE\nSIZE_Y = SIZE_X\nPIXELS_X = constants.PIXELS\nPIXELS_Y = PIXELS_X // 2\n\nNUMBER_OF_AGENTS = constants.TOTAL_NUMBER_OF_AGENTS\n\nMAX_NUMBER_AGENTS = (SIZE_X ** 2) // 5\n\n\nRED = \"#ff0000\"\nORANGE = \"#ff9900\"\nBROWN = \"#993300\"\nBLUE = \"#3399ff\"\nGREEN = \"#33cc33\"\nDARK_BLUE = \"#0000ff\"\n\n\ndef agent_portrayal(agent) -> dict:\n    \"\"\"Changes the visualization of each agent based on its health_status.\n\n    Args:\n        agent (SimulationAgent): Agent from the simulation.\n\n    Returns:\n        dict: portrayal\n    \"\"\"\n    portrayal = {\"Shape\": \"circle\", \"Filled\": \"true\", \"r\": 0.8}\n\n    if agent.get_health_status() == constants.SICK:\n        portrayal[\"Color\"] = RED\n        portrayal[\"Layer\"] = 5\n        portrayal[\"r\"] = 0.6\n    elif agent.get_health_status() == constants.ASYMPTOMATIC:\n        portrayal[\"Color\"] = ORANGE\n        portrayal[\"Layer\"] = 4\n        portrayal[\"r\"] = 0.5\n    elif agent.get_health_status() == constants.WITH_DISEASES_SEQUELAES:\n        portrayal[\"Color\"] = BROWN\n        portrayal[\"Layer\"] = 2\n        portrayal[\"r\"] = 0.6\n    elif agent.get_health_status() == constants.TOTAL_RECOVERY:\n        portrayal[\"Color\"] = BLUE\n        portrayal[\"Layer\"] = 2\n    elif agent.get_health_status() == constants.HEALTHY:\n        portrayal[\"Color\"] = GREEN\n        portrayal[\"Layer\"] = 4\n        portrayal[\"r\"] = 1\n    else:\n        portrayal[\"Color\"] = \"white\"\n        
portrayal[\"Layer\"] = 1\n portrayal[\"r\"] = 0.1\n\n return portrayal\n\n\ndef run_simulation() -> None:\n \"\"\"Runs the simulation.\n \"\"\"\n logger.info(f\"Number of Agents: {NUMBER_OF_AGENTS}\")\n\n grid = CanvasGrid(agent_portrayal, SIZE_X, SIZE_Y, PIXELS_X, PIXELS_Y)\n\n chart_cumulatives = ChartModule([\n {\"Label\": \"Sick Agents\",\n \"Color\": \"RED\"},\n {\"Label\": \"Recovered Agents\",\n \"Color\": BROWN},\n {\"Label\": \"Dead Agents\",\n \"Color\": \"black\"},\n {\"Label\": \"Healthy Agents\",\n \"Color\": GREEN},\n {\"Label\": \"Quarantine Agents\",\n \"Color\": DARK_BLUE}\n ],\n # canvas_width=constants.ALL_DATA_PLOT_FIG_SIZE_X,\n # declaring both height and width yields some kind of a bug where the values are random\n canvas_height=constants.ALL_DATA_PLOT_FIG_SIZE_Y,\n data_collector_name='datacollector_currents_prcntg')\n\n chart_dailys = ChartModule([\n {\"Label\": \"Infected Agents\",\n \"Color\": \"RED\"},\n {\"Label\": \"Recovered Agents\",\n \"Color\": BROWN},\n {\"Label\": \"Dead Agents\",\n \"Color\": \"black\"},\n {\"Label\": \"Quarantine Agents\",\n \"Color\": DARK_BLUE}\n ],\n # canvas_width=constants.ALL_DATA_PLOT_FIG_SIZE_X,\n # declaring both height and width yields some kind of a bug where the values are random\n canvas_height=constants.ALL_DATA_PLOT_FIG_SIZE_Y,\n data_collector_name='datacollector_dailys')\n\n pie_chart_cumulatives = PieChartModule([\n {\"Label\": \"Recovered Agents\",\n \"Color\": DARK_BLUE},\n {\"Label\": \"Dead Agents\",\n \"Color\": \"black\"},\n {\"Label\": \"Healthy Agents\",\n \"Color\": GREEN}\n ],\n data_collector_name='datacollector_cumulatives_prcntg')\n\n build_server_sim(grid, chart_cumulatives,\n chart_dailys, pie_chart_cumulatives)\n\n\ndef build_server_sim(*visualizations) -> None:\n \"\"\"Builds and runs the server to visualize the simulation and charts.\n \"\"\"\n global NUMBER_OF_AGENTS\n if NUMBER_OF_AGENTS > MAX_NUMBER_AGENTS:\n NUMBER_OF_AGENTS = MAX_NUMBER_AGENTS\n\n model_params = {\n\n \"width\": UserSettableParameter(\n \"slider\",\n \"Simulation Width\",\n SIZE_X, # default\n 50, # min\n 150, # max\n 1, # step\n description=\"Choose the simulation's Width\",\n ),\n \"height\": UserSettableParameter(\n \"slider\",\n \"Simulation Height\",\n SIZE_Y, # default\n 50, # min\n 150, # max\n 1, # step\n description=\"Choose the simulation's Height\",\n ),\n \"number_agents\": UserSettableParameter(\n \"slider\",\n \"Number of agents\",\n NUMBER_OF_AGENTS, # default\n 10, # min\n MAX_NUMBER_AGENTS, # max\n 1, # step\n description=\"Choose how many agents to include in the model\",\n ),\n \"travelling_agents\": UserSettableParameter(\n \"slider\",\n \"Max number of travelling agents by day\",\n constants.TRAVELLING_NUMBER_OF_AGENTS, # default\n 0, # min\n MAX_NUMBER_AGENTS * 0.2, # max\n 1, # step\n description=\"Choose how many agents can travel into the simulation in each day\",\n ),\n \"vaccination_prcntg\": UserSettableParameter(\n \"slider\",\n \"Percentage of agents vaccinated each day\",\n constants.VACCINATED_PRCNT_OF_AGENTS, # default\n 0, # min\n 1, # max\n 0.05, # step\n description=\"Choose the percentage of agents are vaccinated in each day\",\n ),\n \"text\": UserSettableParameter(\n 'static_text',\n value=\"Setting the simulation to use the static beginning. 
If it is `ON`, you can adjust the remaining parameters and press `Reset`\"\n ),\n \"static\": UserSettableParameter('checkbox', 'Simulation with static beginning', value=True),\n \"text\": UserSettableParameter( # can be used only once\n 'static_text',\n value=\"Setting the simulation to use the static beginning. If it is `ON`, you can adjust the remaining parameters and press `Reset`\"\n ),\n \"sick_p\": UserSettableParameter(\n \"slider\",\n \"Percentage of infected (Sick) agents\",\n constants.SICK_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of infected (Sick) agents in the model\",\n ),\n \"aymp_p\": UserSettableParameter(\n \"slider\",\n \"Percentage of infected (Asymptomatic) agents\",\n constants.ASYMP_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of infected (Asymptomatic) agents in the model\",\n ),\n \"imr_immune_p\": UserSettableParameter(\n \"slider\",\n \"(IMR) Percentage of immune agents\",\n constants.IMMMUNE_IMR_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of immune agents in the model\",\n ),\n \"imr_asymp_p\": UserSettableParameter(\n \"slider\",\n \"(IMR) Percentage of asymptomatic agents\",\n constants.ASYMP_IMR_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of asymptomatic agents in the model\",\n ),\n \"imr_mod_p\": UserSettableParameter(\n \"slider\",\n \"(IMR) Percentage of moderately infected agents\",\n constants.MOD_IMR_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of moderately infected agents in the model\",\n ),\n \"imr_severe_p\": UserSettableParameter(\n \"slider\",\n \"(IMR) Percentage of severe infected agents\",\n constants.SEVERE_IMR_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of severe infected agents in the model\",\n ),\n \"imr_dead_p\": UserSettableParameter(\n \"slider\",\n \"(IMR) Percentage of dead agents\",\n constants.DEAD_IMR_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of dead agents in the model\",\n ),\n \"wearing_mask\": UserSettableParameter(\n \"slider\",\n \"Percentage of agents wearing a mask\",\n constants.AGENTS_WEARING_MASK_PRCNTG,\n 0,\n 1,\n 0.1,\n description=\"Choose how many percentage of agents wearing a mask in the model\",\n )\n }\n\n server = ModularServer(mesa_model.SimulationModel,\n visualizations, # list\n \"COVID-19 Simulation\",\n model_params) # model parameters\n\n server.port = 8521 # The default\n server.launch()\n\n\nif __name__ == \"__main__\":\n run_simulation()\n","repo_name":"jolasman/VIRUS_SIMULATION","sub_path":"src/mesa_model_viz.py","file_name":"mesa_model_viz.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"} +{"seq_id":"9322639504","text":"#!/usr/bin/env python3\nimport subprocess as sp\nimport sys\nimport os\nimport numpy as np\nfrom multiprocessing import Process\n\nscenario_dir = \"../deltahq_distributed_json\"\n\n\ndef run_deltahq(idx):\n command = [\"./deltaHQ\", \"-t\", \"1\", \"-i\", str(idx), \"100node.json\", \"-L\", \"10000\"]\n result = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=False)\n print(idx, result)\n for line in iter(result.stdout.readline, b''):\n # print(line)\n print(idx, line.rstrip().decode(\"utf8\"))\n\n # for line in iter(result.stderr.readline, b''):\n # print(line.rstrip().decode(\"utf8\"))\n\n\ndef make_id_list(node_number, process_number, machine_id, numbering):\n machine_number = int(node_number / 
process_number)\n machine_number_mod = node_number % process_number\n\n print(\"id(%d) machine number(%d, mod=%d)\" % (machine_id, machine_number, machine_number_mod))\n\n if machine_number <= machine_id:\n sys.stderr.write(\"-- Error too big machine id\\n\")\n return\n\n own_ids = []\n if numbering == \"l\":\n own_ids = []\n\n mmod = machine_number_mod\n idx = machine_id * process_number\n\n if mmod <= machine_id:\n idx = idx + mmod\n mmod = 0\n own_ids.append(idx)\n else:\n if machine_id != 0:\n idx = idx + 1\n own_ids.append(idx)\n\n for i in range(1, process_number):\n idx = idx + 1\n own_ids.append(idx)\n\n if 0 < mmod:\n idx = idx + 1\n own_ids.append(idx)\n\n if numbering == \"L\":\n own_ids.append(machine_id)\n mmod = machine_number_mod\n\n if mmod <= machine_id:\n mmod = 0\n for i in range(1, process_number):\n idx = own_ids[-1] + machine_number\n own_ids.append(idx)\n\n if 0 < mmod:\n idx = own_ids[-1] + machine_number\n own_ids.append(idx)\n\n return own_ids\n\n\ndef execute():\n pass\n\n\ndef run(node_number, process_number, machine_id, numbering):\n\n os.makedirs(scenario_dir, exist_ok=True)\n\n own_ids = np.array(make_id_list(node_number, process_number, machine_id, numbering))\n\n processes = []\n for idx in own_ids:\n processes.append(Process(target=run_deltahq, args=(idx,)))\n processes[-1].start()\n\n command = [\"./measure.py\", \"100node.json\", \"100\", \"0\", \"1\", \"r\"]\n p1 = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE)\n\n\n\n command = [\"./location_info_base\", \"-A\", \"172.16.73.255\", \"-p\", \"10000\", \"100node.json\", \"-a\", \"t\"]\n p2 = sp.Popen(command, stdin=p1.stdout, stdout=sp.PIPE, stderr=sp.PIPE)\n\n p1.stdout.close()\n\n output = p2.communicate()\n # print(output)\n for line in output:\n print(line.rstrip().decode(\"utf8\"))\n\n\n for proc in processes:\n proc.join()\n\n\n\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n sys.stderr.write(\"set argument\\n\")\n sys.stderr.write(\" $1: node number\\n\")\n sys.stderr.write(\" $2: process number\\n\")\n sys.stderr.write(\" $3: machine id ( 0 ~ n )\\n\")\n sys.stderr.write(\" $4: l or L\\n\")\n sys.exit(1)\n\n node_number = int(sys.argv[1])\n process_number = int(sys.argv[2])\n machine_id = int(sys.argv[3])\n numbering = sys.argv[4]\n\n # print(node_number, process_number, machine_id, numbering)\n\n run(node_number, process_number, machine_id, numbering)\n","repo_name":"sarub0b0/hashmot-deltahq","sub_path":"bin/experiment/experiment_distributed_deltahq.py","file_name":"experiment_distributed_deltahq.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"1992669188","text":"import pandas as pd\nimport numpy as np\nimport requests\nimport os\nimport logging\nimport json\nfrom collections import defaultdict\n\n\n# Defining all the logger functions also the log file which will catching all the information and Exception\n\nlogger = logging.getLogger(__name__)\nfileHandler = logging.FileHandler(\"data_transform.log\", mode=\"w\")\nfileHandler.setLevel(logging.INFO)\nfileFormat = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nfileHandler.setFormatter(fileFormat)\nlogger.addHandler(fileHandler)\n\n\ndata_list = []\n\n\ndef data_handler(infile):\n try:\n resp = requests.get(infile)\n data = json.loads(resp.text)\n for a, b in data[\"data\"]:\n print(a, \".......\", b)\n data_set = {\"Applicant_id\": b, \"TermDepositStatus\": a}\n data_list.append(data_set)\n raise 
NotImplementedError\n except Exception as e:\n logger.error(e)\n return data_list\n","repo_name":"Biswajit7890/Simple-DataHandling","sub_path":"data_handler/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39632405580","text":"import torch\n\nfrom shap_e.diffusion.sample import sample_latents\nfrom shap_e.diffusion.gaussian_diffusion import diffusion_from_config\nfrom shap_e.models.download import load_model, load_config\nfrom shap_e.util.notebooks import create_pan_cameras, decode_latent_images, gif_widget\nfrom shap_e.util.notebooks import decode_latent_mesh\n\nimport time\nimport psutil\nimport subprocess\nimport re\nimport matplotlib.pyplot as plt\nimport threading\n\ndef get_gpu_memory_usage():\n output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'])\n memory_used = re.findall(r'\\d+', output.decode('utf-8'))\n return int(memory_used[0])\n \ndef get_gpu_utilization():\n output = subprocess.check_output(['nvidia-smi', '--query-gpu=utilization.gpu', '--format=csv,nounits,noheader'])\n gpu_util = re.findall(r'\\d+', output.decode('utf-8'))\n return int(gpu_util[0])\n\ndef get_volatile_gpu_memory():\n output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.total,memory.free', '--format=csv,nounits,noheader'])\n memory_info = re.findall(r'\\d+', output.decode('utf-8'))\n memory_total = int(memory_info[0])\n memory_free = int(memory_info[1])\n memory_used = memory_total - memory_free\n return memory_used\n\ndef get_ecc_memory():\n output = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.ecc.errors', '--format=csv,nounits,noheader'])\n ecc_memory = re.findall(r'\\d+', output.decode('utf-8'))\n return int(ecc_memory[0])\n\ndef plot_memory_usage(memory_usage_data):\n \"\"\"\n Plot the memory usage graph.\n \"\"\"\n timestamps = [t for t, _ in memory_usage_data]\n memory_usages = [m for _, m in memory_usage_data]\n\n plt.plot(timestamps, memory_usages)\n plt.xlabel('Time (s)')\n plt.ylabel('Memory Usage (MiB)')\n plt.title('GPU Memory Usage')\n plt.grid(True)\n plt.show()\n \ndef plot_memory_util(util_data):\n \"\"\"\n Plot the memory utilization graph.\n \"\"\"\n timestamps = [t for t, _ in util_data]\n memory_usages = [m for _, m in util_data]\n\n plt.plot(timestamps, memory_usages)\n plt.xlabel('Time (s)')\n plt.ylabel('GPU Utilization %')\n plt.title('GPU Utilization')\n plt.grid(True)\n plt.show()\n \nclass GPU_moniter:\n \"\"\"\n Monitor the GPU memory usage every 'interval' seconds until the program completes.\n \"\"\"\n def __init__(self, interval=1):\n \"\"\"Initialize GPU_moniter.\"\"\"\n self.stop_flag=False\n self.memory_usage_data = []\n self.util_data = []\n self.vol_mem_usage_data = []\n # self.ecc_mem_data = []\n self.start_time = time.time()\n self.interval = interval\n # Create and start the monitoring thread\n self.monitor_thread = threading.Thread(target=self.monitor_memory)\n self.monitor_thread.start()\n print(\"Start GPU Moniter\")\n \n def monitor_memory(self):\n while True:\n memory_usage = get_gpu_memory_usage()\n util_mem_usage = get_gpu_utilization()\n vol_mem_usage = get_volatile_gpu_memory()\n # gcc_mem_usage = get_ecc_memory()\n if memory_usage is not None:\n current_time = time.time() - self.start_time\n self.memory_usage_data.append((current_time, memory_usage))\n self.util_data.append((current_time, util_mem_usage))\n 
self.vol_mem_usage_data.append((current_time, vol_mem_usage))\n                # self.ecc_mem_data.append((current_time, gcc_mem_usage))\n                # print(f'Time: {current_time:.2f}s, Memory Usage: {memory_usage} bytes')\n            else:\n                print('Failed to retrieve GPU memory usage.')\n\n            # Check if the program has completed\n            if self.stop_flag:\n                break\n            time.sleep(self.interval)\n    \n    def end_monitor(self):\n        self.stop_flag=True\n        \n        # Wait for the monitoring thread to complete\n        self.monitor_thread.join()\n    \n    def mem_plot(self, mode='mem'):\n        if mode=='mem':\n            plot_memory_usage(self.memory_usage_data)\n        elif mode=='util':\n            plot_memory_util(self.util_data)\n        elif mode=='vol':\n            plot_memory_usage(self.vol_mem_usage_data)\n        # elif mode=='ecc':\n        #     plot_memory_usage(self.ecc_mem_data)\n\ndef main():\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    \n    gpu_memory = get_gpu_memory_usage()\n    \n    xm = load_model('transmitter', device=device)\n    model = load_model('text300M', device=device)\n    diffusion = diffusion_from_config(load_config('diffusion'))\n    \n    old_gpu_memory=gpu_memory\n    gpu_memory = get_gpu_memory_usage()\n    model_gpu_memory = gpu_memory-old_gpu_memory\n    print(f\"GPU Memory Usage for Loading Model: {model_gpu_memory} MiB\")\n    print(f\"Total GPU Memory Usage before diffusion: {gpu_memory} MiB\")\n    \n    print(\"start timing diffusion process\")\n    start_time=time.time()\n    \n    batch_size = 1\n    guidance_scale = 15.0\n    \n    prompts = [\"a shark\"]\n    for prompt in prompts:\n        gpu_memory = get_gpu_memory_usage()\n        print(f\"Current GPU Memory Usage: {gpu_memory} MiB\")\n        \n        # print(\"start timing diffusion process\")\n        start_time=time.time()\n        \n        latents = sample_latents(\n            batch_size=batch_size,\n            model=model,\n            diffusion=diffusion,\n            guidance_scale=guidance_scale,\n            model_kwargs=dict(texts=[prompt] * batch_size),\n            progress=True,\n            clip_denoised=True,\n            use_fp16=True,\n            use_karras=True,\n            karras_steps=64,\n            sigma_min=1e-3,\n            sigma_max=160,\n            s_churn=0,\n        )\n        \n        # print(\"end timing diffusion process\")\n        end_time=time.time()\n        duration=end_time-start_time\n        print(f\"For prompt {prompt}, runtime for diffusion process: {duration} seconds\")\n        \n        start_time=time.time()\n        \n        render_mode = 'nerf' # you can change this to 'stf'\n        size = 64 # this is the size of the renders; higher values take longer to render.\n\n        cameras = create_pan_cameras(size, device)\n        for i, latent in enumerate(latents):\n            images = decode_latent_images(xm, latent, cameras, rendering_mode=render_mode)\n            # display(gif_widget(images))\n        \n        end_time=time.time()\n        duration=end_time-start_time\n        print(f\"runtime for rendering process: {duration} seconds\")\n        \n        gpu_memory = get_gpu_memory_usage()\n        print(f\"Current GPU Memory Usage: {gpu_memory} MiB\")\n    \n    gpu_memory = get_gpu_memory_usage()\n    print(f\"Current GPU Memory Usage: {gpu_memory} MiB\")\n    \nif __name__==\"__main__\":\n    main()","repo_name":"jimmylizheng/shap_e_test","sub_path":"shap_e/examples/latency_eval.py","file_name":"latency_eval.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12849947824","text":"def lrc2subRip(lrcLyrics, songLength):\n    res = []\n    sp = []\n    for l in lrcLyrics:\n        splitted = l.split(\" \", 1)\n        text = splitted[1] if len(splitted) > 1 else \"\"\n        time = getsubRipTime(splitted[0])\n        sp.append((time, text))\n    print(sp)\n\n    counter = 1\n    for i in range(len(sp)-1):\n        res.append(str(counter))\n        res.append(\"{} --> {}\".format(sp[i][0], sp[i+1][0]))\n        
res.append(sp[i][1])\n        res.append(\"\")\n        counter += 1\n\n    h,m,s = map(int, songLength.split(\":\"))\n    endTimeStr = \"{:02}:{:02}:{:02},{:03}\".format(h, m, s, 0)\n    res.append(str(counter))\n    res.append(\"{} --> {}\".format(sp[i+1][0], endTimeStr))\n    res.append(sp[i+1][1])\n    return res\n\n\ndef getsubRipTime(lrcTime):\n    minute = int(lrcTime[1:3])\n    hour = minute // 60\n    minute = minute % 60\n    second = int(lrcTime[4:6])\n    milli = int(lrcTime[7:9]) * 10\n    return \"{:02}:{:02}:{:02},{:03}\".format(hour, minute, second, milli)\n\n","repo_name":"netor27/codefights-solutions","sub_path":"arcade/python/arcade-theCore/18_SecretArchives/147_LRCtoSubRip.py","file_name":"147_LRCtoSubRip.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2746278069","text":"import math\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom transformers import (\n    pipeline,\n    AutoTokenizer,\n    TFAutoModelForSeq2SeqLM,\n)\n\n\ntqdm.pandas()\n\nMODELS_DIR = \"./models\"\n\n\ndef load_model(summarizer_path: str, tokenizer_path: str):\n    \"\"\"Load the trained summarizer model and tokenizer.\n\n    Both models, the summarizer and the tokenizer, were trained\n    with our data.\n    \"\"\"\n    summarizer = TFAutoModelForSeq2SeqLM.from_pretrained(summarizer_path)\n    tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)\n    return summarizer, tokenizer\n\n\ndef create_summarization_pipeline():\n    \"\"\"Create a pipeline based on our model and tokenizer.\"\"\"\n    train_model_path = os.path.join(MODELS_DIR, 'train_model')\n    tokenizer_path = os.path.join(MODELS_DIR, 'train_tokenizer')\n    model, tokenizer = load_model(train_model_path, tokenizer_path)\n    return pipeline('summarization', model=model, tokenizer=tokenizer)\n\n\ndef load_tale(tale_path: str) -> tuple[str, pd.DataFrame]:\n    \"\"\"Load a tale from the given file.\n\n    Load the tale in .txt format and index it for\n    later grouping.\n    \"\"\"\n    with open(tale_path, 'r') as f:\n        # Read the .txt file.\n        lines = f.readlines()\n\n    # Title is the first line of the file.\n    title = lines[0]\n\n    # Each paragraph is a row of DataFrame.\n    tale = pd.DataFrame({'story': lines[1:]})\n    tale = tale[tale.story != '\\n'].reset_index()\n\n    return title, tale\n\n\ndef reduce_tale(tale: pd.DataFrame, num: int) -> pd.DataFrame:\n    \"\"\"Reduce the length of the tale for summarization.\n\n    Add index column to a tale for merge. 
The index column\n looks like: ['0', '0', '0', '1', '1', '1'...].\n \"\"\"\n # Number of rows the DataFrame will have after merging.\n rows = math.ceil(int(len(tale)) / num)\n\n # Create a list of index and add it as a new column.\n index_list = []\n for i_row in range(rows):\n n = [str(i_row)] * num\n index_list = index_list + n\n\n tale['index'] = np.array(index_list)[:len(tale)]\n\n return tale\n\n\ndef prepare_tale(tale: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Prepare tale for summarization.\"\"\"\n # Create a new DataFrame.\n prepared_tale = pd.DataFrame(columns=['story', 'summ'])\n\n # Merge rows by index to reduce length of df\n prepared_tale['story'] = tale.groupby(['index'])['story'].apply('\\n\\n'.join)\n\n # Quick clean the story.\n prepared_tale['story'] = prepared_tale['story'].str.replace(\"’\", \"'\")\n prepared_tale['story'] = prepared_tale['story'].str.replace(\"‘\", \"'\")\n\n return prepared_tale\n\n\ndef apply_summarizer(summarizer, text: str) -> str:\n \"\"\"Apply the summarizer to the given text.\"\"\"\n result_summarizer = summarizer(text)\n return result_summarizer[0][\"summary_text\"]\n\n\ndef main(tale_path: str):\n \"\"\"Execute the main process.\"\"\"\n\n title, tale = load_tale(tale_path)\n reduced_tale = reduce_tale(tale, 5)\n prepared_tale = prepare_tale(reduced_tale)\n summarizer = create_summarization_pipeline()\n\n # 1º- Apply summarizer to each row.\n for i in tqdm(range(len(prepared_tale))):\n prepared_tale['summ'].iloc[i] = apply_summarizer(summarizer, prepared_tale['story'].iloc[i])\n\n # Add column that indicates length.\n prepared_tale['length_story'] = prepared_tale['story'].str.len()\n prepared_tale['length_summ'] = prepared_tale['summ'].str.len()\n\n # 2º- Apply summarizer to previous summarizer output.\n prepared_tale['summ_2'] = ''\n\n for i in tqdm(range(len(prepared_tale))):\n prepared_tale['summ_2'].iloc[i] = apply_summarizer(summarizer, prepared_tale['summ'].iloc[i])\n\n # Add length column.\n prepared_tale['length_summ_2'] = prepared_tale['summ_2'].str.len()\n\n # 3º- Apply summarizer to previous summarizer output.\n prepared_tale['summ_3'] = ''\n for i in tqdm(range(len(prepared_tale))):\n prepared_tale['summ_3'].iloc[i] = apply_summarizer(summarizer, prepared_tale['summ_2'].iloc[i])\n\n # Add length column.\n prepared_tale['length_summ_3'] = prepared_tale['summ_3'].str.len()\n\n # Save to csv.\n prepared_tale.to_csv(f'{title}_long.csv', sep='|', encoding='utf-8')\n\n # Create df from df columns.\n final_tale = prepared_tale[['story', 'summ_2']]\n\n # Rename columns.\n final_tale.columns = ['story', 'summ']\n\n # Add title as first row of df.\n title = title.replace(\"\\n\", \"\")\n title = title.replace(\"-\", \" \")\n data = [{'story': title, 'summ': title}]\n final_tale = pd.concat([pd.DataFrame(data), final_tale], ignore_index=True)\n\n # Remove punctuation.\n final_tale['summ'] = final_tale['summ'].str.replace(\",\", \"\")\n final_tale['summ'] = final_tale['summ'].str.replace(\";\", \"\")\n final_tale['summ'] = final_tale['summ'].str.replace(\"\\r\", \"\")\n final_tale['summ'] = final_tale['summ'].str.replace(\"!\", \"\")\n final_tale['summ'] = final_tale['summ'].str.replace(\"-\", \"\")\n final_tale['summ'] = final_tale['summ'].str.replace(\"?\", \"\")\n\n # Save to csv.\n final_tale.to_csv(f'{title}_short.csv', sep='|', encoding='utf-8')\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"SaturdaysAI/Projects","sub_path":"Madrid/July2022/AImagining-tales/src/summarizer/summary_generator.py","file_name":"summary_generator.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"30"} +{"seq_id":"72536988563","text":"\r\n#قم بكتابة function تستقبل متغير من نوع string يعبر عن قيمة ثمانية binary number،\r\n# ثم قم بإرجاع النتيجة بعد التحويل الى قيمة ست عشرية octal number بنوع int\r\ndef bin_to_oct(b):\r\n num_bin = int(b,2)\r\n num_oct = oct(num_bin)\r\n str1 = str(num_oct)#to remove the 0o from 234: 0o234 -> 234\r\n str2 = str1[2:len(str1):1]\r\n return int(str2)\r\n\r\n\r\nprint(bin_to_oct('10011100'))#10011100->234\r\n\r\n","repo_name":"jawaher-alqotym/Mini-Coding-Challenges","sub_path":"CoderHub-Binary_toOctal_asInt.py","file_name":"CoderHub-Binary_toOctal_asInt.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"3859007327","text":"def checking_element(arr,x,k):\n temp=[];flag=0;j=0;k1=k\n print(arr)\n for i in range(len(arr)-1):\n if(k>=len(arr)):\n k=len(arr)\n temp = arr[j:k]\n if(x not in temp):\n flag=1\n return 0\n if(k==len(arr)):\n break\n j=k\n k+=k1\n return 1\narr=[ int(x) for x in input().strip().split()]\nx,k = [ int(i) for i in input().strip().split()]\ndemo = checking_element(arr,x,k)\nif(demo == 1 ):\n print(\"yes\")\nelse:\n print(\"no\") \n","repo_name":"aritra-bose-716/Competetive-Coding","sub_path":"Array/Check_if_a_key_is_present_in_every_segment_of_size_k_in_an_array.py","file_name":"Check_if_a_key_is_present_in_every_segment_of_size_k_in_an_array.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29883094907","text":"import requests\nimport json\nimport time\nimport os\nimport openai\n\nfrom datetime import datetime\nfrom config import BOT_ID, ACCESS_TOKEN, OPENAI_KEY\n\nopenai.api_key = OPENAI_KEY\nopenai.organization = \"org-LdSlWbJ9mw5Y4EqwxL2bRZaj\"\n\n# Function to send a message in the group using a pre-made bot\ndef send_msg(msg):\n url = \"https://api.groupme.com/v3/bots/post\"\n data = {\n \"bot_id\": BOT_ID,\n \"text\": msg,\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n send_txt = requests.post(url, json=data, headers=headers)\n\n# Returns a list of the most recent 20 messages and their corresponding IDs\ndef update_msgs():\n # API call\n msgs = requests.get(\"https://api.groupme.com/v3/groups/95739422/messages?token=\" + ACCESS_TOKEN)\n message_texts = []\n # If call is a success, parse out the messages and IDs into a list\n if msgs.status_code == 200:\n data = msgs.json()\n messages = data[\"response\"][\"messages\"]\n\n message_texts = []\n\n for message in messages:\n text = message.get(\"text\")\n text_id = message.get(\"id\")\n message_texts.append([text_id, text])\n \n return(message_texts)\n \n else:\n print(\"Failed API call\")\n return(message_texts)\n\n\n# This is the main loop. Every 3 seconds check for a new message. If it contains \"Log: \", record the entry \n# into the \"log.csv\" file. 
Ignore all other messages\nlast_id = 0\nwhile(True):\n # API call to get messages\n messages = update_msgs()\n\n if messages != None:\n if last_id != messages[0][0]:\n last_id = messages[0][0]\n parse_check = messages[0][1][0:5]\n if \"log:\" in parse_check.lower():\n # Get date of entry\n current_date = datetime.now()\n formatted_date = current_date.strftime(\"%d %b %Y %H:%M:%S\")\n\n # Format full entry\n full_entry = formatted_date + \", \" + messages[0][1][5:] + \"\\n\"\n print(\"---- [NEW ENTRY] ----\\n\" + full_entry + \"---------\")\n \n # Append entry to local file\n with open(\"log.csv\", \"a+\") as log_file:\n log_file.write(full_entry)\n log_file.seek(0)\n past = log_file.readlines()\n \n # Send message in chat to confirm that the message was seen\n text_in = \"given this:\\n\\n\" + str(past[-20:]) + \"\\n\\nmake a quick disappointed and sarcastic comment about the following: \" + messages[0][1][5:]\n chat_completion = openai.ChatCompletion.create(model=\"gpt-3.5-turbo\", messages=[{\"role\": \"user\", \"content\": text_in}])\n send_msg(chat_completion.choices[0].message.content) \n\n time.sleep(3)\n","repo_name":"pl450b/groupme_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"31016916460","text":"import torch\nimport torch.nn as nn\n\n\nclass IOULoss(nn.Module):\n def __init__(self, reduction=\"none\"):\n super(IOULoss, self).__init__()\n self.reduction = reduction\n\n def forward(self, pred, target):\n raise Exception(\"IOU Loss can not be used!\")\n\n @staticmethod\n def __is_inside(p1, p2, q):\n r = (p2[..., 0] - p1[..., 0]) * (q[..., 1] - p1[..., 1]) - (\n p2[..., 1] - p1[..., 1]) * (\n q[..., 0] - p1[..., 0])\n return r <= 0\n\n @staticmethod\n def __compute_intersection(p1, p2, p3, p4):\n intersection = torch.empty_like(p1)\n\n # !calc m1, b1, m2, b2\n m1 = (p2[..., 1] - p1[..., 1]) / (p2[..., 0] - p1[..., 0])\n b1 = p1[..., 1] - m1 * p1[..., 0]\n # slope and intercept of second line\n m2 = (p4[..., 1] - p3[..., 1]) / (p4[..., 0] - p3[..., 0])\n b2 = p3[..., 1] - m2 * p3[..., 0]\n\n # !make condition tensors\n mask1 = p2[..., 0] == p1[..., 0]\n mask2 = p4[..., 0] == p3[..., 0]\n part1 = mask1 # !if p2[..., 0] == p1[..., 0]:\n part2 = ~mask1 & mask2 # !elif p4[..., 0] == p3[..., 0]:\n part3 = ~mask1 & ~mask2 # !else:\n\n # !part1\n temp = intersection[part1]\n temp[:, 0] = p1[part1][:, 0]\n # y-coordinate of intersection\n temp[:, 1] = m2[part1] * p1[part1][:, 0] + b2[part1]\n intersection[part1] = temp\n\n # !part2\n temp = intersection[part2]\n temp[:, 0] = p3[part2][:, 0]\n # y-coordinate of intersection\n temp[:, 1] = m1[part2] * p3[part2][:, 0] + b1[part2]\n intersection[part2] = temp\n\n # !part3\n temp = intersection[part3]\n # x-coordinate of intersection\n temp[:, 0] = (b2[part3] - b1[part3]) / (m1[part3] - m2[part3])\n # y-coordinate of intersection\n temp[:, 1] = m1[part3] * (b2[part3] - b1[part3]) / (m1[part3] - m2[part3]) + b1[part3]\n intersection[part3] = temp\n # print(intersection.requires_grad)\n # print(temp.requires_grad)\n\n # need to unsqueeze so torch.cat doesn't complain outside func\n return intersection\n\n @staticmethod\n def _clip(subject_polygon, clipping_polygon):\n # it is assumed that requires_grad = True only for clipping_polygon\n # subject_polygon and clipping_polygon are ... x N x 2 and ... 
x M x 2 torch\n # tensors respectively\n\n # !make all tensors on the same device\n device = subject_polygon.device\n final_polygon = torch.clone(subject_polygon)\n point_num = torch.empty(\n *final_polygon.shape[:-2], dtype=torch.long, device=device\n ).fill_(4)\n for i in range(4): # *没用clipping_polygon.shape[-2]),因为确定会有四个结果\n\n # stores the vertices of the next iteration of the clipping procedure\n # final_polygon consists of list of 1 x 2 tensors\n next_polygon = torch.clone(final_polygon)\n next_point_num = torch.clone(point_num)\n # stores the vertices of the final clipped polygon. This will be\n # a K x 2 tensor, so need to initialize shape to match this\n final_polygon = torch.empty(*next_polygon.shape[:-2], 10, 2, device=device)\n point_num = torch.zeros(\n *next_polygon.shape[:-2], device=device, dtype=torch.long\n )\n\n # these two vertices define a line segment (edge) in the clipping\n # polygon. It is assumed that indices wrap around, such that if\n # i = 0, then i - 1 = M.\n # c_edge_start = clipping_polygon[..., i - 1, :]\n # c_edge_end = clipping_polygon[..., i, :]\n\n for j in range(\n next_point_num.max() if next_point_num.numel() > 0 else 0\n ): # *两个四边形最多有8个交点,所以没用next_polygon.shape[-2]。\n # these two vertices define a line segment (edge) in the subject\n # polygon\n mask = next_point_num > j\n index = (next_point_num[mask] + j - 1) % next_point_num[\n next_point_num > j]\n s_edge_start = next_polygon[mask, index]\n s_edge_end = next_polygon[mask][:, j, :]\n c_edge_start = clipping_polygon[mask][:, i - 1, :]\n c_edge_end = clipping_polygon[mask][:, i, :]\n\n final_polygon_this_circle = final_polygon[mask]\n point_num_this_circle = point_num[mask]\n\n condition_1 = IOULoss.__is_inside(c_edge_start, c_edge_end, s_edge_end)\n condition_2 = IOULoss.__is_inside(c_edge_start, c_edge_end, s_edge_start)\n part1 = condition_1 & ~condition_2\n part2 = condition_1\n part3 = ~condition_1 & condition_2\n\n intersection_part1 = IOULoss.__compute_intersection(\n s_edge_start[part1], s_edge_end[part1], c_edge_start[part1],\n c_edge_end[part1]\n )\n final_polygon_this_circle[\n part1, point_num_this_circle[part1]] = intersection_part1.float()\n point_num_this_circle[part1] += 1\n\n final_polygon_this_circle[part2, point_num_this_circle[part2]] = \\\n s_edge_end[part2]\n point_num_this_circle[part2] += 1\n\n intersection_part3 = IOULoss.__compute_intersection(\n s_edge_start[part3], s_edge_end[part3], c_edge_start[part3],\n c_edge_end[part3]\n )\n final_polygon_this_circle[\n part3, point_num_this_circle[part3]] = intersection_part3\n point_num_this_circle[part3] += 1\n\n final_polygon[mask] = final_polygon_this_circle\n point_num[mask] = point_num_this_circle\n # print(f\"i = {i}, j = {j}\", final_polygon)\n return final_polygon, point_num\n\n @staticmethod\n def _calc_area(points, point_nums):\n area = torch.zeros_like(point_nums, dtype=torch.float)\n for i in range(8):\n mask = point_nums > i\n index = (point_nums[mask] + i - 1) % point_nums[point_nums > i]\n area[mask] += torch.stack(\n (points[mask][:, i],\n points[mask, index]), dim=-2\n ).det()\n return area * 0.5\n\n @staticmethod\n def _calc_area_fixnum(points, num=4):\n area = torch.zeros(*points.shape[:-2], dtype=torch.float, device=points.device)\n for i in range(num):\n area += torch.stack(\n (points[..., i, :],\n points[..., i - 1, :]), dim=-2\n ).det()\n return area * 0.5\n\n @staticmethod\n def _get_bounding_box(points):\n box_l = torch.min(points[..., 0], dim=-1).values\n box_r = torch.max(points[..., 0], 
dim=-1).values\n box_t = torch.min(points[..., 1], dim=-1).values\n box_b = torch.max(points[..., 1], dim=-1).values\n box_t_l = torch.stack((box_l, box_t), dim=-1)\n box_b_l = torch.stack((box_l, box_b), dim=-1)\n box_b_r = torch.stack((box_r, box_b), dim=-1)\n box_t_r = torch.stack((box_r, box_t), dim=-1)\n return torch.stack((box_t_l, box_b_l, box_b_r, box_t_r), dim=-2)\n\n @staticmethod\n def _if_concave_quadrangle(points):\n res = torch.ones(*points.shape[:-2], dtype=torch.float, device=points.device)\n for i in range(4):\n res *= torch.stack(\n (points[..., i - 1, :] - points[..., i, :],\n points[..., (i + 1) % 4, :] - points[..., i, :]\n ), dim=-2\n ).det()\n return res < 0\n\n @staticmethod\n def _if_points_right_order(points):\n a = points[..., 0, :]\n b = points[..., 1, :]\n c = points[..., 2, :]\n d = points[..., 3, :]\n v1 = torch.stack((a - b, d - b), dim=-2).det()\n v2 = torch.stack((d - b, c - b), dim=-2).det()\n v3 = torch.stack((b - c, a - c), dim=-2).det()\n v4 = torch.stack((a - c, d - c), dim=-2).det()\n # !cancave rectangles are always in right order.\n return (v1 * v2 > 0) & (v3 * v4 > 0)\n\n @staticmethod\n def __if_in_poly(poly, point):\n num = poly.shape[-2]\n res = torch.zeros(*poly.shape[:-2], num, device=poly.device)\n for i in range(num):\n res[..., i] = torch.stack(\n (point - poly[..., i, :],\n poly[..., (i + 1) % num, :] - poly[..., i, :]), dim=-2\n ).det()\n return ((res > 0).sum(dim=-1) == num) | ((res < 0).sum(dim=-1) == num)\n\n @staticmethod\n def if_inside_bounding_box(point, boxes):\n bounding_box = IOULoss._get_bounding_box(boxes.view(*boxes.shape[:-1], 4, 2))\n return IOULoss.__if_in_poly(bounding_box, point)\n\n @staticmethod\n def get_IOU(pred_box, gt_box, train_mode=True):\n \"\"\"only pred_box needs gard\"\"\"\n # !change the coordinates format from [8] to [4, 2]\n pboxes = torch.stack(pred_box.split(2, dim=-1), dim=-2)\n gboxes = torch.stack(gt_box.split(2, dim=-1), dim=-2)\n if train_mode:\n mask = IOULoss._if_concave_quadrangle(pboxes)\n pboxes[mask] = IOULoss._get_bounding_box(pboxes[mask])\n mask = IOULoss._if_points_right_order(pboxes)\n # ! if not in right order, then exchange the 'c', 'd'\n pboxes[~mask] = pboxes[~mask][:, [0, 1, 3, 2]]\n # ! if still not in right order\n mask = IOULoss._if_points_right_order(pboxes)\n # ! 
then continue exchange the 'b', 'd'\n pboxes[~mask] = pboxes[~mask][:, [0, 2, 1, 3]]\n clipped_res, res_points_num = IOULoss._clip(gboxes, pboxes)\n area_i = torch.abs(IOULoss._calc_area(clipped_res, res_points_num))\n return area_i / (torch.abs(IOULoss._calc_area_fixnum(pboxes)) +\n IOULoss._calc_area_fixnum(gboxes) - area_i + 1e-16)\n else:\n mask1 = IOULoss._if_concave_quadrangle(pboxes)\n mask2 = IOULoss._if_points_right_order(pboxes)\n mask3 = IOULoss._calc_area_fixnum(pboxes) < 0\n clipped_res, res_points_num = IOULoss._clip(gboxes, pboxes)\n area_i = torch.abs(IOULoss._calc_area(clipped_res, res_points_num))\n\n res_iou = area_i / (IOULoss._calc_area_fixnum(pboxes) +\n IOULoss._calc_area_fixnum(gboxes) - area_i + 1e-16)\n res_iou[mask1 | ~mask2 | mask3] = 0\n return res_iou\n\n\nif __name__ == \"__main__\":\n iou = IOULoss()\n # squares\n # subject_polygon = [(-1, 1), (1, 1), (1, -1), (-1, -1)]\n # clipping_polygon = [(0, 0), (0, 2), (2, 2), (2, 0)]\n\n # squares: different order of points\n # subject_polygon = [(-1, -1), (-1, 1), (1, 1), (1, -1)]\n # clipping_polygon = [(2, 0), (0, 0), (0, 2), (2, 2)]\n\n # subject_polygon = torch.tensor(subject_polygon).float()\n # clipping_polygon = torch.tensor(clipping_polygon).float()\n # clipped_polygon, points_num = iou._clip(\n # subject_polygon[None, None, ...],\n # clipping_polygon[None, None, ...]\n # )\n #\n # for i in range(9):\n # if clipped_polygon[points_num == i][:, :i].shape[0] != 0:\n # print(f\"clipped polygon that points num = {i}s:\")\n # print(clipped_polygon[points_num == i][:, :i])\n # print(\"areas = \", iou._calc_area(clipped_polygon, points_num), sep='\\n')\n #\n # points = torch.Tensor(\n # [[171.9181, 373.2570, 171.0784, 384.3878, 189.7984, 387.2672, 190.3968,\n # 375.9795]]\n # )\n # pointx = torch.randn(1, 10, 1) * 10 + 175\n # pointy = torch.randn(1, 10, 1) * 5 + 375\n # print(\n # iou.if_inside_box(\n # torch.stack((pointx, pointy), dim=-1), points[None, ...].expand(1, 10, 8)\n # )\n # )\n\n # ploy_1 = torch.tensor([19, 155, 475, 139, 247, 145, 474, 138]).float().unsqueeze(\n # 0\n # ).unsqueeze(0)\n # ploy_2 = torch.tensor([407, 47, 19, 254, 462, 7, 394, 255]).float().unsqueeze(\n # 0\n # ).unsqueeze(0)\n #\n # ploy_2 = torch.stack(ploy_2.split(2, dim=-1), dim=-2)\n # mask = IOULoss._if_points_right_order(ploy_2)\n # ploy_2[~mask] = ploy_2[~mask][:, [0, 1, 3, 2]]\n # # ! if still not in right order\n # mask = IOULoss._if_points_right_order(ploy_2)\n # # ! then continue exchange the 'b', 'd'\n # ploy_2[~mask] = ploy_2[~mask][:, [0, 2, 1, 3]]\n # if IOULoss._if_concave_quadrangle(ploy_2):\n # ploy_2 = IOULoss._get_bounding_box(ploy_2)\n # if IOULoss._calc_area_fixnum(ploy_2).item() < 0:\n # ploy_2 = ploy_2[..., [3, 2, 1, 0], :]\n #\n # print(iou.get_IOU(ploy_1, ploy_2.view(1, 1, 8)))\n\n for i in range(int(1000)):\n ploy_1 = torch.randint(0, 480, (100, 100, 8)).float()\n ploy_2 = torch.randint(0, 480, (100, 100, 8)).float()\n\n ploy_2 = torch.stack(ploy_2.split(2, dim=-1), dim=-2)\n mask = IOULoss._if_concave_quadrangle(ploy_2)\n ploy_2[mask] = IOULoss._get_bounding_box(ploy_2[mask])\n mask = IOULoss._if_points_right_order(ploy_2)\n ploy_2[~mask] = ploy_2[~mask][:, [0, 1, 3, 2]]\n # ! if still not in right order\n mask = IOULoss._if_points_right_order(ploy_2)\n # ! 
then continue exchange the 'b', 'd'\n ploy_2[~mask] = ploy_2[~mask][:, [0, 2, 1, 3]]\n mask = IOULoss._calc_area_fixnum(ploy_2) < 0\n ploy_2[mask] = ploy_2[mask][:, [3, 2, 1, 0]]\n ploy_2 = ploy_2.view(*ploy_2.shape[:-2], 8)\n\n if i % 100 == 0:\n print(f'now i = {i}')\n\n if (iou.get_IOU(ploy_1, ploy_2, True) < 0).sum() > 0:\n raise Exception\n","repo_name":"paras-zomby/4PointYOLOX","sub_path":"MyYOLOX/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":13937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"10805857012","text":"import webbrowser\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport os\r\nimport datetime\r\nimport wikipedia\r\nimport time\r\nimport random\r\nimport pyautogui\r\nimport unicodedata\r\nimport string\r\n\r\nwikipedia.set_lang(\"pl\")\r\n\r\ncharacters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\n\r\ndef start():\r\n os.system(\"cls\")\r\n now = datetime.datetime.now()\r\n obecnydzien = now.strftime(\"%d-%m-%Y\")\r\n obecnagodzina = now.strftime(\"%H:%M:%S\")\r\n print(\"\")\r\n print(obecnydzien)\r\n print(obecnagodzina)\r\n print(\"\")\r\n print(\"⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⠛⠛⠛⠋⠉⠈⠉⠉⠉⠉⠛⠻⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⡿⠋⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠛⢿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⡏⣀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣤⣤⣄⡀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⢏⣴⣿⣷⠀⠀⠀⠀⠀⢾⣿⣿⣿⣿⣿⣿⡆⠀⠀⠀⠀⠀⠀⠀⠈⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣟⣾⣿⡟⠁⠀⠀⠀⠀⠀⢀⣾⣿⣿⣿⣿⣿⣷⢢⠀⠀⠀⠀⠀⠀⠀⢸⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣟⠀⡴⠄⠀⠀⠀⠀⠀⠀⠙⠻⣿⣿⣿⣿⣷⣄⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿\")\r\n print(\"⣿⣿⣿⠟⠻⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠶⢴⣿⣿⣿⣿⣿⣧⠀⠀⠀⠀⠀⠀⣿⣿⣿\")\r\n print(\"⣿⣁⡀⠀⠀⢰⢠⣦⠀⠀⠀⠀⠀⠀⠀⠀⢀⣼⣿⣿⣿⣿⣿⡄⠀⣴⣶⣿⡄⣿⣿⣿\")\r\n print(\"⣿⡋⠀⠀⠀⠎⢸⣿⡆⠀⠀⠀⠀⠀⠀⣴⣿⣿⣿⣿⣿⣿⣿⠗⢘⣿⣟⠛⠿⣼⣿⣿\")\r\n print(\"⣿⣿⠋⢀⡌⢰⣿⡿⢿⡀⠀⠀⠀⠀⠀⠙⠿⣿⣿⣿⣿⣿⡇⠀⢸⣿⣿⣧⢀⣼⣿⣿\")\r\n print(\"⣿⣿⣷⢻⠄⠘⠛⠋⠛⠃⠀⠀⠀⠀⠀⢿⣧⠈⠉⠙⠛⠋⠀⠀⠀⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣧⠀⠈⢸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠟⠀⠀⠀⠀⢀⢃⠀⠀⢸⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⡿⠀⠴⢗⣠⣤⣴⡶⠶⠖⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⡸⠀⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⡀⢠⣾⣿⠏⠀⠠⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠛⠉⠀⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣧⠈⢹⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⡄⠈⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣴⣾⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣧⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣷⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⣦⣄⣀⣀⣀⣀⠀⠀⠀⠀⠘⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⡄⠀⠀⠀⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣧⠀⠀⠀⠙⣿⣿⡟⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠇⠀⠁⠀⠀⠹⣿⠃⠀⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⢐⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿\")\r\n print(\"⣿⣿⣿⣿⠿⠛⠉⠉⠁⠀⢻⣿⡇⠀⠀⠀⠀⠀⠀⢀⠈⣿⣿⡿⠛⠛⠛⠉⠉⠉\")\r\n print(\"⣿⡿⠋⠁⠀⠀⢀⣀⣠⡴⣸⣿⣇⡄⠀⠀⠀⠀⢀⡿⠄⠙⠛⠀⣀⣠⣤⣤⠄⠀\")\r\n\r\ndef speak(text, language):\r\n engine = pyttsx3.init()\r\n engine.setProperty('recognizer_instance.non_speaking_duration', 0.2)\r\n engine.setProperty('recognizer_instance.energy_threshold', 10)\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\ndef listen(language):\r\n start()\r\n r = sr.Recognizer()\r\n r.language = \"pl-PL\"\r\n with sr.Microphone() as source:\r\n audio = r.listen(source)\r\n try:\r\n return r.recognize_google(audio, language='pl-PL')\r\n except sr.UnknownValueError:\r\n return \"Błąd 400\"\r\n except sr.RequestError as e:\r\n print(\"Error making request: {0}\".format(e))\r\n return \"\"\r\n\r\ndef uspienie():\r\n r = sr.Recognizer()\r\n \r\n with sr.Microphone() as source:\r\n print(\"Podaj czas:\")\r\n speak(\"Na ile minut?\", 'pl-PL')\r\n audio = r.listen(source)\r\n \r\n try:\r\n czas = r.recognize_google(audio, language=\"pl-PL\")\r\n czas = int(czas)\r\n speak(f\"Do zobaczenia za {czas} minut\", 'pl-PL')\r\n czas = czas * 60\r\n time.sleep(czas)\r\n\r\n except sr.UnknownValueError:\r\n speak(\"Nie rozumiem, powtórz jeszcze raz\", 'pl-PL')\r\n uspienie()\r\n \r\n except sr.RequestError as e:\r\n 
speak(\"Wystąpił jakiś błąd\")\r\n print(f\"Wystąpił błąd: {e}\")\r\n time.sleep(6)\r\n \r\n uspienie()\r\n\r\ndef wyszukajinternet():\r\n speak(\"Co wyszukać?\", 'pl-PL')\r\n \r\n r = sr.Recognizer()\r\n \r\n with sr.Microphone() as source:\r\n print(\"Co wyszukać?\")\r\n audio = r.listen(source)\r\n \r\n try:\r\n search_term = r.recognize_google(audio, language=\"pl-PL\")\r\n print(f\"Wyszukuję: {search_term}\")\r\n webbrowser.open(\"https://www.google.pl/search?q=\" + search_term)\r\n except sr.UnknownValueError:\r\n print(\"Przepraszam, nie zrozumiałam tego co powiedział*ś.\")\r\n speak(\"Nie zrozumiałam tego co powiedziałeś.\", 'pl-PL')\r\n wyszukajinternet()\r\n except sr.RequestError as e:\r\n print(f\"Wystąpił błąd: {e}\")\r\n\r\ndef napisz():\r\n speak(\"Co napisać?\", 'pl-PL')\r\n \r\n r = sr.Recognizer()\r\n \r\n with sr.Microphone() as source:\r\n print(\"Co napisać?\")\r\n audio = r.listen(source)\r\n \r\n try:\r\n pisanie = r.recognize_google(audio, language='pl-PL')\r\n nPisanie = unicodedata.normalize(\"NFD\", pisanie)\r\n ndPisanie = nPisanie.capitalize()\r\n print(f\"Zrozumiałam: {ndPisanie}\")\r\n pyautogui.typewrite(ndPisanie)\r\n except sr.UnknownValueError:\r\n print(\"Przepraszam, nie zrozumiałam tego co powiedział*ś.\")\r\n speak(\"Nie zrozumiałam tego co powiedziałeś.\", 'pl-PL')\r\n napisz()\r\n except sr.RequestError as e:\r\n print(f\"Wystąpił błąd: {e}\")\r\n\r\ndef main():\r\n while True:\r\n command = listen('pl-PL').lower()\r\n if \"cześć\" in command:\r\n speak(\"W czym mogę pomóc?\", 'pl-PL') # -------------------- Przywitanie\r\n \r\n elif \"siema\" in command:\r\n speak(\"W czym mogę pomóc?\", 'pl-PL') # -------------------- Przywitanie\r\n \r\n elif \"hej\" in command:\r\n speak(\"W czym mogę pomóc?\", 'pl-PL') # -------------------- Przywitanie\r\n \r\n elif \"elo\" in command:\r\n speak(\"W czym mogę pomóc?\", 'pl-PL') # -------------------- Przywitanie\r\n \r\n elif \"lubię cię\" in command:\r\n speak(\"Niestety nie posiadam uczuć ale myślę że jeśli byłabym człowiekiem to też bym cię polubiła\", 'pl-PL')\r\n \r\n elif \"jak masz na imię\" in command:\r\n speak(\"Mam na imię Grace.\", 'pl-PL') # -------------------- Imię\r\n \r\n elif \"włącz chrome\" in command:\r\n os.system(\"chrome.exe\")\r\n speak(\"Otwieram przeglądarkę Google Chrome\", 'pl-PL') # --- Google Chrome (Włącz)\r\n \r\n elif \"wyłącz chrome\" in command:\r\n os.system(\"taskkill /IM chrome.exe /F\")\r\n speak(\"Zamykam przeglądarkę Google Chrome\", 'pl-PL') # ---- Google Chrome (Wyłącz)\r\n \r\n elif \"włącz operę\" in command:\r\n os.system(\"start .\\operagx.lnk\")\r\n speak(\"Otwieram przeglądarkę Opera GX\", 'pl-PL') # -------- Opera GX (Włącz)\r\n \r\n elif \"wyłącz operę\" in command:\r\n os.system(\"taskkill /IM opera.exe /F\")\r\n speak(\"Zamykam przeglądarkę Opera GX\", 'pl-PL') # --------- Opera GX (Wyłącz)\r\n \r\n elif \"włącz spotify\" in command:\r\n os.system(\"start Spotify.lnk\")\r\n speak(\"Otwieram Spotify\", 'pl-PL') # ---------------------- Spotify (Włącz)\r\n \r\n elif \"wyłącz spotify\" in command:\r\n os.system(\"taskkill /IM Spotify.exe /F\")\r\n speak(\"Zamykam Spotify\", 'pl-PL') # ----------------------- Spotify (Wyłącz)\r\n \r\n elif \"włącz visual studio\" in command:\r\n os.system(\"start VisualStudioCode.lnk\")\r\n speak(\"Uruchamiam Visual Studio Code\", 'pl-PL') # --------- Visual Studio Code (Włącz)\r\n \r\n elif \"włącz discorda\" in command:\r\n os.system(\"start Discord.lnk\")\r\n speak(\"Uruchamiam Discorda\", 'pl-PL') # ------------------- 
Discord (Włącz)\r\n \r\n elif \"wyłącz discorda\" in command:\r\n os.system(\"taskkill /IM Discord.exe /F\")\r\n speak(\"Zamykam Discorda\", 'pl-PL') # ---------------------- Discord (Wyłącz)\r\n \r\n elif \"włącz cpu-z\" in command:\r\n os.system(\"start CPU.lnk\")\r\n speak(\"Uruchamiam C P U Z\", 'pl-PL') # -------------------- CPU-Z (MSC)\r\n \r\n elif \"wlącz monitor wydajności\" in command:\r\n os.system(\"perfmon.msc\")\r\n speak(\"Uruchamiam monitor wydajności\", 'pl-PL') # --------- Monitor Wydajności\r\n \r\n elif \"włącz czat\" in command:\r\n os.system(\"start https://chat.openai.com/chat\")\r\n speak(\"Otwieram ChatGPT\", 'pl-PL') # ---------------------- ChatGPT\r\n \r\n elif \"włącz konsolę\" in command:\r\n os.system(\"start\")\r\n os.system(\"start Grace.bat.lnk\")\r\n break # --------------------------------------------------- Konsola\r\n \r\n elif \"włącz notatnik\" in command:\r\n os.system(\"start notepad.lnk\") # -------------------------------- Notatnik\r\n \r\n elif \"włącz kalkulator\" in command: \r\n os.system(\"start calc.lnk\") # ----------------------------------- Kalkulator\r\n \r\n elif \"podaj ciekawostkę\" in command:\r\n random_page = wikipedia.random(pages=1)\r\n summary = wikipedia.summary(random_page, sentences=3)\r\n print(summary)\r\n speak(summary, 'pl-PL') # --------------------------------- Ciekawostka z Wikipedii\r\n \r\n elif \"wyszukaj w internecie\" in command:\r\n wyszukajinternet() # -------------------------------------- Wyszukiwanie w internecie\r\n\r\n elif \"napisz\" in command:\r\n napisz()\r\n \r\n elif \"otwórz swoją lokalizację\" in command:\r\n os.system(\"start \\Grace\")\r\n speak(\"Otwieram folder Grace\", 'pl-PL') # ----------------- Lokalizacja Grace\r\n \r\n elif \"wyłącz się\" in command:\r\n speak(\"Do zobaczenia.\", 'pl-PL')\r\n os.system(\"color c\")\r\n os.system(\"color f\")\r\n break # --------------------------------------------------- Wyłącz (Grace)\r\n \r\n elif \"uruchom ponownie\" in command:\r\n os.system(\"start Grace.bat.lnk\")\r\n break # --------------------------------------------------- Uruchom ponownie (Grace)\r\n \r\n elif \"uśpij się\" in command:\r\n uspienie() # ---------------------------------------------- Uśpienie (Grace)\r\n \r\n elif \"uruchom komputer ponownie\" in command:\r\n speak(\"Restartuję komputer\", 'pl-PL')\r\n os.system(\"shutdown/g\") # --------------------------------- Restart komputera\r\n \r\n elif \"wyłącz komputer\" in command:\r\n speak(\"Do zobaczenia\", 'pl-PL')\r\n os.system(\"shutdown/p\") # --------------------------------- Wyłączenie komputera\r\n \r\n elif \"test\" in command:\r\n speak(\"Testuję\", 'pl-PL')\r\n \r\n random_page = wikipedia.random(pages=1)\r\n summary = wikipedia.summary(random_page, sentences=1) # ----------------------------- Sprawdza działanie bilbioteki \"Wikipedia\"\r\n \r\n now = datetime.datetime.now()\r\n obecnydzien = now.strftime(\"%d-%m-%Y\")\r\n obecnagodzina = now.strftime(\"%H:%M:%S\") # ------------------------------------------ Sprawdza działanie bilbioteki \"Datetime\"\r\n \r\n nazwapliku = ''.join(random.choices(characters, k=31))\r\n ciag = ''.join(random.choices(characters, k=64)) # ---------------------------------- Sprawdza działanie bilbioteki \"Random\"\r\n \r\n os.system(f'echo {obecnydzien} >> \"%USERPROFILE%\\Desktop\\K{nazwapliku}.txt\"') # ----- Sprawdza działanie bilbioteki \"Random\"\r\n os.system(f'echo {obecnagodzina} >> \"%USERPROFILE%\\Desktop\\K{nazwapliku}.txt\"') # --- Sprawdza działanie bilbioteki \"Datetime\"\r\n 
os.system(f'echo {ciag} >> \"%USERPROFILE%\\Desktop\\K{nazwapliku}.txt\"')\r\n os.system(f'del \"%USERPROFILE%\\Desktop\\K{nazwapliku}.txt\"') # ----------------------- Sprawdza działanie bilbioteki \"OS\"\r\n \r\n time.sleep(1) # --------------------------------------------------------------------- Sprawdza działanie bilbioteki \"Time\"\r\n \r\n speak(\"Test przebiegł pomyślnie\", 'pl-PL') # -------------- Test\r\n \r\n elif \"support\" in command:\r\n webbrowser.open_new_tab(\"https://discord.gg/7hbbrbzjpY\")\r\n \r\n else:\r\n print(\"\")\r\n print(\"Powtórz jeszcze raz\")\r\n\r\nspeak(\"Witaj.\", 'pl-PL')\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n ","repo_name":"vDeresh/Grace","sub_path":"Grace/asystent.py","file_name":"asystent.py","file_ext":"py","file_size_in_byte":12369,"program_lang":"python","lang":"pl","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"25526486739","text":"from django import template\n\nfrom gargoyle import gargoyle\n\nregister = template.Library()\n\n\n@register.tag\ndef ifswitch(parser, token):\n bits = token.split_contents()\n if len(bits) < 2:\n raise template.TemplateSyntaxError(\"%r tag requires an argument\" % token.contents.split()[0])\n\n name = bits[1]\n instances = bits[2:]\n\n nodelist_true = parser.parse(('else', 'endifswitch'))\n token = parser.next_token()\n\n if token.contents == 'else':\n nodelist_false = parser.parse(('endifswitch',))\n parser.delete_first_token()\n else:\n nodelist_false = template.NodeList()\n\n return SwitchNode(nodelist_true, nodelist_false, name, instances)\n\n\nclass SwitchNode(template.Node):\n def __init__(self, nodelist_true, nodelist_false, name, instances):\n self.nodelist_true = nodelist_true\n self.nodelist_false = nodelist_false\n self.name = name\n self.instances = [template.Variable(i) for i in instances]\n\n def render(self, context):\n instances = [i.resolve(context) for i in self.instances]\n if 'request' in context:\n instances.append(context['request'])\n\n if not gargoyle.is_active(self.name, *instances):\n return self.nodelist_false.render(context)\n\n return self.nodelist_true.render(context)\n","repo_name":"disqus/gargoyle","sub_path":"gargoyle/templatetags/gargoyle_tags.py","file_name":"gargoyle_tags.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":748,"dataset":"github-code","pt":"30"} +{"seq_id":"24076326413","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\nimport requests\nimport xlwt\nfrom bs4 import BeautifulSoup\n\nurl = \"https://www.timeanddate.com/calendar/custom.html?year=2038&country=42&cols=3&df=1&hol=1\"\ndate = []\n#requests.adapters.DEFAULT_RETRIES = 5\nfor t in range(2037,2048): #a parameters set by user. 
Holidays from and to which year you like.\n url = \"https://www.timeanddate.com/calendar/custom.html?year=\"+str(t)+\"&country=42&cols=3&df=1&hol=1\"\n r = requests.get(url) #get from the url\n # print (r.encoding)\n # print (r.text)\n soup = BeautifulSoup(r.text) #use beautifulsoup to extra information we want\n #print(soup.prettify())\n for da in soup.find_all(\"div\", id=\"calarea\")[0].find_all('span',class_=\"co1\")[:-1]: #here we find all holidays in one year\n date.append(da.text + \" \" + str(t)) #process the holiday, add year behind it\n print (da.text + \"-\" + str(t))\n\n\n\nimport pandas as pd\na = pd.Series([pd.to_datetime(dat) for dat in date])\nfor t in a:\n print (t)\ndf = pd.DataFrame(a)\n# df = pd.DataFrame(date)\ndf.to_excel('output.xlsx', header=False, index=False)\n","repo_name":"MasKong/Crawler","sub_path":"Holiday_v1.py","file_name":"Holiday_v1.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15273648492","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isValidBST(self, root: Optional[TreeNode]) -> bool:\n \n # function for finding the inorder traversal result\n def inOrder(root):\n ans = []\n if root:\n ans += inOrder(root.left) + [root.val] + inOrder(root.right)\n return ans\n \n output = inOrder(root) # store the inorder traversal in output array\n \n # check whether output array is sorted or not\n # if sorted then, Valid BST\n # otherwise, not a BST\n for i in range(1, len(output)):\n if output[i-1] >= output[i]: # i.e output array is not sorted\n return False\n\n return True\n \n ","repo_name":"Henokaa/Datastructure-leetcode-practice","sub_path":"98-validate-binary-search-tree/98-validate-binary-search-tree.py","file_name":"98-validate-binary-search-tree.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"16308588331","text":"\ndigitsMin = [1, 5, 3, 5, 1, 7]\ndigitsMax = [6, 3, 0, 3, 9, 5]\n\ndef combDigits(digitsIn):\n res = 0\n counter = len(digitsIn)-1\n while counter>=0:\n res+= digitsIn[len(digitsIn)-counter-1]*pow(10,counter)\n counter-=1\n return res\n\ndef pwRuleConform(digitsIn):\n if len(digitsIn)!=6:\n return False\n if combDigits(digitsIn) combDigits(digitsMax):\n return False\n numPair = False\n numComboCount =0\n counter =1\n while counter')\ndef users_channel(channel):\n if check_login():\n if session['admin'] or check_access(session['username'], channel):\n return render_template('users.html', users=find_channel_memers(channel), user=get_user(session['username']), check_access=check_access, channelview=channel)\n else:\n abort(403)\n else:\n abort(403)\n\n@app.route('/users//add-admin/', methods=['GET'])\ndef web_add_admin_to_channel(channel, user):\n if not check_login():\n abort(403)\n if session['admin'] or check_access(session['username'], channel):\n give_admin_to_channel(user, channel)\n return redirect(redirect_back())\n else:\n return abort(403)\n\ndef redirect_back(default='index'):\n return request.args.get('next') or \\\n request.referrer or \\\n url_for(default)\n\n@app.route('/users//add-user', methods=['POST'])\ndef web_add_to_channel(channel):\n if not check_login():\n abort(403)\n if check_access(session['username'], channel):\n 
give_user_access_to_channel(request.form['user'], channel, request.form['admin'])\n return redirect(redirect_back())\n else:\n return abort(403)\n\n@app.route('/users//add-channel', methods=['post'])\ndef post_add_to_channel(user):\n if not check_login():\n abort(403)\n channel = request.form['channel']\n if check_access(session['username'], channel):\n give_user_access_to_channel(user, channel, request.form['admin'])\n return redirect(redirect_back())\n else:\n return abort(403)\n\n@app.route('/logout')\ndef logout():\n session.pop('username', None)\n session.pop('admin', None)\n return redirect(url_for('index'))\n\n\n@app.route('/static/')\ndef send_static(path):\n return send_from_directory('static', path)\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index():\n if request.method == 'POST':\n if request.form['login']:\n state = do_login(request.form['username'].lower(), request.form['password'])\n return redirect(url_for('index'))\n else:\n return redirect(url_for('index'))\n else:\n if check_login():\n return render_template('index.html', result=get_channels_filtered_for_user(session['username']), isAdmin=session['admin'])\n else:\n return render_template('login.html')\n\n@app.route('/channel')\ndef gotohomedamnyou():\n return redirect(url_for('index'))\n\n@app.route('/channel/')\ndef show_channel(name):\n if check_login():\n if check_access_channel(session['username'], name):\n return render_template('channel.html', name=name, result=reversed(read_json(name)),\n dates=create_date_list(name), check_access=check_access, username=session['username'])\n else:\n abort(403)\n else:\n abort(403)\n\n\n@app.route('/channel//')\ndef channel_date_route(name, date):\n if check_login():\n if check_access_channel(session['username'], name):\n readable = datetime.datetime.strptime(date, '%y%m%d').strftime('%A %d. 
%b %Y')\n return render_template('channel.html', name=name, result=reversed(channel_date(name, date)),\n dates=create_date_list(name), readable=readable, check_access=check_access, username=session['username'])\n else:\n abort(403)\n else:\n abort(403)\n\n\n@app.errorhandler(404)\ndef page_not_found(e):\n return render_template('error.html')\n\n\n@app.errorhandler(500)\ndef internal_error(e):\n return render_template('error.html')\n\n\n@app.errorhandler(403)\ndef need_to_login(e):\n return redirect(url_for('index'))\n\n\ndef start_web(environ = None, start_response = None):\n logger = logging.getLogger('werkzeug')\n handler = logging.FileHandler('debug/access.log')\n logger.addHandler(handler)\n gunicorn_logger = logging.getLogger('gunicorn.error')\n app.logger.handlers = gunicorn_logger.handlers\n app.logger.setLevel(gunicorn_logger.level)\n app.run(debug=False, host='0.0.0.0', threaded=True)\n\nif __name__ == \"__main__\":\n start_web()\n","repo_name":"gathering/loggy","sub_path":"web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":12808,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"5283930083","text":"# Path of the Zope instance configuration to use to\n# instantiate Zope2.app()\n#conf_path = '/var/lib/zope2.13/instance/ema/etc/zope.conf'\nwsgi_conf_path = '/path/to/this/repo/app/instance/etc/wsgi.conf'\n\n# Path to Data.fs which is needed for lookup of object IDs from transaction IDs\n# with perfact-zoperecord --watch\ndatafs_path = '/path/to/this/repo/app/instance/var/Data.fs'\n\n# user that is used to create commits and as default owner of objects\nmanager_user = 'admin'\n\n# create the manager user on empty databases\ncreate_manager_user = False\n\n# sets the default owner for objects that have no owner in the file system representation\ndefault_owner = 'admin'\n\n# Base directory of the repository\nbase_dir = '/place/to/keep/such/repos'\n\n# default settings for git repos\ncommit_name = \"Zope Developer\"\ncommit_email = \"zope-devel@example.de\"\ncommit_message = \"Generic commit message.\"\n\n# email address to send commit summaries of default commits to\n#codechange_mail = \"zope-devel@example.de\"\n","repo_name":"Stakdek/zope","sub_path":"zodbsync_config.py","file_name":"zodbsync_config.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"} +{"seq_id":"19809187550","text":"# 29일차 소수판정 v0.2 큰 수 시간단축 - 2차 시도 (KS)\n# 루트n까지 검사\nimport time as t\nprint('소수판정 v0.2 (큰 수)')\ndef isPrime(n): # 소수인지 판정하는 함수\n if n < 2: return False\n for i in range(2, int(n**0.5)+1):\n if n % i == 0:\n return False\n return True\n\nwhile 1: # 함수호출 소수판정\n n = int(input('예 9147483647 정수입력 : ')) # 정수 입력\n if n == 0: break\n startTime = t.time()\n if isPrime(n):\n print('소수')\n else:\n print('소수 아님')\n endTime = t.time()\n eTime = endTime - startTime\n print('걸린시간 : %.3f초'%eTime)\n","repo_name":"ksshin21/python_basic","sub_path":"29 large_prime_number.py","file_name":"29 large_prime_number.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"34110763472","text":"def solution(s):\n import collections\n \n left, right = s[:len(s) // 2], s[len(s) // 2:]\n \n if len(left) != len(right):\n return -1\n\n lookup = collections.Counter(left)\n\n count = 0\n \n for rc in right:\n if rc in lookup and lookup[rc] > 0:\n lookup[rc] -= 1\n else:\n count += 
1\n\n return count\n\ntestCount = int(input())\n\nfor testId in range(testCount):\n source = input().strip()\n count = solution(source)\n \n print(count)\n","repo_name":"lilsweetcaligula/sandbox-online-judges","sub_path":"hackerrank/algorithms/strings/easy/anagram/py/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"20699531746","text":"import sys\nimport re\n\"\"\"\nPIPER script to extract interesting information from HTML META tags from a HTTP response.\nTarget tool: Message viewers\nRequire that HTTP headers NOT been passed\nFilters needed via matching of the case insensitive regex for the header Content-Type: [a-z]+\\\/x?(html)\n\"\"\"\n# Match all meta tags pattern holding \"generator\" and \"framework\" information\n# whatever the order of the attributes NAME and CONTENT\nexprs = []\nexprs.append(r'<meta\\\s+name=\"(generator|framework)\"\\\s+content=\"([^\"]*)\"')\nexprs.append(r'<meta\\\s+content=\"([^\"]*)\"\\\s+name=\"(generator|framework)\"')\n# Extract the whole response body\ncontent = \"\".join(sys.stdin)\n# Extract the metadatas via the regex and handle duplicates\nresults = []\n# First regex working on order 1) NAME 2) CONTENT\nmetadatas = re.findall(exprs[0], content, re.IGNORECASE | re.MULTILINE)\nfor metadata in metadatas:\n msg = f\"{metadata[0].capitalize()}: {metadata[1]}\"\n if msg not in results:\n results.append(msg)\n# Second regex working on order 1) CONTENT 2) NAME\nmetadatas = re.findall(exprs[1], content, re.IGNORECASE | re.MULTILINE)\nfor metadata in metadatas:\n msg = f\"{metadata[1].capitalize()}: {metadata[0]}\"\n if msg not in results:\n results.append(msg)\ncount = len(results)\nif count > 0:\n results.sort()\n print(f\"{count} metadata(s) found:\")\n print(\"\\n\".join(results))\nelse:\n print(\"No metadata found.\")\n","repo_name":"righettod/burp-piper-custom-scripts","sub_path":"extract-html-metadatas.py","file_name":"extract-html-metadatas.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":93,"dataset":"github-code","pt":"30"} +{"seq_id":"13159660970","text":"# DFS를 이용한 방법이 모범답안이다.\nn = 3\nnum = [3, 4, 5]\ncal = [1, 0, 1, 0]\n\nmin_value = 1e9\nmax_value = -1e9\n\n# dfs정의\n\n\ndef dfs(i, now): # 다음 인덱스 값, 현재 계산 누적값\n global min_value, max_value, cal\n if i == n: # 다음 인덱스가 마지막일경우(재귀에서 i+1계산이 되고 재귀호출이기 떄문에 n-1이아닌 n이다.)\n min_value = min(min_value, now) # 현재 계산한 값과 저장된 min_value중 낮은걸 할당\n max_value = max(max_value, now) # 현재 계산한 값과 저장된 min_value중 낮은걸 할당\n else:\n if cal[0] > 0:\n cal[0] -= 1\n dfs(i+1, now+num[i])\n cal[0] += 1 # 다시 더해줘야 모든 경우의 수를 계산가능\n if cal[1] > 0:\n cal[1] -= 1\n dfs(i+1, now-num[i])\n cal[1] += 1 # 다시 더해줘야 모든 경우의 수를 계산가능\n if cal[2] > 0:\n cal[2] -= 1\n dfs(i+1, now*num[i])\n cal[2] += 1 # 다시 더해줘야 모든 경우의 수를 계산가능\n if cal[3] > 0:\n cal[3] -= 1\n dfs(i+1, now//num[i])\n cal[3] += 1 # 다시 더해줘야 모든 경우의 수를 계산가능\n\n\ndfs(1, num[0])\n\nprint(max_value)\n","repo_name":"artdumb/CodingTestPrac","sub_path":"CodeUp/54-DFSBFS_연산자끼워넣기.py","file_name":"54-DFSBFS_연산자끼워넣기.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"44437154461","text":"# 1st solution, brute force\n# O(n^3) time | O(1) space\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n longest_streak = 0\n\n for num in nums:\n current_num = num\n current_streak = 1\n\n while current_num + 1 in nums:\n current_num += 1\n current_streak += 1\n\n longest_streak = max(longest_streak, 
current_streak)\n\n return longest_streak\n\n# 2nd solution, sorting\n# O(nlogn) time | O(1 or n) space\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n if not nums:\n return 0\n\n nums.sort()\n\n longest_streak = 1\n current_streak = 1\n\n for i in range(1, len(nums)):\n if nums[i] != nums[i-1]:\n if nums[i] == nums[i-1]+1:\n current_streak += 1\n else:\n longest_streak = max(longest_streak, current_streak)\n current_streak = 1\n\n return max(longest_streak, current_streak)\n \n# 3rd solution, HashSet and Intelligent Sequence Building\n# O(n) time | O(n) space\nclass Solution:\n def longestConsecutive(self, nums: List[int]) -> int:\n longest_streak = 0\n num_set = set(nums)\n\n for num in num_set:\n if num - 1 not in num_set:\n current_num = num\n current_streak = 1\n\n while current_num + 1 in num_set:\n current_num += 1\n current_streak += 1\n\n longest_streak = max(longest_streak, current_streak)\n\n return longest_streak ","repo_name":"yingzhuo1994/LeetCode","sub_path":"0128_LongestConsecutiveSequence.py","file_name":"0128_LongestConsecutiveSequence.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"31414565870","text":"import json, re\nimport token_helpers\nfrom nltk.corpus import stopwords\nimport numpy as np\n\n\n#Creates positive and negative ratios and returns an array of 1(positive) and 0(negative). \ndef ratio_weight(data):\n pos_words = [line.strip() for line in open('positive-words.txt', 'r')]\n neg_words = [line.strip() for line in open('negative-words.txt', 'r')]\n ret_set = []\n for r in data:\n pos_count = 0\n neg_count = 0\n for word in remove_stopwords_inner(r['text'].split(),stopwords.words('english')):\n if word in pos_words:\n pos_count += 1\n elif word in neg_words:\n neg_count += 1\n if pos_count > neg_count:\n ret_set.append(1)\n else:\n ret_set.append(0) \n return np.array(ret_set)\n\n#Loads yelp's json data\ndef load_json_data(file, data_amount):\n data = [] \n with open(file) as f:\n count = 0\n for line in f:\n count+= 1\n if count == data_amount:\n break\n data.append(json.loads(line))\n return data\n\n\nif __name__ == '__main__':\n data = load_json_data('yelp_academic_dataset_review.json', 10000)\n print(ratio_weight(data))","repo_name":"efrenaguilar95/Yelp_Analyzer","sub_path":"Yelp Dataset/weight_reviews.py","file_name":"weight_reviews.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19346736207","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin, auth\nfrom django.urls import path, include\nfrom polls import views, forms \nfrom polls.views import Search_results\n\nurlpatterns = [\n path('', views.home, name='home'), \n path('detail/', views.IndexView.as_view(), name='index'),\n path('polls/', include('polls.urls')),\n path('admin/', admin.site.urls),\n path('addquestion/', views.addNewQuestion, name='addquestion'),\n path('savequestion/', views.saveNewQuestion, name='savequestion'),\n path('search/', Search_results.as_view(), name='search_results'),\n path('recommend/', views.recommendQuestion, name='recommend'),\n]\n","repo_name":"ndklien/ie104_demo","sub_path":"mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26631605989","text":"from data import *\n\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self, pos, speed, enemy_image, damage, fire_rate):\n super().__init__()\n self.image = pygame.image.load(enemy_image).convert_alpha()\n self.mask = pygame.mask.from_surface(self.image)\n self.rect = self.image.get_rect()\n self.width = self.image.get_width()\n self.height = self.image.get_height()\n self.pos = vec(pos)\n self.vel = vec(speed)\n self.damage = damage\n self.fire_rate = fire_rate\n","repo_name":"duriel666/game","sub_path":"enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"35872019451","text":"elements = input().split()\nbakery = {}\nfor i in range(0, len(elements), 2):\n key = elements[i]\n value = elements[i + 1]\n bakery[key] = int(value)\nsearched_element = input().split()\nfor el in searched_element:\n if el in bakery:\n print(f\"We have {bakery[el]} of {el} left\")\n else:\n print(f\"Sorry, we don't have {el}\")","repo_name":"Slavi87/Programming-Fundamentals-Python","sub_path":"dictionaries/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"8072376013","text":"from django.db import models\n\nfrom rest_framework.authtoken.models import Token\n\nfrom mayan.apps.testing.tests.mixins import TestViewTestCaseMixin\n\nfrom .. 
import serializers\n\n\nclass APIUserTestCaseMixin:\n def setUp(self):\n super().setUp()\n self._test_case_user_token = self.get_test_user_token(\n user=self._test_case_user\n )\n\n def get_test_user_token(self, user):\n token, created = Token.objects.get_or_create(\n user=user\n )\n\n return token\n\n\nclass DynamicFieldSerializerAPIViewTestCaseMixin:\n auto_add_test_view = True\n auto_create_test_object = False\n test_view_url = r'^test-view-url/(?P\\d+)/$'\n\n def _test_view_factory(self, test_object=None):\n self.TestModelParent = self._create_test_model(\n fields={\n 'test_field_1': models.CharField(\n blank=True, max_length=1\n ),\n 'test_field_2': models.CharField(\n blank=True, max_length=1\n )\n }, model_name='TestModelParent'\n )\n\n self.TestModelChild = self._create_test_model(\n fields={\n 'parent': models.ForeignKey(\n on_delete=models.CASCADE, related_name='children',\n to='TestModelParent',\n ),\n 'test_field_3': models.CharField(\n blank=True, max_length=1\n ),\n 'test_field_4': models.CharField(\n blank=True, max_length=1\n )\n }, model_name='TestModelChild'\n )\n\n self._test_object_parent = self.TestModelParent.objects.create()\n self._test_object_child = self.TestModelChild.objects.create(\n parent=self._test_object_parent\n )\n\n TestModelParent = self.TestModelParent\n TestModelChild = self.TestModelChild\n\n class TestModelParentSerializer(serializers.ModelSerializer):\n class Meta:\n fields = ('id', 'test_field_1', 'test_field_2')\n model = TestModelParent\n\n class TestModelChildSerializer(serializers.ModelSerializer):\n parent = TestModelParentSerializer()\n\n class Meta:\n fields = ('parent', 'id', 'test_field_3', 'test_field_4')\n model = TestModelChild\n\n class TestView(\n self._get_test_view_class(\n serializer_class=TestModelChildSerializer\n )\n ):\n \"\"\"\n Flat subclass to allow the test class view to be called without\n code changes.\n \"\"\"\n\n return TestView.as_view()\n\n def _request_test_api_view(self, query):\n return self.get(\n viewname='rest_api:{}'.format(self._test_view_name), kwargs={\n 'test_object_id': self._test_object_child.pk,\n }, query=query\n )\n\n\nclass RESTAPIViewTestMixin:\n def _request_test_browser_api_view(self):\n return self.get(query={'format': 'api'}, viewname='rest_api:api_root')\n\n def _request_test_redoc_ui_view(self):\n return self.get(viewname='rest_api:schema-redoc')\n\n def _request_test_swagger_ui_view(self):\n return self.get(viewname='rest_api:schema-swagger-ui')\n\n def _request_test_swagger_no_ui_json_view(self):\n return self.get(\n kwargs={'format': '.json'}, viewname='rest_api:schema-json'\n )\n\n def _request_test_swagger_no_ui_yaml_view(self):\n return self.get(\n kwargs={'format': '.yaml'}, viewname='rest_api:schema-json'\n )\n\n\nclass TestAPIViewTestCaseMixin(TestViewTestCaseMixin):\n def _get_test_view_urlpatterns(self):\n from ..urls import api_version_urls\n return api_version_urls\n","repo_name":"mayan-edms/Mayan-EDMS","sub_path":"mayan/apps/rest_api/tests/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","stars":501,"dataset":"github-code","pt":"30"} +{"seq_id":"19372821056","text":"import time\n\nimport pytest\nimport Utilitarios.Login\nfrom PageObjects.Financeiro.contapagar import ContaPagar\nfrom playwright.sync_api import expect\n\ndef lancamentoContasPagar(page, fornecedor, documento, especie, condicaoPagemento, valorTotal = ''):\n page.goto(\"http://lyra:82/financeiro/contapagar\")\n contapagar = 
ContaPagar(page)\n contapagar.bntAdicionar.click()\n time.sleep(1.5)\n contapagar.fornecedor.type(fornecedor)\n time.sleep(1.5)\n contapagar.selecaoLista(fornecedor).click()\n contapagar.documentoFiscal.fill(documento)\n contapagar.especie.fill(especie)\n contapagar.contaFinanceiraOrigem.type(\"cliente\")\n time.sleep(1.5)\n contapagar.selecaoLista(\"15 - 1.04.01 - Clientes\").click()\n contapagar.btnSalvar.click()\n contapagar.abaPagamento.click()\n time.sleep(1)\n contapagar.condicaoPagamento.type(condicaoPagemento)\n if valorTotal == '':\n page.wait_for_selector(\"[class='dx-empty-message']\")\n time.sleep(1)\n expect(page.locator(\"[class='dx-empty-message']\")).to_have_text(\"Sem dados\")\n else:\n time.sleep(1)\n contapagar.selecaoLista(condicaoPagemento).click()\n contapagar.valorTotal.fill(valorTotal)\n contapagar.botaoGenerico(\"Gerar\").click()\n page.goto(\"http://lyra:82/financeiro/contapagar\")\n time.sleep(1.5)\n contapagar.btnExcluirRegistro.first.click()\n contapagar.botaoGenerico(\"Sim\").click()\n contapagar.selecaoMotivoCancelamento.click()\n contapagar.selecaoLista(\"20 - TestComplete motivo cancelamento contas a pagar/receber\").click()\n contapagar.botaoGenerico(\"Confirmar\").click()\n\n@pytest.mark.Financeiro\ndef test_finProcessosLancamentoCPEmpresaAgrupador(set_up):\n page = set_up\n Utilitarios.Login.LoginQN(page, usuario='tc@questores.com.br', senha='123qwe', empresa='1 - Testes Cadastros')\n lancamentoContasPagar(page, fornecedor='Tiririca', documento='123456', especie='boleto',\n condicaoPagemento='teste agrupamento 30 dias')\n page.close()\n\n\n","repo_name":"jefersoncaye/PlaywrightQuestorNegocio","sub_path":"Testes/Financeiro/test_finProcessosLancamentoCPEmpresaAgrupador.py","file_name":"test_finProcessosLancamentoCPEmpresaAgrupador.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"41069677796","text":"from NeuralNetworkPackage.inputLayer import InputLayer\nfrom NeuralNetworkPackage.convolutionLayer import ConvolutionalLayer\nfrom NeuralNetworkPackage.flatteningLayer import FlatteningLayer\nfrom NeuralNetworkPackage.poolingLayer import PoolingLayer\nfrom NeuralNetworkPackage.maxPoolingCalc import MaxPoolingCalc\nfrom NeuralNetworkPackage.fullyConnectedLayer import FullyConnectedLayer\nfrom NeuralNetworkPackage.reLuLayer import ReLuLayer\nfrom NeuralNetworkPackage.logisticSigmoidLayer import LogisticSigmoidLayer\nfrom NeuralNetworkPackage.softmaxActivationLayer import SoftmaxActivationLayer\nfrom NeuralNetworkPackage.crossEntropyLayer import CrossEntropyLayer\nfrom NeuralNetworkPackage.adamWeightUpdateCalc import AdamWeightUpdateCalc\nfrom NeuralNetworkPackage.squaredErrorLayer import SquaredErrorLayer\nfrom NeuralNetworkPackage.model import Model\nimport mlagents\nfrom mlagents_envs.environment import UnityEnvironment as UE\nfrom mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel\nfrom mlagents_envs.base_env import (\n BaseEnv,\n DecisionSteps,\n TerminalSteps,\n BehaviorSpec,\n ActionTuple,\n BehaviorName,\n AgentId,\n BehaviorMapping,\n)\nimport numpy as np\n\n#Constant board size params\nNUM_MAP_ROWS = 12\nNUM_MAP_COLS = 14\n\n#Weights To Load Filename\ntoLoad = \"2023-03-17-20-50-31_FC_1_CONV_1/2023-03-17-21-31-08_FC_1_CONV_1.npy\"\n\nX = np.array([np.random.randint(8, size=168).reshape((12,14)), np.random.randint(8, size=168).reshape((12,14))])\n\n#Set up layers\ntril = InputLayer(X, False)\ntail = 
InputLayer(X, False)\ntrcl = ConvolutionalLayer(8)\ntacl = ConvolutionalLayer(8)\nmpc = MaxPoolingCalc()\ntrpl = PoolingLayer(3, 3, 2, mpc)\ntapl = PoolingLayer(3, 3, 2, MaxPoolingCalc())\ntrfl = FlatteningLayer()\ntafl = FlatteningLayer()\ntrfcl = FullyConnectedLayer(320,4, AdamWeightUpdateCalc())\ntrfcl2 = FullyConnectedLayer(160,4, AdamWeightUpdateCalc())\ntafcl = FullyConnectedLayer(320,4, AdamWeightUpdateCalc())\ntafcl2 = FullyConnectedLayer(160, 4, AdamWeightUpdateCalc())\ntrlsl = LogisticSigmoidLayer()\ntalsl = LogisticSigmoidLayer()\nsal = SoftmaxActivationLayer()\ncel = CrossEntropyLayer()\n\n#Alternate activation and objective funcs (DeepMind paper used)\ntrrll1 = ReLuLayer()\ntarll1 = ReLuLayer()\ntrrll2 = ReLuLayer()\ntarll2 = ReLuLayer()\ntrrll3 = ReLuLayer()\ntarll3 = ReLuLayer()\ntrsel = SquaredErrorLayer()\ntasel = SquaredErrorLayer()\n\ntrcl.setKernels(np.random.uniform(low=-pow(10,-4), high=pow(10,-4), size=(16, 3, 3)))\n\ntestModelLayers = [tril, trcl, trpl, trrll1, trfl, trfcl, trrll2, trsel]\n\ntestModel = Model(testModelLayers)\ntestModel.load(toLoad)\n\n#Create channel to specify run speed\nchannel = EngineConfigurationChannel()\n\n#Open pacman environment\nenv = UE(file_name='../MiniGameMap/Pacman', seed=1, side_channels=[channel])\n\n#Set environment run timescale\nchannel.set_configuration_parameters(time_scale= 1)\nenv.reset()\nenv.reset()\n\n#Get the name of the behavior we're using\nbehaviorName = list(env.behavior_specs)[0]\n#Get the behavior spec which contains observation data\nspec = env.behavior_specs[behaviorName]\n\n#spec.action_spec is a ActionSpec tuple containing info \n#on type of agent action and other action info\nagentActionSpec = spec.action_spec\n\nepisodeRewards = 0\ntrackedAgent = -1\ndone = False\n\ndecisionSteps, terminalSteps = env.get_steps(behaviorName)\nif len(decisionSteps) >= 1:\n trackedAgent = decisionSteps.agent_id[0]\n\nassert trackedAgent != -1, \"Error - no agent id set --- this shouldn't happen\"\n\n#Get the map state representation and reorder to grid size for CNN\ndecisionStepsObs = decisionSteps[trackedAgent].obs\nobservation = np.array([np.reshape(decisionStepsObs, (NUM_MAP_ROWS, NUM_MAP_COLS))])\n\nwhile not done:\n if trackedAgent == -1 and len(decisionSteps) >= 1:\n trackedAgent = decisionSteps.agent_id[0]\n\n randActionProb = np.random.rand()\n\n prediction = testModel.predict(observation)\n maxValIdx = np.argmax(prediction)\n action = ActionTuple(np.zeros((1,0)), np.array([[maxValIdx]]))\n \n #Set action\n env.set_actions(behaviorName, action)\n env.step()\n\n decisionSteps, terminalSteps = env.get_steps(behaviorName)\n lastStepReward = 0\n if trackedAgent in decisionSteps:\n lastStepReward = decisionSteps[trackedAgent].reward\n if trackedAgent in terminalSteps:\n lastStepReward = terminalSteps[trackedAgent].reward\n done = True\n \n episodeRewards += lastStepReward\n\n #Just for debugging, track the observed states\n if not done:\n decisionStepsObs = decisionSteps[trackedAgent].obs\n newObservation = np.array([np.reshape(decisionStepsObs, (NUM_MAP_ROWS, NUM_MAP_COLS))])\n else:\n newObservation = observation\n\n observation = newObservation\n\nprint(episodeRewards)\n#Close environment when done\nenv.close()\n","repo_name":"rkaundinya/pacman-RL-BuildsAndTrainingScripts","sub_path":"TrainingScripts/modelTesting.py","file_name":"modelTesting.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"10780528135","text":"import 
keras\nimport tensorflow as tf\nimport numpy as np\nimport retro\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, InputLayer\nfrom keras.layers import Convolution2D, MaxPooling2D, Conv2D, Conv1D\nfrom PIL import Image\nimport cv2\nimport math\nfrom keras.models import load_model\n\nmodel = Sequential()\nmodel.add(Conv2D(32, 3, 3, activation='relu', input_shape=(56, 80, 1)))\nkeras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)\nmodel.add(Conv2D(16, 8, 8, activation='relu'))\nkeras.layers.MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None)\nmodel.add(Flatten())\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(4, activation='linear'))\nmodel.compile(loss='mse', optimizer='adam', metrics=['mae'])\n\n# model = load_model('my_model-1.h5')\n\n\n# zdaje się, że tylko te przyciski miały jakiś sens\n# 6 - left\n# 7 - right\n# 0 - jump\nnum2action = []\nfor i in range(0, 12):\n # num2action.append(np.identity(12, dtype=int)[i:i+1])\n num2action.append(np.zeros(12))\n\nnum2action[0][0]=1\nnum2action[1][6]=1\nnum2action[2][7]=1\n\ndef convert(s__):\n s__ = cv2.resize(s__, dsize=(80, 56), interpolation=cv2.INTER_CUBIC)\n s__ = cv2.cvtColor(s__, cv2.COLOR_BGR2GRAY)\n # s__ = s__.flatten()\n s__ = np.array([s__[:,:, None]])\n return s__\n\nenv = retro.make(game='SonicTheHedgehog-Genesis', state='GreenHillZone.Act1', scenario='scenario2dqn.json', record='.')\nnum_episodes = 1000\ny = 0.9\neps = 0.99\ndecay_factor = 0.9\nr_avg_list = []\nfor i in range(num_episodes):\n s = convert(env.reset())\n eps *= decay_factor\n s_eps = eps\n bored_helper = pow(s_eps,2000)\n bored_helper = max(bored_helper, 0.0001)\n s_decay = 0.999\n if i % 50 == 0:\n print(\"Episode {} of {}\".format(i + 1, num_episodes))\n done = False\n r_sum = 0\n last_reward = 0\n max_reward = 0\n total_reward = 0\n until_done = 400\n stuck = False\n while not (done or stuck):\n stuck = False\n if np.random.random() < s_eps:\n a = np.random.randint(0, 3)\n old_a = a\n a = num2action[int(a)]\n else:\n a = np.argmax(model.predict(s))\n old_a = a\n a = num2action[int(a)]\n new_s, r, done, _ = env.step(a)\n\n # skrypt do generowania nagród nie działał\n # poniższy fragment wylicza nagrodę w inny sposób\n # i próbuje stwierdzić, czy Sonic utknął w miejscu na zbyt długo\n r-=0.05\n total_reward += r\n if(total_reward > max_reward):\n max_reward = total_reward\n until_done=400\n else:\n until_done -=1\n if (until_done == 0):\n until_done = 400\n stuck = True\n r = -5\n\n if (r > max_reward):\n r = 1.1 * r\n max_reward = r\n until_done = 400\n s_eps = eps\n else:\n r = 0\n until_done -=1\n\n # zwiększa szansę losowego ruchu, jeśli Sonic utknął\n if(until_done==200): s_eps = 0.1\n if (until_done == 0):\n until_done = 400\n stuck = True\n r = -5\n if done: r = -5\n\n # zmniejszanie rozmiaru danych wejściowych\n # (obrazu ekranu gry)\n new_s = convert(new_s)\n env.render()\n target = r + y * np.max(model.predict(new_s))\n target_vec = model.predict(new_s)[0]\n target_vec[old_a] = target\n model.fit(s, target_vec.reshape(-1, 4), epochs=1, verbose=0)\n s = new_s\n r_sum += r\n r_avg_list.append(r_sum / 1000)\n\n# model.save('my_model-1.h5')\n","repo_name":"Ksiazek-Experimental/retro-training","sub_path":"dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} 
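The dqn.py record above drives exploration with an epsilon-greedy policy whose epsilon decays between episodes. The following is a minimal, self-contained sketch of that selection rule only; the 4-action space and the random Q-values standing in for model.predict(state)[0] are illustrative assumptions, not part of the record.

import numpy as np

def pick_action(q_values, eps):
    # With probability eps take a random action (explore),
    # otherwise take the greedy arg-max action (exploit).
    if np.random.random() < eps:
        return np.random.randint(len(q_values))
    return int(np.argmax(q_values))

eps, decay = 0.99, 0.9
for episode in range(5):
    eps *= decay  # anneal exploration between episodes, as dqn.py does
    q_values = np.random.rand(4)  # stand-in for model.predict(state)[0]
    print(episode, round(eps, 3), pick_action(q_values, eps))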
+{"seq_id":"26923035047","text":"import re\r\n\r\n\r\n\r\npaper_re_zh = {r'##(.*)##': r' \">[论文]',\r\n '\\$\\$(.*)\\$\\$':r' \">[视频]',\r\n r'@@(.*)@@': r' \">[海报]',\r\n r'&&(.*)&&': r' \">[数据集]'}\r\npaper_re_en = {r'##(.*)##': r' \">[paper]',\r\n '\\$\\$(.*)\\$\\$': r' \">[video]',\r\n r'@@(.*)@@': r' \">[poster]',\r\n r'&&(.*)&&': r' \">[dataset]'}\r\n\r\nnews_re_zh = {r'##(.*)##': r' \">[详情]'}\r\nnews_re_en = {r'##(.*)##': r' \">[details]'}\r\n\r\npatent_re_zh = {r'##(.*)##': r' \">[详情]'}\r\npatent_re_en = {r'##(.*)##': r' \">[details]'}\r\n\r\ncompetiton_re_zh = {r'##(.*)##': r' \">[详情]'}\r\ncompetiton_re_en = {r'##(.*)##': r' \">[details]'}\r\n\r\n\r\n# 返回专利号和申请号\r\n\r\n\r\ndef text_competition(src, zh=True):\r\n competiton_re = competiton_re_zh if zh else competiton_re_en\r\n for p in competiton_re.keys():\r\n src = re.sub(p, competiton_re[p], src)\r\n return src\r\n\r\ndef text_news(src, zh=True):\r\n news_re = news_re_zh if zh else news_re_en\r\n for p in news_re.keys():\r\n src = re.sub(p, news_re[p], src)\r\n return src\r\n\r\ndef text_paper(src, zh=True):\r\n paper_re = paper_re_zh if zh else paper_re_en\r\n for p in paper_re.keys():\r\n src = re.sub(p, paper_re[p], src)\r\n src_thisyear, src_before = [], []\r\n src = src.split('\\n')\r\n cnt = 0\r\n for i in src:\r\n i = i.strip()\r\n if len(i) < 16:\r\n cnt += 1\r\n if cnt <= 1:\r\n paper_i = i.split('|')\r\n src_thisyear.append(paper_i)\r\n else:\r\n src_before.append(i)\r\n return src_thisyear, src_before\r\n\r\n\r\ndef text_patent(src, zh=True):\r\n ZL, SQ='', ''\r\n\r\n #中英文切换\r\n patent_re = patent_re_zh if zh else patent_re_en\r\n judge_word = '专利号' if zh else \"Patent No\"\r\n\r\n # 正则替换\r\n for p in patent_re.keys():\r\n src = re.sub(p, patent_re[p], src)\r\n # 专利分类\r\n # ZL:专利号 SQ:申请号\r\n content = src.split('\\n')\r\n for row in content:\r\n if len(row) == 0:\r\n continue\r\n row+='\\n'\r\n if re.search(judge_word, row):\r\n ZL+=row\r\n else:\r\n SQ+=row\r\n if not ZL:\r\n ZL += '\\n'\r\n if not SQ:\r\n SQ += '\\n'\r\n return ZL[:-1], SQ[:-1]\r\n\r\n\r\n# test\r\nif __name__ == \"__main__\":\r\n fd = open('input.txt',encoding='UTF-8')\r\n text = fd.read()\r\n zl, sq = text_patent(src=text, zh=True)\r\n print('已授权:')\r\n print(zl)\r\n print('已受理:')\r\n print(sq)\r\n","repo_name":"JinsongWu3/myweb","sub_path":"rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"44018356271","text":"import os\n\nfrom pathlib import Path\nfrom dataclasses import dataclass, field\nfrom math import pi\nfrom typing import Optional\n\nfrom rlbot.utils.game_state_util import GameState, BallState, CarState, Physics, Vector3, Rotator\nfrom rlbot.matchconfig.match_config import PlayerConfig, MatchConfig, Team\nfrom rlbot.training.training import Grade, Pass, Fail\n\nfrom rlbottraining.grading.training_tick_packet import TrainingTickPacket\nfrom rlbottraining.common_graders.timeout import FailOnTimeout\nfrom rlbottraining.common_graders.compound_grader import CompoundGrader\nfrom rlbottraining.common_graders.goal_grader import StrikerGrader\nfrom rlbottraining.common_exercises.common_base_exercises import GoalieExercise\nfrom rlbottraining.rng import SeededRandomNumberGenerator\nfrom rlbottraining.common_exercises.silver_striker import make_default_playlist as mdp\nfrom rlbottraining.match_configs import make_empty_match_config\nfrom rlbottraining.training_exercise import Playlist\nfrom rlbottraining.grading.grader 
import Grader\n\n@dataclass\nclass SaveGoalGrader(Grader):\n def on_tick(self, tick: TrainingTickPacket) -> Optional[Grade]:\n car = tick.game_tick_packet.game_cars[0].physics\n jumped = tick.game_tick_packet.game_cars[0].jumped\n ball = tick.game_tick_packet.game_ball.physics\n\n return None\n\nclass GoldBallRollingToStrikerGrader(CompoundGrader):\n def __init__(self, timeout_seconds=4.0):\n super().__init__([\n SaveGoalGrader(),\n FailOnTimeout(timeout_seconds)\n ])\n\n### FROM https://github.com/GodGamer029/YangBot/\n@dataclass\nclass GoldBallRollingToGoalie(GoalieExercise):\n # The grader is mine\n grader: Grader = field(default_factory=GoldBallRollingToStrikerGrader)\n\n def make_game_state(self, rng: SeededRandomNumberGenerator) -> GameState:\n return GameState(\n ball=BallState(physics=Physics(\n location=Vector3(-2500, -2500, 100),\n velocity=Vector3(1000, -1000, 0), # \n angular_velocity=Vector3(0, 0, 0))),\n cars={\n 0: CarState(\n physics=Physics(\n location=Vector3(0, -1500, 17),\n rotation=Rotator(0, pi * -0.5, 0),\n velocity=Vector3(0, 0, 0),\n angular_velocity=Vector3(0, 0, 0)),\n boost_amount=30)\n },\n )\n\ndef make_match_config() -> MatchConfig:\n file_path = os.path.dirname(os.path.realpath(__file__))\n config_path = os.path.realpath(file_path + \"/../config/bot.cfg\")\n\n match_config = make_empty_match_config()\n match_config.player_configs = [\n PlayerConfig.bot_config(Path(config_path), Team.BLUE)\n ]\n\n return match_config\n\ndef make_default_playlist() -> Playlist:\n #exercises = mdp()\n exercises = [\n GoldBallRollingToGoalie('GoldBallRollingToGoalie'),\n ]\n\n for exercise in exercises:\n exercise.match_config = make_match_config()\n\n return exercises","repo_name":"Chainso/DragonBot","sub_path":"dragonbot/rlbot/exercises/training_exercises.py","file_name":"training_exercises.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"9512007523","text":"from statistics import harmonic_mean\nimport matplotlib.pyplot as plt\nnimi = str(input(\"Palun sisestage faili nimi: \"))\nn = int(input(\"Sisestage täisarvuline n väärtus: \"))\n\nf = open(nimi,\"r\", encoding = \"UTF-8\")\n\nhinnajärjend = []\nfor rida in f:\n järjend = rida.strip().split()\n hind = [float(järjend.pop())]\n hinnajärjend += hind\n\ndef silu_andmed(järjend, täisarv):\n keskmistatudjärjend = []\n for indeks in range(1, len(järjend) + 1):\n algus = max(0, indeks - täisarv)\n h_keskmine = harmonic_mean(järjend[algus:indeks])\n keskmistatudjärjend += [h_keskmine]\n return keskmistatudjärjend\n\nplt.plot(hinnajärjend)\nplt.plot(silu_andmed(hinnajärjend, n))\nplt.show()\n","repo_name":"ArR4e/DSProject","sub_path":"processed/K09/S021/kodu1.py","file_name":"kodu1.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74794486804","text":"#Importação\nfrom socket import *\n#Cliente\nhost = gethostname()\nport = 55416\nclient = socket(AF_INET, SOCK_STREAM)\nclient.connect((host,port))\n#Recebe o Menu\nmenu = client.recv(1024)\nprint(menu.decode())\n#Loop de conexão\nwhile 1:\n #Cliente faz escolha da cotação\n msg = int(input('Qual cotação deseja saber? 
'))\n #Envia a escolha para o servidor\n client.send(str(msg).encode())\n #Classe de moedas\n moedas = {1:'Dólar', 2:'Euro', 3:'Bitcoin'}\n #Encerra a conexão\n if msg == 0:\n print('\\nAté a próxima :)')\n client.close\n break\n #Recebe a cotação escolhida\n elif (msg) > 0 and (msg) < 4:\n print(f'\\nA cotação atual do {moedas[msg]} é R${client.recv(1024).decode()}\\n')\n else:\n print(client.recv(1024).decode())\n","repo_name":"DanielSBF/Sockets","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"33962921140","text":"import json\nfrom ibm_watson import VisualRecognitionV3\nfrom ibm_cloud_sdk_core.authenticators import IAMAuthenticator\n\nauthenticator = IAMAuthenticator('si21GLXqCMSdhw15pibaI9Dl98v-n_IrO1-_NNfSk95B')\nvisual_recognition = VisualRecognitionV3(\n version='2019-09-11',\n authenticator=authenticator\n)\n\nwith open('hashiqi.jpg', 'rb') as images_file:\n results = visual_recognition.classify(images_file=images_file,threshold='0.8').get_result()\n results = results['images'][0]['classifiers'][0]['classes']\n for each in results:\n print(each['class'])\n print(each['score'])","repo_name":"KurosakiRei/CISC4900","sub_path":"Tested resources/IBM sample.py","file_name":"IBM sample.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"8076010203","text":"from libnix.sys.sys import Sys\nfrom libnix.utility.print_table import PrintTable\n\n_groups = Sys.get_groups(load=True)\n\n_rows = []\n\nfor _group_name in _groups.get_groups():\n _user = _groups.get_group_by_name(_group_name)\n\n _column = [_user.get_group(),\n _user.get_group_id(),\n _user.get_users()]\n\n _rows.append(_column)\n\n_print_table = PrintTable(\"Group\", \"Group Id\", \"Users\")\n_print_table.add_data(_rows)\n_print_table.print()\n","repo_name":"mbiciunas/nixscript","sub_path":"scripts/groups/sys_list_groups.py","file_name":"sys_list_groups.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"24496706234","text":"mySentance = 'I love the color'\n\ncolor_list = [\"red\", \"blue\", \"green\",\"pink\",\"teal\",\"black\"]\n\ndef color_function(name) :\n lst = []\n for i in color_list:\n msg = \"{0} {1} {2}\".format(name,mySentance,i)\n lst.append(msg)\n return lst\n\ndef get_name() :\n go = True\n while go:\n name = input('What is your name?')\n if name =='' :\n print(\"You need to provide your name\")\n elif name == 'Loki':\n print(\"Loki, you are a dog. 
Get off the computer\")\n else:\n go = false\n \nlst = color_function('DANIEL')\nfor i in lst:\n print(i)\n \nget_name()\n","repo_name":"Raleigh-Johnson/Python_Unit","sub_path":"testCode.py","file_name":"testCode.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39226226344","text":"import os\nimport pyautogui as pg\n\nimport json\nimport time\nimport requests\nimport socket\nfrom weworkbot import Bot as bot\n\nWEBHOOK = '3ae09d54-afb7-4a87-afe0-a29c263ecc99'\nURL = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=' + WEBHOOK\nUPLOAD_URL = 'https://qyapi.weixin.qq.com/cgi-bin/webhook/upload_media?key=' + WEBHOOK + '&type=file'\n\ndef upload_file(file_path):\n\n files = {'file': open(file_path, 'rb')}\n media_id = requests.post(url=UPLOAD_URL, files=files).json()['media_id']\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"Charset\": \"UTF-8\"\n }\n data ={\n \"msgtype\": \"file\",\n \"file\": {\n \"media_id\": media_id\n }\n }\n\n data = json.dumps(data)\n requests.post(url=URL, data=data, headers=headers)\n\n\n\nif __name__ == \"__main__\":\n\n # bot(URL).set_text(\"Hi, I am Robot 朝晖小助手 - Dev\\nAdded to the group by 张睿 on 03/20\", 'text').send()\n # bot(URL).set_image_path()\n # ai.chatgpt.chat(api_key='', prompt='你好我是朝晖小助手')\n file_path = '/Users/mike/Library/Mobile Documents/com~apple~CloudDocs/Work/系统/宏观/朝晖 商品 2023.03.21.pdf'\n upload_file(file_path)\n\n","repo_name":"aurora-mike/aurora","sub_path":"webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19140752147","text":"from io import BytesIO\n\nfrom mrjob.examples.mr_word_freq_count import MRWordFreqCount\n\nfrom tests.job import run_job\nfrom tests.py2 import TestCase\n\n\nclass MRWordFreqCountTestCase(TestCase):\n\n def test_empty(self):\n self.assertEqual(run_job(MRWordFreqCount()), {})\n\n def test_the_wheels_on_the_bus(self):\n RAW_INPUT = b\"\"\"\n The wheels on the bus go round and round,\n round and round, round and round\n The wheels on the bus go round and round,\n all through the town.\n \"\"\"\n\n EXPECTED_OUTPUT = {\n u'all': 1,\n u'and': 4,\n u'bus': 2,\n u'go': 2,\n u'on': 2,\n u'round': 8,\n u'the': 5,\n u'through': 1,\n u'town': 1,\n u'wheels': 2,\n }\n\n self.assertEqual(run_job(MRWordFreqCount(), RAW_INPUT),\n EXPECTED_OUTPUT)\n","repo_name":"saikirandulla/codesamples","sub_path":"mapreduce assignment/mrjob-master/tests/examples/test_mr_word_freq_count.py","file_name":"test_mr_word_freq_count.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74758699283","text":"import re\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport chicknn.config as config\n\nlogger = None\n\ndef setup_logger(console_level=None, file_level=None, filename=None):\n handlers = []\n logger = logging.getLogger(__name__)\n\n formatter = logging.Formatter('%(asctime)s | %(name)25s[%(lineno)4d] | %(levelname)8s | %(message)s', \n datefmt=\"%Y-%m-%dT%H%M%S\")\n\n if console_level:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(console_level)\n console_handler.setFormatter(formatter)\n handlers.append(console_handler)\n\n if file_level and filename:\n file_handler = RotatingFileHandler(filename,\n maxBytes=config.log_max_bytes,\n 
backupCount=config.log_backups)\n file_handler.setLevel(file_level)\n file_handler.setFormatter(formatter)\n handlers.append(file_handler)\n\n logging.basicConfig(level=console_level, handlers=handlers)\n\n return logger","repo_name":"sbavery/chicknn","sub_path":"chicknn/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70058510166","text":"from time import sleep\r\nimport pyautogui\r\nimport keyboard\r\n\r\nimport win32api, win32con\r\n\r\n\r\ndef click(x, y):\r\n win32api.SetCursorPos((x, y))\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\r\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\r\n sleep(0.08)\r\n\r\n\r\nwhile not keyboard.is_pressed('c'):\r\n sc = pyautogui.screenshot(region=(58, 241, 599, 420))\r\n width, height = sc.size\r\n\r\n for x in range(0, width, 12):\r\n achou = 0\r\n for y in range(0, height, 12):\r\n r, g, b = sc.getpixel((x, y))\r\n print(r, g, b)\r\n\r\n if r == 255 and b == 195:\r\n achou = 1\r\n click(58 + x, 241 + y)\r\n break\r\n if achou == 1:\r\n break\r\n\r\n# 58, 241\r\n# 599, 420\r\n# pixels_cor 255, 219, 195\r\n\r\n# sc = pyautogui.screenshot(region=(58, 241, 599, 420))\r\n# sc.save('Exemplo.png')\r\n","repo_name":"Bylander10/First-Python-Projects","sub_path":"visual bots/Aimbot.py","file_name":"Aimbot.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"} +{"seq_id":"20717648978","text":"import os\r\nfrom argparse import ArgumentParser, ArgumentError\r\nfrom contextlib import ExitStack\r\n\r\nimport boto3\r\nfrom botocore.exceptions import ClientError\r\n\r\n\r\ndef create_security_group(name, description, **kwargs):\r\n \"\"\"\r\n Create a security group\r\n :param name: Name of the security group\r\n :param description: Description for the security group\r\n :return: The created Security Group\r\n \"\"\"\r\n sg = ec2.create_security_group(\r\n Description=description,\r\n GroupName=name,\r\n **kwargs\r\n )\r\n\r\n # Add the cleanup for the security group when it's created\r\n def clean_security_group():\r\n print(\"Deleting Security Group %s (%s)...\" % (sg.group_name, sg.id))\r\n sg.delete()\r\n print(\"Deleted.\")\r\n\r\n CLEANUP.callback(clean_security_group)\r\n # Always print out the created resources so if the program doesn't clean up you can manually do so\r\n print(\"Created security group %s (%s)\" % (sg.group_name, sg.id))\r\n return sg\r\n\r\n\r\ndef create_instance(security_group, name, wait=True, **kwargs):\r\n \"\"\"\r\n Create an ec2 instance\r\n :param security_group: Security Group that the instance belongs to\r\n :param name: Name to tag the group with\r\n :param wait: If cleanup should wait for termination\r\n :return: The created Instance\r\n \"\"\"\r\n inst = ec2.create_instances(\r\n ImageId='ami-d38a4ab1', # Replace this with the image you want to use\r\n InstanceType='t2.micro',\r\n MaxCount=1,\r\n MinCount=1,\r\n # Placement={'AvailabilityZone': zone}, # If you want to use a specific zone\r\n SecurityGroupIds=[security_group.id],\r\n InstanceInitiatedShutdownBehavior='terminate',\r\n **kwargs\r\n )[0]\r\n\r\n # Add the cleanup for the instance when it's created\r\n def clean_instance():\r\n print(\"Terminating Instance %s (%s)...\" % (name, inst.id))\r\n inst.terminate()\r\n # This blocks till the instance is terminated\r\n if wait:\r\n inst.wait_until_terminated()\r\n 
print(\"Terminated\")\r\n # The performance could be improved by requesting termination of all instances at once\r\n # Take a look in the main part of this program for how\r\n\r\n CLEANUP.callback(clean_instance)\r\n\r\n # Label the instance\r\n inst.create_tags(Tags=[{'Key': 'Name', 'Value': name}])\r\n\r\n # Wait for instance to start\r\n if wait:\r\n inst.wait_until_running()\r\n # Print out the instances created\r\n print(\"Created Instance %s (%s)\" % (name, inst.id))\r\n return inst\r\n\r\n\r\nif __name__ == '__main__':\r\n # Process args\r\n # This allows you to make your submission modular, this would be useful for labs like the cloudstorage lab\r\n args = ArgumentParser(description=\"Workshop X: Run a bunch of ec2 instances to download pictures of cats and dogs\")\r\n\r\n args.add_argument(\"-sn\", \"--student-number\", type=str,\r\n help=\"Student Number (overrides the STUDENT_NUMBER env variable)\")\r\n\r\n\r\n def integer_at_least_one(param: str) -> int:\r\n val = int(param)\r\n if val < 1:\r\n raise ArgumentError(param, \"Number of instances should be at least 1\")\r\n return val\r\n\r\n\r\n args.add_argument(\"-ni\", \"--num-instances\", type=integer_at_least_one, default=\"5\",\r\n help=\"Number of instances [0, max_instances]\")\r\n\r\n args = vars(args.parse_args())\r\n\r\n # Constants\r\n STUDENT_NUMBER = args[\"student_number\"]\r\n if STUDENT_NUMBER is None:\r\n # Attempt to get it from environment variable if it is not set\r\n STUDENT_NUMBER = os.getenv('STUDENT_NUMBER')\r\n if STUDENT_NUMBER is None:\r\n print(\"WARNING: Student Number env/arg is not set!\")\r\n STUDENT_NUMBER = '00000000'\r\n print(\"Using Student Number: \" + STUDENT_NUMBER)\r\n\r\n RESOURCE_PREFIX = STUDENT_NUMBER + \"-wX\" # Used to identify your resources\r\n\r\n NUM_INSTANCES = args[\"num_instances\"]\r\n\r\n # Clean up stack with deferred cleanup methods\r\n # Main code is in a try so that cleanup can be done when something goes wrong\r\n with ExitStack() as CLEANUP:\r\n # Init\r\n ec2c = boto3.client('ec2')\r\n ec2 = boto3.resource('ec2') # Try to use the higher level resource API wherever possible\r\n\r\n # Here could make sure the current region supports running the project\r\n # e.g. 
max_instance limit, at least two active regions, etc\r\n\r\n # Create Security Group\r\n sg = create_security_group(RESOURCE_PREFIX + \"-sg\", \"Security Group for workshop X\")\r\n\r\n # Set up the security group properties here...\r\n\r\n # Bulk wait for instances to terminate\r\n INSTANCES_CREATED = []\r\n\r\n\r\n def wait_for_all_instances_to_terminate():\r\n print(\"Waiting for instances to terminate...\")\r\n for inst in INSTANCES_CREATED:\r\n inst.wait_until_terminated()\r\n print(\"All instances terminated.\")\r\n\r\n\r\n CLEANUP.callback(wait_for_all_instances_to_terminate)\r\n\r\n # Create Instances\r\n for i in range(0, NUM_INSTANCES):\r\n inst = create_instance(sg, RESOURCE_PREFIX + \"_\" + str(i), wait=False)\r\n INSTANCES_CREATED.append(inst)\r\n\r\n # Do stuff that doesn't require instances to be running here...\r\n\r\n # When the instances need to be used, wait for them to be fully running\r\n print(\"Waiting for instances run...\")\r\n for inst in INSTANCES_CREATED:\r\n inst.wait_until_running()\r\n print(\"All instances running.\")\r\n\r\n # Make use of the running instances here...\r\n\r\n input(\"Press Enter when done to clean up...\")\r\n\r\n print(\"Cleaning Up...\")\r\n\r\n print(\"Done!\")\r\n","repo_name":"uwacsp/cits5503","sub_path":"Code/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"30"} +{"seq_id":"14018596798","text":"#! python3\r\nfrom collections import deque\r\nimport itertools\r\n\r\n\r\ndef initialize():\r\n with open('input-Day9.txt') as f:\r\n lines = f.read().strip().split('\\n')\r\n\r\n number_gen = (int(line) for line in lines)\r\n number_tup = tuple([int(line) for line in lines])\r\n\r\n stack = deque()\r\n for i in range(25):\r\n stack.append(next(number_gen))\r\n return number_gen, stack, number_tup\r\n\r\n\r\ndef check_number(check_num, check_dq):\r\n combs = itertools.combinations(check_dq, 2)\r\n check = list(filter(lambda x: x[0] + x[1] == check_num, combs))\r\n return True if check else False\r\n\r\ndef part_1(numbers, stack):\r\n while True:\r\n next_num = next(numbers)\r\n if not check_number(next_num, stack):\r\n return next_num\r\n else:\r\n stack.popleft()\r\n stack.append(next_num)\r\n\r\n\r\ndef get_contig(total, set):\r\n for i in range(len(set)):\r\n for j in range(len(set[i+1:])):\r\n if sum(set[i:j]) == total:\r\n return set[i:j]\r\n elif sum(set[i:j]) > total:\r\n break\r\n return subset\r\n\r\ndef part_2(invalid, nums):\r\n subset = get_contig(invalid, nums)\r\n return min(subset) + max(subset)\r\n\r\nif __name__ == '__main__':\r\n num_gen, stack, num_tup = initialize()\r\n invalid_num = part_1(num_gen, stack)\r\n print(f'Answer to part 1 is {invalid_num}')\r\n weakness = part_2(invalid_num, num_tup)\r\n print(f'Answer to part 2 is {weakness}')","repo_name":"Ryles1/AdventofCode","sub_path":"2020/Day9.py","file_name":"Day9.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"33748721823","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\ndef compareTriplets(a, b):\r\n Alice = 0\r\n Bob = 0\r\n\r\n for i in range(3):\r\n if a[i] > b[i]:\r\n Alice += 1\r\n elif a[i] < b[i]:\r\n Bob += 1\r\n return Alice, Bob\r\n\r\n\r\na = list(map(int, input().rstrip().split()))\r\nb = list(map(int, input().rstrip().split()))\r\n\r\nresult = compareTriplets(a, b)\r\n\r\nprint(' 
'.join(map(str, result)))\r\nprint('\\n')\r\n\r\n","repo_name":"Berlina24/HackerRank-Problem-Solving","sub_path":"Compare The Triplets.py","file_name":"Compare The Triplets.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"34345882989","text":"import re\n\n\ndef parse_path(data):\n rg_list = [\n r\"array given in (.*?) on line\",\n r\"occurred in (.*?) on line\",\n r\"on a non-object in (.*?) [o|i]n line\",\n r\"No such file or directory (errno 2) in (.*?) on line\",\n r\"No such file or directory in (.*?) in line\",\n ]\n\n for rg_text in rg_list:\n try:\n path = re.findall(rg_text, data)[0]\n if path:\n return path\n except Exception:\n pass\n\n return \"\"\n\n\ndef path_disclosure(req, target, info_cb, found_cb, not_found_cb):\n name = \"vBulletin's path disclosure\"\n info_cb(f\"Checking {name}\")\n\n is_found = False\n\n plinks = [\n \"forumdisplay.php?do[]=[test.dll]\",\n \"calendar.php?do[]=[test.dll]\",\n \"search.php?do[]=[test.dll]\",\n \"forumrunner/include/album.php\",\n \"core/vb5/route/channel.php\",\n \"core/vb5/route/conversation.php\",\n \"includes/api/interface/noncollapsed.php\",\n \"includes/api/interface/collapsed.php\",\n \"vbseo_sitemap/addons/vbseo_sm_vba.php\",\n \"vbseo_sitemap/addons/vbseo_sm_vba_links.php\"\n ]\n \n for plink in plinks:\n r = req.get(target + plink)\n if \"Cannot modify header information\" in r.text or \"trim()\" in r.text or \\\n \"class_core.php\" in r.text or \"header already sent\" in r.text or \\\n \"Fatal error\" in r.text:\n path = parse_path(r.text)\n if path:\n tags = [\n \"\",\n \"\",\n \"\",\n \"\",\n ]\n for tag in tags:\n if tag in path:\n path = path.replace(tag, \"\")\n found_cb(name, path)\n return\n\n if not is_found:\n not_found_cb(name)\n","repo_name":"ParrotSec/vbyscan","sub_path":"modules/enumerate/path_disclosure.py","file_name":"path_disclosure.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"16463556632","text":"import numpy as np\n\nimport binary_exponent\n\n\ndef check_prime_miller_rabin(prime, n_checks):\n if prime == 2:\n return True\n else:\n if prime % 2 == 0:\n return False\n\n t = prime - 1\n s = 0\n\n while t % 2 == 0:\n t //= 2\n s += 1\n\n pretenders = [i for i in range(2, prime - 1)]\n np.random.shuffle(pretenders)\n for i in range(min(n_checks, len(pretenders))):\n a = pretenders[i]\n x = binary_exponent.exponent_modulo(a, t, prime)\n if x == 1 or x == prime - 1:\n continue\n\n need_continue = False\n\n for j in range(s):\n x = (x * x) % prime\n if x == 1:\n return False\n if x == prime - 1:\n need_continue = True\n break\n if need_continue:\n continue\n return False\n\n return True\n\n\nif __name__ == \"__main__\":\n print(f\"{33} is prime: {check_prime_miller_rabin(33, 10)}\")\n","repo_name":"ooleksyshyn/university_labs","sub_path":"semester6/Crypto/lab1/miller_rabin.py","file_name":"miller_rabin.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72760030165","text":"from os.path import exists\nfrom os import mkdir\nfrom sys import argv\nimport plotly.graph_objects as go\nimport subprocess\nfrom datetime import datetime\nimport json\nimport miFit\n\n\nnum_list = []\ndis_list = []\ndailyGoalList = []\nyear = str(datetime.now().year)\nmonth = str(datetime.now().month)\n\nall_data = 
miFit.main()\nmonth_data = all_data[year][month]\n\nfor i in month_data.values():\n num_list.append(i[\"num\"])\n dis_list.append(i[\"dis\"])\n dailyGoalList.append(i[\"goal\"])\n\ndays_list = list(month_data.keys())\n\n# calculate month average\naverage_num = [0, 0]\naverage_dis = [0, 0]\nfor i in month_data.values():\n if i.get(\"num\") is not None:\n average_num[0] += i[\"num\"]\n average_num[1] += 1\n if i.get(\"dis\") is not None:\n average_dis[0] += i[\"dis\"]\n average_dis[1] += 1\n\nif average_num[1] != 0:\n average_num = int(average_num[0]/average_num[1])\nif average_dis[1] != 0:\n average_dis = int(average_dis[0]/average_dis[1])\n\nmonth_data[\"average\"] = {\"num\": average_num, \"dis\": average_dis}\n\n# create monthly line chart\nfig = go.Figure()\nfig.add_trace(go.Scatter(x=days_list, y=num_list, name=\"steps\", mode='lines+markers'))\nfig.add_trace(go.Scatter(x=days_list, y=dis_list, name=\"meters\", mode='lines+markers'))\n\naverageNumList = []\naverageDisList = []\nfor i in days_list:\n averageNumList.append(average_num)\n averageDisList.append(average_dis)\n\nfig.add_trace(go.Scatter(x=days_list, y=dailyGoalList, name=\"daily goal\", mode=\"lines\", line={\"dash\": \"dash\"}))\nfig.add_trace(go.Scatter(x=days_list, y=averageNumList, name=\"average steps\", mode=\"lines\", line={\"dash\": \"dash\"}))\nfig.add_trace(go.Scatter(x=days_list, y=averageDisList, name=\"average meters\", mode=\"lines\", line={\"dash\": \"dash\"}))\n\nfig.update_layout(title=\"daily steps\", xaxis_title=\"day\", template=\"plotly_dark\")\n\nif not exists(\"exports\"):\n mkdir(\"exports\")\n mkdir(f\"exports/{year}\")\nelif not exists(f\"exports/{year}\"):\n mkdir(f\"exports/{year}\")\n\nfig.write_html(f\"exports/{year}/{month}.html\")\nprint(f\"exported monthly line chart to exports/{year}/{month}.html\")\n\n# export this month data to json\nif exists(f\"exports/{year}/{month}.json\"):\n with open(f\"exports/{year}/{month}.json\", \"r\") as f:\n month_data_old = dict(json.load(f))\nelse:\n month_data_old = {}\n\nwith open(f\"exports/{year}/{month}.json\", \"w\") as f:\n json.dump(month_data, f, indent=4)\n print(f\"exported this month data to exports/{year}/{month}.json\")\n\n# push the pages repository\nif \"--push\" in argv or \"-p\" in argv or \"--forcepush\" in argv or \"-fp\" in argv:\n if month_data != month_data_old or \"--forcepush\" in argv or \"-fp\" in argv:\n print()\n p = subprocess.Popen([\"git\", \"add\", \".\"], cwd=\"exports\")\n p.wait()\n p.kill()\n\n p = subprocess.Popen([\"git\", \"commit\", \"-am\", f\"{datetime.now().date()} auto update\"], cwd=\"exports\")\n p.wait()\n p.kill()\n\n p = subprocess.Popen([\"git\", \"push\"], cwd=\"exports\")\n p.wait()\n p.kill()\n print(\"\\npushed pages repo\")\n else:\n print(\"\\nno differences, skipping pages push\")\n","repo_name":"bewuwy/mi-band-steps","sub_path":"chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"22190162965","text":"import logging\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom yajuu.unshorteners.utils import get_quality\nfrom yajuu.media.sources.source import Source\n\nlogger = logging.getLogger(__name__)\n\n\ndef unshorten(url, quality=None):\n soup = BeautifulSoup(requests.get(url).text, 'html.parser')\n form = soup.find('form', {'name': 'F1'})\n\n payload = {}\n fields = ['op', 'id', 'rand', 'referer', 'method_free', 'method_premium']\n\n for input in 
form.select('input'):\n if input.get('name') not in fields:\n continue\n\n payload[input.get('name')] = input.get('value')\n\n logger.debug('[tufiles] {}'.format(payload))\n\n src = requests.post(url, data=payload, stream=True).url\n\n if quality is None:\n logger.warning('[tusfiles] quality was not passed')\n quality = get_quality(src)\n\n return [Source(src, quality)]\n","repo_name":"thesonyman/yajuu","sub_path":"yajuu/unshorteners/tusfiles.py","file_name":"tusfiles.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"8126571445","text":"# Bad writing\n# def divide(a, b):\n# try:\n# return a / b\n# except ZeroDivisionError:\n# return None\n\n\ndef divide(a, b):\n try:\n return a / b\n except ZeroDivisionError as e:\n raise ValueError('Invalid inputs') from e\n\n\nx = 0\ny = 5\nresult = divide(x, y)\nif not result:\n print('Invalid inputs') # This is a mistake!\n","repo_name":"eda3/python_effective","sub_path":"014_Noneを返すよりは例外を選ぶ.py","file_name":"014_Noneを返すよりは例外を選ぶ.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"17988692478","text":"# Explanation: This function searches a substring in files and directories at or higher level then current one\n# Usage: python3 SearchCurDirForSubstring.py '/any/dir/or/substring'\n# Requirements: Python3 or Python2, grep\n# Source: ~\n\nimport sys\nimport subprocess\n \nsubstring = sys.argv[1]\n\nadd_wildcard_to_substring = \" '\" + substring + \"'\"\n\ncmd = \"grep -nrw . -e\" + add_wildcard_to_substring\n\np = subprocess.Popen(cmd,\n shell=True)\n\np.communicate()\n","repo_name":"YumaTheCompanion/PyHelp","sub_path":"String/SearchCurDirForSubstring.py","file_name":"SearchCurDirForSubstring.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12032706900","text":"#\n# @lc app=leetcode.cn id=53 lang=python3\n#\n# [53] 最大子序和\n#\n\n# @lc code=start\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n t = [nums[0]] # 建表,存到此位置为止最大和\n for i in range(1, len(nums)):\n t.append(max(t[i-1] + nums[i], nums[i]))\n return max(t)\n # r = nums[0] # 存最大和\n # t = 0 # 存暂时最大和\n # for v in nums:\n # if t + v > 0:\n # t += v\n # r = max(r, t)\n # else:\n # r = max(r, v)\n # t = 0\n # return r\n# @lc code=end\n\n","repo_name":"nerutia/leetcode_new","sub_path":"53.最大子序和.py","file_name":"53.最大子序和.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2345247489","text":"import logging\nfrom http import HTTPStatus\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom starlette.responses import HTMLResponse, RedirectResponse\n\nfrom monitor import schema, service\nfrom monitor.authentication import jwt\nfrom monitor.settings import app_settings\n\nrouter = APIRouter(\n prefix=\"/auth/v1/login\",\n)\n\nlogger = logging.getLogger(__name__)\n\n\n@router.post(\"\", status_code=HTTPStatus.OK)\ndef login(\n credentials: schema.Credentials,\n login_service: service.LoginService = Depends(service.create_login_service),\n):\n logging.debug(f\"Logging in user - {credentials.email}\")\n user = login_service.authenticate_login(credentials)\n\n if user is None:\n logging.info(f\"Incorrect credentials {credentials}\")\n raise 
HTTPException(status_code=HTTPStatus.UNAUTHORIZED)\n\n private_key = app_settings().jwt_private_key.get_secret_value()\n\n jwt_token = jwt.create_jwt_token(\n user,\n app_settings().jwt_token_expiration_minutes,\n app_settings().jwt_private_key.get_secret_value(),\n app_settings().jwt_algorithm,\n )\n\n refresh_token = jwt.create_refresh_token(\n user.email, secret_key=private_key, algorithm=app_settings().jwt_algorithm\n )\n\n logger.info(f\"User {user.email} logged in.\")\n\n return {\"access_token\": jwt_token, \"refresh_token\": refresh_token}\n\n\n@router.get(\"\", status_code=HTTPStatus.OK, response_class=HTMLResponse)\ndef logout():\n response = RedirectResponse(url=\"/\")\n return response\n","repo_name":"catbreathx/network-monitor-server","sub_path":"src/monitor/route/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38595466981","text":"import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\n\n#################################################\n# Database Setup\n# #################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\n\n# # reflect an existing database into a new model\nBase = automap_base()\n# # reflect the tables\nBase.prepare(autoload_with=engine)\n\n# Save reference to the table\nMeasurement= Base.classes.measurement\nStation = Base.classes.station\n\n# #################################################\n# # Flask Setup\n# #################################################\napp = Flask(__name__)\n\n# #################################################\n# # Flask Routes\n# #################################################\n\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/
\"\n f\"/api/v1.0//\")\n\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of all Measurement data\"\"\"\n # Query precipitation data\n results = session.query(Measurement.date,Measurement.prcp).all()\n\n session.close()\n\n # Convert list of tuples into normal list\n all_ppt = []\n for date, prcp in results:\n ppt_dict = {}\n ppt_dict[\"date\"] = date\n ppt_dict[\"prcp\"] = prcp\n all_ppt.append(ppt_dict)\n\n return jsonify(all_ppt)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \"\"\"Return a list of stations data\"\"\"\n # Query all stations data\n results = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()\n\n session.close()\n\n all_stations = []\n for name, station,latitude, longitude, elevation in results:\n station_dict = {}\n station_dict[\"name\"] = name\n station_dict[\"station\"] = station\n station_dict[\"latitude\"] = latitude\n station_dict[\"longitude\"] = longitude\n station_dict[\"elevation\"] = elevation\n all_stations.append(station_dict)\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query temperature\n # most-active station for the previous year of data.\n results = session.query(Measurement.station, Measurement.date, Measurement.tobs).\\\n filter(Measurement.station== \"USC00519281\").\\\n filter(Measurement.date >= '2016-08-23').\\\n filter(Measurement.date <= '2017-08-23').all()\n \n session.close()\n\n # Convert list of tuples into normal list\n all_temp = []\n for station, date, tobs in results:\n temp_dict = {}\n temp_dict[\"station\"] = station\n temp_dict[\"date\"] = date\n temp_dict[\"tobs\"] = tobs\n all_temp.append(temp_dict)\n\n return jsonify(all_temp)\n\n\n@app.route(\"/api/v1.0/\")\ndef ss(start):\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Query for start date input\n results = session.query(func.avg(Measurement.tobs), func.min(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start ).all()\n \n session.close\n \n al = []\n for tavg, tmin, tmax in results:\n tempe_dict = {}\n tempe_dict[\"avg_tobs\"] = tavg\n tempe_dict[\"min_tobs\"] = tmin\n tempe_dict[\"max_tobs\"] = tmax\n al.append(tempe_dict)\n\n return jsonify(al)\n\n@app.route(\"/api/v1.0//\")\ndef ee(start, end):\n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n #Query for start and end date input\n results = session.query(func.avg(Measurement.tobs), func.min(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start ).\\\n filter(Measurement.date <= end ).all()\n \n session.close\n \n al = []\n for tavg, tmin, tmax in results:\n tempe_dict = {}\n tempe_dict[\"avg_tobs\"] = tavg\n tempe_dict[\"min_tobs\"] = tmin\n tempe_dict[\"max_tobs\"] = tmax\n al.append(tempe_dict)\n\n return jsonify(al)\n \n \nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"shelly-hub/sqlalchemy-challenge","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5335349274","text":"from os.path import dirname, join, realpath\nimport pytest\n\nfrom 
django.core.urlresolvers import reverse\n\n\ndef read_file(file_name, folder=\"mocks\"):\n file = join(dirname(realpath(__file__)), folder, file_name)\n with open(file) as f:\n return f.read()\n\n\ndef assert_responses(client, expected_code, url_names, kwargs={}, prefix_urls='', suffix_urls=''):\n \"\"\"\n Helper function to test a bunch of pages with specific kwargs\n\n :param expected_code: expected http response code (e.g. 200 or 302 or a tuple of codes)\n :param url_names: List of url names (e.g. ['about', 'info'])\n :param kwargs: dict e.g. {'pk': 1}\n :param prefix_urls: 'appname:'\n :param suffix_urls: query string, does not get evaluated '?change=asdf'\n :return:\n \"\"\"\n for page in url_names:\n url = '{}{}'.format(reverse('{}{}'.format(prefix_urls, page), kwargs=kwargs), suffix_urls)\n response = client.get(url)\n\n if isinstance(expected_code, int):\n # expected_code can either be a tuple or a single integer.\n expected_code = (expected_code,)\n\n print(prefix_urls, page, expected_code, response.status_code)\n assert response.status_code in expected_code, \\\n '{} failed. expected {}, got {} for page {} with reason {}'\\\n .format(url, expected_code, response.status_code, page, response.__dict__)\n\n\ndef assert_rest_detail(rest_client, url, expected_data):\n response = rest_client.get(url)\n # make sure that in case of an error you can see the json response if any\n assert response.content\n assert response.status_code == 200, response.content\n\n for key, expected_result in expected_data.items():\n assert response.data.get(key) == expected_result, response.data\n","repo_name":"acidjunk/recruitme_monolith","sub_path":"tests/unit_tests/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"30735609784","text":"from addressbook import AddressBook\n\ndef input_error(func):\n def wrapper(*args):\n try:\n return func(*args)\n except IndexError as index_error:\n return index_error\n except ValueError as value_error:\n return value_error\n except KeyError as key_error:\n return key_error\n except AttributeError as attribute_error:\n return attribute_error\n except NotImplementedError:\n return \"This feature is not implemented\"\n return wrapper\n\ndef welcome_message(*args) -> str: # noqa\n message = \"Hi! How can i help you?\"\n return message\n\ndef help_message(*args) -> str: # noqa\n message = \"\"\"\nCommands and their usage:\nadd: \n record 'name' : add name.\n phone 'name' 'phone' : add phone.\n email 'name' 'email' : add email.\n birthday 'name' 'birthday' : add birthday (format yyyy-mm-dd).\nchange: \n phone 'name' 'phone' 'new phone' : change phone.\n email 'name' 'email' : change email. 
\n birthday 'name' 'birthday' : change birthday (format yyyy-mm-dd)\ndel: \n record 'name' : delete contact.\n phone 'name' 'phone' : delete phone.\n email 'name' 'email' : delete email.\n birthday 'name' 'birthday' : delete birthday.\n \"\"\"\n return message\n\n@input_error\ndef add_handler(ab: AddressBook, *args) -> str:\n if args[0] == 'record':\n ab.add_record(args[1])\n message = f'{args[1]} added to addressbook.'\n elif args[0] == 'phone':\n ab[args[1]].add_phone(args[2])\n message = f'Phone {args[2]} added to {args[1]}.'\n elif args[0] == 'email':\n ab[args[1]].set_email(args[2])\n message = f'Email {args[2]} added to {args[1]}.'\n elif args[0] == 'birthday':\n ab[args[1]].set_birthday(args[2])\n message = f'Birthday {args[2]} added to {args[1]}.'\n else:\n message = f'{args[0]}: wrong command.'\n return message\n\n@input_error\ndef change_handler(ab: AddressBook, *args) -> str:\n if args[0] == 'phone':\n ab[args[1]].change_phone(args[2], args[3])\n message = f'Phone in {args[1]} was changed from {args[2]} to {args[3]} record.'\n elif args[0] == 'email':\n ab[args[1]].set_email(args[2])\n message = f'Email in {args[1]} was changed'\n elif args[0] == 'birthday':\n ab[args[1]].set_birthday(args[2])\n message = f'Birthday in {args[1]} was changed'\n else:\n message = f'{\" \".join(args)}: wrong command.'\n return message\n\n@input_error\ndef del_handler(ab: AddressBook, *args) -> str:\n if args[0] == 'record':\n ab.del_record(args[1])\n message = f'{args[1]} was deleted.'\n elif args[0] == 'phone':\n ab[args[1]].del_phone(args[2])\n message = f'Phone {args[2]} was deleted from {args[1]}.'\n elif args[0] == 'email':\n ab[args[1]].del_email()\n message = f'Email was deleted from {args[1]}.'\n elif args[0] == 'birthday':\n ab[args[1]].del_birthday()\n message = f'Birthday was deleted from {args[1]}.'\n else:\n message = f'del does not support {args[0]}.'\n return message\n\ndef show(ab: AddressBook, search='') -> str:\n table = ab.show(search)\n return table\n\ndef save_data(ab: AddressBook, *args) -> str: # noqa\n ab.save_records_to_file('contacts.dat')\n return \"Records have been saved.\"\n\ndef load_data(ab: AddressBook, *args) -> str: # noqa\n ab.read_records_from_file('contacts.dat')\n return \"Records have been loaded.\"\n\nfunction = {'hello': welcome_message,\n 'help': help_message,\n 'add': add_handler,\n 'change': change_handler,\n 'del': del_handler,\n 'show': show,\n 'save': save_data,\n 'load': load_data}\n","repo_name":"STsapko/pwhw1","sub_path":"command_handlers.py","file_name":"command_handlers.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"23934273051","text":"import matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport numpy as np\n\n# Funció per dibuixar el passadís\ndef dibuixar_passadis(n, m, parets, vector_entrades, individus, obstacles):\n # Configurem el tamany dels quadrats del passadís\n plt.xticks(np.arange(0, n+1, 1))\n plt.yticks(np.arange(0, m+1, 1))\n\n #Pintem les vores de color blanc per diferenciar els quadrats\n for i in range(m+1):\n if i < (n+1):\n plt.axvline(i, color='grey', lw=2)\n plt.axhline(i, color='grey', lw=2)\n\n # parets.extend([(0,0), (0,n-1), (m-1,0), (m-1,n-1)])\n # entrades = [e for entrada in vector_entrades for e in entrada]\n\n # for x,y in parets:\n # if(x,y) in parets:\n # plt.gca().add_patch(plt.Rectangle((y, m-x-1), 1, 1, color='gray', alpha=0.5)) #Parets\n \n # else:\n # 
plt.gca().add_patch(plt.Rectangle((y, m-x-1), 1, 1, color='black')) #Cantonades\n\n # if (x,y) in entrades:\n # for entrada in vector_entrades:\n # if (x,y) in entrada:\n # colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown', 'yellow']\n # color = colors[vector_entrades.index(entrada) % len(colors)]\n # plt.scatter(y+0.5, m-x-1+0.5, marker='o', s=200, color=color)\n\n # Dibuixem els individus amb els colors corresponents\n i = 0\n for ind in individus:\n if ind.posicio == None: continue\n if i < 6:\n x, y = ind.get_posicio()\n dx, dy = ind.get_direccio()\n\n #for entrada in vector_entrades:\n #colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown', 'yellow']\n if dx == -1:\n color = 'blue'\n cercle = plt.Circle((y+0.5, m-x-1+0.5), radius=0.1, color=color)\n plt.gca().add_patch(cercle)\n direccio = mpatches.FancyArrow(y+0.5, m-x-1+0.5, 0.1*dy, -0.1*dx, width=0.05, color=color)\n plt.gca().add_patch(direccio)\n else:\n color = 'red'\n cercle = plt.Circle((y+0.5, m-x-1+0.5), radius=0.1, color=color)\n plt.gca().add_patch(cercle)\n direccio = mpatches.FancyArrow(y+0.5, m-x-1+0.5, 0.1*dy, -0.1*dx, width=0.05, color=color)\n plt.gca().add_patch(direccio)\n else:\n x, y = ind.get_posicio()\n dx, dy = ind.get_direccio()\n\n #for entrada in vector_entrades:\n #colors = ['blue', 'red', 'green', 'orange', 'purple', 'brown', 'yellow']\n if dx == -1:\n color = 'blue'\n cercle = plt.Circle((y+0.5, m-x-1+0.5), radius=0.1, color=color)\n plt.gca().add_patch(cercle)\n direccio = mpatches.FancyArrow(y+0.5, m-x-1+0.5, 0.1*dy, -0.1*dx, width=0.05, color='red')\n plt.gca().add_patch(direccio)\n else:\n color = 'red'\n cercle = plt.Circle((y+0.5, m-x-1+0.5), radius=0.1, color=color)\n plt.gca().add_patch(cercle)\n direccio = mpatches.FancyArrow(y+0.5, m-x-1+0.5, 0.1*dy, -0.1*dx, width=0.05, color='blue')\n plt.gca().add_patch(direccio)\n\n i += 1 \n \n #direccio = mpatches.FancyArrow(5+0.5, m-6-1+0.5, 0.1*0, -0.1*-1, width=0.05, color='black')\n #plt.gca().add_patch(direccio)\n # for pos in obstacles:\n # x, y = pos\n # plt.gca().add_patch(plt.Rectangle((y, m-x-1), 1, 1, color='gray', alpha=0.9))\n\n # Objectiu\n #plt.gca().add_patch(plt.Rectangle((3, m-0-1), 1, 1, color='black'))\n\n # Ajustem els eixos per adaptar-los a la mida de la finestra\n plt.axis('scaled')\n plt.axis('off')\n plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)\n\n # Actualitzem la figura del passadís en cada unitat de temps 't'\n plt.pause(20.0)\n plt.clf()\n\nclass Individu():\n def __init__(self, posicio, direccio):\n self.posicio = posicio\n self.direccio = direccio\n \n def get_posicio(self):\n return self.posicio\n \n def get_direccio(self):\n return self.direccio\n\nm = 4\nn = 3\nmatriu = np.zeros((m, n))\n# parets = []\n# for i in range(m):\n# for j in range(n):\n# if i == 0 or i == (m - 1) or j == 0 or j == (n - 1):\n# if (i, j) in [(0, 0), (0, n-1), (m-1, 0), (m-1, n-1)]:\n# matriu[i, j] = 4 # Tractem les cantonades com a obstacles per a que no es puguin crear entrades\n# else:\n# matriu[i, j] = 2 # Perímetre\n# parets.append((i, j))\n# entrades = []\n# entrades.append([p for p in parets if p[0] == 0])\n# entrades.append([p for p in parets if p[0] == (m-1)])\n# entrades.append([(4,0),(5,0),(6,0)])\n# entrades.append([(4,n-1),(5,n-1),(6,n-1)])\n# for entrada in entrades:\n# for e in entrada:\n# matriu[e] = 3\n\n# obstacles = [(4,2),(4,3),(4,4),(5,2),(5,3),(5,4),(6,2),(6,3),(6,4)]\nindividus = []\n# Punt central\n#individus.append(Individu((7, 5), (-1, 0)))\n\n#posiciones = [(6,4),(6,5), 
(6,6), (7,4), (7,5), (7,6), (8,4), (8,6)]\n#posiciones = [(6,5), (5,4), (5,5), (5,6), (4,3), (4,4), (4,5), (4,6), (4,7)]\n#individus.append(Individu((6, 5), (-1, 0)))\nindividus.append(Individu((0, 0), (-1, 0)))\nindividus.append(Individu((0, 1), (-1, 0)))\nindividus.append(Individu((0, 2), (1, 0)))\nindividus.append(Individu((1, 0), (-1, 0)))\nindividus.append(Individu((1, 1), (-1, 0)))\nindividus.append(Individu((1, 2), (1, 0)))\nindividus.append(Individu((2, 0), (-1, 0)))\nindividus.append(Individu((2, 1), (1, 0)))\nindividus.append(Individu((2, 2), (-1, 0)))\nindividus.append(Individu((3, 0), (1, 0)))\nindividus.append(Individu((3, 1), (-1, 0)))\nindividus.append(Individu((3, 2), (-1, 0)))\n\nparets = []\nentrades = []\nobstacles = []\ndibuixar_passadis(n, m, parets, entrades, individus, obstacles)","repo_name":"isaacpizarro95/Modeling-Workshop","sub_path":"Discret/altres coses/dibuixar_passadis.py","file_name":"dibuixar_passadis.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26481937081","text":"T = int(input())\nassert(T>=1 and T<=100), \"T must be 1 <= T <= 100\"\n\nwhile (T>0):\n\tN = int(input())\n\tassert(N>=1 and N<=150), \"N must be 1 <= N <= 150\"\n\n\tA = (list(map(int,input().split())))[:N]\n\tB = (list(map(int,input().split())))[:N]\n\n\tfor i in range(0,N):\n\t\tassert(A[i]>=0 and A[i]<=50), \"A[\"+str(i)+\"] must be 0 <= A[i] <= 50\"\n\t\tassert(B[i]>=0 and B[i]<=50), \"B[\"+str(i)+\"] must be 0 <= B[i] <= 50\"\n\n\tsum = 0\n\tmax = 0\n\n\tfor i in range(0,N):\n\t\tsum = (A[i]*20-B[i]*10)\n\n\t\tif (max scene, obj_view, scene) # later objects overlay earlier ones with no transparency\n except ValueError:\n logger.error(\"Array dimensions mismatch. obj_view.shape=%s, scene.shape=%s\" % (obj_view.shape, scene.shape))\n logger.error(\" region: %s\" % region.describe())\n logger.error(\" visual object: %s\" % obj.describe())\n raise\n else:\n #logger.debug(\"Warning: region %s does not overlap this object (%s).\" % (region.describe(), obj.describe()))\n pass\n return numpy.where(scene > TRANSPARENT, scene, self.background_luminance)\n\n def get_max_luminance(self):\n \"\"\"\n Returns the maximum luminance in the scene.\n \"\"\"\n return max(obj.max_luminance for obj in self.content.values())\n\n def describe(self):\n return \"visual space with background luminance %g cd/m2, updating every %g ms, containing %d objects\" % \\\n (self.background_luminance, self.update_interval, len(self.content))\n\n\nclass VisualRegion(object):\n \"\"\"\n A rectangular region of visual space.\n \n Parameters\n ----------\n location_x : float (degrees)\n The x coordinate of the center of the region in the visual space. \n \n location_y : float (degrees)\n The y coordinate of the center of the region in the visual space. \n\n size_x : float (degrees)\n The x size of the region in the visual space. \n\n size_y : float (degrees)\n The y size of the region in the visual space. 
\n \"\"\"\n\n def __init__(self, location_x, location_y, size_x, size_y):\n\n self.location_x = location_x\n self.location_y = location_y\n self.size_x = size_x\n self.size_y = size_y\n\n assert self.size_x > 0 and self.size_y > 0\n\n half_width =\t self.size_x/2.0\n half_height = self.size_y/2.0\n self.left = self.location_x - half_width\n self.right = self.location_x + half_width\n self.top = self.location_y + half_height\n self.bottom = self.location_y - half_height\n self.width = self.right - self.left\n self.height = self.top - self.bottom\n\n def __eq__(self, other):\n return (self.location_x == other.location_x\n and self.location_y == other.location_y\n and self.size_x == other.size_x\n and self.size_y == other.size_y)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n return hash((self.location_x,self.location_y,self.size_x,self.size_y))\n\n def overlaps(self, another_region):\n \"\"\"\n Returns whether this region overlaps with the one in the `another_region` argument.\n \"\"\"\n\n lr = self.right <= another_region.left or self.left >= another_region.right\n tb = self.top <= another_region.bottom or self.bottom >= another_region.top\n return not(lr or tb)\n\n def intersection(self, another_region):\n \"\"\"\n Returns VisualRegion corresponding to the intersection of this VisualRegion and the one in the `another_region` argument.\n \"\"\"\n if not self.overlaps(another_region):\n raise Exception(\"Regions do not overlap.\")\n left = max(self.left, another_region.left)\n right = min(self.right, another_region.right)\n assert left <= right, \"self: %s\\nanother_region: %s\" % (self.describe(), another_region.describe())\n bottom = max(self.bottom, another_region.bottom)\n top = min(self.top, another_region.top)\n assert bottom <= top\n return VisualRegion(location_x=(left + right)/2.0,\n location_y=(top + bottom)/2.0,\n size_x=right - left,\n size_y=top - bottom)\n\n def describe(self):\n s = \"\"\"Region of visual space centred at (%(location_x),%(location_y)) s of size (%(size_x),%(size_y))s.\n Edges: left=%(left)g, right=%(right)g, top=%(top)g, bottom=%(bottom)g\"\"\" % self.__dict__\n return s\n\n\nthe_final_frontier = True\n","repo_name":"CSNG-MFF/mozaik","sub_path":"mozaik/space.py","file_name":"space.py","file_ext":"py","file_size_in_byte":11069,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"30"} +{"seq_id":"5857596626","text":"from flask import render_template,request,redirect,session,url_for\nfrom app import webapp\nfrom app.main import check_logged_in, get_login_status_webpage\nfrom app.validation import isdate, istime, isfloat, isEmpty\nfrom app import dynamo\nfrom app import S3\nimport json\n\n@webapp.route('/assessment',methods=['GET'])\ndef submit_assessment():\n logged, username = get_login_status_webpage()\n validation_error = False\n if \"validation_error\" in session:\n validation_error = True\n session.pop(\"validation_error\", None)\n\n return render_template(\"self-assessment.html\",\n logged=logged,\n username=username,\n title=\"Take a self-assessment\",\n validation_error=validation_error\n )\n\n\n\n@webapp.route('/view_assessment_history',methods=['GET'])\ndef view_assessment_history():\n if (check_logged_in()):\n logged, username = get_login_status_webpage()\n filename = username + \".json\"\n if (S3.check_file_exists(filename)):\n obj = json.load(S3.download_file_to_object(filename))\n q1 = obj.get(\"Have you had in-person closed contact with patients who diagnosed with 
coronavirus?\", \"unknown\")\n q2 = obj.get(\"Have you visited a place that patients who diagnosed with coronavirus has been to?\", \"unknown\")\n q3 = obj.get(\"Have you traveled to foreign countries in the last 14 days?\", \"unknown\")\n q4 = obj.get(\"symptoms\", [])\n result = obj.get(\"result\", \"unknown\")\n\n if \"suspicious\" in result or \"emergency\" in result:\n risk_level = \"danger\"\n elif \"isolation\" in result:\n risk_level = \"warning\"\n else:\n risk_level = \"success\"\n\n return render_template(\"assessment_history.html\",\n assessment_exist=True,\n q1=q1,\n q2=q2,\n q3=q3,\n q4=q4,\n result=result,\n risk_level=risk_level,\n username=username,\n logged=logged,\n title=\"Assessment History\"\n )\n else:\n return render_template(\"assessment_history.html\",\n assessment_exist=False,\n username=username,\n logged=logged,\n title=\"Assessment History\"\n )\n \n else:\n return redirect(url_for(\"main\"))\n\n\n\n@webapp.route('/submit_form',methods=['POST'])\ndef evaluate_results():\n logged, username = get_login_status_webpage()\n if (check_logged_in()):\n user = session[\"user\"]\n fever = request.form.get('fever')\n cough = request.form.get('cough')\n breath = request.form.get('breath')\n chest_pain = request.form.get('chest_pain')\n other_symptoms = request.form.get('other_symptoms')\n\n one = request.form.get('one')\n two = request.form.get('two')\n three = request.form.get('three')\n if one is None or two is None or three is None:\n session[\"validation_error\"] = True\n return redirect(url_for(\"submit_assessment\"))\n\n #Analyze the user's answer\n if (fever or cough or breath or chest_pain) and (one == \"yes\"):\n respond_type = 0\n elif (breath or chest_pain) and (one == \"no\"):\n respond_type = 1\n elif (fever or cough or other_symptoms) or (one == \"yes\") or (one == \"unknown\") or (two == \"yes\") or (three == \"yes\"):\n respond_type = 2\n else:\n respond_type = 3\n\n #find assessment result according to respond_type\n comment = [\"Your status is suspicious and you should test for COVID-19\",\"Please call 911 or go directly to nearest emergency\",\n \"Please do a self-isolation for 14 days and keep monitoring on your health status\",\"You are safe for now. 
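# The chained conditions above encode the triage decision. A sketch of the same
# logic as a standalone function (hypothetical helper, not in the original app),
# which is easier to unit-test than the Flask view itself:
def triage(fever, cough, breath, chest_pain, other_symptoms, one, two, three):
    """Return respond_type: 0 = get tested, 1 = emergency, 2 = self-isolate, 3 = safe."""
    if (fever or cough or breath or chest_pain) and one == "yes":
        return 0
    if (breath or chest_pain) and one == "no":
        return 1
    if (fever or cough or other_symptoms) or one in ("yes", "unknown") \
            or two == "yes" or three == "yes":
        return 2
    return 3
# e.g. triage(None, None, "breath", None, "", "no", "no", "no") == 1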
Please keep staying at home\"]\n response = comment[respond_type]\n\n #write result file\n file_path = user + \".json\"\n symptoms = []\n if(fever != None):\n symptoms.append(fever)\n\n if(cough != None):\n symptoms.append(cough)\n\n if(breath != None):\n symptoms.append(breath)\n\n if(chest_pain != None):\n symptoms.append(chest_pain)\n\n if(other_symptoms != \"\"):\n symptoms.append(other_symptoms)\n\n json_object = {\n \"Have you had in-person closed contact with patients who diagnosed with coronavirus?\":one,\"Have you visited a place that patients who diagnosed with coronavirus has been to?\":two,\"Have you traveled to foreign countries in the last 14 days?\":three,\n \"symptoms\":symptoms,\"result\":response}\n\n #check if the result file already exists\n if S3.check_file_exists(file_path):\n s3_response = S3.delete_file(file_path)\n #upload new result file to S3 bucket\n data = json.dumps(json_object)\n s3_response2 =S3.upload_file_from_object(data,file_path)\n\n #dynamodb\n keypair = {\"user_id\": user}\n updatelist = {\"survey_result\": {\"Value\": respond_type, \"Action\": 'PUT'}}\n updatelist_filepath = {\"file_path\": {\"Value\": file_path, \"Action\": 'PUT'}}\n dynamo.update_data(\"user\", keypair, updatelist)\n dynamo.update_data(\"user\", keypair, updatelist_filepath)\n\n # Risk level\n if \"suspicious\" in response or \"emergency\" in response:\n risk_level = \"danger\"\n elif \"isolation\" in response:\n risk_level = \"warning\"\n else:\n risk_level = \"success\"\n\n return render_template('assessment_result.html',\n response=response,\n risk_level=risk_level,\n logged=logged,\n username=username,\n title=\"Self-Assessment Results\"\n )\n else:\n return redirect(url_for(\"main\"))","repo_name":"aliciatang07/COVID19-footprint-tracking","sub_path":"app/assessment_form.py","file_name":"assessment_form.py","file_ext":"py","file_size_in_byte":5834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74346073684","text":"from django.http import HttpResponseNotFound\nfrom django.shortcuts import render\nfrom django.views.generic import ListView\n\nfrom posts.models import PostList, Category\n\nmenu = [\n {'title': 'Главная','page_url':'index'},\n {'title': 'Объявления', 'page_url': 'all_advs'},\n {'title': 'Подать', 'page_url': 'add'},\n {'title': 'Контакты', 'page_url': 'contact'},\n {'title': 'Статьи','page_url':'posts'}\n\n]\n\nclass PostListView(ListView):\n model = PostList\n template_name = 'posts/posts.html'\n context_object_name = 'posts'\n paginate_by = 5\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['page'] = 'posts'\n context['menu'] = menu\n context['category'] = Category.objects.all()\n return context\n\n def get_queryset(self):\n return PostList.objects.all()\n\n","repo_name":"ZayMax1977/adv_django_project","sub_path":"goodwood/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12959344650","text":"from odoo import fields, models\n\n\nclass ResCompany(models.Model):\n _inherit = \"res.company\"\n\n openai_api_key = fields.Char()\n\n openai_chat_context = fields.Text(\n default=\"\"\"\nWrite a Odoo server action response.\n- It returns a server action in odoo\n- It returns his response in python\n- do not use functions just code to evaluate\n- I only need the script body.\nDon’t add any explanation.\nThe task is described 
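# Aside on the Flask views above: the result-string -> Bootstrap-level mapping is
# written out twice (once in view_assessment_history, once after submit). A
# sketch of a shared helper (hypothetical name, not in the original module):
def risk_level_for(result):
    """Map an assessment result string onto a Bootstrap alert level."""
    if "suspicious" in result or "emergency" in result:
        return "danger"
    if "isolation" in result:
        return "warning"
    return "success"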
as follows:\n \"\"\"\n )\n","repo_name":"OdooAI/odooai","sub_path":"openai_chat_control/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26619444338","text":"# R-7.13\n# Update the PositionalList class to support an additional method find(e),\n# which returns the position of the (first occurrence of ) element e in the list\n# (or None if not found).\n\nfrom positional_list_7_13 import PositionalList\nfrom exceptions_7_13 import Empty\n\n\nclass PositionalListWithFindMethod(PositionalList):\n\n def find(self, e):\n cursor = self.first()\n while cursor is not None:\n if cursor.element() == e:\n return cursor\n cursor = self.after(cursor)\n return None\n\n\nmy_list = PositionalListWithFindMethod()\n\n# adding [0, 1, 2, 3] to the list\nfor i in range(4):\n my_list.add_last(i)\n\nfound_node = my_list.find(3)\nprint(\"The maximum of the list is: {}\".format(found_node))\nprint(\"It contains: {} {}\".format(type(found_node.element()), found_node.element()))\n","repo_name":"mikechen66/Data-Structures-and-Algorithms-in-Python","sub_path":"Solutions_to_Exercises/c07_linked_lists/Reinforcement/R-7.13/R-7.13.py","file_name":"R-7.13.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37963319671","text":"from openerp import models, fields, api\nimport openerp.addons.decimal_precision as dp\nfrom datetime import datetime, timedelta as td\nfrom openerp.fields import Date as fDate\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n # this field is added due to kanban view limitation\n # i.e. 
decimal place values cannot be eliminated by view adjustment\n\n\n net_price_usd = fields.Float(\n string='Sale USD',\n compute='_get_foreign_net_price',\n digits=dp.get_precision('Product Price')\n )\n net_price_chf = fields.Float(\n string='Sale CHF',\n compute='_get_foreign_net_price',\n digits=dp.get_precision('Product Price')\n )\n net_price_eur = fields.Float(\n string='Sale EUR',\n compute='_get_foreign_net_price',\n digits=dp.get_precision('Product Price')\n )\n\n @api.multi\n def _get_foreign_net_price(self):\n usd_rec = self.env['res.currency'].search([('name', '=', 'USD')])[0]\n chf_rec = self.env['res.currency'].search([('name', '=', 'CHF')])[0]\n eur_rec = self.env['res.currency'].search([('name', '=', 'EUR')])[0]\n for pt in self:\n if usd_rec:\n pt.net_price_usd = pt.net_price * usd_rec.rate_silent\n if chf_rec:\n pt.net_price_chf = pt.net_price * chf_rec.rate_silent\n if eur_rec:\n pt.net_price_eur = pt.net_price * eur_rec.rate_silent\n\n\n\n","repo_name":"eHanse-IT/OAW_custom","sub_path":"oa_model_secu_partner_ac/models/product_template.py","file_name":"product_template.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74231987925","text":"\"\"\"\nTraining TemporalDis and Instance Discrimination\nInsDis: Unsupervised feature learning via non-parametric instance discrimination\nTemporalDis: Momentum Contrast for Unsupervised Visual Representation Learning\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport os\nimport sys\nimport time\nimport torch\nimport torch.backends.cudnn as cudnn\nimport argparse\nimport socket\n\nimport tensorboard_logger as tb_logger\n\nfrom utils.utils import AverageMeter\n\nfrom model.i3d import I3D\nfrom model.r2p1d import R2Plus1DNet\nfrom NCE.NCEAverage import MemoryInsDis\nfrom NCE.NCEAverage import MemoryMoCo\nfrom NCE.NCECriterion import NCECriterion\nfrom NCE.NCECriterion import NCESoftmaxLoss\n\nfrom data.config import data_config, augmentation_config\nfrom data.dataloader import data_loader_init\nfrom utils.load_weights import weights_init\nimport datetime\nfrom utils.utils import accuracy\nimport torch.nn as nn\n\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n#==\nfrom TC.gen_positive import GenPositive\nfrom TC.gen_negative import GenNegative\ntry:\n from apex import amp, optimizers\nexcept ImportError:\n pass\n\"\"\"\nTODO: python 3.6 ModuleNotFoundError\n\"\"\"\n\n\ndef adjust_learning_rate(epoch, opt, optimizer):\n \"\"\"Sets the learning rate to the initial LR decayed by 0.2 every steep step\"\"\"\n # if epoch < 2:\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = 1e-7\n # return 0\n steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))\n if steps > 0:\n new_lr = opt.learning_rate * (opt.lr_decay_rate ** steps)\n for param_group in optimizer.param_groups:\n param_group['lr'] = new_lr\n\ndef parse_option():\n\n hostname = socket.gethostname()\n\n parser = argparse.ArgumentParser('argument for training')\n\n parser.add_argument('--print_freq', type=int, default=10, help='print frequency')\n parser.add_argument('--tb_freq', type=int, default=500, help='tb frequency')\n parser.add_argument('--save_freq', type=int, default=3, help='save frequency')\n parser.add_argument('--batch_size', type=int, default=128, help='batch_size')\n parser.add_argument('--num_workers', type=int, default=18, help='num of workers to use')\n parser.add_argument('--epochs', type=int, default=240, help='number 
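# Note on _get_foreign_net_price above: `search([...])[0]` raises IndexError when
# no matching currency exists, so the `if usd_rec:` guards can never see an empty
# result. A safer sketch for the three lookups (limit=1 returns an empty, falsy
# recordset instead of raising), to be read as replacement lines inside the
# compute method -- an assumption about intent, not the original code:
usd_rec = self.env['res.currency'].search([('name', '=', 'USD')], limit=1)
chf_rec = self.env['res.currency'].search([('name', '=', 'CHF')], limit=1)
eur_rec = self.env['res.currency'].search([('name', '=', 'EUR')], limit=1)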
of training epochs')\n\n # optimization\n parser.add_argument('--learning_rate', type=float, default=0.003, help='learning rate')\n parser.add_argument('--lr_decay_epochs', type=str, default='120,160,200', help='where to decay lr, can be a list')\n parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='decay rate for learning rate')\n parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam')\n parser.add_argument('--beta2', type=float, default=0.999, help='beta2 for Adam')\n parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')\n parser.add_argument('--momentum', type=float, default=0.9, help='momentum')\n\n # crop\n parser.add_argument('--crop', type=float, default=0.2, help='minimum crop')\n\n # dataset\n parser.add_argument('--dataset', type=str, default='hmdb51', choices=['hmdb51', 'ucf101', 'kinetics'])\n\n # resume\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n\n # augmentation setting\n parser.add_argument('--aug', type=str, default='CJ', choices=['NULL', 'CJ'])\n\n # warm up\n parser.add_argument('--warm', action='store_true', help='add warm-up setting')\n parser.add_argument('--amp', action='store_true', help='using mixed precision')\n parser.add_argument('--opt_level', type=str, default='O2', choices=['O1', 'O2'])\n\n # model definition\n parser.add_argument('--model', type=str, default='resnet50', choices=['resnet50', 'resnet50x2', 'resnet50x4'])\n parser.add_argument('--arch', default='i3d', type=str, choices=['i3d', 'r3d', 'r2p1d', 'c3d'])\n\n # loss function\n parser.add_argument('--softmax', action='store_true', help='using softmax contrastive loss rather than NCE')\n parser.add_argument('--nce_k', type=int, default=16384)\n parser.add_argument('--nce_t', type=float, default=0.07)\n parser.add_argument('--nce_m', type=float, default=0.5)\n\n # memory setting\n parser.add_argument('--moco', action='store_true', help='using TemporalDis (otherwise Instance Discrimination)')\n parser.add_argument('--alpha', type=float, default=0.999, help='exponential moving average weight')\n\n # GPU setting\n parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')\n\n # dataset setting\n parser.add_argument('--train_list', type=str, default='../datasets/lists/ucf101/ucf101_rgb_train_split_1.txt')\n parser.add_argument('--val_list', type=str, default='../datasets/lists/ucf101/ucf101_rgb_val_split_1.txt')\n\n opt = parser.parse_args()\n\n # set the path according to the environment\n if hostname.startswith('amax'):\n opt.data_folder = '../experiments/TemporalDis/{}/'.format(opt.dataset)\n opt.model_path = '../experiments/TemporalDis/{}/models'.format(opt.dataset)\n opt.tb_path = '../experiments/TemporalDis/{}/tensorboard'.format(opt.dataset)\n opt.tb_path2 = '../experiments/TemporalDis/{}/tensorboard2'.format(opt.dataset)\n opt.kmeans_path = '../experiments/TemporalDis/{}/k_means'.format(opt.dataset)\n opt.pseduo_model_path = '../experiments/TemporalDis/{}/pseudo_models'.format(opt.dataset)\n opt.tsne_path = '../experiments/TemporalDis/{}/tsne'.format(opt.dataset)\n else:\n raise NotImplementedError('server invalid: {}'.format(hostname))\n\n\n # opt.dataset = 'ucf101'\n # opt.dataset = 'kinetics'\n iterations = opt.lr_decay_epochs.split(',')\n opt.lr_decay_epochs = list([])\n for it in iterations:\n opt.lr_decay_epochs.append(int(it))\n\n opt.method = 'softmax' if opt.softmax else 'nce'\n prefix = 'TemporalDis{}'.format(opt.alpha) 
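# Worked example of the step decay in adjust_learning_rate above, using the
# defaults from parse_option (lr=0.003, milestones 120/160/200, decay 0.1):
#   epoch 100 -> 0 milestones passed -> lr stays 0.003
#   epoch 130 -> 1 milestone  passed -> lr = 0.003 * 0.1    = 3e-4
#   epoch 210 -> 3 milestones passed -> lr = 0.003 * 0.1**3 = 3e-6
steps = np.sum(130 > np.asarray([120, 160, 200]))  # -> 1
assert abs(0.003 * (0.1 ** steps) - 3e-4) < 1e-12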
if opt.moco else 'InsDis'\n date = datetime.datetime.today().strftime('%m-%d-%H%M')\n opt.model_name = date\n # opt.model_name = '{}_{}_{}_{}_lr_{}_decay_{}_bsz_{}_crop_{}'.format(prefix, opt.method, opt.nce_k, opt.model,\n # opt.learning_rate, opt.weight_decay,\n # opt.batch_size, opt.crop)\n\n if opt.warm:\n opt.model_name = '{}_warm'.format(opt.model_name)\n if opt.amp:\n opt.model_name = '{}_amp_{}'.format(opt.model_name, opt.opt_level)\n\n opt.model_name = '{}_aug_{}'.format(opt.model_name, opt.aug)\n\n opt.model_folder = os.path.join(opt.model_path, opt.model_name)\n if not os.path.isdir(opt.model_folder):\n os.makedirs(opt.model_folder)\n\n opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)\n if not os.path.isdir(opt.tb_folder):\n os.makedirs(opt.tb_folder)\n\n opt.tb_folder2 = os.path.join(opt.tb_path2, opt.model_name)\n if not os.path.isdir(opt.tb_folder2):\n os.makedirs(opt.tb_folder2)\n\n opt.kmeans_folder = os.path.join(opt.kmeans_path, opt.model_name)\n if not os.path.isdir(opt.kmeans_folder):\n os.makedirs(opt.kmeans_folder)\n\n opt.pseduo_model_folder = os.path.join(opt.pseduo_model_path, opt.model_name)\n if not os.path.isdir(opt.pseduo_model_folder):\n os.makedirs(opt.pseduo_model_folder)\n\n opt.tsne_folder = os.path.join(opt.tsne_path, opt.model_name)\n if not os.path.isdir(opt.tsne_folder):\n os.makedirs(opt.tsne_folder)\n return opt\n\n\ndef moment_update(model, model_ema, m):\n \"\"\" model_ema = m * model_ema + (1 - m) model \"\"\"\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)\n # p2.data.mul_(m).add_(1 - m, p1.data)\n\n\ndef get_shuffle_ids(bsz):\n \"\"\"generate shuffle ids for ShuffleBN\"\"\"\n forward_inds = torch.randperm(bsz).long().cuda()\n backward_inds = torch.zeros(bsz).long().cuda()\n value = torch.arange(bsz).long().cuda()\n backward_inds.index_copy_(0, forward_inds, value)\n return forward_inds, backward_inds\n\n\ndef saving(logger, loss, epoch, optimizer, args, model, contrast, prob, model_ema, tag='TemporalDis'):\n if tag == 'TemporalDis':\n model_folder = args.model_folder\n elif tag == 'Pseudo':\n model_folder = args.pseduo_model_folder\n else:\n Exception(\"not implement\")\n # tensorboard logger\n logger.log_value('ins_loss', loss, epoch)\n logger.log_value('ins_prob', prob, epoch)\n logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)\n\n # save model\n if epoch % args.save_freq == 0:\n print('==> Saving...')\n state = {\n 'opt': args,\n 'model': model.state_dict(),\n 'contrast': contrast.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n }\n if args.moco:\n state['model_ema'] = model_ema.state_dict()\n if args.amp:\n state['amp'] = amp.state_dict()\n save_file = os.path.join(model_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n torch.save(state, save_file)\n # help release GPU memory\n del state\n\n # saving the model\n print('==> Saving...')\n state = {\n 'opt': args,\n 'model': model.state_dict(),\n 'contrast': contrast.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n }\n if args.moco:\n state['model_ema'] = model_ema.state_dict()\n if args.amp:\n state['amp'] = amp.state_dict()\n save_file = os.path.join(model_folder, 'current.pth')\n torch.save(state, save_file)\n if epoch % args.save_freq == 0:\n save_file = os.path.join(model_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))\n torch.save(state, save_file)\n # help release GPU memory\n del state\n torch.cuda.empty_cache()\n\ndef main():\n\n 
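# Aside on moment_update above: `add_(1 - m, p1.detach().data)` is the legacy
# two-positional-argument form, deprecated in newer PyTorch in favour of the
# alpha= keyword. An equivalent sketch (assumes torch >= 1.5):
@torch.no_grad()
def moment_update_v2(model, model_ema, m):
    """model_ema = m * model_ema + (1 - m) * model, parameter by parameter."""
    for p, p_ema in zip(model.parameters(), model_ema.parameters()):
        p_ema.mul_(m).add_(p.detach(), alpha=1 - m)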
args = parse_option()\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n # create model and optimizer\n # == dataset config==\n \"\"\"\n CUDA_VISIBLE_DEVICES=0,1 python train_temporal_dis.py \\\n --batch_size 16 --num_workers 8 --nce_k 3569 --softmax --moco\n \"\"\"\n\n # args.dataset = 'hmdb51'\n # args.train_list = '../datasets/lists/hmdb51/hmdb51_rgb_train_split_1.txt'\n # args.val_list = '../datasets/lists/hmdb51/hmdb51_rgb_val_split_1.txt'\n \"\"\"\n CUDA_VISIBLE_DEVICES=1 python train_temporal_dis.py \\\n --batch_size 16 --num_workers 8 --nce_k 9536 --softmax --moco\n \"\"\"\n # args.print_freq = 100\n # args.dataset = 'ucf101'\n # args.train_list = '../datasets/lists/ucf101/ucf101_rgb_train_split_1.txt'\n # args.val_list = '../datasets/lists/ucf101/ucf101_rgb_val_split_1.txt'\n\n # args.print_freq = 1000\n # args.dataset = 'kinetics'\n # args.train_list = '../datasets/lists/kinetics-400/ssd_kinetics_video_trainlist.txt'\n # args.val_list = '../datasets/lists/kinetics-400/ssd_kinetics_video_vallist.txt'\n\n args.dropout = 0.5\n args.clips = 1\n args.data_length = 16\n args.stride = 4\n args.spatial_size = 224\n args.root = \"\"\n args.mode = 'rgb'\n args.eval_indict = 'loss'\n args.pt_loss = 'TemporalDis'\n args.workers = 4\n # args.arch = 'i3d' # 'r2p1d'\n num_class, data_length, image_tmpl = data_config(args)\n train_transforms, test_transforms, eval_transforms = augmentation_config(args)\n train_loader, val_loader, eval_loader, train_samples, val_samples, eval_samples = data_loader_init(args, data_length, image_tmpl, train_transforms, test_transforms, eval_transforms)\n\n n_data = len(train_loader)\n if args.arch == 'i3d':\n model = I3D(num_classes=101, modality=args.mode, dropout_prob=args.dropout, with_classifier=False)\n model_ema = I3D(num_classes=101, modality=args.mode, dropout_prob=args.dropout, with_classifier=False)\n elif args.arch == 'r2p1d':\n model = R2Plus1DNet((1, 1, 1, 1), num_classes=num_class, with_classifier=False)\n model_ema = R2Plus1DNet((1, 1, 1, 1), num_classes=num_class, with_classifier=False)\n elif args.arch == 'r3d':\n from model.r3d import resnet18\n model = resnet18(num_classes=num_class, with_classifier=False)\n model_ema = resnet18(num_classes=num_class, with_classifier=False)\n else:\n Exception(\"Not implemene error!\")\n model = torch.nn.DataParallel(model)\n model_ema = torch.nn.DataParallel(model_ema)\n # random initialization\n model.apply(weights_init)\n model_ema.apply(weights_init)\n # copy weights from `model' to `model_ema'\n moment_update(model, model_ema, 0)\n contrast = MemoryMoCo(128, n_data, args.nce_k, args.nce_t, args.softmax).cuda(args.gpu)\n # contrast2 = MemoryMoCo(128, n_data, args.nce_k, args.nce_t, args.softmax).cuda(args.gpu)\n criterion = NCESoftmaxLoss() if args.softmax else NCECriterion(n_data)\n criterion = criterion.cuda(args.gpu)\n cls_criterion = nn.CrossEntropyLoss().cuda()\n\n model = model.cuda()\n if args.moco:\n model_ema = model_ema.cuda()\n\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n cudnn.benchmark = True\n\n if args.amp:\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level)\n if args.moco:\n optimizer_ema = torch.optim.SGD(model_ema.parameters(),\n lr=0,\n momentum=0,\n weight_decay=0)\n model_ema, optimizer_ema = amp.initialize(model_ema, optimizer_ema, opt_level=args.opt_level)\n\n # optionally resume from a checkpoint\n args.start_epoch = 1\n if 
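# Sanity-check sketch for get_shuffle_ids above: the backward indices must undo
# the permutation applied by the forward indices -- this is the ShuffleBN
# bookkeeping train_moco relies on when it re-orders feat_k with reverse_ids.
# (Needs a CUDA device, since get_shuffle_ids allocates on the GPU.)
fwd, bwd = get_shuffle_ids(8)
x = torch.arange(8).cuda()
assert torch.equal(x[fwd][bwd], x)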
args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume, map_location='cpu')\n # checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch'] + 1\n model.load_state_dict(checkpoint['model'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n contrast.load_state_dict(checkpoint['contrast'])\n if args.moco:\n model_ema.load_state_dict(checkpoint['model_ema'])\n\n if args.amp and checkpoint['opt'].amp:\n print('==> resuming amp state_dict')\n amp.load_state_dict(checkpoint['amp'])\n\n print(\"=> loaded successfully '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n del checkpoint\n torch.cuda.empty_cache()\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n # tensorboard\n logger = tb_logger.Logger(logdir=args.tb_folder, flush_secs=2)\n logger2 = tb_logger.Logger(logdir=args.tb_folder2, flush_secs=2)\n\n #==================================== our data augmentation method=================================\n pos_aug = GenPositive()\n neg_aug = GenNegative()\n\n # routine\n for epoch in range(args.start_epoch, args.epochs + 1):\n\n adjust_learning_rate(epoch, args, optimizer)\n print(\"==> training...\")\n\n time1 = time.time()\n loss, prob = train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, args, pos_aug, neg_aug)\n time2 = time.time()\n print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))\n saving(logger, loss, epoch, optimizer, args, model, contrast, prob, model_ema, 'TemporalDis')\n\n #================iterative update ================================\n # pseudo_labels, tsne_features = generate_pseudo_label(model, train_loader, train_samples, epoch, args)\n # # plot_tsne(tsne_features, epoch, args, num_class, 'TemporalDis')\n # pse_cls_loss = train_pseduo_label(train_loader, model, model_ema, cls_criterion, optimizer, args, epoch, pseudo_labels)\n # print(\"pse_cls_loss:{}\".format(pse_cls_loss))\n # # plot_tsne(tsne_features, epoch, args, num_class, 'Pseudo')\n # saving(logger2, pse_cls_loss, epoch, optimizer, args, model, contrast, prob, model_ema, 'Pseudo')\n # if epoch % 5 == 0:\n # pseudo_labels, tsne_features = generate_pseudo_label(model_ema, train_loader, train_samples, epoch, args)\n # # plot_tsne(tsne_features, epoch, args, num_class, 'TemporalDis')\n # for j in range(3):\n # # pseudo_labels, tsne_features = generate_pseudo_label(model, train_loader, train_samples, epoch, args)\n # # plot_tsne(tsne_features, epoch, args, num_class, 'TemporalDis')\n # pse_cls_loss = train_pseduo_label(train_loader, model, model_ema, cls_criterion, optimizer, args, epoch, pseudo_labels)\n # print(\"pse_cls_loss:{}\".format(pse_cls_loss))\n # # plot_tsne(tsne_features, epoch, args, num_class, 'Pseudo')\n # saving(logger2, pse_cls_loss, epoch, optimizer, args, model, contrast, prob, model_ema, 'Pseudo')\n\n\ndef train_moco(epoch, train_loader, model, model_ema, contrast, criterion, optimizer, opt, pos_aug, neg_aug):\n \"\"\"\n one epoch training for instance discrimination\n \"\"\"\n print(\"==> (TemporalDis) training...\")\n model.train()\n model_ema.eval()\n\n def set_bn_train(m):\n classname = m.__class__.__name__\n if classname.find('BatchNorm') != -1:\n m.train()\n model_ema.apply(set_bn_train)\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n prob_meter = AverageMeter()\n\n end = time.time()\n for idx, (inputs, _, index) in enumerate(train_loader):\n 
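# Note on set_bn_train above: the EMA encoder is kept in eval() mode, but its
# BatchNorm layers are switched back to train mode so their running statistics
# keep updating on the shuffled key batches. A toy check of the same pattern
# (standalone re-statement of the closure defined inside train_moco):
def _set_bn_train(mod):
    if mod.__class__.__name__.find('BatchNorm') != -1:
        mod.train()

toy = torch.nn.Sequential(torch.nn.Conv3d(3, 8, 3), torch.nn.BatchNorm3d(8))
toy.eval()
toy.apply(_set_bn_train)
assert toy[0].training is False and toy[1].training is True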
data_time.update(time.time() - end)\n\n bsz = inputs[0].size(0)\n # fixed args.batch_size\n if bsz < opt.batch_size:\n print(\"batch less than 16, continue\")\n continue\n inputs[0] = inputs[0].float()\n inputs[1] = inputs[1].float()\n if opt.gpu is not None:\n inputs[0] = inputs[0].cuda(opt.gpu, non_blocking=True)\n inputs[1] = inputs[1].cuda(opt.gpu, non_blocking=True)\n else:\n inputs[0] = inputs[0].cuda()\n inputs[1] = inputs[1].cuda()\n index = index.cuda(opt.gpu, non_blocking=True)\n\n # ===================forward=====================\n anchor, positive, negative = inputs\n\n # here a series of data augmentation\n # ====================================================postive operation=======================\n anchor = pos_aug(anchor)\n # negative = neg_aug(negative)\n # strong_negative = neg_aug(positive)\n\n # ids for ShuffleBN\n shuffle_ids, reverse_ids = get_shuffle_ids(bsz)\n\n # # data rotation\n # rotation_data_1 = []\n # flip_labels = torch.ones(x1.size(0) * 4)\n # for i in range(4):\n # rotation_data_1.append(four_rotation_cls(x1, torch.ones(x1.size(0)) * i))\n # flip_labels[x1.size(0) * i:x1.size(0) * (i + 1)] = torch.ones(\n # x1.size(0)) * i\n # x1 = torch.cat(rotation_data_1, dim=0)\n # x1_rotate_label = torch.LongTensor(flip_labels.long()).cuda()\n # rotation_data_2 = []\n # flip_labels = torch.ones(x2.size(0) * 4)\n # for i in range(4):\n # rotation_data_2.append(four_rotation_cls(x2, torch.ones(x2.size(0)) * i))\n # flip_labels[x2.size(0) * i:x2.size(0) * (i + 1)] = torch.ones(\n # x2.size(0)) * i\n # x2 = torch.cat(rotation_data_2, dim=0)\n # x2_rotate_label = torch.LongTensor(flip_labels.long()).cuda()\n\n feat_q, cls_q, mix_q = model(anchor)\n with torch.no_grad():\n positive = positive[shuffle_ids]\n feat_k, cls_k, mix_k = model_ema(positive)\n feat_k = feat_k[reverse_ids]\n feat_n, cls_n, mix_n = model(negative)\n # feat_sn, cls_sn, mix_sn = model(strong_negative)\n out = contrast(feat_q, feat_k, feat_n, index)\n triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)\n sample_loss = triplet_loss(feat_q, feat_k, feat_n)\n # out = contrast(feat_q, feat_k, feat_n, index)\n # out = contrast(feat_q, feat_k, index)\n contrast_loss = criterion(out)\n # out2 = contrast2(feat_n, feat_k, _, index)\n # contrast_loss2 = criterion(out2)\n # print(contrast_loss, contrast_loss2)\n # print(contrast_loss, sample_loss)\n loss = contrast_loss # + sample_loss # + contrast_loss2 # + cls_loss + mixup_loss\n prob = out[:, 0].mean()\n\n # ===================backward=====================\n optimizer.zero_grad()\n if opt.amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n\n # ===================meters=====================\n loss_meter.update(loss.item(), bsz)\n prob_meter.update(prob.item(), bsz)\n\n moment_update(model, model_ema, opt.alpha)\n\n torch.cuda.synchronize()\n batch_time.update(time.time() - end)\n end = time.time()\n message = ('TemporalDis Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})\\t'\n 'prob {prob.val:.3f} ({prob.avg:.3f})'.format(\n epoch, idx + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=loss_meter, prob=prob_meter))\n # print info\n if (idx + 1) % opt.print_freq == 0:\n print(message)\n # print(out.shape)\n sys.stdout.flush()\n with open(\"../experiments/MoCo_kinetics.txt\", 'a') as f:\n f.write(message + '\\n')\n return loss_meter.avg, 
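# Context for criterion(out) above: with --softmax, NCESoftmaxLoss treats the
# contrast output as logits whose column 0 is the positive pair, i.e. plain
# cross-entropy against an all-zeros target (which is also why prob reads
# out[:, 0]). A minimal equivalent sketch -- an assumption about the project's
# NCE code based on this convention, with the temperature shown inline although
# the memory module normally applies it:
logits = torch.randn(16, 1 + 128)             # [batch, 1 positive + K negatives]
target = torch.zeros(16, dtype=torch.long)    # positive sits at index 0
loss_sketch = torch.nn.functional.cross_entropy(logits / 0.07, target)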
prob_meter.avg\n\ndef generate_pseudo_label(model, train_loader, train_samples, epoch, args, feature_dim=2048, clusters=200):\n \"\"\"\n generate pseduo label for all dataset\n :return:\n \"\"\"\n #step 1: feature extractor....\n #step 2: apply k_means algorithm and save pseudo label in an array\n #step 3: plot pseudo label\n # ==============================================Step 1 ===============================\n print(\"==> (generate pseudo label): feature extractor\")\n model.eval()\n features = np.random.rand(train_samples, feature_dim) # the number of training samples\n tsne_features = {'data': [], 'target': []}\n with torch.no_grad():\n for i, (input, target, index) in enumerate(train_loader):\n _, inputs, _ = input\n inputs = inputs.cuda()\n _, _, feat = model(inputs)\n for j in range(feat.size(0)):\n features[index[j]] = feat[j].data.cpu().numpy()\n for j in range(feat.size(0)):\n tsne_features['data'].append(feat[j])\n tsne_features['target'].append(target[j])\n if i % args.print_freq == 0:\n print(\"epoch: {}, {}/{} finished feature extract\".format(epoch, i, len(train_loader)))\n # if i > 50:\n # break\n # ==============================================Step 2 clustering =================================\n from PSEUDO.clustering import Kmeans\n print(\"==> (generate pseudo label) k-means cluster\")\n cluster = Kmeans(clusters)\n cluster.cluster(features)\n labels = cluster.images_lists #list, len=clusters, each cluster include index\n pseudo_labels = np.zeros(train_samples)\n for i in range(len(labels)):\n for item in labels[i]:\n pseudo_labels[item] = i\n pseudo_labels = torch.tensor(pseudo_labels).cuda().long()\n print(pseudo_labels)\n # from sklearn.cluster import KMeans\n # from sklearn.decomposition import PCA\n # reduced_data = PCA(n_components=10).fit_transform(features)\n # k_means = KMeans(init='k-means++', n_clusters=clusters, n_init=1000)\n # k_means.fit(reduced_data)\n # print(k_means.labels_)\n # labels = torch.tensor(k_means.labels_).cuda().long()\n # return labels\n # =============================================Step 3 visualize =================================\n # print(\"==> (generate pseudo label) plot and save k-means cluster\")\n # import matplotlib.pyplot as plt\n # # Step size of the mesh. Decrease to increase the quality of the VQ.\n # h = .02 # point in the mesh [x_min, x_max]x[y_min, y_max].\n # # Plot the decision boundary. For that, we will assign a color to each\n # x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1\n # y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1\n # xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # # Obtain labels for each point in mesh. 
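# The clustering step above goes through the project's PSEUDO.clustering.Kmeans
# wrapper. A rough scikit-learn stand-in producing the same per-sample label
# array (an alternative sketch, not the original dependency):
from sklearn.cluster import KMeans

def sklearn_pseudo_labels(features, clusters=200):
    """Return one integer cluster id per training sample, as a CUDA long tensor."""
    km = KMeans(n_clusters=clusters, n_init=10).fit(features)
    return torch.tensor(km.labels_).cuda().long()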
Use last trained model.\n # Z = k_means.predict(np.c_[xx.ravel(), yy.ravel()])\n # # Put the result into a color plot\n # Z = Z.reshape(xx.shape)\n # plt.figure(1)\n # plt.clf()\n # plt.imshow(Z, interpolation='nearest',\n # extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n # cmap=plt.cm.Paired,\n # aspect='auto', origin='lower')\n #\n # plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)\n # # Plot the centroids as a white X\n # centroids = k_means.cluster_centers_\n # plt.scatter(centroids[:, 0], centroids[:, 1],\n # marker='x', s=169, linewidths=3,\n # color='w', zorder=10)\n # plt.title('K-means clustering on the UCF101 dataset (PCA-reduced data)\\n'\n # 'Centroids are marked with white cross')\n # plt.xlim(x_min, x_max)\n # plt.ylim(y_min, y_max)\n # plt.xticks(())\n # plt.yticks(())\n # plt.savefig(\"{}/{}.png\".format(args.kmeans_folder, epoch))\n # plt.show()\n return pseudo_labels, tsne_features\n\ndef plot_tsne(data, epoch, args, num_class, front='TemporalDis'):\n \"\"\"\n plot the tsne visualization result and record it in a file\n :return:\n \"\"\"\n print(\"==> (generate pseudo label) t-sne visualization\")\n from utils.visualization.t_SNE_Visualization import tsne_visualize\n front = front + '_' + str(epoch)\n file_name = \"{}/{}.png\".format(args.tsne_folder, front)\n tsne_visualize(data, file_name, num_class)\n return True\n\ndef train_pseduo_label(train_loader, model, model_ema, criterion, optimizer, opt, epoch, pseduo_labels):\n \"\"\"\n :return:\n \"\"\"\n # need clean data and labels\n print(\"==> train pseduo label\")\n model.train()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n loss_meter = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n end = time.time()\n for idx, (videos, _, index) in enumerate(train_loader):\n data_time.update(time.time() - end)\n _, inputs, _ = videos\n bsz = inputs.size(0)\n inputs = inputs.cuda()\n target = pseduo_labels[index].cuda()\n # ===================forward=====================\n _, predict, _ = model(inputs)\n loss = criterion(predict, target)\n prec1, prec5 = accuracy(predict.data, target, topk=(1, 5))\n top1.update(prec1.item(), inputs.size(0))\n top5.update(prec5.item(), inputs.size(0))\n\n # ===================backward=====================\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # ===================meters=====================\n loss_meter.update(loss.item(), bsz)\n # moment_update(model, model_ema, opt.alpha)\n torch.cuda.synchronize()\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print info\n if (idx + 1) % opt.print_freq == 0:\n print('Pseudo Label Train: [{0}][{1}/{2}]\\t'\n 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'DT {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'loss {loss.val:.3f} ({loss.avg:.3f})\\t'\n 'Top1 {top1.val:3f} ({top1.avg:.3f})\\t'\n 'Top5 {top5.val:3f} ({top5.avg:.3f})'.format(\n epoch, idx + 1, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=loss_meter, top1=top1,top5=top5))\n sys.stdout.flush()\n\n return loss_meter.avg\n\n\nif __name__ == '__main__':\n main()","repo_name":"FingerRec/Self-Supervised-Temporal-Discriminative-Representation-Learning-for-Video-Action-Recognition","sub_path":"src/train_temporal_dis.py","file_name":"train_temporal_dis.py","file_ext":"py","file_size_in_byte":28627,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"30"} +{"seq_id":"29272567291","text":"import pytest\nfrom sanic import Sanic, response\n\nfrom 
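# For reference, the accuracy(...) helper imported at the top of this training
# script is the usual top-k measure. A self-contained sketch of what it computes
# (assumed to match utils.utils.accuracy up to formatting):
def topk_accuracy(output, target, topk=(1,)):
    """output: [batch, classes] logits; target: [batch] labels; returns % per k."""
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.t().eq(target.view(1, -1).expand_as(pred.t()))
    return [correct[:k].reshape(-1).float().sum() * 100.0 / target.size(0)
            for k in topk]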
vaccine import ask_a_question_config as config\nfrom vaccine.ask_a_question import Application\nfrom vaccine.models import Message\nfrom vaccine.testing import AppTester, TState, run_sanic\n\n\n@pytest.fixture\ndef tester():\n return AppTester(Application)\n\n\n@pytest.fixture\nasync def model_mock():\n Sanic.test_mode = True\n app = Sanic(\"mock_model\")\n tstate = TState()\n tstate.no_response = False\n\n @app.route(\"/inbound/check\", methods=[\"POST\"])\n def check(request):\n tstate.requests.append(request)\n if tstate.errormax:\n if tstate.errors < tstate.errormax:\n tstate.errors += 1\n return response.json({}, status=500)\n if tstate.no_response:\n return response.json({\"top_responses\": []})\n return response.file_stream(\n \"vaccine/tests/aaq_model_response.json\", mime_type=\"application/json\"\n )\n\n @app.route(\"/inbound/feedback\", methods=[\"POST\"])\n def feedback(request):\n tstate.requests.append(request)\n if tstate.errormax:\n if tstate.errors < tstate.errormax:\n tstate.errors += 1\n return response.json({}, status=500)\n return response.json({})\n\n async with run_sanic(app) as server:\n url = config.MODEL_API_URL\n token = config.MODEL_API_TOKEN\n config.MODEL_API_URL = f\"http://{server.host}:{server.port}\"\n config.MODEL_API_TOKEN = \"testtoken\"\n server.tstate = tstate\n yield server\n config.MODEL_API_URL = url\n config.MODEL_API_TOKEN = token\n\n\n@pytest.mark.asyncio\nasync def test_exit_keywords(tester: AppTester):\n await tester.user_input(\"menu\")\n tester.assert_message(\"\", session=Message.SESSION_EVENT.CLOSE)\n assert tester.application.messages[0].helper_metadata[\"automation_handle\"] is True\n tester.assert_state(None)\n assert tester.user.answers == {}\n\n\n@pytest.mark.asyncio\nasync def test_timeout(tester: AppTester):\n await tester.user_input(session=Message.SESSION_EVENT.CLOSE)\n tester.assert_message(\n \"\\n\".join(\n [\n \"â�“ *YOUR VACCINE QUESTIONS*\",\n \"\",\n \"We haven’t heard from you in a while!\",\n \"\",\n \"The question session has timed out due to inactivity. You will \"\n \"need to start again. Just TYPE the word ASK.\",\n \"\",\n \"-----\",\n \"📌 Reply *0* to return to the main *MENU*\",\n ]\n ),\n session=Message.SESSION_EVENT.CLOSE,\n )\n\n\n@pytest.mark.asyncio\nasync def test_question(tester: AppTester, model_mock):\n await tester.user_input(\"ask\", session=Message.SESSION_EVENT.NEW)\n tester.assert_message(\n \"\\n\".join(\n [\n \"â�“ *ASK* your questions about vaccines\",\n \"\",\n \"Try *typing your own question* or sharing/forwarding a '*rumour*' \"\n \"that's going around to get the facts!\",\n \"\",\n '[💡Tip: Reply with a question like: _\"Are vaccines safe?\"_ ]',\n ]\n )\n )\n tester.assert_state(\"state_question\")\n\n await tester.user_input(\"Is the vaccine safe?\")\n tester.assert_state(\"state_display_response_choices\")\n\n\n@pytest.mark.asyncio\nasync def test_reset_keyword(tester: AppTester):\n tester.setup_state(\"state_no_responses\")\n await tester.user_input(\"ask\")\n tester.assert_state(\"state_question\")\n\n\n@pytest.mark.asyncio\nasync def test_display_response_choices(tester: AppTester, model_mock):\n tester.setup_state(\"state_question\")\n await tester.user_input(\"Is the vaccine safe?\")\n tester.assert_message(\n \"\\n\".join(\n [\n \"🔎 *Top Search Results*\",\n \"\",\n \"1. Are COVID-19 vaccines safe?\",\n \"2. Do vaccines work against COVID-19 variants?\",\n \"3. 
Do we know what's in the vaccines?\",\n \"\",\n \"[💡Tip: If you don't see what you're looking for, try typing your \"\n \"question again using different words or reply *MENU* to browse \"\n \"topics]\",\n ]\n )\n )\n tester.assert_state(\"state_display_response_choices\")\n [req] = model_mock.tstate.requests\n req_data = req.json\n req_data[\"metadata\"].pop(\"message_id\")\n assert req_data == {\n \"text_to_match\": \"Is the vaccine safe?\",\n \"metadata\": {\"whatsapp_id\": \"27820001001\"},\n }\n\n\n@pytest.mark.asyncio\nasync def test_display_response_choices_reask_question(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n tester.setup_state(\"state_display_response_choices\")\n\n await tester.user_input(\"Does the vaccine contain covid?\")\n tester.assert_state(\"state_display_response_choices\")\n assert len(model_mock.tstate.requests) == 1\n tester.assert_answer(\"state_question\", \"Does the vaccine contain covid?\")\n\n\n@pytest.mark.asyncio\nasync def test_model_api_temporary_error(tester: AppTester, model_mock):\n model_mock.tstate.errormax = 1\n tester.setup_state(\"state_question\")\n await tester.user_input(\"Is the vaccine safe?\")\n tester.assert_state(\"state_display_response_choices\")\n assert len(model_mock.tstate.requests) == 2\n\n\n@pytest.mark.asyncio\nasync def test_model_api_permanent_error(tester: AppTester, model_mock):\n model_mock.tstate.errormax = 5\n tester.setup_state(\"state_question\")\n await tester.user_input(\"Is the vaccine safe?\")\n assert len(model_mock.tstate.requests) == 3\n tester.assert_message(\n \"Something went wrong, your question was not able to be processed. \"\n \"Please try again later\"\n )\n\n\n@pytest.mark.asyncio\nasync def test_model_no_responses(tester: AppTester, model_mock):\n model_mock.tstate.no_response = True\n tester.setup_state(\"state_question\")\n await tester.user_input(\"Is the vaccine safe?\")\n tester.assert_message(\n \"\\n\".join(\n [\n \"*No Results Found*\",\n \"\",\n \"[💡Tip: Try typing your question again using different words or reply \"\n \"*MENU* to browse topics]\",\n ]\n )\n )\n tester.assert_state(\"state_no_responses\")\n\n\n@pytest.mark.asyncio\nasync def test_new_question_on_no_response(tester: AppTester, model_mock):\n tester.setup_state(\"state_no_responses\")\n await tester.user_input(\"Is the vaccine safe?\")\n tester.assert_state(\"state_display_response_choices\")\n tester.assert_answer(\"state_question\", \"Is the vaccine safe?\")\n\n\n@pytest.mark.asyncio\nasync def test_display_selected_choice(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n\n tester.setup_state(\"state_display_response_choices\")\n await tester.user_input(\"1\")\n tester.assert_state(\"state_display_selected_choice\")\n tester.assert_message(\n \"\\n\".join(\n [\n \"Yes, COVID-19 vaccines are safe. 
No step in the development, testing \"\n \"or regulation process has been skipped for these vaccines.\\r\",\n \"\\r\",\n \"When they are developed, all vaccines are thoroughly tested to make \"\n \"sure they are safe and work well.\\r\",\n \"\\r\",\n \"Every vaccine also needs to be approved by the medical regulators in \"\n \"each country to make sure that they are safe.\\r\",\n \"\\r\",\n \"Watch: youtu.be/AeSSyjhz8Hk\",\n ]\n )\n )\n\n await tester.user_input(\"yes\")\n [r1, r2] = model_mock.tstate.requests\n assert r1.json == {\n \"feedback\": {\"choice\": \"Are COVID-19 vaccines safe?\"},\n \"feedback_secret_key\": \"testsecretkey\",\n \"inbound_id\": 66,\n }\n assert r2.json == {\n \"feedback\": {\"choice\": \"Are COVID-19 vaccines safe?\", \"feedback\": \"yes\"},\n \"feedback_secret_key\": \"testsecretkey\",\n \"inbound_id\": 66,\n }\n\n\n@pytest.mark.asyncio\nasync def test_display_selected_choice_no_feedback(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n\n tester.setup_state(\"state_display_response_choices\")\n await tester.user_input(\"1\")\n tester.assert_state(\"state_display_selected_choice\")\n await tester.user_input(\"no\")\n [r1, r2] = model_mock.tstate.requests\n assert r1.json == {\n \"feedback\": {\"choice\": \"Are COVID-19 vaccines safe?\"},\n \"feedback_secret_key\": \"testsecretkey\",\n \"inbound_id\": 66,\n }\n assert r2.json == {\n \"feedback\": {\"choice\": \"Are COVID-19 vaccines safe?\", \"feedback\": \"no\"},\n \"feedback_secret_key\": \"testsecretkey\",\n \"inbound_id\": 66,\n }\n tester.assert_state(\"state_another_result\")\n tester.assert_message(\n \"\\n\".join(\n [\n \"Thank you for confirming.\",\n \"\",\n \"Try a different result?\",\n \"1. Are COVID-19 vaccines safe?\",\n \"2. Do vaccines work against COVID-19 variants?\",\n \"3. Do we know what's in the vaccines?\",\n \"\",\n \"-----\",\n \"Reply:\",\n \"â�“ *ASK* to ask more vaccine questions\",\n \"📌 *0* for the main *MENU*\",\n ]\n )\n )\n await tester.user_input(\"2\")\n tester.assert_state(\"state_display_selected_choice\")\n tester.assert_answer(\n \"state_display_response_choices\", \"Do vaccines work against COVID-19 variants?\"\n )\n\n\n@pytest.mark.asyncio\nasync def test_display_selected_choice_temporary_error(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n\n tester.setup_answer(\"state_display_response_choices\", \"Is the vaccine safe?\")\n model_mock.tstate.errormax = 1\n tester.setup_state(\"state_display_selected_choice\")\n await tester.user_input(\"1\")\n assert len(model_mock.tstate.requests) == 2\n\n\n@pytest.mark.asyncio\nasync def test_display_selected_choice_permanent_error(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n\n tester.setup_answer(\"state_display_response_choices\", \"Is the vaccine safe?\")\n model_mock.tstate.errormax = 5\n tester.setup_state(\"state_display_selected_choice\")\n await tester.user_input(\"1\")\n assert len(model_mock.tstate.requests) == 3\n tester.assert_message(\n \"Something went wrong, your question was not able to be processed. 
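# Pattern note on the model_mock fixture above: tstate.errormax makes the mock
# endpoint return HTTP 500 exactly errormax times before succeeding, so the
# "temporary error" test expects 2 requests (one retry, then success) while the
# "permanent error" test expects 3 (the client gives up). Minimal standalone
# version of the idea:
class FlakyEndpoint:
    def __init__(self, errormax):
        self.errormax, self.errors = errormax, 0

    def handle(self):
        if self.errors < self.errormax:
            self.errors += 1
            return 500
        return 200

flaky = FlakyEndpoint(errormax=1)
assert [flaky.handle(), flaky.handle()] == [500, 200]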
\"\n \"Please try again later\"\n )\n\n\n@pytest.mark.asyncio\nasync def test_state_end(tester: AppTester, model_mock):\n with open(\"vaccine/tests/aaq_model_response.json\") as f:\n tester.setup_answer(\"model_response\", f.read())\n\n tester.setup_answer(\"state_display_response_choices\", \"Is the vaccine safe?\")\n tester.setup_state(\"state_display_selected_choice\")\n await tester.user_input(\"1\")\n\n tester.assert_message(\n \"\\n\".join(\n [\n \"Thank you for confirming.\",\n \"\",\n \"-----\",\n \"Reply:\",\n \"â�“ *ASK* to ask more vaccine questions\",\n \"📌 *0* for the main *MENU*\",\n ]\n ),\n session=Message.SESSION_EVENT.CLOSE,\n )\n","repo_name":"praekeltfoundation/vaccine-eligibility","sub_path":"vaccine/tests/test_ask_a_question.py","file_name":"test_ask_a_question.py","file_ext":"py","file_size_in_byte":11613,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"42176432728","text":"import numpy as np\nimport xarray as xr\nimport os, sys\nimport matplotlib.pyplot as plt\nimport pickle\n\ntop_dir = os.getcwd().split('v4cnn')[0]\ntop_dir = top_dir + '/v4cnn'\ndef norm_avcov(x):\n x = x.astype(np.float64)\n \n cov = np.matmul(np.transpose(x, axes=(0, 2, 1)), x)\n numerator = np.sum(np.triu(cov, k=1), (1, 2))\n \n vlength = np.linalg.norm(x, axis=1, keepdims=True)\n max_cov = np.multiply(np.transpose(vlength, axes=(0, 2, 1)), vlength)\n denominator= np.sum(np.triu(max_cov, k=1), (1, 2))\n norm_cov = np.array(numerator)/np.array(denominator)\n \n return norm_cov\n\n \ndef norm_avcov_iter(x, subtract_mean=True):\n x = x.astype(np.float64)\n if subtract_mean:\n x = x - np.mean(x, 1, keepdims=True)\n diag_inds = np.triu_indices(x.shape[-1], k=1)\n numerator = [np.sum(np.dot(unit.T, unit)[diag_inds]) for unit in x]\n \n vnrm = np.linalg.norm(x, axis=1, keepdims=True)\n denominator = [np.sum(np.multiply(unit.T, unit)[diag_inds]) for unit in vnrm] \n norm_cov = np.array(numerator)/np.array(denominator)\n norm_cov[np.isnan(norm_cov)] = 0\n \n return norm_cov\n\n\n\ndef spatial_weight_normcov(netwtsd):\n unit_coords = xr.concat([netwtsd[key].coords['unit'] \n for key in netwtsd.keys()], 'unit').coords\n netwts_list = []\n for key in netwtsd:\n netwt = netwtsd[key].transpose('unit', 'chan', 'y', 'x').values \n netwts_list.append(netwt)\n \n av_cov_list = []\n for layer_wt in netwts_list:\n o_shape = np.shape(layer_wt)\n ravel_space = o_shape[:2] + (np.product(o_shape[2:]),)\n av_cov_list.append(norm_avcov_iter(layer_wt.reshape(ravel_space), subtract_mean=True))\n \n av_cov = np.concatenate(av_cov_list)\n av_cov_da = xr.DataArray(av_cov, unit_coords) \n return av_cov_da\n\n\ndef spatial_resp_normcov(resp):\n dims = resp.coords.dims\n if ('x' in resp) and ('y' in dims):\n resp = resp.transpose('unit','shapes', 'x', 'y')\n elif ('x' in dims):\n resp = resp.transpose('unit', 'shapes', 'x')\n elif ('y' in dims):\n resp = resp.transpose('unit', 'shapes', 'y')\n resp_vals = resp.values\n \n unit_coords = resp.coords['unit'].coords\n o_shape = np.shape(resp_vals)\n ravel_space = o_shape[:2] + (np.product(o_shape[2:]),)\n av_cov = norm_avcov_iter(resp_vals.reshape(ravel_space), subtract_mean=True)\n resp_av_cov_da = xr.DataArray(av_cov, unit_coords) \n return resp_av_cov_da\n\nfrom scipy.stats import kurtosis\n\ndef kurtosis_da(resp):\n dims = resp.coords.dims \n \n if ('x' in resp) and ('y' in dims):\n resp = resp.transpose('unit', 'shapes', 'x', 'y')\n elif ('x' in dims):\n resp = resp.transpose('unit', 'shapes', 'x')\n elif ('y' in 
dims):\n resp = resp.transpose('unit', 'shapes', 'y')\n \n stim_resp = np.array([(unit**2).sum((1, 2)) for unit in resp.values])\n pos_resp = np.array([(unit**2).sum(0).ravel() for unit in resp.values])\n k_stim = kurtosis(stim_resp, axis=1, fisher=False)\n k_pos = kurtosis(pos_resp, axis=1, fisher=False)\n return k_pos, k_stim\n\ndef tot_var(resp):\n dims = resp.coords.dims \n if ('x' in resp) and ('y' in dims):\n resp = resp.transpose('unit','shapes', 'x', 'y')\n elif ('x' in dims):\n resp = resp.transpose('unit', 'shapes', 'x')\n elif ('y' in dims):\n resp = resp.transpose('unit', 'shapes', 'y')\n \n pwr = np.array([(unit**2).sum() for unit in resp.values])\n return pwr\n#%%\na = np.array([1,2,3,4,5]).reshape(1,5)\n\n\nx = np.random.randn(10, 1)\nx = x*a\n\nx = x - np.mean(x, axis=0, keepdims=True)\n\nti = np.dot(x.T, x)\n\nvlength = np.linalg.norm(x, axis=0, keepdims=True)\nprod_v = vlength*vlength.T\nden = np.sum(np.triu(prod_v, k=1))\nnum = np.sum(np.triu(ti, k=1))\n\n\n\n#%%\nif sys.platform == 'linux2': \n data_dir = '/loc6tb/dean/'\nelse:\n data_dir = top_dir\n\nnet_name = 'bvlc_reference_caffenetpix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc'\nda = xr.open_dataset(data_dir + '/data/responses/'+net_name)['resp']\nda = da.squeeze()\nda = da.transpose('unit','shapes', 'x', 'y')\n#%%\nda = da[:5472:1]\nda = da - da[:, 0, :, :] #subtract off baseline\nda = da[:, 1:, ...] #get rid of baseline shape \n\ngoforit=True \nif 'netwtsd' not in locals() or goforit:\n with open(top_dir + '/nets/netwtsd.p', 'rb') as f: \n try:\n netwtsd = pickle.load(f, encoding='latin1')\n except:\n netwtsd = pickle.load(f)\n \n#%%\nwt_av_cov = spatial_weight_normcov(netwtsd) \nresp_av_cov = spatial_resp_normcov(da) \nk_pos, k_stim = kurtosis_da(da)\n#%%\npwr = tot_var(da)\n#non_k_var = (k_pos<42) * (k_pos>2) * (pwr>0) *(k_stim<42) * (k_stim>2)\n#resp_av_cov = resp_av_cov[non_k_var]\n\n#%%\nwt_av_cov, resp_av_cov = xr.align(wt_av_cov, resp_av_cov, join='inner')\nlayer_labels_ind = np.array(map(str, wt_av_cov.coords['layer_label'].values))\n\nn_plots = len(np.unique(layer_labels_ind))\nplt.figure(figsize=(12,3))\nlayer_labels = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5', 'fc6']\n\nfor i, layer in enumerate(layer_labels[1:]):\n plt.subplot(1, n_plots, i+1)\n x = wt_av_cov[layer_labels_ind==layer].values\n y = resp_av_cov[layer_labels_ind==layer].values\n if i<4:\n s=4\n else:\n s=1\n plt.scatter(x, y, s=s, color='k', edgecolors='none')\n #plt.semilogx()\n plt.xlim(-0.1,1.02);plt.ylim(-0.1,1.01);\n if i==0:\n plt.xlabel('Weight Covariance'); plt.ylabel('T.I.', rotation=0, va='center',ha='right', labelpad=15)\n if layer == 'conv2':\n plt.yticks([0,0.25,0.5, 0.75, 1]);plt.gca().set_yticklabels(['0','','0.5','','1'])\n plt.xticks([0,0.25,0.5, 0.75, 1]);plt.gca().set_xticklabels(['0','','0.5','','1'])\n plt.title(layer + '\\nr = ' + str(np.round(np.corrcoef(x,y)[0,1], 2)))\n\n else:\n plt.yticks([0,0.25,0.5, 0.75, 1]);plt.gca().set_yticklabels(['','','','',''])\n plt.xticks([0,0.25,0.5, 0.75, 1]);plt.gca().set_xticklabels(['','','','',''])\n plt.title(layer + '\\n' + str(np.round(np.corrcoef(x,y)[0,1], 2)))\n plt.tight_layout()\n plt.grid()\n\n#%%\nadj_resps=[\n'bvlc_reference_caffenetpix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_0.5pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_0.75pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 
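# Quick consistency sketch: the vectorised norm_avcov and the per-unit
# norm_avcov_iter defined above should agree once the mean subtraction (done
# internally only by norm_avcov_iter) is applied to the input by hand:
x_chk = np.random.randn(7, 20, 5)
a_chk = norm_avcov(x_chk - x_chk.mean(1, keepdims=True))
b_chk = norm_avcov_iter(x_chk, subtract_mean=True)
assert np.allclose(a_chk, b_chk)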
21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_0.95pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_conv5_0.1pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_conv5_0.95pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.1pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.95pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.2pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.3pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.4pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.5pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.6pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.7pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n'bvlc_caffenet_reference_increase_wt_cov_fc6_0.8pix_width[32.0]_x_(34.0, 194.0, 21)_y_(34.0, 194.0, 21)_amp_NonePC370.nc',\n]\n\n\nadj_netwts = [\n'netwtsd_orig.p',\n'netwtsd_0.5.p', \n'netwtsd_0.75.p', \n'netwtsd_0.95.p',\n'netwtsd_conv5_0.1.p',\n'netwtsd_conv5_0.95.p',\n'netwtsd_fc6_0.1.p',\n'netwtsd_fc6_0.95.p',\n'netwtsd_fc6_0.2.p',\n'netwtsd_fc6_0.3.p',\n'netwtsd_fc6_0.4.p',\n'netwtsd_fc6_0.5.p',\n'netwtsd_fc6_0.6.p',\n'netwtsd_fc6_0.7.p',\n'netwtsd_fc6_0.8.p',] \n#%%\nwt_av_covs = []\nresp_av_covs = []\n#%%\nfor netwts, net_name in zip(adj_netwts[:], adj_resps[:]):\n with open(top_dir + '/nets/' + netwts, 'rb') as f: \n try:\n netwtsd = pickle.load(f, encoding='latin1')\n except:\n netwtsd = pickle.load(f)\n \n da = xr.open_dataset(top_dir + '/data/responses/'+net_name)['resp']\n da = da.squeeze()\n da = da.transpose('unit','shapes', 'x', 'y')\n da = da[:11904]\n da = da - da[:, 0, :, :] #subtract off baseline\n da = da[:, 1:, ...] 
#get rid of baseline shape \n \n wt_av_cov = spatial_weight_normcov(netwtsd) \n resp_av_cov = spatial_resp_normcov(da)\n #wt_av_cov, resp_av_cov = xr.align(wt_av_cov, resp_av_cov, join='inner')\n wt_av_covs.append(wt_av_cov)\n resp_av_covs.append(resp_av_cov)\n\npickle.dump([resp_av_covs, wt_av_covs], open(top_dir + '/data/an_results/ti_vs_wt_cov_exps_all_lays.p', \"wb\" ) )\n\n","repo_name":"deanpospisil/v4cnn","sub_path":"misc/norm_cov_scratch.py","file_name":"norm_cov_scratch.py","file_ext":"py","file_size_in_byte":9211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"34522056402","text":"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.preprocessing as preprocessing\n\n\nX = np.linspace(-10, 10, 50)\n\nRX = np.reshape(X, [-1, 1])\nsample_data = RX * [1, 0] + [0, 1]\n\nprint('Sample X = ', sample_data)\n\nY = 2 * X ** 2 + 6 + np.random.uniform(-6, 6, size=50)\n\nsample_label = np.reshape(Y, [-1, 1])\n\nprint('Sample Y = ', sample_label)\n\n\nW1 = np.random.uniform(0, 2, [2, 11])\nW2 = np.random.uniform(0, 2, [11, 1])\nepoch = 2000\nlearn_rate = 0.0012\nmin_loss = 0.5\n\n\ndef relu(x):\n return np.where(x > 0, x, 0.0)\n\n\ndef diff_relu(x):\n return np.where(x > 0, 1.0, 0.0)\n\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n\ndef diff_sigmoid_with_output(y):\n return y * (1 - y)\n\n\ndef forward(xs):\n global W1, W2\n layer1 = sigmoid(np.dot(xs, W1))\n out = np.dot(layer1, W2)\n return layer1, out\n\n\ndef train():\n global sample_data, sample_label, W1, W2\n sample_data = preprocessing.normalize(sample_data)\n for step in range(epoch):\n layer1, out = forward(sample_data)\n loss = ((sample_label - out) ** 2) / 2\n error = out - sample_label\n out_delta = error\n layer1_error = np.dot(out_delta, W2.T)\n layer1_delta = layer1_error * diff_sigmoid_with_output(layer1)\n w2_delta = np.dot(layer1.T, out_delta)\n w1_delta = np.dot(sample_data.T, layer1_delta)\n W2 += -learn_rate * w2_delta\n W1 += -learn_rate * w1_delta\n print('step: ', step)\n print('loss = ', loss)\n if (np.all(loss < min_loss)):\n return\n\ntrain()\n\n\nplt.scatter(X, Y)\n\nX = np.linspace(-10, 10, 100)\n\nRX = np.reshape(X, [-1, 1])\nsample_data = RX * [1, 0] + [0, 1]\nsample_data = preprocessing.normalize(sample_data)\n_, pred = forward(sample_data)\npred = np.reshape(pred, [-1])\nprint(pred)\n\nplt.plot(X, pred, 'r-', lw=5)\nplt.show()\n","repo_name":"mySingleLive/py_ml_res","sub_path":"basic/bp_training_2.py","file_name":"bp_training_2.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"31539416749","text":"import logging\nimport pytest\nfrom decimal import Decimal, getcontext, ROUND_DOWN\nfrom metacomm.combinatorics.all_pairs2 import all_pairs2 as all_pairs\nfrom random import shuffle, randint, seed\nfrom tests.beeswax.impala_beeswax import ImpalaBeeswaxException\nfrom tests.common.impala_test_suite import ImpalaTestSuite\nfrom tests.common.test_vector import TestDimension, TestMatrix\n\nclass TestDecimalCasting(ImpalaTestSuite):\n \"\"\"Test Suite to verify that casting to Decimal works.\n\n Specifically, this test suite ensures that:\n - overflows and underflows are handled correctly.\n - casts from floats/string to their exact decimal types are correct.\n - max/min/NULL/0 can be expressed with their respective decimal types.\n \"\"\"\n DECIMAL_TYPES_MAP = {\n # All 
possible decimal types.\n # (0 < precision <= 38 && 0 <= scale <= 38 && scale <= precision)\n 'exhaustive' : [(p, s) for p in xrange(1, 39) for s in xrange(0, p + 1)],\n # Core only deals with precision 6,16,26 (different integer types)\n 'core' : [(p, s) for p in [6,16,26] for s in xrange(0, p + 1)],\n # mimics test_vectors.py and takes a subset of all decimal types\n 'pairwise' : all_pairs([(p, s) for p in xrange(1, 39) for s in xrange(0, p + 1)])\n }\n # We can cast from numeric or string types.\n CAST_FROM = ['string', 'number']\n # Set the default precision to 38 to operate on decimal values.\n getcontext().prec = 38\n # Represents a 0 in decimal\n DECIMAL_ZERO = Decimal('0')\n\n @classmethod\n def get_workload(cls):\n return 'functional-query'\n\n @classmethod\n def add_test_dimensions(cls):\n cls.TestMatrix = TestMatrix()\n cls.TestMatrix.add_dimension(TestDimension('decimal_type',\n *TestDecimalCasting.DECIMAL_TYPES_MAP[cls.exploration_strategy()]))\n cls.TestMatrix.add_dimension(\n TestDimension('cast_from', *TestDecimalCasting.CAST_FROM))\n cls.iterations = 1\n\n def setup_method(self, method):\n self.max_bigint = int(self.execute_scalar(\"select max_bigint()\"))\n\n def _gen_decimal_val(self, precision, scale):\n \"\"\"Generates a Decimal object with exactly as many digits as the precision.\"\"\"\n # Generates a numeric string which has as many digits as the precision.\n num = str(randint(10**(precision - 1), int('9' * precision)))\n # Incorporate scale into the string.\n if scale != 0: num = \"{0}.{1}\".format(num[:-scale], num[precision - scale:])\n # Convert the generated decimal string into a Decimal object and return a -ive/+ive\n # version of it with equal probability.\n return Decimal(num) if randint(0,1) else Decimal(\"-{0}\".format(num))\n\n def _assert_decimal_result(self, cast, actual, expected):\n assert actual == expected, \"Cast: {0}, Expected: {1}, Actual: {2}\".format(cast,\\\n expected, actual)\n\n def _normalize_cast_expr(self, decimal_val, scale, from_string=False):\n \"\"\"Convert the decimal value to a string literal to avoid overflow.\n\n If an integer literal is greater than the max bigint supported by Impala, it\n overflows. 
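For example, a 20-digit value like 12345678901234567890 exceeds the bigint maximum of 2^63 - 1. 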
This method replaces it with a string literal.\n \"\"\"\n if (scale == 0 and abs(decimal_val) > self.max_bigint) or from_string:\n return \"select cast('{0}' as Decimal({1}, {2}))\"\n return \"select cast({0} as Decimal({1}, {2}))\"\n\n def test_min_max_zero_null(self, vector):\n \"\"\"Sanity test at limits.\n\n Verify that:\n - We can read decimal values at their +ive and -ive limits.\n - 0 is expressible in all decimal types.\n - NULL is expressible in all decimal types\n \"\"\"\n precision, scale = vector.get_value('decimal_type')\n from_string = vector.get_value('cast_from') == 'string'\n dec_max = Decimal('{0}.{1}'.format('9' * (precision - scale), '9' * scale))\n # Multiplying large values with -1 can produce an overflow.\n dec_min = Decimal('-{0}'.format(str(dec_max)))\n cast = self._normalize_cast_expr(dec_max, scale, from_string=from_string)\n # Test max\n res = Decimal(self.execute_scalar(cast.format(dec_max, precision, scale)))\n self._assert_decimal_result(cast, res, dec_max)\n # Test min\n res = Decimal(self.execute_scalar(cast.format(dec_min, precision, scale)))\n self._assert_decimal_result(cast, res, dec_min)\n # Test zero\n res = Decimal(self.execute_scalar(cast.format(TestDecimalCasting.DECIMAL_ZERO,\n precision, scale)))\n self._assert_decimal_result(cast, res, TestDecimalCasting.DECIMAL_ZERO)\n # Test NULL\n null_cast = \"select cast(NULL as Decimal({0}, {1}))\".format(precision, scale)\n res = self.execute_scalar(null_cast)\n self._assert_decimal_result(null_cast, res, 'NULL')\n\n def test_exact(self, vector):\n \"\"\"Test to verify that an exact representation of the desired Decimal type is\n maintained.\"\"\"\n precision, scale = vector.get_value('decimal_type')\n from_string = vector.get_value('cast_from') == 'string'\n for i in xrange(self.iterations):\n val = self._gen_decimal_val(precision, scale)\n cast = self._normalize_cast_expr(val, scale, from_string=from_string)\\\n .format(val, precision, scale)\n res = Decimal(self.execute_scalar(cast))\n self._assert_decimal_result(cast, res, val)\n\n def test_overflow(self, vector):\n \"\"\"Test to verify that we always return NULL when trying to cast a number with greater\n precision than its intended decimal type\"\"\"\n precision, scale = vector.get_value('decimal_type')\n from_string = vector.get_value('cast_from') == 'string'\n for i in xrange(self.iterations):\n # Generate a decimal with a larger precision than the one we're casting to.\n val = self._gen_decimal_val(randint(precision + 1, 39), scale)\n cast = self._normalize_cast_expr(val, scale, from_string=from_string)\\\n .format(val, precision, scale)\n res = self.execute_scalar(cast)\n self._assert_decimal_result(cast, res, 'NULL')\n\n def test_underflow(self, vector):\n \"\"\"Test to verify that we truncate when the scale of the number being cast is higher\n than the target decimal type (with no change in precision).\n \"\"\"\n precision, scale = vector.get_value('decimal_type')\n from_string = vector.get_value('cast_from') == 'string'\n if precision == scale:\n pytest.skip(\"Cannot underflow scale when precision and scale are equal\")\n for i in xrange(self.iterations):\n new_scale = randint(scale + 1, precision)\n val = self._gen_decimal_val(precision, randint(new_scale, precision))\n # We don't need to normalize the cast expr because scale will never be zero\n cast = self._normalize_cast_expr(val, scale, from_string=from_string)\\\n .format(val, precision, scale)\n res = Decimal(self.execute_scalar(cast))\n # Truncate the decimal value to its target 
scale with quantize.\n self._assert_decimal_result(cast, res, val.quantize(Decimal('0e-%s' % scale),\n rounding=ROUND_DOWN))\n","repo_name":"ImpalaToGo/ImpalaToGo","sub_path":"tests/query_test/test_decimal_casting.py","file_name":"test_decimal_casting.py","file_ext":"py","file_size_in_byte":6859,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"30"} +{"seq_id":"36371264618","text":"# python3\n# import time\n\n\ndef lcm_naive(a, b):\n assert 1 <= a <= 2 * 10 ** 9 and 1 <= b <= 2 * 10 ** 9\n\n multiple = max(a, b)\n while multiple % a != 0 or multiple % b != 0:\n multiple += 1\n\n return multiple\n\n\n# def lcm(a, b):\n# assert 1 <= a <= 2 * 10 ** 9 and 1 <= b <= 2 * 10 ** 9\n# larger, smaller = max(a,b), min(a,b)\n# multiple = larger\n# while multiple % smaller != 0:\n# multiple += larger\n# return multiple\n\ndef gcd(a, b): # Euclid's algorithm for finding the GCD of two numbers\n assert 0 <= a <= 2 * 10 ** 9 and 0 <= b <= 2 * 10 ** 9\n\n if b == 0: # no remainder on previous recursion\n return a # the GCD\n else:\n a_prime = a % b # a_prime is the remainder left when b divides a\n return gcd(b, a_prime) # recursion\n\ndef lcm(a, b): # using Euclid's GCD\n assert 1 <= a <= 2 * 10 ** 9 and 1 <= b <= 2 * 10 ** 9\n larger, smaller = max(a,b), min(a,b)\n return int(smaller * larger / gcd(larger,smaller))\n\nif __name__ == '__main__':\n input_a, input_b = map(int, input().split())\n print(lcm(input_a, input_b))\n\n# tic = time.perf_counter()\n# print(lcm_euclid(12345678,87654321))\n# toc = time.perf_counter()\n#\n# print(f\"lcm_euclid Ran in {toc - tic:0.8f} seconds\")\n#\n# tic = time.perf_counter()\n# print(lcm(12345678,87654321))\n# toc = time.perf_counter()\n#\n# print(f\"lcm ,ran in {toc - tic:0.8f} seconds\")\n","repo_name":"middleagedspread/Algorithmic_Toolbox_Coursera","sub_path":"Algorithmic Warm Up/Least Common Multiple/lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14108351183","text":"\"\"\"\nDiffusion equation with Dirichlet conditions:\n u_t = a*u_xx\n u(-0.5) = u(0.5) = 0 \n\"\"\"\n\nfrom dolfin import *\nimport numpy\nimport matplotlib.pyplot as plt\nimport time\n\n# diffusion coeff\na = 0.25\n\n# Create mesh and define function space\nnx = 200\nxscale = 1.\nmesh = IntervalMesh(nx, -0.5*xscale,0.5*xscale)\nV = FunctionSpace(mesh, 'Lagrange', 1)\n\n# Define boundary conditions\nu0 = Expression('cos(3.14159265*x[0])')\n\nclass Boundary(SubDomain): # define the Dirichlet boundary\n def inside(self, x, on_boundary):\n return on_boundary\n\nboundary = Boundary()\nbc = DirichletBC(V, 0, boundary)\n\n# Initial condition\nu_1 = interpolate(u0, V)\n\nT = 1 # total simulation time\ndt = 0.01 # time step\n\n# Define variational problem\n\n# Laplace term\nu = TrialFunction(V)\nv = TestFunction(V)\na_K = a*inner(nabla_grad(u), nabla_grad(v))*dx\n\n# \"Mass matrix\" term\na_M = u*v*dx\n\nM = assemble(a_M)\nK = assemble(a_K)\nA = M + dt*K\n\n# extract mesh coordinates\nxvals = mesh.coordinates()\n\n# plotting\nplt.ion()\nfig = plt.figure(1)\t\nplt.ioff()\n \n# plot func\ndef plots(u,xvals,t):\n u = u.vector().array()\n plt.cla()\n plt.plot(xvals,u,linewidth=3)\n plt.ylim(0, 1.01)\n plt.xlim(-0.5,0.5)\n plt.xlabel(r'$x$',fontsize = 15)\n plt.ylabel(r'$u$',fontsize = 15)\n plt.title(r'FEM Solution to the 1D Heat Equation: $t$ = '\\\n + str(t).ljust(4, str(0)))\n plt.grid(\"on\")\n fig.canvas.draw()\n #time.sleep(.1)\n 
return\n\n#plot IC\nplots(u_1,xvals,0)\n\n# Compute solution\nu = Function(V)\nt = dt\nwhile t <= T:\n b = M*u_1.vector()\n u0.t = t\n bc.apply(A, b)\n solve(A, u.vector(), b)\n \n t += dt\n u_1.assign(u)\n plots(u_1,xvals,t)\n\nplt.show()\n","repo_name":"coryahrens/radiative-rg","sub_path":"src/python/Nick's RG Code/Heat Equation/heat_1d.py","file_name":"heat_1d.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"71066794645","text":"# -*- coding: utf-8 -*-\n\"\"\"CNMOOC (好大学在线).\"\"\"\n\nfrom .utils import *\nfrom bs4 import BeautifulSoup\n\nCANDY = Crawler()\nCONFIG = {}\nFILES = {}\n\n\ndef get_summary(url):\n \"\"\"Get course information.\"\"\"\n\n res = CANDY.get(url).text\n soup = BeautifulSoup(res, 'lxml')\n title = soup.find(class_='view-title substr').get_text(strip=True)\n university = soup.find(class_='person-attach substr').get_text(strip=True)\n\n dir_name = course_dir(title, university)\n print(dir_name)\n return dir_name\n\n\ndef get_resource(course_nav):\n \"\"\"Collect video resources.\"\"\"\n\n counter = Counter()\n outline = Outline()\n video_list = []\n document_list = []\n\n res = CANDY.get(course_nav).text\n soup = BeautifulSoup(res, 'lxml')\n nav = soup.find(id='unitNavigation')\n chapters = nav.find_all(class_='view-chapter')\n for chapter in chapters:\n chapter_name = chapter.find(class_='chapter-text substr').get_text(strip=True)\n counter.add(0)\n outline.write(chapter_name, counter, 0)\n\n lectures = chapter.find_all(class_='view-lecture')\n for lecture in lectures:\n actions = lecture.find(class_='lecture-title')\n lecture_name = actions.get_text(strip=True)\n counter.add(1)\n outline.write(lecture_name, counter, 1)\n # unitid = actions.a['unitid']\n # print(unitid)\n group = actions.div.find_all('a')\n # for action in group:\n # print(action.i['class'])\n videos = list(filter(lambda action: 'icon-play' in action.i['class'][0], group))\n # videos = [action for action in group if lambda :'icon-play' in action.i['class'][0]]\n docs = list(filter(lambda action: 'icon-doc' in action.i['class'][0], group))\n for video in videos:\n counter.add(2)\n outline.write(video['title'], counter, 2, sign='#')\n if len(videos) == 1:\n extra_num = ''\n else:\n extra_num = '-%s' % str(counter)[-1:]\n video_list.append(Video(counter, lecture_name + extra_num, video['itemid']))\n counter.reset()\n for doc in docs:\n counter.add(2)\n outline.write(doc['title'], counter, 2, sign='*')\n document_list.append(Document(counter, lecture_name, doc['itemid']))\n return video_list, document_list\n\n\ndef parse_resource(video):\n \"\"\"Resolve the video URL.\"\"\"\n\n res_print(video.file_name)\n res = CANDY.post('https://www.cnmooc.org/study/play.mooc',\n data={'itemId': video.meta, 'itemType': '10', 'testPaperId': ''}).text\n soup = BeautifulSoup(res, 'lxml')\n node_id = soup.find(id='nodeId')['value']\n\n res = CANDY.post('https://www.cnmooc.org/item/detail.mooc', data={'nodeId': node_id, 'itemId': video.meta}).json()\n url = res['node']['flvUrl']\n FILES['videos'].write_string(url)\n FILES['renamer'].write(url.split('/')[-1], video.file_name)\n if CONFIG['sub']:\n exts = res['node']['nodeExts']\n for ext in exts:\n file_name = '%s%s.srt' % (video.file_name, '' if len(exts) == 1 else '_' + ext['languageCode'])\n CANDY.download_bin('https://static.cnmooc.org' + ext['node']['rsUrl'], WORK_DIR.file(file_name))\n\n\ndef get_doc(doc_list):\n \"\"\"Download course documents.\"\"\"\n\n WORK_DIR.change('Docs')\n for doc in doc_list:\n post_data = {'itemId': doc.meta, 
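# itemType '20' requests a document; parse_resource above uses itemType '10' for videos\n 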
'itemType': '20', 'testPaperId': ''}\n res = CANDY.post('https://www.cnmooc.org/study/play.mooc', data=post_data).text\n try:\n url = re.search(r'isSlideShow\\(\"(.+)?\"\\);', res).group(1)\n except AttributeError:\n continue\n ext = url.split('.')[-1]\n file_name = WORK_DIR.file(doc.file_name + '.' + ext)\n res_print(doc.name)\n if not WORK_DIR.exist(file_name):\n CANDY.download_bin('https://static.cnmooc.org' + url, file_name)\n\n\ndef start(url, config, cookies=None):\n \"\"\"Interface function invoked by the caller.\"\"\"\n\n global WORK_DIR\n CONFIG['dpl'] = config['dpl'] and SYS == 'nt'\n\n CONFIG.update(config)\n CANDY.set_cookies(cookies)\n\n course_info = get_summary(url)\n WORK_DIR = WorkingDir(CONFIG['dir'], course_info)\n WORK_DIR.change('Videos')\n\n FILES['renamer'] = Renamer(WORK_DIR.file('Rename.bat')) if SYS == 'nt' else Renamer(WORK_DIR.file('Rename.sh'))\n FILES['videos'] = ClassicFile(WORK_DIR.file('Videos.txt'))\n if CONFIG['dpl']:\n FILES['playlist'] = Playlist()\n\n course = 'https://www.cnmooc.org/portal/session/unitNavigation/'\n course_nav = course + url.split('/')[-1]\n resource = get_resource(course_nav)\n\n rename = WORK_DIR.file('Names.txt') if CONFIG['rename'] else False\n\n if CONFIG['dpl']:\n parse_res_list(resource[0], rename, FILES['playlist'].write, parse_resource)\n else:\n parse_res_list(resource[0], rename, parse_resource)\n\n if CONFIG['doc']:\n get_doc(resource[1])\n","repo_name":"Foair/course-crawler","sub_path":"mooc/cnmooc.py","file_name":"cnmooc.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"30"}
+{"seq_id":"3460314433","text":"import math\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.modules.batchnorm import _BatchNorm\nimport time\nfrom collections import OrderedDict\nimport json\nimport vgtk\nimport SPConvNets.utils.base_so3conv as M\nimport vgtk.so3conv.functional as L\nimport vgtk.so3conv as sptk\nfrom SPConvNets.utils.slot_attention import SlotAttention\nimport vgtk.spconv as zptk\nfrom SPConvNets.utils.loss_util import batched_index_select\nfrom extensions.chamfer_dist import ChamferDistance\n\nfrom vgtk.functional import compute_rotation_matrix_from_quaternion, compute_rotation_matrix_from_ortho6d, so3_mean\nfrom model_util import farthest_point_sampling\n\n\n\nclass ClsSO3ConvModel(nn.Module): # SO(3) equi-conv-network # classification so3 conv\n def __init__(self, params):\n super(ClsSO3ConvModel, self).__init__()\n\n # get backbone model\n self.backbone = nn.ModuleList()\n for block_param_ in params['backbone']: # backbone\n\n # dim_in, dim_out, kernel_size, stride,\n # radius, aperture, sigma,\n # anchors_dim, n_neighbor, anchor_nn, multiplier = 3, lazy_sample = True\n for block_param in block_param_:\n cur_args = block_param[\"args\"]\n self.backbone.append(zptk.InterZPConv(dim_in=cur_args['dim_in'], dim_out=cur_args['dim_out'], kernel_size=1, stride=cur_args['stride'], radius=cur_args['radius'], aperture=0.1, sigma=cur_args['sigma'], anchors_dim=12, n_neighbor=cur_args['n_neighbor'], anchor_nn=cur_args['kanchor'], multiplier=cur_args['multiplier'], lazy_sample=cur_args['lazy_sample']))\n # self.backbone.append(M.BasicSO3PoseConvBlock(block_param))\n # self.outblock = M.ClsOutBlockR(params['outblock'])\n # output classification block\n print(f\"Number of convs in the backbone: {len(self.backbone)}\")\n #\n self.chamfer_dist = ChamferDistance()\n ''' Get anchors '''\n self.anchors = 
torch.from_numpy(L.get_anchors(params['outblock']['kanchor'])).cuda()\n self.n_reconstructed = 128\n # self.outblock = M.ClsOutBlockPointnet(params['outblock'], down_task=False) # clsoutblockpointnet?\n # PointNet Encoder\n # self.pointnetenc = sptk.PointnetSO3Conv(dim_in=256, dim_out=1024, kanchor=60)\n # Need a decoder for position and latent variant features --- but it is what makes it tricky --- we need implicit shape decoded from invariant features as well as each point's variant implicit features, we should factorize positiona and pose to a canonical frame with position and pose from the equivariant features --- position & variant features\n # a equivariant point completion\n # todo: better MLP models\n ''' Construct canonical position decoding block '''\n # encoded feature dimension\n self.encoded_feat_dim = params['outblock']['dim_in']\n self.kanchor = params['outblock']['kanchor']\n self.xyz_canon_in_feat_dim = self.encoded_feat_dim * self.kanchor\n self.xyz_canon_in_feat_dim = self.encoded_feat_dim\n self.xyz_canon_block = nn.Sequential(\n nn.Conv2d(in_channels=self.xyz_canon_in_feat_dim, out_channels=self.xyz_canon_in_feat_dim // 2, kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.xyz_canon_in_feat_dim // 2),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.xyz_canon_in_feat_dim // 2, out_channels=self.xyz_canon_in_feat_dim // 4, kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.xyz_canon_in_feat_dim // 4),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.xyz_canon_in_feat_dim // 4, out_channels=3, kernel_size=(1, 1), stride=(1, 1), bias=True),\n )\n ''' Construct pose estimation block '''\n self.pose_estimation_block = nn.Sequential(\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim // 2, kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim // 2),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim // 2, out_channels=self.encoded_feat_dim // 4,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim // 4),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim // 4, out_channels=12, kernel_size=(1, 1), stride=(1, 1),\n bias=True),\n )\n ''' Construct slot-attention module now.. 
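It is intended to cluster the per-point features into num_slots slot vectors via iterated attention. 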
'''\n ### eps is set to default; we may need to tune `dim` and `hidden_dim` ###\n ### output feature shape: bz x num_slots x dim ###\n self.num_slots = params['outblock']['k']\n self.slot_attention = SlotAttention(num_slots=params['outblock']['k'], dim=self.encoded_feat_dim, hidden_dim=self.encoded_feat_dim)\n ''' Construct per-point variant feature transformation MLP '''\n self.variant_feat_trans = nn.Sequential(\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim, kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n )\n ''' Construct per-slot rotation and translation prediction MLP '''\n self.transformation_dim = 7\n self.transformation_prediction = nn.Sequential(\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim // 2, kernel_size=(1, 1),\n stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim // 2),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim // 2, out_channels=self.encoded_feat_dim // 4,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim // 4),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim // 4, out_channels=self.transformation_dim,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n )\n ''' Construct part point construction network '''\n # todo: better reconstruction process\n self.recon_part_M = 96\n self.part_reconstruction_net = nn.Sequential(\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim, kernel_size=(1, 1),\n stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.encoded_feat_dim,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n nn.BatchNorm2d(num_features=self.encoded_feat_dim),\n nn.ReLU(),\n nn.Conv2d(in_channels=self.encoded_feat_dim, out_channels=self.recon_part_M * 3,\n kernel_size=(1, 1), stride=(1, 1), bias=True),\n )\n\n # self.xyz_canon_block = nn.Linear(self.encoded_feat_dim * self.kanchor, 3)\n self.na_in = params['na'] # na_in\n # todo: what is this parameter used for?\n self.invariance = True\n\n\n # R.size = bz x N x na x 3 x 3\n # Gram-Schmidt: normalize column 0, orthogonalize column 1 against it, and take column 2 as their cross product\n def get_rotation_matrix(self, Rs):\n a1s = Rs[:, :, :, :, 0].unsqueeze(-1)\n a2s = Rs[:, :, :, :, 1].unsqueeze(-1)\n b1s = a1s / torch.norm(a1s, dim=3, p=2, keepdim=True)\n b2s = a2s - (torch.sum(b1s * a2s, dim=3, keepdim=True)) * b1s\n b2s = b2s / torch.norm(b2s, dim=3, p=2, keepdim=True)\n b3s = torch.zeros_like(b2s)\n b3s[..., 0, 0] = b1s[..., 1, 0] * b2s[..., 2, 0] - b1s[..., 2, 0] * b2s[..., 1, 0]\n b3s[..., 1, 0] = -(b1s[..., 0, 0] * b2s[..., 2, 0] - b1s[..., 2, 0] * b2s[..., 0, 0])\n b3s[..., 2, 0] = b1s[..., 0, 0] * b2s[..., 1, 0] - b1s[..., 1, 0] * b2s[..., 0, 0]\n # tb1, tb2, tb3 = b1s[0, 0, :, 0], b2s[0, 0, :, 0], b3s[0, 0, :, 0]\n Rs = torch.cat([b1s, b2s, b3s], dim=-1)\n # print(torch.sum((torch.det(Rs) < 0).long()))\n return Rs\n\n def forward_one_iter(self, x, pose, rlabel=None): # rotation label\n\n output = {}\n #\n # should take pose as input; actually an orientation of each point --- not orientation... the rotation has been done? fix the global pose of each input point; relative pose relations --- how to compute? 
create laptops and put the joint-center to the original point\n # nb, np, 3 -> [nb, 3, np] x [nb, 1, np, na]\n ori_pts = x.clone()\n\n bz, npoints = x.size(0), x.size(2)\n x = x.contiguous().transpose(1, 2).contiguous()\n # input_x = x # preprocess input and equivariant\n ''' Process input '''\n # how does kpconv process data?\n # after preprocess: x.feats.size = bz x 1 x np x na\n x = M.preprocess_input(x, 1, False)\n # print(\"preprocessed_x: \", x.feats.size())\n inter_idx, inter_w = None, None\n for block_i, block in enumerate(self.backbone):\n inter_idx, inter_w, x = block(x, inter_idx, inter_w)\n\n ''' Cluster points '''\n # print(\"x.feats.size,\", x.feats.size())\n # x_feats, _ = torch.max(x.feats, dim=-1, keepdim=True)\n x_feats = x.feats\n assert x_feats.size(-1) == 1, f\"Not correct x.feats.size: {x_feats.size()}\"\n # rep_slots: bz x num_slots x c_out; attn: bz x N x num_slots\n rep_slots, attn_ori = self.slot_attention(x_feats.squeeze(-1).contiguous().transpose(1, 2).contiguous())\n # print(\"attn_ori.size\", attn_ori.size())\n #\n attn_slot = attn_ori / attn_ori.sum(dim=-1, keepdim=True)\n # attn.size?\n attn = attn_ori.contiguous().transpose(1, 2).contiguous()\n # slot_weights: bz x num_slots\n # slot weights.size\n slot_weights = attn.sum(dim=1)\n slot_weights = slot_weights / torch.sum(slot_weights, dim=-1, keepdim=True)\n\n point_label = torch.argmax(attn_ori, dim=1)\n # if not os.path.exists(\"vis_pts.npy\"):\n np.save(\"vis_pts.npy\", ori_pts.detach().cpu().numpy())\n np.save(\"vis_labels.npy\", point_label.detach().cpu().numpy())\n\n ''' Predict points from slots' representations '''\n # slot_points: bz x 3 * M x num_slots\n slot_points = self.part_reconstruction_net(\n rep_slots.contiguous().transpose(1, 2).contiguous().unsqueeze(-1)\n ).contiguous().squeeze(-1)\n # slot_points: bz x num_slots x M x 3\n slot_points = slot_points.contiguous().transpose(1, 2).contiguous().view(bz, self.num_slots, self.recon_part_M,\n 3)\n np.save(\"slot_pts.npy\", slot_points.detach().cpu().numpy())\n\n # ''' Predict \\delta_q and translation for each rotation state q '''\n # per_slot_transformation = self.transformation_prediction(rep_slots.contiguous().transpose(1, 2).contiguous().unsqueeze(-1)).squeeze(-1)\n # # pred_R: bz x 4 x num_slots\n # pred_R, pred_T = per_slot_transformation[:, :4, ...], per_slot_transformation[:, 4:, ...]\n #\n # ''' From predicted R to rotation matrices '''\n # # pred_res_R: bz x num_slots x 3 x 3\n # pred_R = compute_rotation_matrix_from_quaternion(pred_R.contiguous().permute(0, 1, 2).contiguous().view(-1, 4)).contiguous().view(bz, self.num_slots, 3, 3)\n #\n # ''' From predicted T to translation vectors '''\n # # pred_T: bz x num_slots x 3\n # pred_T = pred_T.contiguous().permute(0, 1, 2).contiguous()\n # ''' From predicted rotation matrix and translation matrix to transformed points '''\n # # transformed_slot_pts: bz x num_slots x M x 3\n # transformed_slot_pts = torch.matmul(pred_R, slot_points.contiguous().transpose(-1, -2)).contiguous().transpose(-1, -2) + pred_T.unsqueeze(2)\n\n transformed_slot_pts = slot_points\n\n ''' Repeat input points for further chamfer distance computation '''\n # ori_pts: bz x 3 x N # ah! yuzhou! 
input_repeat_pts ori_pts\n input_repeat_pts = ori_pts.contiguous().transpose(1, 2).contiguous().unsqueeze(1).repeat(1, self.num_slots, 1, 1)\n # dist1: -1 x M; dist2: -1 x N\n dist1, dist2 = self.chamfer_dist(transformed_slot_pts.view(-1, self.recon_part_M, 3).contiguous(),\n input_repeat_pts.contiguous().view(-1, input_repeat_pts.size(-2), 3).contiguous(),\n return_raw=True)\n dist2 = dist2.contiguous().view(bz, self.num_slots, npoints)\n # attn_ori: bz x N x ns --> bz x ns x 1 x N\n # dist2: bz x num_slots x N\n dist2 = torch.sum(attn_slot.contiguous() * dist2, dim=-1)\n ''' Global pts sampling and chamfer distance calculation '''\n transformed_slot_pts = transformed_slot_pts.contiguous().view(bz, -1, 3)\n\n # R_expand = pred_R.unsqueeze(2).repeat(1, 1, self.recon_part_M, 1, 1)\n # T_expand = pred_T.unsqueeze(2).repeat(1, 1, self.recon_part_M, 1)\n\n ''' Sample points '''\n #\n fps_idx = farthest_point_sampling(transformed_slot_pts, self.n_reconstructed)\n downsampled_transformed_pts = transformed_slot_pts.contiguous().view(bz * self.recon_part_M * self.num_slots,\n -1)[fps_idx, :].contiguous().view(bz,\n self.n_reconstructed,\n -1)\n # downsampled_R = R_expand.contiguous().view(bz * self.recon_part_M * self.num_slots, 3, 3)[fps_idx, :,\n # :].contiguous().view(bz, self.n_reconstructed, 3, 3)\n # downsampled_T = T_expand.contiguous().view(bz * self.recon_part_M * self.num_slots, 3)[fps_idx,\n # :].contiguous().view(bz, self.n_reconstructed, 3)\n #\n ''' Calculate global chamfer distance '''\n dist1_glb, dist2_glb = self.chamfer_dist(\n downsampled_transformed_pts, ori_pts.contiguous().transpose(1, 2).contiguous(), return_raw=True\n )\n\n np.save(\"downsampled_pts.npy\", downsampled_transformed_pts.detach().cpu().numpy())\n\n ''' Get reconstruction loss for global shape and local part '''\n glb_recon_loss = dist1_glb.mean(-1) + dist2_glb.mean(-1)\n lal_recon_loss = torch.sum(slot_weights * dist2, dim=-1)\n recon_loss = (glb_recon_loss + lal_recon_loss).mean()\n recon_loss = (glb_recon_loss).mean()\n\n # print(attn.size())\n return recon_loss, attn, downsampled_transformed_pts\n\n def get_rotation_sims(self, gt_rot, pred_rot):\n if gt_rot.size(-1) > 3:\n gt_rot = gt_rot[..., :3, :3]\n pred_rot = pred_rot[..., :3, :3]\n # gt_rot: bz x npoints x 3 x 3;\n def get_trace(a):\n return a[..., 0, 0] + a[..., 1, 1] + a[..., 2, 2]\n inv_gt_rot = gt_rot.contiguous().transpose(2, 3).contiguous()\n # bz x npoints x 3 x 3\n rel_mtx = torch.matmul(pred_rot, inv_gt_rot)\n # traces: bz x npoints\n traces = get_trace(rel_mtx)\n traces = (traces - 1) / 2.\n print(\"Similartiy with gt_rot\", torch.mean(traces).item())\n return torch.mean(traces).item()\n\n def forward(self, x, pose, rlabel=None, nn_inter=2):\n\n # loss, attn = self.forward_one_iter(x, pose, rlabel=rlabel)\n # return loss, attn\n\n bz, np = x.size(0), x.size(2)\n init_pose = torch.zeros([bz, np, 4, 4], dtype=torch.float32).cuda()\n init_pose[..., 0, 0] = 1.; init_pose[..., 1, 1] = 1.; init_pose[..., 2, 2] = 1.\n tot_loss = 0.0\n cur_transformed_points = x\n cur_estimated_pose = init_pose\n nn_inter = 1\n cur_estimated_pose = pose\n for i in range(nn_inter):\n cur_reconstructed_loss_orbit, attn, cur_transformed_points = self.forward_one_iter(cur_transformed_points, cur_estimated_pose, rlabel=rlabel)\n tot_loss += cur_reconstructed_loss_orbit\n # cur_gt_rot_dis = self.get_rotation_sims(pose, cur_estimated_pose)\n torch.cuda.empty_cache()\n\n return tot_loss / nn_inter, attn # range(n_iter)\n\n def get_anchor(self):\n return 
self.backbone[-1].get_anchor()\n\n\n# Full Version\ndef build_model(opt,\n mlps=[[64,64], [128,128], [256,256],[256]],\n out_mlps=[256],\n strides=[2,2,2,2],\n initial_radius_ratio = 0.2,\n sampling_ratio = 0.4,\n sampling_density = 0.5,\n kernel_density = 1,\n kernel_multiplier = 2,\n input_radius = 1.0,\n sigma_ratio= 0.5, # 0.1\n xyz_pooling = None,\n so3_pooling = \"max\",\n to_file=None):\n # initial_radius_ratio = 0.05\n initial_radius_ratio = 0.15\n initial_radius_ratio = 0.20\n device = opt.device\n input_num = opt.model.input_num # 1024\n dropout_rate = opt.model.dropout_rate # default setting: 0.0\n # temperature\n temperature = opt.train_loss.temperature # set temperature\n so3_pooling = 'attention' # opt.model.flag # model flag\n na = 1 if opt.model.kpconv else opt.model.kanchor # how to represent rotation possibilities? --- sampling from the sphere ---- points!\n nmasks = opt.nmasks\n\n if input_num > 1024:\n sampling_ratio /= (input_num / 1024)\n strides[0] = int(2 * (input_num / 1024))\n print(\"Using sampling_ratio:\", sampling_ratio)\n print(\"Using strides:\", strides)\n\n params = {'name': 'Invariant ZPConv Model',\n 'backbone': [],\n 'na': na\n }\n\n dim_in = 1\n\n # process args\n n_layer = len(mlps)\n stride_current = 1 # stride_current_--\n stride_multipliers = [stride_current]\n # for i in range(n_layer):\n # stride_current *= 2 # strides[i]\n # stride_multipliers += [stride_current]\n # todo: use other strides? --- possible other choices?\n for i in range(n_layer):\n stride_current *= 1 # strides[i]\n stride_multipliers += [stride_current]\n\n num_centers = [int(input_num / multiplier) for multiplier in stride_multipliers]\n # radius ratio should increase as the stride increases to sample more reasonable points\n radius_ratio = [initial_radius_ratio * multiplier**sampling_density for multiplier in stride_multipliers]\n # radius_ratio = [0.25, 0.5]\n # set radius for each layer\n radii = [r * input_radius for r in radius_ratio]\n # Compute sigma\n # weighted_sigma = [sigma_ratio * radii[i]**2 * stride_multipliers[i] for i in range(n_layer + 1)]\n # sigma for radius and points\n weighted_sigma = [sigma_ratio * radii[0]**2]\n\n for idx, s in enumerate(strides):\n # weighted_sigma.append(weighted_sigma[idx] * 2)\n weighted_sigma.append(weighted_sigma[idx] * 1) #\n\n for i, block in enumerate(mlps):\n block_param = []\n for j, dim_out in enumerate(block):\n lazy_sample = i != 0 or j != 0\n stride_conv = i == 0 or xyz_pooling != 'stride'\n # TODO: WARNING: Neighbor here did not consider the actual nn for pooling. 
Hardcoded in vgtk for now.\n # neighbor = int(sampling_ratio * num_centers[i] * radius_ratio[i]**(1/sampling_density))\n neighbor = 32 # int(sampling_ratio * num_centers[i] * radius_ratio[i]**(1/sampling_density))\n # if i==0 and j==0:\n # neighbor *= int(input_num/1024)\n kernel_size = 1\n # if j == 0:\n # # stride at first (if applicable), enforced at first layer\n # inter_stride = strides[i]\n # nidx = i if i == 0 else i+1\n # if stride_conv:\n # neighbor *= 2 # = 2 * int(sampling_ratio * num_centers[i] * radius_ratio[i]**(1/sampling_density))\n # # kernel_size = 1 # if inter_stride < 4 else 3\n # else:\n inter_stride = 1\n nidx = i+1\n\n print(f\"At block {i}, layer {j}!\")\n print(f'neighbor: {neighbor}')\n print(f'stride: {inter_stride}')\n sigma_to_print = weighted_sigma[nidx]**2 / 3\n print(f'sigma: {sigma_to_print}')\n print(f'radius ratio: {radius_ratio[nidx]}')\n\n # one-inter one-intra policy\n block_type = 'inter_block' if na<60 else 'separable_block' # point-conv and group-conv separable conv\n print(f\"layer {i}, block {j}, block_type: {block_type}\")\n conv_param = {\n 'type': block_type,\n 'args': {\n 'dim_in': dim_in,\n 'dim_out': dim_out,\n 'kernel_size': kernel_size,\n 'stride': inter_stride,\n 'radius': radii[nidx],\n 'sigma': weighted_sigma[nidx],\n 'n_neighbor': neighbor,\n 'lazy_sample': lazy_sample,\n 'dropout_rate': dropout_rate,\n 'multiplier': kernel_multiplier,\n 'activation': 'leaky_relu',\n 'pooling': xyz_pooling,\n 'kanchor': na,\n 'norm': 'BatchNorm2d',\n }\n }\n block_param.append(conv_param)\n dim_in = dim_out\n\n params['backbone'].append(block_param)\n\n # kernels here are defined as kernel points --- explicit kernels --- each with a [dim_in, dim_out] weight matrix\n params['outblock'] = {\n 'dim_in': dim_in,\n 'mlp': out_mlps,\n 'fc': [64],\n 'k': nmasks, # 40,\n 'pooling': so3_pooling,\n 'temperature': temperature,\n 'kanchor':na,\n }\n\n if to_file is not None:\n with open(to_file, 'w') as outfile:\n json.dump(params, outfile)\n\n model = ClsSO3ConvModel(params).to(device)\n return model\n\ndef build_model_from(opt, outfile_path=None):\n return build_model(opt, to_file=outfile_path)\n","repo_name":"Meowuu7/equi-articulated-pose","sub_path":"SPConvNets/models/unsup_seg_basicconv_pn.py","file_name":"unsup_seg_basicconv_pn.py","file_ext":"py","file_size_in_byte":22357,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"30"} +{"seq_id":"72119739284","text":"import json\nimport boto3\nimport logging\nimport os\nimport traceback\nimport datetime\nimport uuid\nimport pytz\n\n# region Logging\n\nLOG_LEVEL = os.getenv(\"LOG_LEVEL\", \"INFO\")\nlogger = logging.getLogger()\n\nif logger.hasHandlers():\n logger.setLevel(LOG_LEVEL)\nelse:\n logging.basicConfig(level=LOG_LEVEL)\n\n# endregion\n\nssm = boto3.client('ssm')\nclient = boto3.client('secretsmanager')\ndynamodb_client = boto3.resource('dynamodb')\n\n\ndef mask_sensitive_data(event):\n # remove sensitive data from request object before logging\n keys_to_redact = [\"authorization\"]\n result = {}\n for k, v in event.items():\n if isinstance(v, dict):\n result[k] = mask_sensitive_data(v)\n elif k in keys_to_redact:\n result[k] = \"\"\n else:\n result[k] = v\n return result\n\n\ndef build_response(http_code, body):\n return {\n \"headers\": {\n # tell cloudfront and api gateway not to cache the response\n \"Cache-Control\": \"no-cache, no-store\",\n \"Content-Type\": \"application/json\",\n },\n \"statusCode\": http_code,\n \"body\": body,\n }\n\n\ndef 
lambda_handler(event, context):\n logger.info(mask_sensitive_data(event))\n\n try:\n\n parameter = ssm.get_parameter(\n Name='/archive/dynamodb-table', WithDecryption=True)\n body = json.loads(\n event[\"body\"]) if \"body\" in event else json.loads(event)\n archive_name = body[\"archive_name\"]\n\n hostname = body[\"hostname\"]\n mode = body[\"mode\"]\n port = body[\"port\"]\n username = body[\"username\"]\n password = body[\"password\"]\n database = body[\"database\"]\n database_engine = body[\"database_engine\"]\n table_details = body[\"tables\"]\n\n for table in table_details:\n table[\"count_validation\"] = {}\n table[\"string_validation\"] = {}\n table[\"number_validation\"] = {}\n\n archive_id = str(uuid.uuid4())\n create_secret_response = client.create_secret(\n Name=archive_id,\n Description=f'Secret for Archive ID {archive_id}',\n SecretString=password,\n ForceOverwriteReplicaSecret=True\n )\n \n table = dynamodb_client.Table(parameter['Parameter']['Value'])\n dt = datetime.datetime.now(pytz.UTC)\n\n table.put_item(\n Item={\n \"id\": archive_id,\n \"database_engine\": database_engine,\n \"archive_name\": archive_name,\n \"mode\": mode,\n \"hostname\": hostname,\n \"port\": port,\n \"username\": username,\n \"secret_arn\": create_secret_response[\"ARN\"],\n \"database\": database,\n \"oracle_owner\": body[\"oracle_owner\"] if 'oracle_owner' in body else \"\",\n \"table_details\": table_details,\n \"time_submitted\": str(dt),\n \"archive_status\": \"Archive Queue\",\n \"job_status\": \"\",\n \"jobs\": {},\n \"configuration\": {\"glue\":\n {\n \"glue_worker\": \"Standard\",\n \"glue_capacity\": 2\n }\n },\n \"counters\": {\"validation\":\n {\n \"validation_count\": 0,\n \"validation_completed\": 0,\n }\n },\n \"legal_hold\": False,\n \"expiration_status\": False,\n \"expiration_date\": \"\",\n \"delete_data\": False\n })\n\n response = {\"text\": \"Example response from authenticated api\"}\n return build_response(200, json.dumps(response))\n except Exception as ex:\n logger.error(traceback.format_exc())\n return build_response(500, \"Server Error\")\n\n\nif __name__ == \"__main__\":\n\n example_event = {}\n response = lambda_handler(example_event, {})\n print(json.dumps(response))\n","repo_name":"awslabs/simple-database-archival-solution","sub_path":"api/archive/create/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"30"}
+{"seq_id":"3434116449","text":"from logistic_regression import logistic_regression\nfrom cross_entropy_error import ave_cross_entropy_error\nfrom scipy.optimize import minimize\nfrom d_cee import d_cee\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef cee_solve(w_init,x,t):\n ans = minimize(ave_cross_entropy_error,w_init,args=(x,t),jac=d_cee,method=\"CG\")\n return ans.x\n\nw_init=[1,1]\n\n# input values\nx = np.load('x.npy')\n# observed values\nt = np.load('t.npy')\n\nw_ans=cee_solve(w_init,x,t)\nprint(\"w0:{0}\".format(w_ans[0]))\nprint(\"w1:{0}\".format(w_ans[1]))\n\n# plot the data\n#x,t\nfig, ax = plt.subplots(facecolor=\"w\")\nax.scatter(x,t)\n\nax.set_xticks(np.arange(35,45,1))\nax.set_yticks([0,0.5,1])\nax.grid(True,linestyle=':')\n\nlogi_x=np.linspace(30,50,1000)\nlogi_y=logistic_regression(w_ans,logi_x)\nplt.plot(logi_x,logi_y,color='black')\n\n# decision boundary (find where y = 0.5)\ni=np.min(np.where(logi_y>0.5))\nans=(logi_x[i]+logi_x[i+1])/2\nx_ans=[ans for _ in 
range(2)]\ny_ans=[-0.2,1.2]\nplt.plot(x_ans,y_ans,color='red',linestyle=':')\nprint(\"決定境界:x={0}\".format(ans))\n\nplt.xlim(34,45)\nplt.ylim(-0.2,1.2)\n\nplt.show()\n","repo_name":"WAT36/python","sub_path":"machine_learning/classification/cee_solve_plot.py","file_name":"cee_solve_plot.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"70046508244","text":"# EJERCICIO 37\n\nnumLegajo = int(input(\"Ingrese número de legajo: \"))\ncategoria = int(input(\"Ingrese categoría del empleado: \"))\nsalario = int(input(\"Ingrese salario del empleado: \"))\ncantEmpleados = 100\ni = 1\n\ntotalSalarios = 0\nmasCuarenta = 0 \nmenosQuince = 0\nsueldoAlto = salario\nsueldoBajo = salario\nlegajoAlto = numLegajo\nsueldos1 = 0\nsueldos2 = 0\nsueldos3 = 0\n\nwhile i <= cantEmpleados:\n totalSalarios += salario\n \n if salario > 40000:\n masCuarenta += 1\n elif salario < 15000 and categoria == 3:\n menosQuince += 1\n \n if salario > sueldoAlto:\n sueldoAlto = salario\n legajoAlto = numLegajo\n elif salario < sueldoBajo:\n sueldoBajo = salario\n \n if categoria == 1:\n sueldos1 += salario\n elif categoria == 2:\n sueldos2 += salario\n else:\n sueldos3 += salario\n \n if i != cantEmpleados:\n numLegajo = int(input(\"Ingrese número de legajo: \"))\n categoria = int(input(\"Ingrese categoría del empleado: \"))\n salario = int(input(\"Ingrese salario del empleado: \"))\n i += 1\n\nprint(\"Importe total de salarios pagados por la empresa:\", totalSalarios)\nprint(\"Cantidad de empleados que ganan más de $40000:\", masCuarenta)\nprint(\"Cantidad de empleados que ganan menos de $15000, cuya categoría sea 3:\", menosQuince)\nprint(\"Legajo del empleado que más gana:\", legajoAlto)\nprint(\"Sueldo más bajo:\", sueldoBajo)\nprint(\"Importe total de sueldos categoría 1:\", sueldos1)\nprint(\"Importe total de sueldos categoría 2:\", sueldos2)\nprint(\"Importe total de sueldos categoría 3:\", sueldos3)\nprint(\"Salario promedio:\", totalSalarios / cantEmpleados)","repo_name":"aguscoppe/ejercicios-python","sub_path":"TP_4_Estructura_iterativa/TP4_EJ37.py","file_name":"TP4_EJ37.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2977136562","text":"value=input(\"enter any number:\")\r\nwhile(True):\r\n if(value==\"quit\"):\r\n print(\"bye!!\")\r\n break\r\n number=int(value)\r\n k=number\r\n sum=0\r\n while(number!=0):\r\n rem=number%10\r\n number=number//10\r\n sum=sum*10+rem\r\n if(sum==k):\r\n print(\"it is palindrome\")\r\n else:\r\n print(\"not\")\r\n","repo_name":"Techie1212/new-projects","sub_path":"practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"7082047043","text":"'''\n * [922] Sort Array By Parity II\n\n https://leetcode.com/problems/sort-array-by-parity-ii/description/\n * algorithms\n * Easy (66.89%)\n * Source Code: 922.sort-array-by-parity-ii.py\n * Total Accepted: 35.6K\n * Total Submissions: 53.2K\n * Testcase Example: '[4,2,5,7]'\n Given an array A of non-negative integers, half of the integers in A are odd, and half of the integers are even.\n Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even, i is even.\n You may return any answer array that satisfies this condition.\n\n Example 1:\n Input: [4,2,5,7]\n Output: [4,5,2,7]\n 
Explanation: [4,7,2,5], [2,5,4,7], [2,7,4,5] would also have been accepted.\n Note:\n 2 <= A.length <= 20000\n A.length % 2 == 0\n 0 <= A[i] <= 1000\n\n Approach:\n Traverse list A with two pointers: i covers the even indices and j covers the odd indices.\n During the traversal each pointer stops at a position that violates the condition\n (an odd value at an even index, or an even value at an odd index);\n when that happens, swap the two values in the list.\n'''\n\nclass Solution(object):\n def sortArrayByParityII(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[int]\n \"\"\"\n j=1\n for i in range(0,len(A),2):\n if A[i]%2:\n while A[j]%2:\n j+=2\n A[i],A[j]=A[j],A[i]\n return A\n \n","repo_name":"tainenko/Leetcode2019","sub_path":"python/922.sort-array-by-parity-ii.py","file_name":"922.sort-array-by-parity-ii.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"73548806803","text":"import os\nimport csv\nimport zipfile\nfrom datetime import datetime, timedelta\n\nimport sqlalchemy as sa\n\nimport psycopg2\n\nfrom openpyxl import load_workbook\n\nimport pytz\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom django.utils.text import slugify\n\nfrom .table_mappers import *\n\nDB_CONN = 'postgresql://{USER}:{PASSWORD}@{HOST}:{PORT}/{NAME}'\n\nengine = sa.create_engine(DB_CONN.format(**settings.DATABASES['default']),\n convert_unicode=True,\n server_side_cursors=True)\n\n# Field mappings are defined in `table_mappers.py`\nMAPPER_LOOKUP = {\n 'candidate': CANDIDATE,\n 'pac': PAC,\n 'filing': FILING,\n 'filingperiod': FILING_PERIOD,\n 'transaction': CONTRIB_EXP,\n 'transactiontype': CONTRIB_EXP_TYPE,\n 'campaign': CAMPAIGN,\n 'officetype': OFFICE_TYPE,\n 'office': OFFICE,\n 'campaignstatus': CAMPAIGN_STATUS,\n 'county': COUNTY,\n 'district': DISTRICT,\n 'division': DIVISION,\n 'electionseason': ELECTION_SEASON,\n 'entity': ENTITY,\n 'entitytype': ENTITY_TYPE,\n 'filingtype': FILING_TYPE,\n 'loan': LOAN,\n 'loantransaction': LOAN_TRANSACTION,\n 'loantransactiontype': LOAN_TRANSACTION_TYPE,\n 'politicalparty': POLITICAL_PARTY,\n 'specialevent': SPECIAL_EVENT,\n 'treasurer': TREASURER,\n 'address': ADDRESS,\n 'contacttype': CONTACT_TYPE,\n 'contact': CONTACT,\n 'state': STATE,\n 'lobbyist': LOBBYIST,\n 'lobbyistregistration': LOBBYIST_REGISTRATION,\n 'lobbyistemployer': LOBBYIST_EMPLOYER,\n 'organization': ORGANIZATION,\n 'lobbyistfilingperiod': LOBBYIST_FILING_PERIOD,\n 'lobbyisttransaction': LOBBYIST_TRANSACTION,\n 'lobbyisttransactiontype': LOBBYIST_TRANSACTION_TYPE,\n 'lobbyistbundlingdisclosure': LOBBYIST_BUNDLING_DISCLOSURE,\n 'lobbyistbundlingdisclosurecontributor': LOBBYIST_BUNDLING_DISCLOSURE_CONTRIBUTOR,\n 'lobbyistreport': LOBBYIST_REPORT,\n 'lobbyistspecialevent': LOBBYIST_SPECIAL_EVENT,\n}\n\nFILE_LOOKUP = {\n 'campaign': 'Cam_Campaign.csv',\n 'transaction': 'Cam_ContribExpenditure.zip',\n 'transactiontype': 'Cam_ContribExpenditureType.xlsx',\n 'office': 'Cam_ElectionOffice.xlsx',\n 'filingperiod': 'Cam_FilingPeriod.csv',\n 'officetype': 'Cam_OfficeType.xlsx',\n 'filing': 'Cam_Report.csv',\n 'candidate': 'Cam_Candidate.csv',\n 'pac': 'Cam_PoliticalActionCommittee.csv',\n 'campaignstatus': 'Cam_CampaignStatus.xlsx',\n 'county': 'Cam_County.xlsx',\n 'district': 'Cam_District.xlsx',\n 'division': 'Cam_Division.xlsx',\n 'electionseason': 'Cam_ElectionSeason.xlsx',\n 'entity': 'Cam_Entity.xlsx',\n 'entitytype': 'Cam_EntityType.xlsx',\n 'filingtype': 'Cam_FilingPeriodType.xlsx',\n 'loan': 'Cam_Loan.csv',\n 'loantransaction': 'Cam_LoanTransaction.csv',\n 'loantransactiontype': 'Cam_LoanTransactionType.xlsx',\n 'politicalparty': 
'Cam_PoliticalParty.xlsx',\n 'specialevent': 'Cam_SpecialEvent.xlsx',\n 'treasurer': 'Cam_Treasurer.xlsx',\n 'address': 'Cam_Address.csv',\n 'contacttype': 'Cam_ContactType.xlsx',\n 'contact': 'Cam_Contact.csv',\n 'state': 'States.csv',\n 'lobbyist': 'Cam_Lobbyist.csv',\n 'lobbyistregistration': 'Cam_LobbystRegistration.xlsx',\n 'lobbyistemployer': 'Cam_LobbyistEmployer.csv',\n 'organization': 'Cam_Organization.xlsx',\n 'lobbyistfilingperiod': 'Cam_FilingPeriodLobbyist.csv',\n 'lobbyisttransaction': 'Cam_ContribExpenditureLobbyist.csv',\n 'lobbyisttransactiontype': 'Cam_ContribExpenditureLobbyistType.csv',\n 'lobbyistbundlingdisclosure': 'Cam_BundlingDisclosureLobbyist.csv',\n 'lobbyistbundlingdisclosurecontributor': 'Cam_BundlingDisclosureLobbyistContributor.csv',\n 'lobbyistreport': 'Cam_ReportLobbyist.csv',\n 'lobbyistspecialevent': 'Cam_SpecialEventLobbyist.csv',\n}\n\nclass Command(BaseCommand):\n help = 'Import New Mexico Campaign Finance data'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--entity-types',\n dest='entity_types',\n default='all',\n help='Comma separated list of entity types to import'\n )\n\n parser.add_argument(\n '--add-aggregates',\n dest='add_aggregates',\n action='store_true',\n help='Just add the aggregates'\n )\n\n def handle(self, *args, **options):\n\n self.connection = engine.connect()\n\n if options['add_aggregates']:\n self.makeTransactionAggregates()\n self.stdout.write(self.style.SUCCESS('Aggregates complete!'))\n return\n\n entity_types = options['entity_types'].split(',')\n\n if entity_types == ['all']:\n entity_types = FILE_LOOKUP.keys()\n\n self.makeETLTracker()\n\n for entity_type in entity_types:\n self.doETL(entity_type)\n\n self.updateTracker(entity_type)\n\n self.addTransactionFullName()\n self.addLoanFullName()\n self.addCandidateFullName()\n self.addContactFullName()\n self.addTreasurerFullName()\n\n # Make or refresh materialized views\n self.makeTransactionAggregates()\n self.stdout.write(self.style.SUCCESS('Made transaction aggregate views'))\n\n self.stdout.write(self.style.SUCCESS('Import complete!'.format(self.entity_type)))\n\n def doETL(self, entity_type):\n self.entity_type = entity_type\n file_name = FILE_LOOKUP.get(entity_type)\n\n if file_name:\n\n self.stdout.write(self.style.SUCCESS('Importing {}'.format(file_name)))\n\n self.file_path = 'data/{}'.format(file_name)\n\n ftp_file = os.path.join(settings.FTP_DIRECTORY, file_name)\n\n if os.path.exists(ftp_file):\n self.file_path = ftp_file\n\n self.encoding = 'utf-8'\n if entity_type in ['address', 'contact', 'campaign']:\n self.encoding = 'windows-1252'\n\n self.table_mapper = MAPPER_LOOKUP[self.entity_type]\n\n self.django_table = 'camp_fin_{}'.format(self.entity_type)\n self.raw_pk_col = [k for k, v in self.table_mapper.items() \\\n if v['field'] == 'id'][0]\n\n if self.file_path.endswith('xlsx'):\n self.convertXLSX()\n\n if self.file_path.endswith('zip'):\n self.unzipFile()\n\n self.makeRawTable()\n count = self.importRawData()\n\n self.stdout.write(self.style.SUCCESS('Found {0} records in {1}'.format(count, file_name)))\n\n count = self.updateExistingRecords()\n\n self.stdout.write(self.style.SUCCESS('Updated {0} records in {1}'.format(count, self.django_table)))\n\n self.makeNewTable()\n count = self.findNewRecords()\n\n self.addNewRecords()\n\n self.stdout.write(self.style.SUCCESS('Inserted {0} new records into {1}'.format(count, self.django_table)))\n\n # This should only be necessary until we get the actual entity table\n\n if self.entity_type in 
['candidate', 'pac', 'filing']:\n self.populateEntityTable()\n self.stdout.write(self.style.SUCCESS('Populated entity table for {}'.format(self.entity_type)))\n\n if self.entity_type in ['candidate', 'pac', 'lobbyist', 'organization']:\n self.populateSlugField()\n self.stdout.write(self.style.SUCCESS('Populated slug fields for {}'.format(self.entity_type)))\n\n if self.entity_type == 'loan':\n self.makeLoanBalanceView()\n self.stdout.write(self.style.SUCCESS('Made loan balance view'))\n\n self.stdout.write(self.style.SUCCESS('\\n'))\n\n else:\n self.stdout.write(self.style.ERROR('\"{}\" is not a valid entity'.format(self.entity_type)))\n self.stdout.write(self.style.SUCCESS('\\n'))\n\n def makeTransactionAggregates(self):\n\n for interval in ['day', 'week', 'month']:\n try:\n self.executeTransaction('''\n REFRESH MATERIALIZED VIEW contributions_by_{}\n '''.format(interval))\n except sa.exc.ProgrammingError:\n view = '''\n CREATE MATERIALIZED VIEW contributions_by_{0} AS (\n SELECT\n SUM(amount) AS amount,\n entity_id,\n {0}\n FROM (\n SELECT\n SUM(t.amount) AS amount,\n f.entity_id,\n MAX(date_trunc('{0}', t.received_date)) AS {0}\n FROM camp_fin_transaction AS t\n JOIN camp_fin_transactiontype AS tt\n ON t.transaction_type_id = tt.id\n JOIN camp_fin_filing AS f\n ON t.filing_id = f.id\n WHERE tt.contribution = TRUE\n AND (tt.description = 'Monetary contribution' or\n tt.description = 'Anonymous Contribution')\n GROUP BY f.entity_id, date_trunc('{0}', t.received_date)\n UNION\n SELECT\n SUM(l.amount) AS amount,\n f.entity_id,\n MAX(date_trunc('{0}', l.received_date)) AS {0}\n FROM camp_fin_loan AS l\n JOIN camp_fin_filing AS f\n ON l.filing_id = f.id\n GROUP BY f.entity_id, date_trunc('{0}', l.received_date)\n ) AS s\n GROUP BY entity_id, {0}\n )\n '''.format(interval)\n\n self.executeTransaction(view)\n\n try:\n self.executeTransaction('''\n REFRESH MATERIALIZED VIEW expenditures_by_{}\n '''.format(interval))\n except sa.exc.ProgrammingError:\n view = '''\n CREATE MATERIALIZED VIEW expenditures_by_{0} AS (\n SELECT\n entity_id,\n SUM(amount) AS amount,\n {0}\n FROM (\n SELECT\n filing.entity_id,\n SUM(e.amount) AS amount,\n date_trunc('{0}', e.received_date) AS {0}\n FROM camp_fin_transaction AS e\n JOIN camp_fin_transactiontype AS tt\n ON e.transaction_type_id = tt.id\n JOIN camp_fin_filing AS filing\n ON e.filing_id = filing.id\n JOIN camp_fin_filingperiod AS fp\n ON filing.filing_period_id = fp.id\n WHERE tt.contribution = FALSE\n AND fp.filing_date >= '2010-01-01'\n GROUP BY filing.entity_id, date_trunc('{0}', e.received_date)\n\n UNION\n\n SELECT\n filing.entity_id,\n SUM(lt.amount) AS amount,\n date_trunc('{0}', lt.transaction_date) AS {0}\n FROM camp_fin_loantransaction AS lt\n JOIN camp_fin_loantransactiontype AS ltt\n ON lt.transaction_type_id = ltt.id\n JOIN camp_fin_filing AS filing\n ON lt.filing_id = filing.id\n JOIN camp_fin_filingperiod AS fp\n ON filing.filing_period_id = fp.id\n WHERE ltt.description = 'Payment'\n AND fp.filing_date >= '2010-01-01'\n GROUP BY filing.entity_id, date_trunc('{0}', lt.transaction_date)\n ) AS s\n GROUP BY entity_id, {0}\n )\n '''.format(interval)\n\n self.executeTransaction(view)\n\n\n def makeETLTracker(self):\n create = '''\n CREATE TABLE IF NOT EXISTS etl_tracker (\n id SERIAL,\n entity_type VARCHAR,\n last_update timestamp with time zone,\n PRIMARY KEY (id)\n )\n '''\n self.executeTransaction(create)\n\n def updateTracker(self, entity_type):\n update = '''\n INSERT INTO etl_tracker (\n entity_type,\n last_update\n ) VALUES (\n 
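-- entity_type is bound at execution time via sqlalchemy's text() parameters\n 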
:entity_type,\n NOW()\n )\n '''\n self.executeTransaction(sa.text(update),\n entity_type=entity_type)\n\n def loadLoanTransactions(self):\n timezone = pytz.timezone(settings.TIME_ZONE)\n\n transactions_updated = self.connection.execute('''\n SELECT MAX(last_update) AS last_update\n FROM etl_tracker\n WHERE entity_type = 'loantransaction'\n ''').first().last_update\n\n if transactions_updated:\n an_hour_ago = timezone.localize(datetime.now()) - timedelta(hours=1)\n if transactions_updated < an_hour_ago:\n self.doETL('loantransaction')\n else:\n self.doETL('loantransaction')\n\n def makeAllExpenditureView(self):\n self.loadLoanTransactions()\n\n view = '''\n SELECT\n transaction.filing_id,\n transaction.id,\n transaction.amount,\n transaction.full_name,\n transaction_type.description,\n transaction_type.contribution\n FROM camp_fin_transaction AS transaction\n JOIN camp_fin_transactiontype AS transaction_type\n ON transaction.transaction_type_id = transaction_type.id\n WHERE transaction_type.contribution = FALSE\n UNION\n SELECT\n loan_transaction.filing_id,\n loan_transaction.id,\n loan_transaction.amount,\n loan.full_name,\n loan_transaction_type.description,\n FALSE as contribution\n FROM camp_fin_loantransaction AS loan_transaction\n JOIN camp_fin_loantransactiontype AS loan_transaction_type\n ON loan_transaction.transaction_type_id = loan_transaction_type.id\n JOIN camp_fin_loan AS loan\n ON loan_transaction.loan_id = loan.id\n WHERE loan_transaction_type.description = 'Payment'\n '''\n\n def makeLoanBalanceView(self):\n self.loadLoanTransactions()\n\n try:\n self.executeTransaction('''\n REFRESH MATERIALIZED VIEW current_loan_status\n ''', raise_exc=True)\n except sa.exc.ProgrammingError:\n self.executeTransaction('''\n CREATE MATERIALIZED VIEW current_loan_status AS (\n SELECT\n loan.id AS loan_id,\n MAX(loan.amount) AS loan_amount,\n SUM(loantrans.amount) AS payments_made,\n (MAX(loan.amount) - SUM(loantrans.amount)) AS outstanding_balance\n FROM camp_fin_loan AS loan\n JOIN camp_fin_loantransaction AS loantrans\n ON loan.id = loantrans.loan_id\n GROUP BY loan.id\n HAVING ((MAX(loan.amount::numeric::money) - SUM(loantrans.amount::numeric::money)) > 0::money)\n )\n ''')\n\n def addTransactionFullName(self):\n update = '''\n UPDATE camp_fin_transaction SET\n full_name = s.full_name\n FROM (\n SELECT\n CASE WHEN\n company_name IS NULL OR TRIM(company_name) = ''\n THEN\n TRIM(concat_ws(' ',\n name_prefix,\n first_name,\n middle_name,\n last_name,\n suffix))\n ELSE\n company_name\n END AS full_name,\n t.id\n FROM camp_fin_transaction AS t\n LEFT JOIN change_transaction AS c\n ON t.id = c.id\n LEFT JOIN new_transaction AS n\n ON t.id = n.id\n WHERE c.id IS NOT NULL\n OR n.id IS NOT NULL\n ) AS s\n WHERE camp_fin_transaction.id = s.id\n '''\n\n self.executeTransaction(update)\n\n def addLoanFullName(self):\n update = '''\n UPDATE camp_fin_loan SET\n full_name = s.full_name\n FROM (\n SELECT\n CASE WHEN\n company_name IS NULL OR TRIM(company_name) = ''\n THEN\n TRIM(concat_ws(' ',\n name_prefix,\n first_name,\n middle_name,\n last_name,\n suffix))\n ELSE\n company_name\n END AS full_name,\n t.id\n FROM camp_fin_loan AS t\n LEFT JOIN change_loan AS c\n ON t.id = c.id\n LEFT JOIN new_loan AS n\n ON t.id = n.id\n WHERE c.id IS NOT NULL\n OR n.id IS NOT NULL\n ) AS s\n WHERE camp_fin_loan.id = s.id\n '''\n\n self.executeTransaction(update)\n\n def addTreasurerFullName(self):\n update = '''\n UPDATE camp_fin_treasurer SET\n full_name = s.full_name\n FROM (\n SELECT\n TRIM(concat_ws(' ',\n 
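-- concat_ws skips NULL arguments, so missing name parts collapse without extra spaces\n 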
prefix,\n first_name,\n middle_name,\n last_name,\n suffix)) AS full_name,\n t.id\n FROM camp_fin_treasurer AS t\n LEFT JOIN change_treasurer AS c\n ON t.id = c.id\n LEFT JOIN new_treasurer AS n\n ON t.id = n.id\n WHERE c.id IS NOT NULL\n OR n.id IS NOT NULL\n ) AS s\n WHERE camp_fin_treasurer.id = s.id\n '''\n\n self.executeTransaction(update)\n\n def addCandidateFullName(self):\n update = '''\n UPDATE camp_fin_candidate SET\n full_name = s.full_name\n FROM (\n SELECT\n TRIM(concat_ws(' ',\n prefix,\n first_name,\n middle_name,\n last_name,\n suffix)) AS full_name,\n t.id\n FROM camp_fin_candidate AS t\n LEFT JOIN change_candidate AS c\n ON t.id = c.id\n LEFT JOIN new_candidate AS n\n ON t.id = n.id\n WHERE c.id IS NOT NULL\n OR n.id IS NOT NULL\n ) AS s\n WHERE camp_fin_candidate.id = s.id\n '''\n\n self.executeTransaction(update)\n\n def addContactFullName(self):\n update = '''\n UPDATE camp_fin_contact SET\n full_name = s.full_name\n FROM (\n SELECT\n CASE WHEN\n company_name IS NULL OR TRIM(company_name) = ''\n THEN\n TRIM(concat_ws(' ',\n prefix,\n first_name,\n middle_name,\n last_name,\n suffix))\n ELSE\n company_name\n END AS full_name,\n t.id\n FROM camp_fin_contact AS t\n LEFT JOIN change_contact AS c\n ON t.id = c.id\n LEFT JOIN new_contact AS n\n ON t.id = n.id\n WHERE c.id IS NOT NULL\n OR n.id IS NOT NULL\n ) AS s\n WHERE camp_fin_contact.id = s.id\n '''\n\n self.executeTransaction(update)\n\n\n def unzipFile(self):\n file_name = self.file_path.split('/')[-1].rsplit('.', 1)[0]\n file_name = '{}.csv'.format(file_name)\n with zipfile.ZipFile(self.file_path) as zf:\n zf.extract(file_name, path='data')\n\n self.file_path = 'data/{}'.format(file_name)\n\n def convertXLSX(self):\n wb = load_workbook(self.file_path, read_only=True)\n sheets = wb.worksheets\n saved_pks = []\n\n header_row = next(sheets[0].rows)\n header = [r.value for r in header_row]\n\n base_name = os.path.basename(self.file_path.rsplit('.', 1)[0])\n csv_path = '{}.csv'.format(os.path.join('data', base_name))\n\n with open(csv_path, 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n\n for sheet in sheets:\n rows = sheet.rows\n next(rows) # Strip header row\n for row in rows:\n row_values = [r.value for r in row]\n header_lower = [v.lower() for v in header]\n\n row_dict = dict(zip(header_lower, row_values))\n row_pk = row_dict[self.raw_pk_col]\n\n if row_pk not in saved_pks:\n writer.writerow(row_values)\n saved_pks.append(row_pk)\n\n self.file_path = csv_path\n\n def populateEntityTable(self):\n entities = '''\n INSERT INTO camp_fin_entity\n SELECT DISTINCT d.entity_id\n FROM {table} AS d\n LEFT JOIN camp_fin_entity AS e\n ON d.entity_id = e.id\n WHERE e.id IS NULL\n '''.format(table=self.django_table)\n\n self.executeTransaction(entities)\n\n def populateSlugField(self):\n if self.entity_type in ['candidate', 'lobbyist']:\n name_components = [\n 'first_name',\n 'last_name',\n ]\n\n selects = []\n\n for component in name_components:\n select = \"COALESCE({}, '')\".format(component)\n selects.append(select)\n\n name_select = \" || ' ' || \".join(selects)\n\n elif self.entity_type in ['pac', 'organization']:\n name_select = 'name'\n\n slugify = '''\n UPDATE {django_table} SET\n slug = s.slug\n FROM (\n SELECT\n regexp_replace(TRANSLATE(REPLACE(LOWER({name_select}), ' ', '-'), 'áàâãäåāăąÁÂÃÄÅĀĂĄèééêëēĕėęěĒĔĖĘĚìíîïìĩīĭÌÍÎÏÌĨĪĬóôõöōŏőÒÓÔÕÖŌŎŐùúûüũūŭůÙÚÛÜŨŪŬŮ','aaaaaaaaaaaaaaaaaeeeeeeeeeeeeeeeiiiiiiiiiiiiiiiiooooooooooooooouuuuuuuuuuuuuuuu'), '[^\\w -]', '', 'g') || '-' || id::varchar as slug,\n id\n FROM 
{django_table}\n ) AS s\n WHERE {django_table}.id = s.id\n '''.format(django_table=self.django_table,\n name_select=name_select)\n\n self.executeTransaction(slugify)\n\n def addNewRecords(self):\n\n select_fields = ', '.join(['raw.\"{0}\"::{2} AS {1}'.format(k,v['field'], v['data_type']) for k,v in \\\n self.table_mapper.items()])\n\n dat_fields = ', '.join([c['field'] for c in self.table_mapper.values()])\n\n insert_new = '''\n INSERT INTO {django_table} (\n {dat_fields}\n )\n SELECT {select_fields}\n FROM raw_{entity_type} AS raw\n JOIN new_{entity_type} AS new\n ON raw.\"{raw_pk_col}\" = new.id\n '''.format(django_table=self.django_table,\n dat_fields=dat_fields,\n select_fields=select_fields,\n entity_type=self.entity_type,\n raw_pk_col=self.raw_pk_col)\n\n self.executeTransaction(insert_new)\n\n def updateExistingRecords(self):\n changes = '''\n CREATE TABLE change_{} (\n id BIGINT,\n PRIMARY KEY (id)\n )\n '''.format(self.entity_type)\n\n self.executeTransaction('DROP TABLE IF EXISTS change_{}'.format(self.entity_type))\n self.executeTransaction(changes)\n\n wheres = []\n\n for raw_col, mapping in self.table_mapper.items():\n condition = '''\n ((raw.\"{0}\" IS NOT NULL OR dat.{1} IS NOT NULL) AND raw.\"{0}\"::{2} <> dat.{1})\n '''.format(raw_col, mapping['field'], mapping['data_type'])\n wheres.append(condition)\n\n where_clause = ' OR '.join(wheres)\n\n find_changes = '''\n INSERT INTO change_{entity_type}\n SELECT raw.\"{raw_pk_col}\" AS id\n FROM raw_{entity_type} AS raw\n JOIN {django_table} AS dat\n ON raw.\"{raw_pk_col}\" = dat.id\n WHERE {where_clause}\n '''.format(entity_type=self.entity_type,\n raw_pk_col=self.raw_pk_col,\n django_table=self.django_table,\n where_clause=where_clause)\n\n self.executeTransaction(find_changes)\n\n set_fields = ', '.join(['{1}=s.\"{0}\"::{2}'.format(k,v['field'], v['data_type']) for k,v in \\\n self.table_mapper.items()])\n\n raw_fields = ', '.join(['raw.\"{}\"'.format(c) for c in \\\n self.table_mapper.keys()])\n update_dat = '''\n UPDATE {django_table} SET\n {set_fields}\n FROM (\n SELECT {raw_fields}\n FROM raw_{entity_type} AS raw\n JOIN change_{entity_type} AS change\n ON raw.\"{raw_pk_col}\" = change.id\n ) AS s\n WHERE {django_table}.id = s.\"{raw_pk_col}\"\n '''.format(django_table=self.django_table,\n set_fields=set_fields,\n raw_fields=raw_fields,\n entity_type=self.entity_type,\n raw_pk_col=self.raw_pk_col)\n\n self.executeTransaction(update_dat)\n\n change_count = self.connection.execute('SELECT COUNT(*) AS count FROM change_{}'.format(self.entity_type))\n\n return change_count.first().count\n\n def findNewRecords(self):\n\n find = '''\n INSERT INTO new_{entity_type}\n SELECT raw.\"{raw_pk_col}\" AS id\n FROM raw_{entity_type} AS raw\n LEFT JOIN {django_table} AS dat\n ON raw.\"{raw_pk_col}\" = dat.id\n WHERE dat.id IS NULL\n '''.format(entity_type=self.entity_type,\n django_table=self.django_table,\n raw_pk_col=self.raw_pk_col)\n\n self.executeTransaction(find)\n\n new_count = self.connection.execute('SELECT COUNT(*) AS count FROM new_{}'.format(self.entity_type))\n return new_count.first().count\n\n def makeNewTable(self):\n create = '''\n CREATE TABLE new_{0} (\n id BIGINT,\n PRIMARY KEY (id)\n )\n '''.format(self.entity_type)\n\n self.executeTransaction('DROP TABLE IF EXISTS new_{0}'.format(self.entity_type))\n self.executeTransaction(create)\n\n def makeRawTable(self):\n\n with open(self.file_path, 'r', encoding=self.encoding) as f:\n reader = csv.reader(f)\n fields = next(reader)\n\n fields = ', '.join(['\"{}\" 
VARCHAR'.format(f.lower()) for f in fields \\\n if f.lower() != self.raw_pk_col])\n\n create_table = '''\n CREATE TABLE raw_{0} (\n {1} BIGINT,\n {2},\n PRIMARY KEY ({1})\n )\n '''.format(self.entity_type,\n self.raw_pk_col,\n fields)\n\n\n self.executeTransaction('DROP TABLE IF EXISTS raw_{0}'.format(self.entity_type))\n self.executeTransaction(create_table)\n\n def importRawData(self):\n\n DB_CONN_STR = DB_CONN.format(**settings.DATABASES['default'])\n\n copy_st = '''\n COPY raw_{0} FROM STDIN WITH CSV HEADER\n '''.format(self.entity_type)\n\n with open(self.file_path, 'r', encoding=self.encoding) as f:\n with psycopg2.connect(DB_CONN_STR) as conn:\n with conn.cursor() as curs:\n try:\n curs.copy_expert(copy_st, f)\n except psycopg2.IntegrityError as e:\n self.stderr.write(str(e))\n conn.rollback()\n\n self.executeTransaction('''\n ALTER TABLE raw_{0} ADD PRIMARY KEY (\"{1}\")\n '''.format(self.entity_type, self.raw_pk_col), raise_exc=False)\n\n import_count = self.connection.execute('SELECT COUNT(*) AS count FROM raw_{}'.format(self.entity_type))\n\n return import_count.first().count\n\n def executeTransaction(self, query, *args, **kwargs):\n trans = self.connection.begin()\n\n raise_exc = kwargs.get('raise_exc', True)\n\n try:\n self.connection.execute(\"SET local timezone to 'America/Denver'\")\n if kwargs:\n self.connection.execute(query, **kwargs)\n else:\n self.connection.execute(query, *args)\n trans.commit()\n except sa.exc.ProgrammingError as e:\n # TODO: Make some kind of logger\n # logger.error(e, exc_info=True)\n trans.rollback()\n if raise_exc:\n raise e\n","repo_name":"datamade/openness-project-nmid","sub_path":"camp_fin/management/commands/import_data.py","file_name":"import_data.py","file_ext":"py","file_size_in_byte":29780,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"71436078164","text":"from datetime import datetime, timedelta, timezone\nimport hashlib\nimport os\nfrom Crypto.Cipher import AES\nimport requests,json\nimport time\n\n\n#bytes to hex\ndef b2h(b):\n return ''.join([hex(b)[2:].zfill(2) for b in b])\n\n#hex to bytes\ndef h2b(h):\n return bytes.fromhex(h)\n\ndef AES_Encrypt(text):\n ckey = '23DbtQHR2UMbH6mJ'\n # PKCS7-style padding scheme\n BS = len(ckey)\n pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)\n cryptor = AES.new(ckey.encode(\"utf8\"),AES.MODE_ECB)\n ciphertext = cryptor.encrypt(bytes(pad(text), encoding=\"utf8\"))\n return b2h(ciphertext).upper()\n\n\ndef AES_Decrypt(text):\n ckey = '23DbtQHR2UMbH6mJ'\n unpad = lambda s: s[0:-ord(s[-1:])]\n decode = h2b(text)\n cryptor = AES.new(ckey.encode(\"utf8\"),AES.MODE_ECB)\n return unpad(cryptor.decrypt(decode)).decode('utf-8')\n\ndef getMD5(text):\n m = hashlib.md5()\n m.update(text.encode('utf-8'))\n return m.hexdigest()\n\n\n##################################\n## ###\n## Initialize parameters ###\n## ###\n#################################\n\n\n# Basic account info\naccount:str = '1111111111111'# phone number\npassword:str = 'xxx'# password\naddress = \"省份 · 地级市 · xx小区\"# change this to your own address (Province · City · Neighborhood)\nprovince = \"所在省份\"# your province\ncity = \"所在城市\"# your city\n# latitude/longitude: best filled in from a packet capture, but optional\nlatitude =\"\"\nlongitude=\"\"\n\n\n\n\n\nAccept_Language=\"zh-CN,zh;q=0.8\"\nuser_agent_value=\"Mozilla/5.0 (Linux; U; Android 10; zh-cn; GLK-AL00 Build/HUAWEIGLK-AL00) AppleWebKit/533.1 (KHTML, like Gecko) Version/5.0 Mobile Safari/533.1\"\nContent_Type=\"application/json; 
charset=UTF-8\"\nHost=\"api.moguding.net:9000\"\nAccept_Encoding=\"\"\nCache_Control=\"no-cache\"\n\n\nstate = \"\"\n\n# decide morning/evening check-in using the current time in UTC+8 (Beijing)\nif datetime.now(timezone(timedelta(hours=8))).hour < 12:\n state = \"START\"\nelse:\n state = \"END\"\n\nplanIdURL = 'https://api.moguding.net:9000/practice/plan/v3/getPlanByStu'\nsignInURL = \"https://api.moguding.net:9000/attendence/clock/v2/save\"\n# latest v3 login API, needs to be used together with the clock endpoint\nloginURL = \"https://api.moguding.net:9000/session/user/v3/login\"\n\nloginJson = {\"password\":AES_Encrypt(password),\"t\":AES_Encrypt(str(int(round(time.time()*1000)))),\"phone\":AES_Encrypt(account),\"loginType\":\"android\",\"uuid\":\"\"}\nprint(\"loginJson = \",loginJson,\"\\n \\n \\n\")\nlogs:str = \"loginJson = \"+str(loginJson)+\"\\n \\n \\n\"\nres = requests.post(loginURL,\n headers={\"Authorization\":\"\",\"roleKey\":\"\",\"Sign\":\"\",\"Accept-Language\":Accept_Language,\"User-Agent\":user_agent_value,\"Content-Type\":Content_Type,\"Host\":Host,\"Accept-Encoding\":Accept_Encoding,\"Cache-Control\":Cache_Control\n },\n json=loginJson)\n\nprint(\"loginURL response = \",res.text,\"\\n \\n \\n\")\nlogs += \"loginURL response = \"+str(res.text)+\"\\n \\n \\n\"\n\n\nUserId = res.json().get(\"data\").get(\"userId\")\nToken = res.json().get(\"data\").get(\"token\")\nsign = getMD5(str(UserId)+\"student\" + \"3478cbbc33f84bd00d75d7dfa69e0daa\")\n\nprint(\"UserId = \",UserId,\"\\n \")\nprint(\"Token = \",Token,\"\\n \")\nprint(\"sign = \",sign,\"\\n \\n \\n\")\nlogs += \"UserId = \"+str(UserId)+\"\\n \"\nlogs += \"Token = \"+str(Token)+\"\\n \"\nlogs += \"sign = \"+str(sign)+\"\\n \\n \\n\"\n\n\nres_plan = requests.post(planIdURL,headers={\"Authorization\":Token,\"roleKey\":\"student\",\"Sign\":sign,\"Accept-Language\":Accept_Language,\"User-Agent\":user_agent_value,\"Content-Type\":Content_Type,\"Host\":Host,\"Accept-Encoding\":Accept_Encoding,\"Cache-Control\":Cache_Control\n },json={\"state\":\"\"})\n\nprint(\"planIdURL response = \",res_plan.text,\"\\n \\n \\n\")\nlogs += \"planIdURL response = \"+str(res_plan.text)+\"\\n \\n \\n\"\n\nplanID= json.loads(res_plan.text)['data'][0]['planId']\nnewSign = getMD5(\"Android\"+state+planID+UserId+address+\"3478cbbc33f84bd00d75d7dfa69e0daa\")\nSignInJson = {\n \"country\" : \"中国\",\n \"address\" : address,\n \"province\" : province,\n \"t\": AES_Encrypt(str(int(round(time.time()*1000)))),\n \"city\" : city,\n \"latitude\" : latitude,\n \"description\" : \"\",\n \"planId\":planID,\n \"type\":state,\n \"device\" : \"Android\",\n \"longitude\" : longitude\n}\n\nprint(\"planID = \",planID,\"\\n \")\nprint(\"newSign = \",newSign,\"\\n \")\nprint(\"SignInJson = \",SignInJson,\"\\n \\n \\n\")\nlogs+= \"planID = \"+str(planID)+\"\\n \"\nlogs+= \"newSign = \"+str(newSign)+\"\\n \"\nlogs+= \"SignInJson = \"+str(SignInJson)+\"\\n \\n \\n\"\n\n\nres_login = requests.post(signInURL,\nheaders={\"Authorization\":Token,\"roleKey\":\"student\",\"Sign\":newSign,\"Accept-Language\":Accept_Language,\n\"User-Agent\":user_agent_value,\"Content-Type\":Content_Type,\"Host\":Host,\"Accept-Encoding\":Accept_Encoding,\n\"Cache-Control\":Cache_Control\n },\njson=SignInJson\n)\nprint(res_login.text)\nlogs+=res_login.text\nlogs+=\"\\n \\n \\n\"\nif not os.path.exists(\"./logs\"):\n os.mkdir(\"./logs\")\nfile = 
open(\"./logs/\"+str(time.strftime(\"20%y-%m-%d\"))+\".txt\",\"a\")\nfile.write(logs)\nfile.close()","repo_name":"JasperGan-smile/MushroomCheckin","sub_path":"MushroomCheckin.py","file_name":"MushroomCheckin.py","file_ext":"py","file_size_in_byte":4872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"43759013368","text":"def guessing_numbers():\n print(\"Think of a number from 0 to 1000 and I will guess it in at most 10 tries\")\n print(\"After each guess, answer 'too small', 'too big' or 'you win'\")\n print(\"Press enter to start\")\n input()\n min_numb = 0\n max_numb = 1000\n guess_numb = \"\"\n while guess_numb != \"you win\":\n # binary search: always guess the middle of the remaining range\n guess = int((max_numb - min_numb) // 2) + min_numb\n print(f\"My guess is {guess}\")\n guess_numb = input().lower()\n if guess_numb == \"too small\":\n min_numb = guess\n elif guess_numb == \"too big\":\n max_numb = guess\n print(\"You win\")\n\n\nguessing_numbers()\n","repo_name":"matantkowiak/guessing-numbers-by-computer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"34059914559","text":"from hashlib import md5\n\nclass Node:\n def __init__(self, key, value) -> None:\n self.key = key\n self.value = value\n self.next = None\n \nclass HashTable:\n def __init__(self) -> None:\n self.capacity = 10\n self.buckets = [None] * self.capacity\n \n @staticmethod\n def count_ind(value: str):\n # the string must be encoded before hashing\n value = value.encode('utf-8')\n # md5 is the hash type\n hashed = md5(value)\n # read the hex digest as an int, otherwise \"unsupported operand type for %: '_hashlib.HASH' and 'int'\" is raised\n return int(hashed.hexdigest(), 16) % 10\n \n def insert(self, key: str, value):\n num = self.count_ind(key)\n if not self.buckets[num]:\n self.buckets[num] = Node(key, value)\n else:\n node = self.buckets[num]\n while node.next:\n node = node.next\n node.next = Node(key, value)\n\n def search(self, key):\n num = self.count_ind(key)\n node = self.buckets[num]\n res = []\n while node:\n if node.key == key:\n res.append(node.value)\n node = node.next\n return res\n\n\n\n# print(HashTable().count_ind('asdf'))\nhash_table = HashTable()\nhash_table.insert('пк', {'name': 'мак про', 'price': 5000})\nhash_table.insert('смартфон', {'name': 'айфон', 'price': 2000})\nhash_table.insert('ноутбук', {'name': 'honor', 'price': 3000})\n# print(hash_table.buckets)\nsmartphones = hash_table.search('смартфон')\nprint(smartphones) # [{'name': 'айфон', 'price': 2000}]\n","repo_name":"wangnyang/my_files","sub_path":"DATABASES/hashtable.py","file_name":"hashtable.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40547578392","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth.models import User\nfrom users.models import Profile, SellerDetails, ShopDetails, BusinessDetails, Documents, Address, AddressContactDetails\nfrom seller_center.settings.dev import MEDIA_URL\nimport requests\n\ndef profile(request):\n\treturn render(request, 'users/profile_page.html', set_user_profile_data(request.user.id))\n\ndef profile_edit(request):\n\tuser_id=request.user.id\n\tdata=set_user_profile_data(user_id)\n\n\tif not data['enable_fields']:\n\t\treturn HttpResponseRedirect(\"/profile\")\n\n\tif request.method == 
'POST':\n\t\tUser.objects.filter(id=user_id).update(\n\t\t\tfirst_name=request.POST.get('first-name'),\n\t\t\tlast_name=request.POST.get('last-name')\n\t\t)\n\n\t\tprofile=Profile.objects.filter(id=user_id)[0]\n\t\tprofile.birthday=request.POST.get('birthday')\n\t\tprofile.save()\n\n\t\tseller_details=profile.seller_details\n\t\tseller_details.name_on_id=request.POST.get('last-name') + ', ' + request.POST.get('first-name')\n\t\tseller_details.id_type=request.POST.get('id_type')\n\t\tif 'id_front' in request.FILES:\n\t\t\tfn=trim_file_upload(request.FILES['id_front'].name)\n\t\t\tseller_details.upload_id_front.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['id_front'])\n\t\t\tseller_details.upload_id_front_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn\n\t\tif 'id_back' in request.FILES:\n\t\t\tfn=trim_file_upload(request.FILES['id_back'].name)\n\t\t\tseller_details.upload_id_back.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['id_back'])\n\t\t\tseller_details.upload_id_back_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn\n\t\tseller_details.email=request.POST.get('email') if request.POST.get('email') != None else ''\n\t\tseller_details.phone=request.POST.get('phone') if request.POST.get('phone') != None else ''\n\t\tif request.POST.get('terms_conditions') is None:\n\t\t\tseller_details.has_agreed_to_terms=False\n\t\telse:\n\t\t\tseller_details.has_agreed_to_terms=True\n\t\tseller_details.save()\n\n\t\tshop_details=seller_details.shop_details\n\t\tshop_details.shop_name=request.POST.get('shop_name') if request.POST.get('shop_name') != None else ''\n\t\tif request.POST.get('holiday') is None:\n\t\t\tshop_details.holiday_mode=False\n\t\telse:\n\t\t\tshop_details.holiday_mode=True\n\t\tif request.POST.get('holiday_start_date'):\n\t\t\tshop_details.start_date=request.POST.get('holiday_start_date')\n\t\telse:\n\t\t\tshop_details.start_date=None\n\t\tif request.POST.get('holiday_start_time'):\n\t\t\tshop_details.start_time=request.POST.get('holiday_start_time')\n\t\telse:\n\t\t\tshop_details.start_time=None\n\t\tif request.POST.get('holiday_end_date'):\n\t\t\tshop_details.end_date=request.POST.get('holiday_end_date')\n\t\telse:\n\t\t\tshop_details.end_date=None\n\t\tif request.POST.get('holiday_end_time'):\n\t\t\tshop_details.end_time=request.POST.get('holiday_end_time')\n\t\telse:\n\t\t\tshop_details.end_time=None\n\t\tshop_details.save()\n\n\t\tbusiness_details=profile.business_details\n\t\tbusiness_details.company_name=request.POST.get('company_name') if request.POST.get('company_name') != None else ''\n\t\tbusiness_details.business_tin=request.POST.get('business_tin') if request.POST.get('business_tin') != None else ''\n\t\tbusiness_details.business_registration_number=request.POST.get('business_registration_number') if request.POST.get('business_registration_number') != None else ''\n\t\tbusiness_details.save()\n\n\t\tbusiness_address_details=business_details.business_address\n\t\tbusiness_address_details.street_bldg=request.POST.get('business_street_bldg') if request.POST.get('business_street_bldg') != None else ''\n\t\tbusiness_address_details.country=request.POST.get('business_country')\n\t\tbusiness_address_details.region_state=request.POST.get('business_region_state')\n\t\tbusiness_address_details.city=request.POST.get('business_city')\n\t\tbusiness_address_details.barangay=request.POST.get('business_brgy')\n\t\tpc=str(request.POST.get('business_postal_code'))\n\t\tif 
pc.isdigit():\n\t\t\tbusiness_address_details.postal_code=pc\n\t\telse:\n\t\t\tbusiness_address_details.postal_code=0\n\t\tbusiness_address_details.save()\n\n\t\tif 'bir' in request.FILES:\n\t\t\tdocu=Documents.objects.filter(profile_id=user_id, document_type='bir')[0]\n\t\t\tfn=trim_file_upload(request.FILES['bir'].name)\n\t\t\tdocu.document.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['bir'])\n\t\t\tDocuments.objects.filter(profile_id=user_id, document_type='bir').update(document_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn)\n\t\tif 'dti' in request.FILES:\n\t\t\tdocu=Documents.objects.filter(profile_id=user_id, document_type='dti')[0]\n\t\t\tfn=trim_file_upload(request.FILES['dti'].name)\n\t\t\tdocu.document.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['dti'])\n\t\t\tDocuments.objects.filter(profile_id=user_id, document_type='dti').update(document_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn)\n\t\tif 'sec' in request.FILES:\n\t\t\tdocu=Documents.objects.filter(profile_id=user_id, document_type='sec')[0]\n\t\t\tfn=trim_file_upload(request.FILES['sec'].name)\n\t\t\tdocu.document.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['sec'])\n\t\t\tDocuments.objects.filter(profile_id=user_id, document_type='sec').update(document_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn)\n\t\tif 'permit' in request.FILES:\n\t\t\tdocu=Documents.objects.filter(profile_id=user_id, document_type='permit')[0]\n\t\t\tfn=trim_file_upload(request.FILES['permit'].name)\n\t\t\tdocu.document.save(str(profile.id) + '/' + str(user_id) + '/' + fn, request.FILES['permit'])\n\t\t\tDocuments.objects.filter(profile_id=user_id, document_type='permit').update(document_url=MEDIA_URL + 'documents/' + str(profile.id) + '/' + str(user_id) + '/' + fn)\n\n\t\tpickup_address_details=profile.pickup_address\n\t\tpickup_address_details.street_bldg=request.POST.get('pickup_street_bldg') if request.POST.get('pickup_street_bldg') != None else ''\n\t\tpickup_address_details.country=request.POST.get('pickup_country')\n\t\tpickup_address_details.region_state=request.POST.get('pickup_region_state')\n\t\tpickup_address_details.city=request.POST.get('pickup_city')\n\t\tpickup_address_details.barangay=request.POST.get('pickup_brgy')\n\t\tpc=str(request.POST.get('pickup_postal_code'))\n\t\tif pc.isdigit():\n\t\t\tpickup_address_details.postal_code=pc\n\t\telse:\n\t\t\tpickup_address_details.postal_code=0\n\t\tpickup_address_details.save()\n\n\t\tpickup_contact_details=pickup_address_details.contact_details\n\t\tpickup_contact_details.contact_person_name=request.POST.get('pickup_contact_person_name') if request.POST.get('pickup_contact_person_name') != None else ''\n\t\tpickup_contact_details.contact_person_phone=request.POST.get('pickup_contact_person_phone') if request.POST.get('pickup_contact_person_phone') != None else ''\n\t\tpickup_contact_details.contact_person_email=request.POST.get('pickup_contact_person_email') if request.POST.get('pickup_contact_person_email') != None else ''\n\t\tpickup_contact_details.save()\n\n\t\treturn_address_details=profile.return_address\n\t\treturn_address_details.street_bldg=request.POST.get('return_street_bldg') if request.POST.get('return_street_bldg') != None else 
''\n\t\treturn_address_details.country=request.POST.get('return_country')\n\t\treturn_address_details.region_state=request.POST.get('return_region_state')\n\t\treturn_address_details.city=request.POST.get('return_city')\n\t\treturn_address_details.barangay=request.POST.get('return_brgy')\n\t\tpc=str(request.POST.get('return_postal_code'))\n\t\tif pc.isdigit():\n\t\t\treturn_address_details.postal_code=pc\n\t\telse:\n\t\t\treturn_address_details.postal_code=0\n\t\treturn_address_details.save()\n\n\t\treturn_contact_details=return_address_details.contact_details\n\t\treturn_contact_details.contact_person_name=request.POST.get('return_contact_person_name') if request.POST.get('return_contact_person_name') != None else ''\n\t\treturn_contact_details.contact_person_phone=request.POST.get('return_contact_person_phone') if request.POST.get('return_contact_person_phone') != None else ''\n\t\treturn_contact_details.contact_person_email=request.POST.get('return_contact_person_email') if request.POST.get('return_contact_person_email') != None else ''\n\t\treturn_contact_details.save()\n\n\t\treturn HttpResponseRedirect(\"/profile\")\n\n\treturn render(request, 'users/profile_edit_page.html', data)\n\ndef get_filename_from_url(url):\n\turl=url[url.rfind('/')+1:]\n\treturn url\n\ndef set_user_profile_data(user_id):\n\tuserData=User.objects.filter(id=user_id)[0]\n\n\tprofileData=Profile.objects.filter(id=user_id)[0]\n\n\tif profileData.seller_details is None:\n\t\tshop_details_data=ShopDetails()\n\t\tshop_details_data.save()\n\t\tseller_details_data=SellerDetails()\n\t\tseller_details_data.shop_details=shop_details_data\n\t\tseller_details_data.save()\n\t\tprofileData.seller_details=seller_details_data\n\t\tprofileData.save()\n\telse:\n\t\tseller_details_data=profileData.seller_details\n\tseller_details_data.upload_id_front_url=seller_details_data.upload_id_front_url if seller_details_data.upload_id_front_url else None\n\tupload_id_front_name=get_filename_from_url(seller_details_data.upload_id_front_url) if seller_details_data.upload_id_front_url else None\n\tseller_details_data.upload_id_back_url=seller_details_data.upload_id_back_url if seller_details_data.upload_id_back_url else None\n\tupload_id_back_name=get_filename_from_url(seller_details_data.upload_id_back_url) if seller_details_data.upload_id_back_url else None\n\tseller_details_data.has_agreed_to_terms='Agreed' if seller_details_data.has_agreed_to_terms else 'Not yet agreed'\n\n\tshop_details_data=seller_details_data.shop_details\n\tshop_details_data.holiday_mode='On' if shop_details_data.holiday_mode else 'Off'\n\n\tif profileData.business_details is None:\n\t\tbusiness_address_data=Address()\n\t\tbusiness_address_data.save()\n\t\tbusiness_details_data=BusinessDetails()\n\t\tbusiness_details_data.business_address=business_address_data\n\t\tbusiness_details_data.save()\n\t\tprofileData.business_details=business_details_data\n\t\tprofileData.save()\n\telse:\n\t\tbusiness_details_data=profileData.business_details\n\tbusiness_address_data=business_details_data.business_address\n\tstreet_bldg=business_address_data.street_bldg + ' ' if business_address_data.street_bldg != None else ''\n\tbarangay=business_address_data.barangay + ' ' if business_address_data.barangay != None else ''\n\tcity=business_address_data.city + ' ' if business_address_data.city != None else ''\n\tregion_state=business_address_data.region_state + ' ' if business_address_data.region_state != None else ''\n\tcountry=business_address_data.country + ' ' if 
business_address_data.country != None else ''\n\tpostal_code=str(business_address_data.postal_code) + ' ' if business_address_data.postal_code != None else ''\n\tbusiness_address=street_bldg + barangay + city + region_state + postal_code + country\n\n\tif business_address == '':\n\t\tbusiness_address=None\n\n\tbir_documents_data=Documents.objects.filter(profile_id=user_id, document_type='bir')\n\n\tif len(bir_documents_data) == 0:\n\t\tbir_documents_data=Documents()\n\t\tbir_documents_data.document_type='bir'\n\t\tbir_documents_data.profile_id=profileData.id\n\t\tbir_documents_data.save()\n\telse:\n\t\tbir_documents_data=bir_documents_data[0]\n\tbir=get_filename_from_url(bir_documents_data.document_url) if bir_documents_data.document_url else None\n\tdti_documents_data=Documents.objects.filter(profile_id=user_id, document_type='dti')\n\tif len(dti_documents_data) == 0:\n\t\tdti_documents_data=Documents()\n\t\tdti_documents_data.document_type='dti'\n\t\tdti_documents_data.profile_id=profileData.id\n\t\tdti_documents_data.save()\n\telse:\n\t\tdti_documents_data=dti_documents_data[0]\n\tdti=get_filename_from_url(dti_documents_data.document_url) if dti_documents_data.document_url else None\n\tsec_documents_data=Documents.objects.filter(profile_id=user_id, document_type='sec')\n\tif len(sec_documents_data) == 0:\n\t\tsec_documents_data=Documents()\n\t\tsec_documents_data.document_type='sec'\n\t\tsec_documents_data.profile_id=profileData.id\n\t\tsec_documents_data.save()\n\telse:\n\t\tsec_documents_data=sec_documents_data[0]\n\tsec=get_filename_from_url(sec_documents_data.document_url) if sec_documents_data.document_url else None\n\tpermit_documents_data=Documents.objects.filter(profile_id=user_id, document_type='permit')\n\tif len(permit_documents_data) == 0:\n\t\tpermit_documents_data=Documents()\n\t\tpermit_documents_data.document_type='permit'\n\t\tpermit_documents_data.profile_id=profileData.id\n\t\tpermit_documents_data.save()\n\telse:\n\t\tpermit_documents_data=permit_documents_data[0]\n\tpermit=get_filename_from_url(permit_documents_data.document_url) if permit_documents_data.document_url else None\n\n\tif profileData.pickup_address is None:\n\t\tpickup_contact_data=AddressContactDetails()\n\t\tpickup_contact_data.save()\n\t\tpickup_address_data=Address()\n\t\tpickup_address_data.contact_details=pickup_contact_data\n\t\tpickup_address_data.save()\n\t\tprofileData.pickup_address=pickup_address_data\n\t\tprofileData.save()\n\telse:\n\t\tpickup_address_data=profileData.pickup_address\n\tpickup_contact_data=pickup_address_data.contact_details\n\tstreet_bldg=pickup_address_data.street_bldg + ' ' if pickup_address_data.street_bldg != None else ''\n\tbarangay=pickup_address_data.barangay + ' ' if pickup_address_data.barangay != None else ''\n\tcity=pickup_address_data.city + ' ' if pickup_address_data.city != None else ''\n\tregion_state=pickup_address_data.region_state + ' ' if pickup_address_data.region_state != None else ''\n\tcountry=pickup_address_data.country + ' ' if pickup_address_data.country != None else ''\n\tpostal_code=str(pickup_address_data.postal_code) + ' ' if pickup_address_data.postal_code != None else ''\n\tpickup_address=street_bldg + barangay + city + region_state + postal_code + country\n\n\tif pickup_address == '':\n\t\tpickup_address=None\n\n\tif profileData.return_address is 
None:\n\t\treturn_contact_data=AddressContactDetails()\n\t\treturn_contact_data.save()\n\t\treturn_address_data=Address()\n\t\treturn_address_data.contact_details=return_contact_data\n\t\treturn_address_data.save()\n\t\tprofileData.return_address=return_address_data\n\t\tprofileData.save()\n\telse:\n\t\treturn_address_data=profileData.return_address\n\treturn_contact_data=return_address_data.contact_details\n\tstreet_bldg=return_address_data.street_bldg + ' ' if return_address_data.street_bldg != None else ''\n\tbarangay=return_address_data.barangay + ' ' if return_address_data.barangay != None else ''\n\tcity=return_address_data.city + ' ' if return_address_data.city != None else ''\n\tregion_state=return_address_data.region_state + ' ' if return_address_data.region_state != None else ''\n\tcountry=return_address_data.country + ' ' if return_address_data.country != None else ''\n\tpostal_code=str(return_address_data.postal_code) + ' ' if return_address_data.postal_code != None else ''\n\treturn_address=street_bldg + barangay + city + region_state + postal_code + country\n\n\tif return_address == '':\n\t\treturn_address=None\n\n\tctr=0\n\tenable_fields=True\n\tif seller_details_data.name_on_id:\n\t\tctr+=1\n\tif seller_details_data.id_type:\n\t\tctr+=1\n\tif seller_details_data.upload_id_front_url:\n\t\tctr+=1\n\tif seller_details_data.upload_id_back_url:\n\t\tctr+=1\n\tif seller_details_data.has_agreed_to_terms == 'Agreed':\n\t\tctr+=1\n\tif shop_details_data.shop_name:\n\t\tctr+=1\n\tif pickup_address_data.street_bldg:\n\t\tctr+=1\n\tif pickup_address_data.country:\n\t\tctr+=1\n\tif pickup_address_data.region_state:\n\t\tctr+=1\n\tif pickup_address_data.city:\n\t\tctr+=1\n\tif pickup_address_data.barangay:\n\t\tctr+=1\n\tif pickup_address_data.postal_code:\n\t\tctr+=1\n\tif return_address_data.street_bldg:\n\t\tctr+=1\n\tif return_address_data.country:\n\t\tctr+=1\n\tif return_address_data.region_state:\n\t\tctr+=1\n\tif return_address_data.city:\n\t\tctr+=1\n\tif return_address_data.barangay:\n\t\tctr+=1\n\tif return_address_data.postal_code:\n\t\tctr+=1\n\tif seller_details_data.seller_status == 'Pending for Review' and ctr == 18:\n\t\tenable_fields=False\n\n\tcountries_data=get_countries()\n\tcountries_filtered_data=[]\n\tfor v in countries_data:\n\t\tcountries_filtered_data.append({'name': v['name']})\n\n\treturn {\n\t\t'profileData': profileData,\n\t\t'userData': userData,\n\t\t'seller_details_data': seller_details_data,\n\t\t'upload_id_front_name': upload_id_front_name,\n\t\t'upload_id_back_name': upload_id_back_name,\n\t\t'shop_details_data': shop_details_data,\n\t\t'business_details_data': business_details_data,\n\t\t'business_address_data': business_address_data,\n\t\t'business_address': business_address,\n\t\t'bir_documents_data': bir_documents_data,\n\t\t'bir': bir,\n\t\t'dti_documents_data': dti_documents_data,\n\t\t'dti': dti,\n\t\t'sec_documents_data': sec_documents_data,\n\t\t'sec': sec,\n\t\t'permit_documents_data': permit_documents_data,\n\t\t'permit': permit,\n\t\t'pickup_address_data': pickup_address_data,\n\t\t'pickup_contact_data': pickup_contact_data,\n\t\t'pickup_address': pickup_address,\n\t\t'return_address_data': return_address_data,\n\t\t'return_contact_data': return_contact_data,\n\t\t'return_address': return_address,\n\t\t'enable_fields': enable_fields,\n\t\t'countries_filtered_data': countries_filtered_data\n\t}\n\ndef trim_file_upload(file):\n\tfile=file.replace(' ', '_')\n\tfile=file.replace('(', '')\n\tfile=file.replace(')', '')\n\treturn file\n\ndef 
get_countries():\n\turl='https://restcountries.eu/rest/v2/all'\n\tresponse=requests.get(url)\n\tdata=response.json()\n\treturn data","repo_name":"jraramirez/seller-center","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29897187895","text":"#!/usr/bin/env python\n# coding: utf-8\n#function\nimport gzip\nimport regex\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser(description='A function to extract gRNA and clonal barcode from merged fastq gz file')\n parser.add_argument(\"--a\", required=True, help=\"This is the input fastq gz file\")\n parser.add_argument(\"--o\", required=True, help=\"This is the dir of output file\")\n args = parser.parse_args()\n fastqgz_input_address = args.a\n output_dir = args.o\n gRNA_output_address = output_dir + '/' + 'gRNA.bartender'\n clonal_barcode_output_address = output_dir + '/' + 'clonalbarcode.bartender'\n \n file_a = open(gRNA_output_address,'wt')\n file_b = open(clonal_barcode_output_address,'wt')\n with gzip.open(fastqgz_input_address,'rt') as handler:\n temp_readID = handler.readline().rstrip() # read ID\n temp_sequence = handler.readline().rstrip()\n handler.readline() # skip two lines\n handler.readline()\n while temp_readID:\n # This is the regular expression pattern\n # 16 bp for barcode\n # 16-20 for sgRNA, some of the control sgRNA are shorter\n temp_pattern = regex.compile('(TAGTT){e<2}' + '(.{16})' + 'TATGG'+'(.{16,21})' + 'GTT(TAAGA){e<2}')\n\n temp_search_result = temp_pattern.search(temp_sequence)\n if temp_search_result:\n temp_gRNA = temp_search_result.group(3)\n temp_clonal_barcode = temp_search_result.group(2)\n temp_string = '{},{}\\n'.format(temp_gRNA,temp_readID) # output to bartender format\n file_a.write(temp_string)\n temp_string = '{},{}\\n'.format(temp_clonal_barcode,temp_readID)\n file_b.write(temp_string)\n temp_readID = handler.readline().rstrip() # read ID\n temp_sequence = handler.readline().rstrip()\n handler.readline() # skip two lines\n handler.readline()\n file_a.close()\n file_b.close()\n \nif __name__ == \"__main__\":\n main() \n\n\n","repo_name":"JasperXuEvolution/Ultra_Seq","sub_path":"S1_Extracting_barcodes/UltraSeq_Step_2_1.py","file_name":"UltraSeq_Step_2_1.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74582565203","text":"class Solution:\n def minMalwareSpread(self, graph, initial):\n best_reduction = 0\n best_node = min(initial)\n initial = set(initial)\n\n def connected(node):\n if node in group:\n return\n group.add(node)\n [connected(nbor) for nbor, linked in enumerate(graph[node]) if linked == 1]\n visited = set()\n for node in range(len(graph)):\n if node in visited:\n continue\n group = set()\n connected(node)\n overlap = initial & group\n if len(overlap) == 1 and len(group) > best_reduction:\n best_reduction = len(group)\n best_node = overlap.pop()\n visited |= group\n return best_node\n","repo_name":"huangyingw/submissions","sub_path":"924/924.minimize-malware-spread.233993539.Accepted.leetcode.py","file_name":"924.minimize-malware-spread.233993539.Accepted.leetcode.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"21886419087","text":"from django.urls import path\n\nfrom permissions import views\n\nurlpatterns = [\n 
path('new_request/', views.new_request, name='new_request'),\n path('view_previous_requests/', views.view_previous_requests, name='view_previous_requests'),\n path('view_all_previous_requests/', views.view_all_previous_requests, name='view_all_previous_requests'),\n path('approve_request/', views.approve_request, name='approve_request'),\n path('reject_request/', views.reject_request, name='reject_request'),\n path('withdraw_request/', views.withdraw_request, name='withdraw_request'),\n path('edit_request/', views.edit_request, name='edit_request'),\n path('review_requests/', views.review_requests, name='review_requests')\n]\n","repo_name":"ArnavVarshney/myHostel","sub_path":"permissions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19089599191","text":"import ast\nimport configparser\nimport os\nimport readline\nimport sys\nimport textwrap\nfrom distutils import util\nfrom pathlib import Path\n\nimport pandas as pd\nfrom colorama import Back, Fore, Style\nfrom loguru import logger\n\nfrom csv_labeler import tab_completer\n\nlogger.remove()\nlogger.add(sys.stderr, format=\"{message}\", level=\"INFO\")\n\n\ndef confirm_prompt(question: str) -> bool:\n \"\"\"\n Asks the user for passed questions, accepts \"Yes\" (default, also valid as direct)\n Enter or \"No\" as answer. Returns a boolean value Yes=True/No=False\n\n Parameters\n ----------\n question : str\n The questions the user has to answere\n\n Returns\n -------\n bool\n True if the user choose Yes, False if No\n \"\"\"\n reply = None\n valid_inputs = (\"\", \"y\", \"n\")\n\n while reply not in valid_inputs:\n if reply is not None:\n print('Please enter a valid value (\"Y/y\" or Enter for Yes, \"N/n\" for No)')\n reply = input(f\"{question} (Y/n): \").casefold()\n return reply in (\"\", \"y\")\n\n\ndef get_csv_filepath() -> Path:\n \"\"\"\n Asks the user for the path to the csv file\n\n Raises\n ------\n KeyboardInterrupt\n If the user enters \"q\" or \"Q\"\n Returns\n -------\n pathlib.Path\n Path to csv file\n \"\"\"\n while True:\n readline.set_completer_delims(\"\\t\")\n readline.parse_and_bind(\"tab: complete\")\n completer = tab_completer.TabCompleter()\n readline.set_completer(completer.path_completer)\n csv_filepath = Path(input(\"Please enter the path of the csv file: \"))\n if csv_filepath.is_file():\n break\n if str(csv_filepath).casefold() == \"q\":\n raise KeyboardInterrupt(\"User canceled the input\")\n print(\"No valid file found\")\n\n return csv_filepath\n\n\ndef detect_labels(df: pd.DataFrame, label_column: str) -> bool:\n \"\"\"\n Validates if the label column already contains any labels.\n Returns true if so.\n\n Parameters\n ----------\n df : pd.DataFrame\n Read in csv file\n label_column : str\n Column to check for labels\n\n Returns\n -------\n bool\n True if any column contains a label, False if not\n \"\"\"\n return not df[label_column].isnull().all()\n\n\ndef handle_existing_labels(df: pd.DataFrame, label_column: str) -> bool:\n \"\"\"\n Asks the user if he/she wants to keep existing labels if any are detected.\n\n Parameters\n ----------\n df : pd.DataFrame\n DataFrame with the label column\n label_column : str\n Name of the column which contains the labels\n\n Returns\n -------\n bool\n True if user wants to skip (ignore) already labeled rows, False if\n the user wants to relable them\n \"\"\"\n keep_label = False\n if detect_labels(df, label_column):\n keep_label = 
confirm_prompt(\n \"Existing labels detected! Do you want to keep the existing labels (if you\"\n \" choose No, all existing labels will be deleted!)\"\n )\n\n return keep_label\n\n\ndef print_relevant_columns(row: pd.Series, config: configparser.ConfigParser) -> None:\n \"\"\"\n Prints all relevant columns for the classification with the corresponding value. Does\n some preprecessing for string values (removes linebreaks, splits into multiple lines if\n the text is to long...).\n\n Parameters\n ----------\n row : pd.Series\n Row of the csv file/dataframe. Must contain all relevant columns\n config : configparser.ConfigParser\n ConfigParser with all information from the config.ini\n \"\"\"\n clear_console()\n wrapper = textwrap.TextWrapper(\n width=int(config[\"general\"][\"line_length\"])\n ) # Needed for formatting outputs\n\n # If not at least one relevant column is defined, use all columns except of label_column\n relevant_columns = ast.literal_eval(config[\"csv\"][\"relevant_columns\"])\n if len(relevant_columns) == 0:\n relevant_columns = list(\n i for i in list(row.index) if i.casefold() != config[\"csv\"][\"label_column\"]\n )\n\n # Use the length of the longest column name to determine the width of the \"name\" column.\n # The width of the \"value\" column is the remaining space of the {line_width} defined in the\n # config.ini minus the width of the \"name\" column and the vale of {name_value_seperator_width}\n # (also defined in config.ini.)\n name_column_width = len(max(relevant_columns, key=len))\n value_column_width = int(config[\"general\"][\"line_length\"]) - name_column_width\n line_length = int(config[\"general\"][\"name_value_seperator_width\"])\n\n for name, value in row.items():\n if name.casefold() in [x.casefold() for x in relevant_columns]:\n # Check for empty row -> no further processing needed if empty\n if pd.isna(value):\n print_value = \"None\"\n else:\n # Cleanup text & highlight keywords (only in strings)\n if isinstance(value, str):\n print_value = \" \".join(value.replace(\"\\\\\", \"\").split())\n print_value = highlight_keywords(\n print_value,\n ast.literal_eval(config[\"classification\"][\"keywords\"]),\n config[\"highlighting\"][\"foreground\"],\n config[\"highlighting\"][\"background\"],\n )\n else:\n print_value = value\n\n # Print column\n # If column contains text, split it into smaller parts to fit inside default terminals\n if isinstance(value, str):\n line_list = wrapper.wrap(text=print_value)\n print(\n f\"{name:{name_column_width}}:\"\n f'{\"\":{line_length}}{line_list[0]:{value_column_width}}'\n )\n for element in line_list[1:]:\n print(\n f'{\"\":{name_column_width}} {\"\":{line_length}}{element:{value_column_width}}'\n )\n else:\n print(\n f\"{name:{name_column_width}}:\"\n f'{\"\":{line_length}}{str(print_value):{value_column_width}}'\n )\n\n\ndef label_row(\n row: pd.Series,\n keep_label: bool,\n config: configparser.ConfigParser,\n) -> str:\n \"\"\"\n Prints the relevant columns of the passed row to the terminal and asks the user for\n the label.\n\n Parameters\n ----------\n row : pd.Series\n Row of the pandas Dataframe that contains the csv file\n keep_label : bool\n Should existing labels be retained\n config : configparser.ConfigParser\n ConfigParser with all information from the config.ini\n\n Returns\n -------\n str\n Selected Label\n \"\"\"\n if keep_label and not pd.isna(row[config[\"csv\"][\"label_column\"]]):\n return row[config[\"csv\"][\"label_column\"]]\n print_relevant_columns(row, config)\n return 
get_classification(ast.literal_eval(config[\"classification\"][\"labels\"]))\n\n\ndef main():\n \"\"\"\n CSV Labeler\n\n A simple tool for labeling your csv files\n \"\"\"\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n if bool(util.strtobool(config[\"development\"][\"testmode\"])):\n # Development behavior, set values inside of config.ini\n csv_filepath = config[\"development\"][\"csv_file\"]\n else:\n # Normal behavior\n try:\n csv_filepath = get_csv_filepath()\n except KeyboardInterrupt:\n clear_console()\n print(\"Exiting...\")\n sys.exit(0)\n df = pd.read_csv(csv_filepath, sep=config[\"csv\"][\"sep\"])\n if not isinstance(df, pd.DataFrame):\n raise ValueError(\"Error while reading the csv file\")\n\n if bool(util.strtobool(config[\"development\"][\"testmode\"])):\n # Development behavior, set values inside of config.ini\n keep_label = bool(util.strtobool(config[\"development\"][\"skip_labels\"]))\n else:\n # Normal behavior\n keep_label = handle_existing_labels(df, config[\"csv\"][\"label_column\"])\n save_changes = True\n\n for index, row in df.iterrows():\n try:\n df.loc[ # pylint: disable=no-member\n index, config[\"csv\"][\"label_column\"]\n ] = label_row(row, keep_label, config)\n except KeyboardInterrupt:\n save_changes = confirm_prompt(\n \"\\nInput was canceled, should the labels created so far be saved?\"\n )\n break\n\n clear_console()\n print(\"Labeling of the CSV file completed\")\n if save_changes:\n df.to_csv( # pylint: disable=no-member\n csv_filepath, sep=config[\"csv\"][\"sep\"], index=False\n )\n\n\ndef get_classification(categories: list) -> str:\n \"\"\"\n Displays the possible label classes and validates the userinput (must be a valid labelclass or a\n corresponding id). Converts a class id to the class name if necessary\n\n Parameters\n ----------\n categories : list\n List with the class labels\n\n Returns\n -------\n str\n Selected label\n\n Raises\n ------\n KeyboardInterrupt\n Raised when the user choose to cancel the classification\n \"\"\"\n lower_category_list = [x.casefold() for x in categories]\n category_integer_list = list(range(1, len(lower_category_list) + 1))\n category_hex_list = list(hex(n) for n in category_integer_list)\n print(\"\\nThe following categories exist: \")\n for idx, category in enumerate(categories):\n print(f\"\\t{idx+1:x})\\t{category}\")\n\n print(\"\\n\\tu)\\tUmbuchung\")\n print(\"\\tq)\\tCancel Input\")\n\n # Setup auto-completion via tab\n completer = tab_completer.TabCompleter()\n completer.create_list_completer(categories)\n readline.set_completer_delims(\"\\t\")\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(completer.list_completer)\n\n while True:\n skip_invalid_print = False\n selected_category = input(\n \"\\nPlease select one of the categories, you can use the name (autocomplete\"\n \" via tab) the corresponding number: \"\n )\n # Check if the input was empty, if so, ask again\n if not selected_category:\n print(\"\\nPlease select a category\")\n continue\n\n # Check if there is an exact match\n if selected_category.casefold() in lower_category_list:\n clear_console()\n return resolve_correct_label_name(selected_category, categories)\n # Check if the user entered the hardcoded value 'Umbuchung'\n if selected_category.casefold() == \"umbuchung\" or selected_category == \"u\":\n clear_console()\n return \"Umbuchung\"\n # Check if the user entered the exit command\n if selected_category.casefold() == \"cancel input\" or selected_category == \"q\":\n raise 
KeyboardInterrupt(\"User canceled the input\")\n # Check if the user entered a hex value (-> Id of a category)\n try:\n if hex(int(selected_category, 16)) in category_hex_list:\n selected_category = categories[int(selected_category, 16) - 1]\n clear_console()\n return selected_category\n except ValueError:\n logger.debug(\"User input was not a hex value\")\n\n if not skip_invalid_print:\n print(\"Invalid input, please choose a valid category!\")\n\n\ndef resolve_correct_label_name(label: str, label_list: list) -> str:\n \"\"\"\n Resolves the correct (case-sensitive) spelling for the casefolded label\n\n\n Parameters\n ----------\n label : str\n Label in case-insensitive spelling\n label_list : list\n List with all labels in the correct spelling\n\n Returns\n -------\n str\n Correctly spelled label\n \"\"\"\n match = next(\n i for i, v in enumerate(label_list) if v.casefold() == label.casefold()\n )\n return label_list[match]\n\n\ndef clear_console():\n \"\"\"\n Clears console output\n \"\"\"\n os.system(\"cls||clear\")\n\n\ndef highlight_keywords(\n text: str,\n keywords: list,\n foreground_color: str = \"BLACK\",\n background_color: str = \"YELLOW\",\n) -> str:\n \"\"\"\n Loops over the text and adds the necessary characters for word-based highlighting.\n This is maybe not the most efficient way to do this, but it's good enough for this\n use case.\n\n Parameters\n ----------\n text : str\n The original text to highlight in\n keywords : list\n List with all words that should be highlighted\n foreground_color : str\n Foreground color for highlighted words\n background_color : str\n Background color for highlighted words\n\n\n Returns\n -------\n str\n Text with added characters for highlighting\n \"\"\"\n # Skip loop if there are no keywords\n if len(keywords) > 0:\n words_in_text = text.split(\" \")\n for idx, word in enumerate(words_in_text):\n if word.casefold() in [x.casefold() for x in keywords]:\n highlight_stopper = (\n Style.RESET_ALL if idx == len(words_in_text) - 1 else \"\"\n )\n\n text = text.replace(\n word,\n getattr(Fore, foreground_color.upper())\n + getattr(Back, background_color.upper())\n + word\n + highlight_stopper,\n )\n else:\n text = text.replace(f\" {word}\", Style.RESET_ALL + f\" {word}\")\n\n return text\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"RobinMaas95/csv_labeler","sub_path":"csv_labeler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14170151240","text":"__author__ = 'hechaoyi'\n\n'''\nSolution idea: for tree problems, think of recursion first. Take this problem as an example: if a binary tree is {1,2,3,4,5,6,7}, its inorder traversal is {4,2,5,1,6,3,7} and its postorder traversal is {4,5,2,6,7,3,1}, and we can work backwards from these. The last node of the postorder traversal is the root of the tree, so root=1. We then search for 1 in the inorder traversal and find it at the fourth position. By the definition of inorder traversal, the numbers to the left of 1, {4,2,5}, are the inorder traversal of the left subtree, and the numbers to the right of 1, {6,3,7}, are the inorder traversal of the right subtree. As for the postorder traversal, the left subtree is always traversed first, then the right subtree, and the root last, so it follows that {4,5,2} is the postorder traversal of the left subtree and {6,7,3} is the postorder traversal of the right subtree. Since we already know {4,2,5} and {6,3,7} are the inorder traversals of the left and right subtrees, recursing on each side solves the problem.\n'''\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n # @param inorder, a list of integers\n # @param postorder, a list of integers\n # @return a tree node\n def buildTree(self, inorder, postorder):\n if len(inorder) == 0:\n return None\n if len(inorder) == 1:\n return TreeNode(inorder[0])\n root = TreeNode(postorder[len(postorder) - 1])\n index = inorder.index(postorder[len(postorder) - 1])\n root.left = self.buildTree(inorder[ 0 : index ], postorder[ 0 : index ])\n        
root.right = self.buildTree(inorder[ index + 1 : len(inorder) ], postorder[ index : len(postorder) - 1 ])\n return root","repo_name":"chaoyi-he/algrithm","sub_path":"construct-binary-tree-from-inorder-and-postorder-traversal.py","file_name":"construct-binary-tree-from-inorder-and-postorder-traversal.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"38350612423","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# Slide Puzzle\n# By Al Sweigart al@inventwithpython.com\n# http://inventwithpython.com/pygame\n# Modded by brk0_0\n# Creative Commons BY-NC-SA 3.0 US\n\nimport random\nimport sys\nimport pygame\nfrom pygame.locals import *\n\n# Constants\nBOARDWIDTH = 3 # Number of rows in the board\nBOARDHEIGHT = 3 # Number of columns in the board\nTILESIZE = 80 \nWINDOWWIDTH = 640\nWINDOWHEIGHT = 480\nFPS = 30\nBLANK = None\nSHUFMOVES = 80 # Number of moves when shuffling the board\n\n# Colors R G B\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\nBRIGHTBLUE = ( 0, 50, 255)\nDARKTURQUOISE = ( 3, 54, 73)\nGREEN = ( 0, 255, 0)\n\nBGCOLOR = DARKTURQUOISE\nTILECOLOR = GREEN\nTEXTCOLOR = WHITE\nBORDERCOLOR = BRIGHTBLUE\nBUTTONCOLOR = WHITE\nBUTTONTEXTCOLOR = BLACK\nMESSAGECOLOR = WHITE\nBASICFONTSIZE = 20\n\nXMARGIN = int((WINDOWWIDTH - (TILESIZE * BOARDWIDTH + (BOARDWIDTH - 1))) / 2)\nYMARGIN = int((WINDOWHEIGHT - (TILESIZE * BOARDHEIGHT + (BOARDHEIGHT - 1))) / 2)\n\nUP = \"up\"\nDOWN = \"down\"\nLEFT = \"left\"\nRIGHT = \"right\"\n\ndef main():\n\t# Global variables\n\tglobal FPSCLOCK, DISPLAYSURF, BASICFONT, RESET_SURF, RESET_RECT, NEW_SURF, NEW_RECT, SOLVE_SURF, SOLVE_RECT\n\n\t# Initiate PyGame and set primary variables\n\tpygame.init()\n\tFPSCLOCK = pygame.time.Clock()\n\tDISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n\tpygame.display.set_caption(\"Slide Puzzle\")\n\tBASICFONT = pygame.font.Font(\"freesansbold.ttf\", BASICFONTSIZE)\n\n\t# Store the option buttons and their rectangles in OPTIONS\n\tRESET_SURF, RESET_RECT = makeText(\"Reset\", TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 90) \n\tNEW_SURF , NEW_RECT = makeText(\"New Game\", TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 60)\n\tSOLVE_SURF, SOLVE_RECT = makeText(\"Solve\", TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 30)\n\n\tmainBoard, solutionSeq = generateNewPuzzle(SHUFMOVES)\n\tSOLVEDBOARD = getStartingBoard() # A solved board is the same as the board in start state, before shuffling\n\tallMoves = [] # List of moves made from the shuffled configuration\n\tmessage = \"\" # Contains the message displayed on the upper left corner\n\n\t# Main game loop\n\twhile True:\n\t\tslideTo = None # The direction, if any, a tile should slide\n\n\t\tif mainBoard == SOLVEDBOARD:\n\t\t\tif message != \"The Board is Already Solved!\":\n\t\t\t\tmessage = \"Solved!\"\n\t\t\tallMoves = []\n\t\t\tsolutionSeq = []\n\t\telse:\n\t\t\tmessage = \"Click tile or press arrow keys to slide\"\n\n\t\tdrawBoard(mainBoard, message) # Draw the board to the screen\n\t\tcheckForQuit() # Check for quit-like events and quit if any\n\n\t\t# Event handling loop\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == MOUSEBUTTONUP: # If the event is a mouse click\n\t\t\t\tspotx, spoty = getSpotClicked(mainBoard, event.pos[0], event.pos[1])\n\n\t\t\t\t# If the player missed a tile, check if an option button was clicked\n\t\t\t\tif (spotx, spoty) == (None, None):\n\t\t\t\t\t# Clicked on RESET button\n\t\t\t\t\tif 
RESET_RECT.collidepoint(event.pos):\n\t\t\t\t\t\tresetAnimation(mainBoard, allMoves)\n\t\t\t\t\t\tallMoves = []\n\t\t\t\t\t# Clicked on NEW GAME button\n\t\t\t\t\telif NEW_RECT.collidepoint(event.pos):\n\t\t\t\t\t\tmainBoard, solutionSeq = generateNewPuzzle(SHUFMOVES)\n\t\t\t\t\t\tallMoves = []\n\t\t\t\t\t\tmessage = \"\"\n\t\t\t\t\t# Clicked on SOLVE button\n\t\t\t\t\telif SOLVE_RECT.collidepoint(event.pos):\n\t\t\t\t\t\tif mainBoard == SOLVEDBOARD:\n\t\t\t\t\t\t\tmessage = \"The Board is Already Solved!\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tresetAnimation(mainBoard, solutionSeq + allMoves)\n\t\t\t\t\t\t\tsolutionSeq = []\n\t\t\t\t\t\t\tallMoves = []\n\t\t\t\t# Else, check if the clicked tile was next to the blank spot\n\t\t\t\telse:\n\t\t\t\t\tblankx, blanky = getBlankPosition(mainBoard) # Get the blank spot's coordinates\n\n\t\t\t\t\t# Set the tile to move to the right direction\n\t\t\t\t\tif spotx == blankx + 1 and spoty == blanky:\n\t\t\t\t\t\tslideTo = LEFT\n\t\t\t\t\telif spotx == blankx - 1 and spoty == blanky:\n\t\t\t\t\t\tslideTo = RIGHT\n\t\t\t\t\telif spotx == blankx and spoty == blanky + 1:\n\t\t\t\t\t\tslideTo = UP\n\t\t\t\t\telif spotx == blankx and spoty == blanky - 1:\n\t\t\t\t\t\tslideTo = DOWN\n\n\n\t\t\telif event.type == KEYUP: # If the event is a key press\n\t\t\t\tif event.key in (K_LEFT, K_a) and isValidMove(mainBoard, LEFT):\n\t\t\t\t\tslideTo = LEFT\n\t\t\t\telif event.key in (K_RIGHT, K_d) and isValidMove(mainBoard, RIGHT):\n\t\t\t\t\tslideTo = RIGHT\n\t\t\t\telif event.key in (K_UP, K_w) and isValidMove(mainBoard, UP):\n\t\t\t\t\tslideTo = UP\n\t\t\t\telif event.key in (K_DOWN, K_s) and isValidMove(mainBoard, DOWN):\n\t\t\t\t\tslideTo = DOWN\n\t\t\n\t\t# If a tile should be moved\n\t\tif slideTo:\n\t\t\tslideAnimation(mainBoard, slideTo, \"Click tile or press arrow keys to slide\", 8)\n\t\t\tmakeMove(mainBoard, slideTo)\n\t\t\tallMoves.append(slideTo)\n\n\t\t# Update the display and wait for the clock to tick\n\t\tpygame.display.update()\n\t\tFPSCLOCK.tick(FPS)\n\ndef terminate():\n\tpygame.quit() # Quit PyGame\n\tsys.exit() # Exit program\n\ndef checkForQuit():\n\tfor event in pygame.event.get(QUIT): # Get all QUIT events\n\t\tterminate() # End the game if there are any QUIT events\n\tfor event in pygame.event.get(KEYUP): # Get all KEYUP events\n\t\t# If K_ESCAPE was pressed, terminate\n\t\tif event.key == K_ESCAPE:\n\t\t\tterminate()\n\t\t# Else, puts the event back in the list of events\n\t\tpygame.event.post(event)\n\ndef oppositeMove(move):\n\tif move == UP:\n\t\treturn DOWN\n\telif move == DOWN:\n\t\treturn UP\n\telif move == LEFT:\n\t\treturn RIGHT\n\telif move == RIGHT:\n\t\treturn LEFT\n\ndef isValidMove(board, move):\n\tblankx, blanky = getBlankPosition(board)\n\n\treturn (move == UP and blanky != len(board[0]) - 1) or \\\n\t (move == DOWN and blanky != 0) or \\\n\t (move == LEFT and blankx != len(board) - 1) or \\\n\t (move == RIGHT and blankx != 0)\n\ndef makeMove(board, move):\n\tblankx, blanky = getBlankPosition(board)\n\n\tif move == UP:\n\t\tboard[blankx][blanky], board[blankx][blanky + 1] = board[blankx][blanky + 1], board[blankx][blanky]\n\telif move == DOWN:\n\t\tboard[blankx][blanky], board[blankx][blanky - 1] = board[blankx][blanky - 1], board[blankx][blanky]\n\telif move == LEFT:\n\t\tboard[blankx][blanky], board[blankx + 1][blanky] = board[blankx + 1][blanky], board[blankx][blanky]\n\telif move == RIGHT:\n\t\tboard[blankx][blanky], board[blankx - 1][blanky] = board[blankx - 1][blanky], board[blankx][blanky]\n\ndef getStartingBoard():\n\t# 
Return a board data structure with tiles in the solved state\n\tcounter = 1\n\tboard = []\n\n\t# Build the board\n\tfor x in range(BOARDWIDTH):\n\t\tcolumn = []\n\t\tfor y in range(BOARDHEIGHT):\n\t\t\tcolumn.append(counter)\n\t\t\tcounter += BOARDWIDTH\n\t\tboard.append(column)\n\t\tcounter -= BOARDWIDTH * (BOARDHEIGHT - 1) + BOARDWIDTH - 1\n\tboard[BOARDWIDTH - 1][BOARDHEIGHT - 1] = BLANK\n\n\treturn board\n\ndef getBlankPosition(board):\n\tfor x in range(BOARDWIDTH):\n\t\tfor y in range(BOARDHEIGHT):\n\t\t\tif board[x][y] == BLANK:\n\t\t\t\treturn (x, y)\n\ndef getRandomMove(board, lastMove=None):\n\tallMoves = [UP, DOWN, LEFT, RIGHT]\n\tvalidMoves = []\n\n\t# Delete invalid moves from the list\n\tfor move in allMoves:\n\t\tif move != oppositeMove(lastMove) and isValidMove(board, move):\n\t\t\tvalidMoves.append(move)\n\n\t# Return a random valid move\n\treturn random.choice(validMoves)\n\ndef getLeftTopOfTile(tileX, tileY):\n\tleft = XMARGIN + (tileX * TILESIZE) + (tileX - 1)\n\ttop = YMARGIN + (tileY * TILESIZE) + (tileY - 1)\n\n\treturn left, top\n\ndef getSpotClicked(board, x, y):\n\t# Go through the board checking if any tile collides with the (x,y) point\n\tfor tileX in range(len(board)):\n\t\tfor tileY in range(len(board[0])):\n\t\t\tleft, top = getLeftTopOfTile(tileX, tileY)\n\t\t\ttileRect = pygame.Rect(left, top, TILESIZE, TILESIZE)\n\t\t\t# If collides, returns the coordinates of that tile\n\t\t\tif tileRect.collidepoint(x, y):\n\t\t\t\treturn (tileX, tileY)\n\n\t# If none matched\n\treturn (None, None)\n\ndef drawTile(tileX, tileY, number, adjx=0, adjy=0):\n\tleft, top = getLeftTopOfTile(tileX, tileY) # Get left/top coordinates of the tile\n\ttileRect = pygame.draw.rect(DISPLAYSURF, TILECOLOR, (left + adjx, top + adjy, TILESIZE, TILESIZE)) # Draw the main rectangle\n\ttextSurf = BASICFONT.render(str(number), True, TEXTCOLOR) # Create a text surface with the the number written on\n\ttextRect = textSurf.get_rect() # Get the text surface's rectange\n\ttextRect.center = left + int(TILESIZE / 2) + adjx, top + int(TILESIZE / 2) + adjy # Change the text surface's center\n\tDISPLAYSURF.blit(textSurf, textRect) # Blit the text surface on DISPLAYSURF\n\ndef drawBoard(board, message):\n\t# Fill the display with background color\n\tDISPLAYSURF.fill(BGCOLOR)\n\n\t# Display message, if any\n\tif message:\n\t\ttextSurf, textRect = makeText(message, MESSAGECOLOR, BGCOLOR, 5, 5)\n\t\tDISPLAYSURF.blit(textSurf, textRect)\n\n\t# Display the tiles, one by one\n\tfor tileX in range(len(board)):\n\t\tfor tileY in range(len(board[0])):\n\t\t\tif board[tileX][tileY] != BLANK:\n\t\t\t\tdrawTile(tileX, tileY, board[tileX][tileY])\n\n\t# Display the border\n\tleft, top = getLeftTopOfTile(0,0)\n\twidth = BOARDWIDTH * TILESIZE\n\theight = BOARDHEIGHT * TILESIZE\n\tpygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (left - 5, top - 5, width + 11, height + 11), 4)\n\n\t# Display the buttons\n\tDISPLAYSURF.blit(RESET_SURF, RESET_RECT)\n\tDISPLAYSURF.blit(NEW_SURF, NEW_RECT)\n\tDISPLAYSURF.blit(SOLVE_SURF, SOLVE_RECT)\n\ndef makeText(text, color, bgcolor, top, left):\n\ttextSurf = BASICFONT.render(text, True, color, bgcolor) # Create a text surface with the the number written on\n\ttextRect = textSurf.get_rect() # Get the text surface's rectange\n\ttextRect.topleft = (top, left) # Change the text surface's center\n\treturn (textSurf, textRect)\n\ndef slideAnimation(board, direction, message, animationSpeed):\n\tblankx, blanky = getBlankPosition(board)\n\n\tif direction == UP:\n\t\tmovex, movey = blankx, blanky + 
1\n\telif direction == DOWN:\n\t\tmovex, movey = blankx, blanky - 1\n\telif direction == LEFT:\n\t\tmovex, movey = blankx + 1, blanky\n\telif direction == RIGHT:\n\t\tmovex, movey = blankx - 1, blanky\n\n\t# Prepare the base surface\t\n\tdrawBoard(board, message)\n\tbaseSurf = DISPLAYSURF.copy()\n\n\t# Delete the moving tile from the baseSurf surface\n\tmoveLeft, moveTop = getLeftTopOfTile(movex, movey)\n\tpygame.draw.rect(baseSurf, BGCOLOR, (moveLeft, moveTop, TILESIZE, TILESIZE))\n\n\tfor i in range(0, TILESIZE, animationSpeed):\n\t\t# Animate the tile sliding over\n\t\tcheckForQuit()\n\t\tDISPLAYSURF.blit(baseSurf, (0,0))\n\n\t\tif direction == UP:\n\t\t\tdrawTile(movex, movey, board[movex][movey], 0, -i)\n\t\telif direction == DOWN:\n\t\t\tdrawTile(movex, movey, board[movex][movey], 0, i)\n\t\telif direction == LEFT:\n\t\t\tdrawTile(movex, movey, board[movex][movey], -i, 0)\n\t\telif direction == RIGHT:\n\t\t\tdrawTile(movex, movey, board[movex][movey], i, 0)\n\n\t\tpygame.display.update()\n\t\tFPSCLOCK.tick(FPS)\n\ndef resetAnimation(board, allMoves):\n\trevAllMoves = allMoves[::-1] # Get a reversed list of moves\n\n\tfor move in revAllMoves:\n\t\tcheckForQuit()\n\t\topposite = oppositeMove(move)\n\t\tslideAnimation(board, opposite, \"\", int(TILESIZE / 2))\n\t\tmakeMove(board, opposite)\n\ndef generateNewPuzzle(numSlides):\n\tsequence = []\n\tboard = getStartingBoard()\n\tdrawBoard = (board, None)\n\tpygame.display.update()\n\tpygame.time.wait(500)\n\tlastMove = None\n\n\tfor i in range(numSlides):\n\t\tmove = getRandomMove(board, lastMove)\n\t\tslideAnimation(board, move, \"Generating New Puzzle...\", int(TILESIZE / 2))\n\t\tmakeMove(board, move)\n\t\tsequence.append(move)\n\t\tlastMove = move\n\n\treturn (board, sequence)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"prcastro/slide-puzzle","sub_path":"slidepuzzle.py","file_name":"slidepuzzle.py","file_ext":"py","file_size_in_byte":11151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15771478653","text":"import sys\nopen_gl_ok=False\n\ntry:\n\tfrom OpenGL.GLU import *\n\tfrom OpenGL.GL import *\n\tfrom PyQt5 import QtOpenGL\n\tfrom PyQt5.QtOpenGL import QGLWidget\n\tfrom OpenGL.GLU import *\n\topen_gl_ok=True\nexcept:\n\n\tprint(\"opengl error \",sys.exc_info()[0])\n\nfrom PyQt5 import QtGui\nfrom PyQt5.QtGui import QScreen\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QMenu, QColorDialog, QAction\n\nimport os\n\n#path\nfrom cal_path import get_sim_path\n\n\n#epitaxy\nfrom epitaxy import epitaxy_get_epi\nfrom epitaxy import get_epi\n\n#qt\nfrom PyQt5.QtGui import QFont\nfrom PyQt5.QtGui import qRgba\n\nfrom PyQt5.QtGui import QPainter,QFont,QColor,QPen\nfrom PyQt5.QtGui import QPainterPath,QPolygonF\nfrom PyQt5.QtCore import QRectF,QPoint\n\nimport numpy as np\nfrom str2bool import str2bool\n\nimport random\n\nfrom math import fabs\nfrom triangle import vec\nfrom gl_fallback import gl_fallback\n\nimport copy\n\nfrom thumb import thumb_nail_gen\n\nfrom gl_views import gl_views\nfrom gl_views import gl_view_options\n\nfrom gl_lib import val_to_rgb\n\nfrom gl_mesh import gl_mesh\n\n\nfrom gl_object_editor import gl_object_editor\n\nfrom gl_cords import gl_cords\n\nfrom gl_shape_layer import shape_layer\nfrom gl_base_widget import gl_base_widget\n\nfrom gl_main_menu import gl_main_menu\n\nfrom gl_list import gl_objects\n\n#from file_watch import get_watch\nfrom gl_input import gl_input\n\nfrom gl_text import gl_text\nfrom gl_image import gl_image\n\nfrom 
gl_lib_ray import gl_lib_ray\nfrom gl_contacts import gl_contacts\nfrom gl_graph import gl_graph\nfrom gl_draw_light_profile import gl_draw_light_profile\nfrom gl_color import gl_color\nfrom gl_shapes import gl_shapes\nfrom gl_base_object import gl_base_object\nfrom gl_render_obj import gl_render_obj\nfrom gl_photons import gl_photons\nfrom gl_scale import gl_scale\nfrom PyQt5.QtCore import pyqtSignal\nfrom gpvdm_json import gpvdm_data\nfrom gl_toolbar import gl_toolbar\nfrom gl_default_shapes import gl_default_shapes\n\nclass open_gl_light:\n\tdef __init__(self):\n\t\tself.xyz=[0, 5, -10, 1.0]\n\t\tself.number=GL_LIGHT0\n\nif open_gl_ok==True:\t\t\n\tclass glWidget(QGLWidget,shape_layer, gl_lib_ray,gl_objects, gl_text,gl_views,gl_mesh,gl_object_editor,gl_cords,gl_base_widget,gl_main_menu,gl_input, gl_contacts, gl_draw_light_profile, gl_graph, gl_color, gl_shapes, gl_render_obj, gl_photons, gl_toolbar):\n\n\n\t\ttext_output = pyqtSignal(str)\n\n\t\tdef __init__(self, parent):\n\t\t\tQGLWidget.__init__(self, parent)\n\n\t\t\tgl_base_widget.__init__(self)\n\t\t\tgl_objects.__init__(self)\n\t\t\tgl_lib_ray.__init__(self)\n\t\t\tgl_text.__init__(self)\n\t\t\tgl_color.__init__(self)\n\t\t\tgl_render_obj.__init__(self)\n\t\t\tgl_input.__init__(self)\n\t\t\tgl_graph.__init__(self)\n\t\t\tgl_toolbar.__init__(self)\n\t\t\tgl_views.__init__(self)\n\t\t\tself.lit=True\n\t\t\tself.setAutoBufferSwap(False)\n\t\t\tself.gl_image=gl_image()\n\t\t\tself.lights=[]\n\t\t\tself.view_options=gl_view_options()\n\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[0, 5, -10]\n\t\t\tl.number=GL_LIGHT0\n\t\t\tself.lights.append(l)\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[0, -5, -10]\n\t\t\tl.number=GL_LIGHT1\n\t\t\tself.lights.append(l)\n\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[0, 5, 10]\n\t\t\tl.number=GL_LIGHT2\n\t\t\tself.lights.append(l)\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[0, -5, 10]\n\t\t\tl.number=GL_LIGHT3\n\t\t\tself.lights.append(l)\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[-10, -5, 0]\n\t\t\tl.number=GL_LIGHT4\n\t\t\tself.lights.append(l)\n\n\t\t\tl=open_gl_light()\n\t\t\tl.xyz=[10, -5, 0]\n\t\t\tl.number=GL_LIGHT5\n\t\t\tself.lights.append(l)\n\n\n\n\t\t\tself.failed=True\n\t\t\tself.graph_path=None\n\t\t\tself.scene_built=False\n\t\t\t#view pos\n\n\t\t\tself.dy_layer_offset=0.05\n\n\t\t\tself.draw_electrical_mesh=False\n\t\t\tself.enable_draw_ray_mesh=False\n\t\t\tself.plot_graph=False\n\t\t\tself.plot_circuit=False\n\n\t\t\tself.scale=gl_scale()\n\t\t\tself.font = QFont(\"Arial\")\n\t\t\tself.font.setPointSize(15)\n\t\t\tself.called=False\n\t\t\tself.enable_light_profile=True\n\t\t\tself.build_main_menu()\n\t\t\tself.pre_built_scene=None\n\t\t\tself.open_gl_working=True\n\t\t\tself.default_shapes=gl_default_shapes()\n\n\t\t#def bix_axis(self):\n\t\t#\tfor xx in range(0,10):\n\t\t#\t\tself.box(0+xx,0,0,0.5,0.5,0.5,1.0,0,0,0.5)\n\n\t\t#\tfor yy in range(0,10):\n\t\t#\t\tself.box(0,0+yy,0,0.5,0.5,0.5,1.0,0,0,0.5)\n\n\n\t\t#\tfor zz in range(0,10):\n\t\t#\t\tself.box(0,0,0+zz,0.5,0.5,0.5,0.0,0,1,0.5)\n\n\t\t#this may not be the best place for this\n\t\tdef epitaxy_enforce_rules(self):\n\t\t\ty_pos=0.0\n\t\t\tepi=get_epi()\n\t\t\tfor l in epi.layers:\n\t\t\t\tl.shape_dos.enabled=False\n\t\t\t\tif l.layer_type==\"active\":\n\t\t\t\t\tl.shape_dos.enabled=True\n\t\t\t\tl.x0=0.0\n\t\t\t\tl.z0=0.0\n\t\t\t\tl.y0=y_pos\n\n\t\t\t\tl.dx=gpvdm_data().mesh.mesh_x.get_len()\n\t\t\t\tl.dz=gpvdm_data().mesh.mesh_z.get_len()\n\t\t\t\ty_pos=y_pos+l.dy\n\n\t\tdef 
draw_device2(self,x,z):\n\t\t\tepi=get_epi()\n\t\t\tcontact_layers=epi.contacts.get_layers_with_contacts()\n\t\t\ttop_contact_layer=epi.get_top_contact_layer()\n\t\t\tbtm_contact_layer=epi.get_btm_contact_layer()\n\n\t\t\tl=0\n\t\t\tbtm_layer=len(epitaxy_get_epi())-1\n\n\t\t\tfor obj in gpvdm_data().world.world_data.segments:\n\t\t\t\tself.shape_to_screen(obj)\n\n\t\t\tself.epitaxy_enforce_rules()\n\n\t\t\tfor obj in epi.layers:\n\t\t\t\tname=obj.shape_name\n\t\t\t\tdisplay_name=name\n\t\t\t\t#alpha=obj.alpha\n\t\t\t\t#if len(obj.shapes)>0:\n\t\t\t\t\n\n\t\t\t\tcontact_layer=False\n\t\t\t\tif l==top_contact_layer:\n\t\t\t\t\tcontact_layer=True\n\n\t\t\t\tif l==btm_contact_layer:\n\t\t\t\t\tcontact_layer=True\n\n\t\t\t\t#print(l,top_contact_layer,btm_contact_layer)\n\t\t\t\t#print(\">>>>\",l,contact_layer,contact_layers)\n#\t\t\t\tprint(obj.shape_name)\n\t\t\t\tif contact_layer==False:\n\t\t\t\t\t#print(obj.id,name,obj.y0,obj.dy)\n\t\t\t\t\tself.shape_to_screen(obj,epitaxy=True)\t\t\t\n\n\t\t\t\tif obj.layer_type==\"active\":\n\t\t\t\t\tif self.view_options.text==True:\n\t\t\t\t\t\to=gl_base_object()\n\t\t\t\t\t\txyz=vec()\n\t\t\t\t\t\txyz.x=self.scale.project_m2screen_x(gpvdm_data().mesh.mesh_x.get_len())+0.1\n\t\t\t\t\t\txyz.y=self.scale.project_m2screen_y(obj.y0)\n\t\t\t\t\t\txyz.z=z\t\t\t\t\t\t\n\t\t\t\t\t\to.xyz.append(xyz)\n\n\t\t\t\t\t\to.dxyz.x=0.1\n\t\t\t\t\t\to.dxyz.y=obj.dy*self.scale.y_mul\n\n\t\t\t\t\t\to.r=0.0\n\t\t\t\t\t\to.g=0.0\n\t\t\t\t\t\to.b=1.0\n\n\t\t\t\t\t\to.type=\"plane\"\n\n\t\t\t\t\t\tdisplay_name=display_name+\" (\"+_(\"active\")+\")\"\n\t\t\t\t\t\tself.gl_objects_add(o)\n\n\t\t\t\tif self.view_options.text==True:\n\t\t\t\t\tif self.views[0].zoom<40:\n\t\t\t\t\t\tif self.views[0].enabled==True:\n\t\t\t\t\t\t\to=gl_base_object()\n\t\t\t\t\t\t\to.r=1.0\n\t\t\t\t\t\t\to.g=1.0\n\t\t\t\t\t\t\to.b=1.0\n\t\t\t\t\t\t\txyz=vec()\n\t\t\t\t\t\t\txyz.x=self.scale.project_m2screen_x(gpvdm_data().mesh.mesh_x.get_len())+0.2\n\t\t\t\t\t\t\txyz.y=self.scale.project_m2screen_y(obj.y0)\n\t\t\t\t\t\t\txyz.z=z+(len(epi.layers)-l)*0.1\n\t\t\t\t\t\t\to.xyz.append(xyz)\n\t\t\t\t\t\t\to.id=[\"text\"]\n\t\t\t\t\t\t\to.type=\"text\"\n\t\t\t\t\t\t\to.text=display_name\n\t\t\t\t\t\t\tself.gl_objects_add(o)\n\n\t\t\t\tl=l+1\n\n\n\t\tdef render(self):\n\t\t\tself.makeCurrent()\n\t\t\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\t\t\tglClearColor(self.view_options.bg_color[0], self.view_options.bg_color[1], self.view_options.bg_color[2], 0.5)\n\t\t\tglPolygonMode(GL_FRONT, GL_FILL);\n\t\t\tfor v in self.views:\n\t\t\t\tif v.enabled==True:\n\t\t\t\t\tw=int(self.width()*v.window_w)\n\t\t\t\t\th=int(self.height()*v.window_w)\n\t\t\t\t\tx=int(self.width()*v.window_x)\n\t\t\t\t\ty=int(self.height()*v.window_y)\n\t\t\t\t\tglViewport(x, y, w, h)\n\t\t\t\t\tv.projection = glGetDoublev(GL_PROJECTION_MATRIX)\n\t\t\t\t\tv.modelview = glGetDoublev(GL_MODELVIEW_MATRIX)\n\t\t\t\t\tv.viewport = glGetIntegerv(GL_VIEWPORT)\n\t\t\t\t\tself.render_view(v)\n\n\t\tdef render_view(self,view):\n\t\t\tdata=gpvdm_data()\n\n\t\t\tx=self.scale.project_m2screen_x(0)\n\t\t\ty=0.0#project_m2screen_y(0)\n\t\t\tz=self.scale.project_m2screen_z(0)\n\n\t\t\tself.dos_start=-1\n\t\t\tself.dos_stop=-1\n\n\t\t\tself.emission=False\n\t\t\tself.ray_model=data.ray.segments[0].config.ray_auto_run\n\n\t\t\tlines=[]\n\t\t\tepi=get_epi()\n\t\t\t\t\t\n\n\t\t\tglLoadIdentity()\n\t\t\tglScalef(1.0, -1.0, -1.0) \n\n\t\t\tglTranslatef(view.x_pos, view.y_pos, view.zoom) # Move Into The Screen\n\t\t\t\n\t\t\tglRotatef(view.xRot, 1.0, 0.0, 
0.0)\n\t\t\tglRotatef(view.yRot, 0.0, 1.0, 0.0)\n\t\t\tglRotatef(view.zRot, 0.0, 0.0, 1.0)\n\n\t\t\tglColor3f( 1.0, 1.5, 0.0 )\n\n\n\t\t\tlines=[]\n\n\t\t\tself.pos=0.0\n\t\t\tif self.view_options.render_cords==True:\n\t\t\t\tself.draw_cords()\n\n\t\t\tif self.enable_draw_ray_mesh==True:\n\t\t\t\tself.draw_ray_mesh()\n\t\t\t\n\t\t\tif self.view_options.optical_mode==True:\n\t\t\t\tself.draw_mode()\n\n\t\t\tif self.scene_built==False:\n\t\t\t\tself.build_scene()\n\n\t\t\tif self.plot_graph==True:\n\t\t\t\tself.draw_graph()\n\n\n\t\t\tif self.view_options.render_photons==True:\n\t\t\t\tself.draw_photons(x,z)\n\t\t\tif self.view_options.show_world_box==True:\n\t\t\t\tself.world_box()\n\n\t\t\tself.gl_objects_render()\n\n\t\t\tif view.zoom>self.view_options.stars_distance:\n\t\t\t\tself.draw_stars()\n\n\n\t\tdef do_draw(self):\n\t\t\t#print(\"do_draw\",self)\n\t\t\tself.makeCurrent()\n\t\t\tself.render()\n\t\t\tself.swapBuffers()\n\n\t\tdef paintGL(self):\n\t\t\t#print(\"paintGL\",self)\n\t\t\tself.makeCurrent()\n\t\t\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\t\t\tglLoadIdentity()\n\t\t\tglScalef(-1.0, 1.0, -1.0) \n\n\t\t\tif self.failed==False:\n\t\t\t\tself.do_draw()\n\n\n\n\t\tdef load_data(self):\n\t\t\tlines=[]\n\n\t\t\tdata=gpvdm_data()\n\t\t\tself.dump_1d_slice_xpos=int(data.dump.dump_1d_slice_xpos)\n\t\t\tself.dump_1d_slice_zpos=int(data.dump.dump_1d_slice_zpos)\n\n\t\t\tself.dump_verbose_electrical_solver_results=str2bool(data.dump.dump_verbose_electrical_solver_results)\n\t\t\ttry:\n\t\t\t\tself.suns=float(data.light.Psun)\n\t\t\texcept:\n\t\t\t\tself.suns=0.0\n\n\t\t\tself.x_len=gpvdm_data().mesh.mesh_x.get_len()\n\t\t\tif os.path.isdir(os.path.join(os.path.join(get_sim_path(),\"ray_trace\")))==True:\n\t\t\t\tfor v in self.views:\n\t\t\t\t\tv.render_photons=False\n\n\n\t\t#This will rebuild the scene from scratch\n\t\tdef rebuild_scene(self):\n\n\t\t\tself.gl_objects_clear()\n\t\t\tself.menu_update()\n\t\t\tself.text_clear_lib()\n\t\t\tdata=gpvdm_data()\n\t\t\tif data.triangles_loaded==False:\n\t\t\t\tdata.load_triagles()\n\n\t\t\tif self.scale.world_min==None:\n\t\t\t\tself.scale.set_m2screen()\n\n\t\t\tx=self.scale.project_m2screen_x(0)\n\t\t\tz=self.scale.project_m2screen_z(0)\n\n\t\t\tif self.view_options.draw_rays==True:\n\t\t\t\tself.draw_rays(self.ray_file)\n\n\t\t\tif self.view_options.enable_draw_light_source==True:\n\t\t\t\tfor source in data.light_sources.lights.segments:\n\t\t\t\t\tif source.light_illuminate_from==\"xyz\":\n\t\t\t\t\t\tpoint_x=float(source.x0)\n\t\t\t\t\t\tpoint_y=float(source.y0)\n\t\t\t\t\t\tpoint_z=float(source.z0)\n\t\t\t\t\t\tif point_x==-1.0:\n\t\t\t\t\t\t\tpoint_x=0.0\n\t\t\t\t\t\t\tpoint_y=0.0\n\t\t\t\t\t\t\tpoint_z=0.0\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpoint_x=self.scale.project_m2screen_x(point_x)\n\t\t\t\t\t\t\tpoint_y=self.scale.project_m2screen_y(point_y)\n\t\t\t\t\t\t\tpoint_z=self.scale.project_m2screen_z(point_z)\n\t\t\t\t\t\t#print(point_x,point_y)\n\t\t\t\t\t\ta=gl_base_object()\n\t\t\t\t\t\ta.id=[source.id]\n\t\t\t\t\t\ta.type=\"arrow\"\n\t\t\t\t\t\txyz=vec()\n\t\t\t\t\t\txyz.x=point_x\n\t\t\t\t\t\txyz.y=point_y\n\t\t\t\t\t\txyz.z=point_z\n\t\t\t\t\t\ta.xyz.append(xyz)\n\n\t\t\t\t\t\ta.dxyz.x=0.5\n\t\t\t\t\t\ta.dxyz.y=0.5\n\t\t\t\t\t\ta.dxyz.z=0.5\n\t\t\t\t\t\ta.r=0.0\n\t\t\t\t\t\ta.g=1.0\n\t\t\t\t\t\ta.b=0.0\n\t\t\t\t\t\t\n\t\t\t\t\t\ta.rotate_x=source.rotate_x\n\t\t\t\t\t\ta.rotate_y=source.rotate_y\n\n\t\t\t\t\t\ta.moveable=True\n\t\t\t\t\t\ta.selectable=True\n\t\t\t\t\t\tself.gl_objects_add(a)\n\n\t\t\tif 
self.draw_electrical_mesh==True:\n\t\t\t\tself.draw_mesh()\n\n\t\t\telif self.view_options.draw_device==True:\n\t\t\t\tself.draw_device2(x,z)\n\t\t\t\tself.draw_contacts()\n\n\t\t\tif self.enable_light_profile==True:\n\t\t\t\tself.draw_light_profile()\n\n\t\t\tif self.view_options.render_grid==True:\n\t\t\t\tself.gl_objects_add_grid(-18,20,self.scale.project_m2screen_y(self.scale.world_max.y),None,-18,20)\n\n\t\t\tfor d in data.detectors.segments:\n\t\t\t\tif d.config.viewpoint_enabled==True:\n\t\t\t\t\tworld_dx=(self.scale.world_max.x-self.scale.world_min.x)\n\t\t\t\t\tx0=self.scale.project_m2screen_x(self.scale.world_min.x)+world_dx*self.scale.x_mul*d.config.viewpoint_x0\n\t\t\t\t\tx1=x0+world_dx*self.scale.x_mul*d.config.viewpoint_dx\n\n\t\t\t\t\ty0=self.scale.project_m2screen_y(self.scale.world_min.y)+(self.scale.world_max.y-self.scale.world_min.y)*self.scale.y_mul*d.config.viewpoint_y0\n\n\t\t\t\t\tworld_dz=(self.scale.world_max.z-self.scale.world_min.z)\n\t\t\t\t\tz0=self.scale.project_m2screen_z(self.scale.world_min.z)+world_dz*self.scale.z_mul*d.config.viewpoint_z0\n\t\t\t\t\tz1=z0+world_dz*self.scale.z_mul*d.config.viewpoint_dz\n\n\t\t\t\t\tdx=(x1-x0)/d.config.viewpoint_nx\n\t\t\t\t\tdz=(z1-z0)/d.config.viewpoint_nz\n\n\t\t\t\t\tself.gl_objects_add_grid(x0,x1,y0,None,z0,z1,color=[0.8,0.0,0.8,1.0],dx=dx,dz=dz)\n\n\t\t\t\t\tif self.view_options.text==True:\n\t\t\t\t\t\tif self.views[0].zoom<40:\n\t\t\t\t\t\t\to=gl_base_object()\n\t\t\t\t\t\t\to.r=1.0\n\t\t\t\t\t\t\to.g=1.0\n\t\t\t\t\t\t\to.b=1.0\n\t\t\t\t\t\t\txyz=vec()\n\t\t\t\t\t\t\txyz.x=x1+0.2\n\t\t\t\t\t\t\txyz.y=y0\n\t\t\t\t\t\t\txyz.z=z0\n\t\t\t\t\t\t\to.xyz.append(xyz)\n\t\t\t\t\t\t\to.id=[\"text\"]\n\t\t\t\t\t\t\to.type=\"text\"\n\t\t\t\t\t\t\to.text=\"Detector: \"+d.english_name\n\t\t\t\t\t\t\tself.gl_objects_add(o)\n\n\t\t\tif data.sim.simmode.endswith(\"fdtd\")==True:\n\t\t\t\tif self.view_options.render_fdtd_grid==True:\n\t\t\t\t\tfor fdtd in data.fdtd.segments:\n\t\t\t\t\t\tif fdtd.fdtd_xzy==\"zy\":\n\t\t\t\t\t\t\tworld_dy=(self.scale.world_max.y-self.scale.world_min.y)\n\t\t\t\t\t\t\ty0=self.scale.project_m2screen_y(self.scale.world_min.y)\n\t\t\t\t\t\t\ty1=y0+world_dy*self.scale.y_mul\n\n\t\t\t\t\t\t\tx0=0.0#self.scale.project_m2screen_y(self.scale.world_min.y)\n\n\t\t\t\t\t\t\tworld_dz=(self.scale.world_max.z-self.scale.world_min.z)\n\t\t\t\t\t\t\tz0=self.scale.project_m2screen_z(self.scale.world_min.z)\n\t\t\t\t\t\t\tz1=z0+world_dz*self.scale.z_mul\n\n\t\t\t\t\t\t\tdy=(y1-y0)/float(fdtd.fdtd_ylen)\n\t\t\t\t\t\t\tdz=(z1-z0)/float(fdtd.fdtd_zlen)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\to=self.gl_objects_add_grid(x0,None,y0,y1,z0,z1,color=[0.8,0.0,0.8,1.0],dy=dy,dz=dz,direction=\"zy\")\n\t\t\t\t\t\telif fdtd.fdtd_xzy==\"xy\":\n\t\t\t\t\t\t\tworld_dy=(self.scale.world_max.y-self.scale.world_min.y)\n\t\t\t\t\t\t\ty0=self.scale.project_m2screen_y(self.scale.world_min.y)\n\t\t\t\t\t\t\ty1=y0+world_dy*self.scale.y_mul\n\n\t\t\t\t\t\t\tworld_dz=(self.scale.world_max.z-self.scale.world_min.z)\n\t\t\t\t\t\t\tz0=self.scale.project_m2screen_z(self.scale.world_min.z)+world_dz*self.scale.z_mul/2.0\n\n\t\t\t\t\t\t\tworld_dx=(self.scale.world_max.x-self.scale.world_min.x)\n\t\t\t\t\t\t\tx0=self.scale.project_m2screen_x(self.scale.world_min.x)\n\t\t\t\t\t\t\tx1=x0+world_dx*self.scale.x_mul\n\n\t\t\t\t\t\t\tdy=(y1-y0)/float(fdtd.fdtd_ylen)\n\t\t\t\t\t\t\tdx=(x1-x0)/float(fdtd.fdtd_xlen)\n\n\t\t\t\t\t\t\to=self.gl_objects_add_grid(x0,x1,y0,y1,z0,None,color=[0.8,0.0,0.8,1.0],dy=dy,dx=dx,direction=\"xy\")\n\t\t\t\t\t\telif 
fdtd.fdtd_xzy==\"zx\":\n\t\t\t\t\t\t\tworld_dy=(self.scale.world_max.y-self.scale.world_min.y)\n\t\t\t\t\t\t\ty0=self.scale.project_m2screen_y(self.scale.world_min.y)+world_dy*self.scale.y_mul/2.0\n\n\t\t\t\t\t\t\tworld_dz=(self.scale.world_max.z-self.scale.world_min.z)\n\t\t\t\t\t\t\tz0=self.scale.project_m2screen_z(self.scale.world_min.z)\n\t\t\t\t\t\t\tz1=z0+world_dz*self.scale.z_mul\n\n\t\t\t\t\t\t\tworld_dx=(self.scale.world_max.x-self.scale.world_min.x)\n\t\t\t\t\t\t\tx0=self.scale.project_m2screen_x(self.scale.world_min.x)\n\t\t\t\t\t\t\tx1=x0+world_dx*self.scale.x_mul\n\n\n\t\t\t\t\t\t\tdx=(x1-x0)/float(fdtd.fdtd_xlen)\n\t\t\t\t\t\t\tdz=(z1-z0)/float(fdtd.fdtd_zlen)\n\n\t\t\t\t\t\t\to=self.gl_objects_add_grid(x0,x1,y0,None,z0,z1,color=[0.8,0.0,0.8,1.0],dz=dz,dx=dx,direction=\"zx\")\n\t\t\tif 1==0:\n\t\t\t\tfor l in self.lights:\n\t\t\t\t\ta=gl_base_object()\n\t\t\t\t\ta.id=[\"rod\"]\n\t\t\t\t\ta.type=\"box\"\n\t\t\t\t\txyz=vec()\n\t\t\t\t\txyz.x=l.xyz[0]\n\t\t\t\t\txyz.y=l.xyz[1]\n\t\t\t\t\txyz.z=l.xyz[2]\n\t\t\t\t\ta.xyz.append(xyz)\n\n\t\t\t\t\ta.dxyz.x=0.4\n\t\t\t\t\ta.dxyz.y=0.4\n\t\t\t\t\ta.dxyz.z=0.4\n\t\t\t\t\ta.alpha=0.5\n\t\t\t\t\ta.r=1.0\n\t\t\t\t\ta.g=0.0\n\t\t\t\t\ta.b=0.0\n\t\t\t\t\tself.gl_objects_add(a)\n\n\t\t\tif self.pre_built_scene!=None:\n\t\t\t\tself.gl_objects_load(self.pre_built_scene)\n\n\t\t\t#print(\"rebuild\")\n\n\n\t\tdef build_scene(self):\n\t\t\tself.scene_built=True\n\t\t\tself.load_data()\n\t\t\tself.update()\n\t\t\tself.rebuild_scene()\n\n\t\tdef force_redraw(self,level=\"rebuild\"):\n\t\t\tif level==\"reload_rebuild\":\n\t\t\t\tdata=gpvdm_data()\n\t\t\t\tdata.load_triagles()\n\t\t\t\tself.build_scene()\n\t\t\t\tself.do_draw()\n\t\t\t\tself.menu_update()\n\t\t\telif level==\"rebuild\":\n\t\t\t\tself.build_scene()\n\t\t\t\tself.do_draw()\n\t\t\t\tself.menu_update()\n\t\t\telif level==\"no_rebuild\":\n\t\t\t\tself.update()\n\t\tdef resizeEvent(self,event):\n\t\t\tif self.failed==False:\n\t\t\t\t#glClearDepth(1.0) \n\t\t\t\t#glDepthFunc(GL_LESS)\n\t\t\t\t#glEnable(GL_DEPTH_TEST)\n\t\t\t\t#glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\n\t\t\t\t#glEnable(GL_BLEND);\n\t\t\t\t#glShadeModel(GL_SMOOTH)\n\t\t\t\tglViewport(0, 0, self.width(), self.height()+100)\n\t\t\t\tglMatrixMode(GL_PROJECTION)\n\t\t\t\tglLoadIdentity()\n\t\t\t\t#glScalef(1.0, 1.0, -1.0) \n\t\t\t\tgluPerspective(45.0,float(self.width()) / float(self.height()+100),0.1, 1000.0)\n\t\t\t\tglMatrixMode(GL_MODELVIEW)\n\n\t\tdef initializeGL(self):\n\t\t\tself.load_data()\n\t\t\t#try:\n\t\t\tglClearDepth(5.0) \n\t\t\tglDepthFunc(GL_LESS)\n\t\t\tglEnable(GL_DEPTH_TEST)\n\t\t\tglEnable(GL_BLEND)\n\t\t\tglBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);\t#GL_ONE\n\t\t\t\n\t\t\tglEnableClientState(GL_VERTEX_ARRAY)\n\t\t\tglShadeModel(GL_SMOOTH)\n\t\t\tglColorMaterial(GL_FRONT, GL_DIFFUSE)\t\t#This means we can set the color of a material using glColor and not glMaterialfv\n\t\t\tglEnable(GL_COLOR_MATERIAL)\t\t\t\t\t\t\t#This means we can set the color of a material using glColor and not glMaterialfv\n\n\n\t\t\t\t#lightZeroPosition = [0, 0, -10, 1.0]\n\t\t\t\t#lightZeroColor = [1.0, 1.0, 1.0, 1.0]\n\t\t\t\t#glLightfv(GL_LIGHT1, GL_POSITION, lightZeroPosition)\n\t\t\t\t#glLightfv(GL_LIGHT1, GL_DIFFUSE, lightZeroColor)\n\t\t\t\t#glLightf(GL_LIGHT1, GL_CONSTANT_ATTENUATION, 0.1)\n\t\t\t\t#glLightf(GL_LIGHT1, GL_LINEAR_ATTENUATION, 0.05)\n\t\t\t\t#glEnable(GL_LIGHT1)\n\n\t\t\t\t#lightZeroPosition = [10, 10, 0, 1.0]\n\t\t\t\t#lightZeroColor = [1.0, 1.0, 1.0, 1.0]\n\t\t\t\t#glLightfv(GL_LIGHT2, 
GL_POSITION, lightZeroPosition)\n\t\t\t\t#glLightfv(GL_LIGHT2, GL_DIFFUSE, lightZeroColor)\n\t\t\t\t#glLightf(GL_LIGHT2, GL_CONSTANT_ATTENUATION, 0.1)\n\t\t\t\t#glLightf(GL_LIGHT2, GL_LINEAR_ATTENUATION, 0.05)\n\t\t\t\t#glEnable(GL_LIGHT2)\n\n#GL_DIFFUSE\n\n\t\t\t#glEnable(GL_FOG);\n\t\t\t#fogColor = [0.5, 0.5, 0.5, 1.0];\n\n\t\t\t#glFogi (GL_FOG_MODE, GL_EXP);\n\t\t\t#glFogfv (GL_FOG_COLOR, fogColor);\n\t\t\t#glFogf (GL_FOG_DENSITY, 0.35);\n\t\t\t#glHint (GL_FOG_HINT, GL_DONT_CARE);\n\t\t\t#glFogf (GL_FOG_START, 1.0);\n\t\t\t#glFogf (GL_FOG_END, 5.0);\n\t\t\t#self.tex = self.read_texture('/home/rod/images/image.jpg')\n\t\t\tglViewport(0, 0, self.width(), self.height()+100)\n\t\t\tglMatrixMode(GL_PROJECTION)\n\t\t\tglLoadIdentity()\n\t\t\t#glScalef(1.0, 1.0, -1.0) \n\t\t\tgluPerspective(45.0,float(self.width()) / float(self.height()+100),0.001, 1000.0) \n\t\t\tglMatrixMode(GL_MODELVIEW)\n\t\t\tglEnable( GL_POLYGON_SMOOTH )\n\t\t\t#glEnable(GL_MULTISAMPLE)\n\t\t\t#self.resizeEvent.connect(self.resize)\n\t\t\n\t\t\tif self.lit==True:\n\t\t\t\tfor l in self.lights:\n\t\t\t\t\tglEnable(GL_LIGHTING)\n\t\t\t\t\tlightZeroColor = [1.0, 1.0, 1.0, 1.0]\n\t\t\t\t\t#print(l.number,GL_LIGHT1)\n\t\t\t\t\tglLightfv(l.number, GL_POSITION, [l.xyz[0],l.xyz[1],l.xyz[2] ,1.0])\n\t\t\t\t\tglLightfv(l.number, GL_AMBIENT, [0.2,0.2,0.2,1.0 ])\n\t\t\t\t\tglLightfv(l.number, GL_DIFFUSE, [0.8,0.8,0.8,1.0 ])\n\t\t\t\t\tglLightfv(l.number, GL_SPECULAR, [1.0,1.0,1.0,1.0 ])\n\t\t\t\t\t#glLightfv(l .number, GL_SPOT_DIRECTION, [ 1,1,1]);\n\t\t\t\t\t#glLightf(l.number, GL_CONSTANT_ATTENUATION, 0.1)\n\t\t\t\t\t#glLightf(l.number, GL_LINEAR_ATTENUATION, 0.05)\n\t\t\t\t\tglEnable(l.number)\n\n\t\t\tself.failed=False\n\n\t\t\tget_epi().add_callback(self.force_redraw)\n\n\t\tdef boom(self):\n\t\t\tprint(\"oh\")\nelse:\n\tclass glWidget(gl_fallback, gl_toolbar,gl_views):\n\n\t\tdef __init__(self, parent):\n\t\t\tQWidget.__init__(self)\n\t\t\tgl_fallback.__init__(self)\n\t\t\tgl_toolbar.__init__(self)\n\t\t\tself.views=[]\n\t\t\tself.scale=gl_scale()\n\t\t\tself.failed=True\n\t\t\tself.open_gl_working=False\n\n\t\tdef rebuild_scene(self):\n\t\t\tpass\n\n\t\tdef do_draw(self):\n\t\t\tpass\n","repo_name":"roderickmackenzie/gpvdm","sub_path":"gpvdm_gui/gui/gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":18583,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"30"} +{"seq_id":"17389911935","text":"import unittest\nimport sys, os\npath = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0,path+'/../')\n\nfrom nessie.dataRequest import DataRequest\nfrom nessie.client import Client\nfrom nessie.models.address import Address\nfrom nessie.utils.exceptions import NessieApiError\n\nclass TestDepositRequests(unittest.TestCase):\n\n def setUp(self):\n # implicitly get NESSIE_API_KEY from env\n self.client = Client()\n customer_factory = self.client.customer\n self.customer_id = customer_factory.create_customer(\"Test\", \"Customer\", Address(\"123\", \"Test Street\", \"TestCity\", \"VA\", \"12345\")).customer_id\n account_factory = self.client.account\n self.account_id = account_factory.create_customer_account(self.customer_id, \"Savings\", \"Test Account\", 0, 100)._id\n self.deposit_factory = self.client.deposit\n self.deposit_id = self.deposit_factory.create_deposit(self.account_id, 'balance', 88).to_dict()['id']\n\n def test_create_deposit_all_fields(self):\n print(\"test_create_deposit_all_fields\")\n result = self.deposit_factory.create_deposit(self.account_id, 
'balance', 73, \"2018-02-01\", 'pending', \"test\").to_dict()\n self.assertEqual(result['transaction_date'], '2018-02-01')\n self.assertEqual(result['payee_id'], self.account_id)\n self.assertEqual(result['amount'], 73)\n\n def test_create_deposit_min_fields(self):\n print(\"test_create_deposit_min_fields\")\n result = self.deposit_factory.create_deposit(self.account_id, 'balance', 53).to_dict()\n self.assertEqual(result['payee_id'], self.account_id)\n self.assertEqual(result['amount'], 53)\n self.deposit_id2 = result['id']\n\n def test_create_deposit_optional_fields(self):\n print(\"test_create_deposit_optional_fields\")\n result = self.deposit_factory.create_deposit(self.account_id, 'balance', 6, description='testing').to_dict()\n self.assertEqual(result['description'], 'testing')\n self.assertEqual(result['payee_id'], self.account_id)\n self.assertEqual(result['amount'], 6)\n\n def test_create_deposit_bad_account(self):\n print(\"test_create_deposit_bad_account\")\n self.assertRaises(NessieApiError, self.deposit_factory.create_deposit, \"59fb2c49b390353c9512\", 'balance', 6)\n\n def test_get_deposit(self):\n print(\"test_get_deposit\")\n result = self.deposit_factory.get_deposit(self.deposit_id).to_dict()\n self.assertEqual(result['amount'], 88)\n\n def test_get_deposit_fail(self):\n print(\"test_get_deposit_fail\")\n self.assertRaises(NessieApiError, self.deposit_factory.get_deposit, \"\")\n\n def test_get_account_deposits(self):\n print(\"test_get_account_deposits\")\n result = self.deposit_factory.get_account_deposits(self.account_id)\n\n def test_get_account_deposits_fail(self):\n print(\"test_get_account_deposits_fail\")\n self.assertRaises(NessieApiError, self.deposit_factory.get_account_deposits, \"\")\n\n def test_update_deposit_all_fields(self):\n print(\"test_update_deposit_all_fields\")\n result = self.deposit_factory.update_deposit(self.deposit_id, 'balance', 6, \"test Updated\")\n self.assertEqual(result['code'], 202)\n\n def test_update_deposit_fail(self):\n print(\"test_update_deposit_fail\")\n self.assertRaises(NessieApiError, self.deposit_factory.update_deposit, \"\", 'balance', 77)\n\n def test_delete_deposit(self):\n print(\"test_delete_deposit\")\n self.deposit_factory.delete_deposit(self.deposit_id)\n\n def test_delete_deposit_fail(self):\n print(\"test_delete_deposit_fail\")\n self.assertRaises(NessieApiError, self.deposit_factory.delete_deposit, \"\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nessieisreal/nessie-python-sdk","sub_path":"tests/test_deposit_requests.py","file_name":"test_deposit_requests.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"18373416197","text":"# gettyWrap.py\n\nimport requests\nimport json\nimport random\nimport os\n\n \n\napiKey = os.getenv(\"GETTY_API_KEY\")\n\ngettyRequestHeaders = {\n \"Api-Key\":apiKey\n}\n\n# I needed a way of easily manipulating the search url.\nclass GettySearchURL:\n \n def __init__(self, **searchParameters):\n self.baseURL = 'https://api.gettyimages.com/v3/search/images?'\n self.searchParameters = {}\n self.setSearchParameters(**searchParameters)\n \n # For tweeking the search parameters. 
All matching keys are\n # updated, non-matching keys are added to self.searchParameters,\n # and keys not specified in the argument are unchanged.\n def setSearchParameters(self, **searchParameters):\n for parameter in searchParameters:\n self.searchParameters[parameter] = searchParameters[parameter]\n \n # concatonates all the search parameters with getty specific syntax.\n def getUrl(self):\n \n urlAppendage = \"\"\n for parameter in self.searchParameters:\n if self.searchParameters[parameter]:\n if len(str(self.searchParameters[parameter])) >= 0:\n urlAppendage += parameter + \"=\" + str(self.searchParameters[parameter]) + \"&\"\n \n return self.baseURL + urlAppendage[0:len(urlAppendage)-1]\n \n # clones this instance in such a way that altering the clone will not alter this instance.\n def copy(self):\n new = GettySearchURL()\n for parameter in self.searchParameters:\n new.searchParameters[parameter] = self.searchParameters[parameter]\n return new\n \n# Does a test request and inspects the meta data to see how\n# many pages are available with this url.\ndef calculateMaxPageNumber(gettySearchURL):\n \n # for this gettySearchURL object as it is, the max page size is\n # given by results/pageSize\n copyUrl = gettySearchURL.copy()\n copyUrl.setSearchParameters(page=1)\n response = requests.get(copyUrl.getUrl(), headers = gettyRequestHeaders)\n try: return int(int(response.json()[\"result_count\"]) / int(copyUrl.searchParameters[\"page_size\"]))\n except: return 0\n \n \n# Tweeks some search parameters to match my theme, \n# makes the get request to getty with a random page number\n# and returns the image uri.\ndef pickImage(searchPhrase):\n \n # uses page size of one because when page size is one,\n # results_count and page count are the same. Makes it easy.\n url = GettySearchURL(\n number_of_people = \"none\",\n orientations = \"Vertical\",\n page = 1,\n page_size = 1,\n phrase = searchPhrase,\n sort_order = \"most_popular\"\n )\n \n maxPageNumber = calculateMaxPageNumber(url)\n randomPageNumber = random.randint(0, maxPageNumber)\n url.setSearchParameters(page=randomPageNumber)\n \n print(\"\\n\\n\"+url.getUrl()+\"\\n\\n\")\n \n response = requests.get(url.getUrl(), headers = gettyRequestHeaders)\n \n # try again if failure\n try: return response.json()[\"images\"][0][\"display_sizes\"][0][\"uri\"]\n except KeyError: return pickImage(searchPhrase)\n \n","repo_name":"CSUMB-SP17-CST438/stowne","sub_path":"gettyWrap.py","file_name":"gettyWrap.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19645328896","text":"\r\nimport pickle\r\nimport sys\r\n# import sklearnpip install -U scikit-learn scipy matplotlib\r\n\r\ndef main():\r\n Random_Forest = pickle.load(open('./Random_ForestN.pkl', 'rb'))\r\n employment = sys.argv[1]\r\n # mentallyIll = sys.argv[2]\r\n education = sys.argv[2] \r\n own_computer = sys.argv[3]\r\n hospitalized = sys.argv[4]\r\n hospitalized1 = sys.argv[5]\r\n legally_disabled = sys.argv[6]\r\n internet = sys.argv[7]\r\n live_parents = sys.argv[8]\r\n resume_gap = sys.argv[9]\r\n total_gap = sys.argv[10]\r\n income = sys.argv[11]\r\n unemployed = sys.argv[12]\r\n read_out_work_school = sys.argv[13]\r\n income_social_welfare = sys.argv[14]\r\n food_stamps = sys.argv[15]\r\n section_8 = sys.argv[16]\r\n hospitalized_times = sys.argv[17]\r\n lack_concentration = sys.argv[18]\r\n anxiety = sys.argv[19]\r\n depression = sys.argv[20]\r\n obsessive_thinking = sys.argv[21]\r\n 
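# remaining symptom flags and demographics (argv indices 22 to 27)\r\n    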
mood_swings = sys.argv[22]\r\n panic_attacks = sys.argv[23]\r\n compulsive_behavior = sys.argv[24]\r\n tiredness = sys.argv[25]\r\n age = sys.argv[26]\r\n gender = sys.argv[27]\r\n\r\n result = Random_Forest.predict([[employment,education,own_computer,hospitalized,hospitalized1,legally_disabled,internet,live_parents,resume_gap,total_gap,income,unemployed,read_out_work_school,income_social_welfare, food_stamps,section_8,hospitalized_times,lack_concentration,anxiety,depression,obsessive_thinking,mood_swings,panic_attacks,compulsive_behavior,tiredness,age,gender]])[0]\r\n print(result)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n sys.stdout.flush()","repo_name":"taranjotsingh23/vihaan-backend","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26083577581","text":"from flask import Flask, request, jsonify\nfrom flask_restful import Resource, Api\nfrom datetime import timedelta, date\nimport requests\nfrom amadeus import Client, ResponseError\nimport json\n\n# time_durations less thans\ntwelve_hours = timedelta(hours = 12)\n\napp = Flask(__name__)\napi = Api(app)\n\n# todos = {}\n\namadeus = Client(\n client_id='',\n client_secret=''\n)\n\n\ndef totalCost(place,\n dateDeparture,\n dateReturn,\n noOfAdults = 1,\n noOfChild = 0):\n\n #Check the types of inputs\n # Date - \n # Place - string\n # noOfAdults - int\n # noOfChild - int\n\n # Tell manish bhai that date should be in this format and the date should be of proper time ahead from todays date.\n # It should be checked already by manish that the differnce in dates is atleast one day or more.\n print(\"In totalCost\")\n print(place)\n\n # YOUR_ACCESS_KEY = 'GET YOUR ACCESS KEY FROM fixer.io' \n # url = str.__add__('http://data.fixer.io/api/latest?access_key=', \"\") \n # c = Currency_convertor(url) \n # from_country = \"USD\"\n # to_country = \"INR\"\n\n #Calculate number of nights\n no_of_nights = date.fromisoformat(dateReturn) - date.fromisoformat(dateDeparture)\n no_of_nights = no_of_nights.days\n print(no_of_nights)\n \n\n with open('destinations_names_to_code.json') as f:\n destination = json.load(f)\n \n destId = destination[place]\n products = top_20_products(destId)\n print('Got the 20 products')\n if(products[\"success\"] == True):\n per_person_average_price = 0\n duration = format_duration(\"0 hour\")\n count = 0\n for product in products[\"data\"]:\n du = format_duration(product[\"duration\"])\n if(du <= twelve_hours):\n count = count + 1\n per_person_average_price = product[\"price\"] + per_person_average_price\n duration = du + duration \n print(\"Got i product\")\n try:\n duration = duration/count\n except:\n #count if experinces below 12 hours is zero\n #Here taking into considerations of all experience\n \n for product in products[\"data\"]:\n du = format_duration(product[\"duration\"])\n count = count + 1\n per_person_average_price = product[\"price\"] + per_person_average_price\n duration = du + duration\n try:\n duration = duration/count\n except:\n duration = 0\n if(duration):\n per_person_average_price = per_person_average_price/count\n else: \n per_person_average_price = 0\n \n per_person_average_price = per_person_average_price*74.75 #USD to INR\n #starting hotel price\n # shp = StartingHotelPrice(place) #1 room, 1 night\n shp = 0\n no_of_rooms = int(noOfAdults/2.0)\n # shp = shp*(toINRvalue)\n #average flight price\n origin = 'DEL'\n final_destination = 'CMB'\n afp = 
cheepest_offer(origin, final_destination, dateDeparture, adults=1, oneWay=False)\n afp = float(afp[0][\"price\"][\"total\"]) # 0 is for cheepest offer\n afp = afp*85.44 #euro to inr\n # afp = AverageFlightPrice(place, dateDeparture, dateReturn)\n\n multiplicative_factor = 1\n totalPrice = 0\n totalPrice = (per_person_average_price*noOfAdults*no_of_nights*multiplicative_factor) + \\\n (shp*no_of_nights*no_of_rooms) + \\\n (afp*noOfAdults*2)\n # totalPrice = c.convert(from_country, to_country, totalPrice)\n return ({\n \"per_person_average_price\" : per_person_average_price*74.5,\n \"totalPrice\": totalPrice,\n \"no_of_rooms\" : no_of_rooms,\n \"no_of_nights\" : no_of_nights,\n \"noOfAdults\" : noOfAdults,\n \"cheepest_Flight_Price\" : afp\n })\n \n \n\n\ndef format_duration(duration):\n try:\n #type of '6 hours', '1 hour', '6 hours 30 minutes', '1 hour 30 minutes'\n hours = int(duration.split(' hour')[0])\n return(timedelta(hours = hours)) \n except:\n try:\n #type of '4 to 5 hours'\n hours_1 = int(duration.split(' hours')[0].split(' to ')[0])\n hours_2 = int(duration.split(' hours')[0].split(' to ')[1]) \n return(timedelta(hours = hours_2))\n except:\n try:\n #type '3 days'\n days = int(duration.split(' day')[0])\n return(timedelta(days = days))\n except:\n try:\n #type '2 to 3 days'\n days_1 = int(duration.split(' days')[0].split(' to ')[0])\n days_2 = int(duration.split(' days')[0].split(' to ')[1]) \n return(timedelta(days = days_2)) \n except:\n try:\n #type of '5 minutes'\n minutes = int(duration.split(' minute')[0])\n return(timedelta(minutes = minutes)) \n except:\n try:\n #type of '4 to 5 minutes'\n minutes_1 = int(duration.split(' minutes')[0].split(' to ')[0])\n minutes_2 = int(duration.split(' minutes')[0].split(' to ')[1])\n return(timedelta(minutes = minutes_2))\n except:\n print(\"FUCKED\")\n print(duration)\n \ndef check_duration_type(time_obj, dur_type):\n switcher = {'one_hour' : 1 if(time_obj <= one_hour) else 0, #ternary,\n 'four_hour' : 1 if(time_obj <= four_hour) else 0,\n 'one_day' :1 if(time_obj <= one_day) else 0, \n 'three_days': 1 if(time_obj <= three_days) else 0,\n 'three_day_plus':1 if(time_obj > three_days) else 0}\n# print(f\"######### DURATION TYPE: {dur_type}######### ##### dur_type: {switcher.get(dur_type)} ####\") \n return(switcher.get(dur_type))\n \n\n\ndef cheepest_offer(origin, destination, departureDate, adults=1, oneWay=False):\n\n try:\n response = amadeus.shopping.flight_offers_search.get(\n originLocationCode = origin,\n destinationLocationCode = destination,\n departureDate = departureDate,\n adults=1)\n return response.data\n\n except ResponseError as error:\n print(error)\n\n#API from https://fixer.io/quickstart\nclass Currency_convertor: \n\t# empty dict to store the conversion rates \n\trates = {} \n\tdef __init__(self, url): \n\t\tdata = requests.get(url).json() \n\n\t\t# Extracting only the rates from the json data \n\t\tself.rates = data[\"rates\"] \n\n\t# function to do a simple cross multiplication between \n\t# the amount and the conversion rates \n\tdef convert(self, from_currency, to_currency, amount): \n\t\tinitial_amount = amount \n\t\tif from_currency != 'EUR' : \n\t\t\tamount = amount / self.rates[from_currency] \n\n\t\t# limiting the precision to 2 decimal places \n\t\tamount = round(amount * self.rates[to_currency], 2) \n# \t\tprint('{} {} = {} {}'.format(initial_amount, from_currency, amount, to_currency))\n\t\treturn amount\n\n\ndef top_20_products(destId \n ):\n\n print(\"In top 20 products\")\n\n url = 
\"https://viatorapi.sandbox.viator.com/service/search/products\"\n\n payload = {\"destId\": destId,\n \"sortOrder\": \"REVIEW_AVG_RATING_D\",\n \"topX\": '1-20'}\n #payload = {\"destId\": destId}\n #payload = {\"destId\": destId\n # \"startDate\": \"2020-02-21\",\n # \"endDate\": \"2020-03-21\"}\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Accept-Language': 'en-US',\n 'exp-api-key': '', #Hide api key in production code\n 'Content-Type': 'application/json'\n }\n response = requests.request(\"POST\", url, headers=headers, data = json.dumps(payload))\n products = response.json() \n return products\n\n\n\nclass TotalTripCost(Resource):\n # def get(self, todo_id):\n # return {todo_id: todos[todo_id]}\n\n def put(self):\n place = request.form['place']\n dateDeparture = request.form['dateDeparture']\n dateReturn = request.form['dateReturn']\n noOfAdults = int(request.form['noOfAdults'])\n noOfChild = int(request.form['noOfChild']) \n # Check for the formats of date, etc. Are they received correct\n # Check if assert can be used here\n print(\"Okayyy Getting Ready\")\n return jsonify(totalCost(place,\n dateDeparture,\n dateReturn,\n noOfAdults,\n noOfChild))\n\nclass HelloWorld(Resource):\n def get(self):\n return \"Hello World!\"\n\napi.add_resource(TotalTripCost, '/TotalTripCost')\napi.add_resource(HelloWorld, '/')\n\nif __name__ == '__main__':\n app.run(debug=True, port=80)\n","repo_name":"ankurbhatia24/Roamyo","sub_path":"Roamyo-backend/test_server_roamyo.py","file_name":"test_server_roamyo.py","file_ext":"py","file_size_in_byte":10170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"10877271208","text":"#!/usr/bin/env python3\nimport rospy \nimport math\nimport numpy as np\nimport tf\nimport cv2\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nimport os\nimport datetime\n\n\nclass TurnCounter:\n def __init__(self):\n self.rate = rospy.Rate(30)\n self.yaw=0\n self.yaw_ref=0\n self.bridge = CvBridge()\n self.turn_detected = False\n self.right_turns = 0\n self.left_turns = 0\n self.turn_threshold = 7.5\n self.straight_line_threshold = 2\n self.image_frames = []\n self.setSubscribers()\n\n def setSubscribers(self):\n rospy.Subscriber(\"/orb_slam3/tracking_image\", Image, self.callback)\n self.listener = tf.TransformListener()\n \n def callback(self,data):\n image = data\n cv_image = self.bridge.imgmsg_to_cv2(image, \"bgr8\")\n #draw a arrow pointing in the direction of the yaw\n try:\n (trans,rot) = self.listener.lookupTransform('/world',\"/camera\", rospy.Time(0))\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n return\n #check if the transform is valid\n if rot[0]==0 and rot[1]==0 and rot[2]==0 and rot[3]==1:\n self.yaw_ref = 0\n self.yaw = 0\n return\n #transform the rotation to euler angles\n euler = tf.transformations.euler_from_quaternion(rot)\n #transform the euler angles to degrees\n euler = np.degrees(euler)\n #get the yaw\n self.yaw = euler[1]\n #draw a arrow pointing in the direction of the yaw on the top left corner of the image\n cv2.arrowedLine(cv_image,(50,50),(int(50+math.cos(math.radians(self.yaw-90))*50),int(50+math.sin(math.radians(self.yaw-90))*50)),(0,255,0),2)\n \n #compute the moving average of the last 100 yaws\n self.yaw_ref = (self.yaw_ref*99+self.yaw)/100\n \n #draw a arrow pointing in the direction of the moving average yaw on the top right corner of the image\n 
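# (yaw_ref above is an exponential moving average, new = (99*old + yaw)/100, so it lags the raw yaw rather than averaging an exact 100-sample window)\n        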
cv2.arrowedLine(cv_image,(50,50),(int(50+math.cos(math.radians(self.yaw_ref-90))*50),int(50+math.sin(math.radians(self.yaw_ref-90))*50)),(0,0,255),2)\n #display on the bottom left corner of the image the difference between the yaw and the moving average yaw\n #make a strig with the difference between the yaw and the moving average yaw and one decimal place\n cv2.putText(cv_image,str(round(self.yaw-self.yaw_ref,1)),(50,100),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,70,255),2)\n #if the difference is greater than 10 degrees the robot is turning right and if it is less than -10 degrees the robot is turning left\n if self.yaw-self.yaw_ref>self.turn_threshold:\n cv2.putText(cv_image,\"Turning right\",(10,150),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,70,255),2)\n if not self.turn_detected:\n #count only one time the turning\n self.turn_detected = True\n self.right_turns += 1\n elif self.yaw-self.yaw_ref<-self.turn_threshold:\n cv2.putText(cv_image,\"Turning left\",(10,150),cv2.FONT_HERSHEY_SIMPLEX,0.8,(255,0,0),2)\n if not self.turn_detected:\n #count only one time the turning\n self.turn_detected = True\n self.left_turns += 1\n \n if abs(self.yaw-self.yaw_ref)=3.7\",\n install_requires=[\n \"bluetooth-sensor-state-data>=1.6.1\",\n \"sensor-state-data>=2.16.0\",\n \"victron-ble>=0.6.0\",\n ],\n)\n","repo_name":"rajlaud/victron-ble-ha-parser","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"25783314407","text":"from http.server import BaseHTTPRequestHandler\nfrom urllib import parse\nimport requests\n\nclass handler(BaseHTTPRequestHandler):\n def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type', 'text/plain')\n self.end_headers()\n\n url_path = self.path\n url_components = parse.urlsplit(url_path)\n query_list = parse.parse_qsl(url_components.query)\n params_dict = dict(query_list)\n\n if 'country' in params_dict:\n country_param = params_dict.get('country')\n country_url = f'https://restcountries.com/v3.1/name/{country_param}'\n country_response = requests.get(country_url)\n country_data = country_response.json()\n capital_result = country_data[0]['capital'][0]\n message = f\"The capital of {country_param} is {capital_result}\"\n elif 'capital' in params_dict:\n capital_param = params_dict.get('capital')\n capital_url = f'https://restcountries.com/v3.1/capital/{capital_param}'\n capital_response = requests.get(capital_url)\n capital_data = capital_response.json()\n country_result = capital_data[0]['name']['common']\n message = f\"{capital_param} is the capital of {country_result}\"\n else:\n message = \"Invalid request\"\n\n self.wfile.write(message.encode())\n return\n","repo_name":"firas1awadallah/capital-finder","sub_path":"api/capital_finder.py","file_name":"capital_finder.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37441134110","text":"url = 'https://www.youtube.com/'\n\nimport requests\nfrom multiprocessing.dummy import Pool\nimport time\n\npool = Pool(1)\n\ndef on_success(r):\n if r.status_code == 200:\n print(f'Request succeed: {r}')\n else:\n print(f'Request failed: {r}')\n\ndef on_error(ex: Exception):\n print(f'Request failed: {ex}')\n\nbefore = time.time()\n\npool.apply_async(requests.get, args=[url],callback=on_success, error_callback=on_error)\n# requests.get(url)\n\nprint(time.time() - 
before)\n\ntime.sleep(3)","repo_name":"aruasouza/CTG-Riscos","sub_path":"app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37908943942","text":"from uclu.me.models.v2 import *\n\n\nclass N1(V2):\n    def __init__(self, device, args: UcluArgs):\n        super(N1, self).__init__(device, args)\n        self.addpnt: float = args.addpnt\n        self.criterion = nn.CrossEntropyLoss()\n\n    def get_que_rep(self, title_int, body_int):\n        title_mask = self.get_mask(title_int) # (bs, tn, 1)\n        title_lkup = self.w_embed(title_int) # (bs, tn, dw)\n        title_mean = self.mean_pooling(title_lkup, title_mask) # (bs, dw)\n        if self.addb < 1e-6:\n            return title_mean\n        body_mask = self.get_mask(body_int) # (bs, tn, 1)\n        body_lkup = self.w_embed(body_int) # (bs, tn, dw)\n        body_mean = self.mean_pooling(body_lkup, body_mask) # (bs, dw)\n        return title_mean + self.addb * body_mean\n\n    def get_doc_rep(self, title_int, body_int, user_int):\n        return self.get_que_rep(title_int, body_int)\n\n    def forward(self, title_int, body_int, user_int):\n        q_rep = self.get_que_rep(title_int, body_int) # (bs, dw)\n        pc_probs = self.get_pc_probs(q_rep) # (bs, nc)\n        q_rec = pc_probs.matmul(self.c_embed.weight) # (bs, dw)\n        mut_cos = self.mutual_cos(q_rep, q_rec) # (bs, bs)\n        mut_loss = self.max_margin_loss(mut_cos)\n\n        # u_rep = self.u_embed(user_int) # (bs, dw)\n        # uq_mut = self.mutual_cos(u_rep, q_rec) # (bs, bs)\n        # uq_loss = self.max_margin_loss(uq_mut)\n\n        qu_score = q_rep.matmul(self.u_embed.weight.t()) # (bs, nu)\n        isu_loss = self.criterion(qu_score, user_int)\n        return mut_loss + self.addu * isu_loss # + self.addpnt * uq_loss\n","repo_name":"noDefinition/works","sub_path":"uclu/me/models/n1.py","file_name":"n1.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"41224842452","text":"#continuation of list_tuple_dictionary\n\n# the list function\n# print(list(\"joni gudel\")) # would produce a list of the string's characters, split up in index order\n\n# # another example of using a list\npertanyaan = int(input(\"Which month number (1 - 12) ?\"))\nbulan = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n\nif 1 <= pertanyaan <= 12:\n    print(\"Month\", bulan[pertanyaan - 1]) # subtract 1 because indexing starts at 0, so the lookup lands on the right month\n\n# Working with a list through several functions\n# append adds a new item to the end of the list\n# len returns the number of items in the list\n# index reports where an item sits in the list\n# del can be used to remove a particular item from the list\n# sort is used to sort the list\n# Example\n\nmenu_item = 0\nnamelist = [] # starts with no contents, so there are no indexes yet; remember that\n# list positions are counted by index starting from 0\n\n# build the menu loop\nwhile menu_item != 5 :\n    print(\"--------------------------------\")\n    print(\"1. Print the list\")\n    print(\"2. Add a name to the list\")\n    print(\"3. Delete a name from the list\")\n    print(\"4. Change an entry in the list\")\n    print(\"5. 
Exit\")\n    menu_item = int(input(\"pick a menu option :\")) # read the choice into the menu_item variable\n    if menu_item == 1: # program for option 1\n        current = 0\n        if len(namelist) > 0: # if the list holds at least one entry, walk it with the while loop\n            while current < len(namelist):\n                print(current, \".\", namelist[current])\n                current = current + 1\n        else:\n            print(\"the list is empty\")\n\n    elif menu_item == 2: # program for option 2\n        name = input(\"enter a name :\")\n        namelist.append(name)\n    \n    elif menu_item == 3: # program for option 3\n        del_name = input(\"name to delete :\")\n        if del_name in namelist:\n            item_number = namelist.index(del_name)\n            del namelist[item_number]\n        else:\n            print(del_name, \"not found\")\n    elif menu_item == 4: # program for option 4\n        old_name = input(\"Which name do you want to change :\")\n        if old_name in namelist:\n            item_number = namelist.index(old_name)\n            new_name = input(\"new name :\")\n            namelist[item_number] = new_name\n        else:\n            print(old_name, \"not found\")\n\nprint(\"Goodbye\")\n\n","repo_name":"Virgo-SSS/Py-Course","sub_path":"Course/list_tuple_dan_dictionary2.py","file_name":"list_tuple_dan_dictionary2.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"22911666776","text":"import numpy as np\nimport time\n\nimport torch\n\nimport matplotlib.pyplot as plt\nfrom nltk.translate.bleu_score import corpus_bleu\nfrom sklearn.metrics import mean_squared_error, roc_auc_score, f1_score, accuracy_score, recall_score\nfrom tabulate import tabulate\n\n\nfrom models.loss import PerceptualLoss\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nclass Evaluator(object):\n    def __init__(self, vocab):\n        self.paramed_ops = ['brightness', 'contrast', 'blur', 'sharpness', 'color']\n        self.vocab = vocab['operator_idx_to_token']\n        self.reset()\n\n    def reset(self):\n        self.refs = []\n        self.cands = []\n        self.op_dict = {op: {'pred': [], 'true': []} for op in self.paramed_ops}\n\n    def tensor2list(self, x):\n        return x.cpu().numpy().tolist()\n\n    def normalize(self, arr):\n        \"\"\"\n        remove the redundant tokens in an operator tensor\n        :param arr: operator list (n,)\n        :return: out: operator list (n,) or (n - 2,)\n        \"\"\"\n        arr = np.array(arr)\n        s = np.where(arr == 1)[0]\n        if len(s) > 0:\n            s = s[0]\n        else:\n            s = -1\n        e = np.where(arr == 2)[0]\n        if len(e) > 0:\n            e = e[0]\n        else:\n            e = len(arr)\n        assert e >= s, 'y is not correct to decode'\n        out = arr[s + 1:e]\n        return out.tolist()\n\n    def update(self, x, y, px=None, py=None):\n        \"\"\"\n        update the operator and parameter\n        :param x: operators pred tensor (bs, n)\n        :param y: operators gt tensor (bs, m): SPECIAL: start with 1\n        :param px: parameter pred tensor (bs, n)\n        :param py: parameter gt tensor (bs, m)\n        \"\"\"\n        bs, _ = x.shape\n        x = self.tensor2list(x)\n        y = self.tensor2list(y)\n        px = self.tensor2list(px)\n        py = self.tensor2list(py) if py is not None else py\n        for i in range(bs):\n            x_i = self.normalize(x[i])\n            y_i = self.normalize(y[i])\n            self.cands.append(x_i)\n            self.refs.append([y_i])\n            if py is not None:\n                for j in range(min(len(x_i), len(y_i))):\n                    if x_i[j] == y_i[j]:\n                        # if the operator is correct\n                        op_idx = x_i[j]\n                        op_name = self.vocab[op_idx]\n                        if op_name in self.paramed_ops:\n                            self.op_dict[op_name]['pred'].append(px[i][j])\n                            self.op_dict[op_name]['true'].append(py[i][j])\n\n\n    def eval_mse(self):\n        \"\"\"\n        evaluate the mean square error\n        :return:\n        \"\"\"\n        mse = {}\n        
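# ops with no correctly-matched predictions keep empty lists and fall back to an MSE of 0 below\n        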
for (op, v) in self.op_dict.items():\n mse[op] = 0 if v['true'] == [] or v['pred'] == [] else mean_squared_error(v['true'], v['pred'])\n return mse\n\n def eval_bleu(self):\n \"\"\"\n reference\n :param ref: references: list of list of list of token\n :param cand: candidates: list of list of token\n :return: bleu1, bleu2\n \"\"\"\n\n bleu1 = 0 if self.refs == [] or self.cands == [] else corpus_bleu(self.refs, self.cands, weights=(1, 0, 0, 0))\n bleu2 = 0 if self.refs == [] or self.cands == [] else corpus_bleu(self.refs, self.cands, weights=(0, 1, 0, 0))\n return bleu1, bleu2\n\n\n# for grounding evaluation\n# IoU\ndef mask_iou(m1, m2):\n \"\"\"\n compute iou between two masks\n :param m1: (h, w) \\in {0,1}\n :param m2: (h, w) \\in {0,1}\n :return: iou\n \"\"\"\n inter = (m1 * m2).sum().item()\n union = (m1 + m2).clamp(0, 1).sum().item()\n iou = float(inter)/(union + 1e-6)\n return iou\n\ndef plot(x1, x2, x3, x4):\n t = np.arange(len(x1))\n order = np.argsort(x1)\n x1 = x1[order]\n x2 = x2[order]\n x3 = x3[order]\n x4 = x4[order]\n plt.plot(t, x1, '-b', label='in_dist')\n plt.plot(t, x2, '-r', label='out_dist')\n plt.plot(t, x3, '-g', label='decre_dist')\n plt.title('distance figure')\n plt.xlabel('index of image')\n plt.ylabel('distance')\n plt.legend()\n plt.savefig('distance.jpg')\n plt.close()\n\n\nclass ImageEvaluator(object):\n def __init__(self):\n self.perceptual_net = PerceptualLoss()\n self.perceptual_net.to(device)\n self.reset()\n\n def reset(self):\n self.out_dist = [] # L1 distance of output\n self.in_dist = [] # L1 distance of input\n\n self.perc_out_dist = []\n self.perc_in_dist = []\n\n def update(self, input, output, gt):\n \"\"\"\n torch tensor\n :param input: (1, 3, h, w)\n :param output: (1, 3, h, w)\n :param gt: (1, 3, h, w)\n :return:\n \"\"\"\n in_dist = torch.abs(input - gt).mean().item()\n out_dist = torch.abs(output - gt).mean().item()\n self.in_dist.append(in_dist)\n self.out_dist.append(out_dist)\n\n # calculate perceptual distance\n input = input.unsqueeze(0).to(device)\n output = output.unsqueeze(0).to(device)\n gt = gt.unsqueeze(0).to(device)\n\n perc_in_dist = self.perceptual_net(input, gt).item()\n perc_out_dist = self.perceptual_net(output, gt).item()\n self.perc_in_dist.append(perc_in_dist)\n self.perc_out_dist.append(perc_out_dist)\n\n\n def eval_perceptual(self):\n in_dists = np.array(self.perc_in_dist)\n out_dists = np.array(self.perc_out_dist)\n decre_dists = (in_dists - out_dists)\n mean_out_dist = np.mean(out_dists)\n mean_in_dist = np.mean(in_dists)\n mean_incre_dist = np.mean(decre_dists)\n return mean_in_dist, mean_out_dist, mean_incre_dist\n\n\n def eval_L1(self):\n in_dists = np.array(self.in_dist)\n out_dists = np.array(self.out_dist)\n decre_dists = (in_dists - out_dists)\n mean_out_dist = np.mean(out_dists)\n mean_in_dist = np.mean(in_dists)\n mean_incre_dist = np.mean(decre_dists)\n # plot(in_dists, out_dists, in_dists - out_dists, decre_dists)\n return mean_in_dist, mean_out_dist, mean_incre_dist\n\n def print_eval(self):\n in_L1, out_L1, L1_decre = self.eval_L1()\n in_perc, out_perc, perc_decre = self.eval_perceptual()\n print('input L1 dist {:.4f}, output L1 dist {:.4f}, L1 decre: {:.4f}'.format(in_L1, out_L1, L1_decre))\n print('input perc dist {:.4f}, output perc dist {:.4f}, perc decre: {:.4f}'.format(in_perc, out_perc, perc_decre))\n\n\nclass Ground_Evaluator(object):\n\n def __init__(self, ):\n self.reset()\n\n def reset(self):\n self.scores = [] # store all scores\n self.gts = [] # store all gts\n self.threshes = [0, 0.1, 0.2, 0.3, 
0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n        self.ious = [[] for _ in range(len(self.threshes))]\n        self.total_nums = [0 for _ in range(len(self.threshes))]\n        self.correct_nums = [0 for _ in range(len(self.threshes))]\n\n    def update(self, scores, gts, masks):\n        \"\"\"\n        :param scores: (n,)\n        :param gts: (n,) \in {0, 1}\n        :param masks: list of n (h, w) \in {0, 1}\n        :return:\n        \"\"\"\n        self.scores.extend(scores)\n        self.gts.extend(gts)\n        scores = np.array(scores)\n        gts = np.array(gts)\n        if masks is not None:\n            masks = np.array(masks)\n            masks = torch.from_numpy(masks).cuda()\n        for i, thresh in enumerate(self.threshes):\n            preds = (scores > thresh).astype(int)\n            self.total_nums[i] += preds.sum()\n            self.correct_nums[i] += (preds * gts).sum()\n            if masks is not None:\n                pred_mask = masks[np.where(preds > 0)[0]].sum(0).clamp(0, 1)\n                gt_mask = masks[np.where(gts > 0)[0]].sum(0).clamp(0, 1)\n                iou = mask_iou(pred_mask, gt_mask)\n                self.ious[i].append(iou)\n\n    def eval_acc(self):\n        \"\"\"\n        eval accuracy in different threshold\n        :return: acc: list of accuracy\n        \"\"\"\n        # accuracy\n        acc = [correct_num / (total_num + 1e-6) for (correct_num, total_num) in zip(self.correct_nums, self.total_nums)]\n        return acc\n\n    def eval_iou(self):\n        \"\"\"\n        evaluate image mask iou in different threshold\n        :return: iou: list of iou\n        \"\"\"\n        if len(self.ious) == 0:\n            return []\n        iou = [float(np.mean(ious)) for ious in self.ious]\n        return iou\n\n    def eval_roc(self):\n        \"\"\"\n        regard it as multi-label classification, which have no effect about threshold\n        :return: roc: float\n        \"\"\"\n        roc = float(roc_auc_score(self.gts, self.scores))\n        return roc\n\n    def print_eval(self):\n        acc = self.eval_acc()\n        iou = self.eval_iou()\n        roc = self.eval_roc()\n        print('roc auc: {:4f}'.format(roc))\n        acc_row = ['accuracy'] + acc\n        iou_row = ['IoU'] + iou\n        header = ['thresh'] + list(self.threshes)\n        print(tabulate([acc_row, iou_row], headers=header, tablefmt='orgtbl'))\n\n\nclass GroundOperatorEvaluator(object):\n    def __init__(self, ):\n        self.reset()\n\n    def reset(self):\n        self.scores = []  # store all scores\n        self.local_probs = []  # store all probs for local operation\n        self.locals = []  # store gt for local operation\n        self.gts = []  # store all gts choice of candidate\n        self.threshes = (100*np.linspace(0, 1, 21)).astype(int) / 100  # only keep two decimals\n        self.ious = [[] for _ in range(len(self.threshes))]\n        self.total_nums = [0 for _ in range(len(self.threshes))]\n        self.correct_nums = [0 for _ in range(len(self.threshes))]\n\n    def update(self, prob, scores, gts, masks, local):\n        \"\"\"\n        :param prob: (1, 1) for one sample, whether the operation is local or global\n        :param scores: (n,) for each candidate\n        :param gts: (n,) \in {0, 1}\n        :param masks: list of n (h, w) \in {0, 1}\n        :return:\n        \"\"\"\n        self.locals.append(local)\n        self.local_probs.append(prob)\n        if local and len(gts) > 0:  # local\n            self.scores.extend(scores)\n            self.gts.extend(gts)\n            scores = np.array(scores)\n            gts = np.array(gts)\n            if masks is not None:\n                masks = np.array(masks)\n                masks = torch.from_numpy(masks).cuda()\n            for i, thresh in enumerate(self.threshes):\n                preds = (scores > thresh).astype(int)\n                self.total_nums[i] += preds.sum()\n                self.correct_nums[i] += (preds * gts).sum()\n                if masks is not None:\n                    pred_mask = masks[np.where(preds > 0)[0]].sum(0).clamp(0, 1)\n                    gt_mask = masks[np.where(gts > 0)[0]].sum(0).clamp(0, 1)\n                    iou = mask_iou(pred_mask, gt_mask)\n                    self.ious[i].append(iou)\n\n    def eval_ground_acc(self):\n        \"\"\"\n        eval accuracy in different threshold\n        :return: acc: 
list of accuracy\n \"\"\"\n # accuracy\n acc = [correct_num / (total_num + 1e-6) for (correct_num, total_num) in zip(self.correct_nums, self.total_nums)]\n return acc\n\n def eval_local_acc(self):\n \"\"\"\n evaluate the accuracy for local or global operation binary classification\n :return: acc: one of accuracy\n \"\"\"\n # accuracy\n acc = ((np.array(self.local_probs) > 0.5) == np.array(self.locals)).sum() / len(self.locals)\n return acc\n\n\n def eval_iou(self):\n \"\"\"\n evaluate image mask iou in different threshold\n :return: iou: list of iou\n \"\"\"\n if len(self.ious) == 0:\n return []\n iou = [float(np.mean(ious)) for ious in self.ious]\n return iou\n\n\n def eval_ground_roc(self):\n \"\"\"\n regard it as multi-label classification, which have no effect about threshold\n :return: roc: float\n \"\"\"\n roc = float(roc_auc_score(self.gts, self.scores))\n return roc\n\n def eval_local_roc(self):\n \"\"\"\n regard it as multi-label classification, which have no effect about threshold\n :return: roc: float\n \"\"\"\n roc = float(roc_auc_score(self.locals, self.local_probs))\n return roc\n\n def eval_local_all(self):\n scores = np.array(self.local_probs)\n gts = np.array(self.locals)\n acc, recal, f1 = [], [], []\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n acc.append(accuracy_score(gts, preds))\n recal.append(recall_score(gts, preds))\n f1.append(f1_score(gts, preds))\n return acc, recal, f1\n\n def eval_ground_all(self):\n scores = np.array(self.scores)\n gts = np.array(self.gts)\n acc, recal, f1 = [], [], []\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n acc.append(accuracy_score(gts, preds))\n recal.append(recall_score(gts, preds))\n f1.append(f1_score(gts, preds))\n return acc, recal, f1\n\n def print_eval(self):\n local_acc, local_recal, local_f1 = self.eval_local_all()\n ground_acc, ground_recal, ground_f1 = self.eval_ground_all()\n\n iou = self.eval_iou()\n ground_roc = self.eval_ground_roc()\n local_roc = self.eval_local_roc()\n\n print('ground roc auc: {:4f}'.format(ground_roc))\n print('local roc auc: {:4f}'.format(local_roc))\n ground_acc_row = ['ground_accuracy'] + ground_acc\n ground_recal_row = ['ground_recal'] + ground_recal\n ground_f1_row = ['ground_f1'] + ground_f1\n local_acc_row = ['local_accuracy'] + local_acc\n local_recal_row = ['local_recal'] + local_recal\n local_f1_row = ['local_f1'] + local_f1\n iou_row = ['IoU'] + iou\n header = ['thresh'] + list(self.threshes)\n print(tabulate([local_acc_row, local_recal_row, local_f1_row, ground_acc_row, ground_recal_row, ground_f1_row, iou_row], headers=header, tablefmt='orgtbl'))\n\n\nclass MultiLabelEvaluator(object):\n def __init__(self, ):\n self.reset()\n\n def reset(self):\n self.scores = [] # store all scores\n self.gts = [] # store all gts\n self.threshes = np.linspace(0, 1, 21)\n self.total_nums = [0 for _ in range(len(self.threshes))]\n self.correct_nums = [0 for _ in range(len(self.threshes))]\n\n def update(self, scores, gts):\n \"\"\"\n :param scores: (n,)\n :param gts: (n,) \\in {0, 1}\n :param masks: list of n (h, w) \\in {0, 1}\n :return:\n \"\"\"\n self.scores.extend(scores)\n self.gts.extend(gts)\n scores = np.array(scores)\n gts = np.array(gts)\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n self.total_nums[i] += preds.sum()\n self.correct_nums[i] += (preds * gts).sum()\n\n def eval_acc(self):\n \"\"\"\n eval accuracy in different threshold\n :return: acc: list of accuracy\n 
\"\"\"\n scores = np.array(self.scores)\n gts = np.array(self.gts)\n acc = []\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n acc.append(accuracy_score(gts, preds))\n\n # accuracy\n # acc = [correct_num / (total_num + 1e-6) for (correct_num, total_num) in zip(self.correct_nums, self.total_nums)]\n return acc\n\n def eval_f1(self):\n \"\"\"\n eval f1 score in different threshold\n micro (global) f1 score is considered\n :return: f1\n \"\"\"\n # f1 score\n scores = np.array(self.scores)\n gts = np.array(self.gts)\n f1 = []\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n f1.append(f1_score(gts, preds))\n return f1\n\n def eval_recal(self):\n \"\"\"\n eval recal score in different threshold\n micro reval score is considered\n :return: f1\n \"\"\"\n scores = np.array(self.scores)\n gts = np.array(self.gts)\n recal = []\n for i, thresh in enumerate(self.threshes):\n preds = (scores > thresh).astype(int)\n recal.append(recall_score(gts, preds))\n return recal\n\n\n def eval_roc(self):\n \"\"\"\n regard it as multi-label classification, which have no effect about threshold\n :return: roc: float\n \"\"\"\n roc = float(roc_auc_score(self.gts, self.scores))\n return roc\n\n def print_eval(self):\n acc = self.eval_acc()\n recal = self.eval_recal()\n f1 = self.eval_f1()\n roc = self.eval_roc()\n print('roc auc: {:4f}'.format(roc))\n acc_row = ['accuracy'] + acc\n recall_row = ['recal'] + recal\n f1_row = ['f1'] + f1\n header = ['thresh'] + list(self.threshes)\n print(tabulate([acc_row, recall_row, f1_row], headers=header, tablefmt='orgtbl'))\n\n","repo_name":"wtjiang98/Language_Guided_Image_Editing","sub_path":"LGIE/util/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":16904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"22342508591","text":"class LogMixin:\n @staticmethod\n def write(msg):\n with open('log.log','a+') as f:\n f.write(msg)\n f.write('\\n')\n \n def log_info(self,msg):\n self.write(f'INFO: {msg}')\n\n def log_error(self, msg):\n self.write(f'ERROR: {msg}')\n\nclass Eletronico:\n def __init__(self,nome):\n self._nome = nome\n self._ligado = False\n \n def ligar(self):\n if self._ligado:\n return\n else:\n self._ligado = True\n \n def desligar(self):\n if self._ligado:\n self._ligado = False\n else:\n return\n \nclass smartphone(Eletronico, LogMixin):\n def __init__(self, nome):\n super().__init__(nome)\n self._conectado = False\n \n def conectar(self):\n if self._ligado:\n if self._conectado:\n error = f'{self._nome} Já esta CONECTADO'\n print(error)\n self.log_error(error)\n return\n else:\n info = f'{self._nome} FOI CONECTADO'\n print(info)\n self.log_info(info)\n self._conectado = True\n else:\n error = f'{self._nome} ESTÁ DESLIGADO'\n print(error)\n self.log_error(error)\n return\n \n def desconectar(self):\n if not self._conectado :\n error = f'{self._nome} Já esta DESCONECTADO'\n print(error)\n self.log_error(error)\n return\n else:\n info = f'{self._nome} Foi DESCONECTADO'\n print(info)\n self.log_info(info)\n self._conectado = False\n\n \n\ncell = smartphone('Celular Xiaomi')\ncell.conectar()\ncell.desligar()\ncell.ligar()\ncell.conectar()\ncell.desligar()\ncell.conectar()\n","repo_name":"JorgeVitor30/Curso_Python","sub_path":"ex17.py","file_name":"ex17.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} 
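All of the evaluator classes in the `LGIE/util/eval.py` record above share the same threshold-sweep pattern: binarize the collected scores at every threshold in `self.threshes`, then score the binarized predictions. A minimal, self-contained sketch of that pattern follows; the scores and labels are invented for illustration and are not taken from the record:

```python
import numpy as np
from sklearn.metrics import accuracy_score, recall_score, f1_score

scores = np.array([0.9, 0.2, 0.7, 0.4, 0.6])  # hypothetical model scores
gts = np.array([1, 0, 1, 1, 0])               # hypothetical ground-truth labels

# Sweep a fixed threshold grid, as the evaluators' eval_* methods do.
for thresh in np.linspace(0, 1, 21):
    preds = (scores > thresh).astype(int)
    print(f"thresh={thresh:.2f}  "
          f"acc={accuracy_score(gts, preds):.2f}  "
          f"recall={recall_score(gts, preds, zero_division=0):.2f}  "
          f"f1={f1_score(gts, preds, zero_division=0):.2f}")
```

Sweeping a fixed grid like this avoids committing to a single operating point; the ROC-AUC that the evaluators also report is the threshold-free complement to these per-threshold tables.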
+{"seq_id":"11696405885","text":"\"\"\"Carriage_End controller.\"\"\"\nimport cv2\nimport numpy as np\n\n# You may need to import some classes of the controller module. Ex:\n# from controller import Robot, Motor, DistanceSensor\n# from controller import Robot\n\n# create the Robot instance.\n# robot = Robot()\n\n# get the time step of the current world.\n# timestep = int(robot.getBasicTimeStep())\n# camera = robot.getDevice('camera1')\n# camera.enable(timestep)\n\n# You should insert a getDevice-like function in order to get the\n# instance of a device of the robot. Something like:\n# motor = robot.getMotor('motorname')\n# ds = robot.getDistanceSensor('dsname')\n# ds.enable(timestep)\n\n\ndef blob_detection(img):\n mask = cv2.inRange(img, (0.0, 0.3, 0.3), (0.1, 0.6, 0.6))\n kernel = np.ones((1, 1), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel2, iterations=2)\n # cv2.imshow(\"win 1\", mask)\n cv2.waitKey(2000)\n contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n nu = np.zeros(img.shape)\n nu = cv2.drawContours(nu, contours, -1, (0, 0, 255), 1)\n cv2.destroyAllWindows()\n return mask\n\n\ndef centroid_detection(cam):\n # img = cam.getImageArray()\n # gbr_img = []\n # for a in range(len(img)):\n # temp = []\n # for b in range(len(img[0])):\n # temp.append([img[b][a][2], img[b][a][1], img[b][a][0]])\n # gbr_img.append(temp)\n # img = np.array(gbr_img, np.uint8)\n cam.saveImage(\"../../images/center_image.png\", 100)\n img = cv2.imread(\"../../images/center_image.png\")\n img = remove_illumination(img)\n mask = cv2.inRange(img, (0, 0.3, 0.3), (0.1, 0.6, 0.6))\n kernel = np.ones((1, 1), np.uint8)\n mask = cv2.dilate(mask, kernel, iterations=3)\n M = cv2.moments(mask)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)\n\n\ndef camera_point_angle(field_of_view, image_width, point_coordinates):\n angle_pixel_ratio = field_of_view / image_width\n centre_point_x_value = image_width / 2\n angle = (\n point_coordinates[1] - centre_point_x_value\n ) * angle_pixel_ratio # negative values are returned if the camera points to the right of the sticker and vice versa\n return angle\n\n\ndef matchsticker(image):\n template = cv2.imread(\"../../images/yellow_sticker.png\")\n thresh = cv2.inRange(template, (0.0, 220, 220), (60, 255, 255))\n kernel = np.ones((5, 5), np.uint8)\n thresh = cv2.dilate(thresh, kernel, iterations=3)\n kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel2, iterations=3)\n template_contours, _ = cv2.findContours(\n thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n cnt1 = template_contours[0]\n contours, _ = cv2.findContours(image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n nu = np.zeros(image.shape)\n nu = cv2.drawContours(nu, contours, -1, (0, 0, 255), 1)\n if len(contours) == 0:\n return False\n cnt2 = contours[0]\n ret = cv2.matchShapes(cnt1, cnt2, 1, 0.0)\n # print(ret)\n if ret < 0.3:\n return True\n else:\n return False\n\n\ndef remove_illumination(image):\n image = image / 1.0\n for i in range(len(image)):\n for j in range(len(image[0])):\n g = image[i][j][0]\n b = image[i][j][1]\n r = image[i][j][2]\n total = g + b + r\n image[i][j][0] = g / total\n image[i][j][1] = b / total\n image[i][j][2] = r / total\n return image\n\n\n# Press the green button in the gutter to run the script.\ndef is_carriage_end(cam):\n 
cam.saveImage(\"../../images/front_image.png\", 100)\n    img = cv2.imread(\"../../images/front_image.png\")\n    # img = cam.getImageArray()\n    # gbr_img = []\n    # for a in range(len(img)):\n    #     temp = []\n    #     for b in range(len(img[0])):\n    #         temp.append([img[b][a][2], img[b][a][1], img[b][a][0]])\n    #     gbr_img.append(temp)\n    # img = np.array(gbr_img, np.uint8)\n    # img = cv2.imread(\"../images/yellow_sticker.png\")\n    img = remove_illumination(img)\n    cv2.destroyAllWindows()\n    blob = blob_detection(img)\n    matches = matchsticker(blob)\n    cv2.destroyAllWindows()\n    return matches\n\n\ncount = 0\nif __name__ == \"__main__\":\n    cam = None  # stand-in only: centroid_detection() needs a real Webots camera device\n    if cam is not None:\n        centroid_detection(cam)\n# Main loop:\n# - perform simulation steps until Webots is stopping the controller\n# while robot.step(timestep) != -1:\n# Read the sensors:\n# Enter here functions to read sensor data, like:\n# val = ds.getValue()\n# if count == 100:\n# #print(is_carriage_end(camera))\n# count +=1\n\n# Process sensor data here.\n\n# Enter here functions to send actuator commands, like:\n# motor.setPosition(10.0)\n\n# pass\n\n# Enter here exit cleanup code.\n","repo_name":"SDP-Team10/Railly-Clean","sub_path":"libraries/sticker_detection.py","file_name":"sticker_detection.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"1643222215","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom django.http import HttpResponseRedirect, HttpResponseForbidden\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse_lazy, reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\n\nfrom filmdemocracy.democracy import forms\nfrom filmdemocracy.core.models import Notification\nfrom filmdemocracy.democracy.models import Club, Meeting\n\nfrom filmdemocracy.core.utils import user_is_club_member_check, user_is_club_admin_check, user_is_organizer_check\nfrom filmdemocracy.core.utils import SpamHelper\n\n\n@method_decorator(login_required, name='dispatch')\nclass MeetingsNewView(UserPassesTestMixin, generic.FormView):\n    form_class = forms.MeetingsForm\n    subject_template = 'democracy/emails/meetings_new_subject.txt'\n    email_template = 'democracy/emails/meetings_new_email.html'\n    html_email_template = 'democracy/emails/meetings_new_email_html.html'\n\n    def test_func(self):\n        return user_is_club_member_check(self.request.user, club_id=self.kwargs['club_id'])\n\n    def get_success_url(self):\n        return reverse_lazy('democracy:club_detail', kwargs={'club_id': self.kwargs['club_id']})\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        club = get_object_or_404(Club, id=self.kwargs['club_id'])\n        context['club'] = club\n        context['new_meeting'] = True\n        return context\n\n    @staticmethod\n    def create_notifications(_user, _club, _meeting):\n        club_members = _club.members.filter(is_active=True).exclude(id=_user.id)\n        for member in club_members:\n            Notification.objects.create(type=Notification.MEET_ORGAN,\n                                        activator=_user,\n                                        club=_club,\n                                        object_id=_meeting.id,\n                                        recipient=member)\n\n    def form_valid(self, form):\n        user = self.request.user\n        club = get_object_or_404(Club, id=self.kwargs['club_id'])\n        new_meeting = Meeting.objects.create(\n            club=club,\n            
name=form.cleaned_data['name'],\n description=form.cleaned_data['description'],\n organizer=user,\n place=form.cleaned_data['place'],\n date=form.cleaned_data['date'],\n time_start=form.cleaned_data['time_start'],\n )\n new_meeting.save()\n self.create_notifications(user, club, new_meeting)\n if form.cleaned_data['send_spam']:\n spam_helper = SpamHelper(self.request, self.subject_template, self.email_template, self.html_email_template)\n email_context = {\n 'organizer': self.request.user,\n 'club': club,\n 'name': new_meeting.name,\n 'description': new_meeting.description,\n 'place': new_meeting.place,\n 'date': new_meeting.date,\n 'time_start': new_meeting.time_start,\n }\n spammable_members = club.members.filter(is_active=True)\n to_emails_list = [member.email for member in spammable_members]\n spam_helper.send_emails(to_emails_list, email_context)\n messages.success(self.request, _('Meeting planned! A notification email has been sent to club members.'))\n else:\n messages.success(self.request, _('Meeting planned!'))\n return super().form_valid(form)\n\n\n@method_decorator(login_required, name='dispatch')\nclass MeetingsEditView(UserPassesTestMixin, generic.FormView):\n form_class = forms.MeetingsForm\n subject_template = 'democracy/emails/meetings_edit_subject.txt'\n email_template = 'democracy/emails/meetings_edit_email.html'\n html_email_template = 'democracy/emails/meetings_edit_email_html.html'\n\n def test_func(self):\n return user_is_organizer_check(self.request.user, club_id=self.kwargs['club_id'], meeting_id=self.kwargs['meeting_id'])\n\n def get_form_kwargs(self):\n kwargs = super(MeetingsEditView, self).get_form_kwargs()\n kwargs.update({'meeting_id': self.kwargs['meeting_id']})\n return kwargs\n\n def get_success_url(self):\n return reverse_lazy('democracy:club_detail', kwargs={'club_id': self.kwargs['club_id']})\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['club'] = get_object_or_404(Club, id=self.kwargs['club_id'])\n context['meeting'] = get_object_or_404(Meeting, id=self.kwargs['meeting_id'])\n context['new_meeting'] = False\n return context\n\n @staticmethod\n def create_notifications(_user, _club, _meeting):\n meeting_members = _meeting.members_yes.all() | _meeting.members_maybe.all() | _meeting.members_no.all()\n for member in meeting_members:\n Notification.objects.create(type=Notification.MEET_EDIT,\n activator=_user,\n club=_club,\n object_id=_meeting.id,\n recipient=member)\n\n def form_valid(self, form):\n meeting = get_object_or_404(Meeting, id=self.kwargs['meeting_id'])\n meeting.name = form.cleaned_data['name']\n meeting.description = form.cleaned_data['description']\n meeting.place = form.cleaned_data['place']\n meeting.date = form.cleaned_data['date']\n meeting.time_start = form.cleaned_data['time_start']\n meeting.save()\n club = get_object_or_404(Club, id=self.kwargs['club_id'])\n self.create_notifications(self.request.user, club, meeting)\n spam_option = form.cleaned_data['spam_options']\n if spam_option == 'all' or spam_option == 'interested':\n spam_helper = SpamHelper(self.request, self.subject_template, self.email_template, self.html_email_template)\n email_context = {\n 'organizer': self.request.user,\n 'club': club,\n 'name': meeting.name,\n 'description': meeting.description,\n 'place': meeting.place,\n 'date': meeting.date,\n 'time_start': meeting.time_start,\n }\n if spam_option == 'all':\n spammable_members = club.members.filter(is_active=True)\n to_emails_list = [member.email for member in 
spammable_members]\n spam_helper.send_emails(to_emails_list, email_context)\n messages.success(self.request, _('Meeting edited! A notification email has been sent to club members.'))\n else:\n spammable_members = meeting.members_yes.all() | meeting.members_maybe.all() | meeting.members_no.all()\n to_emails_list = [member.email for member in spammable_members]\n spam_helper.send_emails(to_emails_list, email_context)\n messages.success(self.request, _('Meeting edited! A notification email has been sent to members interested in it.'))\n else:\n messages.success(self.request, _('Meeting edited!'))\n return super().form_valid(form)\n\n\n@login_required\ndef meeting_assistance(request, club_id, meeting_id):\n user = request.user\n club = get_object_or_404(Club, id=club_id)\n if not user_is_club_member_check(user, club=club):\n return HttpResponseForbidden()\n meeting = get_object_or_404(Meeting, id=meeting_id)\n if 'assist_yes' in request.POST:\n if user in meeting.members_yes.all():\n meeting.members_yes.remove(user)\n else:\n if user in meeting.members_maybe.all():\n meeting.members_maybe.remove(user)\n elif user in meeting.members_no.all():\n meeting.members_no.remove(user)\n meeting.members_yes.add(user)\n elif 'assist_maybe' in request.POST:\n if user in meeting.members_maybe.all():\n meeting.members_maybe.remove(user)\n else:\n if user in meeting.members_yes.all():\n meeting.members_yes.remove(user)\n elif user in meeting.members_no.all():\n meeting.members_no.remove(user)\n meeting.members_maybe.add(user)\n elif 'assist_no' in request.POST:\n if user in meeting.members_no.all():\n meeting.members_no.remove(user)\n else:\n if user in meeting.members_yes.all():\n meeting.members_yes.remove(user)\n if user in meeting.members_maybe.all():\n meeting.members_maybe.remove(user)\n meeting.members_no.add(user)\n meeting.save()\n return HttpResponseRedirect(reverse('democracy:club_detail', kwargs={'club_id': club_id}))\n\n\n@login_required\ndef delete_meeting(request, club_id, meeting_id):\n\n def create_notifications(_user, _club, _meeting):\n meeting_members_groups = [_meeting.members_yes.all(), _meeting.members_maybe.all(), _meeting.members_no.all()]\n for member_group in meeting_members_groups:\n for member in member_group:\n if _user != member:\n Notification.objects.create(type=Notification.MEET_DEL,\n activator=_user,\n club=_club,\n object_id=_meeting.id,\n recipient=member)\n\n club = get_object_or_404(Club, id=club_id)\n organizer_check = user_is_organizer_check(request.user, club=club, meeting_id=meeting_id)\n admin_check = user_is_club_admin_check(request.user, club=club)\n if not organizer_check and not admin_check:\n return HttpResponseForbidden()\n meeting = get_object_or_404(Meeting, id=meeting_id)\n meeting.active = False\n meeting.save()\n create_notifications(request.user, club, meeting)\n return HttpResponseRedirect(reverse('democracy:club_detail', kwargs={'club_id': club.id}))\n\n\n@method_decorator(login_required, name='dispatch')\nclass MeetingsListView(UserPassesTestMixin, generic.TemplateView):\n\n def test_func(self):\n return user_is_club_member_check(self.request.user, club_id=self.kwargs['club_id'])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n club = get_object_or_404(Club, id=self.kwargs['club_id'])\n club_meetings = Meeting.objects.filter(club=club, active=True, date__gte=timezone.now().date())\n context['club_meetings'] = club_meetings.order_by('date')[0:20]\n return 
context\n","repo_name":"astromedia/filmdemocracy","sub_path":"filmdemocracy/democracy/views/meetings.py","file_name":"meetings.py","file_ext":"py","file_size_in_byte":10831,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"28092060702","text":"from twisted.internet import reactor\nfrom twisted.internet.protocol import ServerFactory, connectionDone\nfrom twisted.protocols.basic import LineOnlyReceiver\n\n\nclass ConnectorProtocol(LineOnlyReceiver):\n    factory: 'Server'\n    login: str = None\n\n    def connectionMade(self):\n        self.factory.clients.append(self)\n\n    def connectionLost(self, reason=connectionDone):\n        self.factory.clients.remove(self)\n\n    def lineReceived(self, line: bytes):\n        content = line.decode()\n\n        if self.login is not None:\n            content = f\"Message from {self.login}: {content}\"\n\n            for user in self.factory.clients:\n                if user is not self:\n                    user.sendLine(content.encode())\n\n        else:\n            if content.startswith(\"login:\"):\n                self.login = content.replace(\"login:\", \"\")\n                # taken logins are tracked on the factory so they stay unique across all connections\n                if self.login not in self.factory.login_list:\n                    self.sendLine(\"Welcome\".encode())\n                    self.factory.login_list.append(self.login)\n                else:\n                    self.sendLine(\"Login already exists, try another one\".encode())\n                    self.login = None  # reject the name so the client can retry\n            else:\n                self.sendLine(\"Invalid login\".encode())\n\n        # print(f\"Message: {line}\")\n\n\nclass Server(ServerFactory):\n    protocol = ConnectorProtocol\n    clients: list\n    login_list: list\n\n    def startFactory(self):\n        self.clients = []\n        self.login_list = []\n        print('Server started')\n\n    def stopFactory(self):\n        print(\"Server stopped\")\n\n\nreactor.listenTCP(1234, Server())\nreactor.run()\n","repo_name":"andyxcore/Messenger_v2","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29361320990","text":"# List of standard library imports\nfrom csv import writer\nfrom json import dump, load\nfrom os import listdir\nfrom os.path import join, exists\n\n\n# This script contains functions responsible for creation of reports\n\n\n# Create report for data recovered from cache\ndef report_cache(cache_list, output_dir):\n    # Produce report in csv format\n    with open(join(output_dir, \"Reports\", \"cache_data.csv\"), \"w\", newline=\"\") as f:\n        write_data = writer(f)\n        write_data.writerow(\n            [\n                \"Filename\",\n                \"URL\",\n                \"URL Length\",\n                \"URL Location\",\n                \"Range URL\",\n                \"Range URL Length\",\n                \"Range URL Location\",\n                \"Cache Entry Location\",\n                \"Ranking Entry Location\",\n                \"Content Size\",\n                \"Content Location\",\n                \"Response Size\",\n                \"Response Location\",\n                \"Entry Creation Time\",\n                \"Range Entry Creation Time\",\n                \"Last Accessed Time\",\n                \"Last Modified Time\",\n                \"Entry Expiry Time\",\n                \"Server Response Time\",\n                \"Server Response\",\n                \"Content Type\",\n                \"Content Encoding\",\n                \"ETag\",\n                \"Max Age\",\n                \"Server Name\",\n                \"Server IP\",\n                \"MD5\",\n                \"SHA1\",\n                \"SHA256\",\n            ]\n        )\n        for i in cache_list:\n            write_data.writerow(\n                [\n                    i.filename,\n                    i.url,\n                    i.url_length,\n                    get_location(i.url_location),\n                    i.range_url,\n                    i.range_url_length,\n                    get_location(i.range_url_location),\n                    get_location(i.entry_location),\n                    get_location(i.rankings_location),\n                    i.content_size,\n                    get_location(i.content_location),\n                    i.response_size,\n                    get_location(i.response_location),\n                    i.entry_created_time,\n                    i.partial_entry_created_time,\n                    i.last_accessed_time,\n                    i.last_modified_time,\n                    i.expiry_time,\n                    i.response_time,\n                    i.server_response,\n                    
i.content_type,\n i.content_encoding,\n i.etag,\n i.max_age,\n i.server_name,\n i.server_ip,\n i.md5,\n i.sha1,\n i.sha256,\n ]\n )\n\n\ndef get_location(location):\n if location:\n return location[0] + \" [\" + str(location[1]) + \"]\"\n else:\n return \"\"\n\n\n# Create report for data recovered from activity log\ndef report_activity(servers, channels, mails, output_dir):\n elements = max(len(servers), len(channels), len(mails))\n # Fill lists with dashes to avoid index errors\n for i in range(0, elements):\n if len(servers) < elements:\n servers.append(\"-\")\n if len(channels) < elements:\n channels.append(\"-\")\n if len(mails) < elements:\n mails.append(\"-\")\n i += 1\n if elements:\n with open(join(output_dir, \"Reports\", \"activity_data.csv\"), \"w\", newline=\"\") as f:\n write_data = writer(f)\n write_data.writerow([\"Servers\", \"Channels\", \"Mails\"])\n x = 0\n while x < elements:\n write_data.writerow([servers[x], channels[x], mails[x]])\n x += 1\n\n\n# Create chat log reports in form of HTML files\ndef chat_to_html(cache_data_list, output_dir):\n logs_dir = join(output_dir, \"Extracted\", \"Chat_logs\")\n chat_list = listdir(logs_dir)\n chat_list.sort()\n\n # Check if chat log contains more than one conversation.\n for file in chat_list:\n with open(join(logs_dir, file), \"r\") as f:\n data = load(f)\n if \"messages\" in data:\n # If more than one conversation found, move them to separate files\n for e in data[\"messages\"]:\n i = 0\n if exists(join(logs_dir, f\"{e[0]['channel_id']}.json\")):\n while exists(join(logs_dir, f\"{e[0]['channel_id']} ({i}).json\")):\n i += 1\n with open(join(logs_dir, f\"{e[0]['channel_id']} ({i}).json\"), \"w\") as f2:\n dump(e, f2)\n else:\n with open(join(logs_dir, f\"{e[0]['channel_id']}.json\"), \"w\") as f2:\n dump(e, f2)\n # Recover messages from chat logs and create conversation reports\n for file in chat_list:\n messages = \"\"\n avatar_path = \"\"\n url = \"\"\n att_path = \"\"\n img_url = \"\"\n filename = file.split(\".\", 1)[0]\n # Create structure of HTML file\n # Indentation rule is broken here to save space in the final reports\n html_struct = f\"\"\"\n\n\n\n\n\n

<!DOCTYPE html>\n<html>\n<head>\n<meta charset=\"UTF-8\">\n<title>Channel Id: {filename}</title>\n</head>\n<body>\n<h1>Channel Id: {filename}</h1>\n%s\n</body>\n</html>\"\"\"\n    with open(join(logs_dir, file), \"r\") as f:\n        data = load(f)\n        # Reading elements of a message\n        if any(\"channel_id\" in s for s in data):\n            for e in data:\n                avatar = e[\"author\"][\"avatar\"]\n                if avatar is not None:\n                    if exists(join(output_dir, \"Extracted\", \"Images\", f\"{avatar}.webp\")):\n                        avatar_path = join(output_dir, \"Extracted\", \"Images\", f\"{avatar}.webp\")\n                date = e[\"timestamp\"].split(\"T\", 1)[0]\n                time = e[\"timestamp\"].split(\"T\", 1)[1].split(\".\", 1)[0]\n                timestamp = date + \" \" + time\n\n                if len(e[\"attachments\"]) > 0:\n                    url = e[\"attachments\"][0][\"url\"].split(\"attachments\", 1)[1]\n                    for key in cache_data_list:\n                        if url in key.url:\n                            att_file = key.filename\n                            img_url = key.url\n                            if exists(join(output_dir, \"Extracted\", \"Images\", att_file)):\n                                att_path = join(output_dir, \"Extracted\", \"Images\", att_file)\n                            break\n                # Filling structure of a message with recovered data\n                message = f\"\"\"\n<table>\n<tr><td>Message Id</td><td>{e[\"id\"]}</td><td>Time</td><td>{timestamp}</td></tr>\n<tr><td rowspan=\"3\">Author</td><td>Id</td><td>{e[\"author\"][\"id\"]}</td><td rowspan=\"3\"><img src=\"{avatar_path}\" alt=\"{e[\"author\"][\"avatar\"]}\"></td></tr>\n<tr><td>Username</td><td>{e[\"author\"][\"username\"]}</td></tr>\n<tr><td>Discriminator</td><td>{e[\"author\"][\"discriminator\"]}</td></tr>\n<tr><td>Content</td><td colspan=\"3\">{e[\"content\"]}</td></tr>\n<tr><td colspan=\"4\"><img src=\"{att_path}\" alt=\"{url}\"></td></tr>\n<tr><td colspan=\"4\"><a href=\"{img_url}\">{img_url}</a></td></tr>\n</table>
\n\"\"\"\n # Reconstruction of conversation by adding single messages together\n messages = messages + message\n if not messages == \"\":\n # Pasting conversation into HTML report structure and saving all in a new file\n html_file = html_struct % messages\n with open(join(output_dir, \"Reports\", \"Chat_logs\", filename + \".html\"), \"w\", encoding=\"utf-8\") as report:\n report.write(html_file)\n","repo_name":"MichalMotylinski/DiscFor-Discord-Artefact-Extraction-Tool","sub_path":"report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":7681,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"30"} +{"seq_id":"32739102879","text":"from lib.visual.imager import Imager\nfrom lib.visual.common import center, proportion\nfrom lib.support.exceptions import NotVisibleException\nfrom lib.visual.pred import predict\nfrom lib.support.deviation import *\nfrom conf.config import LoadConfig\nfrom pathlib import Path\nimport json\nimport base64\n\n\nclass BaseExpectation:\n\n FULL_SCREEN = False\n\n def __init__(self):\n self._img = str(Path.cwd().joinpath(\"resource\", \"img\", f\"{LoadConfig().model['img']}.png\"))\n\n @staticmethod\n def get_viewport_size(driver):\n width = driver.execute_script(\"return window.innerWidth;\")\n height = driver.execute_script(\"return window.innerHeight;\")\n return [width, height]\n\n @staticmethod\n def get_body_size(driver):\n body = driver.find_element_by_tag_name(\"body\")\n size = body.size\n return [size[\"width\"], size[\"height\"]]\n\n def save_full_screenshot(self, driver):\n def send(cmd, params):\n resource = \"/session/%s/chromium/send_command_and_get_result\" % driver.session_id\n url = driver.command_executor._url + resource\n body = json.dumps({\"cmd\":cmd, \"params\": params})\n response = driver.command_executor._request(\"POST\", url, body)\n return response.get(\"value\")\n\n def evaluate(script):\n response = send(\"Runtime.evaluate\", {\"returnByValue\": True, \"expression\": script})\n return response[\"result\"][\"value\"]\n\n if hasattr(driver, \"get_full_page_screenshot_as_file\"):\n driver.get_full_page_screenshot_as_file(self._img)\n else:\n if driver.name.lower() == \"chrome\":\n metrics = evaluate(\n \"({\" + \\\n \"width: Math.max(window.innerWidth, document.body.scrollWidth, document.documentElement.scrollWidth)|0,\" + \\\n \"height: Math.max(innerHeight, document.body.scrollHeight, document.documentElement.scrollHeight)|0,\" + \\\n \"deviceScaleFactor: window.devicePixelRatio || 1,\" + \\\n \"mobile: typeof window.orientation !== 'undefined'\" + \\\n \"})\")\n send(\"Emulation.setDeviceMetricsOverride\", metrics)\n screenshot = send(\"Page.captureScreenshot\", {\"format\": \"png\", \"fromSurface\": True})\n send(\"Emulation.clearDeviceMetricsOverride\", {})\n with open(self._img, 'wb') as f:\n f.write(base64.b64decode(screenshot['data']))\n elif driver.name.lower() == \"firefox\":\n resource = \"/session/%s/moz/screenshot/full\" % driver.session_id\n url = driver.command_executor._url + resource\n content = driver.command_executor._request(\"GET\", url)\n with open(self._img, 'wb') as f:\n f.write(base64.b64decode(content[\"value\"]))\n else:\n driver.save_screenshot(self._img)\n\n @staticmethod\n def scroll_into_view(driver, position):\n driver.execute_script(f\"window.scrollTo({position[0]}, {position[1]});\")\n\n\nclass TextDisplayOnPage(BaseExpectation):\n\n def __init__(self, instance, multiple=False):\n super().__init__()\n self.text = instance if isinstance(instance, str) 
else instance.text\n self.full_match = instance.full_match if hasattr(instance, \"full_match\") else False\n self.multiple = multiple\n self.elements = []\n\n def __call__(self, driver):\n if self.FULL_SCREEN:\n self.save_full_screenshot(driver)\n else:\n driver.save_screenshot(self._img)\n contours, shape = Imager.recognize_contours(self._img)\n if self.full_match:\n for c in contours:\n if c[1] == self.text or qualified(c[1], self.text):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n return False\n else:\n if self.multiple:\n self.elements.append(proportion(center(c[0]), self.get_viewport_size(driver), shape))\n else:\n return proportion(center(c[0]), self.get_viewport_size(driver), shape)\n else:\n for t in self.text.split(\"|\"):\n for c in contours:\n found = c[1].find(t.strip()) >= 0\n if found or qualified(c[1], t):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n return False\n else:\n if found:\n accurate_c = [p for p in c[0]]\n width = c[0][2] - c[0][0]\n if c[1].find(self.text.strip()) / len(c[1]) > 0.2:\n accurate_c[0] += width * (c[1].find(self.text.strip()) / len(c[1]))\n if (c[1].find(self.text.strip()) + len(self.text.strip())) / len(c[1]) < 0.8:\n accurate_c[2] = c[0][0] + width * ((c[1].find(self.text.strip()) + len(self.text.strip())) / len(c[1]))\n else:\n accurate_c = c[0]\n if self.multiple:\n self.elements.append(proportion(center(accurate_c), self.get_viewport_size(driver), shape))\n else:\n return proportion(center(accurate_c), self.get_viewport_size(driver), shape)\n words = []\n double_check = False\n for t in self.text.split(\"|\"):\n if t.find(\" \") > 0:\n words.append(t.split(\" \")[0])\n for w in words:\n if str(contours).find(f\"'{w}'\") > 0:\n double_check = True\n break\n if double_check:\n contours, shape = Imager.recognize_contours(self._img, font=\"large\")\n for t in self.text.split(\"|\"):\n for c in contours:\n if c[1].find(t.strip()) >= 0 or qualified(c[1], t):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n return False\n else:\n if self.multiple:\n self.elements.append(proportion(center(c[0]), self.get_viewport_size(driver), shape))\n else:\n return proportion(center(c[0]), self.get_viewport_size(driver), shape)\n self.FULL_SCREEN = True\n # raise NotVisibleException(\"text NOT visible\")\n if len(self.elements) > 0:\n return self.elements\n return False\n\n\nclass ElementDisplayOnPage(BaseExpectation):\n\n def __init__(self, model, element, keyword=None, multiple=False):\n super().__init__()\n self.model = model\n self.element = element.lower()\n self.keyword = keyword\n self.multiple = multiple\n self.elements = []\n\n def __call__(self, driver):\n if self.FULL_SCREEN:\n self.save_full_screenshot(driver)\n else:\n driver.save_screenshot(self._img)\n results, labels, shape = predict(self.model)\n if self.element not in labels.keys():\n return False\n for r in results:\n if r[\"N\"] == self.element:\n if self.keyword:\n tmp = Imager.recognize_crop_contours(self._img, r[\"COOR\"])\n for k in self.keyword.split(\"|\"):\n if not tmp:\n (x, y) = proportion(center(r[\"COOR\"]), self.get_viewport_size(driver), shape)\n tmp = driver.execute_script(f\"return document.elementFromPoint({x}, {y});\").text\n if tmp.find(\"\\n\") > 0:\n continue\n if tmp.find(k.strip()) >= 
0 or qualified(tmp, k):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(r[\"COOR\"]), self.get_body_size(driver), shape))\n return False\n else:\n if self.multiple:\n self.elements.append(proportion(center(r[\"COOR\"]), self.get_viewport_size(driver), shape))\n else:\n return proportion(center(r[\"COOR\"]), self.get_viewport_size(driver), shape)\n else:\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(r[\"COOR\"]), self.get_body_size(driver), shape))\n return False\n else:\n if self.multiple:\n self.elements.append(proportion(center(r[\"COOR\"]), self.get_viewport_size(driver), shape))\n else:\n return proportion(center(r[\"COOR\"]), self.get_viewport_size(driver), shape)\n self.FULL_SCREEN = True\n # raise NotVisibleException(\"text NOT visible\")\n if len(self.elements) > 0:\n return self.elements\n return False\n\n\nclass ElementMatchOnPage(BaseExpectation):\n\n def __init__(self, model, element, keyword, direction, multiple=False):\n super().__init__()\n self.model = model\n self.element = element.lower()\n self.keyword = keyword\n self.direction = direction.lower() if direction else \"down\"\n self.multiple = multiple\n self.elements = []\n\n def __call__(self, driver):\n if self.FULL_SCREEN:\n self.save_full_screenshot(driver)\n else:\n driver.save_screenshot(self._img)\n match_keyword = None\n match_area = None\n contours, shape = Imager.recognize_contours(self._img)\n exit_flag = False\n for c in contours:\n for t in self.keyword.split(\"|\"):\n if c[1].find(t.strip()) >= 0 or qualified(c[1], t):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n return False\n match_keyword = proportion(center(c[0]), self.get_viewport_size(driver), shape)\n match_area = c[0]\n exit_flag = True\n break\n if exit_flag:\n break\n if not match_keyword:\n self.FULL_SCREEN = True\n return False\n results, labels, shape = predict(self.model)\n if self.element not in labels.keys():\n self.FULL_SCREEN = True\n return False\n relative_elements = {\n \"up\": {\"area\": [], \"edge\": [], \"orientation\": [], \"match_orien\": (match_area[0], match_area[2])},\n \"down\": {\"area\": [], \"edge\": [], \"orientation\": [], \"match_orien\": (match_area[0], match_area[2])},\n \"left\": {\"area\": [], \"edge\": [], \"orientation\": [], \"match_orien\": (match_area[1], match_area[3])},\n \"right\": {\"area\": [], \"edge\": [], \"orientation\": [], \"match_orien\": (match_area[1], match_area[3])}\n }\n for r in results:\n if r[\"N\"] == self.element:\n if r[\"COOR\"][3] < match_area[3] and r[\"COOR\"][1] < match_area[1]:\n relative_elements[\"up\"][\"area\"].append(r[\"COOR\"])\n relative_elements[\"up\"][\"edge\"].append((r[\"COOR\"][0], r[\"COOR\"][3], r[\"COOR\"][2], r[\"COOR\"][3]))\n relative_elements[\"up\"][\"orientation\"].append((r[\"COOR\"][0], r[\"COOR\"][2]))\n if r[\"COOR\"][3] > match_area[3] and r[\"COOR\"][1] > match_area[1]:\n relative_elements[\"down\"][\"area\"].append(r[\"COOR\"])\n relative_elements[\"down\"][\"edge\"].append((r[\"COOR\"][0], r[\"COOR\"][1], r[\"COOR\"][2], r[\"COOR\"][1]))\n relative_elements[\"down\"][\"orientation\"].append((r[\"COOR\"][0], r[\"COOR\"][2]))\n if r[\"COOR\"][2] < match_area[2] and r[\"COOR\"][0] < match_area[0]:\n relative_elements[\"left\"][\"area\"].append(r[\"COOR\"])\n relative_elements[\"left\"][\"edge\"].append((r[\"COOR\"][2], r[\"COOR\"][1], r[\"COOR\"][2], 
r[\"COOR\"][3]))\n relative_elements[\"left\"][\"orientation\"].append((r[\"COOR\"][1], r[\"COOR\"][3]))\n if r[\"COOR\"][2] > match_area[2] and r[\"COOR\"][0] > match_area[0]:\n relative_elements[\"right\"][\"area\"].append(r[\"COOR\"])\n relative_elements[\"right\"][\"edge\"].append((r[\"COOR\"][0], r[\"COOR\"][1], r[\"COOR\"][0], r[\"COOR\"][3]))\n relative_elements[\"right\"][\"orientation\"].append((r[\"COOR\"][1], r[\"COOR\"][3]))\n matched_element = None\n distance = None\n for inx, e in enumerate(relative_elements[self.direction][\"edge\"]):\n if (relative_elements[self.direction][\"match_orien\"][0] > relative_elements[self.direction][\"orientation\"][inx][1]) or (relative_elements[self.direction][\"match_orien\"][1] < relative_elements[self.direction][\"orientation\"][inx][0]):\n continue\n else:\n position = proportion(center(e), self.get_viewport_size(driver), shape)\n if not matched_element:\n distance = (position[0] - match_keyword[0]) ** 2 + (position[1] - match_keyword[1]) ** 2\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n if self.multiple:\n self.elements.append(matched_element)\n else:\n temp = (position[0] - match_keyword[0]) ** 2 + (position[1] - match_keyword[1]) ** 2\n if temp < distance:\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n distance = temp\n if self.multiple:\n self.elements.insert(0, matched_element)\n else:\n if self.multiple:\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n self.elements.append(matched_element)\n if not matched_element:\n for inx, e in enumerate(relative_elements[self.direction][\"edge\"]):\n position = proportion(center(e), self.get_viewport_size(driver), shape)\n if not matched_element:\n distance = (position[0] - match_keyword[0]) ** 2 + (position[1] - match_keyword[1]) ** 2\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n if self.multiple:\n self.elements.append(matched_element)\n else:\n temp = (position[0] - match_keyword[0]) ** 2 + (position[1] - match_keyword[1]) ** 2\n if temp < distance:\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n distance = temp\n if self.multiple:\n self.elements.insert(0, matched_element)\n else:\n if self.multiple:\n matched_element = proportion(center(relative_elements[self.direction][\"area\"][inx]), self.get_viewport_size(driver), shape)\n self.elements.append(matched_element)\n self.FULL_SCREEN = False\n if len(self.elements) > 0:\n return self.elements\n return matched_element\n\n\nclass ElementByRegionDisplayOnPage(BaseExpectation):\n\n def __init__(self, model, element, refer, keyword=None, multiple=False):\n super().__init__()\n self.model = model\n self.element = element.lower()\n self.refer = refer\n self.keyword = keyword\n self.text = keyword if self.element in [\"static\"] else None\n self.multiple = multiple\n self.elements = []\n\n def __call__(self, driver):\n if self.FULL_SCREEN:\n self.save_full_screenshot(driver)\n else:\n driver.save_screenshot(self._img)\n refer_position = None\n contours, shape = Imager.recognize_contours(self._img)\n for c in contours:\n if c[1].find(self.refer) >= 0 or qualified(c[1], self.refer):\n if self.FULL_SCREEN:\n self.FULL_SCREEN = False\n 
self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n return False\n else:\n refer_position = proportion(center(c[0]), self.get_viewport_size(driver), shape)\n break\n if not refer_position:\n self.FULL_SCREEN = True\n return False\n else:\n regions = Imager.recognize_region(self._img)\n elements = ElementDisplayOnPage(self.model, self.element, self.keyword, True)(driver)\n matched_elements = []\n for r in regions:\n proportion_r_left_top = proportion((r[0], r[1]), self.get_viewport_size(driver), shape)\n proportion_r_right_bottom = proportion((r[2], r[3]), self.get_viewport_size(driver), shape)\n proportion_r = (proportion_r_left_top[0], proportion_r_left_top[1], proportion_r_right_bottom[0], proportion_r_right_bottom[1])\n if proportion_r[0] < refer_position[0] < proportion_r[2] and proportion_r[1] < refer_position[1] < proportion_r[3]:\n for e in elements:\n if proportion_r[0] < e[0] < proportion_r[2] and proportion_r[1] < e[1] < proportion_r[3]:\n matched_elements.append(e)\n if len(matched_elements) > 0:\n if self.multiple:\n return matched_elements\n else:\n return matched_elements[0]\n else:\n return False\n\n# class ElementByRegionDisplayOnPage(BaseExpectation):\n# # incomplete\n#\n# def __init__(self, model, element, refer, keyword=None, multiple=False):\n# super().__init__()\n# self.model = model\n# self.element = element.lower()\n# self.refer = refer\n# self.keyword = keyword\n# self.text = keyword if self.element in [\"static\"] else None\n# self.multiple = multiple\n# self.elements = []\n#\n# def __call__(self, driver):\n# if self.FULL_SCREEN:\n# self.save_full_screenshot(driver)\n# else:\n# driver.save_screenshot(self._img)\n# refer_position = None\n# contours, shape = Imager.recognize_contours(self._img)\n# for c in contours:\n# if c[1].find(self.refer) >= 0 or qualified(c[1], self.refer):\n# if self.FULL_SCREEN:\n# self.FULL_SCREEN = False\n# self.scroll_into_view(driver, proportion(center(c[0]), self.get_body_size(driver), shape))\n# return False\n# else:\n# refer_position = proportion(center(c[0]), self.get_viewport_size(driver), shape)\n# break\n# if not refer_position:\n# self.FULL_SCREEN = True\n# return False\n# else:\n# texts_y_in_column = []\n# refer_js_rect = driver.execute_script(f\"return document.elementFromPoint({refer_position[0]}, {refer_position[1]}).getClientRects()[0];\")\n# for c in contours:\n# temp_position = proportion(center(c[0]), self.get_viewport_size(driver), shape)\n# temp_js_rect = driver.execute_script(f\"return document.elementFromPoint({temp_position[0]}, {temp_position[1]}).getClientRects()[0];\")\n# if refer_js_rect[\"left\"] == temp_js_rect[\"left\"] and refer_js_rect[\"height\"] == temp_js_rect[\"height\"]:\n# texts_y_in_column.append(temp_js_rect[\"y\"])\n# texts_y_in_column.sort()\n# amounts = len(texts_y_in_column)\n# y_range = ()\n# if amounts > 1:\n# for inx, y in enumerate(texts_y_in_column):\n# if y == refer_js_rect[\"y\"]:\n# if inx == (amounts - 1):\n# y_range = (y-20, self.get_viewport_size(driver)[1])\n# else:\n# y_range = (y-20, texts_y_in_column[inx+1]-20)\n# if self.text is not None:\n# temp_elements = TextDisplayOnPage(self.text, multiple=True)(driver)\n# else:\n# temp_elements = ElementDisplayOnPage(self.model, self.element, self.keyword, multiple=True)(driver)\n# if temp_elements:\n# elements_by_region = []\n# for e in temp_elements:\n# if y_range[0] <= e[1] <= y_range[1]:\n# if self.multiple:\n# elements_by_region.append(e)\n# else:\n# return e\n# return elements_by_region\n# 
else:\n#                 return False\n#         else:\n#             if self.text:\n#                 return TextDisplayOnPage(self.text, self.multiple)(driver)\n#             else:\n#                 return ElementDisplayOnPage(self.model, self.element, self.keyword, self.multiple)(driver)\n","repo_name":"bobjiangps/vision","sub_path":"lib/support/expected.py","file_name":"expected.py","file_ext":"py","file_size_in_byte":22541,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"30"} +{"seq_id":"35998169686","text":"from redbot.core import commands, Config\nfrom discord import Member, Forbidden\n\nfrom redbot.core.i18n import cog_i18n, Translator\nfrom logging import getLogger\n\nfrom typing import Union\n\n_ = Translator(\"StickyMember\", __file__)\n\n\n@cog_i18n(_)\nclass StickyMember(commands.Cog):\n    __version__ = \"2.0.0\"\n\n    def format_help_for_context(self, ctx: commands.Context) -> str:\n        # Thanks Sinbad! And Trusty in whose cogs I found this.\n        pre_processed = super().format_help_for_context(ctx)\n        return f\"{pre_processed}\\n\\nVersion: {self.__version__}\"\n\n    async def red_delete_data_for_user(self, *, requester, user_id):\n        data = await self.config.all_members()  # all_members() is a coroutine and must be awaited\n        for g in data:\n            for m in data[g]:  # iterate the member ids stored under each guild id\n                if m == user_id:\n                    await self.config.member_from_ids(g, m).clear()\n\n    def __init__(self):\n        self.config = Config.get_conf(self, 231215102020, force_registration=True)\n        default = {\"roles\": [], \"active\": False}\n        self.config.register_member(**default)\n        self.logger = getLogger(\"red.cog.dav-cogs.stickymember\")\n\n    @commands.Cog.listener()\n    async def on_member_update(self, before, after):\n        if await self.config.member(after).active():\n            role_ids = [r.id for r in after.roles]\n            role_ids.remove(after.guild.id)\n            await self.config.member(after).roles.set(role_ids)\n\n    @commands.Cog.listener()\n    async def on_member_join(self, member):\n        if await self.config.member(member).active():\n            try:\n                await member.add_roles(\n                    *[member.guild.get_role(r) for r in await self.config.member(member).roles()]\n                )\n            except Forbidden:\n                self.logger.warning(f\"Couldn't assign roles to {member.id} on rejoin. 403\")\n\n    @commands.admin()\n    @commands.command()\n    async def stickymem(self, ctx, member: Member) -> None:\n        await self.config.member(member).active.set(True)\n        role_ids = [r.id for r in member.roles]\n        role_ids.remove(member.guild.id)\n        await self.config.member(member).roles.set(role_ids)\n        await ctx.send(_(\"Stickied {member}.\").format(member=member.display_name))\n\n    @commands.admin()\n    @commands.command()\n    async def unstickymem(self, ctx, member: Union[Member, int]):\n        if isinstance(member, Member):\n            member = member.id\n        await self.config.member_from_ids(ctx.guild.id, member).active.set(False)\n        await ctx.send(_(\"{member_id} unstickied.\").format(member_id=member))\n","repo_name":"Dav-Git/Dav-Cogs","sub_path":"stickymember/stickymember.py","file_name":"stickymember.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"30"} +{"seq_id":"10631311054","text":"\nfrom flask import Flask, jsonify\nfrom flask_restful import Api\nfrom resources.bugs import Bugs\nfrom resources.bug import Bug\nfrom resources.projects import Projects\nfrom resources.project import Project\nfrom resources.users import UserRegister, UserLogin\nfrom flask_jwt_extended import JWTManager\nfrom flask_swagger_ui import get_swaggerui_blueprint\n\nfrom db import db\n\nSWAGGER_URL = '/api/docs'  # URL for exposing Swagger UI (without trailing '/')\nAPI_URL = '/static/api_doc.json'  # Our API url (can of course be a local resource)\n\nswaggerui_blueprint = get_swaggerui_blueprint(\n    # Swagger UI static files will be mapped to '{SWAGGER_URL}/dist/'\n    SWAGGER_URL,\n    API_URL,\n    config={  # Swagger UI config overrides\n        'app_name': \"Test application\"\n    },\n    # oauth_config={  # OAuth config. See https://github.com/swagger-api/swagger-ui#oauth2-configuration .\n    #    'clientId': \"your-client-id\",\n    #    'clientSecret': \"your-client-secret-if-required\",\n    #    'realm': \"your-realms\",\n    #    'appName': \"your-app-name\",\n    #    'scopeSeparator': \" \",\n    #    'additionalQueryStringParams': {'test': \"hello\"}\n    # }\n)\n\napp = Flask(__name__)\napp.register_blueprint(swaggerui_blueprint)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./bug-tracker.sqlite3'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\napp.config['BUNDLE_ERRORS'] = True  # global setting for all the reqparsers in the app\napi = Api(app)\n\napp.config['JWT_SECRET_KEY'] = 'lincoln'\njwt = JWTManager(app)\n\n@jwt.additional_claims_loader\ndef add_claims_to_jwt(identity):\n    if identity[\"is_admin\"] == True:\n        return { 'is_admin' : True }\n    return { 'is_admin' : False }\n\n@jwt.invalid_token_loader  # we have to keep the argument here, since it's passed in by the caller internally\ndef invalid_token_callback(error):\n    return jsonify({\n        'message': 'Signature verification failed.',\n        'error': 'invalid_token'\n    }), 401\n\n# customizing the standard error when no token in the request\n\n@jwt.unauthorized_loader\ndef missing_token_callback(error):\n    return jsonify({\n        'description' : 'Request does not contain an access token',\n        'error' : 'authorization_required'\n    }), 401\n\n\n\n\napi.add_resource(Bugs, '/bugs')\napi.add_resource(Bug, '/bugs/<int:id>')\napi.add_resource(Projects, '/projects')\napi.add_resource(Project, '/projects/<int:id>')\napi.add_resource(UserRegister, '/register')\napi.add_resource(UserLogin, '/login')\n\nif __name__ == '__main__':\n    db.init_app(app)\n    app.run(port = 8080, debug = True)\n\n\n\"\"\" \nHeader\n    Authorization\n        Bearer <token>\n\n    
\"\"\"","repo_name":"tkmagesh/LFG-Bootcamp-2023","sub_path":"08-Python/23-bug-tracker-services/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"31087988947","text":"from Sockets.Session import Session\nfrom BDD.Model import Model\nimport flask_socketio\n\nimport json\n\ndef ShowAnswer(data: dict[str, any], query_builder: Model, liste_session: list[Session], sio: flask_socketio.SocketIO):\n session_id = data.get(\"session\", {}).get(\"id\")\n code_salle = data.get(\"session\", {}).get(\"code\")\n\n for value in [session_id]:\n if value is None:\n return sio.emit(\"error\", {\"message\": \"Valeurs manquantes\", \"code\": code_salle})\n\n found = None\n\n for session in liste_session:\n if session.session_id == session_id:\n found = session\n break\n\n if found is None:\n return sio.emit(\"error\", {\"message\": \"Session non trouvée\", \"code\": code_salle})\n\n info_wanted = [\"users.id\", \"users.email\", \"users.numero\", \"users.firstname\", \"users.lastname\", \"users.created_at\",\n \"users.updated_at\"]\n session = query_builder.table(\"sessions\").select(*info_wanted).where(\"id\", found.session_id).load(\"users\")[0].export(convert=True)\n\n session[\"question\"] = found.questionActuelle()\n\n to_send = {\n \"session\": session,\n \"reponses\": found.reponsesActuelle(),\n \"waiting\": False,\n \"locked\": True\n }\n\n print(f\"Showing answer : {json.dumps(to_send)}\")\n print(\"Received answers : \", found.reponsesCourantes())\n\n return sio.emit(\"ShowAnswer\", to_send, broadcast=True)\n","repo_name":"reforged/swaifu-api","sub_path":"Sockets/Events/ShowAnswer.py","file_name":"ShowAnswer.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"30"} +{"seq_id":"17427897423","text":"class Solution(object):\n def findWords(self, words):\n result = []\n row1 = set('qwertyuiop')\n row2 = set('asdfghjkl')\n row3 = set('zxcvbnm')\n for word in words:\n word_lower = word.lower()\n word_set = set(word_lower)\n if word_set <= row1 or word_set <= row2 or word_set <= row3:\n result.append(word)\n\n return result\n\n def print_ans(self, ans):\n print(ans)\n\n def test(self):\n words = [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]\n ans = self.findWords(words)\n self.print_ans(ans)\n\n\nif __name__ == '__main__':\n s = Solution()\n s.test()\n","repo_name":"georgebzhang/Python_LeetCode","sub_path":"500_keyboard_row.py","file_name":"500_keyboard_row.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"30843605000","text":"import json\n\nFILE = './sifu_results/unit_test.json'\n\nwith open(FILE, 'r+') as f:\n # read json\n # =========\n data = json.load(f)\n\n\n # modify json\n # ===========\n for finding in data:\n msg = finding['msg'].lower()\n if 'division-by-zero' in msg:\n finding['tag'] = 'POWER_MOD_DIVISION_BY_ZERO'\n elif 'signed integer overflow' in msg:\n finding['tag'] = 'INCREMENTAL_2_POWER_MOD_SIGNED_OVERFLOW_'\n\n\n # write json back\n # ===============\n f.seek(0)\n json.dump(data, f, indent=2)\n f.truncate()\n","repo_name":"saucec0de/sifu","sub_path":"Challenges/C_CPP/0009_power_mod/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"18"} 
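The `feedback.py` record above rewrites a JSON file in place by opening it in `r+` mode, seeking back to the start, and truncating after the dump. A generalized sketch of that idiom follows, using only the standard library; the `retag_findings` helper and its arguments are illustrative and not part of the original script:

```python
import json

def retag_findings(path, substring, tag):
    """Illustrative helper mirroring the r+/seek(0)/truncate() idiom above."""
    with open(path, "r+") as f:
        findings = json.load(f)                # read the whole document
        for finding in findings:
            if substring in finding.get("msg", "").lower():
                finding["tag"] = tag           # modify matching entries in memory
        f.seek(0)                              # rewind before rewriting
        json.dump(findings, f, indent=2)
        f.truncate()                           # drop leftover bytes if the new JSON is shorter
```

The `truncate()` call is the step that is easiest to forget: without it, a rewrite that is shorter than the original leaves trailing bytes from the old file and corrupts the JSON.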
+{"seq_id":"40027445060","text":"import base64\nimport datetime\nimport io\n\nimport dash\nfrom dash.dependencies import Input, Output, State\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport plotly.express as px\nimport pandas as pd\n\ndf=pd.read_csv('/Users/pcworld/Desktop/dashproject/2014.csv')\n\ndf_division=df[['v016','v024','v025','v027','v013']]\ndf_five=df_division.groupby('v013').count()\nprint(df_five)\n\nfig=px.pie(df_division,values='v016',names='v024',title='Divisional Survey Distribution')\nfig1=px.pie(df_division,values='v027',names='v025',title='Rural vs Urban count')\nfig2=px.bar(df_five,x='v013',y='v024')\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp1 = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncolors = {\n 'background': '#1E497F',\n 'text': '#111111',\n 'text1': 'white'}\n\napp1.layout = html.Div([\n\n html.H1(\n children='Hello Dash',\n style={\n 'textAlign': 'center',\n 'color': colors['text'],\n # 'textAlign':'left',\n }\n ),\n\n html.Div(children='Dash: A web application framework for Python.', style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n\n html.Hr(),\n\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '50%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n # Allow multiple files to be uploaded\n multiple=True\n ),\n html.Div(id='output-data-upload'),\n\n html.Div([\n dcc.Graph(figure=fig1),\n html.Hr(),\n dcc.Graph(\n id='example-graph-1',\n figure={\n 'data': [\n {'x': [1], 'y': [4], 'type': 'bar', 'name': 'Dhaka'},\n {'x': [2], 'y': [7], 'type': 'bar', 'name': 'Chittagong'},\n {'x': [3], 'y': [2], 'type': 'bar', 'name': 'Barishal'},\n {'x': [4], 'y': [5], 'type': 'bar', 'name': 'Rangpur'},\n ],\n 'layout': {\n 'plot_bgcolor': colors['background'],\n 'paper_bgcolor': colors['background'],\n 'font': {\n 'color': colors['text1']\n }\n }\n }\n ),\n html.Hr(),\n dcc.Graph(figure=fig),\n html.Hr(),\n ], style={'columnCount': 3}, ),\n\n\n html.Div([\n #dcc.Graph(figure=fig2),\n\n\n ]),\n], style={'columnCount': 1}, )\n\n\ndef parse_contents(contents, filename, date):\n content_type, content_string = contents.split(',')\n\n decoded = base64.b64decode(content_string)\n try:\n if 'csv' in filename:\n # Assume that the user uploaded a CSV file\n df = pd.read_csv(\n io.StringIO(decoded.decode('utf-8')))\n elif 'xls' in filename:\n # Assume that the user uploaded an excel file\n df = pd.read_excel(io.BytesIO(decoded))\n except Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return html.Div([\n html.H5(filename),\n html.H6(datetime.datetime.fromtimestamp(date)),\n\n dash_table.DataTable(\n data=df.to_dict('records'),\n filter_action='native',\n columns=[{'name': i, 'id': i} for i in df.columns],\n style_header={'backgroundColor': 'rgb(30, 30, 30)',\n 'fontWeight': 'bold'},\n style_cell={\n 'backgroundColor': colors['background'], # 'rgb(50, 50, 50)',\n 'color': 'white',\n 'textAlign': 'left'\n },\n page_action='none',\n style_table={'height': '500px', 'width': '70%', 'overflowY': 'auto', 'margin': '10px'}\n ),\n\n html.Hr(), # horizontal line\n\n # For debugging, display the raw contents provided by the web browser\n html.Div('Raw Content'),\n html.Pre(contents[0:200] + '...', style={\n 
'whiteSpace': 'pre-wrap',\n 'wordBreak': 'break-all',\n 'width': '50%'\n })\n ])\n\n\n@app1.callback(Output('output-data-upload', 'children'),\n [Input('upload-data', 'contents')],\n [State('upload-data', 'filename'),\n State('upload-data', 'last_modified')])\ndef update_output(list_of_contents, list_of_names, list_of_dates):\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n return children\n\n\nif __name__ == '__main__':\n app1.run_server(debug=True)\n","repo_name":"borson-sakib/DHS-Dashboard","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10759984304","text":"from __future__ import annotations\n\nfrom typing import List\n\nfrom range_typed_integers import u16, u8\n\nfrom skytemple_files.common.ppmdu_config.data import Pmd2Data\nfrom skytemple_files.common.util import read_u16, read_u8, write_u16, write_u8\n\nSE_PC_LNTRY_LEN = 0x14\n\n\nclass SpecialEpisodePc:\n poke_id: u16\n joined_at: u16\n move1: u16\n move2: u16\n move3: u16\n move4: u16\n do_not_fix_entire_moveset: bool\n level: u16\n iq: u16\n fixed_hp: u16\n\n def __init__(\n self,\n poke_id: u16,\n joined_at: u16,\n move1: u16,\n move2: u16,\n move3: u16,\n move4: u16,\n do_not_fix_entire_moveset: bool,\n level: u16,\n iq: u16,\n fixed_hp: u16,\n ):\n self.poke_id = poke_id\n self.joined_at = joined_at\n self.move1 = move1\n self.move2 = move2\n self.move3 = move3\n self.move4 = move4\n self.do_not_fix_entire_moveset = do_not_fix_entire_moveset\n self.level = level\n self.iq = iq\n # 0 if not fixed\n self.fixed_hp = fixed_hp\n\n def to_bytes(self) -> bytes:\n b = bytearray(SE_PC_LNTRY_LEN)\n write_u16(b, self.poke_id, 0)\n write_u16(b, self.joined_at, 2)\n write_u16(b, self.move1, 4)\n write_u16(b, self.move2, 6)\n write_u16(b, self.move3, 8)\n write_u16(b, self.move4, 10)\n write_u16(b, u16(int(self.do_not_fix_entire_moveset)), 12)\n write_u16(b, self.level, 14)\n write_u16(b, self.iq, 16)\n write_u16(b, self.fixed_hp, 18)\n return b\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, SpecialEpisodePc):\n return False\n return (\n self.poke_id == other.poke_id\n and self.joined_at == other.joined_at\n and self.move1 == other.move1\n and self.move2 == other.move2\n and self.move3 == other.move3\n and self.move4 == other.move4\n and self.do_not_fix_entire_moveset == other.do_not_fix_entire_moveset\n and self.level == other.level\n and self.iq == other.iq\n and self.fixed_hp == other.fixed_hp\n )\n\n\nclass HardcodedDefaultStarters:\n @staticmethod\n def get_partner_md_id(arm9: bytes, config: Pmd2Data) -> u16:\n \"\"\"\n Gets the monster.md index of the default partner starter\n \"\"\"\n block = config.bin_sections.arm9.data.DEFAULT_PARTNER_ID\n return read_u16(arm9, block.address)\n\n @staticmethod\n def set_partner_md_id(value: u16, arm9: bytearray, config: Pmd2Data) -> None:\n \"\"\"\n Sets the monster.md index of the default partner starter\n \"\"\"\n block = config.bin_sections.arm9.data.DEFAULT_PARTNER_ID\n write_u16(arm9, value, block.address)\n\n @staticmethod\n def get_player_md_id(arm9: bytes, config: Pmd2Data) -> u16:\n \"\"\"\n Gets the monster.md index of the default player starter\n \"\"\"\n block = config.bin_sections.arm9.data.DEFAULT_HERO_ID\n return read_u16(arm9, block.address)\n\n @staticmethod\n def set_player_md_id(value: u16, arm9: bytearray, 
config: Pmd2Data) -> None:\n \"\"\"\n Sets the monster.md index of the default player starter\n \"\"\"\n block = config.bin_sections.arm9.data.DEFAULT_HERO_ID\n write_u16(arm9, value, block.address)\n\n @staticmethod\n def get_partner_level(arm9: bytes, config: Pmd2Data) -> u8:\n \"\"\"\n Gets the level of the partner starter\n \"\"\"\n block = config.bin_sections.arm9.data.PARTNER_START_LEVEL\n return read_u8(arm9, block.address)\n\n @staticmethod\n def set_partner_level(value: u8, arm9: bytearray, config: Pmd2Data) -> None:\n \"\"\"\n Sets the level of the partner starter\n \"\"\"\n block = config.bin_sections.arm9.data.PARTNER_START_LEVEL\n write_u8(arm9, value, block.address)\n\n @staticmethod\n def get_player_level(arm9: bytes, config: Pmd2Data) -> u8:\n \"\"\"\n Gets the level of the player starter\n \"\"\"\n block = config.bin_sections.arm9.data.HERO_START_LEVEL\n return read_u8(arm9, block.address)\n\n @staticmethod\n def set_player_level(value: u8, arm9: bytearray, config: Pmd2Data) -> None:\n \"\"\"\n Sets the level of the player starter\n \"\"\"\n block = config.bin_sections.arm9.data.HERO_START_LEVEL\n write_u8(arm9, value, block.address)\n\n @staticmethod\n def get_special_episode_pcs(\n arm9: bytes, config: Pmd2Data\n ) -> List[SpecialEpisodePc]:\n \"\"\"\n Gets the special episode player characters\n \"\"\"\n block = config.bin_sections.arm9.data.SPECIAL_EPISODE_MAIN_CHARACTERS\n assert block.length is not None\n lst = []\n for i in range(block.address, block.address + block.length, SE_PC_LNTRY_LEN):\n lst.append(\n SpecialEpisodePc(\n read_u16(arm9, i + 0),\n read_u16(arm9, i + 2),\n read_u16(arm9, i + 4),\n read_u16(arm9, i + 6),\n read_u16(arm9, i + 8),\n read_u16(arm9, i + 10),\n bool(read_u16(arm9, i + 12)),\n read_u16(arm9, i + 14),\n read_u16(arm9, i + 16),\n read_u16(arm9, i + 18),\n )\n )\n return lst\n\n @staticmethod\n def set_special_episode_pcs(\n value: List[SpecialEpisodePc], arm9: bytearray, config: Pmd2Data\n ) -> None:\n \"\"\"\n Sets the special episode player characters\n \"\"\"\n block = config.bin_sections.arm9.data.SPECIAL_EPISODE_MAIN_CHARACTERS\n assert block.length is not None\n expected_length = int(block.length / SE_PC_LNTRY_LEN)\n if len(value) != expected_length:\n raise ValueError(\n f\"The list must have exactly the length of {expected_length} entries.\"\n )\n for i, entry in enumerate(value):\n arm9[\n block.address\n + i * SE_PC_LNTRY_LEN : block.address\n + (i + 1) * SE_PC_LNTRY_LEN\n ] = entry.to_bytes()\n","repo_name":"SkyTemple/skytemple-files","sub_path":"skytemple_files/hardcoded/default_starters.py","file_name":"default_starters.py","file_ext":"py","file_size_in_byte":6221,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"14408409687","text":"def secant(f, x0, x1, prec):\n \"\"\"\n f -> function being analyzed\n x0, x1 -> initial points\n prec -> precision\n \"\"\"\n i = 0\n fx0 = f(x0)\n fx1 = f(x1)\n x2 = x1 - fx1 * (x1 - x0) / (fx1 - fx0)\n while abs((x2 - x1) / x2) > prec:\n i += 1\n x0 = x1\n x1 = x2\n fx0 = fx1\n fx1 = f(x1)\n x2 = x1 - fx1 * (x1 - x0) / (fx1 - fx0)\n \n \n print(f\"iterations: {i}\")\n return x2\n \n \ndef f(x):\n return x**3 - 9*x + 3","repo_name":"brennoliveira/calculo-numerico","sub_path":"python/methods/secant.py","file_name":"secant.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38583259385","text":"import argparse\n\nimport pytorch_lightning as 
pl\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import LambdaLR\nfrom transformers import GPT2LMHeadModel, GPT2Config\n\n\nclass BitSubsetParity(pl.LightningModule):\n def __init__(self, step_by_step: bool, num_of_bits: int, width=512, num_heads=8, depth=3, learning_rate=1e-3, warmup_steps=1000, weight_decay=1e-2, evaluate_with_greedy_decoding=False):\n super().__init__()\n self.save_hyperparameters()\n self.step_by_step = step_by_step\n self.num_of_bits = num_of_bits\n self.evaluate_with_greedy_decoding = evaluate_with_greedy_decoding\n self.generation_length = ((self.num_of_bits * 3) // 2 - 2) if self.step_by_step else self.num_of_bits\n self.model = GPT2LMHeadModel(GPT2Config(vocab_size=4, n_positions=self.generation_length, n_embd=width, n_layer=depth, n_head=num_heads, resid_pdrop=0, embd_pdrop=0, attn_pdrop=0, bos_token_id=2, eos_token_id=2))\n self.loss = nn.CrossEntropyLoss()\n\n def forward(self, inputs):\n inputs=inputs.long()\n if self.step_by_step:\n do_sample = not self.evaluate_with_greedy_decoding\n inputs = self.model.generate(inputs, do_sample=do_sample, max_length=self.generation_length, min_length=self.generation_length, pad_token_id=2, num_beams=1)\n logits = self.model(inputs).logits[:, self.num_of_bits - 1:, :2]\n predictions = torch.argmax(logits, dim=2)[:, 0]\n return predictions\n\n def _training_evaluation_common(self, batch):\n batch['label'] = batch['label'].long()\n logits = self.model(batch['input_ids'].long()).logits[:, -batch['label'].shape[1]:, :2]\n loss = self.loss(logits.permute(0, 2, 1), batch['label'])\n predictions = torch.argmax(logits, dim=2)\n accuracy_with_steps = torch.mean((predictions == batch['label']).float())\n final_label_accuracy =torch.mean((predictions[:, -1] == batch['label'][:, -1]).float())\n return loss, final_label_accuracy, accuracy_with_steps\n\n def training_step(self, batch, batch_idx):\n loss, final_label_accuracy, accuracy_with_steps = self._training_evaluation_common(batch)\n self.log(\"loss/train\", loss)\n self.log(\"accuracy/train\", final_label_accuracy)\n if self.step_by_step:\n self.log(\"accuracy_with_steps/train\", accuracy_with_steps)\n return loss\n\n def _prepare_batch_for_evaluation(self, batch):\n if self.step_by_step:\n do_sample = not self.evaluate_with_greedy_decoding\n batch['input_ids'] = self.model.generate(batch['input_ids'].long(), do_sample=do_sample, max_length=self.generation_length, min_length=self.generation_length, pad_token_id=2).detach()\n return batch\n\n def validation_step(self, batch, batch_idx):\n loss, accuracy, _ = self._training_evaluation_common(self._prepare_batch_for_evaluation(batch))\n self.log(\"val_loss\", loss)\n self.log(\"loss/val\", loss)\n self.log(\"accuracy/val\", accuracy)\n\n def test_step(self, batch, batch_idx):\n loss, accuracy, _ = self._training_evaluation_common(self._prepare_batch_for_evaluation(batch))\n self.log(\"loss/test\", loss)\n self.log(\"accuracy/test\", accuracy)\n\n def configure_optimizers(self):\n parameters = self.model.parameters()\n optimizer = torch.optim.Adam(parameters, lr=self.hparams.learning_rate, weight_decay=self.hparams.weight_decay)\n start_factor=1e-2\n lr_lambda = lambda epoch: (start_factor +\n (1. 
- start_factor) * min(self.hparams.warmup_steps, epoch) / self.hparams.warmup_steps)\n lr_scheduler = LambdaLR(optimizer, lr_lambda) \n lr_scheduler_config = {\n \"scheduler\": lr_scheduler,\n \"interval\": \"step\",\n \"frequency\": 1,\n \"monitor\": \"val_loss\",\n \"strict\": True,\n \"name\": None,\n }\n return {\n \"optimizer\": optimizer,\n \"lr_scheduler\": lr_scheduler_config\n }\n\n @staticmethod\n def add_model_specific_args(parent_parser):\n parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False)\n parser.add_argument('--width', type=int, default=512)\n parser.add_argument('--num_heads', type=int, default=8)\n parser.add_argument('--depth', type=int, default=3)\n parser.add_argument('--learning_rate', type=float, default=0.001)\n parser.add_argument('--warmup_steps', type=int, default=1000)\n parser.add_argument('--weight_decay', type=float, default=1e-2)\n parser.add_argument('--evaluate_with_greedy_decoding', dest='evaluate_with_greedy_decoding', action='store_true')\n parser.add_argument('--evaluate_with_sampling', dest='evaluate_with_greedy_decoding', action='store_false')\n parser.set_defaults(evaluate_with_greedy_decoding=False)\n return parser\n","repo_name":"HUJI-Deep/sub_task_decomposition","sub_path":"bit_subset_parity.py","file_name":"bit_subset_parity.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24716640422","text":"def rostring(str):\n if str == \"\":\n return \"\"\n\n stripped = str.strip()\n words = stripped.split(' ')\n\n print(words)\n\n first_word = words[0]\n del(words[0])\n words.append(first_word)\n word_str = \"\"\n separator = ' '\n\n word_str = separator.join(wrd for wrd in words)\n return (word_str)\n\nprint(rostring(\"Que la lumiere soit et la lumiere fut\"))\nprint(rostring(\" AkjhZ zLKIJz , 23y\"))\n# print(rostring(\"abc \"))\n","repo_name":"MikeRock51/Random","sub_path":"rostring.py","file_name":"rostring.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21960006259","text":"import numpy as np\r\nfrom scipy.stats import maxwell\r\nimport matplotlib.pyplot as plt\r\n\r\n##M_E=5.972e24\r\n##M_M=6.39e23 \r\nv_E=11.186 #escape velocity of earth\r\nv_M=5.03 #escape velocity of mars\r\nT_E=287.15 #the average surface temperature of the earth\r\nT_M=210.372 #the average surface temperature of mars\r\nk=1.380649e-23 # units: J-K^-1\r\n\r\ndef f(m,x,T):\r\n return ((m/(2*np.pi*k*T))**1.5)*np.exp(-(m*x**2/(2*k*T)))\r\n\r\ndef start():\r\n ch=int(input(\"Enter the choice:\\n1.Earth and Hydrogen molecule \\n2.Earth and Oxygen molecule\\n3.Mars and Hydrogen molecule\\n4.Mars and Oxygen molecule\\nEnter : \"))\r\n if ch==1:\r\n m=2*1.6735e-27\r\n T=T_E\r\n v=v_E\r\n elif ch==2:\r\n m=5.31e-26\r\n T=T_E\r\n v=v_E\r\n elif ch==3:\r\n m=2*1.6735e-27\r\n T=T_M\r\n v=v_M\r\n elif ch==4:\r\n m=5.31e-26\r\n T=T_M\r\n v=v_M\r\n else:\r\n start()\r\n M=1000 #the number of times we run the simulation learns\r\n p =np.zeros(M) #the probabilities are stored in this matrix\r\n N=100000 #the number of divisions made\r\n x0=(v*1000) # the escape velocity\r\n for ii in range(M):\r\n x=maxwell.rvs(scale=np.sqrt(k*T/m),size=N)\r\n \r\n if ii==1:\r\n plt.xlim(0,x0+1000)\r\n y = np.arange(0,N)\r\n plt.hist(x,bins=70,density=True,label='random maxwell distribution')\r\n z=maxwell.pdf(y, scale=np.sqrt(k*T/m))\r\n plt.plot(z,label='Maxwell pdf')\r\n 
plt.axvline(x=x0,color='r',label='escape velocity')\r\n #x=np.random.randn(N)*np.sqrt(k*T/m)#*((m/(2*np.pi*k*T)))\r\n #print(np.var(x))\r\n p1=np.sum(abs(x)>x0)/N\r\n #p2=np.sum(x*x<=x0*x0)/N\r\n p[ii]=p1\r\n print('The probability of the particle escaping is: ',np.mean(p))#the probability is printed\r\nstart()\r\nplt.legend(loc='best')\r\nplt.show()\r\n","repo_name":"AmitPratap175/3rd-year","sub_path":"escape velocity probability/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"21158399788","text":"import os\nimport random\n\nimport networkx as nx\nimport numpy as np\nfrom gensim.models import Word2Vec\n\nfrom libcity.data.dataset import TrafficStatePointDataset\n\n\nclass Graph():\n def __init__(self, nx_G, is_directed, p, q):\n self.G = nx_G\n self.is_directed = is_directed\n self.p = p\n self.q = q\n\n def node2vec_walk(self, walk_length, start_node):\n '''\n Simulate a random walk starting from start node.\n '''\n G = self.G\n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n\n walk = [start_node]\n\n while len(walk) < walk_length:\n cur = walk[-1]\n cur_nbrs = sorted(G.neighbors(cur))\n if len(cur_nbrs) > 0:\n if len(walk) == 1:\n walk.append(cur_nbrs[alias_draw(alias_nodes[cur][0], alias_nodes[cur][1])])\n else:\n prev = walk[-2]\n next = cur_nbrs[alias_draw(alias_edges[(prev, cur)][0],\n alias_edges[(prev, cur)][1])]\n walk.append(next)\n else:\n break\n\n return walk\n\n def simulate_walks(self, num_walks, walk_length):\n '''\n Repeatedly simulate random walks from each node.\n '''\n G = self.G\n walks = []\n nodes = list(G.nodes())\n print('Walk iteration:')\n for walk_iter in range(num_walks):\n print(str(walk_iter + 1), '/', str(num_walks))\n random.shuffle(nodes)\n for node in nodes:\n walks.append(self.node2vec_walk(walk_length=walk_length, start_node=node))\n\n return walks\n\n def get_alias_edge(self, src, dst):\n '''\n Get the alias edge setup lists for a given edge.\n '''\n G = self.G\n p = self.p\n q = self.q\n\n unnormalized_probs = []\n for dst_nbr in sorted(G.neighbors(dst)):\n if dst_nbr == src:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / p)\n elif G.has_edge(dst_nbr, src):\n unnormalized_probs.append(G[dst][dst_nbr]['weight'])\n else:\n unnormalized_probs.append(G[dst][dst_nbr]['weight'] / q)\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]\n\n return alias_setup(normalized_probs)\n\n def preprocess_transition_probs(self):\n '''\n Preprocessing of transition probabilities for guiding the random walks.\n '''\n G = self.G\n is_directed = self.is_directed\n\n alias_nodes = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbr]['weight'] for nbr in sorted(G.neighbors(node))]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]\n alias_nodes[node] = alias_setup(normalized_probs)\n\n alias_edges = {}\n\n if is_directed:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n else:\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])\n alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])\n\n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n\n return\n\n\ndef alias_setup(probs):\n '''\n Compute utility lists for non-uniform sampling from discrete distributions.\n Refer to\n 
https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/\n for details\n '''\n K = len(probs)\n q = np.zeros(K)\n J = np.zeros(K, dtype=int)\n\n smaller = []\n larger = []\n for kk, prob in enumerate(probs):\n q[kk] = K * prob\n if q[kk] < 1.0:\n smaller.append(kk)\n else:\n larger.append(kk)\n\n while len(smaller) > 0 and len(larger) > 0:\n small = smaller.pop()\n large = larger.pop()\n\n J[small] = large\n q[large] = q[large] + q[small] - 1.0\n if q[large] < 1.0:\n smaller.append(large)\n else:\n larger.append(large)\n\n return J, q\n\n\ndef alias_draw(J, q):\n '''\n Draw sample from a non-uniform discrete distribution using alias sampling.\n '''\n K = len(J)\n\n kk = int(np.floor(np.random.rand() * K))\n if np.random.rand() < q[kk]:\n return kk\n else:\n return J[kk]\n\n\ndef learn_embeddings(walks, dimensions, window_size, iter):\n walks = [list(map(str, walk)) for walk in walks]\n model = Word2Vec(\n walks, vector_size=dimensions, window=window_size, min_count=0, sg=1,\n workers=8, epochs=iter)\n return model\n\n\nclass GMANDataset(TrafficStatePointDataset):\n\n def __init__(self, config):\n super().__init__(config)\n self.D = self.config.get('D', 64)\n self.points_per_hour = 3600 // self.time_intervals\n self.add_day_in_week = self.config.get('add_day_in_week', False)\n self.SE_config = {'is_directed': True, 'p': 2, 'q': 1, 'num_walks': 100,\n 'walk_length': 80, 'dimensions': self.D, 'window_size': 10,\n 'iter': 1000}\n self.SE_config_str = 'SE_' + str(self.SE_config['is_directed']) + '_' + str(self.SE_config['p']) + \\\n '_' + str(self.SE_config['q']) + '_' + str(self.SE_config['num_walks']) + \\\n '_' + str(self.SE_config['walk_length']) + '_' + str(self.SE_config['dimensions']) + \\\n '_' + str(self.SE_config['window_size']) + '_' + str(self.SE_config['iter'])\n self.SE_cache_file = os.path.join('./libcity/cache/dataset_cache/',\n 'SE_based_{}.txt'.format(str(self.dataset) + '_' + self.SE_config_str))\n self._generate_SE()\n\n def _generate_SE(self):\n # SE: [N, D]([N, K * d])\n if not os.path.exists(self.SE_cache_file):\n nx_G = nx.from_numpy_matrix(self.adj_mx, create_using=nx.DiGraph())\n G = Graph(nx_G, self.SE_config['is_directed'], self.SE_config['p'], self.SE_config['q'])\n G.preprocess_transition_probs()\n walks = G.simulate_walks(self.SE_config['num_walks'], self.SE_config['walk_length'])\n model = learn_embeddings(walks, self.SE_config['dimensions'],\n self.SE_config['window_size'], self.SE_config['iter'])\n model.wv.save_word2vec_format(self.SE_cache_file)\n SE = np.zeros(shape=(self.num_nodes, self.SE_config['dimensions']), dtype=np.float32)\n f = open(self.SE_cache_file, mode='r')\n lines = f.readlines()\n for line in lines[1:]:\n temp = line.split(' ')\n index = int(temp[0])\n SE[index] = temp[1:]\n print(SE.shape)\n self.SE = SE\n\n def get_data_feature(self):\n \"\"\"\n Return the dataset features: scaler is the normalization method, adj_mx is the adjacency matrix, num_nodes is the number of nodes,\n feature_dim is the dimension of the input data, output_dim is the dimension of the model output\n\n Returns:\n dict: a dictionary containing the relevant features of the dataset\n \"\"\"\n data_feature = super().get_data_feature()\n data_feature['SE'] = self.SE\n data_feature['D'] = self.D\n data_feature['points_per_hour'] = self.points_per_hour\n data_feature['add_day_in_week'] = self.add_day_in_week\n return 
data_feature\n","repo_name":"LibCity/Bigscity-LibCity","sub_path":"libcity/data/dataset/dataset_subclass/gman_dataset.py","file_name":"gman_dataset.py","file_ext":"py","file_size_in_byte":7599,"program_lang":"python","lang":"en","doc_type":"code","stars":644,"dataset":"github-code","pt":"18"} +{"seq_id":"35934230846","text":"import discord\nfrom discord.ext import commands\n\n\n@commands.command(\n name=\"shreya\",\n brief=\"यदि भवान् नरपुङ्गवाः ज्ञातुम् इच्छति ।\",\n aliases=[\"श्रेय\"],\n ignore_extra=True\n)\n@commands.guild_only()\nasync def shreya(self, ctx: commands.Context) -> None:\n \"\"\"Send credits.\"\"\"\n\n shreya = discord.Embed()\n shreya.title = \"**श्रेय**\"\n\n shreya.add_field(name=\"**निर्माता**\", value=\"गॊऎञ्जी शूऽया\", inline=False)\n shreya.add_field(name=\"**सङ्गणकस्वामी**\", value=self.bot.owner,\n inline=False)\n\n link = \"https://www.github.com/gouenji-shuuya/rudra\"\n shreya.add_field(name=\"**स्रोत**\",\n value=f\"[गिटहब]({link}), तन्त्रांशाज्ञापत्र: AGPLv3\",\n inline=False)\n\n await ctx.send(embed=shreya)\n","repo_name":"gouenji-shuuya/rudra","sub_path":"Aajnaa/Shreya/shreya.py","file_name":"shreya.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74566803881","text":"import os\nimport json\nimport random\n\ndef create_subset_json(input_file, output_file, samples_per_label):\n with open(input_file, 'r') as file:\n data = json.load(file)\n\n # Separate data into two lists based on 'label' value\n label_0_data = [entry for entry in data if entry['label'] == 0]\n label_1_data = [entry for entry in data if entry['label'] == 1]\n\n # Sample the specified number of entries for each label\n sampled_label_0 = random.sample(label_0_data, min(samples_per_label, len(label_0_data)))\n sampled_label_1 = random.sample(label_1_data, min(samples_per_label, len(label_1_data)))\n\n # Combine the sampled data\n subset_data = sampled_label_0 + sampled_label_1\n\n # Write the subset data to a new JSON file\n with open(output_file, 'w', encoding='utf-8') as outfile:\n json.dump(subset_data, outfile, ensure_ascii=False, indent=2)\n\n\n# Example usage:\ninput_json_file = 'test.json'\noutput_json_file = 'test_200.json'\nsamples_per_label = 100 # Set the desired number of samples for each label\n\ncreate_subset_json(input_json_file, output_json_file, samples_per_label)","repo_name":"fan19-hub/nlp-project","sub_path":"make_sample.py","file_name":"make_sample.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35801769793","text":"import torch\nfrom torchvision import datasets, transforms\n\n# Config\ndata_dir = '/home/zwj/project/data/caltech/JPEGImages/'\n\ndata_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n\ndataset = datasets.ImageFolder(root=data_dir, transform = data_transform)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)\nmean = torch.zeros(3)\nstd = torch.zeros(3)\nprint('==> Computing mean and std..')\nfor inputs, targets in dataloader:\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\nmean.div_(len(dataset))\nstd.div_(len(dataset))\nprint(mean, 
std)","repo_name":"lihaojia24/pytorch-dt","sub_path":"tools/get_mean_std.py","file_name":"get_mean_std.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36275508381","text":"from dataset import MushroomDataset, XORDataset, ORDataset\nfrom classifier import Classifier, Models\n\nif __name__=='__main__':\n dataset = XORDataset()\n model = Classifier(model=Models.MLP, runs=1, epochs=100, n_hidden=2, l_rate=0.1, p_train=0.8, log=True)\n\n model.train(dataset.X, dataset.y)\n print(model.get_stats())\n model.plot_learning_curve()\n model.plot_2d_decision_surface()\n ","repo_name":"welmends/ann-impl","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"238544172","text":"#!/usr/bin/python3\n\"\"\"Nginx benchmark class\"\"\"\nimport logging\n\nfrom ..benchmark import Benchmark # pylint: disable=relative-beyond-top-level\n\nlogging.basicConfig(level=logging.INFO)\n\nclass NginxBenchmark(Benchmark): # pylint: disable=too-many-instance-attributes disable=too-few-public-methods\n \"\"\"Nginx benchmark class\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(NginxBenchmark, self).__init__(*args, **kwargs)\n self.name = \"nginx\"\n self.client_image_name = \"nginx_load_tester\"\n self.server_image_name = \"nginx\"\n self.service_initialization_delay = 15\n self.logger = logging.getLogger(self.name + \"_benchmark\")\n self.client_command = [\"docker\", \"run\", \"-e\", \"REMOTE_TESTING_HOST=%s\" % self.remote_ip,\n \"--name\", self.client_image_name, self.client_image_name]\n self.server_command = [\"docker\", \"run\", \"--rm\", \"-d\", \"--name\", self.server_image_name,\n \"-p\", \"%d:%d\" % (80, 80), self.server_image_name]\n self.results_header = \"Nginx Results (%s):\\n\" % self.protection_string\n self.target_token = \"Requests per second\"\n self.line_parser = lambda line: float(line.split()[3])\n self.metric_units = \"requests per second\"\n","repo_name":"Vali-Cyber/SecurityPerf","sub_path":"benchmarks/nginx/nginx.py","file_name":"nginx.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"22277254176","text":"import heapq\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nheap = []\n\nfor _ in range(n):\n x = int(input())\n\n if x == 0:\n if heap:\n print(heapq.heappop(heap)[1])\n else:\n print(0)\n else:\n heapq.heappush(heap, (abs(x), x))\n # 힙에 원소를 추가할 때 (-i, i) 튜플 형태로 넣어주면 튜플의 첫번째 원소를 기준으로 힙을 구성하게 된다\n # 실제 값은 튜플의 두번재 자리에 저장되어 있으니까 [1] 인덱싱으로 접근해주면 된다","repo_name":"cheon4050/CodingTest-Study","sub_path":"09주차/11286/chuheeseung_11286.py","file_name":"chuheeseung_11286.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"29336823018","text":"import jwt\nfrom rest_framework.authentication import BaseAuthentication\nfrom rest_framework import exceptions\nfrom django.conf import settings\nfrom rest_framework.response import Response\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import authentication, exceptions\nfrom AccountsApi.models import User\n\nclass SafeJWTAuthentication(authentication.BaseAuthentication):\n authentication_header_prefix = 'Bearer'\n\n def authenticate(self, request):\n request.user = None\n 
auth_header = authentication.get_authorization_header(request).split()\n auth_header_prefix = self.authentication_header_prefix.lower()\n\n if not auth_header:\n return None\n\n if len(auth_header) == 1:\n return None\n\n elif len(auth_header) > 2:\n return None\n\n prefix = auth_header[0].decode('utf-8')\n token = auth_header[1].decode('utf-8')\n\n if prefix.lower() != auth_header_prefix:\n return None\n\n return self._authenticate_credentials(request, token)\n\n\n def _authenticate_credentials(self, request, token):\n try:\n payload = jwt.decode(token, settings.SECRET_KEY, algorithms=['HS256'])\n except jwt.ExpiredSignatureError:\n raise exceptions.AuthenticationFailed('access_token expired')\n except IndexError:\n raise exceptions.AuthenticationFailed('Token prefix missing')\n except:\n raise exceptions.AuthenticationFailed('Invalid Token')\n \n\n user = User.objects.filter(id=payload['user_id']).first()\n if user is None:\n raise exceptions.AuthenticationFailed('User not found')\n\n if not user.is_active:\n raise exceptions.AuthenticationFailed('user is inactive')\n \n return (user, None)\n\n","repo_name":"raj368/user_management","sub_path":"AccountsApi/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16045540996","text":"#!/usr/bin/env python3\n\n# -----------------------------------------------------------------------------\n# parser_test.py\n#\n# Author: Francesco Racciatti (racciatti.francesco@gmail.com)\n#\n# This module tests the mechanism for handling AML types.\n#\n# Usage: \n# $ python3 -m unittest -v parser_test.py\n# -----------------------------------------------------------------------------\n\nimport os\nimport sys\nimport enum\nimport unittest\n\nsys.path.insert(0,\"../aml/\")\nimport aml as aml\n\nclass TestParser(unittest.TestCase):\n \"\"\"\n Tests for the parser.\n \"\"\"\n\n filename = \"source.aml\"\n \n def setUp(self):\n \"\"\"\n Sets up the test.\n \"\"\"\n if not os.path.exists(self.filename):\n self.fail(self.filename + \"does not exist\")\n if not os.path.isfile(self.filename):\n self.fail(self.filename + \"is a directory\")\n sourcefile = open(self.filename, 'r')\n self.source = sourcefile.read()\n sourcefile.close()\n\n def tearDown(self):\n \"\"\"\n Tears down the test.\n \"\"\"\n \n def test_parser(self):\n \"\"\"\n Tests the parser.\n \"\"\"\n # Parses the source string\n try:\n scenario = aml.AML.parse(self.source)\n except (ValueError, RuntimeError) as e:\n self.fail(e)\n\n","repo_name":"francescoracciatti/aml","sub_path":"test/parser_test.py","file_name":"parser_test.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10190597292","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nfrom optimization_tools.DOE import pareto_frontier\n\nfilename = 'wing_lower'\nfor i in range(3):\n data_i = np.genfromtxt(filename+'_%i.csv' % i, delimiter=',')\n for j in reversed(range(len(data_i))):\n if np.isnan(data_i[j]).any():\n data_i = np.delete(data_i, j, 0)\n x, y, z = data_i.T\n x_LE, y_LE, z_LE = pareto_frontier(x, y, z, False, True, tol=6e-2)\n x_TE, y_TE, z_TE = pareto_frontier(x, y, z, True, False, tol=6e-2)\n\n if not i:\n raw = data_i\n LE = np.vstack([x_LE, y_LE, z_LE]).T\n TE = np.vstack([x_TE, y_TE, z_TE]).T\n else:\n raw = np.vstack([raw, data_i])\n LE = np.vstack([LE, np.vstack([x_LE, 
y_LE, z_LE]).T])\n TE = np.vstack([TE, np.vstack([x_TE, y_TE, z_TE]).T])\nplt.figure()\nx, y, z = raw.T\nplt.scatter(x, y)\nx, y, z = LE.T\nplt.scatter(x, y, c='r')\nx, y, z = TE.T\nplt.scatter(x, y, c='g')\nplt.show()\n\nf = open('edges', 'wb')\npickle.dump(np.array([LE, TE]), f)\nf.close()\n\nf = open('all_lower', 'wb')\npickle.dump(raw, f)\nf.close()\n","repo_name":"leal26/AeroPy","sub_path":"examples/3D_fitting/full/output_process.py","file_name":"output_process.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"18"} +{"seq_id":"44634742638","text":"\"\"\"\nState Space Model\n\nAuthor: Chad Fulton\nLicense: Simplified-BSD\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\nfrom .simulation_smoother import SimulationSmoother, SimulationSmoothResults\ntry:\n from statsmodels.tsa.statespace import mlemodel, varmax\n from statsmodels.tsa.statespace.mlemodel import PredictionResultsWrapper\nexcept ImportError:\n from .compat import mlemodel\n from .compat.mlemodel import PredictionResultsWrapper\nimport statsmodels.base.wrapper as wrap\n\nclass MLEMixin(object):\n\n def initialize_statespace(self, **kwargs):\n \"\"\"\n Initialize the state space representation\n\n Parameters\n ----------\n **kwargs\n Additional keyword arguments to pass to the state space class\n constructor.\n\n \"\"\"\n # (Now self.endog is C-ordered and in long format (nobs x k_endog). To\n # get F-ordered and in wide format just need to transpose)\n endog = self.endog.T\n\n # Instantiate the state space object\n self.ssm = SimulationSmoother(endog.shape[0], self.k_states, **kwargs)\n # Bind the data to the model\n self.ssm.bind(endog)\n\n # Other dimensions, now that `ssm` is available\n self.k_endog = self.ssm.k_endog\n\n def fit(self, *args, **kwargs):\n \"\"\"\n Fits the model by maximum likelihood via Kalman filter.\n\n Parameters\n ----------\n start_params : array_like, optional\n Initial guess of the solution for the loglikelihood maximization.\n If None, the default is given by Model.start_params.\n transformed : boolean, optional\n Whether or not `start_params` is already transformed. Default is\n True.\n method : str, optional\n The `method` determines which solver from `scipy.optimize`\n is used, and it can be chosen from among the following strings:\n\n - 'newton' for Newton-Raphson, 'nm' for Nelder-Mead\n - 'bfgs' for Broyden-Fletcher-Goldfarb-Shanno (BFGS)\n - 'lbfgs' for limited-memory BFGS with optional box constraints\n - 'powell' for modified Powell's method\n - 'cg' for conjugate gradient\n - 'ncg' for Newton-conjugate gradient\n - 'basinhopping' for global basin-hopping solver\n\n The explicit arguments in `fit` are passed to the solver,\n with the exception of the basin-hopping solver. Each\n solver has several optional arguments that are not the same across\n solvers. See the notes section below (or scipy.optimize) for the\n available arguments and for the list of explicit arguments that the\n basin-hopping solver supports.\n cov_type : str, optional\n The `cov_type` keyword governs the method for calculating the\n covariance matrix of parameter estimates. 
Can be one of:\n\n - 'opg' for the outer product of gradient estimator\n - 'oim' for the observed information matrix estimator, calculated\n using the method of Harvey (1989)\n - 'cs' for the observed information matrix estimator, calculated\n using a numerical (complex step) approximation of the Hessian\n matrix.\n - 'delta' for the observed information matrix estimator, calculated\n using a numerical (complex step) approximation of the Hessian\n along with the delta method (method of propagation of errors)\n applied to the parameter transformation function\n `transform_params`.\n - 'robust' for an approximate (quasi-maximum likelihood) covariance\n matrix that may be valid even in the presence of some\n misspecifications. Intermediate calculations use the 'oim'\n method.\n - 'robust_cs' is the same as 'robust' except that the intermediate\n calculations use the 'cs' method.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description of required\n keywords for alternative covariance estimators\n maxiter : int, optional\n The maximum number of iterations to perform.\n full_output : boolean, optional\n Set to True to have all available output in the Results object's\n mle_retvals attribute. The output is dependent on the solver.\n See LikelihoodModelResults notes section for more information.\n disp : boolean, optional\n Set to True to print convergence messages.\n callback : callable callback(xk), optional\n Called after each iteration, as callback(xk), where xk is the\n current parameter vector.\n return_params : boolean, optional\n Whether or not to return only the array of maximizing parameters.\n Default is False.\n optim_hessian : {'opg','oim','cs'}, optional\n The method by which the Hessian is numerically approximated. 'opg'\n uses outer product of gradients, 'oim' uses the information\n matrix formula from Harvey (1989), and 'cs' uses second-order\n complex step differentiation. This keyword is only relevant if the\n optimization method uses the Hessian matrix.\n **kwargs\n Additional keyword arguments to pass to the optimizer.\n\n Returns\n -------\n MLEResults\n\n See also\n --------\n statsmodels.base.model.LikelihoodModel.fit\n MLEResults\n \"\"\"\n # Save the return_params argument\n return_params = kwargs.get('return_params', False)\n kwargs['return_params'] = False\n results = super(MLEMixin, self).fit(*args, **kwargs)\n\n # Construct the results class if desired\n if return_params:\n results = results.params\n else:\n result_kwargs = {}\n if 'cov_type' in kwargs:\n result_kwargs['cov_type'] = kwargs['cov_type']\n if 'cov_kwds' in kwargs:\n result_kwargs['cov_kwds'] = kwargs['cov_kwds']\n mlefit = results.mlefit\n results = self.smooth(results.params, **result_kwargs)\n results.mlefit = mlefit\n results.mle_retvals = mlefit.mle_retvals\n results.mle_settings = mlefit.mle_settings\n\n return results\n\n def filter(self, params, transformed=True, cov_type=None, cov_kwds=None,\n return_ssm=False, **kwargs):\n \"\"\"\n Kalman filtering\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : boolean, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : boolean, optional\n Whether or not to return only the state space output or a full\n results object. 
Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = np.array(params, ndmin=1)\n\n # Transform parameters if necessary\n if not transformed:\n params = self.transform_params(params)\n transformed = True\n\n # Get the state space output\n results = super(MLEMixin, self).filter(params, transformed,\n return_ssm=True, **kwargs)\n\n # Wrap in a results object\n if not return_ssm:\n result_kwargs = {}\n if cov_type is not None:\n result_kwargs['cov_type'] = cov_type\n if cov_kwds is not None:\n result_kwargs['cov_kwds'] = cov_kwds\n results = MLEResultsWrapper(\n MLEResults(self, params, results, **result_kwargs)\n )\n\n return results\n\n def smooth(self, params, transformed=True, cov_type=None, cov_kwds=None,\n return_ssm=False, **kwargs):\n \"\"\"\n Kalman smoothing\n\n Parameters\n ----------\n params : array_like\n Array of parameters at which to evaluate the loglikelihood\n function.\n transformed : boolean, optional\n Whether or not `params` is already transformed. Default is True.\n return_ssm : boolean,optional\n Whether or not to return only the state space output or a full\n results object. Default is to return a full results object.\n cov_type : str, optional\n See `MLEResults.fit` for a description of covariance matrix types\n for results object.\n cov_kwds : dict or None, optional\n See `MLEResults.get_robustcov_results` for a description required\n keywords for alternative covariance estimators\n **kwargs\n Additional keyword arguments to pass to the Kalman filter. See\n `KalmanFilter.filter` for more details.\n \"\"\"\n params = np.array(params, ndmin=1)\n\n if not transformed:\n params = self.transform_params(params)\n self.update(params, transformed=True)\n\n # Save the parameter names\n self.data.param_names = self.param_names\n\n # Get the state space output\n results = self.ssm.smooth(**kwargs)\n\n # Wrap in a results object\n if not return_ssm:\n result_kwargs = {}\n if cov_type is not None:\n result_kwargs['cov_type'] = cov_type\n if cov_kwds is not None:\n result_kwargs['cov_kwds'] = cov_kwds\n results = MLEResultsWrapper(\n MLEResults(self, params, results, **result_kwargs)\n )\n\n return results\n\n def simulation_smoother(self, **kwargs):\n \"\"\"\n Retrieve a simulation smoother for the statespace model.\n\n Parameters\n ----------\n simulation_output : int, optional\n Determines which simulation smoother output is calculated.\n Default is all (including state and disturbances).\n simulation_smooth_results_class : class, optional\n Default results class to use to save output of simulation\n smoothing. Default is `SimulationSmoothResults`. If specified,\n class must extend from `SimulationSmoothResults`.\n prefix : string\n The prefix of the datatype. 
Usually only used internally.\n **kwargs\n Additional keyword arguments, used to set the simulation output.\n See `set_simulation_output` for more details.\n\n Returns\n -------\n SimulationSmoothResults\n \"\"\"\n return self.ssm.simulation_smoother(**kwargs)\n\n\nclass MLEModel(MLEMixin, mlemodel.MLEModel):\n r\"\"\"\n State space model for maximum likelihood estimation\n\n Parameters\n ----------\n endog : array_like\n The observed time-series process :math:`y`\n k_states : int\n The dimension of the unobserved state process.\n exog : array_like, optional\n Array of exogenous regressors, shaped nobs x k. Default is no\n exogenous regressors.\n dates : array-like of datetime, optional\n An array-like object of datetime objects. If a Pandas object is given\n for endog, it is assumed to have a DateIndex.\n freq : str, optional\n The frequency of the time-series. A Pandas offset or 'B', 'D', 'W',\n 'M', 'A', or 'Q'. This is optional if dates are given.\n **kwargs\n Keyword arguments may be used to provide default values for state space\n matrices or for Kalman filtering options. See `Representation`, and\n `KalmanFilter` for more details.\n\n Attributes\n ----------\n ssm : KalmanFilter\n Underlying state space representation.\n\n Notes\n -----\n This class wraps the state space model with Kalman filtering to add in\n functionality for maximum likelihood estimation. In particular, it adds\n the concept of updating the state space representation based on a defined\n set of parameters, through the `update` method or `updater` attribute (see\n below for more details on which to use when), and it adds a `fit` method\n which uses a numerical optimizer to select the parameters that maximize\n the likelihood of the model.\n\n The `start_params` `update` method must be overridden in the\n child class (and the `transform` and `untransform` methods, if needed).\n\n See Also\n --------\n MLEResults\n dismalpy.ssm.simulation_smoother.SimulationSmoother\n dismalpy.ssm.kalman_smoother.KalmanSmoother\n dismalpy.ssm.kalman_filter.KalmanFilter\n dismalpy.ssm.representation.Representation\n \"\"\"\n pass\n\n\nclass MLEResultsMixin(object):\n def __init__(self, model, params, smoother_results, cov_type='opg',\n cov_kwds=None, **kwargs):\n super(MLEResultsMixin, self).__init__(\n model, params, smoother_results,\n cov_type=cov_type, cov_kwds=cov_kwds, **kwargs\n )\n\n # Add the smoother results\n self.smoother_results = smoother_results\n\n @property\n def kalman_gain(self):\n \"\"\"\n Kalman gain matrices\n \"\"\"\n return self._kalman_gain\n @kalman_gain.setter\n def kalman_gain(self, value):\n self._kalman_gain = value\n\n\nclass MLEResults(MLEResultsMixin, mlemodel.MLEResults):\n r\"\"\"\n Class to hold results from fitting a state space model.\n\n Parameters\n ----------\n model : MLEModel instance\n The fitted model instance\n params : array\n Fitted parameters\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n\n Attributes\n ----------\n model : Model instance\n A reference to the model that was fit.\n filter_results : KalmanFilter instance\n The underlying state space model and Kalman filter output\n nobs : float\n The number of observations used to fit the model.\n params : array\n The parameters of the model.\n scale : float\n This is currently set to 1.0 and not used by the model or its results.\n\n See Also\n --------\n MLEModel\n dismalpy.ssm.kalman_smoother.SmootherResults\n dismalpy.ssm.kalman_filter.FilterResults\n 
dismalpy.ssm.representation.FrozenRepresentation\n \"\"\"\n pass\n\n\nclass MLEResultsWrapper(mlemodel.MLEResultsWrapper):\n _attrs = {}\n _wrap_attrs = wrap.union_dicts(mlemodel.MLEResultsWrapper._wrap_attrs,\n _attrs)\n _methods = {}\n _wrap_methods = wrap.union_dicts(mlemodel.MLEResultsWrapper._wrap_methods,\n _methods)\nwrap.populate_wrapper(MLEResultsWrapper, MLEResults)\n","repo_name":"dismalpy/dismalpy","sub_path":"dismalpy/ssm/mlemodel.py","file_name":"mlemodel.py","file_ext":"py","file_size_in_byte":15184,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"18"} +{"seq_id":"3906299984","text":"from math import gcd\nfrom functools import reduce\n\ndef waitTime(timestamp, busID):\n wait = (timestamp // busID) * busID\n if wait < timestamp:\n wait += busID\n return wait - timestamp, busID\n\ndef findFirstBus(timestamp, buses):\n return min([waitTime(timestamp, busID) for busID in buses])\n\ndef lcm(a, b):\n return abs(a * b) // gcd(a, b)\n\ndef processInput(lines):\n timestamp = int(lines[0].strip())\n return timestamp, [int(b) for b in lines[1].split(',') if b != 'x']\n\ndef sync(lines):\n buses = [int(bus) for bus in lines[1].replace('x', '0').split(\",\")]\n period, maxIdx = max((bus, i) for i, bus in enumerate(buses))\n remainders = [(i - maxIdx, bus) for i, bus in enumerate(buses) if bus]\n minRem = min(remainders)[0]\n time = 0\n while remainders:\n time += period\n synced = [bus for rem, bus in remainders if (time + rem) % bus == 0]\n if synced:\n period = reduce(lcm, [period] + synced)\n remainders = [(rem, bus) for rem, bus in remainders if bus not in synced]\n return time + minRem\n\nwith open('input.in', 'r') as f:\n lines = f.readlines()\n timestamp, buses = processInput(lines)\n wait, earliest = findFirstBus(timestamp, buses)\n print(f\"P1: wait time = {wait * earliest}\")\n print(f\"P2: earliest timestamp = {sync(lines)}\")\n","repo_name":"evanSpendlove/AdventOfCode","sub_path":"2020/day_13/day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"24574629847","text":"#!/usr/bin/env python\n# LEGO-Sensors.py\n#\n# Frans Duijnhouwer\n# frans.duijnhouwergmail.com\n#\n# Initial Date: January 28, 2014\n# Last Updated: February 11, 2014\n#\n# This file has been made available online through a Creative Commons \n# Attribution-ShareAlike 3.0 license. 
\n# (http://creativecommons.org/licenses/by-sa/3.0/)\n#\n# ThreadSafeBrickPi implementation of LEGO NXT (1 and 2) sensors.\n#\n#\n\nimport ThreadSafeBrickPi\nimport threading\n\nBPi = ThreadSafeBrickPi\n\nTYPE_SENSOR_RAW = BPi.TYPE_SENSOR_RAW\nTYPE_SENSOR_ULTRASONIC_CONT = BPi.TYPE_SENSOR_ULTRASONIC_CONT\nTYPE_SENSOR_TOUCH = BPi.TYPE_SENSOR_TOUCH\nTYPE_SENSOR_LIGHT_OFF = BPi.TYPE_SENSOR_LIGHT_OFF\nTYPE_SENSOR_LIGHT_ON = BPi.TYPE_SENSOR_LIGHT_ON\nTYPE_SENSOR_COLOR_FULL = BPi.TYPE_SENSOR_COLOR_FULL\nTYPE_SENSOR_COLOR_RED = BPi.TYPE_SENSOR_COLOR_RED\nTYPE_SENSOR_COLOR_GREEN = BPi.TYPE_SENSOR_COLOR_GREEN\nTYPE_SENSOR_COLOR_BLUE = BPi.TYPE_SENSOR_COLOR_BLUE\nTYPE_SENSOR_COLOR_NONE = BPi.TYPE_SENSOR_COLOR_NONE\nTYPE_SENSOR_I2C_9V = BPi.TYPE_SENSOR_I2C_9V\nMASK_9V = BPi.MASK_9V\nBIT_I2C_MID = BPi.BIT_I2C_MID\nBIT_I2C_SAME = BPi.BIT_I2C_SAME\n\nLEGO_US_I2C_ADDR = 0x02\nLEGO_US_CMD_REG = 0x41\nLEGO_US_CMD_OFF = 0x00\nLEGO_US_CMD_SS = 0x01\nLEGO_US_CMD_CONT = 0x02\nLEGO_US_CMD_EVNT = 0x03\nLEGO_US_CMD_RST = 0x04\nLEGO_US_DATA_REG = 0x42\n\n# Implementation for Lego Ultrasonic sensor\nclass BrickPiLegoUltraSonicSensor(BPi.BrickPiSensor):\n def __init__(self, portNumber):\n self._port = portNumber\n self._value = 999 # invalid (range is from 0 to 255)\n self._lock = threading.Lock()\n\n def get_type(self):\n return TYPE_SENSOR_ULTRASONIC_CONT\n\n #def callback_init(self, stage):\n\n def callback_update(self, value):\n self._lock.acquire()\n self._value = value\n self._lock.release()\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n# Implementation for Lego Ultrasonic sensor as I2C device\nclass BrickPiLegoUltraSonicSensorI2C(BPi.BrickPiI2CSensor):\n def __init__(self, portNumber):\n self._port = portNumber\n self._value = 999 # invalid (range is from 0 to 255)\n self._dataSize = 0\n self._lock = threading.Lock()\n\n def get_type(self):\n return TYPE_SENSOR_I2C_9V\n\n def get_address(self):\n return LEGO_US_I2C_ADDR\n\n def callback_init(self, stage):\n if(stage == 1):\n self._dataSize = 0\n a = [LEGO_US_CMD_REG, LEGO_US_CMD_CONT]\n return a, self._dataSize, 8, 0, 0.0, 0.0, stage+1\n else:\n self._dataSize = 1\n a = [LEGO_US_DATA_REG]\n # outArray, numBytesIn, speed, settings, sdelay, udelay, more steps\n return a, self._dataSize, 8, BIT_I2C_MID | BIT_I2C_SAME, 0.0, 0.02, 0\n\n def callback_update(self, value):\n self._lock.acquire()\n self._value = value[0]\n self._lock.release()\n\n def callback_expected_data_size(self):\n try:\n self._lock.acquire()\n return self._dataSize\n finally:\n self._lock.release()\n\n def setup_required(self):\n return False\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n# Implementation for Lego Touch sensor\nclass BrickPiLegoTouchSensor(BPi.BrickPiSensor):\n def __init__(self, portNumber):\n self._port = portNumber\n self._value = 999 # invalid (boolean: 0 or 1)\n self._lock = threading.Lock()\n\n def get_type(self):\n return TYPE_SENSOR_TOUCH\n\n #def callback_init(self):\n\n def callback_update(self, value):\n self._lock.acquire()\n self._value = value\n self._lock.release()\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n# Implementation of Lego Sound sensor\nclass BrickPiLegoSoundSensor(BPi.BrickPiSensor):\n def __init__(self, portNumber):\n self._port = portNumber\n self._value = 999 # = Silence (range 0 to 9??)\n self._lock = threading.Lock()\n\n def get_type(self):\n return 
TYPE_SENSOR_RAW\n\n #def callback_init(self):\n\n def callback_update(self, value):\n self._lock.acquire()\n self._value = value\n self._lock.release()\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n# Implementation of Lego Light sensor\nclass BrickPiLegoLightSensor(BPi.BrickPiSensor):\n def __init__(self, portNumber, on=0):\n self._port = portNumber\n self._on = on\n self._value = 999 # invalid (range 0 to xxx) TODO: CHECK\n self._lock = threading.Lock()\n self._setupRequired = 0\n\n def get_type(self):\n if self._on:\n return TYPE_SENSOR_LIGHT_ON\n else:\n return TYPE_SENSOR_LIGHT_OFF\n\n #def callback_init(self):\n\n def setup_required(self):\n return self._setupRequired\n\n def callback_setup(self, stage):\n t = TYPE_SENSOR_LIGHT_OFF\n\n if self._on:\n t = TYPE_SENSOR_LIGHT_ON\n\n self._setupRequired = 0\n # new type, sdelay, udelay, more steps\n return t, 0, 0, 0\n\n def callback_update(self, value):\n self._lock.acquire()\n self._value = value\n self._lock.release()\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n def set_light_on(self):\n if not self._on:\n self._on = 1\n self._setupRequired = 1\n\n def set_light_off(self):\n if self._on:\n self._on = 0\n self._setupRequired = 1\n\n def toggle_light(self):\n self._on = int(not self._on)\n self._setupRequired = 1\n\n# Implementation of Lego Color sensor\ncolorName = [\"None\" , \"Black\", \"Blue\", \"Green\", \"Yellow\", \"Red\", \"White\"]\ncolorMode = {\"FULL\": 0, \"RED\": 1, \"GREEN\": 2, \"BLUE\": 3, \"NONE\": 4}\n#\nclass BrickPiLegoColorSensor(BPi.BrickPiSensor):\n def __init__(self, portNumber, mode=colorMode[\"FULL\"]):\n self._port = portNumber\n self._mode = max(0, min(4, int(mode)))\n self._value = 999 # invalid (range 0 to xxx) TODO: CHECK\n self._color = colorName[0]\n self._lock = threading.Lock()\n self._setupRequired = 0\n\n def get_type(self):\n if(self._mode == colorMode[\"FULL\"] ):\n return TYPE_SENSOR_COLOR_FULL\n elif(self._mode == colorMode[\"RED\"]):\n return TYPE_SENSOR_COLOR_RED\n elif(self._mode == colorMode[\"GREEN\"]):\n return TYPE_SENSOR_COLOR_GREEN\n elif(self._mode == colorMode[\"BLUE\"]):\n return TYPE_SENSOR_COLOR_BLUE\n else:\n return TYPE_SENSOR_COLOR_NONE\n\n #def callback_init(self):\n\n def setup_required(self):\n return self._setupRequired\n\n def callback_setup(self, stage):\n t = TYPE_SENSOR_COLOR_NONE\n sd = 0.0\n ud = 0.0\n\n if(self._mode == colorMode[\"FULL\"]):\n t = TYPE_SENSOR_COLOR_FULL\n elif(self._mode == colorMode[\"RED\"]):\n t = TYPE_SENSOR_COLOR_RED\n elif(self._mode == colorMode[\"GREEN\"]):\n t = TYPE_SENSOR_COLOR_GREEN\n elif(self._mode == colorMode[\"BLUE\"]):\n t = TYPE_SENSOR_COLOR_BLUE\n\n self._setupRequired = 0\n # new type, sdelay, udelay, more steps\n return t, sd, ud, 0\n\n def callback_update(self, value):\n self._lock.acquire()\n if self._mode:\n self._value = value\n self._color = colorName[0]\n else:\n self._value = 0\n self._color = colorName[value]\n self._lock.release()\n\n def get_value(self):\n try:\n self._lock.acquire()\n return self._value\n finally:\n self._lock.release()\n\n def get_color(self):\n try:\n self._lock.acquire()\n return self._color\n finally:\n self._lock.release()\n\n def set_mode(self, mode):\n m = max(0, min(4, int(mode)))\n\n if(self._mode != m):\n self._mode = m\n self._setupRequired = 1\n\n def get_mode(self):\n return 
self._mode\n","repo_name":"DexterInd/BrickPi","sub_path":"Software/BrickPi_Python/Contrib/ThreadSafeBrickPi/LEGO-Sensors.py","file_name":"LEGO-Sensors.py","file_ext":"py","file_size_in_byte":8376,"program_lang":"python","lang":"en","doc_type":"code","stars":259,"dataset":"github-code","pt":"18"} +{"seq_id":"35826141363","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.patches as mpatches\nimport progressbar\n\n\n\n###########################\n## Utility functions ##\n###########################\n\ndef remove_nan(array):\n return array[~np.isnan(array)]\n\n\n\n############################\n## Template functions ##\n############################\n\ndef template1(k, initial_value):\n a, b, c, d, e = int(40*k), int(120*k), int(20*k), int(14*k), int(6*k)\n f = int((a-c)/2)\n\n x, y = a, b\n t = np.empty((y, x), dtype=np.float)\n t[:] = initial_value\n t[0, :] = 100\n t[-1, :] = 0\n t[-(d+e):-e, f:f+c] = np.nan\n return t\n\n\ndef template2(k, initial_value):\n a, b, c, d, e = int(40*k), int(120*k), int(20*k), int(14*k), int(6*k)\n f, g = d+e, int(c/2)\n\n x, y = int((a/2)), b\n t = np.empty((y, x), dtype=float)\n t[:] = initial_value\n t[:, 0] = 0\n t[:, -1] = 100\n t[-f:-e, -g:] = np.nan\n t[-f, -g:] = 100\n t[-e, -g:] = 100\n t[-f:-e, -g] = 100\n return t\n\n\n###############################\n## Computation functions ##\n###############################\n\ndef compute_potential(template, axis, epochs=50, max_error=None, callbacks=[]):\n history = []\n p = np.copy(template)\n prev = np.copy(template)\n x, y = p.shape[1], p.shape[0]\n\n def get_neighbors(i, j, p):\n if (j - 1) < 0 or np.isnan(p[j - 1, i]):\n t = p[j + 1, i]\n else:\n t = p[j - 1, i]\n\n if (j + 1) >= y or np.isnan(p[j + 1, i]):\n b = p[j - 1, i]\n else:\n b = p[j + 1, i]\n\n if (i - 1) < 0 or np.isnan(p[j, i - 1]):\n l = p[j, i + 1]\n else:\n l = p[j, i - 1]\n\n if (i + 1) >= x or np.isnan(p[j, i + 1]):\n r = p[j, i - 1]\n else:\n r = p[j, i + 1]\n return t, b, l, r\n\n if max_error is None:\n bar = progressbar.ProgressBar(max_value=epochs)\n else:\n bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength)\n curr_epoch = 0\n curr_error = 1e12\n while True:\n if axis == 0:\n for j in range(1, y - 1):\n for i in range(x):\n if np.isnan(p[j, i]):\n continue\n t, b, l, r = get_neighbors(i, j, p)\n p[j, i] = (t + b + l + r) / 4.\n else:\n for i in range(1, x - 1):\n for j in range(y):\n if np.isnan(p[j, i]):\n continue\n t, b, l, r = get_neighbors(i, j, p)\n p[j, i] = (t + b + l + r) / 4.\n\n f_p = remove_nan(p)\n f_prev = remove_nan(prev)\n curr_error = np.sum((f_p - f_prev) ** 2) / f_p.shape[0]\n curr_epoch += 1\n history.append(curr_error)\n prev = np.copy(p)\n\n stop_signal = False\n for callback in callbacks:\n r = callback({ 'history': history, 'potential': p, 'epoch': curr_epoch })\n if isinstance(r, dict) and 'stop' in r.keys() and r['stop'] == True:\n stop_signal = stop_signal or True\n\n bar.update(curr_epoch)\n\n if stop_signal or \\\n (max_error is None and curr_epoch > epochs) or \\\n (max_error is not None and curr_error < max_error):\n break\n\n return p, history\n\n\ndef compute_ef(p, h):\n x, y = p.shape[1] - 1, p.shape[0] - 1\n Ex = np.empty((y, x), dtype=float)\n Ey = np.empty((y, x), dtype=float)\n Ex[:] = np.nan\n Ey[:] = np.nan\n\n for j in range(y):\n for i in range(x):\n if np.isnan(p[j, i]):\n continue\n Ex[j, i] = (p[j, i] + p[j+1, i] - p[j, i+1] - p[j+1, i+1]) / (2 * h)\n Ey[j, i] = (p[j, i] + p[j, i+1] - p[j+1, i] - p[j+1, i+1]) / (2 * h)\n 
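# E = -grad(phi): each field component is a central difference of the four surrounding potential nodes, so Ex and Ey are one cell smaller than p in each direction; NaN (conductor) cells are skipped.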
return Ex, Ey\n\n\ndef compute_resistence(V, l, h, condutivity, Ex, Ey, axis):\n if axis == 0:\n R = np.empty(Ey.shape[0])\n for j in range(Ey.shape[0]):\n flux = h * np.sum(np.abs(remove_nan(Ey[j, :])))\n R[j] = V / (l * condutivity * flux + 1e-12) # 1e-12: avoid division by 0\n else:\n R = np.empty(Ex.shape[1])\n for i in range(Ex.shape[1]):\n flux = h * np.sum(np.abs(remove_nan(Ex[:, i])))\n R[i] = V / (l * condutivity * flux)\n return R\n\n\n\n#################################\n## Vizualization functions ##\n#################################\n\ndef plot_equipotential(potential, k=1, title='', show=True, filename=None):\n p = remove_nan(potential)\n x, y = potential.shape[1] - 1, potential.shape[0] - 1\n \n lvl = np.arange(np.amin(p), np.amax(p) + 1, 2) # Delta Phi = 2V\n X1, Y1 = np.meshgrid(np.linspace(0, x+1, x+1), np.linspace(0, y+1, y+1))\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cs = ax.contour(X1, Y1, potential, levels=lvl, cmap='viridis')\n \n norm= mpl.colors.Normalize(vmin=cs.cvalues.min(), vmax=cs.cvalues.max())\n sm = plt.cm.ScalarMappable(norm=norm, cmap = cs.cmap)\n sm.set_array([])\n fig.colorbar(sm, ticks=np.arange(np.amin(p), np.amax(p) + 1, 10))\n\n ax.invert_yaxis()\n ax.set_aspect('equal')\n ax.set_title(title)\n ax.set_xlabel('Largura')\n ax.set_ylabel('Altura')\n\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.05)\n if show:\n plt.show()\n plt.close()\n\n\ndef plot_field(Ex, Ey, title='', show=True, filename=None):\n x, y = Ex.shape[1], Ex.shape[0]\n X, Y = np.meshgrid(np.linspace(0, x, x), np.linspace(0, y, y))\n fig = plt.figure(figsize=(5, 12))\n ax = fig.add_subplot(111)\n ax.quiver(\n X, Y, Ex, -Ey,\n scale=1e4, \n scale_units='xy',\n color='tab:blue'\n )\n ax.invert_yaxis()\n ax.set_aspect('equal')\n ax.set_title(title)\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.05)\n if show:\n plt.show()\n plt.close()\n\n\ndef plot_reistences(resistences, title='', show=True, filename=None):\n plt.plot(resistences, color='tab:blue')\n plt.hlines(np.median(resistences), xmin=0, xmax=len(resistences)-1, color='tab:red')\n plt.ylim((0, 25))\n plt.title(title)\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.05)\n if show:\n plt.show()\n plt.close()\n\n\ndef plot_templates(template1, template2, show=True, filename=None):\n fig, ax = plt.subplots(1, 2, figsize=(6, 10))\n color_100 = mpl.cm.get_cmap('viridis')(100/100)\n color_50 = mpl.cm.get_cmap('viridis')(50/100)\n color_0 = mpl.cm.get_cmap('viridis')(0/100)\n color_nan = mpl.cm.get_cmap('viridis')(np.nan)\n plt.legend(\n handles=[\n mpatches.Patch(color=color_100, label='V=100'),\n mpatches.Patch(color=color_50, label='V=50'),\n mpatches.Patch(color=color_0, label='V=0'),\n mpatches.Patch(color=color_nan, label='Indet.'),\n ], \n bbox_to_anchor=(1.03, 0.5), \n loc='center left'\n )\n ax[0].imshow(template1, aspect='auto')\n ax[1].imshow(template2, aspect='auto')\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', pad_inches=0.05)\n if show:\n plt.show()\n plt.close()\n\n\n\n########################\n## Main Functions ##\n########################\n\ndef problem_1():\n k = 2\n h = (1/k)*1e-3\n max_error = 1e-10\n t = template1(k, 50)\n\n def plot_callback(params):\n if params['epoch'] % 1000 != 0:\n return\n\n Ex, Ey = compute_ef(params['potential'], h)\n resistences = compute_resistence(\n V=100, \n l=100e-3, \n h=h, \n condutivity=5, \n Ex=Ex, \n Ey=Ey, \n axis=0\n )\n 
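# Dump the intermediate per-row resistance profile every 1000 epochs (see the epoch check at the top of this callback) so convergence of the relaxation can be inspected afterwards.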
plot_reistences(\n resistences, \n title=f'Epoch {params[\"epoch\"]}', \n filename=f'p1/res_{params[\"epoch\"]}.pdf',\n show=False\n )\n\n potential, history = compute_potential(\n template=t,\n axis=0,\n max_error=max_error,\n callbacks=[plot_callback]\n )\n np.save('p1_history.npy', np.array(history))\n np.save('p1_potential.npy', potential)\n \n plot_equipotential(\n potential, \n k=k, \n title='Equipotenciais', \n filename='p1_equipotenciais.pdf'\n )\n\n Ex, Ey = compute_ef(potential, h=h)\n plot_field(\n Ex, Ey, \n title='Campo Elétrico', \n filename='p1_campo_eletrico.pdf'\n )\n\n resistence = compute_resistence(\n V=100, \n l=100e-3, \n h=h, \n condutivity=5, \n Ex=Ex,\n Ey=Ey, \n axis=0\n )\n print(f'Resistencia: {resistence} Ohms')\n\n\ndef problem_2():\n k = 2\n h = (1/k)*1e-3\n max_error = 1e-10\n axis = 1\n t = template2(k, 50)\n\n potential, history = compute_potential(\n template=t,\n axis=axis,\n max_error=max_error\n )\n np.save('p2_history.npy', np.array(history))\n np.save('p2_potential.npy', potential)\n \n plot_equipotential(\n potential, \n k=k, \n title='Equipotenciais', \n filename='p2_equipotenciais.pdf'\n )\n\n Ex, Ey = compute_ef(potential, h=h)\n plot_field(\n Ex, Ey, \n title='Campo Elétrico', \n filename='p2_campo_eletrico.pdf'\n )\n\n resistence = compute_resistence(\n V=100, \n l=100e-3, \n h=h, \n condutivity=5, \n Ex=Ex,\n Ey=Ey, \n axis=axis\n )\n print(f'Resistencia: {resistence} Ohms')\n\nif __name__ == '__main__':\n problem_1()\n problem_2()","repo_name":"nmcardoso/poli","sub_path":"eletromagnetismo/ec1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8605,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"8336603562","text":"import re\r\n\r\nf = open(\"datamail.txt\")\r\n\r\nlines = f.read()\r\n# let us read each line separately\r\nlinebreaks = lines.split(\"\\n\")\r\n\r\nf.close()\r\nresult = []\r\nfor stop in linebreaks:\r\n pattern = re.search(\"@\", stop)\r\n if pattern != None:\r\n print(stop)\r\n\r\n\r\n#print(result)","repo_name":"Shades-of-Dark/ChatGPTPython-Course","sub_path":"expressionstest.py","file_name":"expressionstest.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73811728040","text":"import os\nimport tempfile\n\nfrom toil.common import Toil\nfrom toil.job import Job\n\n\nclass HelloWorld(Job):\n def __init__(self, message):\n Job.__init__(self)\n self.message = message\n\n def run(self, fileStore):\n return f\"Hello, world!, here's a message: {self.message}\"\n\n\nif __name__ == \"__main__\":\n jobstore: str = tempfile.mkdtemp(\"tutorial_invokeworkflow\")\n os.rmdir(jobstore)\n options = Job.Runner.getDefaultOptions(jobstore)\n options.logLevel = \"OFF\"\n options.clean = \"always\"\n\n hello_job = HelloWorld(\"Woot\")\n\n with Toil(options) as toil:\n print(toil.start(hello_job))\n","repo_name":"DataBiosphere/toil","sub_path":"src/toil/test/docs/scripts/tutorial_invokeworkflow.py","file_name":"tutorial_invokeworkflow.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":856,"dataset":"github-code","pt":"18"} +{"seq_id":"73831697959","text":"#!/usr/bin/env python3\n\nimport os\nfrom setuptools import setup\n\nbase_dir = os.path.dirname(__file__)\nwith open(os.path.join(base_dir, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='delphin.redwoods',\n 
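# NOTE: version, description and author below are still pre-release placeholders.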
version='0.0.0',\n description='',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/delph-in/delphin.redwoods',\n author='',\n author_email='',\n license='MIT',\n classifiers=[\n 'Development Status :: 1 - Planning',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Text Processing :: Linguistic',\n 'Topic :: Utilities'\n ],\n keywords='nlp semantics hpsg delph-in linguistics',\n packages=[\n ],\n install_requires=[\n 'pydelphin >= 1.0.0',\n ],\n extras_require={\n },\n entry_points={\n },\n)\n","repo_name":"delph-in/delphin.redwoods","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38005218803","text":"#-*- coding: utf-8 -*-\nimport os\nimport unittest\nfrom hunspell import HunSpell, HunSpellError\n\n\nclass HunSpellTest(unittest.TestCase):\n def setUp(self):\n self.hunspell = HunSpell(\"/usr/share/hunspell/en_US.dic\",\n \"/usr/share/hunspell/en_US.aff\")\n\n def tearDown(self):\n try:\n del self.hunspell\n except AttributeError:\n pass\n\n def test_hunspell_spell(self):\n self.assertFalse(self.hunspell.spell('dpg'))\n self.assertTrue(self.hunspell.spell('dog'))\n self.assertFalse(self.hunspell.spell('spookie'))\n self.assertTrue(self.hunspell.spell('spooky'))\n\n def test_hunspell_suggest(self):\n self.assertEqual(self.hunspell.suggest('dpg'),\n ['dog', 'pg', 'deg', 'dig', 'dpt',\n 'dug', 'mpg', 'd pg', 'GDP',\n 'DP', 'PG', 'DTP', 'dip'])\n self.assertEqual(self.hunspell.suggest('spookie'),\n ['spookier', 'spookiness', 'spook', 'cookie',\n 'bookie', 'Spokane', 'spoken'])\n self.assertEqual(self.hunspell.suggest('Eelysa'),\n ['Elyssa', 'Elysees', 'Elysha', 'Elysia',\n 'Elissa', 'Elysée'])\n\n def test_hunspell_stem(self):\n self.assertEqual(self.hunspell.stem('dog'), [b'dog'])\n self.assertEqual(self.hunspell.stem('permanently'), [b'permanent'])\n self.assertEqual(self.hunspell.stem('linked'), [b'linked', b'link'])\n\n def test_analyze(self):\n self.assertEqual(self.hunspell.analyze('linked'),\n [b' st:linked', b' st:link fl:D'])\n\n def test_add_remove(self):\n self.assertFalse(self.hunspell.spell('pipo'))\n self.hunspell.add('pipo')\n self.assertTrue(self.hunspell.spell('pipo'))\n self.hunspell.remove('pipo')\n self.assertFalse(self.hunspell.spell('pipo'))\n\n def test_add_dic(self):\n self.assertFalse(self.hunspell.spell(\"dictionnaire\"))\n try:\n self.hunspell.add_dic(\"/usr/share/hunspell/fr.dic\")\n except HunSpellError:\n raise ValueError(\"/usr/share/hunspell/fr.dic is not installed. 
\"\n \"Please install hunspell-fr to validate this test.\")\n self.assertTrue(self.hunspell.spell(\"dictionnaire\"))\n\n\nclass HunSpellGenerateTest(unittest.TestCase):\n def setUp(self):\n self.hunspell = HunSpell(\"/usr/share/hunspell/en_GB.dic\",\n \"/usr/share/hunspell/en_GB.aff\")\n\n def test_generate(self):\n self.assertEqual(self.hunspell.generate('boy', 'girls'), [b'boys'])\n\n def test_generate2(self):\n self.assertEqual(self.hunspell.generate2('boy', 'is:Ns'), [b'boys'])\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Nisanchhetri/nepali-romanizer","sub_path":"hunspell/pyhunspell/tests/test_hunspell.py","file_name":"test_hunspell.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1545319361","text":"\"\"\"citeseer dataset.\"\"\"\nfrom graph_tfds.core.utils.dgl import citation_graph\n\nCLASS_NAMES = (\n "Agents\",\n \"AI\",\n \"DB\",\n \"IR\",\n \"ML\",\n \"HCI\",\n)\n\n\nNUM_FEATURES = 3703\n\n\nclass CiteSeer(citation_graph.CitationGraph):\n \"\"\"DatasetBuilder for the citeseer dataset.\"\"\"\n\n _NAME = \"citeseer\"\n _URL_NAME = \"citeseer\"\n\n\nif __name__ == \"__main__\":\n citation_graph._vis(CiteSeer()) # pylint: disable=protected-access\n","repo_name":"jackd/graph-tfds","sub_path":"graph_tfds/graphs/cite_seer/cite_seer.py","file_name":"cite_seer.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"41774094418","text":"\"\"\"Add user model, user relationship to Donation\n\nRevision ID: ca7f0049a2d4\nRevises: c3127595b79f\nCreate Date: 2022-08-27 14:04:39.000297\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ca7f0049a2d4'\ndown_revision = 'c3127595b79f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('email', sa.String(length=320), nullable=False),\n sa.Column('hashed_password', sa.String(length=1024), nullable=False),\n sa.Column('is_active', sa.Boolean(), nullable=False),\n sa.Column('is_superuser', sa.Boolean(), nullable=False),\n sa.Column('is_verified', sa.Boolean(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.create_index(batch_op.f('ix_user_email'), ['email'], unique=True)\n\n with op.batch_alter_table('donation', schema=None) as batch_op:\n batch_op.create_foreign_key('fk_donation_user_id_user', 'user', ['user_id'], ['id'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('donation', schema=None) as batch_op:\n batch_op.drop_constraint('fk_donation_user_id_user', type_='foreignkey')\n\n with op.batch_alter_table('user', schema=None) as batch_op:\n batch_op.drop_index(batch_op.f('ix_user_email'))\n\n op.drop_table('user')\n # ### end Alembic commands ###\n","repo_name":"sniki-ld/cat_charity_fund","sub_path":"alembic/versions/ca7f0049a2d4_add_user_model_user_relationship_to_.py","file_name":"ca7f0049a2d4_add_user_model_user_relationship_to_.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"73480675880","text":"import os\nfrom json import dumps\nfrom shutil import copyfile\nfrom tempfile import TemporaryDirectory\nfrom unittest import TestCase\n\nfrom kedja.interfaces import ITemplateFileUtil\nfrom pyramid import testing\nfrom pyramid.request import apply_request_extensions\nfrom transaction import commit\nfrom webtest import TestApp\n\nfrom kedja.testing import get_settings\n\n\ndef get_dummy_structure_fp():\n here = os.path.abspath(os.path.dirname(__file__))\n return os.path.join(here, 'testing_fixtures', '123.yaml')\n\n\nclass FunctionalTemplatesAPIViewTests(TestCase):\n\n def setUp(self):\n self.tmpdir = TemporaryDirectory()\n copyfile(get_dummy_structure_fp(), os.path.join(self.tmpdir.name, '123.yaml'))\n settings = get_settings()\n settings['kedja.templates_dir'] = self.tmpdir.name\n self.config = testing.setUp(settings=settings)\n self.config.include('kedja.testing')\n self.config.include('pyramid_tm')\n self.config.include('kedja.views.api.templates')\n # FIXME: Actual test of security? :)\n self.config.testing_securitypolicy(permissive=True)\n\n def tearDown(self):\n self.tmpdir.cleanup()\n testing.tearDown()\n\n def _fixture(self, request):\n from kedja import root_factory\n root = root_factory(request)\n commit()\n return root\n\n def test_get(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n request = testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n self._fixture(request)\n response = app.get('/api/1/templates/123', status=200)\n data = response.json_body\n self.assertEqual(1, data['version'])\n self.assertEqual(\"Hello from template\", data['title'])\n self.assertEqual(2, len(data['export']['contained'])) # The collections\n\n def test_get_404(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n request = testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n self._fixture(request)\n response = app.get('/api/1/templates/404nope', status=404)\n self.assertEqual(404, response.status_int)\n\n def test_get_bad_name(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n request = testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n self._fixture(request)\n app.get('/api/1/templates/..123', status=404)\n\n def test_post(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n request = testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n root = self._fixture(request)\n response = app.post('/api/1/templates/123', status=200)\n data = response.json_body\n self.assertEqual('Wall', data['type_name'])\n wall = root[str(data['rid'])]\n self.assertEqual('En annan', wall.title)\n\n def test_collection_get(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n request = 
testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n self._fixture(request)\n response = app.get('/api/1/templates', status=200)\n data = response.json_body\n self.assertEqual(1, len(data))\n self.assertEqual(1, data[0]['version'])\n self.assertEqual(\"Hello from template\", data[0]['title'])\n\n def test_collection_post(self):\n wsgiapp = self.config.make_wsgi_app()\n app = TestApp(wsgiapp)\n self.config.include('kedja.models.template')\n request = testing.DummyRequest()\n apply_request_extensions(request)\n self.config.begin(request)\n root = self._fixture(request)\n tpl_util = self.config.registry.getUtility(ITemplateFileUtil)\n appstruct = tpl_util.read_appstruct('123')\n from kedja.models.export_import import import_structure\n # Wall will be created with a new rid\n wall = import_structure(root, request, appstruct['export'])\n commit()\n body = dumps({'rid': wall.rid, 'title': 'My new template'})\n response = app.post('/api/1/templates', status=200, params=body)\n data = response.json_body\n self.assertEqual('My new template', data['title'])\n self.assertEqual(2, len(list(tpl_util.get_all_appstructs())))\n","repo_name":"kedjaproject/kedja_server","sub_path":"src/kedja/views/api/tests/test_templates.py","file_name":"test_templates.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10369683959","text":"\"\"\"A collection of useful helper functions\"\"\"\n\nimport numpy as np\nimport torch\n\nfrom pathlib import Path\n\nfrom cos.helpers.constants import SPEED_OF_SOUND\n\n\ndef shift_mixture(input_data, target_position, mic_radius, sr, inverse=False):\n \"\"\"\n Shifts the input according to the voice position. This\n lines up the voice samples in the time domain coming from a target_angle\n Args:\n input_data - M x T numpy array or torch tensor\n target_position - The location where the data should be aligned\n mic_radius - In meters. 
The number of mics is inferred from\n the input_Data\n sr - Sample Rate in samples/sec\n inverse - Whether to align or undo a previous alignment\n\n Returns: shifted data and a list of the shifts\n \"\"\"\n # elevation_angle = 0.0 * np.pi / 180\n # target_height = 3.0 * np.tan(elevation_angle)\n # target_position = np.append(target_position, target_height)\n\n num_channels = input_data.shape[0]\n\n # Must match exactly the generated or captured data\n mic_array = [[\n mic_radius * np.cos(2 * np.pi / num_channels * i),\n mic_radius * np.sin(2 * np.pi / num_channels * i),\n ] for i in range(num_channels)]\n\n # Mic 0 is the canonical position\n distance_mic0 = np.linalg.norm(mic_array[0] - target_position)\n shifts = [0]\n\n # Check if numpy or torch\n if isinstance(input_data, np.ndarray):\n shift_fn = np.roll\n elif isinstance(input_data, torch.Tensor):\n shift_fn = torch.roll\n else:\n raise TypeError(\"Unknown input data type: {}\".format(type(input_data)))\n\n # Shift each channel of the mixture to align with mic0\n for channel_idx in range(1, num_channels):\n distance = np.linalg.norm(mic_array[channel_idx] - target_position)\n distance_diff = distance - distance_mic0\n shift_time = distance_diff / SPEED_OF_SOUND\n shift_samples = int(round(sr * shift_time))\n if inverse:\n input_data[channel_idx] = shift_fn(input_data[channel_idx],\n shift_samples)\n else:\n input_data[channel_idx] = shift_fn(input_data[channel_idx],\n -shift_samples)\n shifts.append(shift_samples)\n\n return input_data, shifts\n\n\ndef angular_distance(angle1, angle2):\n \"\"\"\n Computes the distance in radians betwen angle1 and angle2.\n We assume they are between -pi and pi\n \"\"\"\n d1 = abs(angle1 - angle2)\n d2 = abs(angle1 - angle2 + 2 * np.pi)\n d3 = abs(angle2 - angle1 + 2 * np.pi)\n\n return min(d1, d2, d3)\n\ndef get_starting_angles(window_size):\n \"\"\"Returns the list of target angles for a window size\"\"\"\n divisor = int(round(2 * np.pi / window_size))\n return np.array(list(range(-divisor + 1, divisor, 2))) * np.pi / divisor\n\n\ndef to_categorical(index: int, num_classes: int):\n \"\"\"Creates a 1-hot encoded np array\"\"\"\n data = np.zeros((num_classes))\n data[index] = 1\n return data\n\ndef convert_angular_range(angle: float):\n \"\"\"Converts an arbitrary angle to the range [-pi pi]\"\"\"\n corrected_angle = angle % (2 * np.pi)\n if corrected_angle > np.pi:\n corrected_angle -= (2 * np.pi)\n\n return corrected_angle\n\ndef trim_silence(audio, window_size=22050, cutoff=0.001):\n \"\"\"Trims all silence within an audio file\"\"\"\n idx = 0\n new_audio = []\n while idx * window_size < audio.shape[1]:\n segment = audio[:, idx*window_size:(idx+1)*window_size]\n if segment.std() > cutoff:\n new_audio.append(segment)\n idx += 1\n\n return np.concatenate(new_audio, axis=1)\n\n\ndef check_valid_dir(dir, requires_n_voices=2):\n \"\"\"Checks that there is at least n voices\"\"\"\n if len(list(Path(dir).glob('*_voice00.wav'))) < 1:\n return False\n\n if requires_n_voices == 2:\n if len(list(Path(dir).glob('*_voice01.wav'))) < 1:\n return False\n\n if requires_n_voices == 3:\n if len(list(Path(dir).glob('*_voice02.wav'))) < 1:\n return False\n\n if requires_n_voices == 4:\n if len(list(Path(dir).glob('*_voice03.wav'))) < 1:\n return False\n\n if len(list(Path(dir).glob('metadata.json'))) < 1:\n return False\n\n return 
True\n","repo_name":"vivjay30/Cone-of-Silence","sub_path":"cos/helpers/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","stars":136,"dataset":"github-code","pt":"18"} +{"seq_id":"28158266675","text":"ac = 0 #accumulator variable.\nprint('Bem-vindo(a) à Pizzaria da Giovana.\\n'\n '---------------------- Cardápio ----------------------\\n'\n '| Código | Descrição | Pizza Média | Pizza Grande |\\n'\n '| 21 | Napolitana | R$ 20,00 | R$ 26,00 |\\n'\n '| 22 | Marguerita | R$ 22,00 | R$ 26,00 |\\n'\n '| 23 | Calabresa | R$ 25,00 | R$ 32,50 |\\n'\n '| 24 | Toscana | R$ 30,00 | R$ 39,00 |\\n'\n '| 25 | Portuguesa | R$ 30,00 | R$ 39,00 |\\n'\n '-----------------------------------------------------')\n\nwhile True:\n tam = input('Qual o tamanho desejado? (M/G): ').upper()\n if tam == 'M' or tam == 'G':\n cod = int(input('Digite o código do sabor desejado: '))\n else:\n print('Opção inválida. Digite apenas M ou G!')\n continue #go back to the start of the loop, at the 'tam' prompt.\n if cod == 21:\n sab = 'Napolitana'\n if tam == 'M':\n ac += 20\n else:\n ac += 26\n elif cod == 22:\n sab = 'Marguerita'\n if tam == 'M':\n ac += 22\n else:\n ac += 26\n elif cod == 23:\n sab = 'Calabresa'\n if tam == 'M':\n ac += 25\n else:\n ac += 32.50\n elif cod == 24:\n sab = 'Toscana'\n if tam == 'M':\n ac += 30\n else:\n ac += 39\n elif cod == 25:\n sab = 'Portuguesa'\n if tam == 'M':\n ac += 30\n else:\n ac += 39\n else:\n print('Opção de código inválida.')\n continue\n print('Você pediu uma pizza {} de sabor {}.'.format(tam, sab))\n cont = input('Deseja pedir algo mais?(S/N): ').upper()\n if cont == 'S':\n continue #go back to the start of the program.\n else:\n print('O valor total do seu pedido é de R${:.2f}'.format(ac))\n break # the program ends.\n","repo_name":"Giovana-Arruda/Logic-in-Python","sub_path":"exerc2.py","file_name":"exerc2.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9472509855","text":"from telegram import Update\nfrom telegram.ext import (\n Updater,\n CommandHandler,\n MessageHandler,\n Filters,\n CallbackContext,\n)\nimport ujson\n\nimport logging\n\n\"\"\"\nBotFather commands:\n\nstart - Starts the bot\nbuy - Sets Ackerman scheme to buy\nsell - Sets Ackerman scheme to sell\n\"\"\"\n\n\nclass AckermanBot:\n def __init__(self):\n \"\"\"Start the bot.\"\"\"\n self._values = {\"buy\": [65, 85, 95, 100], \"sell\": [135, 115, 105, 100]}\n # load token from file\n self._loadToken()\n # Create the Updater and pass it your bot's token.\n self._updater = Updater(self._token)\n\n # Get the dispatcher to register handlers\n self._dispatcher = self._updater.dispatcher\n\n # on different commands - answer in Telegram\n self._dispatcher.add_handler(CommandHandler(\"start\", self._startCommand))\n self._dispatcher.add_handler(CommandHandler(\"help\", self._helpCommand))\n self._dispatcher.add_handler(CommandHandler(\"buy\", self._buyCommand))\n self._dispatcher.add_handler(CommandHandler(\"sell\", self._sellCommand))\n # error handler function\n self._dispatcher.add_error_handler(self._errorHandler)\n\n # on non command i.e message - echo the message on Telegram\n self._dispatcher.add_handler(\n MessageHandler(Filters.text & ~Filters.command, self._replyCommand)\n )\n\n def start(self):\n logging.info(\"Bot started\")\n # Start the Bot\n self._updater.start_polling()\n # init the user to buy mode\n\n # Run the bot until 
you press Ctrl-C or the process receives SIGINT,\n # SIGTERM or SIGABRT. This should be used most of the time, since\n # start_polling() is non-blocking and will stop the bot gracefully.\n self._updater.idle()\n\n def _loadToken(self, path: str = \"secrets.json\") -> None:\n try:\n with open(path, \"r\") as f:\n self._token = ujson.load(f)[\"token\"]\n except (FileNotFoundError, ValueError):\n logging.error(\n f\"File {path} not found and has been created. Please set a token.\"\n )\n with open(path, \"w\") as f:\n ujson.dump({\"token\": \"YOUR-TOKEN-HERE\"}, f, indent=4)\n self._token = None\n\n def _formatMessage(self, val: float, mode: int = 0) -> str:\n # sadly, this function needed to be partially cleaned up\n # source values\n computed = [\n str(round(float(val * v / 100), 2)).replace(\".\", \"\\.\")\n for v in self._values[mode]\n ]\n return f\"_*Ackerman deal {mode} scheme*_: \\n\\n\" + \"\\n\".join(\n f\"_*{self._values[mode][x]}*_%: {computed[x]}\" for x in range(len(computed))\n )\n\n def _errorHandler(self, update: Update, context: CallbackContext) -> None:\n logging.error(f\"Update {update} caused error {context.error}\")\n\n def _setMode(self, context: CallbackContext, mode: str) -> str:\n \"\"\"Set Ackerman scheme mode and returns formatted message\"\"\"\n if mode not in self._values:\n raise ValueError\n\n context.user_data[\"mode\"] = mode\n return f\"*_Bot is now in {mode} mode_*\"\n\n def _startCommand(self, update: Update, context: CallbackContext) -> None:\n \"\"\"Send a message when the command /start is issued.\"\"\"\n update.message.reply_markdown_v2(\n f\"Hi {update.effective_user.mention_markdown_v2()}\\! Send me a number to create your _*Ackerman deal scheme*_\\!\",\n )\n # default the user to buy mode\n context.user_data[\"mode\"] = \"buy\"\n\n def _helpCommand(self, update: Update, context: CallbackContext) -> None:\n \"\"\"Send a message when the command /help is issued.\"\"\"\n update.message.reply_markdown_v2(\n \"Send me a number to create your _*Ackerman deal scheme*_!\"\n )\n\n def _replyCommand(self, update: Update, context: CallbackContext) -> None:\n \"\"\"Send the message\"\"\"\n mode = context.user_data.get(\"mode\", \"buy\")\n try:\n num = float(update.message.text)\n except ValueError:\n update.message.reply_text(\"I'd need a number, not a string\")\n else:\n # lorenzo took a bit of pride in this\n update.message.reply_markdown_v2(\n self._formatMessage(num, mode=mode),\n reply_to_message_id=update.message.message_id,\n )\n\n def _buyCommand(self, update: Update, context: CallbackContext) -> None:\n \"\"\"Set Ackerman scheme to buy\"\"\"\n message = self._setMode(context, \"buy\")\n update.message.reply_markdown_v2(message)\n\n def _sellCommand(self, update: Update, context: CallbackContext) -> None:\n \"\"\"Set Ackerman scheme to sell\"\"\"\n message = self._setMode(context, \"sell\")\n update.message.reply_markdown_v2(message)\n","repo_name":"valerionew/ackerman-deals-bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12508607870","text":"import numpy as np\nimport cv2\nimport testSIFT\nfrom matplotlib import pyplot as plt\nimport logging\nlogger = logging.getLogger(__name__)\n\nMIN_MATCH_COUNT = 10\n\nimg1 = cv2.imread('../Image/1.jpg', 0)\nimg2 = cv2.imread('../Image/2.jpg', 0)\n\n# Compute SIFT keypoints and descriptors\nkp1, des1 = testSIFT.computeKeypointsAndDescriptors(img1)\nkp2, des2 = 
testSIFT.computeKeypointsAndDescriptors(img2)\n\n# Initialize and use FLANN\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks = 50)\n\nflann = cv2.FlannBasedMatcher(index_params, search_params)\n\"\"\"\n knnMatch returns two DMatch objects per query descriptor,\n with the fields queryIdx, trainIdx, distance\n queryIdx: index of the descriptor (and of its keypoint) in the query image.\n trainIdx: index of the descriptor (and of its keypoint) in the train image.\n distance: Euclidean distance between the two matched descriptors; the smaller it is, the more similar the two keypoints. ---- the similarity measure\n The two DMatch objects are the two keypoints closest to the query keypoint (match would return only the single best one)\n A match is only considered successful when this Euclidean distance is below a certain threshold.\n\"\"\"\nmatches = flann.knnMatch(des1, des2, k=2)\n\n# Lowe's ratio test\ngood = []\nfor m, n in matches:\n if m.distance < 0.7 * n.distance:\n # the match passed, keep it in good\n good.append(m)\n\nif len(good) > MIN_MATCH_COUNT:\n # Estimate homography between template and scene\n src_pts = np.float32([ kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\n M = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)[0]\n\n # Draw detected template in scene image\n h, w = img1.shape\n pts = np.float32([[0, 0],\n [0, h - 1],\n [w - 1, h - 1],\n [w - 1, 0]]).reshape(-1, 1, 2)\n dst = cv2.perspectiveTransform(pts, M)\n\n img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n\n # Combine the two images side by side\n h1, w1 = img1.shape\n h2, w2 = img2.shape\n\n nWidth = w1 + w2\n nHeight = max(h1, h2)\n hdif = int((h2 - h1) / 2)\n newimg = np.zeros((nHeight, nWidth, 3), np.uint8)\n\n for i in range(3):\n newimg[hdif:hdif + h1, :w1, i] = img1\n newimg[:h2, w1:w1 + w2, i] = img2\n\n # Draw SIFT keypoint matches\n for m in good:\n pt1 = (int(kp1[m.queryIdx].pt[0]), int(kp1[m.queryIdx].pt[1] + hdif))\n pt2 = (int(kp2[m.trainIdx].pt[0] + w1), int(kp2[m.trainIdx].pt[1]))\n cv2.line(newimg, pt1, pt2, (255, 0, 0))\n\n plt.imshow(newimg)\n plt.show()\nelse:\n print(\"Not enough matches are found - %d/%d\" % (len(good), MIN_MATCH_COUNT))\n\n","repo_name":"GreatBoyLi/PythonProject","sub_path":"SfM/sift/SIFTMain.py","file_name":"SIFTMain.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44338137778","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('merchant', '0003_auto_20151113_2245'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='managers',\n name='mobile',\n field=models.CharField(max_length=10, null=True, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='merchant',\n name='phoneno',\n field=models.CharField(max_length=11, null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"medstart/medstart","sub_path":"merchant/migrations/0004_auto_20151113_2249.py","file_name":"0004_auto_20151113_2249.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19061690144","text":"'''\nExclusion operator\n'''\nimport itertools\nimport numpy as np\nfrom aux.aux import *\n\n\nparams = [\"REXCL\"]\nscope = [\"GDV\"]\n\ndef cp(parameters):\n if parameters[\"COMP_EXCLUSION\"] == 1:\n if parameters[\"COMP_MULTIPOPULATION\"] == 1:\n if 0 < parameters[\"COMP_EXCLUSION_REXCL\"] < parameters[\"MAX_POS\"]:\n return 1\n else:\n errorWarning(\"3.2.1\", \"algoConfig.ini\", \"COMP_EXCLUSION_REXCL\", \"The Exclusion radius should be in the interval 
]0, POS_MAX[\")\n sys.exit()\n elif (parameters[\"COMP_MULTIPOPULATION_N\"] < 1):\n errorWarning(\"3.2.2\", \"algoConfig.ini\", \"COMP_MULTIPOPULATION_N\", \"The Exclusion component require the multipopulation N should be greater than 1\")\n sys.exit()\n elif(parameters[\"COMP_EXCLUSION\"] != 0):\n errorWarning(\"3.2.2\", \"algoConfig.ini\", \"COMP_EXCLUSION\", \"The Exclusion component should be 0 or 1\")\n sys.exit()\n else:\n return 0\n\ndef component(pop, parameters, randomInit):\n rexcl = parameters[\"COMP_EXCLUSION_REXCL\"]\n for sp1, sp2 in itertools.combinations(range(len(pop)), 2):\n # Subpop must have a best and not already be set to reinitialize\n if pop[sp1].best and pop[sp2].best and not (randomInit[sp1] or randomInit[sp2]):\n dist = 0\n for x1, x2 in zip(pop[sp1].best[\"pos\"], pop[sp2].best[\"pos\"]):\n dist += (x1 - x2)**2\n dist = np.sqrt(dist)\n if dist <= rexcl:\n if pop[sp1].best[\"fit\"] <= pop[sp2].best[\"fit\"]:\n randomInit[sp1] = 1\n else:\n randomInit[sp2] = 1\n #else:\n #print(f\"{pop[sp1].id} {pop[sp1].best['pos']} -- {pop[sp2].id} {pop[sp2].best['pos']}\")\n\n return randomInit\n","repo_name":"AbEC-EC/AbEC","sub_path":"abec/components/exclusion.py","file_name":"exclusion.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"39121283761","text":"from django import template\nfrom django.conf import settings\n\nfrom demo.models import *\n\nregister = template.Library()\n\n\n# settings value\n@register.assignment_tag\ndef get_googe_maps_key():\n return getattr(settings, 'GOOGLE_MAPS_KEY', \"\")\n\n\n@register.assignment_tag(takes_context=True)\ndef get_site_root(context):\n # NB this returns a core.Page, not the implementation-specific model used\n # so object-comparison to self will return false as objects would differ\n return context['request'].site.root_page\n\n\ndef has_menu_children(page):\n if page.get_children().filter(live=True, show_in_menus=True):\n return True\n else:\n return False\n\n\n# Retrieves the top menu items - the immediate children of the parent page\n# The has_menu_children method is necessary because the bootstrap menu requires\n# a dropdown class to be applied to a parent\n@register.inclusion_tag('demo/tags/top_menu.html', takes_context=True)\ndef top_menu(context, parent, calling_page=None):\n menuitems = parent.get_children().filter(\n live=True,\n show_in_menus=True\n )\n for menuitem in menuitems:\n menuitem.show_dropdown = has_menu_children(menuitem)\n return {\n 'calling_page': calling_page,\n 'menuitems': menuitems,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Retrieves the children of the top menu items for the drop downs\n@register.inclusion_tag('demo/tags/top_menu_children.html', takes_context=True)\ndef top_menu_children(context, parent):\n menuitems_children = parent.get_children()\n menuitems_children = menuitems_children.filter(\n live=True,\n show_in_menus=True\n )\n return {\n 'parent': parent,\n 'menuitems_children': menuitems_children,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Retrieves the secondary links for the 'also in this section' links\n# - either the children or siblings of the current page\n@register.inclusion_tag('demo/tags/secondary_menu.html', takes_context=True)\ndef secondary_menu(context, calling_page=None):\n pages = []\n if calling_page:\n pages = 
calling_page.get_children().filter(\n live=True,\n show_in_menus=True\n )\n\n # If no children, get siblings instead\n if len(pages) == 0:\n pages = calling_page.get_other_siblings().filter(\n live=True,\n show_in_menus=True\n )\n return {\n 'pages': pages,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Retrieves all live pages which are children of the calling page\n#for standard index listing\n@register.inclusion_tag(\n 'demo/tags/standard_index_listing.html',\n takes_context=True\n)\ndef standard_index_listing(context, calling_page):\n pages = calling_page.get_children().filter(live=True)\n return {\n 'pages': pages,\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Person feed for home page\n@register.inclusion_tag(\n 'demo/tags/person_listing_homepage.html',\n takes_context=True\n)\ndef person_listing_homepage(context, count=2):\n people = PersonPage.objects.filter(live=True).order_by('?')\n return {\n 'people': people[:count],\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Blog feed for home page\n@register.inclusion_tag(\n 'demo/tags/blog_listing_homepage.html',\n takes_context=True\n)\ndef blog_listing_homepage(context, count=2):\n blogs = BlogPage.objects.filter(live=True).order_by('-date')\n return {\n 'blogs': blogs[:count],\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Events feed for home page\n@register.inclusion_tag(\n 'demo/tags/event_listing_homepage.html',\n takes_context=True\n)\ndef event_listing_homepage(context, count=2):\n events = EventPage.objects.filter(live=True)\n events = events.filter(date_from__gte=date.today()).order_by('date_from')\n return {\n 'events': events[:count],\n # required by the pageurl tag that we want to use within this template\n 'request': context['request'],\n }\n\n\n# Advert snippets\n@register.inclusion_tag('demo/tags/adverts.html', takes_context=True)\ndef adverts(context):\n return {\n 'adverts': Advert.objects.all(),\n 'request': context['request'],\n }\n\n\n# Format times e.g. 
on event page\n@register.filter\ndef time_display(time):\n # Get hour and minute from time object\n hour = time.hour\n minute = time.minute\n\n # Convert to 12 hour format\n if hour >= 12:\n pm = True\n hour -= 12\n else:\n pm = False\n if hour == 0:\n hour = 12\n\n # Hour string\n hour_string = str(hour)\n\n # Minute string\n if minute != 0:\n minute_string = \".\" + str(minute)\n else:\n minute_string = \"\"\n\n # PM string\n if pm:\n pm_string = \"pm\"\n else:\n pm_string = \"am\"\n\n # Join and return\n return \"\".join([hour_string, minute_string, pm_string])\n","repo_name":"kaedroho/wagtail-salesforce-demo","sub_path":"demo/templatetags/demo_tags.py","file_name":"demo_tags.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"73467530646","text":"#Using a list comprehension, select the names of the variables that are DIFFERENT\n#from the variable names given below and create a new dataframe.\nimport pandas as pd\nimport seaborn as sns\ndf = sns.load_dataset(\"car_crashes\")\ndf = df.drop('abbrev', axis=1)\ndf = df.drop('no_previous', axis=1)\n\ncolumns = [\"total\", \"speeding\", \"alcohol\", \"not_distracted\", \"ins_premium\", \"ins_losses\"]\ndf2 = pd.DataFrame(df.head(5).values, range(5), columns = columns)\nprint(df2)\nprint(df.head(5))\n","repo_name":"fyildirgan/practice","sub_path":"task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"22262254966","text":"import numpy as np\nimport os.path\nimport cv2\nimport os\nfrom os import walk\nimport pandas as pd\n\nfrom skimage.draw import ellipse\nfrom skimage.measure import label, regionprops, regionprops_table\nfrom skimage.transform import rotate\nfrom skimage import data, filters, measure, morphology\n\n\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nimport plotly.graph_objects as go\n\nfrom fractions import Fraction\nfrom matplotlib.ticker import NullFormatter\n\nimport time\n\nfrom PIL import Image, ImageEnhance\n\n\ndef binary_thresholding(vox,n,m): ##Create binary mask and labels - only labels are output\n\n\n \n ##Gaussian Blur to reduce noise\n vox = cv2.GaussianBlur(vox,(7,7),0)\n \n ##Filter salt and pepper noise\n count = 0\n lastMedian = vox\n median = cv2.medianBlur(vox, 3)\n while not np.array_equal(lastMedian, median):\n # get those pixels that gets zeroed out\n zeroed = np.invert(np.logical_and(median, vox))\n vox[zeroed] = 0\n\n count = count + 1\n if count > 50:\n break\n lastMedian = median\n median = cv2.medianBlur(vox, 3)\n\n\n\n vox = cv2.normalize(vox, None, alpha=.5, beta=1.5, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n vox = np.clip(vox, 0, 1)\n vox = (255*vox).astype(np.uint8)\n \n ##Determine Threshold and Erosion/Dilation polygon\n \n threshold = filters.threshold_otsu(vox) #Automatic\n #vox_threshold = cv2.adaptiveThreshold(vox,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\n #cv2.THRESH_BINARY,111,2)\n \n\n ##Threshold the Image and Erode/Dilate\n th, vox_threshold = cv2.threshold(vox, threshold, 255, cv2.THRESH_BINARY)\n kernel= np.ones((5,5), np.uint8)\n kernel_circle = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3),(-1,-1))\n vox_threshold = cv2.dilate(vox_threshold, kernel_circle, iterations=7)\n vox_threshold = cv2.erode(vox_threshold, kernel_circle, iterations=7)\n\n\n\n\n \n\n ##Find Contours\n contour,hier = 
cv2.findContours(vox_threshold,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) \n \n \n \n ##Fill Contours\n for contourIdx, cnt in enumerate(contour): \n cv2.drawContours(vox_threshold,[cnt],0,255,-1)\n \n \n \n labels = measure.label(vox_threshold)\n\n \n cv2.imwrite('C:/Users/Andrew/Desktop/Subimages Verify/Threshold/thresh'+str(n)+'_'+str(m)+'.png',vox_threshold)\n \n return labels\n\n\n\ndef gain_regionprops(regions, vox): ##gain basic data values from each label\n \n ##Create Empty Arrays\n pix_avg=[]\n pix_min=[]\n pix_max=[]\n\n ##Extract Basic Features\n props = regionprops_table(regions, properties=('centroid',\n 'area',\n 'perimeter',\n 'major_axis_length',\n 'minor_axis_length',\n 'eccentricity'))\n ##Convert to DataFrame\n table = pd.DataFrame(props)\n\n \n\n ##Find Pixel Values in Each Region\n for i in range(1, np.amax(regions)+1):\n locs = np.where(regions == i)\n\n pixels = vox[locs]\n \n pix_avg.append(np.average(pixels))\n pix_min.append(min(pixels))\n pix_max.append(max(pixels))\n\n ##Calculate Pixel Data\n table.loc[:,'pix_avg'] = pix_avg\n table.loc[:,'pix_max'] = pix_max\n table.loc[:,'pix_min'] = pix_min\n\n ##For now, filter out small areas with hardcoded value\n table = table[table['area']>50]\n m_area = int(table['area'].mean())\n table = table[table['area']>(m_area / 16)]\n\n \n\n return table\n\n\n\ndef data_calculation(table, feature_list): ##Advanced data, based on list input\n cols = []\n for feature in feature_list: ##Determine if the GvG DataFrame will contain data\n if 'avg' in feature:\n cols.append(feature)\n if not cols:\n cols = ['none'] \n \n\n \n gvg = pd.DataFrame(columns = cols, index=range(1)) ##Create GvG DataFrame\n for feature in feature_list: ##Calculate all necessary features\n \n if feature == 'aspect_ratio':\n aspect_ratio = table['minor_axis_length']/table['major_axis_length']\n table.loc[:,'aspect_ratio'] = aspect_ratio\n \n elif feature == 'perimeter_area_ratio':\n p_over_a = table['perimeter']/table['area']\n table.loc[:,'perimeter_area_ratio'] = p_over_a\n\n \n elif feature == 'shading':\n shading = table['pix_max'] - table['pix_min']\n table.loc[:,'shading'] = shading\n \n elif feature == 'avg_area':\n avg_area = table['area'].mean()\n gvg.loc[:,'avg_area'] = avg_area\n\n elif feature == 'avg_perimeter':\n avg_perimeter = table['perimeter'].mean()\n gvg.loc[:,'avg_perimeter'] = avg_perimeter\n \n elif feature == 'avg_major_axis_length':\n avg_major_axis_length = table['major_axis_length'].mean()\n gvg.loc[:,'avg_major_axis_length'] = avg_major_axis_length\n\n elif feature == 'avg_minor_axis_length':\n avg_minor_axis_length = table['minor_axis_length'].mean()\n gvg.loc[:,'avg_minor_axis_length'] = avg_minor_axis_length\n\n elif feature == 'avg_eccentricity':\n avg_eccentricity = table['eccentricity'].mean()\n gvg.loc[:,'avg_eccentricity'] = avg_eccentricity\n\n elif feature == 'avg_aspect_ratio':\n if 'aspect_ratio' in table.columns:\n avg_aspect_ratio = table['aspect_ratio'].mean() \n else:\n aspect_ratio = table['minor_axis_length']/table['major_axis_length']\n avg_aspect_ratio = aspect_ratio.mean()\n \n gvg.loc[:,'avg_aspect_ratio'] = avg_aspect_ratio\n\n elif feature == 'avg_perimeter_area_ratio':\n if 'perimeter_area_ratio' in table.columns:\n avg_perimeter_area_ratio = table['perimeter_area_ratio'].mean()\n else:\n p_over_a = table['perimeter']/table['area']\n avg_perimeter_area_ratio = p_over_a.mean()\n \n gvg.loc[:,'avg_perimeter_area_ratio'] = avg_perimeter_area_ratio\n\n elif feature == 'avg_shading':\n avg_shading = 
table['shading'].mean()\n gvg.loc[:,'avg_shading'] = avg_shading\n\n elif feature == 'avg_pix_avg':\n avg_pix_avg = table['pix_avg'].mean()\n gvg.loc[:,'avg_pix_avg'] = avg_pix_avg\n\n elif feature == 'avg_pix_min':\n avg_pix_min = table['pix_min'].mean()\n gvg.loc[:,'avg_pix_min'] = avg_pix_min\n\n elif feature == 'avg_pix_max':\n avg_pix_max = table['pix_max'].mean()\n gvg.loc[:,'avg_pix_max'] = avg_pix_max\n\n if not 'centroid' in feature_list:\n del table['centroid-1']\n del table['centroid-0']\n \n if not 'area' in feature_list:\n del table['area']\n\n if not 'perimeter' in feature_list:\n del table['perimeter']\n \n if not 'eccentricity' in feature_list:\n del table['eccentricity']\n\n if not 'major_axis_length' in feature_list:\n del table['major_axis_length']\n \n if not 'minor_axis_length' in feature_list:\n del table['minor_axis_length'] \n\n\n\n \n return table, gvg \n\n \n \ndef data_organization(vox_vals, GvG_vals, path, n, m): ##Serialize and save data for other subsystems\n \n ##If GvG_vals is empty, do no save it. If it is not,\n ##save it in the save input directory as vox_vals.\n if 'none' in GvG_vals:\n vox_vals.to_pickle(path + '/voxel_values'+str(n)+'_'+str(m))\n else:\n vox_vals.to_pickle(path + '/voxel_values'+str(n)+'_'+str(m))\n GvG_vals.to_pickle(path + '/gvg_values'+str(n)+'_'+str(m))\n \n return\n \ndef data_retrieve(n, m, featurepath, feature_string):\n\n gainfile_v = featurepath+'/voxel_values'+str(n)+'_'+str(m)\n gainfile_g = featurepath+'/gvg_values'+str(n)+'_'+str(m)\n \n\n if 'avg_' in feature_string:\n if not os.path.exists(gainfile_g):\n raise Exception('Required Grid vs. Grid data does not exist')\n quit()\n else:\n table = pd.read_pickle(gainfile_g)\n data = table._get_value(0,feature_string)\n else:\n if not os.path.exists(gainfile_v):\n raise Exception('Required Voxel Region data does not exist')\n quit()\n else:\n table = pd.read_pickle(gainfile_v)\n data = table[feature_string]\n \n \n return data \n\n \n\ndef save_data(n, m, data, path):\n \n data.to_csv(path+'/Save'+str(n)+'_'+str(m)+'.csv')\n\n return\n\ndef add_value_labels(ax, spacing):\n \"\"\"Add labels to the end of each bar in a bar chart.\n\n Arguments:\n ax (matplotlib.axes.Axes): The matplotlib object containing the axes\n of the plot to annotate.\n spacing (int): The distance between the labels and the bars.\n \"\"\"\n\n # For each bar: Place a label\n for rect in ax.patches:\n # Get X and Y placement of label from rect.\n y_value = rect.get_height()\n x_value = rect.get_x() + rect.get_width() / 2\n\n # Number of points between bar and label. 
Change to your liking.\n space = spacing\n # Vertical alignment for positive values\n va = 'bottom'\n\n # If value of bar is negative: Place label below bar\n if y_value < 0:\n # Invert space to place label below\n space *= -1\n # Vertically align label at top\n va = 'top'\n\n # Use Y value as label and format number with one decimal place\n label = \"{:.1f}\".format(y_value)\n\n # Create annotation\n ax.annotate(\n label, # Use `label` as label\n (x_value, y_value), # Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va) # Vertically align label differently for\n # positive and negative values.\n\n\n\n","repo_name":"madbrown1/Fractal-Eyes-v2","sub_path":"Feature Extraction For Processing/feFunc.py","file_name":"feFunc.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"26560191705","text":"\"\"\"\nUnit and regression test for the occupancy_fingerprinter package.\n\"\"\"\n\n# Import package, test suite, and other packages as needed\nimport sys\n\nimport pytest\n\nimport occupancy_fingerprinter\nfrom occupancy_fingerprinter import BindingSite\nfrom occupancy_fingerprinter import Grid\n\nimport numpy as np\nimport mdtraj as md\nimport h5py as h5\nimport os\nfrom pathlib import Path\n\ncwd = Path.cwd()\nmod_path = Path(__file__).parent\n\n\ndef test_occupancy_fingerprinter_imported():\n \"\"\"Sample test, will always pass so long as import statement worked.\"\"\"\n assert \"occupancy_fingerprinter\" in sys.modules\n\ndef test_binding_site_init():\n \"\"\"Test binding site init\"\"\"\n center = np.array([10.,10.,10.])\n r = 5.\n spacing = np.array([1., 1., 1.])\n b = BindingSite(center, r, spacing)\n grid_x, grid_y, grid_z = b._cal_grid_coordinates()\n assert (b._center == center).all()\n assert b._r == r \n assert (b._spacing == spacing).all()\n assert (b._counts == b.get_grid_counts()).all()\n assert (b._origin == b.get_origin()).all()\n assert (b._grid_x == grid_x).all()\n assert (b._grid_y == grid_y).all()\n assert (b._grid_z == grid_z).all()\n assert (b._upper_most_corner_crd == (b._center + ((b._counts - 1) * b._spacing)/2)).all()\n assert (b._upper_most_corner == (b._counts - 1)).all()\n assert (b._size == np.prod(b._counts))\n\ndef test_grid_init():\n traj_path = (mod_path / \"../data/CLONE0.xtc\").resolve()\n top_path = (mod_path / \"../data/prot_masses.pdb\").resolve()\n t = md.load(traj_path, top=top_path)\n t = t[:1]\n center = np.array([58., 73., 27.])\n r = 3.\n spacing = np.array([1., 1., 1.])\n g = Grid(t)\n assert g._n_sites == 0\n assert g._sites == {}\n g.add_binding_site(center, r, spacing)\n b = BindingSite(center,r,spacing)\n assert g._n_sites == 1\n assert (g._sites[0]._center == b._center).all()\n assert g._sites[0]._r == b._r\n assert (g._sites[0]._spacing == b._spacing).all()\n h5_path = (mod_path / \"../data/test.h5\").resolve()\n a = g.cal_fingerprint(h5_path, n_tasks=1, return_array=True)\n assert (a.sum() == 113)\n #check h5 file integrity\n with h5.File(h5_path, \"r\") as f:\n k = list(f.keys())\n assert k[0] == \"frames\"\n assert (f[k[0]] == a[0]).all()\n #test process_trajectory function\n p = occupancy_fingerprinter.process_trajectory(t,g._sites,g._atom_radii)\n assert (a == p).all()\n #test writing dx files\n dx_path = (mod_path / \"../data/binding_site_test.dx\").resolve()\n c = 
a[0].reshape(tuple(b._counts))\n b.write(dx_path, c)\n assert os.path.exists(dx_path)\n\ndef test_cal_fingerprint():\n traj_path = (mod_path / \"../data/CLONE0.xtc\").resolve()\n top_path = (mod_path / \"../data/prot_masses.pdb\").resolve()\n t = md.load(traj_path, top=top_path)\n t = t[:1]\n center = np.array([58., 73., 27.])\n r = 3.\n spacing = np.array([1., 1., 1.])\n g = Grid(t)\n g.add_binding_site(center, r, spacing)\n g.cal_fingerprint(None, n_tasks=0)\n g.cal_fingerprint(None, n_tasks=999)\n\n\n\n\n\n\n","repo_name":"jimtufts/occupancy_fingerprinter","sub_path":"occupancy_fingerprinter/tests/test_occupancy_fingerprinter.py","file_name":"test_occupancy_fingerprinter.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"4045725687","text":"# Partition Equal Subset Sum\n# Time: O(sum * n) - Pseudopolynomial\n# Space: O(sum * n). Can reduce to O(sum) since you only need 2 columns at a time of DP table\n# Topics: Array, Dynamic Programming\n# Difficulty: Medium\n# Notes: similar to knapsack problem. Build DP table bottom up\n\nfrom typing import List\n\nclass Solution:\n def canPartition(self, nums: List[int]) -> bool:\n if len(nums) <= 1:\n return False\n \n if sum(nums) % 2 == 1:\n return False\n \n target = sum(nums) // 2\n \n dp_table = [[False] * (len(nums) + 1) for _ in range(target + 1)]\n # initialize base cases of dp table\n for j in range(len(nums) + 1):\n dp_table[0][j] = True\n \n # build up dp table bottom up\n for j in range(1, len(nums) + 1):\n curr_int = nums[j - 1]\n \n for i in range(target, -1, -1):\n if dp_table[i][j] is False: # copy from previous column (but don't overwrite if already True)\n dp_table[i][j] = dp_table[i][j - 1]\n \n if dp_table[i][j] is True and i + curr_int <= target:\n dp_table[i + curr_int][j] = True\n \n return dp_table[-1][-1]\n","repo_name":"Gkao03/LeetcodeDoc","sub_path":"dynamic_programming/416_PartitionEqualSubsetSum.py","file_name":"416_PartitionEqualSubsetSum.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"35961402934","text":"import sys\ninput = sys.stdin.readline\n\ndata = [0] * 10\n\nn = 1 \nfor i in range(3):\n a = int(input())\n n *= a\n#print(n)\nwhile n > 0:\n index = int(n % 10)\n #print('print index',index)\n data[index] +=1\n n //= 10\n\nfor i in range(10):\n print(data[i])","repo_name":"dadafly1244/algorithmstudy","sub_path":"step/4/2577.py","file_name":"2577.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"7070845809","text":"import requests\nfrom flask import render_template, redirect, request, session, url_for, jsonify\nfrom sqlalchemy import *\nfrom passlib.apps import custom_app_context as pwd_context\n\n# local modules\nfrom helpers import *\nfrom config import *\nfrom sql_tables import *\n\n\n@app.route(\"/getFriendList\", methods=[\"GET\", \"POST\"])\ndef getFriendList():\n \"\"\" Get a JSON object with the names and usernames of friends \"\"\"\n connection = engine.connect()\n\n friendList = []\n flTable = getFlTable(db, session[\"user_id\"])\n\n s = select([flTable])\n result = connection.execute(s)\n\n for row in result:\n friendDict = {}\n friendDict[\"firstName\"] = getUserFirstName(row[1], db)\n friendDict[\"lastName\"] = getUserLastName(row[1], db)\n friendDict[\"userName\"] = getUser(row[1], db)\n 
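# row[1] holds the friend's user id in this user's friend-list table; it is resolved to display fields above and also returned raw below.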
friendDict[\"userID\"] = row[1]\n friendList.append(friendDict)\n\n connection.close()\n\n return jsonify(friendList = friendList)\n\n# ************************************************************* #\n\n@app.route(\"/getFriendRequests\", methods=[\"GET\", \"POST\"])\ndef getFriendRequests():\n \"\"\" Return a JSON object with pending friend requests the current \n user has received \"\"\"\n\n # shorthand variable\n userID = session[\"user_id\"]\n\n # get table prototypes\n frTable = getFrTable(db, userID)\n usrTable = getUsrTable(db)\n\n # connect to database\n connection = engine.connect()\n\n # get user ids and store in array\n idList = []\n s = select([frTable.c.other_user_id]).where(frTable.c.received == 1)\n result = connection.execute(s)\n for row in result:\n idList.append(row[frTable.c.other_user_id])\n\n # get usernames and store in array\n usrNameList = []\n for item in idList:\n s = select([usrTable.c.user]).where(usrTable.c.sql_id == item)\n result = connection.execute(s)\n result = result.fetchone()\n usrNameList.append(\"@\" + result.user)\n\n connection.close()\n\n return jsonify(idList=idList, usrNameList=usrNameList)\n\n# ************************************************************* #\n\n@app.route(\"/getGenres\", methods=[\"GET\", \"POST\"])\ndef getGenres():\n \"\"\" Returns a JSON object with TMDB genres and corresponding key\n values \"\"\"\n\n query = (\"https://api.themoviedb.org/3/genre/movie/list?api_key=\"\n + API_KEY\n + \"&language=en-US\")\n\n r = requests.get(query)\n r = r.json()\n\n return jsonify(r[\"genres\"])\n\n# ************************************************************* #\n\n@app.route(\"/getMnList\", methods=[\"GET\", \"POST\"])\ndef getMnList():\n \"\"\" Get a user's list of movie night IDs and timestamps \"\"\"\n\n # get table prototypes\n mnTable = getMnTable(db)\n userMnTable = getUserMnTable(db, session[\"user_id\"])\n\n # connect to database\n connection = engine.connect()\n\n # get user's movie night ids and add results to array\n s = select([userMnTable.c.mn_id])\n result = connection.execute(s)\n if not result:\n connection.close()\n return jsonify(\n success=False,\n returnMessage=(\"Error when retreiving user's \"\n + \"list of movie nights\")\n )\n userMnIdList = []\n for row in result:\n userMnIdList.append(row[userMnTable.c.mn_id])\n\n # get user's movie night timestamps and add results to array\n userMnDtList = []\n for mnID in userMnIdList:\n s = (select([mnTable.c.date_created])\n .where(mnTable.c.sql_id == mnID))\n result = connection.execute(s)\n if not result:\n connection.close()\n return jsonify(\n success=False,\n returnMessage=(\"Error when retreiving user's \"\n + \"list of movie nights\")\n )\n result = result.fetchone()\n userMnDtList.append(result.date_created)\n\n # close database connection\n connection.close()\n\n # return JSON object with lists\n return jsonify(mnIdList=userMnIdList, mnDateTimeList=userMnDtList)\n\n# ************************************************************* #\n\n@app.route(\"/getUserMnPosition\", methods=[\"GET\", \"POST\"])\ndef getUserMnPosition():\n \"\"\" Get a user's position in a given movie night \"\"\"\n\n # ensure that arguments were received\n if not request.args.get(\"mnID\"):\n raise RuntimeError(\"missing script argument 'mnID'\")\n\n # shorthand variable\n mnID = request.args.get(\"mnID\")\n\n # get table prototype\n mnUsersTable = getMnUsersTable(db, mnID)\n\n # connect to database\n connection = engine.connect()\n\n # get user's position info\n s = (\n select([mnUsersTable])\n 
.where(mnUsersTable.c.user_id == session[\"user_id\"])\n )\n result = connection.execute(s)\n result = result.fetchone()\n\n # return JSON object\n return jsonify(page=result.user_page, depth=result.user_position)\n\n# ************************************************************* #\n\n@app.route(\"/getMatches\", methods=[\"GET\", \"POST\"])\ndef getMatches():\n \"\"\" Get the matches for a given movie night \"\"\"\n\n # ensure that arguments were received\n if not request.args.get(\"mnID\"):\n raise RuntimeError(\"missing script argument 'mnID'\")\n\n # shorthand variables\n mnID = request.args.get(\"mnID\")\n userID = session[\"user_id\"]\n\n # get table prototypes\n mnResultsTable = getMnResultsTable(db, mnID)\n mnUsersTable = getMnUsersTable(db, mnID)\n\n # connect to database\n connection = engine.connect()\n\n # ensure that logged in user is part of movie night\n s = select([mnUsersTable]).where(mnUsersTable.c.user_id == userID)\n result = connection.execute(s)\n result = result.fetchone()\n if not result:\n return jsonify(success=False,\n returnMessage=(\"Logged in user not in \"\n + \"requested movie night\")\n )\n\n # query mnResultsTable for matches\n s = (select([mnResultsTable.c.movie_id])\n .where(mnResultsTable.c.match_status > 0))\n result = connection.execute(s)\n\n # create empty list to use as a return JSON\n matchInfoList = []\n\n # for each result\n for row in result:\n # query TMDB for movie data\n movieInfo = getMovieInfo(API_KEY, row[mnResultsTable.c.movie_id]) \n # append result to matchInfoList\n matchInfoList.append(movieInfo)\n\n\n # return JSON object\n returnMessage = \"Successfully retrieved matches for current movie night.\"\n return jsonify(success=True,\n returnMessage=returnMessage,\n matches=matchInfoList)\n","repo_name":"Jacob-Hunt/FlickFinder","sub_path":"JSON_request_scripts.py","file_name":"JSON_request_scripts.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"43005816224","text":"# -*- coding: utf-8 -*-\n\"\"\"Shared functionality for JSON based output modules.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport abc\nimport json\n\nfrom plaso.lib import errors\nfrom plaso.lib import py2to3\nfrom plaso.output import interface\nfrom plaso.serializer import json_serializer\n\n\nclass SharedJSONOutputModule(interface.LinearOutputModule):\n \"\"\"Shared functionality for a JSON output module.\"\"\"\n\n _JSON_SERIALIZER = json_serializer.JSONAttributeContainerSerializer\n\n def _WriteSerialized(self, event, event_data, event_tag):\n \"\"\"Writes an event, event data and event tag to serialized form.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_tag (EventTag): event tag.\n\n Returns:\n str: A JSON string containing the serialized form.\n \"\"\"\n json_dict = self._WriteSerializedDict(event, event_data, event_tag)\n\n json_string = json.dumps(json_dict, sort_keys=True)\n # json.dumps() returns an ascii-encoded byte string in Python 2.\n if py2to3.PY_2:\n json_string = json_string.decode('ascii')\n\n return json_string\n\n def _WriteSerializedDict(self, event, event_data, event_tag):\n \"\"\"Writes an event, event data and event tag to serialized form.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_tag (EventTag): event tag.\n\n Returns:\n dict[str, object]: JSON serialized objects.\n \"\"\"\n event_data_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event_data)\n 
del event_data_json_dict['__container_type__']\n del event_data_json_dict['__type__']\n\n inode = event_data_json_dict.get('inode', None)\n if inode is None:\n event_data_json_dict['inode'] = 0\n\n try:\n message, _ = self._output_mediator.GetFormattedMessages(event_data)\n event_data_json_dict['message'] = message\n except errors.WrongFormatter:\n pass\n\n event_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event)\n event_json_dict['__container_type__'] = 'event'\n\n event_json_dict.update(event_data_json_dict)\n\n if event_tag:\n event_tag_json_dict = self._JSON_SERIALIZER.WriteSerializedDict(event_tag)\n\n event_json_dict['tag'] = event_tag_json_dict\n\n return event_json_dict\n\n @abc.abstractmethod\n def WriteEventBody(self, event, event_data, event_tag):\n \"\"\"Writes event values to the output.\n\n Args:\n event (EventObject): event.\n event_data (EventData): event data.\n event_tag (EventTag): event tag.\n \"\"\"\n","repo_name":"rich-murphey/plaso","sub_path":"plaso/output/shared_json.py","file_name":"shared_json.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"74944127445","text":"import sys\nfrom PyQt4.QtGui import *\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport collections\n\n\nclass Grafica(QWidget):\n\n valores = (20, 35, 30, 35, 27) # valores de ejemplo\n\n\n def __init__(self, parent=None):\n super(Grafica, self).__init__()\n\n # FIGUREANDO\n self.ordenadas = np.arange(5)\n self.width = 1 # the width of the bars\n self.figure, self.ax = plt.subplots()\n #self.figure = plt.figure()\n self.line = self.ax.bar(self.ordenadas, self.valores, self.width, color='g')\n #self.line, = plt.plot(self.data)\n plt.ion() # animate\n\n N = 10\n self.xs = collections.deque(maxlen=N)\n self.ys = collections.deque(maxlen=N)\n self.xs.append(0)\n self.ys.append(0)\n\n self.ax = self.figure.add_subplot(111)\n self.ax.hold(False)\n self.ax.set_ylim([0, 360])\n\n self.canvas = FigureCanvas(self.figure)\n self.toolbar = NavigationToolbar(self.canvas, self)\n self.toolbar.hide()\n self.canvas.show()\n\n # set the layout\n self.layout = QVBoxLayout()\n self.layout.addWidget(self.toolbar)\n self.layout.addWidget(self.canvas)\n self.setLayout(self.layout)\n\n\n def add_sample(self, valores):\n self.valores = valores\n self.line = self.ax.bar(self.ordenadas, self.valores, self.width, color='g')\n self.canvas.draw() # update the plot\n","repo_name":"cabama/PatatitasRobot","sub_path":"teleoperacion/src/lib_windows/modulo_barras.py","file_name":"modulo_barras.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"13554953698","text":"from fruit.buffers.table import TLOLookupTable, LinearLookupTable\nfrom fruit.learners.base import Learner\nimport numpy as np\n\nfrom fruit.monitor.monitor import AgentMonitor\nfrom fruit.utils.annealer import Annealer\n\ntable = None\n\n\nclass MOQLearner(Learner):\n def __init__(self, agent, name, environment, network, global_dict, report_frequency,\n batch_size=5, discounted_factor=0.9, learning_rate=0.9, traces_factor=0.9,\n epsilon_annealing_start=0.9, epsilon_annealing_end=0,\n load_model_path=None, thresholds=None, target_reward=None, is_linear=False,\n using_e_greedy=True, 
async_update_steps=1,\n ):\n super().__init__(agent=agent, name=name, environment=environment, network=network, global_dict=global_dict,\n report_frequency=report_frequency)\n\n self.load_model_path = load_model_path\n self.target_reward = target_reward\n self.is_linear = is_linear\n self.discounted_factor = discounted_factor\n self.traces_factor = traces_factor\n self.using_e_greedy = using_e_greedy\n self.async_update_steps = async_update_steps\n\n self.num_of_objectives = environment.get_number_of_objectives()\n self.init_q_values = [0.] * self.num_of_objectives\n if thresholds is None:\n if not is_linear:\n self.thresholds = [0.] * (self.num_of_objectives - 1)\n else:\n self.thresholds = [1./self.num_of_objectives] * self.num_of_objectives\n else:\n self.thresholds = thresholds\n\n global table\n with global_dict[AgentMonitor.Q_LOCK]:\n if table is None:\n if not is_linear:\n table = TLOLookupTable(environment=environment, init_value=0., thresholds=self.thresholds)\n else:\n table = LinearLookupTable(environment=environment, init_value=0., thresholds=self.thresholds)\n\n self.table = table\n self.batch_size = batch_size\n self.epsilon_annealer = Annealer(epsilon_annealing_start, epsilon_annealing_end, self.agent.max_training_steps)\n self.current_learning_rate = learning_rate\n self.current_epsilon = epsilon_annealing_start\n self.converged = False\n if self.load_model_path is not None:\n self.load_model()\n\n @staticmethod\n def get_default_number_of_learners():\n return 1\n\n def load_model(self):\n self.table.load_value_function(self.load_model_path)\n print(\"Load values:\")\n self.table.print_values()\n\n def save_model(self, file_name):\n print(\"Save values:\")\n self.table.print_values()\n self.table.save_value_function(file_name)\n\n def get_action(self, state):\n if self.using_e_greedy:\n if np.random.uniform(0, 1) <= self.current_epsilon:\n e_greedy = np.random.randint(self.num_actions)\n return e_greedy\n else:\n return self.table.select_greedy_action(state)\n else:\n return self.table.select_greedy_action(state)\n\n def report(self, reward):\n print(self.name, 'Episode Count:', self.eps_count, 'Episode reward:', reward, 'Steps:',\n self.environment.get_current_steps(), 'Step count:', self.step_count, 'Learning rate:',\n self.current_learning_rate, 'Epsilon:', self.current_epsilon, 'Thresholds:', self.thresholds)\n\n # Testing purpose\n if self.target_reward is not None and self.thresholds is not None:\n backup_epsilon = self.current_epsilon\n self.current_epsilon = 0\n greedy_reward = self.run_episode()\n self.global_dict[AgentMonitor.Q_ADD_REWARD](greedy_reward, self.environment.get_current_steps())\n self.current_epsilon = backup_epsilon\n converged = True\n for i in range(len(greedy_reward)):\n if greedy_reward[i] != self.target_reward[i]:\n converged = False\n break\n if converged:\n print(\"Converged\")\n self.converged = True\n\n def update(self, state, action, reward, next_state, terminal):\n self.step_count += 1\n self.global_dict['counter'] += 1\n\n if not self.testing:\n if self.step_count % self.async_update_steps == 0:\n if not terminal:\n greedy = self.get_action(state)\n self.table.calculate_td_errors(action, state, greedy, next_state, self.discounted_factor, reward)\n else:\n self.table.calculate_terminal_td_errors(action, state, self.discounted_factor, reward)\n self.table.update_td_errors(action, state, 1.0, self.current_learning_rate)\n\n self.current_epsilon = 
self.epsilon_annealer.anneal(self.global_dict[AgentMonitor.Q_GLOBAL_STEPS])\n","repo_name":"garlicdevs/Fruit-API","sub_path":"fruit/learners/mo_q_learning.py","file_name":"mo_q_learning.py","file_ext":"py","file_size_in_byte":4941,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"30"} +{"seq_id":"41698882244","text":"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass CharacterEmbedding(nn.Module):\n \n def __init__(self, num_embeddings, embedding_dim=32, n_filters=200, \n kernel_size=5, padding=2):\n super(CharacterEmbedding, self).__init__()\n \n self.num_embeddings = num_embeddings\n self.embedding_dim = embedding_dim\n self.kernel_size = (1, kernel_size)\n self.padding = (0, padding)\n \n self.char_embedding = nn.Embedding(num_embeddings=num_embeddings,\n embedding_dim=embedding_dim)\n \n self.char_conv = nn.Conv2d(in_channels=embedding_dim, \n out_channels=n_filters,\n kernel_size=self.kernel_size,\n padding=self.padding)\n \n def forward(self, x):\n\n # embedding layer only supports 2D inputs for now. \n # reshape input tensor first, then pass it through the layer.\n batch_size = x.shape[0]\n word_length = x.shape[-1]\n \n x = x.view(batch_size, -1)\n x = self.char_embedding(x)\n x = x.view(batch_size, -1, word_length, self.embedding_dim)\n \n # embedding dim of characters is number of channels of conv layer\n x = x.permute(0, 3, 1, 2)\n x = F.relu(self.char_conv(x))\n x = x.permute(0, 2, 3, 1)\n \n # max pooling over word length to have final tensor\n x, _ = torch.max(x, dim=2)\n\n x = F.dropout(x, p=0.05, training=self.training)\n\n return x","repo_name":"marquezo/qanet-impl","sub_path":"qanet/character_embedding.py","file_name":"character_embedding.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"30"} +{"seq_id":"8171069421","text":"\nfrom datetime import datetime\nimport pandas as pd\n\nimport praw\nfrom praw.models import MoreComments\n\nimport pymongo\nfrom pymongo import MongoClient\n\ndef reddit_scape():\n # initialize Reddit scraping API\n reddit = praw.Reddit(client_id='y_Npzkw006QBHn5PdyOOxw', client_secret='YvlY1LRvjm7G3MXz1QQ4k4aMvr3q3w', user_agent='scraper')\n\n # subreddits to scrape\n subreddits = ['TSLA', 'Coinbase', 'Cryptocurrency', 'Crypto', 'Trading', 'robinhood', 'Finance', 'Bloomberg', 'Stocks', 'Investing']\n \n posts = []\n \n # loop through subreddits\n for i in subreddits:\n ml_subreddit = reddit.subreddit(i)\n # extract subreddit content\n for post in ml_subreddit.hot(limit=1200):\n posts.append([post.title, post.score, post.id, post.subreddit, post.num_comments, post.selftext, post.created_utc])\n \n # create a dataframe of all contents\n posts = pd.DataFrame(posts,columns=['title', 'score', 'id', 'subreddit', 'num_comments', 'body', 'created'])\n\n # extract datetime column\n posts['created'] = posts['created'].apply(datetime.utcfromtimestamp).dt.date\n posts['created'] = pd.to_datetime(posts['created'])\n\n # sort by date create\n posts = posts.sort_values(by='created', ascending=False)\n\n # filter by date\n posts = posts[posts['created'] >= \"2022-05-01\"]\n \n # get comments on each subreddit topic\n data = []\n j = 0\n for i in posts['id'].values:\n submission = reddit.submission(id=str(i))\n for top_level_comment in submission.comments:\n if isinstance(top_level_comment, MoreComments):\n continue\n data.append(top_level_comment.body)\n j+=1\n print(j)\n\n # add title to all comments\n data 
= list(posts['title'].values) + data\n\n    # convert to dictionary\n    all_data = {}\n    all_data['content'] = data\n    data_ = [all_data]  # wrap in a list: insert_many expects a list of documents\n\n    # uri (uniform resource identifier) defines the connection parameters \n    uri = 'mongodb+srv://josepholaide:1234@cluster0.bqed12j.mongodb.net/?retryWrites=true&w=majority'\n    # start client to connect to MongoDB server \n    client = MongoClient(uri)\n\n    try:\n        # Show existing database names\n        client.list_database_names()\n        # Set database name to work with. If it doesn't exist, it will be created as soon as one document is added.\n        db = client.finance\n        # Set the collection to work with\n        collection = db.news.reddit\n        collection.insert_many(data_)\n    except:\n        print('Mongodb not connected')\n\nif __name__ == '__main__':\n    reddit_scape()","repo_name":"josepholaide/KfaaS","sub_path":"reddits_scape/reddits_scape.py","file_name":"reddits_scape.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12762899418","text":"import numpy as np\nif __name__ == '__main__':\n\tm = int(input('Enter number of rows\\n'))\n\ta = np.zeros((m,m)) \n\tfor i in range(m):\n\t\tfor j in range(m):\n\t\t\ta[i][j] = float(input(f'Enter a{i+1}{j+1}\\n'))\n\ndef det(l):\n\tsumm = 0\n\tif len(l) == 1:\n\t\treturn l[0,0]\n\tif len(l) > 1:\n\t\tfor i in range(len(l)):\n\t\t\tb = np.delete(l, 0, 0)\n\t\t\tc = np.delete(b, i, 1)\n\t\t\ty = det(c)*(-1)**i*l[0,i]\n\t\t\tsumm += y\n\t\treturn summ\n\nif __name__ == '__main__':\n\tprint(det(a))","repo_name":"Arnav-17/Linear-Algebra","sub_path":"Determinant.py","file_name":"Determinant.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"32212672570","text":"def insertion_sort(arr):\n    n = len(arr)\n    for i in range (1, n):\n        j = i\n        while j > 0 and arr[j] < arr[j-1]:\n            temp = arr[j-1]\n            arr[j-1] = arr[j]\n            arr[j] = temp\n            j = j-1\n    return arr\n\nprint(\"INSERTION SORT ALGORITHM\")\nis_stop = True\nnum_arr = []\nwhile is_stop:\n    num_input = input(\"Write list of number. 
Input X to stop: \")\n if num_input == 'X' or num_input == 'x':\n is_stop == False\n break\n num_arr.append(int(num_input))\n\nsorted_arr = insertion_sort(num_arr)\nprint(\"Sorted array: {}\".format(sorted_arr))","repo_name":"hakimnazry24/Sorting-Algorithm-using-Python","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"71989168405","text":"import json\nimport pytest\n\nfrom rest_framework import status\n\npytestmark = pytest.mark.django_db\n\n\ndef test_encerrar_acesso_usuario_unidade(\n jwt_authenticated_client_u,\n usuario_3,\n dre,\n):\n payload = {\n 'unidade_suporte_uuid': f'{dre.uuid}'\n }\n\n response = jwt_authenticated_client_u.post(\n f\"/api/usuarios/{usuario_3.username}/encerrar-acesso-suporte/\",\n data=json.dumps(payload),\n content_type='application/json'\n )\n\n assert response.status_code == status.HTTP_200_OK\n\n\ndef test_encerrar_acesso_usuario_unidade_sem_passar_codigo_eol(\n jwt_authenticated_client_u,\n usuario_3,\n):\n payload = {\n }\n\n response = jwt_authenticated_client_u.post(\n f\"/api/usuarios/{usuario_3.username}/encerrar-acesso-suporte/\",\n data=json.dumps(payload),\n content_type='application/json'\n )\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n\n result = json.loads(response.content)\n assert result == \"Campo 'unidade_suporte_uuid' não encontrado no payload.\"\n\n","repo_name":"prefeiturasp/SME-PTRF-BackEnd","sub_path":"sme_ptrf_apps/users/tests/test_api_user/test_api_encerrar_acesso_suporte_unidade.py","file_name":"test_api_encerrar_acesso_suporte_unidade.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"37033262857","text":"from typing import List\n\n\nclass Solution:\n def rotate(self, nums: List[int], k: int) -> None:\n \"\"\"\n Do not return anything, modyfy nums in-place instead.\n \"\"\"\n k = k % len(nums)\n nums[:] = nums[-k:] + nums[:-k]\n return nums\n\n\nif __name__ == '__main__':\n for i in range(int(input())):\n arr = [int(x) for x in input().split()]\n k = int(input())\n\n obj = Solution()\n print(obj.rotate(arr, k))\n","repo_name":"shaonsust/Leetcode","sub_path":"189-rotate_array.py","file_name":"189-rotate_array.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"22139956812","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\n# 입력받기\r\nn, m, k = list(map(int, input().strip().split()))\r\nboard = [list(input().strip()) for _ in range(n)]\r\nword = input().strip()\r\n\r\ndxs = [-1,0,1,0]\r\ndys = [0,1,0,-1]\r\ndp = [[[-1] * len(word) for i in range(m)] for i in range(n)]\r\n\r\ndef in_range(x, y):\r\n return 0 <= x < n and 0 <= y < m\r\n\r\ndef can_go(x, y, cur):\r\n return in_range(x, y) and board[x][y] == word[cur]\r\n \r\ndef dfs(x, y, cur):\r\n global answer\r\n\r\n # 이미 방문한 곳인 경우\r\n if dp[x][y][cur] != -1:\r\n return dp[x][y][cur]\r\n \r\n # 단어를 완성한 경우\r\n if cur == len(word) - 1:\r\n return 1\r\n \r\n # 필요한 문자가 아닌 경우\r\n if board[x][y] != word[cur]:\r\n return 0\r\n\t\r\n # 상하좌우 k거리 떨어진 곳 전부 탐색\r\n cnt = 0\r\n for i in range(1, k + 1):\r\n for dx, dy in zip(dxs, dys):\r\n nx = x + i * dx\r\n ny = y + i * dy\r\n if can_go(nx, ny, cur + 1):\r\n cnt += dfs(nx, ny, cur + 1)\r\n dp[x][y][cur] = cnt\r\n\r\n return dp[x][y][cur]\r\n\r\nanswer = 
0\r\nfor i in range(n):\r\n for j in range(m):\r\n if board[i][j] == word[0]:\r\n answer += dfs(i, j, 0)\r\n\r\nprint(answer)","repo_name":"only-juun/Algo_problem_solving","sub_path":"백준/Gold/2186. 문자판/문자판.py","file_name":"문자판.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"35026708628","text":"import json, collections\n\ndef ingest01(file='../_work/QC_template.json'):\n ee = json.load( open( file ) )\n ds = ee['datasets']\n cc = collections.defaultdict( lambda :collections.defaultdict(set) )\n oo = open( 'QC_template_flist_March2021.txt', 'w' )\n oo2 = open( 'c3s34g_pids_qcTest_March2021.txt', 'w' )\n for k,rec in ds.items():\n oo.write( '\\t'.join( ['>',k,rec['dset_id']] ) + '\\n' )\n era,mip,inst,source,expt,variant,table,var,grid, version = rec['dset_id'].split('.')\n cc[ (era,mip,inst,source,expt) ][variant].add( (var,table) )\n oo2.write( ','.join( [rec['dset_id'],k] ) + '\\n' )\n for kk,rr in rec['files'].items():\n oo.write( '\\t'.join( ['+',kk,rr['filename']] ) + '\\n' )\n ks = [x for x,v in cc.items() if len( v.keys() ) > 1]\n for k in sorted(ks):\n this = cc[k]\n ss = set()\n for ki,i in this.items():\n for s in i:\n ss.add(s)\n kks = sorted( list( this.keys() ), key=lambda x: len(this[x]) )\n if all( [x in ss for x in [('vas','Amon'),('uas','Amon')]] ):\n if not all( [x in this[kks[-1]] for x in [('vas','Amon'),('uas','Amon')]] ):\n msg = 'u/vas split'\n else:\n msg = ''\n print ('.'.join(k),'|'.join( ['%s (%s)' % (kk,len(vv)) for kk,vv in cc[k].items()] ), msg )\n ##for kk in kks:\n ##print ('--',kk,this[kk])\n oo.close()\n oo2.close()\n print (len(ks))\n\ningest01(file ='../_work/QC_template_v5_2021-03-25.json')\n","repo_name":"cp4cds/cmip6_range_check_2","sub_path":"ingest_scripts/qc_template.py","file_name":"qc_template.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39285704933","text":"from sklearn.externals import joblib\n\n\nclass SentimentClassifier(object):\n def __init__(self):\n self.model = joblib.load(\"./LemmatizedTfidfLinearSVCTextSentiment.pkl\")\n self.classes_dict = {0: \"негативный\", 1: \"позитивный\", -1: \"ошибка предсказания\"}\n\n @staticmethod\n def get_probability_words(probability):\n if probability < 0.55:\n return \"нейтральный или неуверенно\"\n if probability < 0.7:\n return \"возможно\"\n if probability > 0.95:\n return \"определенно\"\n else:\n return \"\"\n\n def predict_text(self, text):\n try:\n return self.model.predict([text])[0],\\\n self.model.predict_proba([text])[0].max()\n except:\n print('ошибка предсказания')\n return -1, 0.8\n\n def predict_list(self, list_of_texts):\n try:\n return self.model.predict(list_of_texts),\\\n self.model.predict_proba(list_of_texts)\n except:\n print('ошибка предсказания')\n return None\n\n def get_prediction_message(self, text):\n prediction = self.predict_text(text)\n class_prediction = prediction[0]\n prediction_probability = prediction[1]\n return self.get_probability_words(prediction_probability) + \" \" + self.classes_dict[class_prediction]","repo_name":"dkulemin/MLDA_course","sub_path":"6. 
Final project/7.4 Final demo/sentiment_classifier.py","file_name":"sentiment_classifier.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"17605499204","text":"\"\"\"\nEscribe la función para averiguar la edad de un contacto sabiendo tan solo el año de nacimiento (utiliza datetime y\nstrptime, no está relacionado directamente con clases)\n\"\"\"\n\nimport datetime\n\n\nclass persona:\n def __init__(self, name, birthday):\n self.name = name\n self.birthday = birthday\n\n def age(self):\n today = datetime.datetime.now()\n age_number = today - self.birthday\n print(\"{} tiene {} años\".format(self.name, int(age_number.days / 365)))\n\n\ndef main():\n birthday = datetime.datetime.strptime(input(\"Dime tu fecha de cumpleaños[dd/mm/yyyy]\"), \"%d/%m/%Y\")\n raul = persona(\"raul\", birthday)\n print(raul.age())\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"raulAcero/aprendiendo-python","sub_path":"clases_/averiguar_edad.py","file_name":"averiguar_edad.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"25365573630","text":"f=open('lab.txt','w')\r\nwhile True:\r\n n=input('Enter line: ')\r\n f.write(n+'\\n')\r\n ch=input('Do you want to continue? ')\r\n if ch=='y':\r\n continue\r\n elif ch=='n':\r\n break\r\n else:\r\n print('Invalid choice')\r\nf.close()\r\nf=open('lab.txt','r')\r\nwhile True:\r\n f_r=f.readline()\r\n if f_r=='':\r\n break\r\n else:\r\n f_l=f_r.split()\r\n for i in f_l:\r\n print(i,end='#')\r\n","repo_name":"ok-ano0s/school_python","sub_path":"3. Files/7. Txt File - 1.py","file_name":"7. Txt File - 1.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"17181786545","text":"from email.policy import default\nfrom rest_framework import serializers\n\nfrom apps.hotel.models import Hotel\nfrom .models import Comment, Dislike, Like\nfrom django.db.models import Avg\n\n\nclass CommentSerializerSet(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(default=serializers.CurrentUserDefault(), source='user.username')\n \n def validate(self, attrs):\n user = self.context.get('request').user\n attrs['user'] = user\n return attrs\n \n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(default=serializers.CurrentUserDefault(), source='user.username')\n\n class Meta:\n model = Comment\n fields = ('id','user','rating', 'good_review', 'bad_review', 'created_at')\n\n def validate(self, attrs):\n user = self.context.get('request').user\n attrs['user'] = user\n return attrs\n\n def to_representation(self, instance):\n representation = super().to_representation(instance)\n representation['likes'] = instance.likes.all().count()\n representation['dislike'] = instance.dislikes.all().count()\n return representation\n\n\nclass CommentCRUDSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(\n default=serializers.CurrentUserDefault(),\n source='user.username'\n )\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\n def create(self, validated_data):\n user = self.context.get('request').user\n hotel = self.context.get('request').data.get('hotel')\n comment_ = Comment.objects.filter(user=user, hotel=hotel).first()\n if comment_:\n raise 
serializers.ValidationError('Вы уже комментировали!')\n comment = Comment.objects.create(**validated_data)\n return comment\n\n\nclass CommentDelUpdSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(\n default=serializers.CurrentUserDefault(),\n source='user.username'\n )\n class Meta:\n model = Comment\n fields = '__all__'\n\n\n def delete(self):\n user = self.context.get('request').user\n comment = Comment.objects.filter(user=user).first()\n if comment:\n comment.delete()\n else:\n raise serializers.ValidationError('Вы еще не комментировали!')\n\n def update(self, instance: Comment, validated_data):\n instance.good_review = validated_data.get('good_review', instance.good_review) \n instance.bad_review = validated_data.get('bad_review', instance.bad_review)\n instance.staff = validated_data.get('staff', instance.staff)\n instance.comfort = validated_data.get('comfort', instance.comfort)\n instance.purity = validated_data.get('purity', instance.purity)\n instance.location = validated_data.get('location', instance.location)\n instance.facilities = validated_data.get('facilities', instance.facilities) \n instance.price_quality_ratio = validated_data.get('price_quality_ratio', instance.price_quality_ratio)\n\n instance.save()\n return instance\n\nclass CurrentPostDefault:\n requires_context = True\n\n def __call__(self, serializer_field):\n try:\n test_ = serializer_field.context['pk'] \n com = Comment.objects.get(pk=test_)\n return com\n except:\n raise serializers.ValidationError('Комментария не существует')\n\n\nclass LikeSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(\n default=serializers.CurrentUserDefault(),\n source='user.username'\n )\n\n comment = serializers.HiddenField(default=CurrentPostDefault())\n\n class Meta:\n model = Like\n fields = '__all__'\n\n def create(self, validated_data):\n # validated_data['comment'] = Comment.objects.get(pk=5)\n comment = self.context.get('request').data.get('comment')\n user = self.context.get('request').user\n dislike = Dislike.objects.filter(user=user, comment=comment).first()\n if dislike:\n dislike.delete()\n like = Like.objects.filter(user=user, comment=comment).first()\n if like:\n raise serializers.ValidationError('Already liked')\n return super().create(validated_data)\n\n def unlike(self):\n comment = self.context.get('request').data.get('comment')\n user = self.context.get('request').user\n like = Like.objects.filter(user=user, comment=comment).first()\n if like:\n like.delete()\n else:\n raise serializers.ValidationError('Not liked yet')\n\n\nclass DislikeSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(\n default=serializers.CurrentUserDefault(),\n source='user.username'\n )\n comment = serializers.HiddenField(default=CurrentPostDefault())\n\n class Meta:\n model = Dislike\n fields = '__all__'\n\n def create(self, validated_data):\n comment = self.context.get('request').data.get('comment')\n user = self.context.get('request').user\n like = Like.objects.filter(user=user, comment=comment).first()\n if like:\n like.delete()\n dislike = Dislike.objects.filter(user=user, comment=comment).first()\n if dislike:\n raise serializers.ValidationError('Already disliked')\n return super().create(validated_data)\n\n def undislike(self):\n comment = self.context.get('request').data.get('comment')\n user = self.context.get('request').user\n dislike = Like.objects.filter(user=user, comment=comment).first()\n if dislike:\n dislike.delete()\n else:\n raise 
serializers.ValidationError('Not disliked yet')\n\n","repo_name":"Tsunoyaka/ReFullstack_Hachathon","sub_path":"apps/review/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19294025578","text":"import os \r\n\r\nprint(\"Multiplique seus números aqui\")\r\ndef multiplicacao(n1,n2):\r\n print(n1,\"x\", n2, \"=\", n1*n2)\r\n\r\nresp1 = int(input(\"Digíte um número:\"))\r\nresp2 = int(input(\"Digite outro número:\"))\r\n\r\nmultiplicacao(resp1,resp2)\r\n\r\nos.system(\"pause\")","repo_name":"SarinhaX4/Python","sub_path":"Desafio_32/atividade.py","file_name":"atividade.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"914073418","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport numpy\nimport math\nfrom matplotlib import pyplot\n\n\ndef simulate_pendulum_damped(phi_init, omega_init, numSteps, deltaT, damping):\n t = numpy.zeros(numSteps)\n\n phi = numpy.zeros(numSteps)\n phi[0] = phi_init\n\n omega = numpy.zeros(numSteps)\n omega[0] = omega_init\n\n for n in range(1, numSteps):\n t[n] = deltaT * n # store the time for plotting.\n\n alpha_t = (-(g / l) * math.sin(phi[n - 1])) - (damping * omega[n - 1])\n omega_t = omega[n - 1] + (delta_t * alpha_t)\n phi_t = phi[n - 1] + (delta_t * omega_t)\n omega[n] = omega_t\n phi[n] = phi_t\n\n return (t, phi, omega)\n\ndamping = 0.5\n(t_damped, phi_damped, omega_damped) = simulate_pendulum_damped(\n phi_0, omega_0, N, delta_t, damping)\n\npyplot.figure(figsize=(10, 4))\npyplot.xlabel('t', fontsize=14)\npyplot.ylabel(r'$\\phi$', fontsize=14)\npyplot.hold(True)\npyplot.plot(t_damped, phi_damped)\n\nos.system(\"pause\")\n","repo_name":"nclv/Python-3.5","sub_path":"Physique/Mouvement/Dynamique/Systèmes oscillants/Pendule/damped_pendulum.py","file_name":"damped_pendulum.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"33517793395","text":"import string\n\nBASE32_ALPHABET = \\\n string.ascii_uppercase + string.digits\n\n\ndef convert_int_to_base32(\n int_value: int) \\\n -> str:\n binary = \\\n bin(\n int_value)[2:] # Get binary representation of the integer\n \n binary = \\\n binary.zfill(\n (len(\n binary) + 4) // 5 * 5) # Pad to multiple of 5 bits\n \n return \\\n ''.join(\n BASE32_ALPHABET[int(\n binary[i:i + 5],\n 2)] for i in range(\n 0,\n len(\n binary),\n 5))\n","repo_name":"john01diaz/satfcopy","sub_path":"sat_parquet_source/parquet_schema_analyzer/b_code/b_identity_ecosystem/converters/int_to_base32_converter.py","file_name":"int_to_base32_converter.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70841716246","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 8 16:57:03 2017\n\n@author: jzhao\n\"\"\"\n\n\"\"\"\n Extract CCF: .fits -> .dat\n\"\"\"\n\n###########\n# Updates #\n###########\n# bc = hdulist[0].header['HIERARCH ESO DRS BERV'] @26/07/17\n\n#############################################\n\nimport sys\nfrom astropy.io import fits\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom scipy.interpolate import CubicSpline\nfrom math import sqrt\nimport math\n\n############# DEFINE FUNCTIONS 
##############\ndef gaussian(x, a, mu, sigma, C):\n val = a / ( sqrt(2*math.pi) * sigma ) * np.exp(-(x - mu)**2 / sigma**2) + C\n return val \n\n# Root mean square\ndef rms(num):\n return sqrt(sum(n*n for n in num)/len(num))\n#############################################\n\nFILE = glob.glob('../ccf_fits/*fits')\nN = len(FILE)\nN_start = 0\nN_end = N\nn_file = N_end - N_start\nMJD = np.zeros(n_file)\nRV_g = np.zeros(n_file)\n\nx = np.arange(-10, -1+0.1, 0.1) # over sampling to 0.1 km/s [-10.2, -0.8]\ny = np.zeros(len(x))\n\nplt.figure()\n\nfor n in range(N_start, N_end):\n \n # progress bar #\n sys.stdout.write('\\r')\n sys.stdout.write(\"[%-50s] %d%%\" % ('='*int((n+1-N_start)*50./(N_end-N_start)), int((n+1-N_start)*100./(N_end-N_start))))\n sys.stdout.flush() \n\n \n hdulist = fits.open(FILE[n])\n bc = hdulist[0].header['HIERARCH ESO DRS BERV'] / 1000 # barycentric RV, originally in m/s\n v0 = hdulist[0].header['CRVAL1'] - bc # velocity on the left (N_starting point)\n MJD[n] = hdulist[0].header['MJD-OBS']\n \n CCF = hdulist[0].data # ccf 2-d array\n ccf = CCF[- 1, :] # ccf 1-d array (whole range)\n delta_v = hdulist[0].header['CDELT1'] # velocity grid size \n v = v0 + np.arange(CCF.shape[1]) * delta_v # velocity array (whole range)\n\n if n == 0: # not used\n idx_min = ccf == min(ccf)\n v_min = v[idx_min]\n\n f = CubicSpline(v, ccf / ccf.max())\n y = f(x)\n popt, pcov = curve_fit( gaussian, x, y, [-1.7, v_min, 2.5, 1])\n# print((popt[1] - v_min)*1000)\n x_new = x\n y_new = (y - popt[3]) / popt[0]\n plt.plot(x_new, y_new, '-')\n \n writefile = '../ccf_dat/ccf' + str(n) + '.dat'\n np.savetxt(writefile, y_new)\n \nplt.show()","repo_name":"jinglinzhao/GL479","sub_path":"CCF_Gl479.py","file_name":"CCF_Gl479.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29173752745","text":"phonebook = dict()\n\nwhile True:\n print('\\nТекущий словарь контактов:\\n')\n for key, value in phonebook.items():\n print(f'{key}: {value}')\n\n def check(spisok, person):\n new_list = list()\n for keys, values in spisok.items():\n if person in keys:\n new_list.append(keys)\n new_list.append(values)\n return new_list\n\n move = input('\\nДобавить контакт\\Поиск человека по фамилии: ').capitalize()\n\n if move == 'Добавить контакт':\n name = input('Имя Фамилия контакта(через пробел): ').lower()\n if not name in phonebook:\n phonebook[name] = int(input('Номер контакта: '))\n else:\n print('Такой человек уже существует.')\n elif move == 'Поиск человека по фамилии':\n found = input('Кого ищем? 
').lower()\n result = check(phonebook, found)\n print(f'\\nИнформация по контактам с фамилией {found}:')\n if len(result) == 0:\n print('Список пуст.')\n else:\n print(result)\n","repo_name":"YarFett/My_first_rep","sub_path":"Module20/08_contacts_3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37532276690","text":"from __future__ import annotations\n\nimport sys\nfrom random import choice\nfrom typing import Tuple, Union\n\nfrom .combat_strategies import CombatStrategyABC\nfrom .core import *\n\n_default_image = 'https://storage.googleapis.com/img.kaori.io/static/present.png'\n\n\nclass Card:\n combat_strat: CombatStrategyABC = None\n\n def __init__(self,\n name: str,\n rarity: RarityName,\n nature: Tuple[NatureName, NatureName],\n image_url: str = _default_image,\n card_id: int = None,\n **kwargs) -> None:\n\n if self.combat_strat is None:\n raise RuntimeError('You need to set the initialize the combat class')\n\n self.id = card_id\n self.image_url = image_url\n\n self.name = name\n self.rarity = rarity\n self.nature = nature\n self.stupid = 1\n self.baby = 1\n self.clown = 1\n self.horny = 1\n self.cursed = 1\n self.feral = 1\n self._current_hp = None\n for name, value in kwargs.items():\n whitelist = [\n 'stupid', 'baby', 'clown', 'horny', 'cursed', 'feral'\n ]\n if name in whitelist:\n setattr(self, name, value)\n\n def __repr__(self) -> str:\n return f\"\"\n\n @property\n def nature_values(self):\n return {\n stupid: self.stupid,\n baby: self.baby,\n clown: self.clown,\n horny: self.horny,\n cursed: self.cursed,\n feral: self.feral,\n }\n\n @property\n def current_hp(self):\n if self._current_hp is None:\n self.reset_hp()\n\n return round(self._current_hp)\n\n def accept_damage(self, value: int):\n self._current_hp -= value\n\n @property\n def dmg(self):\n return self._stat(DMG)\n\n @property\n def speed(self):\n return self._stat(SPEED)\n\n @property\n def crit(self):\n return self._stat(CRIT)\n\n @property\n def armor(self):\n return self._stat(ARMOR)\n\n @property\n def evasion(self):\n return self._stat(EVA)\n\n @property\n def max_hp(self):\n return self._stat(HP)\n\n def _stat(self, name: StatName):\n value = self.combat_strat.calculate_stat(name, self.nature_values)\n if name in integer_stats:\n return round(value)\n\n if name in rounded_stats:\n return round(value, rounded_stats_ndigits)\n\n return value\n\n def reset_hp(self):\n self._current_hp = self.max_hp\n\n def is_valid_card(self) -> bool:\n nv_sum = sum(self.nature_values.values())\n budget = self.combat_strat.rarities[self.rarity].budget\n assert nv_sum == budget, \\\n f\"'**ERROR:** {self.name}' {nv_sum} does not square with budget {budget}\"\n split = self.combat_strat.rarities[self.rarity].split\n for n in self.nature:\n assert self.nature_values[n] >= split, \\\n f\"'**ERROR:** {self.name}'s {n.name} nature value of {self.nature_values[n]} does not have minimum \" \\\n f\"split value of {split} for rarity {self.rarity.name} \"\n\n return True\n\n @property\n def title(self):\n return f\"{self.name} ({self.subtitle})\"\n\n @property\n def subtitle(self):\n return f\"{self.rarity}-tier {humanize_nature(*self.nature)}\"\n\n @staticmethod\n def detect_standoff(a: Card, b: Card, debug: bool = False) -> bool:\n a_max_dmg = a.attack_damage(b,\n crit_multiplier=Card.combat_strat.crit_multiplier,\n debug=debug)\n b_max_dmg = b.attack_damage(a,\n 
crit_multiplier=Card.combat_strat.crit_multiplier,\n debug=debug)\n return max(a_max_dmg, b_max_dmg) == 0\n\n @staticmethod\n def sluggify_name(name) -> str:\n return '-'.join(name.strip().split()).lower().encode('idna').decode('utf-8')\n\n @property\n def slug(self) -> str:\n return Card.sluggify_name(self.name)\n\n def attack_damage(self,\n target: Card,\n crit_multiplier: int = 1,\n debug: bool = False) -> int:\n dmg = self.dmg\n if crit_multiplier != 1:\n dmg = self.dmg + self.cursed\n apply_crit = dmg * crit_multiplier\n apply_armor = apply_crit - target.armor\n rounded = round(apply_armor)\n dmg = max(0, rounded)\n if debug:\n print(\n f\"[debug] Attack Calculation: \"\n f\"max(0, round({self.dmg} * {crit_multiplier}) - {target.armor}))\",\n file=sys.stderr)\n return dmg\n\n @staticmethod\n def generate(name: str,\n rarity: Union[RarityName, str],\n natures: list) -> Card:\n if not isinstance(rarity, RarityName):\n rarity: RarityName = {str(r): r for r in Card.combat_strat.rarities.keys()}[rarity]\n\n for i, nature_in in enumerate(natures):\n if not isinstance(nature_in, NatureName):\n natures[i]: NatureName = {str(n): n for n in Card.combat_strat.natures.keys()}[nature_in]\n\n natures = tuple(natures)\n\n primary_nature, secondary_nature = natures\n\n rarity_config = Card.combat_strat.rarities[rarity]\n\n budget = rarity_config.budget\n\n nature_points = {\n stupid: 1,\n baby: 1,\n clown: 1,\n horny: 1,\n cursed: 1,\n feral: 1,\n primary_nature: rarity_config.split,\n secondary_nature: rarity_config.split\n }\n\n budget -= sum(nature_points.values())\n\n while budget > 0:\n nature_points[choice(list(Card.combat_strat.natures.keys()))] += 1\n budget -= 1\n\n return Card(name=name,\n rarity=rarity,\n nature=natures,\n **{str(k): v for k, v in nature_points.items()})\n\n def serialize_min(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'image_url': self.image_url,\n 'rarity': str(self.rarity),\n 'nature': humanize_nature(*self.nature),\n **{str(k): v for k, v in self.nature_values.items()},\n 'max_hp': self.max_hp,\n 'dmg': self.dmg,\n 'speed': self.speed,\n 'crit': self.crit,\n 'armor': self.armor,\n 'evasion': self.evasion,\n }\n","repo_name":"austinpray/kaori","sub_path":"kaori/plugins/gacha/engine/core/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":6665,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"30"} +{"seq_id":"30949375939","text":"import time\n\nimport pytest\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom appium.webdriver.common.touch_action import TouchAction\nfrom appium.webdriver.extensions.android.gsm import GsmCallActions\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\n\n\nclass TestDw():\n def setup(self):\n desire_caps = {}\n desire_caps[\"platformName\"] = \"android\"\n desire_caps[\"platformVersion\"] = \"6.0\"\n desire_caps[\"deviceName\"] = \"127.0.0.1:7555\"\n desire_caps[\"appPackage\"] = \"com.xueqiu.android\"\n desire_caps[\"appActivity\"] = \"com.xueqiu.android.common.MainActivity\"\n desire_caps[\"noReset\"] = \"true\"\n # desire_caps[\"dontStopAppOnReset\"] = \"true\"\n desire_caps[\"unicodeKeyboard\"] = \"true\"\n desire_caps[\"resetKeyboard\"]=\"true\"\n desire_caps[\"skipDeviceInitialization\"] = \"true\"\n self.driver = webdriver.Remote(\"http://127.0.0.1:4723/wd/hub\", desire_caps)\n self.driver.implicitly_wait(5)\n\n def teardown(self):\n self.driver.quit()\n\n 
@pytest.mark.skip\n def test_search(self):\n self.driver.find_element_by_id(\"com.xueqiu.android:id/tv_search\").click()\n self.driver.find_element_by_id(\"com.xueqiu.android:id/search_input_text\").send_keys(\"阿里巴巴\")\n self.driver.find_element_by_xpath(\"//*[@resource-id='com.xueqiu.android:id/name' and @text='阿里巴巴']\").click();\n price=float(self.driver.find_element_by_id(\"com.xueqiu.android:id/current_price\").text)\n assert price >200\n @pytest.mark.skip\n def test_attribute(self):\n print(self.driver.find_element_by_id(\"com.xueqiu.android:id/tv_search\").is_enabled())\n\n @pytest.mark.skip\n def test_touchAction(self):\n action=TouchAction(self.driver)\n rect = self.driver.get_window_rect()\n print(rect)\n width = rect[\"width\"]\n height = rect[\"height\"]\n x1=int(width/2)\n ystart = int(height * 4/5)\n yend = int(height * 1/5)\n action.press(x=x1,y=ystart).wait(2000).move_to(x=x1,y=yend).release().perform()\n @pytest.mark.skip\n def test_get_current(self):\n self.driver.find_element_by_id(\"com.xueqiu.android:id/tv_search\").click()\n self.driver.find_element_by_id(\"com.xueqiu.android:id/search_input_text\").send_keys(\"阿里巴巴\")\n self.driver.find_element_by_xpath(\"//*[@resource-id='com.xueqiu.android:id/name' and @text='阿里巴巴']\").click();\n # 显示等待\n locator = (MobileBy.XPATH,\"//*[@text='09988']/../../..//*[@resource-id='com.xueqiu.android:id/current_price']\")\n # WebDriverWait(self.driver,6).until(expected_conditions.element_to_be_clickable(locator))\n #lambda 表达式\n WebDriverWait(self.driver,6).until(lambda x : x.find_element(*locator))\n current_price = self.driver.find_element(*locator).text\n print(f\"当前的价格是{current_price}\")\n assert float(current_price) >200\n\n\n @pytest.mark.skip\n def test_info(self):\n \"\"\"\n 1.点击进入我的\n 2.点击登录,进入登录页面\n 3.输入用户名、密码\n 4.点击登录\n \"\"\"\n self.driver.find_element_by_android_uiautomator('new UiSelector().text(\"我的\")').click()\n self.driver.find_element_by_android_uiautomator('new UiSelector().textContains(\"帐号密码\")').click()\n self.driver.find_element_by_android_uiautomator('new UiSelector().resourceId(\"com.xueqiu.android:id/login_account\")').send_keys(\"123456\")\n self.driver.find_element_by_android_uiautomator('new UiSelector().resourceId(\"com.xueqiu.android:id/login_password\")').send_keys(\"123456\")\n self.driver.find_element_by_android_uiautomator('new UiSelector().resourceId(\"com.xueqiu.android:id/button_next\")').click()\n # 组合定位\n # self.driver.find_element_by_android_uiautomator('new UiSelector().resourceId(\"com.xueqiu.android:id/tab_name\").text(\"我的\")')\n\n @pytest.mark.skip\n def test_scroll_find_element(self):\n self.driver.find_element_by_android_uiautomator('new UiSelector().text(\"关注\")').click()\n self.driver.find_element_by_android_uiautomator('new UiScrollable(new UiSelector().scrollable(true).instance(0)).scrollIntoView(new UiSelector().text(\"诗与星空\").instance(0));').click()\n time.sleep(5)\n\n def test_attribute(self):\n # pass\n # # 获取元素的属性 并且返回\n # el = self.driver.find_element_by_id(\"com.xueqiu.android:id/tv_search\")\n # print(el.get_attribute(\"content-desc\"))\n # print(el.get_attribute(\"resource-id\"))\n # print(el.get_attribute(\"enabled\"))\n # print(el.get_attribute(\"clickable\"))\n # print(el.get_attribute(\"bounds\"))\n 
self.driver.make_gsm_call(\"15889451221\",GsmCallActions.CALL)\n\n\n\n","repo_name":"15889451221/Hogwarts-16","sub_path":"test_Appium/test_dwpytest.py","file_name":"test_dwpytest.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"70010228244","text":"import cv2\nimport time\nfrom datetime import datetime\nimport os\nimport subprocess\nimport numpy as np\nfrom threading import Thread\nimport shutil\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Camera:\n \"\"\"Camera polling service.\n Serves the latest image, and keeps exposure in check.,\n\n Args:\n camera (int): Identifier for v4l2 camera. If left empty,\n uses predefined gstreamer options known to work\n on AGX Xavier with e-con130 camera.\n\n \"\"\"\n\n def __init__(self, camera=None):\n self.src = camera\n self.v4l2id = None\n self.cap = None\n self.exposure = 50.0\n self.running = False\n self.image = None\n self.thread = None\n self.pthread = None\n self.save_path = None\n self.save_set = datetime.now().strftime(\"%H%M\")\n self.frame = 0\n self.frame_date = \"\"\n self.recorded_frame = 0\n self.enough_disk = False\n self.disk_space = {}\n self.working = False\n self.fps = 0\n self._init_camera()\n\n # auto exposure\n self.adjuster = None\n self.binn = 32\n self.min_exposure = 1.0\n self.max_exposure = 3000.0\n self.exposure_modifier = 1.0\n self.autoexposure_interval = 5\n self.autoexposure_debug = False\n self._set_adjuster()\n\n def _init_camera(self):\n \"\"\"Initialize camera (gstreamer options)\n\n Returns:\n None:\n\n \"\"\"\n if self.src is None:\n\n width, height = 1920, 1080\n\n gstreamer = \" ! \".join(\n [\n \"v4l2src device=/dev/video0\",\n \"video/x-raw,width={},height={},format=(string)UYVY\",\n \"videoconvert\",\n \"appsink\",\n ]\n ).format(width, height)\n logging.info(gstreamer)\n self.src = gstreamer\n self.v4l2id = 0\n\n def _set_adjuster(self):\n \"\"\"Sets a function used to adjust exposure\n\n Returns:\n None:\n \"\"\"\n binn = self.binn\n ypoints = [1.5, 1.1, 1, 1 / 1.5]\n xpoints = [0, 5, 8, 31]\n adjuster = np.interp(list(range(binn)), xpoints, ypoints)\n\n self.adjuster = adjuster\n\n def autoexposure(self):\n \"\"\"Adjust exposure slightly. 
Run continuously to reach correct exposure.\"\"\"\n if self.v4l2id is None:\n\n # no way of controlling exposure\n return\n\n new_value = self.exposure\n image = cv2.cvtColor(\n cv2.resize(self.image, dsize=(300, 300)), cv2.COLOR_BGR2GRAY\n )\n image = np.uint8(np.float32(image[100:200, 100:200]) / self.exposure_modifier)\n # Read only the center of image!\n freq, bins = np.histogram(image, bins=self.binn, range=[0, 255])\n freq = freq / sum(freq)\n #\n multiplier = np.sum(self.adjuster * freq)\n new_value *= multiplier\n # work within sensible values\n new_value = min(self.max_exposure, new_value)\n new_value = max(self.min_exposure, new_value)\n is_same = (\n int(self.exposure) * 0.97 <= int(new_value) <= int(self.exposure) * 1.03\n )\n if self.autoexposure_debug:\n max_non_zero = np.max([i for i, x in enumerate(freq) if x > 0.01])\n logging.info(\n \"\"\"\n Freqs {freqs}\n freq[0] {freq0}\n freq[1] {freq1}\n maxfreq {maxfreq}\n maxnon0 {maxn}\n expo mult {emult}\n multiplier {mult}\n new_value {new_value}\n old_value {old_value}\n is_same {is_same}\n \"\"\".format(\n freqs=[round(x, 2) for x in freq],\n freq0=round(freq[0], 4),\n freq1=round(freq[-1], 4),\n maxfreq=np.argmax(freq),\n maxn=max_non_zero,\n emult=self.exposure_modifier,\n mult=np.sum(self.adjuster * freq),\n new_value=new_value,\n old_value=self.exposure,\n is_same=is_same,\n )\n )\n\n if is_same:\n # do not commit change if change too small\n return\n self.exposure = new_value\n self._set_exposure()\n\n def _set_exposure(self):\n \"\"\"Set exposure using command line tool.\n The current driver does not accept values from openCV\n \"\"\"\n if self.v4l2id is not None:\n p = subprocess.Popen(\n \"v4l2-ctl -d /dev/video{} -c exposure_time_absolute={}\".format(\n self.v4l2id, int(self.exposure)\n ),\n shell=True,\n )\n stdout, stderr = p.communicate()\n\n def check_diskspace(self):\n \"\"\"Calculate how fast we run out of diskspace\"\"\"\n if self.save_path:\n try:\n diskused = shutil.disk_usage(self.save_path)\n self.enough_disk = diskused.free / diskused.total > 0.05\n\n if \"first\" not in self.disk_space:\n self.disk_space[\"first\"] = {\n \"free\": diskused.free,\n \"time\": time.time(),\n }\n return\n\n self.disk_space[\"now\"] = {\n \"free\": diskused.free,\n \"freeGb\": round(diskused.free / 2 ** 30, 2),\n \"time\": time.time(),\n }\n\n dfree = (\n self.disk_space[\"now\"][\"free\"] - self.disk_space[\"first\"][\"free\"]\n )\n dtime = (\n self.disk_space[\"now\"][\"time\"] - self.disk_space[\"first\"][\"time\"]\n )\n speed = -(dfree) / (dtime)\n if speed == 0:\n tleft = 0\n else:\n tleft = (self.disk_space[\"now\"][\"free\"]) / speed\n\n self.disk_space[\"speed\"] = {\n \"dfree\": dfree,\n \"dtime\": dtime,\n \"speedM/s\": round(speed / 2 ** 20, 2),\n \"time_left_H\": round(tleft / 3600, 2),\n }\n\n except (FileNotFoundError, ZeroDivisionError) as e:\n logging.warning(e)\n\n def isOpened(self):\n \"\"\"Check if camera is in use\n\n Returns:\n bool: True if camera is in use\n \"\"\"\n\n return self.working\n\n def read(self):\n \"\"\"Get latest image from camera. Mimick cv2.VideoCapture behavior\n\n Returns:\n bool: True if image acquisition works\n np.array: Image data\n \"\"\"\n try:\n return self.working, self.image.copy()\n except:\n return False, None\n\n def release(self):\n \"\"\"Stop reading camera. 
Mimick cv2.VideoCapture behavior\"\"\"\n self.running = False\n\n def _run(self):\n \"\"\"Enter capturing loop\"\"\"\n self._set_exposure()\n self.cap = cv2.VideoCapture(self.src, cv2.CAP_GSTREAMER)\n self.frame = 0\n self.recorded_frame = 0\n while self.running:\n t, i = self.cap.read()\n self.working = t\n if t:\n self.image = i\n self.frame += 1\n self.frame_date = datetime.now()\n time.sleep(0.05)\n # simulate 20FPS, camera driver might crash if polling too rapid\n\n self.cap.release()\n self.working = False\n\n def _run_periodicals(self):\n \"\"\"Other-than-image-acquisition tasks in a separate loop\"\"\"\n cam_started = time.time()\n self.save_set = datetime.now().strftime(\"%H%M%S\")\n if self.save_path:\n set_folder = os.path.join(self.save_path, str(self.save_set))\n if not os.path.isdir(set_folder):\n os.mkdir(set_folder)\n saved_frame = 0\n started = 0\n while self.running:\n if self.working:\n if time.time() - started > self.autoexposure_interval:\n started = time.time()\n self.autoexposure()\n self.check_diskspace()\n self.fps = self.frame / (time.time() - cam_started)\n logging.info(\"FPS: {}\".format(int(self.fps)))\n\n if self.save_path:\n # If save_path is set, save every frame\n try:\n if saved_frame != self.frame:\n saved_frame = self.frame\n self._save_image(self.image, saved_frame)\n except FileNotFoundError as e:\n logging.warning(e)\n\n time.sleep(0.01)\n\n def _save_image(self, image, frame):\n \"\"\"Save image to disk\n\n Args:\n image (np.array): Image data\n frame (int): Frame number\n\n Returns:\n None\n \"\"\"\n if not self.enough_disk:\n logging.error(\"Not enough disk space\")\n return\n set_folder = os.path.join(self.save_path, str(self.save_set))\n if not os.path.isdir(set_folder):\n os.mkdir(set_folder)\n filepath = os.path.join(\n set_folder,\n \"cam{}-ts-{}-f-{}.jpg\".format(\n self.v4l2id, self.frame_date.strftime(\"%y-%m-%d-%H-%M-%S.%f\"), frame\n ),\n )\n cv2.imwrite(\n filepath,\n cv2.resize(image, (853, 480)),\n [cv2.IMWRITE_JPEG_QUALITY, 95],\n )\n self.recorded_frame = filepath\n\n def start(self):\n \"\"\"Start camera service\"\"\"\n if self.running:\n return\n self.running = True\n self.thread = Thread(target=self._run, args=())\n self.thread.daemon = True\n self.thread.start()\n self.pthread = Thread(target=self._run_periodicals, args=())\n self.pthread.daemon = True\n self.pthread.start()\n","repo_name":"City-of-Helsinki/remppa","sub_path":"components/processor/code/camera/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":9934,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"} +{"seq_id":"21615780885","text":"import json\n\nimport scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import Feature\n\nDAY_MAPPING = {2: \"Mo\", 3: \"Tu\", 4: \"We\", 5: \"Th\", 6: \"Fr\", 7: \"Sa\", 1: \"Su\"}\n\n\nclass VictoriassecretSpider(scrapy.Spider):\n name = \"victoriassecret\"\n item_attributes = {\"brand\": \"Victoria's Secret\", \"brand_wikidata\": \"Q332477\"}\n allowed_domains = [\"victoriassecret.com\"]\n start_urls = [\n \"https://www.victoriassecret.com/store-locator#storeList/US\",\n ]\n\n def start_requests(self):\n template = \"https://api.victoriassecret.com/stores/v1/search?countryCode=US\"\n\n headers = {\n \"Accept\": \"application/json\",\n }\n\n yield scrapy.http.FormRequest(url=template, method=\"GET\", headers=headers, callback=self.parse)\n\n def parse(self, response):\n jsonresponse = response.json()\n for stores in jsonresponse:\n store = 
json.dumps(stores)\n store_data = json.loads(store)\n properties = {}\n\n if store_data[\"latitudeDegrees\"] == \"\":\n properties[\"lat\"] = float(0)\n properties[\"lon\"] = float(0)\n\n else:\n properties = {\n \"name\": store_data[\"name\"],\n \"ref\": store_data[\"storeId\"],\n \"addr_full\": store_data[\"address\"][\"streetAddress1\"],\n \"city\": store_data[\"address\"][\"city\"],\n \"state\": store_data[\"address\"][\"region\"],\n \"postcode\": store_data[\"address\"][\"postalCode\"],\n \"country\": \"US\",\n \"phone\": store_data[\"address\"][\"phone\"],\n \"lat\": float(store_data[\"latitudeDegrees\"]),\n \"lon\": float(store_data[\"longitudeDegrees\"]),\n }\n\n hours = store_data[\"hours\"]\n\n if hours:\n properties[\"opening_hours\"] = self.process_hours(hours)\n\n yield Feature(**properties)\n\n def process_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n hr = json.dumps(hour)\n hrs = json.loads(hr)\n day = hrs[\"day\"]\n open_time = hrs[\"open\"]\n close_time = hrs[\"close\"]\n\n opening_hours.add_range(\n day=DAY_MAPPING[day],\n open_time=open_time,\n close_time=close_time,\n time_format=\"%H:%M %p\",\n )\n return opening_hours.as_opening_hours()\n","repo_name":"alltheplaces/alltheplaces","sub_path":"locations/spiders/victoriassecret.py","file_name":"victoriassecret.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"30"} +{"seq_id":"24184674304","text":"import copy\n\nN, Q = map(int, input().split())\nmaster_A = list(map(int, input().split()))\n\nX = [int(input()) for _ in range(Q)]\n\n\nfor x in X:\n # start game\n A = copy.copy(master_A)\n\n takashi = 0\n\n aoki_pointer = 0\n for a in range(len(A)):\n if A[a] > x:\n break\n aoki_pointer = a\n\n while len(A) > 0:\n # takaeshi turn\n takashi += A[-1]\n del A[-1]\n\n # aoki turn\n if len(A) > 0:\n del_target = min(len(A) - 1, aoki_pointer)\n del A[del_target]\n\n print(takashi)\n","repo_name":"takecian/ProgrammingStudyLog","sub_path":"AtCoder/Contests/aising2019/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"33739656527","text":"from alpaca.data.historical import StockHistoricalDataClient\nfrom alpaca.data.requests import StockBarsRequest, StockLatestQuoteRequest, StockQuotesRequest, StockLatestBarRequest\nfrom alpaca.data.models import Quote, Bar\nfrom alpaca.data.timeframe import TimeFrame\nfrom alpaca.data.enums import Adjustment, DataFeed\nfrom lib.dataEngine.common import BarCollection\nfrom lib.patterns.singleton import Singleton\nfrom lib.patterns.retry import retry\nfrom lib.patterns.base import Base\n\nimport pandas as pd \nimport logging \nfrom datetime import datetime\nfrom numpy import array\nfrom dateutil.relativedelta import relativedelta\n\nlogger = logging.getLogger(__name__)\n\nclass AlpacaDataClient(Base, metaclass=Singleton):\n \n def __init__(self, auth):\n self.dataClient:StockHistoricalDataClient = StockHistoricalDataClient(\n api_key=auth.api_key,\n secret_key=auth.secret_key\n ) \n \n @classmethod\n def create(cls, auth):\n if cls._isAuthValid(auth):\n return cls(auth=auth)\n else:\n raise AttributeError(\"the auth object is invalid\")\n \n @staticmethod\n def _isAuthValid(auth) -> bool:\n if auth.api_key and auth.secret_key:\n return True \n return False \n \n @retry(max_retries=5, retry_delay=60, logger=logger)\n def getMonthly(self, symbol:str, 
num_months:int=60) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Month,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n start=datetime.today() - relativedelta(months=num_months),\n end=datetime.today()\n )\n ).df \n \n \n @retry(max_retries=3, retry_delay=60, logger=logger)\n def getWeekly(self, symbol:str) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Week,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n start=datetime.today() - relativedelta(years=3),\n end=datetime.today()\n )\n ).df \n \n @retry(max_retries=3, retry_delay=60, logger=logger)\n def getDaily(self, symbol:str, endDate:datetime = datetime.today()) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Day,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n limit=30,\n start=endDate - relativedelta(days=30),\n end=endDate\n )\n ).df \n \n def getHourly(self, symbol:str, endDate:datetime = datetime.now(), days:int = 30) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Hour,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n start=endDate - relativedelta(days=days),\n end=endDate\n )\n ).df\n \n def getMinutes(self, symbol:str, endDate:datetime = datetime.now(), days:int = 1) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Minute,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n start=endDate - relativedelta(days=days)\n )\n ).df\n \n @retry(max_retries=3, retry_delay=60, incremental_backoff=2, logger=logger)\n def getLastMinute(self, symbol:str) -> float:\n return self.dataClient.get_stock_latest_bar(\n StockLatestBarRequest(\n symbol_or_symbols=symbol,\n feed=DataFeed.SIP\n )\n )[symbol].close\n \n def getMarketCap(self, symbol:str) -> float:\n df:pd.DataFrame = self.getDaily(symbol)\n return (df[\"vwap\"] * df[\"volume\"]).mean()\n \n @retry(max_retries=3, retry_delay=60, incremental_backoff=2, logger=logger)\n def getLongDaily(self, symbol:str, endDate:datetime = datetime.today()) -> pd.DataFrame:\n return self.dataClient.get_stock_bars(\n StockBarsRequest(\n symbol_or_symbols=symbol,\n timeframe=TimeFrame.Day,\n adjustment=Adjustment.ALL,\n feed=DataFeed.SIP,\n limit=90,\n start=endDate - relativedelta(days=90),\n end=endDate\n )\n ).df\n \n def getAllBars(self, symbol:str) -> BarCollection:\n daily:pd.DataFrame = self.getDaily(symbol)\n weekly:pd.DataFrame = self.getWeekly(symbol)\n monthly:pd.DataFrame = self.getMonthly(symbol) \n return BarCollection(daily, weekly, monthly)\n \n def getLatestQuote(self, symbol:str) -> Quote:\n return self.dataClient.get_stock_latest_quote(\n StockLatestQuoteRequest(\n symbol_or_symbols=symbol,\n feed=DataFeed.SIP\n )\n )[symbol]\n ","repo_name":"webclinic017/UnsupervisedPairTrading","sub_path":"lib/dataEngine/alpacadata.py","file_name":"alpacadata.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"4188785265","text":"def tegol_1():\n a = [x if x % 7 else \"Boom\" for x in range(1, 22)]\n print(a)\n b = [x for x in (80, 83)]\n c = {chr(x): x for x in range(80, 83)}\n print(c)\n start, *mid, end = list(range(0,10,2))\n print(f\"{start}, {mid}, {end}\")\n\n\nif __name__ == 
'__main__':\n print('PyCharm')\n tegol_1()\n","repo_name":"yonnilevy/store_HW","sub_path":"tegol.py","file_name":"tegol.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"27200575803","text":"import sys\nimport os\nimport gzip\nimport re\nimport pickle\nfrom month import month\nfrom Latimes import Latimes\nfrom util import intId2docno, eprint, findDoc\n\n\ndef printUsage():\n eprint(\"Error: Invalid command line arguments\")\n eprint(\n \"Usage: getDoc \"\n )\n sys.exit(53)\n\n\ndef main():\n # total # arguments\n n = len(sys.argv)\n if n != 4:\n printUsage()\n\n import json\n\n with open(os.path.join(sys.argv[1], \"mapping.pkl\"), \"rb\") as m:\n mapping = pickle.load(m)\n\n if sys.argv[2] == \"docno\":\n docNo = sys.argv[3]\n else:\n docNo = mapping[sys.argv[3]]\n\n doc = findDoc(docNo + \".pkl\", sys.argv[1])\n\n with open(doc, \"rb\") as d:\n myLatimes = pickle.load(d)\n\n print(myLatimes)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Tracyee/search-engine25","sub_path":"getDoc.py","file_name":"getDoc.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72577645846","text":"import numpy as np\nimport pandas as pd\nfrom pyg_timeseries._math import stdev_calculation, skew_calculation, cor_calculation\nfrom pyg_timeseries._decorators import compiled, first_, _data_state\nfrom pyg_timeseries._rolling import _vec\nfrom pyg_base import pd2np, loop_all, is_num, is_nums, as_list, is_ts, is_pd, logger\n\n\n__all__ = ['ts_std', 'ts_mean', 'ts_skew', 'ts_count', 'ts_min', 'ts_max', 'ts_rms', 'ts_median', 'ts_sum',\n 'ts_std_', 'ts_mean_', 'ts_skew_', 'ts_count_', 'ts_min_', 'ts_max_', 'ts_rms_', 'ts_sum_']\n\n###############\n##\n## calculators\n##\n###############\n\n@loop_all\n@pd2np\ndef _median(a):\n return np.median(a[~np.isnan(a)])\n\ndef ts_median(a, axis = 0):\n return _median(a, axis = axis)\n\n@pd2np\n@compiled\ndef _moments(a, vec):\n res = vec.copy()\n for i in range(a.shape[0]):\n if not np.isnan(a[i]):\n v = 1\n for j in range(vec.shape[0]-1):\n res[j] += v\n v = v * a[i]\n res[-1] += v\n return res\n\n\n@pd2np\n@compiled\ndef _min(a, m = np.inf):\n for i in range(a.shape[0]):\n if not np.isnan(a[i]) and a[i]<m:\n m = a[i]\n return m\n\n@pd2np\n@compiled\ndef _max(a, m = -np.inf):\n for i in range(a.shape[0]):\n if not np.isnan(a[i]) and a[i]>m:\n m = a[i]\n return m\n\n@loop_all\ndef _ts_min(a, m = None):\n if m is None or np.isnan(m):\n m = np.inf\n m = _min(a, m)\n if np.isinf(m):\n return np.nan, m\n else:\n return m, m\n\ndef ts_min(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_min(a) is equivalent to pandas a.min()\n \"\"\"\n state = state or {}\n return first_(_ts_min(a, axis = axis, **state))\n \ndef ts_min_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_min_(a) is equivalent to ts_min(a) except the state is also returned\n \"\"\"\n state = instate or {}\n res = _ts_min(a, axis = axis, **state)\n return _data_state(['data', 'm'], res)\n\nts_min_.output = ['data', 'state']\n\n@loop_all\ndef _ts_max(a, m = None):\n if m is None or np.isnan(m):\n m = -np.inf\n m = _max(a, m)\n if np.isinf(m):\n return np.nan, m\n else:\n return m, m\n \ndef ts_max(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_max(a) is equivalent to pandas a.max()\n \"\"\"\n state = state or {}\n return first_(_ts_max(a, axis = axis, **state))\n\ndef ts_max_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_max_(a) is equivalent to ts_max(a) except the state is also returned\n \"\"\"\n state = instate or {}\n res = _ts_max(a, axis = axis, **state)\n 
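# A hedged usage sketch (assumption: the returned struct exposes .data and .state, as the docstrings in this module show):\n # head = ts_max_(a.iloc[:2000]); live = ts_max(a.iloc[2000:], state=head.state) # same result as ts_max(a)\n 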
return _data_state(['data', 'm'], res)\n\nts_max_.output = ['data', 'state']\n\ndef _zip(value):\n return _data_state(['data', 'vec'], value)\n\n@loop_all\ndef _ts_count(a, vec = None):\n vec = _vec(vec,1,0)\n vec = _moments(a, vec)\n return vec[0], vec\n \n\ndef ts_count(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_count(a) is equivalent to a.count() (though slightly slower)\n \n - supports numpy arrays \n - skips nan\n - supports state management\n \n :Example: pandas matching\n -----------------------------------\n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n >>> assert ts_count(a) == a.count()\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_count(a.values) == ts_count(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_count_(a.iloc[:2000])\n >>> new = ts_count(a.iloc[2000:], state = old.state)\n >>> assert new == ts_count(a)\n\n \"\"\"\n state = state or {}\n return first_(_ts_count(a, axis = axis, **state))\n\ndef ts_count_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_count_(a) is equivalent to ts_count(a) except vec is also returned.\n See ts_count for full documentation \n \"\"\"\n state = instate or {}\n return _zip(_ts_count(a, axis = axis, **state))\n\n@loop_all\ndef _ts_sum(a, vec = None):\n vec = _vec(a, vec, 2, 0.)\n vec = _moments(a, vec)\n return vec[1], vec\n \ndef ts_sum(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_sum(a) is equivalent to a.sum()\n \n - supports numpy arrays \n - handles nan\n - supports state management\n\n :Parameters:\n ------------\n a : array, pd.Series, pd.DataFrame or list/dict of these\n timeseries\n \n axis : int, optional\n 0/1/-1. The default is 0.\n\n data: None\n unused at the moment. Allow code such as func(live, **func_(history)) to work\n\n state: dict, optional\n state parameters used to instantiate the internal calculations, based on history prior to 'a' provided. 
\n \n\n :Example: pandas matching\n -----------------------------------\n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n >>> assert ts_sum(a) == a.sum()\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_sum(a.values) == ts_sum(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_sum_(a.iloc[:2000])\n >>> new = ts_sum(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_sum(a)\n \"\"\"\n state = state or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return first_(_ts_sum(a, axis = axis, **state))\n\ndef ts_sum_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_sum_(a) is equivalent to ts_sum(a) except vec is also returned.\n See ts_sum for full documentation \n \"\"\"\n state = instate or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return _zip(_ts_sum(a, axis = axis, **state))\n\n@loop_all\ndef _ts_mean(a, vec = None):\n vec = _vec(a, vec,2,0.)\n vec = _moments(a, vec)\n return np.nan if vec[0] == 0 else vec[1]/vec[0], vec\n\ndef ts_mean(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_mean(a) is equivalent to a.mean()\n \n - supports numpy arrays \n - handles nan\n - supports state management\n - pandas is actually faster on count\n \n :Parameters:\n ------------\n a : array, pd.Series, pd.DataFrame or list/dict of these\n timeseries\n \n axis : int, optional\n 0/1/-1. The default is 0.\n\n data: None\n unused at the moment. Allow code such as func(live, **func_(history)) to work\n\n state: dict, optional\n state parameters used to instantiate the internal calculations, based on history prior to 'a' provided. 
\n\n :Example: pandas matching\n -----------------------------------\n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n >>> assert ts_mean(a) == a.mean()\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_mean(a.values) == ts_mean(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_mean_(a.iloc[:2000])\n >>> new = ts_mean(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_mean(a)\n \"\"\"\n state = state or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return first_(_ts_mean(a, axis = axis, **state))\n\ndef ts_mean_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_mean_(a) is equivalent to ts_mean(a) except vec is also returned.\n See ts_mean for full documentation \n \"\"\"\n state = instate or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n\n a = np.array(as_list(a))\n return _zip(_ts_mean(a, axis = axis, **state))\n\n\n@loop_all\n@pd2np\n@compiled\ndef _ts_cor(a, b, min_sample, vec):\n res = vec.copy()\n for i in range(a.shape[0]):\n if not np.isnan(a[i]) and not np.isnan(b[i]):\n res[0] += 1\n res[1] += a[i]\n res[2] += a[i]**2\n res[3] += b[i]\n res[4] += b[i]**2\n res[5] += a[i]*b[i]\n return cor_calculation(t0 = res[0], a1 = res[1], a2 = res[2], b1 = res[3], b2 = res[4], ab = res[5], min_sample = min_sample), res\n \n\ndef ts_cor(a, b, min_sample = 3, axis = 0, data = None, state = None):\n \"\"\"\n ts_cor(a) is equivalent to a.cor()[0][1]\n \n - supports numpy arrays \n - handles nan\n - supports state management\n \n :Example: matching pandas\n -------------------------\n >>> # create sample data:\n >>> from pyg_timeseries import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0.5] = np.nan\n >>> b = pd.Series(np.random.normal(0,1,10000), drange(-9999)); b[b>0.5] = np.nan\n >>> state = data = None; min_sample = 3; axis = 0\n >>> df = pd.concat([a,b], axis=1)\n >>> assert abs(df.corr()[0][1] - ts_cor(a, b))<1e-10\n\n :Example: slightly faster than pandas\n -------------------------------------\n %timeit ts_cor(a, b)\n 245 µs ± 6.43 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\n %timeit df.corr()[0][1]\n 575 µs ± 13 µs per loop (mean ± std. dev. 
of 7 runs, 1000 loops each) \n\n :Example: numpy \n -----------------------------------\n >>> assert ts_cor(a.values, b.values) == ts_cor(a,b)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_std_(a.iloc[:2000])\n >>> new = ts_std(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_std(a)\n\n \"\"\"\n state = state or dict(vec = _vec(a, None,6,0.))\n rtn = first_(_ts_cor(a, b, min_sample = min_sample, **state))\n return rtn\n\ndef ts_cor_(a, b, min_sample = 3, axis = 0, data = None, instate = None):\n \"\"\"\n ts_cor_(a, b) is equivalent to ts_cor(a,b) except vec is also returned.\n See ts_std for full documentation \n \"\"\"\n state = instate or dict(vec = _vec(a, None,6,0.))\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n if is_num(b) or (isinstance(b, list) and is_nums(b)):\n b = np.ndarray(as_list(b))\n return _zip(_ts_cor(a, b, min_sample, axis = axis, **state))\n\n\n@loop_all\ndef _ts_std(a, vec = None):\n vec = _vec(a, vec, 3, 0.)\n vec = _moments(a, vec)\n return stdev_calculation(*vec), vec\n\n\ndef ts_std(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_std(a) is equivalent to a.std()\n \n - supports numpy arrays \n - handles nan\n - supports state management\n \n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n\n :Example: pandas matching\n -----------------------------------\n >>> assert abs(ts_std(a) - a.std())<1e-13\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_std(a.values) == ts_std(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_std_(a.iloc[:2000])\n >>> new = ts_std(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_std(a)\n\n \"\"\"\n state = state or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return first_(_ts_std(a, axis = axis, **state))\n\ndef ts_std_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_std_(a) is equivalent to ts_std(a) except vec is also returned.\n See ts_std for full documentation \n \"\"\"\n state = instate or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return _zip(_ts_std(a, axis = axis, **state))\n\n@loop_all\ndef _ts_rms(a, vec = None):\n vec = _vec(a, vec, 3, 0.)\n vec = _moments(a, vec)\n return np.nan if vec[0] <= 0 else np.sqrt(vec[2]/vec[0]), vec\n \n\n\ndef ts_rms(a, axis = 0, data = None, state = None):\n \"\"\"\n ts_rms(a) is equivalent to (a**2).mean()**0.5\n \n :Parameters:\n ------------\n a : array, pd.Series, pd.DataFrame or list/dict of these\n timeseries\n \n axis : int, optional\n 0/1/-1. The default is 0.\n\n data: None\n unused at the moment. Allow code such as func(live, **func_(history)) to work\n\n state: dict, optional\n state parameters used to instantiate the internal calculations, based on history prior to 'a' provided. 
\n\n - supports numpy arrays \n - handles nan\n - supports state management\n \n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n\n :Example: pandas matching\n -----------------------------------\n >>> assert abs(ts_std(a) - a.std())<1e-13\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_std(a.values) == ts_std(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_rms_(a.iloc[:2000])\n >>> new = ts_rms(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_rms(a)\n\n \"\"\"\n state = state or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return first_(_ts_rms(a, axis = axis, **state))\n\ndef ts_rms_(a, axis = 0, data = None, instate = None):\n \"\"\"\n ts_rms_(a) is equivalent to ts_rms(a) except it also returns vec\n see ts_rms for full documentation\n \"\"\"\n state = instate or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return _zip(_ts_rms(a, axis = axis, **state))\n\n\n@loop_all\ndef _ts_skew(a, bias = False, min_sample = 0.25, vec = None):\n vec = _vec(a, vec, 4, 0.)\n vec = _moments(a, vec)\n return skew_calculation(t0 = vec[0], t1 = vec[1], t2 = vec[2], t3 = vec[3], bias = bias, min_sample = min_sample), vec\n\ndef ts_skew(a, bias = False, min_sample = 0.25, axis = 0, data = None, state = None):\n \"\"\"\n ts_skew(a,0) is equivalent to a.skew()\n \n - supports numpy arrays \n - handles nan\n - faster than pandas\n - supports state management\n \n :Parameters:\n ------------\n a : array, pd.Series, pd.DataFrame or list/dict of these\n timeseries\n \n axis : int, optional\n 0/1/-1. The default is 0.\n \n min_sample: float, optional\n This refers to the denominator when we calculate the skew. Over time, the deonimator converges to 1 but initially, it is small. \n Also, if there is a gap in the data, older datapoints weight may have decayed while there are not enough \"new point\". \n min_sample ensures that in both cases, if denominator<0.25 )(default value) we return nan.\n\n data: None\n unused at the moment. Allow code such as func(live, **func_(history)) to work\n\n state: dict, optional\n state parameters used to instantiate the internal calculations, based on history prior to 'a' provided. 
\n\n :Example: pandas matching\n -----------------------------------\n >>> # create sample data:\n >>> from pyg import *; import pandas as pd; import numpy as np\n >>> a = pd.Series(np.random.normal(0,1,10000), drange(-9999)); a[a>0] = np.nan\n >>> assert abs(ts_skew(a, 0) - a.skew())<1e-13\n\n :Example: numpy \n -----------------------------------\n >>> assert ts_skew(a.values) == ts_skew(a)\n\n :Example: state management\n -------------------------------------------\n >>> old = ts_skew_(a.iloc[:2000])\n >>> new = ts_skew(a.iloc[2000:], vec = old.vec)\n >>> assert new == ts_skew(a)\n \"\"\"\n state = state or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return first_(_ts_skew(a, bias = bias, min_sample = min_sample, axis = axis, **state))\n \ndef ts_skew_(a, bias = False, min_sample = 0.25, axis = 0, data = None, instate = None):\n \"\"\"\n ts_skew_(a) is equivalent to ts_skew except vec is also returned.\n See ts_skew for full details\n \"\"\"\n state = instate or {}\n if is_num(a) or (isinstance(a, list) and is_nums(a)):\n a = np.array(as_list(a))\n return _zip(_ts_skew(a, bias = bias, min_sample = min_sample, axis = axis, **state))\n\n\n\nts_cor_.output = ['data', 'state']\nts_min_.output = ['data', 'state']\nts_max_.output = ['data', 'state']\nts_count_.output = ['data', 'state']\nts_sum_.output = ['data', 'state']\nts_mean_.output = ['data', 'state']\nts_rms_.output = ['data', 'state']\nts_std_.output = ['data', 'state']\nts_skew_.output = ['data', 'state']\n\n\ndef ts_interval(ts, min_freq = 0.5):\n \"\"\"\n returns the most common time difference in the timeseries\n\n :Example:\n ----------\n >>> ts = pd.Series(range(100), drange(-200,0,'1b')[:100])\n >>> assert ts_interval(ts) == datetime.timedelta(1)\n\n >>> ts = pd.Series(range(100), drange(-1,0,'5n')[:100])\n >>> assert ts_interval(ts) == datetime.timedelta(minutes = 5)\n \n >>> ts = pd.Series(range(10000), range(10000))\n >>> ts = ts[ts % 2 != 0]\n >>> ts = ts[ts % 3 != 0]\n >>> ts = ts[ts % 5 != 0]\n >>> assert ts_interval(ts) == 4\n \n \n \"\"\"\n if len(ts)<2:\n return None\n v = ts.index if is_pd(ts) else ts\n intervals = pd.Series(v, v).diff().iloc[1:]\n res = intervals.mode()[0]\n freq = len(intervals[intervals.values==res]) / len(intervals)\n if freq>=min_freq:\n return res\n else: \n logger.warning('timeseries is irregular, with mode frequency less than %1.2f. returning median interval instead'%freq)\n return intervals.median()\n ","repo_name":"gityoav/pyg-timeseries","sub_path":"src/pyg_timeseries/_ts.py","file_name":"_ts.py","file_ext":"py","file_size_in_byte":17543,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"9850675740","text":"\"\"\"Пользователь вводит три числа. 
Увеличьте первое число в два раза,\nвторое числа уменьшите на 3,\nтретье число возведите в квадрат и затем найдите сумму новых трех чисел.\"\"\"\n\nfirst_number = input(\"Enter first number: \")\nsecond_number = input(\"Enter second number: \")\nthird_number = input(\"Enter third number: \")\nfirst_number = int(first_number)\nsecond_number = int(second_number)\nthird_number = int(third_number)\nprint(first_number * 2 + (second_number - 3) + third_number ** 2)","repo_name":"avaturrr/Ksyusha","sub_path":"basics/basics_13.py","file_name":"basics_13.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"41015912765","text":"# -*- coding: utf-8 -*-\nimport socket\nimport threading\n\n\nwlanport = 1234\nserverip = \"192.168.1.103\"\n\n\nclass Server:\n def __init__(self, ip=serverip, port=wlanport): # 设置默认值\n self.addr = ip, port\n self.lock = threading.Lock()\n self.sock = socket.socket()\n self.sock.bind(self.addr)\n self.data = [['no message',False]]\n self.socks = {\"accept\": self.sock} # 将所有创建的socket都放字典,方便释放\n\n def start(self): # 启动接口\n self.sock.listen()\n threading.Thread(target=self.accept, name=\"accept\", daemon=True).start()\n\n def accept(self): # 该线程等待连接并创建处理线程\n while True:\n s, raddr = self.sock.accept()\n with self.lock:\n self.socks[raddr] = s\n threading.Thread(target=self.recv, args=(s, raddr), name=\"recv\", daemon=True).start()\n\n def recv(self, s, raddr): # 每个客户端开启一个线程与其交互\n while True:\n data = s.recv(1024).decode()\n if data.strip() == \"\" or data.strip() == \"quit\": # 客户端结束条件\n with self.lock:\n self.socks.pop(raddr)\n s.close()\n break\n print(data)\n self.data.append([data,True])\n s.send(\"server:{}\\n\".format(data).encode())\n\n def getmsg(self):\n # print('getmsg working and last message:',self.data[-1])\n if self.data[-1][1] == True:\n self.data[-1][1]=False\n return self.data[-1][0]\n else:return \"no message\"\n\n def stop(self):\n with self.lock:\n for s in self.socks.values():\n s.close()\n\n\n def send_msg(self,msg,ip,port):\n pass\n def send_t(self,msg,ip,port):\n\n self.sock.connect((ip,port)) # 尝试连接指定的地址\n self.f = self.sock.makefile(\"rw\")\n # threading.Thread(target=self.recv, name=\"recv\", daemon=True).start() # 一个进程接收消息\n self.f.write(msg) # 主进程发送消息\n self.f.flush()\n\n\n\n'''\n#testing\n\n\ns = Server()\ns.start()\n\nwhile True:\n cmd = input(\"server commond:>>>\")\n if cmd == \"quit\": # 服务器退出条件\n s.stop()\n break\n # elif cmd == \"send\":\n # s.send_msg(cmd,'192.168.1.103',int(input('port')))\n print(threading.enumerate())\n\n'''","repo_name":"bobbywyx/IdIoT","sub_path":"files/crystal(main server)/wlan_connection.py","file_name":"wlan_connection.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"25804105410","text":"\"\"\"\nYuck, day 4. 
Could use major cleanup.\n\"\"\"\nfrom util import *\n\ndata = get_data(4, split=False)\ndata_split = get_data(4)\n\n\ndef both_alt(input):\n \"\"\"\n Cleaned up version.\n \"\"\"\n lines = sorted(input)\n guards = defaultdict(lambda: [0] * 60)\n\n for line in lines:\n minute, text = re.findall(r'\\[\\d*-\\d*-\\d* \\d*:(\\d*)\\] ([\\w\\s#]*)', line)[0]\n\n if '#' in text:\n guard = int(text.split()[1][1:])\n elif 'asleep' in text:\n start_sleep = int(minute)\n elif 'wakes' in text:\n stop_sleep = int(minute)\n for i in range(start_sleep, stop_sleep):\n guards[guard][i] += 1\n\n p1_guard, _ = max([(k, sum(v)) for k, v in guards.items()], key=lambda x: x[1])\n p1_minute = guards[p1_guard].index(max(guards[p1_guard]))\n part1 = p1_guard * p1_minute\n\n p2_guard, p2_max_min = max([(k, max(v)) for k, v in guards.items()], key=lambda x: x[1])\n p2_minutes = guards[p2_guard].index(p2_max_min)\n part2 = p2_guard * p2_minutes\n\n return (part1, part2)\n\n\ndef both(input):\n date_format = '%Y-%m-%d %H:%M'\n p = re.compile(r\"\\[(.*)\\] ([\\w\\s#]*)\\n\", flags=re.MULTILINE)\n guard_p = re.compile(r\"Guard #(\\d*) begins shift\")\n matches = re.findall(p, input)\n\n # First things first, sort the records by chronological order.\n matches.sort(key=lambda x: datetime.strptime(x[0], date_format))\n\n dates_map = {}\n dates_to_guards = {}\n curr_guard = None\n\n last_asleep = 0\n last_awake = 0\n\n for date, action in matches:\n date = datetime.strptime(date, date_format)\n day_of = date.strftime(\"%Y-%m-%d\")\n\n if day_of not in dates_map:\n dates_map[day_of] = ['0'] * 60\n\n minute = date.minute\n\n guard_match = re.findall(guard_p, action)\n\n if guard_match:\n curr_guard = guard_match[0]\n\n if day_of not in dates_to_guards:\n dates_to_guards[day_of] = curr_guard\n\n # default state is awake\n if action == 'falls asleep':\n last_asleep = minute\n # mark everything before as awake\n dates_map[day_of][last_awake:last_asleep] = '0' * \\\n (last_asleep-last_awake)\n elif action == 'wakes up':\n last_awake = minute\n # mark everything before as asleep\n dates_map[day_of][last_asleep:last_awake] = '1' * \\\n (last_awake-last_asleep)\n\n # tally which guard slept the most\n guards_to_sleep = defaultdict(int)\n for date, guard in dates_to_guards.items():\n guards_to_sleep[guard] += dates_map[date].count('1')\n\n guard_who_slept_most = max(guards_to_sleep.items(), key=itemgetter(1))[0]\n\n guards_to_total_sleep = defaultdict(lambda: ['0'] * 60)\n guard_max_total_sleep = 0\n guard_max_total_sleep_value = 0\n for date, guard in dates_to_guards.items():\n guards_to_total_sleep[guard] = [sum(map(int, x)) for x in zip(\n dates_map[date], guards_to_total_sleep[guard])]\n\n max_guard = 0\n curr_max = 0\n max_minute = 0\n for guard, sleep in guards_to_total_sleep.items():\n max_sleep_per_guard = max(map(int, guards_to_total_sleep[guard]))\n if max_sleep_per_guard > curr_max:\n curr_max = max_sleep_per_guard\n max_guard = guard\n max_minute = guards_to_total_sleep[guard].index(curr_max)\n part2 = int(max_guard) * int(max_minute)\n\n # maximum minute slept?\n\n # for each minute of each date the guard was active, sum all the items in the lists.\n times_asleep = []\n for date in dates_map:\n if dates_to_guards[date] == guard_who_slept_most:\n times_asleep.append(dates_map[date])\n\n total_times_asleep = [sum(map(int, x)) for x in zip(*times_asleep)]\n minute_most_asleep = total_times_asleep.index(max(total_times_asleep))\n part1 = minute_most_asleep * int(guard_who_slept_most)\n return (part1, part2)\n\n\nprint(\"Part 
1: \", both(data), \" alt: \", both_alt(data_split))\n\n\ntest_input = \"\"\"\n[1518-11-01 00:00] Guard #10 begins shift\n[1518-11-01 00:05] falls asleep\n[1518-11-01 00:25] wakes up\n[1518-11-01 00:30] falls asleep\n[1518-11-01 00:55] wakes up\n[1518-11-01 23:58] Guard #99 begins shift\n[1518-11-02 00:40] falls asleep\n[1518-11-02 00:50] wakes up\n[1518-11-03 00:05] Guard #10 begins shift\n[1518-11-03 00:24] falls asleep\n[1518-11-03 00:29] wakes up\n[1518-11-04 00:02] Guard #99 begins shift\n[1518-11-04 00:36] falls asleep\n[1518-11-04 00:46] wakes up\n[1518-11-05 00:03] Guard #99 begins shift\n[1518-11-05 00:45] falls asleep\n[1518-11-05 00:55] wakes up\n\"\"\"\nexpected_result = (240, 4455)\n\nboth_tests = [\n (test_input, expected_result)\n]\n\nrun_tests(both, both_tests, delim=None)\nrun_tests(both_alt, both_tests)\n","repo_name":"nnja/2018_advent_of_code","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"30"} +{"seq_id":"2681282285","text":"import pathlib\n\nimport chex\nfrom flax import optim\nimport jax3d.projects.nesf as j3d\nfrom jax3d.projects.nesf.nerfstatic.nerf import utils\nfrom jax3d.projects.nesf.nerfstatic.utils import train_utils\nimport numpy as np\n\n\ndef test_checkpoint_save_restore(tmp_path: pathlib.Path):\n\n save_dir = j3d.Path(tmp_path)\n\n # Mock checkpoint saving.\n ds_state_origin = np.random.PCG64().state['state']\n\n opt_config = optim.Momentum(learning_rate=0.1, beta=0.1)\n optimizer_ckpt = opt_config.create(target={'var1': 1.0, 'var2': 2.0})\n model_state_ckpt = utils.TrainState(optimizer=optimizer_ckpt)\n step = 0\n\n train_utils.save_checkpoints_for_process(\n model_state=model_state_ckpt,\n ds_state=ds_state_origin,\n step=step,\n save_dir=save_dir,\n )\n\n # Restore dataset state\n data_state_restored = train_utils.restore_ds_checkpoint_for_process(\n save_dir=save_dir)\n assert data_state_restored == ds_state_origin\n\n # Restore model\n optimizer = opt_config.create(target={'var1': 0.0, 'var2': 0.0})\n model_state = utils.TrainState(optimizer=optimizer)\n model_state_restored = train_utils.restore_opt_checkpoint(\n save_dir=save_dir,\n state=model_state,\n )\n chex.assert_trees_all_close(model_state_ckpt.optimizer.target,\n model_state_restored.optimizer.target)\n","repo_name":"google-research/jax3d","sub_path":"jax3d/projects/nesf/nerfstatic/utils/train_utils_test.py","file_name":"train_utils_test.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":700,"dataset":"github-code","pt":"30"} +{"seq_id":"25620946006","text":"import matplotlib.pyplot as plt\nimport numpy\n\nclass Plot():\n\n\n def select_values(self, xcoord, ycoord):\n indices = [index for index,value in enumerate(ycoord) if not numpy.isnan(value)]\n new_xcoord = []\n new_ycoord = []\n for i in indices:\n new_xcoord.append(xcoord[i])\n new_ycoord.append(ycoord[i])\n return new_xcoord, new_ycoord\n\n def plot_initial_dataset(self, dataset, id):\n #First get the time points from the dataset, these are the x-coordinates\n\n xcoord = dataset[id]['times']\n # Scale time to hours\n\n new_xcoord = []\n leg = []\n\n for x in xcoord:\n new_xcoord.append((x - min(xcoord))/float(60*60))\n\n for var in dataset[id]:\n if not var == 'times':\n leg.append(var)\n ycoord = dataset[id][var]\n x, y = self.select_values(new_xcoord, ycoord)\n plt.plot(x, y, '-')\n\n plt.legend(leg)\n plt.ylabel('Value')\n plt.xlabel('Time 
(hours)')\n plt.show()\n\n def plot_results(self, desired_output, pred_output, name_output, plot_name):\n if not (type(pred_output[0]) == list):\n for i in range(len(pred_output)):\n pred_output[i] = [pred_output[i]]\n plt.figure(plot_name)\n num_outputs = len(desired_output[0])\n x = range(0,len(desired_output))\n leg = []\n\n for i in range(num_outputs):\n desired_y = []\n pred_y = []\n leg.append('desired_' + name_output[i])\n leg.append('predicted_' + name_output[i])\n\n for j in range(len(desired_output)):\n desired_y.append(desired_output[j][i])\n pred_y.append(pred_output[j][i])\n plt.plot(x, desired_y, '-')\n plt.plot(x, pred_y, ':')\n\n plt.legend(leg)\n plt.ylabel('Value')\n plt.xlabel('Timesteps')\n plt.show()","repo_name":"wvb1/MoodForecasting","sub_path":"util/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"7675816126","text":"import numpy\nimport math\ncritics={\n 'BTS':{'암수살인':5, '바울':4, '할로윈':1.5},\n '손흥민':{'바울':5, '할로윈':2},\n '레드벨벳':{'암수살인':2.5, '바울':2, '할로윈':1},\n '트와이스':{'암수살인':3.5, '바울':4, '할로윈':5},\n}\nfinal=10\nfor i in critics:\n if i=='BTS':\n continue\n else:\n t=critics[i]\n m=0\n for j in t:\n x=critics['BTS'][j]-critics[i][j]\n x2=x*x\n m+=x2\n ans=math.sqrt(m)\n if ans 22:\n # Outside of business hours\n return build_validation_result(False, 'diningTime', 'Restaurant hours are from 10 a.m. to 10 p.m. Can you specify a time during this range?')\n\n return build_validation_result(True, None, None)\n\n\n\"\"\" --- Functions that control the bot's behavior --- \"\"\"\n\ndef say_hi(intent_request):\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': 'Hi there! 
How can I help you?'})\n\ndef say_bye(intent_request):\n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': 'Bye, have a good day :p'})\n \ndef search_price(intent_request):\n price = get_slots(intent_request)[\"price\"]\n location = get_slots(intent_request)[\"location\"]\n dining_time = get_slots(intent_request)[\"DinningTime\"]\n dining_date = get_slots(intent_request)[\"dinningDate\"]\n number = get_slots(intent_request)[\"NumberPeople\"]\n # phone = get_slots(intent_request)[\"phone\"]\n source = intent_request['invocationSource']\n \n p = 0\n\n if price <= 15:\n p = 1\n elif price > 15 and price <= 30:\n p = 2\n elif price > 30 and price <= 45:\n p = 3\n else:\n p = 4\n \n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\n slots = get_slots(intent_request)\n print(slots)\n validation_result = validate_dining_config(dining_time, dining_date, number)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n print(slots)\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n # if cuisine is not None:\n # output_session_attributes['Price'] = len(cuisine) * 5 # Elegant pricing model\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n\n headers={\"Authorization\": \"Bearer Vv3D0MWqERhHbN7M5C1Wb1tTvsezZeISd8u6T50QI7zxkdzaLXhjQjCdRhiTqR7w1BZtPc722pRAaawBoKjgayUauLDxTV9lzfL12pwUzFVoUmAC-HUZ9s5fc_iCXHYx\"}\n params={\n \"term\":\"restaurants\",\n \"location\":location,\n \"price\": p,\n \"sort_by\": 'rating',\n \"limit\": 3\n }\n myResponse = requests.get(\"https://api.yelp.com/v3/businesses/search\", headers=headers, params=params)\n r = myResponse.json()\n r = r[\"businesses\"]\n \n return_string = \"Here are my suggestions for \" + number + \" people, for \" + dining_date + \" \" + dining_time + \" :\"\n return_string += \" 1. \" + r[0]['name'] + \", located at \" + r[0]['location']['address1'] + \".\" \n return_string += \" 2. \" + r[1]['name'] + \", located at \" + r[1]['location']['address1'] + \".\" \n return_string += \" 3. 
\" + r[2]['name'] + \", located at \" + r[2]['location']['address1'] + \".\" \n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': return_string})\n\n\n\n\n\ndef search_location(intent_request):\n address = get_slots(intent_request)[\"address\"]\n # location = get_slots(intent_request)[\"location\"]\n dining_time = get_slots(intent_request)[\"DinningTime\"]\n dining_date = get_slots(intent_request)[\"dinningDate\"]\n number = get_slots(intent_request)[\"NumberPeople\"]\n # phone = get_slots(intent_request)[\"phone\"]\n source = intent_request['invocationSource']\n\n \n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\n slots = get_slots(intent_request)\n print(slots)\n validation_result = validate_dining_config(dining_time, dining_date, number)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n print(slots)\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n # if cuisine is not None:\n # output_session_attributes['Price'] = len(cuisine) * 5 # Elegant pricing model\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n\n headers={\"Authorization\": \"Bearer Vv3D0MWqERhHbN7M5C1Wb1tTvsezZeISd8u6T50QI7zxkdzaLXhjQjCdRhiTqR7w1BZtPc722pRAaawBoKjgayUauLDxTV9lzfL12pwUzFVoUmAC-HUZ9s5fc_iCXHYx\"}\n params={\n \"term\":\"restaurants\",\n \"location\":address,\n \"sort_by\": 'rating',\n \"limit\": 3\n }\n myResponse = requests.get(\"https://api.yelp.com/v3/businesses/search\", headers=headers, params=params)\n r = myResponse.json()\n r = r[\"businesses\"]\n \n return_string = \"Here are my suggestions for \" + number + \" people, for \" + dining_date + \" \" + dining_time + \" :\"\n return_string += \" 1. \" + r[0]['name'] + \", located at \" + r[0]['location']['address1'] + \".\" \n # return_string += \" 2. \" + r[1]['name'] + \", located at \" + r[1]['location']['address1'] + \".\" \n # return_string += \" 3. 
\" + r[2]['name'] + \", located at \" + r[2]['location']['address1'] + \".\" \n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': return_string})\n\n\n\n\ndef search_restaurant(intent_request):\n restaurant = get_slots(intent_request)[\"Restaurant\"]\n location = get_slots(intent_request)[\"location\"]\n dining_time = get_slots(intent_request)[\"DinningTime\"]\n dining_date = get_slots(intent_request)[\"dinningDate\"]\n number = get_slots(intent_request)[\"NumberPeople\"]\n # phone = get_slots(intent_request)[\"phone\"]\n source = intent_request['invocationSource']\n\n \n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\n slots = get_slots(intent_request)\n print(slots)\n validation_result = validate_dining_config(dining_time, dining_date, number)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n print(slots)\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n # if cuisine is not None:\n # output_session_attributes['Price'] = len(cuisine) * 5 # Elegant pricing model\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n\n headers={\"Authorization\": \"Bearer Vv3D0MWqERhHbN7M5C1Wb1tTvsezZeISd8u6T50QI7zxkdzaLXhjQjCdRhiTqR7w1BZtPc722pRAaawBoKjgayUauLDxTV9lzfL12pwUzFVoUmAC-HUZ9s5fc_iCXHYx\"}\n params={\n \"term\":restaurant,\n \"location\":location,\n \"sort_by\": 'rating',\n \"limit\": 3\n }\n myResponse = requests.get(\"https://api.yelp.com/v3/businesses/search\", headers=headers, params=params)\n r = myResponse.json()\n r = r[\"businesses\"]\n \n return_string = \"Here are my suggestions for \" + number + \" people, for \" + dining_date + \" \" + dining_time + \" :\"\n return_string += \" 1. \" + r[0]['name'] + \", located at \" + r[0]['location']['address1'] + \".\" \n return_string += \" 2. \" + r[1]['name'] + \", located at \" + r[1]['location']['address1'] + \".\" \n return_string += \" 3. 
\" + r[2]['name'] + \", located at \" + r[2]['location']['address1'] + \".\" \n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': return_string})\n\n\n\n\n\n\ndef search_feature(intent_request):\n feature = get_slots(intent_request)[\"feature\"]\n location = get_slots(intent_request)[\"location\"]\n dining_time = get_slots(intent_request)[\"DinningTime\"]\n dining_date = get_slots(intent_request)[\"dinningDate\"]\n number = get_slots(intent_request)[\"NumberPeople\"]\n # phone = get_slots(intent_request)[\"phone\"]\n source = intent_request['invocationSource']\n\n \n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n # Use the elicitSlot dialog action to re-prompt for the first violation detected.\n slots = get_slots(intent_request)\n print(slots)\n validation_result = validate_dining_config(dining_time, dining_date, number)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n print(slots)\n return elicit_slot(intent_request['sessionAttributes'],\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message'])\n\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n # if cuisine is not None:\n # output_session_attributes['Price'] = len(cuisine) * 5 # Elegant pricing model\n\n return delegate(output_session_attributes, get_slots(intent_request))\n\n\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('business')\n \n response = table.scan(FilterExpression=Attr('feature').contains(feature))\n item = response['Items']\n count = 3\n r=[]\n for i in item:\n if count == 0:\n break\n r.append(i)\n count -= 1\n \n return_string = \"Here are my suggestions for \" + number + \" people, for \" + dining_date + \" \" + dining_time + \" :\"\n return_string += \" 1. \" + r[0]['name'] + \", located at \" + r[0]['location'] + \".\" \n return_string += \" 2. \" + r[1]['name'] + \", located at \" + r[1]['location'] + \".\" \n return_string += \" 3. 
\" + r[2]['name'] + \", located at \" + r[2]['location'] + \".\" \n return close(intent_request['sessionAttributes'],\n 'Fulfilled',\n {'contentType': 'PlainText',\n 'content': return_string})\n\n\n\n \n\n \n\n\"\"\" --- Intents --- \"\"\"\n\n\ndef dispatch(intent_request):\n \"\"\"\n Called when the user specifies an intent for this bot.\n \"\"\"\n\n logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))\n\n intent_name = intent_request['currentIntent']['name']\n\n # Dispatch to your bot's intent handlers\n if intent_name == 'Greeting':\n return say_hi(intent_request)\n elif intent_name == 'ThankYou':\n return say_bye(intent_request)\n elif intent_name == 'SearchFeature':\n return search_feature(intent_request)\n elif intent_name == 'SearchLocation':\n return search_location(intent_request) \n elif intent_name == 'SearchPrice':\n return search_price(intent_request)\n elif intent_name == 'SearchRest':\n return search_restaurant(intent_request)\n #return dining(intent_request)\n raise Exception('Intent with name ' + intent_name + ' not supported')\n\n\n\"\"\" --- Main handler --- \"\"\"\n\n\ndef lambda_handler(event, context):\n \"\"\"\n Route the incoming request based on intent.\n The JSON body of the request is provided in the event slot.\n \"\"\"\n # By default, treat the user request as coming from the America/New_York time zone.\n \n #os.environ['TZ'] = 'America/New_York'\n #time.tzset()\n #logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)\n \n #m_test()\n #return {\n # 'statusCode': 200,\n # 'body': json.dumps('Hello from Lambda!')\n #}\n \n \n# def m_test():\n# sqs = boto3.resource('sqs')\n# queue = sqs.get_queue_by_name(QueueName='cc-assignment2-queue.fifo')\n# print(queue.url)\n# print(queue.attributes.get('DelaySeconds'))\n# location = 'New York'\n# cuisine = 'Chinese'\n# msg_body = {\"term\":\"restaurants\", \"location\":location, \"categories\":cuisine, \"sort_by\":'rating', \"limit\":3, \"phone\":'+16464009197', \"NumberPeople\":3, \"dinningDate\":\"05/01/2019\", \"DinningTime\":\"22:00\"}\n# response = queue.send_message(MessageBody=json.dumps(msg_body), MessageGroupId='MyMessageGroupId1234567890', MessageDeduplicationId='MessageDeduplicationId12345678901')\n# print(response.get('MessageId'))\n# print(response.get('MD5OfMessageBody'))\n \n","repo_name":"Sylvia0696/Cloud-Computing","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":18494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19151307399","text":"from __future__ import annotations\n\nimport logging\nimport typing as ty\n\nfrom webmon2 import common, model\n\nfrom .abstract import AbstractSource\n\n_LOG = logging.getLogger(__name__)\n__all__ = (\n \"UnknownInputException\",\n \"get_source\",\n \"sources_name\",\n \"sources_info\",\n \"AbstractSource\",\n)\n\n\ndef _load_plugins() -> None:\n # pylint: disable=unused-import,import-outside-toplevel\n\n from . import dummy, file_input, jamendo, web\n\n try:\n from . import github\n except ImportError:\n _LOG.warning(\"github3 module not found\")\n\n try:\n from . import rss\n except ImportError:\n _LOG.warning(\"feedparser module not found\")\n\n try:\n from . 
import gitlab\n except ImportError:\n _LOG.warning(\"gitlab module not found\")\n\n\n_load_plugins()\n\n\nclass UnknownInputException(Exception):\n pass\n\n\ndef get_source(\n source: model.Source, sys_settings: model.ConfDict\n) -> AbstractSource:\n \"\"\"Get input class according to configuration\"\"\"\n scls = common.find_subclass(AbstractSource, source.kind)\n if scls:\n src = scls(source, sys_settings)\n return src # type: ignore\n\n raise UnknownInputException()\n\n\ndef get_source_class(kind: str) -> ty.Optional[ty.Type[AbstractSource]]:\n scls = common.find_subclass(AbstractSource, kind)\n return scls\n\n\ndef sources_name() -> ty.List[str]:\n return [\n name for name, scls in common.get_subclasses_with_name(AbstractSource)\n ]\n\n\ndef sources_info() -> ty.List[ty.Tuple[str, str, str]]:\n return [\n (name, scls.short_info, scls.long_info)\n for name, scls in common.get_subclasses_with_name(AbstractSource)\n ]\n","repo_name":"KarolBedkowski/webmon","sub_path":"webmon2/sources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"14951378121","text":"# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2022 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nimport ctypes\r\n\r\nfrom logHandler import log\r\n\r\nfrom .constants import (\r\n\tHResult,\r\n\tSystemErrorCodes,\r\n)\r\n\r\n\r\ndef setDPIAwareness() -> None:\r\n\t\"\"\"\r\n\tDifferent versions of Windows inconsistently support different styles of DPI Awareness.\r\n\tThis function attempts to set process DPI awareness using the most modern Windows API method available.\r\n\r\n\tOnly call this function once per instance of NVDA.\r\n\r\n\tOnly call this function when running from source.\r\n\tIt is recommended that you set the process-default DPI awareness via application manifest.\r\n\tSetting the process-default DPI awareness via these API calls can lead to unexpected application behavior.\r\n\t\"\"\"\r\n\t# Support is inconsistent across versions of Windows, so try/excepts are used rather than explicit\r\n\t# version checks.\r\n\t# https://docs.microsoft.com/en-us/windows/win32/hidpi/setting-the-default-dpi-awareness-for-a-process\r\n\ttry:\r\n\t\t# An advancement over the original per-monitor DPI awareness mode,\r\n\t\t# which enables applications to access new DPI-related scaling behaviors on a per top-level window basis.\r\n\t\t# For more information on behaviours, refer to:\r\n\t\t# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setprocessdpiawarenesscontext\r\n\t\tDPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = -4\r\n\t\t# Method introduced in Windows 10\r\n\t\t# https://docs.microsoft.com/en-us/windows/win32/hidpi/dpi-awareness-context\r\n\t\tsuccess = ctypes.windll.user32.SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2)\r\n\texcept AttributeError:\r\n\t\tlog.debug(\"Cannot set DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2\")\r\n\telse:\r\n\t\tif success:\r\n\t\t\treturn\r\n\t\telse:\r\n\t\t\terrorCode = ctypes.GetLastError()\r\n\t\t\tif errorCode == SystemErrorCodes.ACCESS_DENIED:\r\n\t\t\t\t# The DPI awareness is already set,\r\n\t\t\t\t# either by calling this API previously or through the application (.exe) manifest.\r\n\t\t\t\t# This is unexpected as we should only set DPI awareness once.\r\n\t\t\t\t# NVDA sets DPI awareness from the 
manifest,\r\n\t\t\t\t# however this function should only be called when running from source.\r\n\t\t\t\tlog.error(\"DPI Awareness already set.\")\r\n\t\t\t\treturn\r\n\t\t\telif errorCode == SystemErrorCodes.INVALID_PARAMETER:\r\n\t\t\t\tlog.error(\"DPI Awareness function provided invalid argument.\")\r\n\t\t\telse:\r\n\t\t\t\tlog.error(f\"Unknown error setting DPI Awareness. Error code: {errorCode}\")\r\n\r\n\tlog.debug(\"Falling back to older method of setting DPI Awareness\")\r\n\r\n\ttry:\r\n\t\t# https://docs.microsoft.com/en-us/windows/win32/api/shellscalingapi/nf-shellscalingapi-setprocessdpiawareness\r\n\t\t# This window checks for the DPI when it is created and adjusts the scale factor whenever the DPI changes.\r\n\t\t# These processes are not automatically scaled by the system.\r\n\t\tPROCESS_PER_MONITOR_DPI_AWARE = 2\r\n\t\t# Method introduced in Windows 8.1\r\n\t\thResult = ctypes.windll.shcore.SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE)\r\n\texcept AttributeError:\r\n\t\t# Windows 8 / Server 2012 - `shcore` library exists,\r\n\t\t# but `SetProcessDpiAwareness` is not present yet.\r\n\t\tlog.debug(\"Cannot set PROCESS_PER_MONITOR_DPI_AWARE - SetProcessDpiAwareness missing\")\r\n\telse:\r\n\t\tif hResult == HResult.S_OK:\r\n\t\t\treturn\r\n\t\telif hResult == HResult.E_ACCESS_DENIED:\r\n\t\t\t# The DPI awareness is already set,\r\n\t\t\t# either by calling this API previously or through the application (.exe) manifest.\r\n\t\t\t# This is unexpected as we should only set DPI awareness once.\r\n\t\t\t# NVDA sets DPI awareness from the manifest,\r\n\t\t\t# however this function should only be called when running from source.\r\n\t\t\tlog.error(\"DPI Awareness already set.\")\r\n\t\t\treturn\r\n\t\telif hResult == HResult.E_INVALID_ARG:\r\n\t\t\tlog.error(\"DPI Awareness function provided invalid argument.\")\r\n\t\telse:\r\n\t\t\tlog.error(f\"Unknown error setting DPI Awareness. HRESULT: {hResult}\")\r\n\r\n\tlog.debug(\"Falling back to legacy method of setting DPI Awareness\")\r\n\r\n\t# Method introduced in Windows Vista\r\n\t# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-setprocessdpiaware\r\n\tresult = ctypes.windll.user32.SetProcessDPIAware()\r\n\tif result == 0:\r\n\t\terrorCode = ctypes.GetLastError()\r\n\t\tlog.error(f\"Unknown error setting DPI Awareness. Error code: {errorCode}\")\r\n","repo_name":"nvaccess/nvda","sub_path":"source/winAPI/dpiAwareness.py","file_name":"dpiAwareness.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","stars":1802,"dataset":"github-code","pt":"18"} +{"seq_id":"70507004840","text":"import pickle\nimport hashlib\nimport binascii\nimport msgpack\nimport logging\nfrom . 
import Cache, GetState\n\ntry:\n import pandas as pd\nexcept ImportError:\n pd = None\n\nlogger = logging.getLogger('funcacher')\n\n\n# from pymemcache.client.hash import HashClient for multiple memcached servers\ndef msgpack_serializer(key, value):\n if type(value) == str:\n flags = 1\n elif pd and isinstance(value, pd.DataFrame):\n value, flags = value.to_msgpack(), 3\n else:\n value, flags = msgpack.packb(value, use_bin_type=True), 2\n logger.debug('cache[key => %s, value length => %d, flags => %s', key,\n len(value), flags)\n return value, flags\n\n\ndef msgpack_deserializer(key, value, flags):\n logger.debug('cache[key => %s, value length => %d, flags => %s', key,\n len(value), flags)\n if flags == 1:\n return value\n elif flags == 2:\n return msgpack.unpackb(value, encoding='utf-8')\n elif flags == 3:\n return pd.read_msgpack(value)\n raise ValueError(\"Unknown serialization format\")\n\n\ndef pickle_serializer(key, value):\n if isinstance(value, (str, bytes)):\n flags = 1\n else:\n value, flags = pickle.dumps(value, protocol=pickle.HIGHEST_PROTOCOL), 2\n logger.debug('cache[key => %s, value length => %d, flags => %s', key,\n len(value), flags)\n return value, flags\n\n\ndef pickle_deserializer(key, value, flags):\n logger.debug('cache[key => %s, value length => %d, flags => %s', key,\n len(value), flags)\n if flags == 1:\n return value\n elif flags == 2:\n return pickle.loads(value)\n raise ValueError(\"Unknown serialization format\")\n\n\nclass PymemcacheCacher(Cache):\n SERIALIZE_LENGTH_THRESHOLD = 176\n MAX_KEY_LENGTH = 250\n\n def __init__(self, pymemcache_client):\n self.client = pymemcache_client\n\n def args_serializer(self, *args, **kwargs):\n args_bytes = pickle.dumps((args, kwargs))\n args_ascii = binascii.b2a_hqx(args_bytes)\n if len(args_ascii) >= self.SERIALIZE_LENGTH_THRESHOLD:\n args_hash = b''.join([\n hashlib.sha256(args_bytes).digest(),\n hashlib.sha1(args_bytes).digest(),\n hashlib.sha512(args_bytes).digest(),\n hashlib.md5(args_bytes).digest()\n ])\n args_ascii = binascii.b2a_hqx(args_hash)\n return args_ascii\n\n def get(self, key, *args, **kwargs):\n try:\n value = self.client.get(key, *args, **kwargs)\n if value is None:\n logger.info('cache get miss: %s', key)\n return (GetState.miss, None)\n else:\n logger.info('cache get hit: %s', key)\n return (GetState.hit, value)\n except Exception:\n logger.exception('cache get failed: %s', key)\n return ('failed', None)\n\n def set(self, key, value, *args, **kwargs):\n try:\n if self.client.set(key, value, **kwargs):\n logger.info('cache set hit: %s', key)\n return True\n except Exception:\n logger.exception('cache set failed: %s', key)\n # reached only when the set failed or raised\n return False\n","repo_name":"d2207197/funcacher","sub_path":"funcacher/cache/pymemcache.py","file_name":"pymemcache.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42978644228","text":"# This code establishes a multi-connection server for use with the ULI Tech Demo\r\n# Based on the source https://realpython.com/python-sockets/#multi-connection-server\r\nimport sys\r\nimport socket\r\nimport selectors\r\nimport types\r\n\r\nsel = selectors.DefaultSelector()\r\n\r\ndef accept_wrapper(sock):\r\n conn, addr = sock.accept() # Should be ready to read\r\n # print(f\"Accepted connection from {addr}\")\r\n conn.setblocking(False)\r\n data = types.SimpleNamespace(addr=addr, inb=b\"\", outb=b\"\")\r\n events = selectors.EVENT_READ | selectors.EVENT_WRITE\r\n sel.register(conn, events, 
data=data)\r\n\r\n\r\ndef service_connection(key, mask):\r\n    sock = key.fileobj\r\n    data = key.data\r\n    if mask & selectors.EVENT_READ:\r\n        recv_data = sock.recv(1024) # Should be ready to read\r\n        if recv_data:\r\n            data.outb += recv_data\r\n        else:\r\n            # print(f\"Closing connection to {data.addr}\")\r\n            sel.unregister(sock)\r\n            sock.close()\r\n    if mask & selectors.EVENT_WRITE:\r\n        if data.outb:\r\n            # print(f\"Echoing {data.outb!r} to {data.addr}\")\r\n            data_string = data.outb.decode('utf-8').rstrip(\"\\n\")\r\n            print(data_string) # Data in string form.\r\n\r\n            write_to_file(data_string)\r\n\r\n            sent = sock.send(data.outb) # Should be ready to write\r\n            data.outb = data.outb[sent:]\r\n\r\n\r\ndef write_to_file(receivedData):\r\n    # the with-block closes the file on exit; an explicit f.close() is redundant\r\n    with open('Transferred Data', 'a') as f:\r\n        f.write(receivedData)\r\n\r\n\r\n\r\n# if len(sys.argv) != 3:\r\n# print(f\"Usage: {sys.argv[0]} <host> <port>\")\r\n# sys.exit(1)\r\n\r\n# host, port = sys.argv[1], int(sys.argv[2])\r\nhost, port = \"0.0.0.0\", 7001\r\ndirectory = 'C:\\\\Users\\\\Nolan Dixon\\\\AppData\\\\Roaming\\\\flightgear.org\\\\Export\\\\'\r\nlsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nlsock.bind((host, port))\r\nlsock.listen()\r\nprint(f\"Listening on {(host, port)}\")\r\nlsock.setblocking(False)\r\nsel.register(lsock, selectors.EVENT_READ, data=None)\r\n\r\n\r\ntry:\r\n    while True:\r\n        events = sel.select(timeout=None)\r\n        for key, mask in events:\r\n            if key.data is None:\r\n                accept_wrapper(key.fileobj)\r\n            else:\r\n                service_connection(key, mask)\r\nexcept KeyboardInterrupt:\r\n    print(\"Caught keyboard interrupt, exiting\")\r\nfinally:\r\n    sel.close()","repo_name":"14NDixon/TCP_Multi-Connections","sub_path":"Multi-ConnectionServer.py","file_name":"Multi-ConnectionServer.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19508765282","text":"import random\n\ndef main():\n    try:\n        num = int(input(\"Enter the number of elements in the array: \"))\n    except ValueError:\n        # bail out early: without a valid integer there is no array to reverse\n        print(\"Invalid input. Please enter a valid integer.\")\n        return\n    if num <= 0:\n        print(\"Please enter a positive number greater than zero.\")\n        return\n    array = generate_random_list(num)\n    print('Input array: ', array)\n    num_of_elements = len(array)\n    max_index = num_of_elements-1\n    for index in range(0,max_index):\n        if isEvenArray(array):\n            if index == ((num_of_elements)//2):break\n        else:\n            if index == (num_of_elements-1)//2: break\n        temp = array[index]\n        array[index] = array[max_index-index]\n        array[max_index-index] = temp\n    print('Output array: ', array)\n\ndef generate_random_list(num):\n    random_list = [random.randint(1, 100) for _ in range(num)]\n    return random_list\n\ndef isEvenArray(array):\n    num_of_elements = len(array)\n    if num_of_elements%2==0:\n        return True\n    else:\n        return False\n\nif __name__ == \"__main__\":\n    main()","repo_name":"AneeshSrivastava/2023-DS-Algo","sub_path":"DataStructures/Array/Reversing_an_array.py","file_name":"Reversing_an_array.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11227207615","text":"from createDB import *\nfrom methods import *\n\ncreateDataBase()\n\nwhile True:\n    limpiar()\n    print('---- Menu ----')\n    print('Bienvenido/a a WikiAnime')\n    print('1. Gestionar Personajes')\n    print('2. Reportes')\n    print('3. Configuraciones')\n    print('4. Acerca De')\n    print('5. 
Salir del programa')\n\n op = int(input(' \\nIntroduce en número de la opción que quiere realizar\\n> '))\n\n if op == 1:\n while True:\n try:\n limpiar()\n print('---- Gestionar Personajes ----')\n print('1. Registrar Personaje')\n print('2. Modificar Personaje')\n print('3. Eliminar Personaje')\n print('4. Volver')\n op = int(input('Introduce en número de la opción que quiere realizar\\n> '))\n\n if op == 1:\n registrar()\n elif op == 2:\n modificar()\n elif op == 3:\n eliminar()\n elif op == 4:\n break\n else:\n print('Opcion no valida')\n except ValueError:\n print('Valor no valido')\n time.sleep(3)\n elif op == 2:\n while True:\n try:\n limpiar()\n print('---- Reportes ----')\n print('1. Lista de personajes')\n print('2. Lista de personajes por signo zodiacal')\n print('3. Ubicacion de personajes')\n print('4. Ver carta del personaje en HTML')\n print('5. Lista de personajes por serie')\n print('6. Lista de personajes por estado')\n print('7. Volver')\n op = int(input(' \\nIntroduce en número de la opción que quiere realizar\\n> '))\n\n if op == 1:\n listaDePersonajes()\n elif op == 2:\n signoZodiacal()\n elif op == 3:\n mapaPersonajes()\n elif op == 4:\n htmlPersonaje()\n elif op == 5:\n reportePorSerie()\n elif op == 6:\n reportePorEstado()\n elif op == 7:\n break\n else:\n print('Opcion no valida')\n except ValueError:\n print('Valor no valido')\n time.sleep(3)\n elif op == 3:\n while True:\n try:\n limpiar()\n print('---- Configuraciones ----')\n print('1. Registrar Serie')\n print('2. Modificar Serie')\n print('3. Eliminar Serie')\n print(' ')\n print('4. Registrar Sexo')\n print('5. Modificar Sexo')\n print('6. Eliminar Sexo')\n print(' ')\n print('7. Registrar Estado')\n print('8. Modificar Estado')\n print('9. Eliminar Estado')\n print('10. 
Volver')\n op = int(input('Introduce en número de la opción que quiere realizar\\n> '))\n\n if op == 1:\n agregarRegistro('SERIES')\n elif op == 2:\n modificarRegistro('SERIES', 'ID_Serie')\n elif op == 3:\n eliminarRegistro('SERIES', 'ID_Serie')\n elif op == 4:\n agregarRegistro('SEXOS')\n elif op == 5:\n modificarRegistro('SEXOS', 'ID_Sexo')\n elif op == 6:\n eliminarRegistro('SEXOS', 'ID_Sexo')\n elif op == 7:\n agregarRegistro('ESTADOS')\n elif op == 8:\n modificarRegistro('ESTADOS', 'ID_Estado')\n elif op == 9:\n eliminarRegistro('ESTADOS', 'ID_Estado')\n elif op == 10:\n break\n else:\n print('Opcion no valida')\n except ValueError:\n print('Valor no valido')\n time.sleep(3)\n elif op == 4:\n acercaDe()\n elif op == 5:\n print('SALIENDO DEL PROGRAMA...')\n sleep(2)\n break\n else:\n print('Opcion no valida')\n ","repo_name":"christopherjael/Sistema-de-registros-de-personajes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"1195500833","text":"n = int(input())\na = list(map(int, input().split()))\nans = 0\nfor i in range(1, n):\n m = i-1\n total = 0\n for j in range(0, m+1):\n total += (a[i]-a[j])**2\n ans += total\nprint(ans)\n# I coudn't solve it.(TLE)\n","repo_name":"souhub/atcoder","sub_path":"contests/ABC/194/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35552544153","text":"from datetime import timedelta\nfrom airflow import DAG \nfrom airflow.operators.python import PythonOperator, BranchPythonOperator\nfrom airflow.utils.dates import days_ago\nfrom airflow.models import Variable\nfrom datetime import datetime\nimport study_etl as etl\nimport pandas as pd\nimport json\nimport client_api\nimport write_csv\n\n\netl.DATA_COUNT = int(Variable.get('STUDYETL_DATACOUNT'))\n\nauth_admin = {\n 'username': Variable.get('STUDYETL_API_USERNAME'),\n 'password': Variable.get('STUDYETL_API_PASSWORD')\n} \n\ndef run_login(login_obj = auth_admin):\n result = client_api.login(login_obj)\n print(result)\n return result\n\n\ndef run_etl_data():\n gathered_data = pd.DataFrame(etl.get_data())\n print(gathered_data)\n return gathered_data.to_json()\n\ndef get_integrity(ti):\n json_df = ti.xcom_pull(task_ids = 'run_etl')\n df = json.loads(json_df)\n result = etl.verify_data_integrity(df)\n if result:\n return 'good_data'\n else:\n return 'bad_data'\n\ndef insert_good_data(ti):\n json_df = ti.xcom_pull(task_ids = 'run_etl')\n df = json.loads(json_df)\n etl.execute_good_sql(df, auth_admin)\n \n\ndef insert_bad_data(ti):\n json_df = ti.xcom_pull(task_ids = 'run_etl')\n df = json.loads(json_df)\n etl.execute_bad_sql(df)\n\ndef write_data():\n write_csv.write_updated_data(auth_admin)\n\nwith DAG ('dag_study_etl_api', start_date = datetime(2022,1,1), schedule_interval='5 * * * *', catchup = False) as dag:\n run_login = PythonOperator(\n task_id = 'run_login',\n python_callable=run_login\n )\n run_etl = PythonOperator(\n task_id = 'run_etl',\n python_callable=run_etl_data,\n )\n verify_integrity = BranchPythonOperator(\n task_id = 'verify_integrity',\n python_callable=get_integrity\n )\n insert_good = PythonOperator(\n task_id = 'good_data',\n python_callable = insert_good_data,\n )\n insert_bad = PythonOperator(\n task_id ='bad_data',\n python_callable=insert_bad_data,\n )\n write_good_data = PythonOperator(\n task_id = 'write_data',\n 
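# trigger_rule below lets this task run after whichever branch (good_data or bad_data) actually executed\n        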
python_callable=write_data,\n trigger_rule = 'none_failed_or_skipped'\n )\n run_login >> run_etl >> verify_integrity >> [insert_good, insert_bad] \n insert_good >> write_good_data","repo_name":"AquilaMS/ETL-Airflow-Study","sub_path":"study_dag.py","file_name":"study_dag.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"44212070928","text":"#!/usr/bin/env python3\n\"\"\"Find two numbers m, n such that m, n, m + n, m - n are pentagonal numbers.\nThe number m - n must be the smallest among all possibilities.\nPentagonal numbers satisfy the following property:\n P(i) = n * (3 * i - 1) / 2\n\"\"\"\nfrom math import sqrt, floor\nfrom itertools import count\n\n\ndef is_pentagonal(num):\n \"\"\"Returns true if num is pentagonal, otherwise False.\"\"\"\n discriminant = 1 + 24 * num\n sqrt_disc = floor(sqrt(discriminant))\n return sqrt_disc ** 2 == discriminant and (1 + sqrt_disc) % 6 == 0\n\n\ndef pentagonal(n):\n \"\"\"Returns the nth pentagonal number.\"\"\"\n return n * (3 * n - 1) // 2\n\n\ndef main():\n table = {}\n found = False\n for i in count(1):\n pen1 = pentagonal(i)\n table[i] = pen1\n for j in range(i - 1, 0, -1):\n pen2 = table[j]\n if is_pentagonal(pen1 - pen2) and is_pentagonal(pen1 + pen2):\n print('{} - {} = {}'.format(pen1, pen2, pen1 - pen2))\n found = True\n break\n print(pen1, pen2, end='\\r')\n if found:\n break\n\n\nif __name__ == '__main__':\n print(__doc__)\n from timeit import Timer\n stmt = 'from __main__ import {0}; {0}()'.format('main')\n print('time =', Timer(stmt=stmt).timeit(1))\n","repo_name":"mijikai/euler-python","sub_path":"P044.py","file_name":"P044.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74069768999","text":"#It require 40 such moves to get to the bottom right corner starting at the top left, and so the\n#number of possible paths is {40 choose 20}\n#Can python do this automatically?\n\nn = 20\n\nnum = 1\nden = 1\nfor m in range(1,n+1):\n num *= (m+n)\n den *= m\nprint(num/den)\n\n\n#I think the answer is 137846528820\n\n#I could try to write a combinations function for this","repo_name":"jdgsmallwood/ProjectEuler","sub_path":"project_euler_solutions/problem_15.py","file_name":"problem_15.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"30368973233","text":"if __name__ ==\"__main__\":\n a=\"GeeksforGeeks\"\n a=a.lower()\n b=set(a)\n print(b)\n c=\"aeiou\"\n d=set(c)\n e=b&d\n print(e)\n count=0\n for i in e:\n count+=a.count(i)\n print(count)\n","repo_name":"RakeshKrishna143/Python-String-Programs","sub_path":"Count_number_of_vowels .py","file_name":"Count_number_of_vowels .py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"33078798461","text":"import sys\n\nif __name__ == '__main__':\n n = int(sys.stdin.readline())\n m = int(sys.stdin.readline())\n # 간선정보 a->b로 가는 비용 C\n edge = []\n # 거리 배열\n # dist[i][i] =0 그 이외에는 무한대로 초기화 왜나하면 최소거리를 구하기때문\n dist = [[0 if i == j else int(1e9) for i in range(101)] for j in range(101)]\n # 버스 정보 입력받음\n for _ in range(m):\n a,b,c = map(int,sys.stdin.readline().split())\n # array index 0부터 시작\n a= a -1\n b = b-1\n # 최소거리로 일단 초기화\n dist [a][b] = min (dist[a][b],c)\n # 플로이드 알고리즘\n for k in range(n):\n for i 
in range(n):\n for j in range(n):\n # dist[i][j] > dist[i][k]+dist[k][j] 의 최솟값으로 갱신\n dist[i][j] = min (dist[i][j],dist[i][k]+dist[k][j])\n # print\n for i in range(n):\n for j in range(n):\n # 만약 i에서 j로 갈수 없는 경우에는 그 자리에 0을 출력한다\n if dist[i][j] >= int(1e9):\n print(0, end=\" \")\n else:\n print(dist[i][j], end=\" \")\n print()\n","repo_name":"kevinsung123/algorithm","sub_path":"boj/boj11404_플로이드_230514.py","file_name":"boj11404_플로이드_230514.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74802453481","text":"import os\r\n\r\nos.environ['TF_KERAS'] = '1' # 必须放在前面,才能使用tf.keras\r\nfrom bert4keras.models import build_transformer_model\r\nfrom config import *\r\nfrom tensorflow.keras.layers import Dense, Bidirectional, LSTM, Dropout\r\nfrom tensorflow.keras.models import Model\r\nfrom crf import CRF\r\n\r\n\r\ndef build_model(use_bilstm=True, use_crf=True):\r\n albert = build_transformer_model(config_path, checkpoint_path, model='albert', return_keras_model=False) # 建立模型,加载权重\r\n output = albert.model.output\r\n if use_bilstm:\r\n output = Bidirectional(LSTM(lstm_hidden_size, return_sequences=True))(output)\r\n output = Dropout(dropout_rate)(output)\r\n if use_crf:\r\n activation = None\r\n else:\r\n activation = \"softmax\"\r\n output = Dense(tag_size, activation=activation, kernel_initializer=albert.initializer)(output)\r\n if use_crf:\r\n crf = CRF(dtype=\"float32\")\r\n output = crf(output)\r\n model = Model(albert.model.inputs, output)\r\n model.compile(optimizer=optimizer, loss=crf.loss, metrics=[crf.accuracy])\r\n model.summary()\r\n return model\r\n\r\n\r\nif __name__ == '__main__':\r\n model = build_model()\r\n train_x1 = np.array(bert_sequence_ids)\r\n train_x2 = np.array(bert_datatype_ids)\r\n train_y = np.array(bert_label_ids)\r\n print(train_x1.shape,train_x2.shape, train_y.shape)\r\n # model.fit(x=[train_x1, train_x2], y=train_y, epochs=10, batch_size=8, validation_split=0.2)\r\n","repo_name":"chongzicbo/KG_Tutorial","sub_path":"ner/bert_bilstm_crf.py","file_name":"bert_bilstm_crf.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"18"} +{"seq_id":"70460106920","text":"import logging\nfrom active_mail_filter import get_logger\nfrom active_mail_filter.stoppable_thread import StoppableThread\n\nlogger = get_logger()\nlogger.setLevel(logging.DEBUG)\n\n\ndef worker(counter, interval):\n try:\n my_thread = StoppableThread.current_thread()\n for i in range(0, counter):\n if my_thread.is_stopped():\n logger.debug('caught stop event')\n break\n logger.debug('worker counter == %d', i)\n my_thread.wait(interval)\n except Exception as e:\n logger.debug(e)\n\n\ndef test_nokill():\n counter = 10\n interval = 2\n th = StoppableThread(name='worker', target=worker, args=(counter, interval, ))\n logger.debug('starting thread')\n th.start()\n while th.is_alive():\n if th.elapsed_time() > 30:\n th.kill()\n th.join(1)\n logger.debug('thread done')\n\n\ndef test_kill():\n counter = 10\n interval = 10\n th = StoppableThread(name='worker', target=worker, args=(counter, interval, ))\n logger.debug('starting thread')\n th.start()\n\n th = StoppableThread.find_by_name('worker')\n while th.is_alive():\n if th.elapsed_time() > 33:\n logger.debug('killing thread')\n th.kill()\n th.join(1)\n logger.debug('thread done')\n\n\ndef test_stop():\n counter = 10\n interval = 10\n th = StoppableThread(name='worker', 
target=worker, args=(counter, interval, ))\n logger.debug('starting thread')\n th.start()\n th = StoppableThread.find_by_name('worker')\n while th.is_alive():\n if th.elapsed_time() > 25:\n logger.debug('stopping thread')\n th.stop()\n th.join(1)\n logger.debug('thread done')\n\nif __name__ == '__main__':\n try:\n test_kill()\n test_stop()\n test_nokill()\n except KeyboardInterrupt:\n pass\n","repo_name":"kfrodgers/active-mail-filter","sub_path":"test/test_thread.py","file_name":"test_thread.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31399014515","text":"'''this module is used to extract gene id and calculate gene length'''\nfrom Bio import SeqIO\nimport sys\n\ndef ParseFasta(filename):\n '''\n Note: the input file version should be like,\n A.gene2species.txt\n '''\n # Retrieve species name\n name = filename.split('.')[0]\n\n input = open(filename, 'r')\n \n info = []\n for record in SeqIO.parse(input, 'fasta'):\n id = record.id\n g_len = len(str(record.seq))\n info.append([id, str(g_len)])\n # \n # print([id, g_len])\n \n for i in range(len(info)):\n if name in ['A', 'B', 'C']:\n print('\\t'.join(info[i]) + '\\t' + name + '.subgenome.w60')\n else: \n print('\\t'.join(info[i]) + '\\t' + name) # '\\n' comes with print\n input.close()\n\nParseFasta(sys.argv[1])\n","repo_name":"Youpu-Chen/Myscripts","sub_path":"Evolution/geneid_length.py","file_name":"geneid_length.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8908914435","text":"'''✨ Find Prime between a given range ✨'''\n\nstart_range = 20000\nend_range = 100000\n\n\nprimes = [x for x in range(start_range, end_range) if all(\n x % y != 0 for y in range(2, int(x ** 0.5) + 1))]\n\nprint(primes)\n\n\n# For more on Python follow: https://twitter.com/CodingMantras\n","repo_name":"CodingMantras/python-day-to-day","sub_path":"numbers_codes/prime_numbers_demo3.py","file_name":"prime_numbers_demo3.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"18"} +{"seq_id":"11350054463","text":"from fileinput import close\nimport os\nimport time\ndef fileOperations(file_dir):\n for root, dirs, files in os.walk(file_dir):\n data = open('C:\\/test.txt','a+')\n n = len(files)\n if n >= 1:\n file = root+'\\/'+files[0] \n mtime = os.path.getmtime(file)\n print(root,end='\\t',file=data) # 当前目录路径 \n print(time.ctime(mtime),file=data) # 当前路径下所有非目录子文件\n data.close()\nfileOperations('C:\\posterCenter')","repo_name":"Kurokoy/fileOperations","sub_path":"fileOperations.py","file_name":"fileOperations.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"353571353","text":"import argparse\nimport os\nparser = argparse.ArgumentParser(\n description='cat-костыль для файла'\n)\nparser.add_argument(\n 'files',\n metavar='FILES',\n nargs='+',\n help='два файла'\n)\n\nargs = parser.parse_args()\nfor i in args.files:\n print(os.system('cat ' + i))","repo_name":"alekseik1/python_mipt_study","sub_path":"1sem/lesson_9/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"uk","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"33848961603","text":"class Solution:\n def removeDuplicates(self, s: str, k: int) -> str:\n 
'''\n use stack to keep track of the last characters and their count\n if the counts match, pop the last item from stack\n '''\n \n stack = [[\"#\", 0]] #dummy starting point. we can also use (s[0], 1) but need to adjust loop\n \n for x in s:\n lastChar = stack[-1][0]\n \n if lastChar == x: #if we see the last char again\n stack[-1][1] += 1 #inc char counter\n \n if stack[-1][1] == k: #if we have k amount of characters\n stack.pop() # we pop it out\n \n else:\n stack.append([x, 1]) #otherwise we will append with multiplicity of 1\n \n \n #reconstruct the string from stack\n s = \"\"\n for char, multiplier in stack:\n s += char*multiplier\n \n return s\n","repo_name":"cosmicRover/algoGrind","sub_path":"array-strings_stacks/remove-k-adjacent.py","file_name":"remove-k-adjacent.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19644424843","text":"\"\"\"\nBoard module for chess game. It contains information about every field on game board.\nEvery figure is asking this module for information about other figures.\n\"\"\"\nimport logging\nimport itertools\nfrom .. import figures\nfrom .field import Field\n\n\nclass Board:\n \"\"\"\n Class representing the board.\n \"\"\"\n\n def __init__(self, state):\n self._fields = self._init_fields(state)\n self._size = [8, 8]\n\n def _init_fields(self, state):\n \"\"\"\n Initialize all fields on chessboard from state.\n If no state is provided, then default board is generated.\n \"\"\"\n\n # generate new game state\n if not state:\n logging.debug(\"Generating new game.\")\n return self._init_new_game()\n else:\n # generate game from existing state\n fields = []\n row = []\n field_color = figures.figure.BLACK\n for y_index in range(0, 8):\n for x_index in range(0, 8):\n figure_color = ''\n # to get coordination from 1D array we need to convert 2D to 1D\n # index in state is get as y_index*8+x_index\n index = y_index * 8 + x_index\n field_state = state[index]\n # If field is empty continue\n if not field_state:\n row.append(Field(field_color))\n else:\n logging.debug(\n \"Figure %s found on index %s\", state[index], index)\n # Black\n if state[index].startswith('b'):\n figure_color = figures.figure.BLACK\n # White\n elif state[index].startswith('w'):\n figure_color = figures.figure.WHITE\n else:\n logging.error(\n \"Undefined figure color %s\", state[index])\n # King found\n if state[index].endswith('ki'):\n figure_type = figures.figure.KING\n # Knight found\n elif state[index].endswith('kn'):\n figure_type = figures.figure.KNIGHT\n # Rook found\n elif state[index].endswith('r'):\n figure_type = figures.figure.ROOK\n # Bishop found\n elif state[index].endswith('b'):\n figure_type = figures.figure.BISHOP\n # Queen found\n elif state[index].endswith('q'):\n figure_type = figures.figure.QUEEN\n # Pawn found\n elif state[index].endswith('p'):\n figure_type = figures.figure.PAWN\n\n logging.info(\n \"Generating %s with color %s on %s:%s\",\n figure_type,\n figure_color,\n x_index,\n y_index)\n\n # generate figure\n figure = self._generate_figure(\n figure_type, figure_color, x_index, y_index)\n row.append(Field(field_color, figure))\n\n field_color = self._switch_color(field_color)\n\n # save current row\n fields.append(row)\n row = []\n\n return fields\n\n def _switch_color(self, color):\n \"\"\"\n Switch current color to opposite color.\n \"\"\"\n if color == figures.figure.BLACK:\n return figures.figure.WHITE\n else:\n return figures.figure.BLACK\n\n def 
_generate_figure(self, figure_type, color, x_index, y_index):\n        \"\"\"\n        Generate figures by type and color.\n        \"\"\"\n\n        if figure_type == figures.figure.PAWN:\n            figure = figures.Pawn(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n        if figure_type == figures.figure.ROOK:\n            figure = figures.Rook(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n        if figure_type == figures.figure.KNIGHT:\n            figure = figures.Knight(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n        if figure_type == figures.figure.BISHOP:\n            figure = figures.Bishop(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n        if figure_type == figures.figure.QUEEN:\n            figure = figures.Queen(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n        if figure_type == figures.figure.KING:\n            figure = figures.King(\n                x_index,\n                y_index,\n                figure_type,\n                color,\n                self)\n\n        return figure\n\n    def _init_new_game(self):\n        \"\"\"\n        Generate figures array for new game.\n        \"\"\"\n        fields = []\n        row = []\n        field_color = figures.figure.BLACK\n        for y_index in range(0, 8):\n            for x_index in range(0, 8):\n                figure_type = None\n                # resolve figure color\n                if y_index in [0, 1]:\n                    figure_color = figures.figure.WHITE\n                else:\n                    figure_color = figures.figure.BLACK\n\n                # resolve type of figure\n                if y_index in [1, 6]:\n                    figure_type = figures.figure.PAWN\n                if y_index in [0, 7] and x_index in [0, 7]:\n                    figure_type = figures.figure.ROOK\n                if y_index in [0, 7] and x_index in [1, 6]:\n                    figure_type = figures.figure.KNIGHT\n                if y_index in [0, 7] and x_index in [2, 5]:\n                    figure_type = figures.figure.BISHOP\n                if y_index in [0, 7] and x_index in [3]:\n                    figure_type = figures.figure.QUEEN\n                if y_index in [0, 7] and x_index in [4]:\n                    figure_type = figures.figure.KING\n\n                logging.info(\n                    \"Generating %s with color %s on %s:%s\",\n                    figure_type,\n                    figure_color,\n                    x_index,\n                    y_index)\n\n                # generate figure\n                if figure_type:\n                    figure = self._generate_figure(\n                        figure_type, figure_color, x_index, y_index)\n                    row.append(Field(field_color, figure))\n                else:\n                    row.append(Field(field_color))\n\n                field_color = self._switch_color(field_color)\n\n            # save current row\n            fields.append(row)\n            row = []\n\n        return fields\n\n    def get_figure(self, x_index, y_index):\n        \"\"\"\n        Return figure object on specified position.\n        Returns None if no object is on specified position.\n        \"\"\"\n        return self._fields[y_index][x_index].get_figure()\n\n    def get_king(self, figure_color):\n        \"\"\"\n        Return king of specified color.\n        This method can be used for checking winning conditions.\n        \"\"\"\n\n        for (position_x, position_y) in itertools.product(range(8), repeat=2):\n            fig = self.get_figure(position_x, position_y)\n            # get_owner must be called: comparing the bound method itself to\n            # figure_color would always be False and no king would be found\n            if (fig and fig.get_type() == figures.figure.KING and\n                    fig.get_owner() == figure_color):\n                return fig\n\n        return None\n\n    def remove_figure(self, x_index, y_index):\n        \"\"\"\n        Remove figure object on specified position.\n        \"\"\"\n        self._fields[y_index][x_index].remove_figure()\n\n    def move_figure(self, move_from, move_to):\n        \"\"\"\n        Move figure from one field to other.\n        \"\"\"\n        from_field = self._fields[move_from[1]][move_from[0]]\n        to_field = self._fields[move_to[1]][move_to[0]]\n\n        figure = from_field.get_figure()\n        to_field.set_figure(figure)\n        from_field.remove_figure()\n\n    def get_size(self):\n        \"\"\"\n        Return size of the board (x,y).\n        \"\"\"\n        return self._size\n\n    def _test_position(self, x_index, y_index):\n        \"\"\"\n        Test if the figure on position is threatened by another figure.\n        \"\"\"\n        test_figure = self.get_figure(x_index, y_index)\n\n        for (position_x, position_y) in 
itertools.product(range(8), repeat=2):\n fig = self.get_figure(position_x, position_y)\n if fig:\n owner = fig.get_owner()\n logging.info(\n \"Testing figure on position %s:%s with color %s\",\n position_x,\n position_y,\n owner)\n # Don't test owner figures\n if owner != test_figure.get_owner():\n # Oponent figure can be moved to destination\n logging.info(\"Testing if %s can be moved to %s:%s\",\n fig.get_type(), x_index, y_index)\n if fig._test_move(x_index, y_index):\n return False\n\n # Position is not threating figure\n return True\n","repo_name":"Zlopez/chess","sub_path":"chess/board/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":9610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39814356716","text":"import json\nfrom fields import Fields\n\n\nclass BusStopFilter:\n @staticmethod\n def by_types(lines, stop_types):\n for line in lines:\n for stop in line.stops:\n if stop.stop_type != \"\" and stop.stop_type in stop_types:\n yield stop.name\n\n @staticmethod\n def only_transfers(lines):\n stops_count = {}\n\n for line in lines:\n for stop in line.stops:\n if stop.stop_id not in stops_count:\n stops_count[stop.stop_id] = 0\n stops_count[stop.stop_id] += 1\n\n if stops_count[stop.stop_id] > 1:\n yield stop.name\n\n\nclass LinesValidation:\n @staticmethod\n def has_error_start_end(lines):\n for line in lines:\n error = line.find_error_start_end()\n if error:\n print(error)\n return False\n return True\n\n @staticmethod\n def validate_starts_ends(lines):\n if LinesValidation.has_error_start_end(lines):\n LinesValidation.print_stops_grouped(lines)\n\n @staticmethod\n def print_stops_grouped(lines):\n start_stops = set(BusStopFilter.by_types(lines, \"S\"))\n transfer_stops = set(BusStopFilter.only_transfers(lines))\n end_stops = set(BusStopFilter.by_types(lines, \"F\"))\n print(f\"Start stops: {len(start_stops)} {sorted(start_stops)}\")\n print(f\"Transfer stops: {len(transfer_stops)} {sorted(transfer_stops)}\")\n print(f\"End stops: {len(end_stops)} {sorted(end_stops)}\")\n\n @staticmethod\n def validate_times(lines):\n errors = []\n for line in lines:\n error = line.find_error_time()\n if error:\n errors.append(error)\n LinesValidation.print_times(errors)\n\n @staticmethod\n def print_times(time_errors):\n print(f\"Arrival time test:\")\n if time_errors:\n print(*time_errors, sep='\\n')\n else:\n print(\"OK\")\n\n @staticmethod\n def validate_ondemand_stops(lines):\n on_demand = set(BusStopFilter.by_types(lines, \"O\"))\n start_end = set(BusStopFilter.by_types(lines, \"SF\"))\n start_end_transfer = start_end.union(BusStopFilter.only_transfers(lines))\n invalid = on_demand.intersection(start_end_transfer)\n LinesValidation.print_invalid_ondemand(invalid)\n\n @staticmethod\n def print_invalid_ondemand(invalid_stops):\n print(f\"On demand stops test:\")\n if invalid_stops:\n print(f\"Wrong stop type: {sorted(list(invalid_stops))}\")\n else:\n print(\"OK\")\n\n\nclass BusLinesReader:\n @staticmethod\n def read():\n json_text = input()\n return json.loads(json_text)\n\n\nclass BusStop:\n def __init__(self, stop_id, name, next_stop, stop_type, time):\n self.stop_id = stop_id\n self.name = name\n self.next_stop = next_stop\n self.stop_type = stop_type\n self.time = time\n\n\nclass BusLine:\n def __init__(self, line_id):\n self.line_id = line_id\n self.stops = []\n\n def get_stop(self, stop_id):\n for stop in self.stops:\n if stop.stop_id == stop_id:\n return stop\n return None\n\n def 
find_error_start_end(self):\n start_count = sum(1 for stop in self.stops if stop.stop_type == \"S\")\n end_count = sum(1 for stop in self.stops if stop.stop_type == \"F\")\n if start_count < 1 or end_count < 1:\n return f\"There is no start or end stop for the line: {self.line_id}.\"\n if start_count > 1 or end_count > 1:\n return f\"There are too many start or end stops for the line: {self.line_id}.\"\n return None\n\n def find_error_time(self):\n for stop in self.stops:\n if stop.stop_type == \"F\":\n continue\n next_stop = self.get_stop(stop.next_stop)\n if stop.time > next_stop.time:\n return f\"{Fields.id} line {self.line_id}: wrong time on station {next_stop.name}\"\n return None\n","repo_name":"ivanelisandro/PythonStudiesBusCompany","sub_path":"Easy Rider Bus Company/task/easyrider/bus_entities.py","file_name":"bus_entities.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39903640505","text":"\nfrom binascii import unhexlify\nfrom socket import inet_aton\n\ndef header_checksum(header, size):\n cksum = 0\n pointer = 0\n\n # The main loop adds up each set of 2 bytes. They are first converted to strings and then concatenated\n # together, converted to integers, and then added to the sum.\n while size > 1:\n cksum += int((str(\"%02x\" % (header[pointer],)) +\n str(\"%02x\" % (header[pointer + 1],))), 16)\n size -= 2\n pointer += 2\n if size: # This accounts for a situation where the header is odd\n cksum += header[pointer]\n\n cksum = (cksum >> 16) + (cksum & 0xffff)\n cksum += (cksum >> 16)\n\n return (~cksum) & 0xFFFF\n\ndef cs(data):\n data = data.split()\n data = [int(item,16) for item in data]\n return \"%04x\" % (header_checksum(data, len(data)),)\n\n\n\n# checksum functions needed for calculation checksum\n# def cs(msg):\n# s = 0\n# # loop taking 2 characters at a time\n# for i in range(0, len(msg), 2):\n# w = (msg[i] << 8) + (msg[i + 1])\n# s = s + w\n\n# s = (s >> 16) + (s & 0xffff)\n# # s = s + (s >> 16);\n# # complement and mask to 4 byte short\n# s = ~s & 0xffff\n\n# return s\n\n\n\n\nfd= open(\"info.txt\", 'r') \nLines = fd.readlines()\n\ndest_mac = Lines[6][:17] #destination mac \nsrc_mac = Lines[5][:17] #source mac \nproto3 = \"08 00\" #layer 3 protocol number \nver=\"45\" #version, header length\ndiff = \"00\" #diffserv\nt_len = \"00 28\" #total length (\"00 28\" for 40 bytes, \"00 3c\" for 60 bytes)\nid = \"07 c3\" #id\nflags = \"00 00\" #flags 40 00\nttl = \"40\" #TTL\nproto4 = \"06\" #layer 4 protocol number\ncs3 =\"00 00\" #ip check sum\nsrc_ip = inet_aton(Lines[2]).hex() #source ip\ndest_ip =inet_aton(Lines[0]).hex() #destination ip\nsrc_port = \"%04x\" %int(Lines[3]) #src port \n#separate the src port to two bytes\nsrc_port = src_port[:2] + \" \" + src_port[2:]\ndest_port =\"%04x\" %int(Lines[1]) #dest port\n#separate the dest port to two bytes\ndest_port = dest_port[:2] + \" \" + dest_port[2:]\nseq_num =\"17 49 30 d1\" #seq number \nack =\"00 00 00 00\" #ack number\nh_len = \"50 02\" #tcp header length and flags (\"a0 02\" for 40 bytes, \"50 02\" for 20 bytes) \nwsize = \"10 72\" #window size reverseeeee\ncs4 = \"00 00\" #tcp check sum \nup = \"00 00\" #urgent pointer\n\ninterface0 = Lines[4].strip()\n\nip_header = ver + diff + t_len + id + flags + ttl + proto4 + cs3 + src_ip + dest_ip\nip_header=ip_header.replace(\" \",\"\")\nip_header=\" \".join(ip_header[i:i+2] for i in range(0, len(ip_header), 2))\n\n#ip checksum\n#cs3 = 
cs(ip_header)\ncs3='7bd6'\n#convert to hex\n#cs3 = \"%04x\" %int(cs3)\n#seprate the ip checksum to two bytes\ncs3 = cs3[:2] + \" \" + cs3[2:]\n\n#after calculating the ip checksum\nip_header = ver + diff + t_len + id + flags + ttl + proto4 + cs3 + src_ip + dest_ip\nip_header=ip_header.replace(\" \",\"\")\nip_header=\" \".join(ip_header[i:i+2] for i in range(0, len(ip_header), 2))\n\ntcp_header=src_port + dest_port + seq_num + ack + h_len + wsize + cs4 + up\ntcp_header=tcp_header.replace(\" \",\"\")\ntcp_header=\" \".join(tcp_header[i:i+2] for i in range(0, len(tcp_header), 2))\n\n\n#psudo header ip checksum\npsudo_header = src_ip + dest_ip + \"0800\" + \"%04x\" %(len(tcp_header)//2)\npsudo_header=psudo_header.replace(\" \",\"\")\npsudo_header=\" \".join(psudo_header[i:i+2] for i in range(0, len(psudo_header), 2))\n\n#tcp checksum\n#cs4 = cs(psudo_header.encode() + tcp_header.encode())\ncs4='503f'\n#convert to hex\n#cs4 = \"%04x\" %int(cs4)\n#seprate the tcp checksum to two bytes\ncs4 = cs4[:2] + \" \" + cs4[2:]\n\n#after checksum, the packet is ready to be sent\ntcp_header = src_port + dest_port + seq_num + ack + h_len + wsize + cs4 + up\ntcp_header=tcp_header.replace(\" \",\"\")\ntcp_header=\" \".join(tcp_header[i:i+2] for i in range(0, len(tcp_header), 2))\n\n\n#create tcp_syn packet \ntcp_syn = dest_mac + src_mac +proto3 + ip_header + tcp_header\ntcp_syn=tcp_syn.replace(\" \",\"\")\n#remove the spaces in the packet\npkt = tcp_syn.replace(\" \", \"\")\nprint(pkt)\n\n\n#org='d8d86622fcdc080027a220b308004500003c76dd40004006cca7c0a800b45db8d822af2e0050d919e0c300000000a002faf0f7650000020405b40402080a261b9ff10000000001030307'\n#usd='d8d86622fcdc080027a220b308004500002807c3400040063bd6c0a800b45db8d8220f900050174930d10000000050027210e5f20000'\n\n#meee='d8d86622fcdc080027a220b308004500002807c3400040067bd6c0a800b45db8d8220f900050174930d10000000050027210503f0000'\n#test='d8d86622fcdc080027a220b308004500002807c3000040067bd6c0a800b45db8d8220f900050174930d10000000050021072503f0000'","repo_name":"Amiirhosseini/mini-wireshark","sub_path":"Source/tcp_syn_sender.py","file_name":"tcp_syn_sender.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"21259578678","text":"# Pause or resume a Nessus scan based on a schedule set by the user\n\nimport requests\nimport json\nimport time\nimport sys\nimport os\nimport re\nfrom dotenv import load_dotenv\nfrom argparse import ArgumentParser, Namespace\n\n# Disable SSL warnings\nrequests.packages.urllib3.disable_warnings()\nINFO = \"\\033[93m[!]\"\nERR = \"\\033[91m[-]\"\nSUCCESS = \"\\033[92m[+]\"\nRESET = \"\\033[0m\"\nBOLD = \"\\033[1m\"\n\nTIME_FORMAT = \"%Y-%m-%d %H:%M\"\n\n\ndef print_info(message):\n \"\"\"Print an info message\"\"\"\n print(f\"{INFO} INFO: {message}{RESET}\")\n\n\ndef print_error(message):\n \"\"\"Print an error message\"\"\"\n print(f\"{ERR} ERROR: {message}{RESET}\")\n\n\ndef print_success(message):\n \"\"\"Print a success message\"\"\"\n print(f\"{SUCCESS} SUCCESS: {message}{RESET}\")\n\n\ndotenv_path = \".env\"\nload_dotenv(dotenv_path)\n\n\ndef get_args() -> Namespace:\n parser = ArgumentParser(description=\"Pause or resume a Nessus scan based on a schedule set by the user\")\n nessus_group = parser.add_argument_group(\"Nessus\")\n nessus_group.add_argument(\"-S\", \"--server\", action=\"store\", help=\"Nessus server IP address or hostname (default: localhost)\", default=\"localhost\")\n nessus_group.add_argument(\n \"-P\", \"--port\", 
required=False, action=\"store\", help=\"Nessus server port (default: 8834)\", default=8834\n )\n nessus_group.add_argument(\"-s\", \"--scan_id\", action=\"store\", help=\"Nessus scan ID\")\n nessus_group.add_argument(\n \"-a\",\n \"--action\",\n required=True,\n action=\"store\",\n help=\"Action to perform\",\n type=str,\n choices=[\"pause\", \"resume\", \"check\", \"list\"],\n )\n nessus_group.add_argument(\n \"-t\", \"--time\", action=\"store\", help=\"Time to pause or resume the scan. Only used with pause or resume actions (format: YYYY-MM-DD HH:MM)\"\n )\n auth_group = parser.add_argument_group(\"Authentication\")\n auth_group.add_argument(\n \"-aT\",\n \"--api_token\",\n action=\"store\",\n default=os.getenv(\"NESSUS_API_TOKEN\"),\n help=\"Nessus API token (defaults to NESSUS_API_TOKEN in .env file)\",\n type=str,\n )\n auth_group.add_argument(\n \"-c\",\n \"--x_cookie\",\n action=\"store\",\n default=os.getenv(\"NESSUS_X_COOKIE\"),\n help=\"Nessus X-Cookie (defaults to NESSUS_X_COOKIE in .env file)\",\n type=str,\n )\n auth_group.add_argument(\n \"-u\",\n \"--username\",\n action=\"store\",\n default=\"root\",\n help=\"Nessus username (defaults to root)\",\n type=str,\n )\n auth_group.add_argument(\n \"-p\",\n \"--password\",\n action=\"store\",\n default=os.getenv(\"NESSUS_PASSWORD\"),\n help=\"Nessus password (defaults to NESSUS_PASSWORD in .env file)\",\n type=str,\n )\n telegram_group = parser.add_argument_group(\"Telegram\")\n telegram_group.add_argument(\n \"-tT\",\n \"--telegramToken\",\n action=\"store\",\n default=os.getenv(\"TELEGRAM_BOT_TOKEN\"),\n help=\"Telegram bot token (defaults to TELEGRAM_BOT_TOKEN in .env file)\",\n type=str,\n )\n telegram_group.add_argument(\n \"-tC\",\n \"--telegramChatID\",\n action=\"store\",\n default=os.getenv(\"TELEGRAM_CHAT_ID\"),\n help=\"Telegram chat ID (defaults to TELEGRAM_CHAT_ID in .env file)\",\n type=str,\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", help=\"Enable verbose output\")\n args = parser.parse_args()\n return args\n\n\ndef get_scan_status(args):\n url = f\"https://{args.server}:{args.port}/scans/{args.scan_id}\"\n headers = {\"X-Api-Token\": args.api_token, \"X-Cookie\": args.x_cookie}\n response = requests.get(url, headers=headers, verify=False)\n scan = json.loads(response.text)\n if response.status_code != 200:\n return {\"status\": scan['error'], \"name\": \"error\", \"response_code\": response.status_code}\n return {\"status\": scan[\"info\"][\"status\"], \"name\": scan[\"info\"][\"name\"], \"response_code\": response.status_code}\n\ndef get_scans_list(args):\n url = f\"https://{args.server}:{args.port}/scans\"\n headers = {\"X-Api-Token\": args.api_token, \"X-Cookie\": args.x_cookie}\n response = requests.get(url, headers=headers, verify=False)\n scans = json.loads(response.text)\n if response.status_code != 200:\n return {\"status\": scans['error'], \"name\": \"error\", \"response_code\": response.status_code}\n list = []\n for scan in scans[\"scans\"]:\n list.append({\"id\": scan[\"id\"], \"name\": scan[\"name\"], \"status\": scan[\"status\"]})\n \n return {\"status\": list, \"name\": \"scans\", \"response_code\": response.status_code}\n\ndef get_headers(args):\n if args.x_cookie != None and args.api_token != None:\n headers = {\"X-Cookie\": f\"token={args.x_cookie}\", \"X-API-Token\": args.api_token}\n elif args.x_cookie != None and args.api_token == None:\n url = f\"https://{args.server}:{args.port}/nessus6.js\"\n try:\n response = requests.get(url, verify=False)\n except:\n 
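# broad except: any failure from requests.get (DNS, refused connection, TLS) is treated as server unreachable\n            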
print_error(\"Unable to connect to Nessus server. Check server IP and port\")\n sys.exit(1)\n if response.status_code != 200:\n print_error(f'Status code {response.status_code} - {json.loads(response.text)[\"error\"]}')\n sys.exit(1)\n if args.verbose:\n print_info(f\"Obtained X-API-Token\")\n api_token_regex = '\"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}\"'\n token_header = re.findall(api_token_regex, response.text)[0].replace('\"', '')\n headers = {\"X-Cookie\": f\"token={args.x_cookie}\", \"X-API-Token\": token_header}\n elif args.x_cookie == None and args.api_token != None and args.password == None:\n print_error(\"X-Cookie or password is required\")\n sys.exit(1)\n elif args.x_cookie == None and args.api_token != None and args.password != None:\n url = f\"https://{args.server}:{args.port}/session\"\n try:\n response = requests.post(url, data={\"username\": args.username, \"password\": args.password}, verify=False)\n except:\n print_error(\"Unable to connect to Nessus server. Check server IP and port\")\n sys.exit(1)\n if response.status_code != 200:\n print_error(f'Status code {response.status_code} - {json.loads(response.text)[\"error\"]}')\n sys.exit(1)\n if args.verbose:\n print_success(f\"Username and password work!\")\n print_info(f\"Obtained X-Cookie\")\n cookie_header = json.loads(response.text)['token']\n headers = {\"X-Cookie\": f\"token={cookie_header}\", \"X-API-Token\": args.api_token}\n elif args.x_cookie == None and args.api_token == None and args.password != None:\n url = f\"https://{args.server}:{args.port}/session\"\n try:\n response = requests.post(url, data={\"username\": args.username, \"password\": args.password}, verify=False)\n except:\n print_error(\"Unable to connect to Nessus server. Check server IP and port\")\n sys.exit(1)\n if response.status_code != 200:\n print_error(f'Status code {response.status_code} - {json.loads(response.text)[\"error\"]}')\n sys.exit(1)\n if args.verbose:\n print_success(f\"Username and password work!\")\n print_info(f\"Obtained X-Cookie\")\n cookie_header = json.loads(response.text)['token']\n url = f\"https://{args.server}:{args.port}/nessus6.js\"\n try:\n response = requests.get(url, verify=False)\n except:\n print_error(\"Unable to connect to Nessus server. 
Check server IP and port\")\n sys.exit(1)\n if response.status_code != 200:\n print_error(f'Status code {response.status_code} - {json.loads(response.text)[\"error\"]}')\n sys.exit(1)\n if args.verbose:\n print_info(f\"Obtained X-API-Token\")\n api_token_regex = '\"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}\"'\n token_header = re.findall(api_token_regex, response.text)[0].replace('\"', '')\n headers = {\"X-Cookie\": f\"token={cookie_header}\", \"X-API-Token\": token_header}\n else:\n print_error(\"X-Cookie or password is required\")\n sys.exit(1)\n return headers\n \ndef scan_actions(args: Namespace) -> None:\n if args.action == \"pause\" or args.action == \"resume\":\n url = f\"https://{args.server}:{args.port}/scans/{args.scan_id}/{args.action}\"\n headers = {\"X-Api-Token\": args.api_token, \"X-Cookie\": args.x_cookie}\n response = requests.post(url, headers=headers, verify=False)\n if response.status_code != 200:\n print_error(f'Status code {response.status_code} - {json.loads(response.text)[\"error\"]}')\n if args.telegramToken and args.telegramChatID:\n telegram_bot_sendtext(\n f\"Nessus Error: {response.status_code} - Scan {args.scan_id} not {args.action}\",\n args\n )\n sys.exit(1)\n else:\n print_error('Invalid action specified (must be \"pause\" or \"resume\")')\n sys.exit(1)\n\n# Send telegram message\ndef telegram_bot_sendtext(bot_message: str, args: Namespace) -> None:\n # check if telegramToken and telegramChatID are set if action is not check\n if args.telegramToken != None and args.telegramChatID != None and args.action not in [\"check\", \"list\"]:\n telegram_message = bot_message.replace(\" \", \"%20\")\n telegram_url = f\"https://api.telegram.org/bot{args.telegramToken}/sendMessage?chat_id={args.telegramChatID}&text={telegram_message}\"\n try:\n response = requests.get(telegram_url)\n return response.json()\n except:\n print(\"Error sending telegram message. 
Check token and chat ID\")\n sys.exit(1)\n else:\n return\n \ndef isTimeFormat(input):\n try:\n time.strptime(input, TIME_FORMAT)\n return True\n except ValueError:\n return False\n \ndef reformat_time(input):\n try:\n formatted_time = time.strptime(input, TIME_FORMAT)\n return time.strftime(TIME_FORMAT, formatted_time)\n except ValueError:\n return False\n\n\ndef main():\n args = get_args()\n \n formatted_time = None\n \n if args.action not in [\"check\", \"list\"]:\n # check if time is specified and if it is in the correct format\n if args.time is not None and isTimeFormat(args.time) == False:\n print_error(\"Invalid time format (YYYY-MM-DD HH:MM)\")\n sys.exit(1)\n # check if time is specified and formatted close to the correct format\n elif args.time is not None and args.action not in [\"check\", \"list\"]:\n formatted_time = reformat_time(args.time)\n # if the time is in the past then exit\n if formatted_time < time.strftime(TIME_FORMAT):\n print_error(\"Time specified is in the past\")\n sys.exit(1)\n\n # check if scan_id is specified for all actions except list before getting headers\n if args.action not in [\"list\"]:\n if args.scan_id is None:\n print_error(\"Scan ID is required to run that action\")\n sys.exit(1)\n\n # get X-API-Token and X-Cookie\n headers = get_headers(args)\n args.api_token = headers[\"X-API-Token\"]\n args.x_cookie = headers[\"X-Cookie\"]\n \n # list scans\n if args.action == \"list\":\n scans = get_scans_list(args)\n response_code = scans[\"response_code\"]\n response = scans[\"status\"]\n if response_code != 200:\n print_error(f'Status code {response_code} - {response}')\n sys.exit(1)\n if args.verbose:\n print_success(f\"X-API-Token and X-Cookie work!\")\n print_info(f\"{'ID':<10}{'Name':<70}{'Status':<10}\")\n print_info(f\"{'-'*10:<10}{'-'*70:<70}{'-'*10:<10}\")\n for scan in response:\n print_info(f\"{scan['id']:<10}{scan['name']:<70}{BOLD}{scan['status']:<10}{RESET}\")\n sys.exit(0)\n \n # get scan status\n check = get_scan_status(args)\n status = check[\"status\"]\n name = check[\"name\"]\n response_code = check[\"response_code\"]\n if response_code != 200:\n print_error(f'Status code {response_code} - {status}')\n sys.exit(1)\n \n if args.verbose:\n print_success(f\"X-API-Token and X-Cookie work!\")\n print_info(f'Scan \"{name}\" status: {BOLD}{status}{RESET}')\n \n # if it was just a check then exit else continue\n if args.action == \"check\":\n sys.exit(0)\n\n # check if scan is running or paused and exit if it is already running or paused\n if status == \"running\" and args.action == \"resume\":\n print_error(\"Scan is already running\")\n sys.exit(1)\n elif status == \"paused\" and args.action == \"pause\":\n print_error(\"Scan is already paused\")\n sys.exit(1)\n\n # Scheduled action handling\n if formatted_time is not None:\n telegram_bot_sendtext(f\"Nessus: Scan {name} has been tasked to {args.action} at {formatted_time}\", args)\n if args.verbose:\n print_info(f\"Scan {name} has been tasked to {args.action} at {formatted_time}\")\n while True:\n current_time = time.strftime(\"%Y-%m-%d %H:%M\")\n if current_time == formatted_time:\n break\n time.sleep(50)\n \n # Perform action\n scan_actions(args)\n now_time = time.strftime(\"%Y-%m-%d %H:%M\")\n if args.verbose:\n if args.action == \"pause\":\n print_info(f'{args.action.capitalize().split(\"e\")[0]}ing scan')\n elif args.action == \"resume\": \n print_info(f'{args.action.capitalize().split(\"e\")[1]}ing scan')\n\n # check if scan is running or paused and wait until it is paused or running\n 
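# the loop below polls get_scan_status once a minute (via the time.sleep(60) calls) until the state flips\n    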
while True:\n check = get_scan_status(args)\n status = check[\"status\"]\n name = check[\"name\"]\n response_code = check[\"response_code\"]\n # Error handling\n if response_code != 200:\n print_error(f'Status code {response_code} - {status}')\n telegram_bot_sendtext(\n f\"Nessus Error: {response_code} - Scan {args.scan_id} not {args.action}d. Reason: {status}\",\n args\n )\n sys.exit(1)\n elif status == \"running\" and args.action == \"pause\":\n time.sleep(60)\n elif status == \"paused\" and args.action == \"resume\":\n time.sleep(60)\n else:\n break\n\n now_time = time.strftime(\"%Y-%m-%d %H:%M\")\n print_success(f'Scan \"{name}\" {args.action}d')\n telegram_bot_sendtext(f\"Nessus: Scan {name} {args.action}d at {now_time}\", args)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"minniear/schedule-nesssus","sub_path":"schedule-nessus.py","file_name":"schedule-nessus.py","file_ext":"py","file_size_in_byte":14630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74877028840","text":"\"\"\"\nBot-ассистент проверки домашних заданий Практикум.Домашка.\nВсе настройки модуля в файле settings.py.\nНе меняйте код внутри данного файла.\nПериодичность запросов к API задается константой RETRY_PERIOD.\n\"\"\"\nimport http\nimport logging\nimport requests\nimport sys\nimport telegram\nimport time\n\nfrom json.decoder import JSONDecodeError\n\nimport exceptions as ex\nfrom settings import (ENDPOINT, HEADERS, HOMEWORK_VERDICTS,\n LOG_FORMAT_STRING, LOG_LEVEL, LOG_OUTPUT, LOG_FILENAME,\n PRACTICUM_TOKEN, RETRY_PERIOD, TELEGRAM_TOKEN,\n TELEGRAM_CHAT_ID)\n\n\ndef check_tokens() -> bool:\n \"\"\"Проверка наличия токенов и id телеграм чата.\"\"\"\n return all((PRACTICUM_TOKEN, TELEGRAM_TOKEN, TELEGRAM_CHAT_ID))\n\n\ndef send_message(bot: telegram.Bot, message: str) -> None:\n \"\"\"Отправка сообщения в телеграм канал.\"\"\"\n try:\n bot.send_message(chat_id=TELEGRAM_CHAT_ID, text=message)\n except Exception as error:\n logging.error(f'Ошибка при отправке сообщения телеграм: {error}')\n else:\n logging.debug(f'Сообщение: \"{message}\" успешно отправлено')\n\n\ndef get_api_answer(timestamp: int) -> dict:\n \"\"\"Отправка запроса на сервер и получение ответа.\"\"\"\n payload: dict = {'from_date': timestamp}\n try:\n logging.debug(f'Отправлен запрос на {ENDPOINT}')\n response = requests.get(ENDPOINT, headers=HEADERS, params=payload)\n logging.debug(f'Ответ с {ENDPOINT} получен')\n if response.status_code != http.HTTPStatus.OK:\n message = (f'Ошибка при отправке запроса на {ENDPOINT}: '\n f'status_code - {response.status_code}, ',\n f'reason - {response.reason}, ',\n f'text - {response.text}, ',\n f'headers - {HEADERS}, ',\n f'params - {payload}')\n raise ex.BadRequest(message)\n homework_statuses = response.json()\n return homework_statuses\n except JSONDecodeError as error:\n raise ex.JSONException(error)\n except Exception as error:\n raise ex.UnknownAPIException(ENDPOINT, error)\n\n\ndef check_response(response: dict) -> tuple:\n \"\"\"Проверка API-ответа на соответствие требований документации.\"\"\"\n logging.debug('Старт проверки ответа сервера')\n if not isinstance(response, dict):\n message: str = 'Ошибка: ответ сервера должен содержать тип данных dict'\n raise ex.APIResponseTypeErrorException(message)\n if 'homeworks' not in response.keys():\n message: str = ('Ошибка: в словаре ответа сервера отсутствует ключ'\n ' \"homeworks\"')\n raise ex.DictKeyErrorException(message)\n if 'current_date' not in response.keys():\n message: str = 
('Ошибка: в словаре ответа сервера отсутствует ключ'\n ' \"current_date\"')\n raise ex.DictKeyErrorException(message)\n homeworks = response['homeworks']\n current_date = response['current_date']\n if not isinstance(homeworks, list):\n message: str = 'Ошибка: тип данных объекта \"homeworks\" не list'\n raise ex.APIResponseTypeErrorException(message)\n if not isinstance(current_date, int):\n message: str = 'Ошибка: тип данных объекта \"current_date\" не int'\n raise ex.APIResponseTypeErrorException(message)\n logging.debug('Проверка ответа успешно выполнена')\n return homeworks, current_date\n\n\ndef parse_status(homework: dict) -> str:\n \"\"\"Парсинг статуса домашней работы.\"\"\"\n if not isinstance(homework, dict):\n message: str = 'Ошибка: у объекта \"homework\" тип данных не dict'\n raise ex.DictKeyErrorException(message)\n homework_name: str = homework.get('homework_name')\n status: str = homework.get('status')\n if not homework_name:\n message: str = ('Ошибка: в объекте homework отсутствует ключ'\n ' \"homework_name\"')\n raise ex.DictKeyErrorException(message)\n if not status:\n message: str = ('Ошибка: в объекте homework отсутствует ключ'\n ' \"status\"')\n raise ex.DictKeyErrorException(message)\n verdict = HOMEWORK_VERDICTS.get(status)\n if not verdict:\n message: str = ('Ошибка: в словаре HOMEWORK_VERDICTS отсутствует ключ'\n f' \"{status}\"')\n raise ex.DictKeyErrorException(message)\n return f'Изменился статус проверки работы \"{homework_name}\". {verdict}'\n\n\ndef main() -> None:\n \"\"\"\n Основная логика работы бота.\n 1. Проверка доступности обязательных переменных окружения check_tokens().\n 2. Запрос к API get_api_answer().\n 3. Проверка ответа на корректность данных check_response().\n 4. При наличии корректных данных в ответе парсинг статуса parse_status().\n 5. Отправка сообщений в Telegram send_message().\n 6. 
Пауза RETRY_PERIOD и возврат к началу цикла.\n \"\"\"\n if not check_tokens():\n message = ('Отсутствует обязательная переменная окружения, работа '\n 'модуля не возможна')\n logging.critical(message)\n sys.exit(message)\n\n bot: telegram.Bot = telegram.Bot(token=TELEGRAM_TOKEN)\n timestamp: int = int(time.time())\n old_message: str = ''\n old_error: str = ''\n while True:\n try:\n homework_statuses: dict = get_api_answer(timestamp)\n homeworks, timestamp = check_response(homework_statuses)\n if homeworks:\n message: str = parse_status(homeworks[0])\n if message != old_message:\n send_message(bot, message)\n old_message = message\n else:\n logging.debug('Новая информация о статусе отсутствует')\n except ex.NoImportantInformation as error:\n logging.error(error)\n except Exception as error:\n message: str = f'Сбой в работе программы: {error}'\n logging.error(message, exc_info=error)\n if message != old_error:\n send_message(bot, message)\n old_error = message\n finally:\n time.sleep(RETRY_PERIOD)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n format=LOG_FORMAT_STRING,\n level=LOG_LEVEL,\n handlers=[\n logging.StreamHandler(stream=LOG_OUTPUT),\n logging.FileHandler(LOG_FILENAME)\n ]\n )\n main()\n","repo_name":"last-ui/homework_bot","sub_path":"homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":7476,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18158261543","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nnp.random.seed(333)\n\n# example data\nx = np.random.rand()\ny = np.exp(-x)\n\n# First illustrate basic pyplot interface, using defaults where possible.\nplt.figure()\nplt.errorbar(x, y, xerr=0.2, yerr=0.4, fmt='o')\nplt.title(\"Simplest errorbars, 0.2 in x, 0.4 in y\")\nplt.show()","repo_name":"0xbunyip/dqnet-Thesis","sub_path":"Chapter3/plotting_code/double.py","file_name":"double.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36751006403","text":"import numpy as np\r\nimport pandas as pd\r\nimport cv2\r\nimport os\r\n\r\nIMAGES_PATH = 'personai_icartoonface_rectrain/icartoonface_rectrain'\r\n\r\n#Bounding boxes\r\nBB_INFO_PATH = 'personai_icartoonface_rectrain/icartoonface_rectrain_det.txt'\r\n\r\nOUT_PATH = 'clean_dataset/train_data'\r\n\r\ndef load_bb_info(path):\r\n return pd.read_csv(path, sep='\\t', header=None).to_numpy()\r\n\r\ndef crop_to_bb(img_bb, inpath, outpath):\r\n for i in img_bb:\r\n f_name = i[0].split('/')[-1]\r\n img = cv2.imread(os.path.join(inpath, i[0]))\r\n img = img[i[2]:i[4], i[1]:i[3]]\r\n cv2.imwrite(os.path.join(outpath, f_name), img)\r\n\r\n print('Cropped image {}'.format(f_name))\r\n\r\ndef main():\r\n bb_info = load_bb_info(BB_INFO_PATH)\r\n crop_to_bb(bb_info, IMAGES_PATH, OUT_PATH)\r\n\r\nif __name__ == '__main__':\r\n main()","repo_name":"ece324-2020/MirrorMe","sub_path":"preprocess_icartoon.py","file_name":"preprocess_icartoon.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36925923561","text":"from contextlib import contextmanager\n\nimport arcade\nfrom arcade import Sprite, Window\n\n\nclass MarginCamera:\n def __init__(self, window: Window):\n self.window = window\n if window is None:\n raise Exception('Window required')\n\n # How many pixels to keep as a minimum margin between the character\n # and the edge of the screen.\n 
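# (values are in screen pixels and can be tuned per window size)\n        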
self.left_viewport_margin = 400\n self.right_viewport_margin = 400\n self.bottom_viewport_margin = 100\n self.top_viewport_margin = 300\n\n # Used to keep track of our scrolling\n self.view_bottom = 0\n self.view_left = 0\n\n def update(self, player: Sprite):\n changed = False\n\n # Scroll left\n left_boundary = self.view_left + self.left_viewport_margin\n if player.left < left_boundary:\n self.view_left -= left_boundary - player.left\n changed = True\n\n # Scroll right\n right_boundary = self.view_left + self.window.width - self.right_viewport_margin\n if player.right > right_boundary:\n self.view_left += player.right - right_boundary\n changed = True\n\n # Scroll up\n top_boundary = self.view_bottom + self.window.height - self.top_viewport_margin\n if player.top > top_boundary:\n self.view_bottom += player.top - top_boundary\n changed = True\n\n # Scroll down\n bottom_boundary = self.view_bottom + self.bottom_viewport_margin\n if player.bottom < bottom_boundary:\n self.view_bottom -= bottom_boundary - player.bottom\n changed = True\n\n if changed:\n # Only scroll to integers. Otherwise we end up with pixels that\n # don't line up on the screen\n self.view_bottom = int(self.view_bottom)\n self.view_left = int(self.view_left)\n\n # Do the scrolling\n arcade.set_viewport(self.view_left,\n self.window.width + self.view_left,\n self.view_bottom,\n self.window.height + self.view_bottom)\n\n\nclass ZoomCamera:\n def __init__(self, window: Window):\n if window is None:\n raise Exception('Window required')\n\n self._window = window\n self._zoom = 1\n\n def get_pos(self, x: int, y: int):\n \"\"\"\n Calculates in game position of mouse\n \"\"\"\n w_width, w_height = self._window.get_size()\n left, right, bottom, top = self._window.get_viewport()\n\n x = left + (right - left) * x / w_width\n y = bottom + (top - bottom) * y / w_height\n\n return x, y\n\n def on_scroll(self, direction: int):\n if direction > 0:\n self._zoom = max(self._zoom - 0.1, 0.5)\n elif direction < 0:\n self._zoom = min(self._zoom + 0.1, 2)\n\n def update(self, player: Sprite):\n width, height = self._window.get_size()\n\n c_x = player.center_x\n c_y = player.center_y\n\n width = width * self._zoom\n height = height * self._zoom\n\n self._window.set_viewport(\n left=c_x - width // 2,\n right=c_x + width // 2,\n bottom=c_y - height // 2,\n top=c_y + height // 2,\n )\n\n\nclass FreeCamera:\n def __init__(self, window: Window = None):\n if window is None:\n window = arcade.get_window()\n\n self._window = window\n self._zoom = 1\n\n self.center = window.width // 2, window.height // 2\n\n def get_pos(self, x: int, y: int):\n \"\"\"\n Calculates in game position of mouse\n \"\"\"\n w_width, w_height = self._window.get_size()\n left, right, bottom, top = self._window.get_viewport()\n\n x = left + (right - left) * x / w_width\n y = bottom + (top - bottom) * y / w_height\n\n return x, y\n\n def on_scroll(self, direction: int):\n if direction > 0:\n self._zoom = max(self._zoom - 0.1, 0.5)\n elif direction < 0:\n self._zoom = min(self._zoom + 0.1, 2)\n\n def to_world_coords(self, x: float, y: float):\n return x + self.viewport()[0], y + self.viewport()[2]\n\n @contextmanager\n def view(self):\n old_viewport = self._window.get_viewport()\n self._window.set_viewport(*self.viewport())\n yield\n self._window.set_viewport(*old_viewport)\n\n def viewport(self):\n width, height = self._window.get_size()\n\n width = width * self._zoom\n height = height * self._zoom\n\n c_x, c_y = self.center\n\n return c_x - width // 2, c_x + width // 2, c_y - 
height // 2, c_y + height // 2\n\n def move(self, dx, dy):\n self.center = self.center[0] + dx, self.center[1] + dy","repo_name":"eruvanos/mcore","sub_path":"mcore/game/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31081710181","text":"#!/usr/bin/python3\n\"\"\"\nTo run it needs the following:\ncargo install rustfilt\nrustup component add --toolchain nightly llvm-tools-preview\n\"\"\"\nimport os\nimport re\nimport sys\nimport subprocess\nfrom glob import glob\n\n# Compute the coverage for all fuzzing targets, or not\nFUZZING = False\n\nROOT = os.path.dirname(os.path.abspath(__file__))\ntarget_folder = os.path.join(ROOT, \"target\")\n# Get info about the rust installation\nrustup_info = subprocess.check_output(\"rustup show\", shell=True).decode()\narch = re.findall(r\"Default host: (.+)\", rustup_info)[0]\n\n# Get where LLVM is installed\nsysroot = subprocess.check_output(\"rustc --print sysroot\", shell=True).decode().strip()\nllvm_path = os.path.join(sysroot, \"lib\", \"rustlib\", arch, \"bin\")\n\n# Check that people can read the doc\nif not os.path.exists(os.path.join(llvm_path, \"llvm-profdata\")):\n print(\"PLEASE run:\")\n print(\"rustup component add --toolchain nightly llvm-tools-preview\")\n print(\"cargo install rustfilt\")\n sys.exit(1)\n\n# Settings\ntest_cov_path = os.path.join(target_folder, \"cargo-test-cov-%p-%m.profraw\")\nfinal_cov_path = os.path.join(target_folder, \"total_coverage.profdata\")\ndoc_tests_bin = os.path.join(target_folder, \"doctestbins\")\n\ncov_files = []\nexec_files = []\n\n# Clean up the targets folder\nsubprocess.check_call(\n \"cargo clean\",\n shell=True,\n cwd=ROOT,\n)\n# Create a folder for the test coverage\nos.makedirs(target_folder, exist_ok=True)\n\nout = subprocess.check_output(\n \"cargo test\",\n shell=True,\n cwd=ROOT,\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n env={\n **os.environ,\n \"CARGO_INCREMENTAL\":\"0\",\n \"RUSTFLAGS\": \"-C instrument-coverage\",\n \"RUSTDOCFLAGS\":\"-Zinstrument-coverage -Z unstable-options --persist-doctests {}\".format(doc_tests_bin),\n \"LLVM_PROFILE_FILE\": test_cov_path,\n },\n)\n\n# For the doctests see:\n# https://github.com/rust-lang/rust/issues/79417\n\n# add the test binaries\nexec_files.extend(\n os.path.join(ROOT, file)\n for file in re.findall(r\"Running \\S+ \\((target/.+?)\\)\", out)\n)\n# add all the doc tests binaries\nexec_files.extend(\n file\n for file in glob(os.path.join(doc_tests_bin, \"**\", \"*\"))\n)\ncov_files.extend(\n os.path.join(os.path.dirname(test_cov_path), file)\n for file in os.listdir(os.path.dirname(test_cov_path))\n if file.endswith(\".profraw\")\n)\n\nif FUZZING:\n # Get the list of fuzzing targets\n fuzz_targets = (\n subprocess.check_output(\n \"cargo fuzz list\",\n shell=True,\n cwd=ROOT,\n )\n .decode()\n .split(\"\\n\")[:-1]\n )\n # Generate coverage for all the targets\n for fuzz_target in fuzz_targets:\n subprocess.check_call(\n \"cargo fuzz coverage {}\".format(fuzz_target),\n shell=True,\n cwd=ROOT,\n )\n\n cov_files.extend(\n os.path.join(ROOT, \"fuzz\", \"coverage\", fuzz_target, \"coverage.profdata\")\n for fuzz_target in fuzz_targets\n )\n exec_files.extend(\n os.path.join(\n ROOT, \"target\", arch, \"coverage\", arch, \"release\", fuzz_target\n )\n for fuzz_target in fuzz_targets\n )\n\n# Merge the coverages into an unique file\nsubprocess.check_call(\n \"{}/llvm-profdata merge -sparse {} -o 
{}\".format(\n llvm_path,\n \" \".join(cov_files),\n final_cov_path,\n ),\n shell=True,\n cwd=ROOT,\n)\n\n# Create the report!\nsubprocess.check_call(\n (\n \"{}/llvm-cov report --Xdemangler=rustfilt --instr-profile={} -object {} \"\n ).format(\n llvm_path,\n final_cov_path,\n \" -object \".join(exec_files),\n ),\n shell=True,\n cwd=ROOT,\n)","repo_name":"LucaCappelletti94/hyperloglog-rs","sub_path":"coverage.py","file_name":"coverage.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"18"} +{"seq_id":"36132080343","text":"from django.conf.urls import patterns, url\n\nfrom blog import views\n\nurlpatterns = patterns('',\n # ex: /blog/\n url(r'^$', views.IndexView.as_view(), name='index'),\n # ex: /blog/5/\n url(r'^(?P\\d+)/$', views.DetailView.as_view(), name='detail'),\n # ex: /blog/5/comment\n url(r'^(?P\\d+)/comment/$', views.comment, name=\"comment\"),\n)\n","repo_name":"lukitaa/django-blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"36477922886","text":"# 프로그래머스_레벨 1_실패율\r\n# 220715\r\n\r\n# 실패율이 제일 높은 N개의 stage를 list에 담아 return\r\n# 같은 애들은 오름차순으로\r\n\r\ndef solution(N, stages):\r\n answer = [] \r\n stage_cnt = []\r\n lista = [0] * N\r\n \r\n # lista에 각 난이도 별 실패율을 계산해서 넣어 줌\r\n for i in range(1, N+1):\r\n # 예외 처리 - DivisionByZero case를 생각해 줌\r\n try: lista[i-1] = stages.count(i)/(len(stages) - sum(stage_cnt))\r\n except: lista[i-1] = 0\r\n stage_cnt.append(stages.count(i))\r\n \r\n # 실패율이 높은 순서대로 answer list에 삽입\r\n for i in range(N):\r\n maxi = -1\r\n temp = 9999\r\n for j in range(len(lista)):\r\n if lista[j] > maxi:\r\n maxi = lista[j]\r\n temp = j\r\n \r\n lista[temp] = -1\r\n answer.append(temp+1)\r\n \r\n return answer","repo_name":"jeneve11/Algorithm_Study","sub_path":"프로그래머스/레벨 1_실패율.py","file_name":"레벨 1_실패율.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73527280040","text":"from random import uniform, randint\nfrom matplotlib import pyplot as plt\n\nimport utils\n\n\nNUMBER_OF_ITERATIONS = 100 # number of iterations for the algorithm to run\nEVAPORATION_FACTOR = 0.5 # number between 0 and 1; 0 for complete evaporation, 1 for no evaporation\nUPDATED_PHEROMONE_VALUE = 7 # exact pheromone value for each ant travelled, initial value is 1\nALPHA = 1 # parameter to control the influence of previous trails on the edge\nBETA = 1 # parameter to control the influence of the desirability of the edge\n\n\ndef iterate_ants(ants, edges, number_of_cities):\n trails1 = []\n for a in ants:\n # print('for ant', a)\n visited = [0] * number_of_cities\n trail = []\n count = 0\n q = a\n visited[q] = 1\n trail.append(q)\n while count < number_of_cities - 1:\n # print('on node ', q, ' ', end='')\n sum1 = 0\n numerator = []\n prob = []\n sub_visit = [0] * number_of_cities\n for v in range(len(visited)):\n if visited[v] == 1:\n sub_visit[v] = 1\n for p in range(number_of_cities):\n if sub_visit[p] == 0:\n this_edge = edges[(number_of_cities * p) + q]\n ph = this_edge[2]\n w = this_edge[1]\n ph = ph ** ALPHA\n w = w ** BETA\n if w == 0:\n num = float('inf')\n else:\n num = ph / w\n numerator.append((p, num))\n sum1 += num\n for v in range(len(numerator)):\n prob.append((numerator[v][0], numerator[v][1] / sum1))\n seed = uniform(0, 1)\n sum2 = 0\n temp = 
None\n for p in range(len(prob)):\n sum2 += prob[p][1]\n if sum2 < seed:\n continue\n else:\n temp = prob[p][0]\n break\n visited[temp] = 1\n trail.append(temp)\n count += 1\n q = temp\n trails1.append(trail)\n return trails1\n\n\ndef update_edge(a, b, edges, number_of_cities):\n this1 = edges[(number_of_cities * a) + b]\n this2 = edges[(number_of_cities * b) + a]\n w1 = this1[1]\n w2 = this2[1]\n ph1 = this1[2]\n ph2 = this2[2]\n # print(w1, w2, ph1, ph2)\n edges[(number_of_cities * a) + b] = ((a, b), w1, ph1 + UPDATED_PHEROMONE_VALUE)\n edges[(number_of_cities * b) + a] = ((b, a), w2, ph2 + UPDATED_PHEROMONE_VALUE)\n\n\ndef update_pheromone_and_find_best_path(ts, nodes, edges, number_of_cities):\n min1 = float('inf')\n min_path1 = []\n for t in ts:\n sum1 = 0\n for i in range(len(t) - 1):\n a = t[i]\n b = t[i + 1]\n sum1 += utils.dist(a, b, nodes)\n update_edge(a, b, edges, number_of_cities)\n if min1 > sum1:\n min1 = sum1\n min_path1 = []\n min_path1 += t\n ret_path = min_path1[:]\n ret_path.append(min_path1[0])\n min1 += utils.dist(min_path1[-1], min_path1[0], nodes)\n return min1, ret_path\n\n\ndef evaporation(edges, number_of_cities):\n for e in edges:\n p1 = e[0][0]\n p2 = e[0][1]\n w = e[1]\n ph = e[2]\n edges[(number_of_cities * p1) + p2] = ((p1, p2), w, ph * EVAPORATION_FACTOR)\n\n\ndef execute(points, number_of_cities):\n counter2 = 0\n print()\n print(\"Ant Colony Optimization\")\n nodes = points\n edges = []\n for x in range(number_of_cities):\n for y in range(number_of_cities):\n if x is not y:\n edges.append(((x, y), utils.dist(x, y, nodes), 1))\n else:\n edges.append(((x, y), float(\"inf\"), 1))\n global_minima = float('inf')\n global_min_path = []\n for r in range(NUMBER_OF_ITERATIONS):\n ants = list(range(number_of_cities))\n trails = iterate_ants(ants, edges, number_of_cities)\n min_value, min_path = update_pheromone_and_find_best_path(trails, nodes, edges, number_of_cities)\n evaporation(edges, number_of_cities)\n if global_minima > min_value:\n global_minima = min_value\n global_min_path = min_path\n counter2 += 1\n print(counter2, ': ', min_value, ' ', min_path)\n\n print()\n print('Best path from ant colony optimisation : ', global_min_path, ' ', global_minima)\n\n plot_x = []\n plot_y = []\n\n for p in global_min_path:\n plot_x.append(nodes[p][0])\n plot_y.append(nodes[p][1])\n\n plt.plot(plot_x, plot_y, 'ko-', linewidth=1.5, label='ant colony ' + \"{:.2f}\".format(global_minima))\n plt.legend(loc='upper left')\n plt.show()\n","repo_name":"guroosh/TSP-using-Genetic-and-Ant-Colony-metaheuristic-","sub_path":"ant_colony_optimization.py","file_name":"ant_colony_optimization.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"1306342285","text":"import pymel.core as pm\nimport os\nfrom functools import partial\n\nimport System.utils as utils\nreload(utils)\n\nclass GroupSelected:\n\n def __init__(self):\n #self.directory = '%s/nwModularRiggingTool' %pm.internalVar(userScriptDir = True)\n self.objectsToGroup = []\n\n\n\n def ShowUI(self):\n self.FindSelectionToGroup()\n \n if len(self.objectsToGroup) == 0:\n return\n \n self.UIElements = {}\n \n if pm.window(\"groupSelected_UI_window\", exists = True):\n pm.deleteUI(\"groupSelected_UI_window\")\n \n \n windowWidth = 300\n windowHeight = 150\n self.UIElements[\"window\"] = pm.window(\"groupSelected_UI_window\", width = windowWidth, height = windowHeight, title = \"Group Selected\", sizeable = False)\n \n 
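A note on the ant-colony record above (the GroupSelected UI construction continues below): the city-selection loop in `iterate_ants` is a roulette-wheel draw. Each unvisited city is weighted by `pheromone**ALPHA / distance**BETA`, the weights are normalised into probabilities, and a uniform seed walks the cumulative sum until it is exceeded. A self-contained sketch of the same idea (the function name and inputs are illustrative, not taken from the script):

```python
from random import uniform

def roulette_pick(weights):
    """Pick an index with probability proportional to its weight.

    Mirrors the seed/cumulative-sum walk in iterate_ants; each weight
    stands in for pheromone**ALPHA / distance**BETA of one candidate city.
    """
    total = sum(weights)
    seed = uniform(0, total)
    cumulative = 0.0
    for i, w in enumerate(weights):
        cumulative += w
        if cumulative >= seed:
            return i
    return len(weights) - 1  # guard: rounding left every sum below the seed

# Index 2 wins roughly 60% of the draws.
print(roulette_pick([1.0, 3.0, 6.0]))
```

Drawing the seed from `uniform(0, total)` skips the separate normalisation step, and the explicit fallback covers the rare case where floating-point rounding leaves every cumulative value just below the seed; in the original loop that case would leave `temp` as `None` and crash on `visited[temp]`.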
self.UIElements[\"topLevelColumn\"] = pm.columnLayout(adjustableColumn = True, columnAlign = 'center', rowSpacing = 3)\n self.UIElements[\"groupName_rowColumn\"] = pm.rowColumnLayout(numberOfColumns = 2, columnAttach = (1, 'right', 0), columnWidth = [(1, 80), (2, windowWidth - 90)], parent = self.UIElements[\"topLevelColumn\"])\n \n pm.text(label = \"Group Name: \", parent = self.UIElements[\"groupName_rowColumn\"])\n self.UIElements[\"groupName\"] = pm.textField(text = \"group\", parent = self.UIElements[\"groupName_rowColumn\"])\n \n self.UIElements[\"createAt_rowColumn\"] = pm.rowColumnLayout(numberOfColumns = 3, rowOffset = [(1, 'top', 2), (2, 'both', 2), (3, 'both', 2)], columnWidth = [(1, 80), (2, windowWidth - 170), (3, 80)], parent = self.UIElements[\"topLevelColumn\"])\n pm.text(label = \"Position at: \", parent = self.UIElements[\"createAt_rowColumn\"])\n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n \n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n self.UIElements[\"createAt_lastSelected\"] = pm.button(label = \"Last Selected\", command = self.CreateAtLastSelected, parent = self.UIElements[\"createAt_rowColumn\"])\n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n \n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n self.UIElements[\"createAt_averagePosition\"] = pm.button(label = \"Average Position\", command = self.CreateAtAveragePosition, parent = self.UIElements[\"createAt_rowColumn\"])\n pm.text(label = '', parent = self.UIElements[\"createAt_rowColumn\"])\n \n pm.separator(style = 'in', parent = self.UIElements[\"topLevelColumn\"])\n \n \n columnWidth = (windowWidth / 2) - 5\n self.UIElements[\"button_row\"] = pm.rowLayout(numberOfColumns = 2, columnWidth = [(1, columnWidth), (2, columnWidth)], columnAttach = [(1, 'both', 10), (2, 'both', 10)], columnAlign = [(1, 'center'), (2, 'center')], parent = self.UIElements[\"topLevelColumn\"])\n pm.button(label = \"Accept\", command = self.AcceptWindow, parent = self.UIElements[\"button_row\"])\n pm.button(label = \"Cancel\", command = self.CancelWindow, parent = self.UIElements[\"button_row\"])\n \n \n pm.showWindow(self.UIElements[\"window\"])\n \n \n self.CreateTemporaryGroupRepresentation()\n \n self.CreateAtAveragePosition()\n \n pm.select(self.tempGroupTransform, replace = True)\n pm.setToolTo(\"moveSuperContext\")\n \n \n \n \n def FindSelectionToGroup(self):\n selectedObjects = pm.ls(selection = True, transforms = True)\n \n self.objectsToGroup = []\n for obj in selectedObjects:\n valid = False\n \n if obj.find(\"module_transform\") != -1:\n splitString = obj.rsplit(\"module_transform\")\n \n if splitString[1] == \"\":\n valid = True\n \n \n if valid == False and obj.find(\"Group__\") == 0:\n valid = True\n \n \n if valid == True:\n self.objectsToGroup.append(obj)\n \n \n def CreateTemporaryGroupRepresentation(self):\n #controlGrpFile = \"%s/ControlObjects/Blueprint/controlGroup_control.ma\" %self.directory\n controlGrpFile = \"%s/ControlObjects/Blueprint/controlGroup_control.ma\" %os.environ[\"RIGGING_TOOL_ROOT\"]\n \n pm.importFile(controlGrpFile)\n \n self.tempGroupTransform = pm.rename(\"controlGroup_control\", \"Group__tempGroupTransform__\")\n \n pm.connectAttr(\"%s.scaleY\" %self.tempGroupTransform, \"%s.scaleX\" %self.tempGroupTransform)\n pm.connectAttr(\"%s.scaleY\" %self.tempGroupTransform, \"%s.scaleZ\" %self.tempGroupTransform)\n \n for attr in 
['scaleX', 'scaleZ', 'visibility']:\n            pm.setAttr(\"%s.%s\" %(self.tempGroupTransform, attr), lock = True, keyable = False)\n        \n        \n        pm.aliasAttr('globalScale', \"%s.scaleY\" %self.tempGroupTransform)\n    \n    \n    def CreateAtLastSelected(self, *args):\n        controlPosition = pm.xform(self.objectsToGroup[len(self.objectsToGroup)-1], query = True, worldSpace = True, translation = True)\n        pm.xform(self.tempGroupTransform, worldSpace = True, absolute = True, translation = controlPosition)\n    \n    \n    def CreateAtAveragePosition(self, *args):\n        controlPos = [0.0, 0.0, 0.0]\n        \n        for obj in self.objectsToGroup:\n            objPos = pm.xform(obj, query = True, worldSpace = True, translation = True)\n            controlPos[0] += objPos[0] # add X-pos\n            controlPos[1] += objPos[1] # add Y-pos\n            controlPos[2] += objPos[2] # add Z-pos\n        \n        objectCount = len(self.objectsToGroup)\n        controlPos[0] /= objectCount # average X-pos\n        controlPos[1] /= objectCount # average Y-pos\n        controlPos[2] /= objectCount # average Z-pos\n        \n        pm.xform(self.tempGroupTransform, worldSpace = True, absolute = True, translation = controlPos)\n    \n    \n    def CancelWindow(self, *args):\n        pm.deleteUI(self.UIElements[\"window\"])\n        pm.delete(self.tempGroupTransform)\n    \n    \n    def AcceptWindow(self, *args):\n        groupName = pm.textField(self.UIElements[\"groupName\"], query = True, text = True)\n        \n        if self.CreateGroup(groupName) != None:\n            pm.deleteUI(self.UIElements[\"window\"])\n    \n    \n    \n    def CreateGroup(self, _groupName):\n        fullGroupName = \"Group__%s\" %_groupName\n        \n        # Check for valid names\n        if pm.objExists(fullGroupName):\n            pm.confirmDialog(title = \"Name Conflict\", message = 'Group \"%s\" already exists.' %_groupName, button = \"Accept\", defaultButton = \"Accept\")\n            \n            return None\n        \n        groupTransform = pm.rename(self.tempGroupTransform, fullGroupName)\n\n        \n        # Create container for grouped objects\n        groupContainer = \"Group_container\"\n        if not pm.objExists(groupContainer):\n            pm.container(name = groupContainer)\n        \n        \n        # Store containers to be grouped in a list\n        containers = [groupContainer]\n        for obj in self.objectsToGroup:\n            if obj.find(\"Group__\") == 0:\n                continue\n            \n            objNamespace = utils.StripLeadingNamespace(obj)[0]\n            containers.append(\"%s:module_container\" %objNamespace)\n        \n        \n        # Unlock all grouped containers\n        for c in containers:\n            pm.lockNode(c, lock = False, lockUnpublished = False)\n        \n        \n        if len(self.objectsToGroup) != 0:\n            \n            # Group objects temporarily to simulate final hierarchy\n            tempGroup = pm.group(self.objectsToGroup, absolute = True)\n            groupParent = pm.listRelatives(tempGroup, parent = True)\n            \n            \n            if groupParent != []:\n                pm.parent(groupTransform, groupParent[0], absolute = True)\n            \n            \n            pm.parent(self.objectsToGroup, groupTransform, absolute = True)\n            \n            pm.delete(tempGroup)\n        \n        \n        self.AddGroupToContainer(groupTransform)\n        \n        # Lock all group containers\n        for c in containers:\n            pm.lockNode(c, lock = True, lockUnpublished = True)\n        \n        \n        # Make sure the created group is selected\n        pm.setToolTo(\"moveSuperContext\")\n        pm.select(groupTransform, replace = True)\n        \n        return groupTransform\n    \n    \n    \n    def AddGroupToContainer(self, _group):\n        groupContainer = \"Group_container\"\n        utils.AddNodeToContainer(groupContainer, _group, _includeShapes = True)\n        \n        groupName = _group.partition(\"Group__\")[2]\n        \n        pm.container(groupContainer, edit = True, publishAndBind = [\"%s.translate\" %_group, \"%s_t\" %groupName])\n        pm.container(groupContainer, edit = True, publishAndBind = [\"%s.rotate\" %_group, \"%s_r\" %groupName])\n        
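One observation on the container handling in `CreateGroup` above (the final `globalScale` bind of `AddGroupToContainer` continues right after this aside): every edit is bracketed by a manual unlock (`pm.lockNode(..., lock = False, ...)`) and a matching relock, so an exception in the middle would leave containers unlocked. A hedged sketch of a context manager that makes the pairing automatic; this helper is illustrative and not part of the tool:

```python
from contextlib import contextmanager
import pymel.core as pm

@contextmanager
def unlocked(containers):
    """Temporarily unlock Maya containers, relocking them even on error."""
    for c in containers:
        pm.lockNode(c, lock=False, lockUnpublished=False)
    try:
        yield
    finally:
        for c in containers:
            if pm.objExists(c):
                pm.lockNode(c, lock=True, lockUnpublished=True)

# Usage sketch:
# with unlocked(["Group_container"]):
#     ...reparent or rename the grouped nodes...
```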
pm.container(groupContainer, edit = True, publishAndBind = [\"%s.globalScale\" %_group, \"%s_globalScale\" %groupName])\n\n\n\n\n def CreateGroupAtSpecified(self, _name, _targetGroup, _parent):\n self.CreateTemporaryGroupRepresentation()\n \n parentConstraint = pm.parentConstraint(_targetGroup, self.tempGroupTransform, maintainOffset = False)\n pm.delete(parentConstraint)\n \n scale = pm.getAttr(\"%s.globalScale\" %_targetGroup)\n pm.setAttr(\"%s.globalScale\" %self.tempGroupTransform, scale)\n \n if _parent != None:\n pm.parent(self.tempGroupTransform, _parent, absolute = True)\n \n newGroup = self.CreateGroup(_name)\n \n return newGroup\n\n\n\n\nclass UngroupSelected:\n \n def __init__(self):\n selectedObjects = pm.ls(selection = True, transforms = True)\n \n filteredGroups = []\n for obj in selectedObjects:\n if obj.find(\"Group__\") == 0:\n filteredGroups.append(obj)\n \n if len(filteredGroups) == 0:\n return\n \n # Recursively find and store grouped module namespaces in a list\n groupContainer = \"Group_container\"\n modules = []\n for group in filteredGroups:\n modules.extend(self.FindChildModules(group))\n \n \n \n # Store all the grouped container nodes in a list\n moduleContainers = [groupContainer]\n for module in modules:\n moduleContainer = \"%s:module_container\" %module\n moduleContainers.append(moduleContainer)\n \n \n # Unlock containers\n for container in moduleContainers:\n pm.lockNode(container, lock = False, lockUnpublished = False)\n\n # Ungroup\n for group in filteredGroups:\n childCount = len(pm.listRelatives(group, children = True))\n \n if childCount > 1:\n pm.ungroup(group, absolute = True)\n \n for attr in [\"t\", \"r\", \"globalScale\"]:\n pm.container(groupContainer, edit = True, unbindAndUnpublish = \"%s.%s\" %(group, attr))\n \n parentGroup = pm.listRelatives(group, parent = True)\n \n pm.delete(group)\n \n # Recursively delete empty parent groups\n if len(parentGroup) != 0:\n parentGroup = parentGroup[0]\n children = pm.listRelatives(parentGroup, children = True)\n children = pm.ls(children, transforms = True)\n \n if len(children) == 0:\n pm.select(parentGroup, replace = True)\n UngroupSelected()\n \n # Lock module containers after ungrouping is finished\n for container in moduleContainers:\n if pm.objExists(container):\n pm.lockNode(container, lock = True, lockUnpublished = True)\n \n \n \n \n def FindChildModules(self, _group):\n modules = []\n children = pm.listRelatives(_group, children = True)\n \n # Recursively search group heirarchy for modules\n if children != None:\n for child in children:\n moduleNamespaceInfo = utils.StripLeadingNamespace(child)\n \n if moduleNamespaceInfo != None:\n modules.append(moduleNamespaceInfo[0])\n \n elif child.find(\"Group__\") != -1:\n modules.extend(self.FindChildModules(child))\n \n return modules","repo_name":"Shadowtags/ModularRiggingTool","sub_path":"nwModularRiggingTool/Modules/System/groupSelected.py","file_name":"groupSelected.py","file_ext":"py","file_size_in_byte":12547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12036698481","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport os\n\n# Extract central version information\nwith open(os.path.join(os.path.dirname(__file__), \"ando\", \"VERSION\")) as version_file:\n version = version_file.read().strip()\n\n\nsetup(\n name=\"AnDO\",\n version=version,\n packages=find_packages(),\n include_package_data=True,\n package_data={\n # If any 
package contains *.json or *.csv files, include them:\n \"\": [\"*.json\", '*.csv', '*.tsv'],\n },\n author=\"Jeremy Garcia, Sylvain Takerkart , Julia Sprenger\",\n description=\"Checks the validity of a directory with respect to the ANimal Data Organization (ANDO) specifications \",\n license='MIT',\n install_requires=[],\n entry_points={\n 'console_scripts': ['AnDOChecker=ando.AnDOChecker:main',\n 'AnDOGenerator=ando.tools.generator.AnDOGenerator:main',\n 'BEP032Templater=ando.tools.generator.BEP032Templater:main',\n 'AnDOViewer=ando.tools.viewer.AnDOViewer:main'],\n },\n python_requires='>=3.6',\n extras_require={\n 'tools': ['pandas', 'pynwb'],\n 'test': ['pytest', 'datalad']\n }\n)\n","repo_name":"Remi-Gau/BEP032tools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"31239637564","text":"'''\n------------------------------------------------------------------------\n MOMENTUM.PY\n------------------------------------------------------------------------\nIf extreme, overtraded or wix issue long, or short signals, trigger the\nexecutor\n'''\n\n\n'''\n------------------------------------------------------------------------\n IMPORTS\n------------------------------------------------------------------------\n'''\n\n# Standard library imports\npass\n\n# Third party imports\nimport pandas as pd\nimport numpy as np\n\n\n# Local application imports\nfrom ccat import wix\nfrom ccat import overtraded\nfrom ccat import extreme\n# from ccat import height\n# from ccat import ema\n# from ccat import df_x_df\n\n\n\n'''\n------------------------------------------------------------------------\n CLASSES\n------------------------------------------------------------------------\n'''\n\nclass Momentum:\n\n def __init__(self,\n df_bucket:pd.DataFrame,\n len_ma_top_wix:int,\n len_ma_bottom_wix:int,\n len_ma_top_Extreme:int,\n len_ma_bottom_Extreme:int,\n len_rsi:int,\n overbought:int,\n oversold:int,\n peak:int,\n trough:int,\n col:str = 'price_close'):\n\n\n # Shared\n self.df_bucket = df_bucket\n\n # Wix\n self.len_ma_top_wix = len_ma_top_wix\n self.len_ma_bottom_wix = len_ma_bottom_wix\n\n # Extreme\n self.len_ma_top_Extreme = len_ma_top_Extreme\n self.len_ma_bottom_Extreme = len_ma_bottom_Extreme\n\n # Overtraded\n self.len_rsi = len_rsi\n self.overbought = overbought\n self.oversold = oversold\n self.peak = peak\n self.trough = trough\n self.col = col\n\n\n def wixes(self):\n '''Get Wix signal'''\n\n w = wix.Wix(\n df_bucket = self.df_bucket,\n len_ma_top = self.len_ma_top_wix,\n len_ma_bottom = self.len_ma_bottom_wix)\n\n df_wix = w.get()\n\n return df_wix\n\n\n def extreme(self):\n '''Get Extreme signal\n '''\n\n e = extreme.Extreme(\n df_bucket = self.df_bucket,\n len_ma_top = self.len_ma_top_Extreme,\n len_ma_bottom = self.len_ma_bottom_Extreme)\n\n df_extreme = e.get()\n\n return df_extreme\n\n\n def overtraded(self):\n '''Get Overtraded signal\n '''\n\n o = overtraded.Overtraded(\n df_bucket = self.df_bucket,\n len_rsi = self.len_rsi,\n overbought = self.overbought,\n oversold = self.oversold,\n peak = self.peak,\n trough = self.trough,\n col = self.col)\n\n df_overtraded = o.get()\n\n return df_overtraded\n\n\n def merge(self):\n ''' Merges the top and bottom wick ema's into a df_out dataframe\n '''\n\n # Initialize df_out dataframe\n self.df_out = pd.DataFrame()\n\n # Read the individual signals used in the strategy\n df_w = self.wixes()\n df_o = 
self.overtraded()\n df_e = self.extreme()\n\n # Merge the three dataframes\n # self.df_out = pd.merge(df_w, df_o, on='id')\n\n # Merge the three dataframes\n self.df_out = pd.merge(\n pd.merge(\n df_w,\n df_o,\n on='id'),\n df_e,on='id')\n\n cols = [\n 'signal_wix',\n 'signal_overtraded',\n 'signal_extreme']\n\n # Compiled signal\n self.df_out['signal'] = self.df_out[cols].sum(axis=1)\n\n\n def signals(self):\n '''Triggers the chain of methods and returns the df_out\n dataframe\n '''\n\n self.merge()\n\n return self.df_out\n\n'''\n------------------------------------------------------------------------\n __MAIN__\n------------------------------------------------------------------------\n'''\n\nif __name__ == '__main__':\n\n from ccat import config as cnf\n from ccat import bucket\n\n # Create a momentum strategy for the 1d BTCUSD candles on Bitmex\n\n # Settings\n market_id = 1 # Bitmex\n timeframe_id = 6 # 1d\n\n time_end = cnf.now()\n count = 500\n\n len_ma_top_wix = 40\n len_ma_bottom_wix = 40\n\n len_ma_top_Extreme = 40\n len_ma_bottom_Extreme = 40\n\n len_rsi = 40\n\n overbought = 60\n oversold = 40\n peak = 92\n trough = 32\n\n col = 'price_close'\n\n # Get a bucket object from Bucket\n b = bucket.Bucket(market_id=market_id, timeframe_id=timeframe_id)\n\n # Update the table\n b.update()\n\n # Get a dataframe with all the data for the market and timeframe\n df_bucket = b.read_until(count = count, time_end = time_end)\n\n m = Momentum(\n df_bucket = df_bucket,\n len_ma_top_wix=len_ma_top_wix,\n len_ma_bottom_wix=len_ma_bottom_wix,\n len_ma_top_Extreme=len_ma_top_Extreme,\n len_ma_bottom_Extreme=len_ma_bottom_Extreme,\n len_rsi=len_rsi,\n overbought=overbought,\n oversold=oversold,\n peak=peak,\n trough=trough,\n col=col)\n\n df_signal = m.signals()\n df_s = df_signal[['id', 'signal']]\n df_b = df_bucket[['id', 'time_close', 'price_close']]\n # print(df_s)\n\n df_out = pd.merge(df_b, df_s, on='id')\n\n print(df_out)\n\n\n\n\n\n\n","repo_name":"bliiir/ccat","sub_path":"ccat/controller/strategy/TODO_sample_strategy.py","file_name":"TODO_sample_strategy.py","file_ext":"py","file_size_in_byte":5250,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"10606681111","text":"import csv\nimport sys\nimport json\n\nimport util\n\nfrom ip2location_db import lookup\n\nutil.IPASN_DIR = \"/net/gura/srv/hdd/pyasn/ipv4\"\n\nas_names = json.loads(open(\"../notebooks/asnames.dat\").read())\n\ndef as_name(asnum):\n if asnum is None:\n return None\n elif str(asnum) in as_names:\n return as_names[str(asnum)]\n return \"Unknown\"\n\nipl = lookup.IP2Loc(\"../ip2location.sqlite\")\n\nASNDATE = sys.argv[1]\n\noutwriter = csv.writer(sys.stdout, lineterminator=\"\\n\", delimiter=\"\\t\")\n\nfor ip in sys.stdin:\n ip = ip.strip()\n asn = util.ip2asn(ip, ASNDATE)\n iploc = ipl.lookupv4(ip)\n outwriter.writerow((ip, asn, as_name(asn), \n iploc[\"country_name\"], iploc[\"isp\"], iploc[\"domain\"], iploc[\"usage_type\"],\n ))\n","repo_name":"ralphholz/bc-comparisons","sub_path":"ip_asn_usagedata.py","file_name":"ip_asn_usagedata.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"43332371070","text":"import pyaudio # import necessary libraries for audio recording and storage\nimport wave\nfrom array import array\nimport numpy as np\nimport librosa\nimport soundfile\nfrom sklearn.svm import OneClassSVM\nimport os\nfrom pynput.mouse import 
Button,Controller\nfrom random import uniform\n\n\ndef record(deployed):\n \"\"\"This function records incoming audio by first running for a set\n time; if the user speaks above the set volume threshold then append\n the audio data to an array called frames. If the user has already\n spoke and volume is below the threshold then end the function call. Returns\n the sample width and the array of audio data for processing.\n\n Accepts boolean 'deployed' that toggles displaying recording prompts\n \"\"\"\n FORMAT = pyaudio.paInt16 # format the audio input\n CHANNELS = 1\n RATE = 44100\n CHUNK = 1024\n RECORD_SECONDS = 15\n audio = pyaudio.PyAudio()\n stream = audio.open(format = FORMAT,channels = CHANNELS,\n rate = RATE,\n input = True,\n frames_per_buffer = CHUNK)\n frames = []\n check_state = False\n for _ in range(0,int(RATE / CHUNK * RECORD_SECONDS)): # begin recording process\n data = stream.read(CHUNK,exception_on_overflow = False)\n data_chunk = array('h',data)\n vol = max(data_chunk)\n if vol >= 3200:\n check_state = True\n frames.append(data)\n if not deployed:\n print(\"something said\")\n elif check_state == True and vol < 2500:\n break\n else:\n check_state = False\n if not deployed:\n print(\"nothing\")\n if not deployed:\n print(\"\\n\")\n sample_width = audio.get_sample_size(FORMAT)\n return sample_width,frames,CHANNELS,RATE\n\n\ndef file_writer(FILE_NAME,deployed):\n \"\"\"retrieves the sample width, array of audio data, and record settings\n as they are returned from record(). The function uses that information to write\n a wav file with that data\n\n Note: deployed functions the same as in record()\n \"\"\"\n sample_width,frames,CHANNELS,RATE = record(deployed)\n wf = wave.open(FILE_NAME,'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(sample_width)\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames)) # append frames recorded to file\n wf.close()\n\n\ndef extract_features(file_name,mfcc): # ,mel):\n \"\"\"Extracts the features for ML classification from a given audio file.\n It takes the presence of each feature as arguments such that the model\n can be tuned for feature relevance later\"\"\"\n with soundfile.SoundFile(file_name) as audio_data:\n x = audio_data.read(dtype = \"float64\") # read the file\n sample_rate = audio_data.samplerate\n x_normalized = librosa.util.normalize(x) # normalize the clip's volume\n features = np.array([])\n if mfcc:\n mfccs = np.mean(librosa.feature.mfcc(y = x_normalized, # extract mel frequency cepstral coefficients\n sr = sample_rate,n_mfcc = 4).T,axis = 0)\n mfccs_norm = librosa.util.normalize(mfccs)\n features = np.hstack((features,mfccs_norm)) # append to NumPy array of features\n # if mel:\n # mel = np.mean(librosa.feature.melspectrogram(y = x_normalized, # extract mel spectrogram\n # sr = sample_rate).T,axis = 0)\n # mel_norm = librosa.util.normalize(mel)\n # features = np.hstack((features,mel_norm)) # append to NumPy array of features\n return features\n\n\ndef build_dataset(testing):\n \"\"\"creates a numPy array, containing the features of an audio\n recording\"\"\"\n FILE_NAME = \"recording.wav\"\n file_writer(FILE_NAME,testing) # record a sample\n X = extract_features(FILE_NAME,mfcc = True) # ,mel = False) # extract the sample's features\n os.remove(FILE_NAME)\n return X\n\n\ndef get_training_data(samples,state):\n \"\"\"returns complete arrays of n samples\"\"\"\n observations = np.empty((0,4))\n for i in range(samples):\n X_samples = build_dataset(state) # record and extract features\n examples = np.array([X_samples])\n 
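For orientation in the feature pipeline here (`get_training_data` continues below): `extract_features` mean-pools four MFCC coefficients over time, so every clip collapses to a length-4 vector, and n clips stack into an (n, 4) matrix, which is why the accumulators start as `np.empty((0, 4))`. A quick offline check of that shape contract using a synthetic tone instead of microphone input (the 440 Hz sine is just a stand-in):

```python
import numpy as np
import librosa

# Stand-in clip: one second of a 440 Hz tone at 44.1 kHz (no microphone needed).
sr = 44100
clip = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)

mfccs = np.mean(librosa.feature.mfcc(y=clip, sr=sr, n_mfcc=4).T, axis=0)
print(mfccs.shape)  # (4,) -> one pooled value per MFCC coefficient

batch = np.vstack([mfccs, mfccs])
print(batch.shape)  # (2, 4) -> matches the (n_samples, 4) training matrix
```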
observations = np.append(observations,examples,axis = 0) # append to larger 2D array of features\n observations.reshape(1,-1)\n return observations\n\n\ndef get_sample():\n \"\"\"This function is very similar, however instead of collecting a set pool of\n samples based on the argument provided, it collects a single audio signal\"\"\"\n X_test = np.empty((0,4))\n filename = \"test.wav\"\n file_writer(filename,deployed = True)\n new_sample = extract_features(filename,mfcc = True) # ,mel = True)\n inference = np.array([new_sample])\n X_test = np.append(X_test,inference,axis = 0)\n os.remove(filename)\n return X_test\n\n\ndef generate_data(test_size):\n \"\"\"Creates a synthetic dataset with about a 20%\n difference from sample to sample generated randomly\"\"\"\n synthetic_data = np.empty((0,4))\n X_train = get_training_data(test_size,state = False)\n synthetic_data = np.append(synthetic_data,X_train,axis = 0)\n for i in range(800):\n X_random = X_train * uniform(0.8,1.2) # randomly alter each sample ~20% from the initial sample\n synthetic_data = np.append(synthetic_data,X_random,axis = 0)\n return synthetic_data\n\n\ndef deploy_model(test_size):\n \"\"\"Initializes a OneClassSVM for binary classification, trained on data\n collected from the generate_data() function. After the model is trained, the\n function calls get_sample() and classifies that sound either as a click or not.\n If a click is successful, generate a left click event\"\"\"\n mouse = Controller()\n clf = OneClassSVM(nu = .0001,kernel = 'rbf',gamma = 'auto',max_iter = test_size ** 2)\n X = generate_data(test_size)\n clf.fit(X)\n while True:\n X_predict = get_sample()\n if clf.predict(X_predict) == 1:\n print('Click!')\n mouse.click(Button.left,1)\n # new_trainingX = np.append(X, X_predict,axis = 0)\n # clf.fit(new_trainingX)\n else:\n print(\"failed\")\n print(\"\\n\")\n\n\nif __name__ == \"__main__\":\n deploy_model(5)\n\n","repo_name":"phoenixp123/SoundClicker","sub_path":"Listen.py","file_name":"Listen.py","file_ext":"py","file_size_in_byte":6222,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"37300099794","text":"class TrieNode:\n def __init__(self):\n self.child = {}\n self.end = False\n self.ctr = 0\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n\n def insert(self, s):\n curr = self.root\n curr.ctr += 1\n for c in s:\n if c not in curr.child:\n curr.child[c] = TrieNode()\n curr = curr.child[c]\n curr.ctr += 1\n curr.end = True\n\nclass Solution:\n def longestValidSubstring(self, word: str, forbidden: List[str]) -> int:\n n = len(word)\n trie = Trie()\n for f in forbidden:\n trie.insert(f[::-1])\n res = 0\n currlongest = 0\n for i in range(n):\n curr = trie.root\n found = False\n x = 0\n for j in range(i, max(i - 10, -1), -1):\n if word[j] in curr.child:\n curr = curr.child[word[j]]\n x += 1\n if curr.end:\n found = True\n break\n else:\n break\n if found:\n currlongest = min(max(x - 1, 0), currlongest + 1)\n else:\n currlongest += 1\n res = max(res, currlongest)\n return res","repo_name":"theabbie/leetcode","sub_path":"length-of-the-longest-valid-substring.py","file_name":"length-of-the-longest-valid-substring.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"18"} +{"seq_id":"18030852461","text":"# Define the pizza menu\nmenu = {\n \"Margherita\": 150,\n \"Pepperoni\": 180,\n \"Vegetarian\": 190,\n \"Hawaiian\": 130,\n \"Meat Lovers\": 200\n}\n\n# Define 
the topping menu\ntoppings = {\n \"Mushrooms\": 20,\n \"Olives\": 20,\n \"Onions\": 15,\n \"Peppers\": 25,\n \"Extra Cheese\": 20,\n \"Bacon\": 20,\n \"Sausage\": 15,\n \"Ham\": 30\n}\n\n# Define a function to display the menu\ndef display_menu(menu):\n print(\"Menu:\")\n for item, price in menu.items():\n print(f\"{item} - Rs {price}\")\n\n# Define a function to take the order\ndef take_order(menu, toppings):\n order = {}\n while True:\n display_menu(menu)\n item = input(\"What would you like to order? \")\n if item not in menu:\n print(\"Sorry, that item is not on the menu.\")\n continue\n else:\n order[item] = menu[item]\n while True:\n display_menu(toppings)\n topping = input(\"Would you like to add a topping? (y/n) \")\n if topping.lower() == \"n\":\n break\n elif topping.lower() == \"y\":\n topping_choice = input(\"Which topping would you like to add? \")\n if topping_choice not in toppings:\n print(\"Sorry, that topping is not available.\")\n continue\n else:\n order[topping_choice] = toppings[topping_choice]\n else:\n print(\"Please enter 'y' or 'n'.\")\n more = input(\"Would you like to order anything else? (y/n) \")\n if more.lower() == \"n\":\n break\n return order\n\n# Define a function to calculate the total cost of the order\ndef calculate_total(order):\n total = sum(order.values())\n return total\n\n# Define a function to take the user's details\ndef get_user_details():\n name = input(\"What is your name? \")\n phone = input(\"What is your phone number? \")\n address = input(\"What is your address? \")\n return {\"name\": name, \"phone\": phone, \"address\": address}\n\n# Define the main function to run the pizza ordering system\ndef main():\n print(\"Welcome to our pizza ordering system!\")\n print(\"------------------------------------\")\n order = take_order(menu, toppings)\n total = calculate_total(order)\n user_details = get_user_details()\n print(\"------------------------------------\")\n print(\"Your order:\")\n for item, price in order.items():\n print(f\"{item} - Rs {price}\")\n print(f\"Total: Rs {total}\")\n print(\"------------------------------------\")\n print(\"Your details:\")\n print(f\"Name: {user_details['name']}\")\n print(f\"Phone: {user_details['phone']}\")\n print(f\"Address: {user_details['address']}\")\n print(\"------------------------------------\")\n print(\"Thank you for ordering!\")\n\n# Run the main function\nmain()\n","repo_name":"PrateekJhawar/Python","sub_path":"Pizza_Order.py","file_name":"Pizza_Order.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"70924162919","text":"#!/usr/bin/python3\n\n\n################################################\n### Exemple d'utilisation du module keras_facile\n### Cas d'une variable\n################################################\n\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\n\n\n# Importer le module\nfrom keras_facile import *\n\n\n### Exemple 1 : un seul neurone\ndef exemple1():\n\tprint(\"\\n--- Exemple 1 : un seul neurone ---\")\n\t# Architecture du réseau\n\tmodele = Sequential()\n\tmodele.add(Dense(1, input_dim=1, activation=heaviside, name='Couche_0'))\n\n\t# Définition des poids neurone par neurone\n\tdefinir_poids(modele,0,0,-0.5,1) # definir_poids(modele,couche,rang,coeff,biais)\n\taffiche_poids(modele,0) # affiche poids de la couche 
0\n\n\t# # Exemples d'évaluation\n\tentree = 0\n\t# entree = [7.0]\n\t# entree = np.array([7.0])\n\n\tsortie = evaluation(modele,entree)\n\tprint(\"\\n=== Evaluation ===\")\n\tprint('Entrée :',entree)\n\tprint('Sortie :',sortie)\n\n\t# Affichage graphique de la fonction définie par le réseau sur [a,b] (ici [-5,5])\n\taffichage_evaluation_une_var(modele,-5,5)\n\treturn\n\n\n\n### Exemple 2 : couche 0 deux neurones, couche 1 : un neurone\ndef exemple2():\n\tprint(\"\\n--- Exemple 2 : 2+1 neurones ---\")\n\t# Architecture du réseau\n\tmodele = Sequential()\n\tmodele.add(Dense(2, input_dim=1, activation=heaviside, name='Couche_0'))\n\tmodele.add(Dense(1, activation='linear', name='Couche_1'))\n\n\n\t# Définition des poids neurone par neurone\n\tdefinir_poids(modele,0,0,1,-1) # definir_poids(modele,couche,rang,coeff,biais)\n\tdefinir_poids(modele,0,1,-0.5,1)\n\taffiche_poids(modele,0) # affiche poids de la couche 0\n\tdefinir_poids(modele,1,0,[2,2],-2) # definir_poids(modele,couche,rang,coeff,biais)\n\taffiche_poids(modele,1) \n\n\t# Evaluation\n\t# entree = [7.0]\n\t# entree = np.array([7.0])\n\n\tentree = 0\n\tsortie = evaluation(modele,entree)\n\tprint(\"\\n=== Evaluation ===\")\n\tprint('Entrée :',entree)\n\tprint('Sortie :',sortie)\n\n\tprint(\"Vérification\")\n\tentree = np.array([entree])\n\tsortie = modele.predict(entree)\n\tprint('Entrée :',entree,'Sortie :',sortie)\n\n\t# Affichage graphique\n\taffichage_evaluation_une_var(modele,-5,5)\n\t\n\n\treturn\n\n\n### Exemple 3 : théorème d'approximation universel\ndef exemple3():\n\t# Théorème d'approximation universel\n\tprint(\"\\n--- Exemple 3 : théorème d'approximation universel ---\")\n\t# Fonctions à approcher\n\tdef f(x):\n\t\treturn np.cos(2*x) + x*np.sin(3*x) + x**0.5\n\n\t# Intervalle [a,b] et nombre de divisions\n\ta = 2\n\tb = 10\n\tn = 10\n\n\t# Architecture du réseau\n\tmodele = Sequential()\n\n\tmodele.add(Dense(2*n,input_dim=1,activation=heaviside))\n\tmodele.add(Dense(1,activation='linear'))\n\n\t# poids_a_zeros(modele,0)\n\t# poids_a_zeros(modele,1)\n\n\tcalcul_approximation(modele,f,a,b,n) # calcule et définis les poids\n\n\t# affiche_poids(modele,0)\n\t# affiche_poids(modele,1)\n\n\taffichage_approximation(modele,f,a,b)\n\treturn\n\n\n# exemple1()\n# exemple2()\nexemple3()","repo_name":"exo7math/deepmath-exo7","sub_path":"pythontf1/python/test_keras_facile_1var.py","file_name":"test_keras_facile_1var.py","file_ext":"py","file_size_in_byte":2994,"program_lang":"python","lang":"fr","doc_type":"code","stars":39,"dataset":"github-code","pt":"18"} +{"seq_id":"10767952080","text":"from multiprocessing import Process, Queue\nimport pygame as p\nimport ChessEngine\nfrom Move import PromotionMove\nimport AI\n\nBOARD_WIDTH = BOARD_HEIGHT = 512\nMOVE_LOG_PANEL_WIDTH = 250\nMOVE_LOG_PANEL_HEIGHT = BOARD_HEIGHT\nDIMENSION = 8\nSQUARE_SIZE = BOARD_HEIGHT // DIMENSION\nMAX_FPS = 15\nIMAGES = {}\n\n\ndef load_images():\n pieces = ['wp', 'wN', 'wR', 'wB', 'wK', 'wQ', 'bp', 'bR', 'bN', 'bB', 'bK', 'bQ']\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(p.image.load(\"pieces/\" + piece + '.png').convert_alpha(),\n (SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef main(player_one, player_two):\n p.init()\n screen = p.display.set_mode((BOARD_WIDTH + MOVE_LOG_PANEL_WIDTH, BOARD_HEIGHT))\n load_images()\n move_log_font = p.font.SysFont('Arial', 12, False, False)\n clock = p.time.Clock()\n game_state = ChessEngine.GameState()\n valid_moves = game_state.get_valid_moves()\n\n animate = False\n move_made = False\n player_clicks = []\n game_over = 
False\n ai_thinking = False\n move_finder_process = False\n running = True\n while running:\n is_human_turn = (game_state.states.white_turn and player_one) or (\n not game_state.states.white_turn and player_two)\n for e in p.event.get():\n if e.type == p.QUIT:\n running = False\n elif e.type == p.MOUSEBUTTONDOWN: # mouse handler\n move_made, animate = mouse_handler(game_over, is_human_turn, player_clicks, game_state, valid_moves)\n if e.type == p.KEYDOWN:\n move_made, animate, game_over = keyboard_handler(e, game_state, valid_moves, player_clicks)\n\n if not game_over and not is_human_turn:\n if not ai_thinking:\n ai_thinking = True\n return_queue = Queue()\n move_finder_process = Process(target=AI.find_best_move, args=(game_state, valid_moves, return_queue))\n move_finder_process.start()\n\n if not move_finder_process.is_alive():\n ai_move = return_queue.get()\n if ai_move is None:\n ai_move = AI.find_random_move(valid_moves)\n game_state.process_ai_move(ai_move)\n move_made, animate, ai_thinking = True, True, False\n\n if move_made:\n if animate:\n animate_move(game_state.logs.moves[-1], screen, game_state.board, clock)\n valid_moves = game_state.get_valid_moves()\n move_made = False\n draw_game_state(screen, game_state, valid_moves, player_clicks[0] if player_clicks else (), move_log_font)\n\n if game_state.states.checkmate or game_state.states.stalemate:\n game_over = True\n text = \"Stalemate\" if game_state.states.stalemate else \"Black wins by checkmate\" \\\n if game_state.states.white_turn else \"White wins by checkmate\"\n draw_text(screen, text)\n\n clock.tick(MAX_FPS)\n p.display.flip()\n\n\ndef mouse_handler(game_over, is_human_turn, player_clicks, game_state, valid_moves):\n move_made, animate = False, False\n if not game_over and is_human_turn:\n location = p.mouse.get_pos()\n column, row = map(transform_to_grid, location)\n if column >= 8 or player_clicks and (row, column) == player_clicks[0]:\n player_clicks = []\n else:\n if len(player_clicks) == 0 and game_state.board.get(row, column).color == game_state.states.player:\n player_clicks.append((row, column))\n elif len(player_clicks) == 1:\n start, target = player_clicks.pop(), (row, column)\n for move in valid_moves:\n if move.start_square == start and move.end_square == target:\n if isinstance(move, PromotionMove):\n color = game_state.states.player\n promotion_piece = input(\n \"choose a character to promote to 'N', 'Q', 'B', 'R'\")\n while promotion_piece not in ('N', 'Q', 'B', 'R'):\n promotion_piece = input(\n \"choose a character to promote to 'N', 'Q', 'B', 'R'\")\n move.promotion_piece = color + promotion_piece\n game_state.process_move(move)\n move_made = True\n animate = True\n else:\n player_clicks.append((row, column))\n return move_made, animate\n\n\ndef keyboard_handler(e, game_state, valid_moves, player_clicks):\n move_made, animate, game_over = False, False, False\n if e.key == p.K_z:\n game_state.undo_move()\n move_made = True\n animate = False\n game_over = False\n if e.key == p.K_r:\n game_state = ChessEngine.GameState()\n valid_moves = game_state.get_valid_moves()\n player_clicks = []\n move_made = False\n animate = False\n game_over = False\n return move_made, animate, game_over\n\n\ndef draw_game_state(screen, game_state, valid_moves, square_selected, move_log_font):\n draw_board(screen)\n highlight_selected_square(screen, game_state, valid_moves, square_selected)\n draw_pieces(screen, game_state.board)\n draw_move_log(screen, game_state, move_log_font)\n\n\ndef draw_board(screen):\n global colors\n 
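A note on the AI turn in the main loop above (`draw_board` continues below): `AI.find_best_move` runs in a `multiprocessing.Process` and the loop merely polls `is_alive()` once per frame, so pygame keeps drawing and handling events while the engine thinks; the move comes back through a `Queue`. A stripped-down, self-contained sketch of that poll-don't-block pattern (`slow_search` is a stand-in for the real move finder):

```python
import time
from multiprocessing import Process, Queue

def slow_search(return_queue):
    """Stand-in for AI.find_best_move: think for a while, then answer."""
    time.sleep(1.0)
    return_queue.put("e2e4")

if __name__ == "__main__":
    q = Queue()
    worker = Process(target=slow_search, args=(q,))
    worker.start()
    while worker.is_alive():      # the game performs this check once per frame
        time.sleep(0.05)          # ...and draws / handles events in between
        print("frame tick")
    print("best move:", q.get())  # safe: the worker has already put its result
```

Spawning a fresh process per move is simple and keeps the frame rate steady; the cost is one process start-up per AI turn, which is negligible next to the search itself.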
colors = [p.Color(\"white\"), p.Color(\"gray\")]\n for row in range(DIMENSION):\n for column in range(DIMENSION):\n color = colors[(row + column) % 2]\n p.draw.rect(screen, color, p.Rect(column * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef highlight_selected_square(screen, game_state, valid_moves, square_selected):\n if square_selected != ():\n row, column = square_selected\n if game_state.board.get(row, column).color == game_state.states.player:\n surface = p.Surface((SQUARE_SIZE, SQUARE_SIZE))\n surface.set_alpha(100)\n surface.fill(p.Color('blue'))\n screen.blit(surface, (column * SQUARE_SIZE, row * SQUARE_SIZE))\n\n surface.fill(p.Color('yellow'))\n for move in valid_moves:\n if move.start_row == row and move.start_column == column:\n screen.blit(surface, (SQUARE_SIZE * move.end_column, SQUARE_SIZE * move.end_row))\n\n\ndef draw_pieces(screen, board):\n for row in range(DIMENSION):\n for column in range(DIMENSION):\n piece = board.get(row, column)\n if piece.board_value != '--':\n screen.blit(IMAGES[piece.board_value],\n p.Rect(column * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n\n\ndef draw_move_log(screen, game_state, move_log_font):\n move_log_rect = p.Rect(BOARD_WIDTH, 0, MOVE_LOG_PANEL_WIDTH, MOVE_LOG_PANEL_HEIGHT)\n p.draw.rect(screen, p.Color('black'), move_log_rect)\n move_log = game_state.logs.moves\n move_texts = []\n moves_per_row = 3\n for i in range(0, len(move_log), 2):\n move_string = f\"{i // 2 + 1}. {str(move_log[i])} \"\n if i + 1 < len(move_log):\n move_string += f\"{str(move_log[i + 1])} \"\n move_texts.append(move_string)\n padding = 5\n text_y = padding\n for i in range(0, len(move_texts), moves_per_row):\n text = \"\"\n for j in range(moves_per_row):\n if i + j < len(move_texts):\n text += move_texts[i + j]\n\n text_object = move_log_font.render(text, True, p.Color('white'))\n text_location = move_log_rect.move(padding, text_y)\n screen.blit(text_object, text_location)\n text_y += text_object.get_height()\n\n\ndef animate_move(move, screen, board, clock):\n global colors\n change_row = move.end_row - move.start_row\n change_column = move.end_column - move.start_column\n frames_per_square = 10\n frame_count = (abs(change_row) + abs(change_column)) * frames_per_square\n for frame in range(frame_count + 1):\n row, column = (move.start_row + (change_row * frame / frame_count),\n move.start_column + change_column * frame / frame_count)\n draw_board(screen)\n draw_pieces(screen, board)\n color = colors[(move.end_row + move.end_column) % 2]\n end_square = p.Rect(move.end_column * SQUARE_SIZE, move.end_row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE)\n p.draw.rect(screen, color, end_square)\n if move.piece_captured != '--':\n screen.blit(IMAGES[move.piece_captured], end_square)\n\n screen.blit(IMAGES[move.piece_moved], p.Rect(column * SQUARE_SIZE, row * SQUARE_SIZE, SQUARE_SIZE, SQUARE_SIZE))\n p.display.flip()\n clock.tick(60)\n\n\ndef draw_text(screen, text):\n font = p.font.SysFont('Helvitca', 32, True, False)\n text_object = font.render(text, 0, p.Color('black'))\n text_location = p.Rect(0, 0, BOARD_WIDTH, BOARD_HEIGHT).move(BOARD_WIDTH / 2 - text_object.get_width() / 2,\n BOARD_HEIGHT / 2 - text_object.get_height() / 2)\n screen.blit(text_object, text_location)\n\n\ndef transform_to_grid(coordinate):\n return coordinate // SQUARE_SIZE\n\n\none_player_button = p.Rect(100, 100, 200, 50)\ntwo_player_button = p.Rect(100, 200, 200, 50)\nspectate_button = p.Rect(100, 300, 200, 50)\n\n\ndef find_number_of_players():\n p.init()\n screen 
= p.display.set_mode((BOARD_WIDTH, BOARD_HEIGHT))\n while True:\n for event in p.event.get():\n if event.type == p.QUIT:\n running = False\n elif event.type == p.MOUSEBUTTONDOWN:\n # Check if the user clicked on one of the buttons\n mouse_pos = event.pos\n if one_player_button.collidepoint(mouse_pos):\n return True, False\n elif two_player_button.collidepoint(mouse_pos):\n return True, True\n elif spectate_button.collidepoint(mouse_pos):\n return False, False\n draw_main_menu(screen)\n p.display.update()\n\n\ndef draw_main_menu(screen: p.display) -> None:\n font = p.font.Font(None, 32)\n p.draw.rect(screen, p.Color('red'), one_player_button)\n one_player_text = font.render('One Player', True, p.Color('Black'))\n screen.blit(one_player_text, (150, 115))\n\n # Draw the two player button\n p.draw.rect(screen, p.Color('Green'), two_player_button)\n two_player_text = font.render('Two Player', True, p.Color('Black'))\n screen.blit(two_player_text, (150, 215))\n\n # Draw the spectate button\n p.draw.rect(screen, p.Color('blue'), spectate_button)\n spectate_text = font.render('Spectate', True, p.Color('Black'))\n screen.blit(spectate_text, (150, 315))\n\n\nif __name__ == '__main__':\n player_one, player_two = find_number_of_players()\n main(player_one, player_two)\n","repo_name":"jzou9125/ChessEngine","sub_path":"ChessBoard.py","file_name":"ChessBoard.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73751929321","text":"#https://leetcode.com/problems/gcd-sort-of-an-array/\n#You are given an integer array nums, and you can perform the following operation any number of times on nums:\n#Swap the positions of two elements nums[i] and nums[j] if gcd(nums[i], nums[j]) > 1 where gcd(nums[i], nums[j]) is the greatest common divisor of nums[i] and nums[j].\n#Return true if it is possible to sort nums in non-decreasing order using the above swap method, or false otherwise.\n\n\nclass Solution(object):\n def gcdSort(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n def find(x):\n if parent[x] != x:\n parent[x] = find(parent[x])\n return parent[x]\n \n def union(x, y):\n parent[find(x)] = find(y)\n \n parent = range(max(nums) + 1)\n for x in nums:\n for i in range(2, int(x**0.5) + 1):\n if x % i == 0:\n union(x, i)\n union(x, x/i)\n return sorted(nums) == sorted(nums, key=find)\n \n \n ","repo_name":"Wang-dongyu123/CopilotCodeQuality","sub_path":"ScanProject/PyAnalysis/SonarPy/Hard/2112-gcd-sort-of-an-array.py","file_name":"2112-gcd-sort-of-an-array.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"38746284141","text":"n = int(input())\nnglist = []\nfor _ in range(0, 3):\n nglist.append(int(input()))\n\nif n in nglist:\n print(\"NO\")\n exit(0)\n\nfor i in range(0, 100):\n for j in reversed(range(1, 4)):\n if (n - j) in nglist:\n if j == 1:\n print(\"NO\")\n exit(0)\n else:\n n -= j\n break\n\nelse:\n print(\"YES\" if n < 1 else \"NO\")","repo_name":"yumechi/AtCoderHandoutCodes","sub_path":"ABC/ABC011/abc011c.py","file_name":"abc011c.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"72403058919","text":"from abc import ABC\nfrom abc import abstractmethod\nfrom typing import Iterable\nfrom typing import List\nfrom typing import Optional\nfrom typing import Union\n\nimport 
numpy as np\nfrom nptyping import NDArray\n# from collections import Iterable\n\nIUPAC_CODE_MAP = {\n 'A': 'A',\n 'C': 'C',\n 'G': 'G',\n 'T': 'T',\n 'M': 'AC',\n 'R': 'AG',\n 'W': 'AT',\n 'S': 'CG',\n 'Y': 'CT',\n 'K': 'GT',\n 'V': 'ACG',\n 'H': 'ACT',\n 'D': 'AGT',\n 'B': 'CGT',\n 'N': 'ACGT'\n}\n\nDNA_CHAR_ARRAY = np.array([ # ..CTGA\n '-', # 0b0000 -\n 'A', # 0b0001 A\n 'G', # 0b0010 G\n 'R', # 0b0011 GA == puRine\n 'T', # 0b0100 T\n 'W', # 0b0101 TA == Weak\n 'K', # 0b0110 TG == Keto\n 'D', # 0b0111 TGA == Not C -> D\n 'C', # 0b1000 C\n 'M', # 0b1001 CA == aMino\n 'S', # 0b1010 CG == Strong\n 'V', # 0b1011 CGA == Not T -> V\n 'Y', # 0b1100 TC == pYrimidine\n 'H', # 0b1101 TCA == Not G -> H\n 'B', # 0b1110 CTG == Not A -> B\n 'N', # 0b1111 TCGA = aNy\n])\n\nDNA_CHAR_TO_BIT_MAP = {char: val for val, char in enumerate(DNA_CHAR_ARRAY)}\n\n\ndef complement(x: Union[int, NDArray[np.int8]]):\n return x >> 2 | (x & 0b00110011) << 2\n\n\ndef complement4(x: Union[int, NDArray[np.int8]]):\n \"\"\"Four bit complement for 2 nucleotide packed in 8bit\"\"\"\n return (x & 0b11001100) >> 2 | (x & 0b00110011) << 2\n\n\nclass DNAStr:\n data: Optional[NDArray[np.int8]]\n\n def __init__(self, x: Optional[str] = None):\n if isinstance(x, str):\n self.init_from_string(x)\n elif isinstance(x, np.ndarray):\n self.data = x\n else:\n self.data = np.array([], dtype=np.int8)\n\n def init_from_string(self, x: str):\n self.data = np.array(\n [DNA_CHAR_TO_BIT_MAP[c] for c in x], dtype=np.int8)\n\n def __len__(self):\n return len(self.data)\n\n def __repr__(self):\n return ''.join(DNA_CHAR_ARRAY[x] for x in self.data)\n\n def __eq__(self, other):\n return np.array_equal(self.data, other.data)\n\n @staticmethod\n def tobit(x: str):\n return 0b00000001\n\n def complement(self):\n cls = type(self)\n nt = self.data >> 2 | (self.data << 2 & 0b1100)\n return cls(nt)\n\n\nclass Vec(ABC): # TODO: move to Range\n @abstractmethod\n def resize(self):\n pass\n\n\nclass DNAVec(Vec):\n data: List[Optional[NDArray[np.int8]]]\n\n def __init__(\n self,\n x: Union[Iterable[str], List[NDArray[np.int8]], None] = None):\n if isinstance(x, List):\n self.data = x\n elif isinstance(x, Iterable):\n self.data = [self.array_from_str(string) for string in x]\n else:\n self.data = []\n\n def array_from_str(self, x: str):\n return np.array([DNA_CHAR_TO_BIT_MAP[c] for c in x], dtype=np.int8)\n\n def __iter__(self) -> Iterable[Optional[DNAStr]]:\n # TODO: Return somthing like DNAstr.NA instead None? 
Or do not use\n        # DNAStr at all and DNAVec[1] will be enough?\n        for x in self.data:\n            if x is None:\n                yield None\n            else:\n                yield DNAStr(x)\n\n    def resize(self, x):\n        data = [nt[:x] if len(nt) >= x else None for nt in self.data]\n        return DNAVec(data)\n","repo_name":"gfederix/pybiorange","sub_path":"bioranges/struct.py","file_name":"struct.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34898104815","text":"\n# I had to use the solution code to help me with some of these\n\ndef basic():\n    for x in range(151):\n        print(x)\n\nbasic()\n\ndef multiples_of_five():\n    for x in range(5,5005,5):\n        print(x)\n\nmultiples_of_five()\n\n\ndef the_dojo_way():\n    for x in range(1, 101):\n        if x % 10 == 0:\n            print(\"Coding Dojo\")\n        elif x % 5 == 0:\n            print(\"Coding\")\n        else:\n            print(x)\n\nthe_dojo_way()\n\ndef woah_huge():\n    final_sum = 0\n    for x in range(1, 5000000, 2):\n        final_sum += x\n    print(final_sum)\n\nwoah_huge()\n\ndef countdown_by_fours():\n    for x in range(2018, 0, -4):\n        print(x)\n\ncountdown_by_fours()\n\ndef flexible_counter(low_num, high_num, mult):\n    for x in range(low_num, high_num + 1):\n        if x % mult == 0:\n            print(x)\n\nflexible_counter(2, 9, 3)","repo_name":"boeschthomas/python","sub_path":"for_loop_basic1.py","file_name":"for_loop_basic1.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3271099528","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nimport numpy as np\nimport pickle\nfrom typing import Union\nimport time\nfrom datetime import datetime\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom utils.util import fig_base\n\ndef monitor_visual(path, data_dict):\n    print(type(data_dict))\n    if data_dict is None:\n        return 1\n\n    scalar_keys = set()\n    for idx, _key in enumerate(sorted(data_dict.keys())):\n        data = data_dict[_key]\n        if _key.startswith(\"Scalar/\") or isinstance(data, float) or len(data.shape) == 0:\n            scalar_keys.add(_key)\n    non_scalar_keys = [_k for _k in data_dict.keys() if _k not in scalar_keys]\n    \n    fig = plt.figure(figsize=(16, 12))\n    fig_num = len(non_scalar_keys) + int(len(scalar_keys) > 0)\n    if fig_num == 0:\n        return 1\n    _fig_base = fig_base(fig_num)\n\n    for idx, _key in enumerate(sorted(non_scalar_keys)):\n        data = data_dict[_key]\n        fig_idx = idx + 1\n        ax = fig.add_subplot(_fig_base+fig_idx)\n        if isinstance(data, torch.Tensor):\n            data = data.cpu().detach().numpy()\n\n        if len(data.shape) == 1 or (len(data.shape) == 2 and data.shape[1] == 1):\n            data = data.reshape(data.shape[0])\n            ax = sns.lineplot(x=np.arange(data.shape[0]), y=data)\n            plt.xlabel(\"# of Samples\", fontsize=16)\n            plt.ylabel(_key, fontsize=16)\n        elif len(data.shape) == 2:\n            ax = sns.heatmap(data, \n                             # cmap=\"RdBu_r\"\n                             # cmap=\"gray_r\"\n                             cmap=\"YlGnBu_r\"\n                             # cmap=\"OrRd\"\n                             )\n            plt.xlabel(\"# of Features\", fontsize=16)\n            plt.ylabel(\"# of Samples\", fontsize=16)\n        elif len(data.shape) == 0:\n            ### Scalar\n            raise\n        else:\n            raise\n\n        plt.title(_key, fontsize=16)\n        # ax = sns.heatmap(model_para_list[0], mask=mask, cmap=\"YlGnBu\")\n\n    ax = fig.add_subplot(_fig_base+fig_num)\n    y_width = 10\n    for idx, _key in enumerate(sorted(scalar_keys)):\n        __key = _key.split(\"Scalar/\")[1] if _key.startswith(\"Scalar/\") else _key\n        print(f\"{__key}={data_dict[_key]}\")\n        # plt.text(x=0, y=idx*y_width,\n        #          s=f\"{__key}={data_dict[_key]}\",\n        #          fontsize=24,\n        #          
alpha=0.5,\n # color='r')\n\n plt.tight_layout()\n save_dir = os.path.join(path, \".fig/metalearner\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(os.path.join(save_dir, \"me_learner.png\"))\n plt.close()\n x = input(\"Continue?\")\n return int(x) if x.isdigit() else 1\n\ndef visual_error(preds, labels, path):\n if isinstance(preds, torch.Tensor):\n preds = preds.cpu().detach().numpy()\n if isinstance(labels, torch.Tensor):\n labels = labels.cpu().detach().numpy()\n fig = plt.figure(figsize=(8, 5))\n _fig_base = fig_base(1)\n ax = fig.add_subplot(_fig_base+1)\n ax.scatter(preds, labels, label=\"Predicted\", alpha=0.5, edgecolors='none')\n ax.plot(labels, labels, c=\"y\", label=\"Ideal\")\n plt.xlabel(\"Predicted\", fontsize=16)\n plt.ylabel(\"Measured\", fontsize=16)\n plt.legend(fontsize=16)\n plt.tight_layout()\n save_dir = os.path.join(path, \".fig/metalearner\")\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n plt.savefig(os.path.join(save_dir, \"cross_domain_error.png\"))\n plt.close()\n\nclass ProgressChecker:\n def __init__(self, every, step_based=True):\n self.every = every\n self.step_based = step_based\n\n def check(self, step_cnt, batch_cnt, epoch):\n if self.step_based:\n return step_cnt % self.every == 0\n else:\n return batch_cnt == 1 and epoch % self.every == 0\n \n def __str__(self):\n return f\"[ProgressChecker] Every {self.every} {'step' if self.step_based else 'epoch'}\"\n\nclass Monitor:\n def __init__(self, tb_log_dir=None, debug=False):\n self.monitor_dict: Union[None, dict] = {} if debug else None\n if tb_log_dir is not None:\n self.summary_writer = SummaryWriter(log_dir=tb_log_dir)\n else:\n self.summary_writer = None\n tb_log_dir = \".workspace/runs/default\"\n\n ### delete backward hook to visual net\n # self.summary_writer.add_graph(self.match_net, support_x)\n\n ### Used for hooks\n self.hooks = []\n\n self.train_step = 1\n self.batch_cnt = 1\n self.epoch_cnt = 1\n\n self.epoch_ts: Union[None, float] = None\n self.batch_ts: Union[None, float] = None\n self.epoch_durs = []\n self.batch_durs = []\n\n ### For monitor\n self.next_step = 1\n self.last_step = 1\n\n self.log_checker = ProgressChecker(1000)\n self.summary_checker = ProgressChecker(1, False)\n self.cache_checker = ProgressChecker(10, False)\n \n def summary_tensor(self, tensor, name):\n if not isinstance(tensor, np.ndarray):\n tensor = tensor.data.cpu().numpy()\n if len(tensor.shape) == 1:\n ### histogram\n self.summary_writer.add_histogram(\n name, tensor,\n global_step=self.train_step,\n walltime=None)\n else:\n dataformats = 'HW' if len(tensor.shape) == 2 else 'CHW'\n self.summary_writer.add_image(\n name, tensor,\n global_step=self.train_step,\n dataformats=dataformats)\n \n def make_bw_hook_fn(self, name):\n def hook_fn(module, ginp, gout):\n if self.summary_writer is None:\n return\n if self.is_summary:\n try:\n for idx in range(len(ginp)):\n if ginp[idx] is None or len(ginp[idx].shape) < 2:\n continue\n self.summary_tensor(ginp[idx], f\"Grads/{name}_{idx}\")\n \n ### debug\n # for idx in range(len(gout)):\n # if gout[idx] is None or len(gout[idx].shape) < 2:\n # continue\n # self.summary_tensor(gout[idx], f\"GradsOutput/{name}_{idx}\")\n\n for para_name, param in module.named_parameters():\n if param.requires_grad:\n if len(param.shape) < 2:\n continue\n self.summary_tensor(param, f\"Weights/{name}/{para_name}\")\n except:\n pass\n if self.monitor_dict is not None:\n for idx in range(len(ginp)):\n self.monitor_dict[f\"Grads/{name}_{idx}\"] = 
ginp[idx].data.cpu().numpy()\n\n return hook_fn\n \n def make_fw_hook_fn(self, name):\n def hook_fn(module, _input, _output):\n if self.summary_writer is None:\n return\n if self.is_summary:\n try:\n if not isinstance(_input, tuple):\n _input = (_input,)\n for idx in range(len(_input)):\n if _input[idx] is None:\n continue\n self.summary_tensor(_input[idx], f\"_Input/{name}_{idx}\")\n\n if not isinstance(_output, tuple):\n _output = (_output,)\n for idx in range(len(_output)):\n if _output[idx] is None:\n continue\n self.summary_tensor(_output[idx], f\"_Output/{name}_{idx}\")\n except:\n pass\n if self.monitor_dict is not None:\n for idx in range(len(_output)):\n self.monitor_dict[f\"Activations/{name}_{idx}\"] = _output[idx].data.cpu().numpy()\n\n return hook_fn\n\n def register_bw_hook(self, module, name, compatible=False):\n if not isinstance(module, torch.nn.Module):\n print(f\"[Warning] Fail to register a bw hook for {name} with type{type(module)}\")\n else:\n if compatible:\n hook_handler = module.register_backward_hook(\n self.make_bw_hook_fn(name))\n else:\n hook_handler = module.register_full_backward_hook(\n self.make_bw_hook_fn(name))\n self.hooks.append(hook_handler)\n \n def register_fw_hook(self, module, name):\n if not isinstance(module, torch.nn.Module):\n print(f\"[Warning] Fail to register a fw hook for {name} with type{type(module)}\")\n else:\n hook_handler = module.register_forward_hook(\n self.make_fw_hook_fn(name))\n self.hooks.append(hook_handler)\n \n def epoch_start(self):\n self.batch_cnt = 1\n self.summary([(\"text\", \"Epoch/Train\", str(self.epoch_cnt))])\n \n def epoch_end(self):\n self.epoch_cnt += 1\n\n def step(self):\n self.train_step += 1\n self.batch_cnt += 1\n \n if self.monitor_dict is None:\n self.last_step = self.train_step\n elif self.train_step - self.last_step >= self.next_step:\n self.next_step = self.visual(\".\")\n self.last_step = self.train_step\n \n @property\n def is_summary(self):\n return self.summary_checker.check(self.train_step, self.batch_cnt, self.epoch_cnt)\n \n @property\n def is_log(self):\n # print(\"is_log\", self.train_step, self.batch_cnt, self.epoch_cnt, self.log_checker.every, self.log_checker.step_based)\n return self.log_checker.check(self.train_step, self.batch_cnt, self.epoch_cnt)\n \n @property\n def is_cache(self):\n return self.cache_checker.check(self.train_step, self.batch_cnt, self.epoch_cnt)\n \n def summary(self, summary_list):\n if self.summary_writer is None:\n return\n for summary_type, name, tensor in summary_list:\n if tensor is None:\n continue\n if summary_type == \"scalar\":\n self.summary_writer.add_scalar(name, tensor, global_step=self.train_step, walltime=None)\n elif summary_type == \"histogram\":\n self.summary_writer.add_histogram(name, tensor, global_step=self.train_step, walltime=None)\n elif summary_type == \"image\":\n if len(tensor.shape) == 2:\n self.summary_writer.add_image(name, tensor, global_step=self.train_step, dataformats='HW')\n elif len(tensor.shape) == 3:\n self.summary_writer.add_image(name, tensor[:3], global_step=self.train_step, dataformats='CHW')\n elif summary_type == \"graph\":\n model, input_to_model = name, tensor\n self.summary_writer.add_graph(model, input_to_model)\n elif summary_type == \"text\":\n self.summary_writer.add_text(name, tensor, global_step=self.train_step)\n else:\n raise ValueError(f\"Invalid summary type: {summary_type}\")\n self.summary_writer.flush()\n \n def close(self):\n if self.summary_writer is not None:\n self.summary_writer.close()\n \n def 
add_monitor(self, _monitor_dict):\n        if self.monitor_dict is None:\n            return\n        self.monitor_dict.update(_monitor_dict)\n    \n    def clear_hooks(self):\n        for hook in self.hooks:\n            hook.remove()\n        self.hooks = []\n    \n    def save(self, _path):\n        with open(os.path.join(_path, \"monitor.pickle\"), \"wb\") as f:\n            pickle.dump([\n                self.train_step,\n                self.batch_cnt,\n                self.epoch_cnt\n            ], f)\n\n    def load(self, _path):\n        with open(os.path.join(_path, \"monitor.pickle\"), \"rb\") as f:\n            self.train_step, self.batch_cnt, self.epoch_cnt = pickle.load(f)\n\n    def visual(self, _path):\n        monitor_visual(_path, self.monitor_dict)","repo_name":"joapolarbear/cdmpp","sub_path":"metalearner/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":11996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"17566152188","text":"# Stdlib imports\nimport random\n\n# Core Django imports\nfrom django.core.files.base import ContentFile\n\n# Third-party app imports\nfrom factory import LazyAttribute, LazyFunction, SubFactory\nfrom factory.django import DjangoModelFactory, ImageField\n\n# Imports from my apps\nfrom src.stations.tests.factories import StationFactory\nfrom src.surface_meteograms.models import SMPoints, SMType, SurfaceMeteogram\nfrom src.vertical_meteograms.tests.factories import VMDateFactory\n\n\nclass SMTypeFactory(DjangoModelFactory):\n    class Meta:\n        model = SMType\n\n    name = LazyFunction(lambda: random.choice(SMType.Options.choices)[0])\n\n\nclass SMPointsFactory(DjangoModelFactory):\n    class Meta:\n        model = SMPoints\n\n    name = LazyFunction(lambda: random.choice(SMType.Options.choices)[0])\n\n\nclass SurfaceMeteogramFactory(DjangoModelFactory):\n    class Meta:\n        model = SurfaceMeteogram\n\n    type = SubFactory(SMTypeFactory)\n    points = SubFactory(SMPointsFactory)\n\n    location = SubFactory(StationFactory)\n\n    date = SubFactory(VMDateFactory)\n\n    img_height = 0\n    img_width = 0\n\n    img = LazyAttribute(\n        lambda _: ContentFile(\n            ImageField()._make_data({\"width\": 750, \"height\": 800}), \"example.png\"\n        )\n    )\n","repo_name":"julienpaul/django-weathervis","sub_path":"src/surface_meteograms/tests/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6730377755","text":"import glob, math\nimport numpy as np\n\n# Read the csv file\ncsvfile = 'doc/GSWLC_v20230404.csv'\ndata = np.loadtxt(csvfile, delimiter=',', dtype = 'unicode')\n\n# Rename the objects\nname = []\nfor this_name in data[:,0]:\n    this_name = this_name.lower()\n    if 'disk' in this_name:\n        this_name = 'ms' + this_name[-4:]\n    name.append(this_name)\n\n# Rebuild the catalog\ndat = np.c_[\n    name,\n    np.array(data[:,4]),  # ra\n    np.array(data[:,5]),  # dec\n    np.array(data[:,13]), # z\n    np.array(data[:,15]), # log10 Mstar\n    np.array(data[:,16]), # error log10 Mstar\n    np.array(data[:,17]), # log10 SFR\n    np.array(data[:,18]), # error log10 SFR\n    ]\n\nfor i in range(len(name)):\n    ra = str(np.round(float(dat[i,1]),6))\n    ra = ra.split('.')[0] + '.' + ra.split('.')[1].rjust(6, '0')\n    dec = str(np.round(float(dat[i,2]),6)).replace('-','$-$')\n    dec = dec.split('.')[0].rjust(5, ' ') + '.' 
+ dec.split('.')[1].rjust(6, '0')\n mass = str(np.round(float(dat[i,4]),2)).ljust(5, '0')\n emass = str(np.round(float(dat[i,5]),2)).ljust(4, '0').replace('0.00','0.01')\n sfr = str(np.round(float(dat[i,6]),2)).ljust(4, '0').replace('-','$-$').rjust(7, ' ')\n esfr = str(np.round(float(dat[i,7]),2)).ljust(4, '0')\n\n this = dat[i,0].rjust(6, ' ') + ' & ' + \\\n ra.rjust(10, ' ') + ' & ' + \\\n dec.rjust(10, ' ') + ' & ' + \\\n dat[i,3].ljust(6, '0') + ' & ' + \\\n mass + ' \\\\pm ' + emass + ' & ' + \\\n sfr + ' \\\\pm ' + esfr + ' \\\\\\\\'\n print(this)\n\n#######\n# end #\n#######","repo_name":"toshikisaito1005/myUtils","sub_path":"proj_archived/prep_Yamashita_aca_merger/table_catalog.py","file_name":"table_catalog.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"39513563680","text":"# The currently serving U.S. congressmember with the most Twitter followers\nimport json\nimport os\nimport tweepy\nfrom datetime import datetime\nfrom jinja2 import Template\nDEFAULT_TWITTER_CREDS_PATH = '~/.twittercreds.json'\n\ndef get_twitter_client(path = DEFAULT_TWITTER_CREDS_PATH):\n \"\"\"\n returns: a Tweepy API object\n \"\"\"\n full_creds_path = os.path.expanduser(path)\n creds = json.load(open(full_creds_path))\n auth = tweepy.OAuthHandler(consumer_key = creds['consumer_key'],\n consumer_secret = creds['consumer_secret'])\n auth.set_access_token(creds['access_token'],\n creds['access_token_secret'])\n return tweepy.API(auth)\n\n# gets a whole bunch of tweets\ndef get_full_user_timeline(screen_name, api = None, count = 3300):\n if api is None:\n api = get_twitter_client()\n tweets = []\n cursor = tweepy.Cursor(api.user_timeline, id = screen_name,\n trim_user = True, exclude_replies = False, include_rts = True)\n for tweet in cursor.items(count):\n tweets.append(tweet._json)\n\n return tweets\n\n\n# returns a list of screen_names found in the tweets that were actual replies\ndef get_screen_names_replied_to(tweets_arr, api = None):\n tweets = tweepy_to_dict(tweets_arr)\n if api is None:\n api = get_twitter_client()\n\n screen_names = set()\n for t in tweets:\n if t['in_reply_to_screen_name']:\n screen_names.add(t['in_reply_to_screen_name'].lower())\n # now fetch the\n return screen_names\n\n\n\n\n\n\n# https://dev.twitter.com/rest/reference/get/lists/members\ndef get_all_list_members(list_owner, list_slug, api = None):\n if api is None:\n api = get_twitter_client()\n members = []\n cursor = tweepy.Cursor(api.list_members, count = 5000,\n owner_screen_name = list_owner, slug = list_slug)\n for m in cursor.items():\n members.append(m._json)\n\n return members\n\n############\n\ndef create_members_web_table(arr):\n members = tweepy_to_dict(arr)\n content = \"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\"\n\n html_rows = []\n for m in members:\n days_since_joined = days_since_ts(m['created_at'])\n if m['friends_count'] > 0:\n followers_ratio = round(m['followers_count'] / m['friends_count'])\n else:\n followers_ratio = m['followers_count']\n\n t = m.get('status')\n if t:\n latest_tweet_text = t['text']\n days_since_latest_tweet = days_since_ts(t['created_at'])\n twitter_source = re.search('>(.+?)<', t['source']).groups()[0]\n\n else:\n latest_tweet_text = \"\"\n twitter_source = \"\"\n days_since_latest_tweet = days_since_joined\n\n mstr = TWEET_HTML.render(\n name = m['name'],\n screen_name = m['screen_name'],\n profile_pic = m['profile_image_url'],\n bio = m['description'],\n 
followers_count = m['followers_count'],\n friends_count = m['friends_count'],\n days_since_joined = days_since_joined,\n tweets_count = m['statuses_count'],\n tweets_per_day = round(m['statuses_count'] / days_since_joined, 1),\n latest_tweet_text = latest_tweet_text,\n days_since_latest_tweet = days_since_latest_tweet,\n twitter_source = twitter_source\n )\n html_rows.append(mstr)\n\n content += \"\\n\".join(html_rows)\n content += \"\"\n\n return PAGE_HTML.render(content = content)\n\n\n\n\n\n\ndef convert_twitter_timestamp(ts):\n # ts looks like: \"Fri Oct 03 20:18:31 +0000 2008\"\n return datetime.strptime(ts, '%a %b %d %H:%M:%S +0000 %Y')\n\n# ts looks like: \"Fri Oct 03 20:18:31 +0000 2008\"\ndef days_since_ts(ts):\n t = convert_twitter_timestamp(ts)\n return (datetime.today() - t).days\n\n\n# A convenience function for converting Tweepy models into plain\n# ol lists and dicts\ndef tweepy_to_dict(collection):\n if collection is dict:\n return collection\n else:\n arr = []\n for i in collection:\n arr.append(i._json)\n return arr\n\n\n\n\nPAGE_HTML = Template(\n\"\"\"\n\n\n \n\n Some Page\n \n \n \n\n \n\n \n\n\n\n\n
\n {{ content}}\n
\n\n\n\"\"\"\n)\n\n\nTWEET_HTML = Template(\n\"\"\"\n\n \n \n \n \n \n \n \n \n \n \n\n\"\"\"\n)\n","repo_name":"compjour/compjour-class-site","sub_path":"source/files/code/twitter/tweepy_wrapper.py","file_name":"tweepy_wrapper.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"8649555465","text":"import nnabla as nn\nimport nnabla.functions as F\nimport nnabla.parametric_functions as PF\nimport numpy as np\n\nfrom typing import Optional\nfrom typing import Tuple\n\n@PF.parametric_function_api('simple_rnn')\ndef simple_rnn(inputs: nn.Variable, units: int, mask: Optional[nn.Variable] = None,\n return_sequences: bool = False, fix_parameters=False) -> nn.Variable:\n '''\n A vanilla recurrent neural network layer\n Args:\n inputs (nnabla.Variable): A shape of [batch_size, length, embedding_size].\n units (int): Dimensionality of the output space.\n mask (nnabla.Variable): A shape of [batch_size, length, 1].\n return_sequences (bool): Whether to return the last output. in the output sequence, or the full sequence.\n fix_parameters (bool): Fix parameters (Set need_grad=False).\n Returns:\n nn.Variable: A shape [batch_size, length, units]\n or\n nn.Variable: A shape [batch_size units].\n '''\n\n hs = []\n batch_size, length, embedding_size = inputs.shape\n h0 = F.constant(0, shape=(batch_size, units))\n\n h = h0\n\n if mask is None:\n mask = F.constant(1, shape=(batch_size, length, 1))\n\n for x, cond in zip(F.split(inputs, axis=1), F.split(mask, axis=1)):\n h_t = F.tanh(PF.affine(F.concatenate(x, h, axis=1), units, fix_parameters=fix_parameters))\n h = where(cond, h_t, h)\n hs.append(h)\n\n if return_sequences:\n hs = F.stack(*hs, axis=1)\n return hs\n else:\n return hs[-1]\n\ndef lstm_cell(x: nn.Variable, c: nn.Variable, h: nn.Variable) -> nn.Variable:\n batch_size, units = c.shape\n _hidden = PF.affine(F.concatenate(x, h, axis=1), 4*units)\n\n a = F.tanh (_hidden[:, units*0: units*1])\n input_gate = F.sigmoid(_hidden[:, units*1: units*2])\n forgate_gate = F.sigmoid(_hidden[:, units*2: units*3])\n output_gate = F.sigmoid(_hidden[:, units*3: units*4])\n\n cell = input_gate * a + forgate_gate * c\n hidden = output_gate * F.tanh(cell)\n return cell, hidden\n\n@PF.parametric_function_api('lstm')\ndef lstm(inputs: nn.Variable, units: int, mask: Optional[nn.Variable] = None, initial_state: Tuple[nn.Variable, nn.Variable] = None,\n return_sequences: bool = False, return_state: bool = False, fix_parameters: bool = False) -> nn.Variable:\n '''\n A long short-term memory\n Args:\n inputs (nnabla.Variable): A shape of [batch_size, length, embedding_size].\n units (int): Dimensionality of the output space.\n mask (nnabla.Variable): A shape of [batch_size, length].\n initial_state ([nnabla.Variable, nnabla.Variable]): A tuple of an initial cell and an initial hidden state.\n return_sequences (bool): Whether to return the last output. 
in the output sequence, or the full sequence.\n        return_state (bool): Whether to return the last state, which consists of the cell and the hidden state.\n        fix_parameters (bool): Fix parameters (Set need_grad=False).\n    Returns:\n        nn.Variable: A shape [batch_size, length, units].\n        or\n        nn.Variable: A shape [batch_size, units].\n    '''\n    \n    batch_size, length, embedding_size = inputs.shape\n\n    if initial_state is None:\n        c0 = F.constant(0, shape=(batch_size, units))\n        h0 = F.constant(0, shape=(batch_size, units))\n    else:\n        assert type(initial_state) is tuple or type(initial_state) is list, \\\n               'initial_state must be a tuple or a list.'\n        assert len(initial_state) == 2, \\\n               'initial_state must have only two states.'\n\n        c0, h0 = initial_state\n\n        assert c0.shape == h0.shape, 'shapes of initial_state must be the same.'\n        assert c0.shape[0] == batch_size, \\\n               'batch size of initial_state ({0}) is different from that of inputs ({1}).'.format(c0.shape[0], batch_size)\n        assert c0.shape[1] == units, \\\n               'units size of initial_state ({0}) is different from that of units of args ({1}).'.format(c0.shape[1], units)\n\n    cell = c0\n    hidden = h0\n\n    hs = []\n\n    if mask is None:\n        mask = F.constant(1, shape=(batch_size, length, 1))\n    for x, cond in zip(F.split(inputs, axis=1), F.split(mask, axis=1)):\n        cell_t, hidden_t = lstm_cell(x, cell, hidden)\n        cell = where(cond, cell_t, cell)\n        hidden = where(cond, hidden_t, hidden)\n        hs.append(hidden)\n\n    if return_sequences:\n        ret = F.stack(*hs, axis=1)\n    else:\n        ret = hs[-1]\n\n    if return_state:\n        return ret, cell, hidden\n    else:\n        return ret\n\n\n@PF.parametric_function_api('highway')\ndef highway(x: nn.Variable, fix_parameters: bool = False) -> nn.Variable:\n    '''\n    A densely connected highway network layer\n    Args:\n        x (nnabla.Variable): A shape of [batch_size, units]\n        fix_parameters (bool): Fix parameters (Set need_grad=False).\n    Returns:\n        nn.Variable: A shape [batch_size, units].\n    '''\n    batch_size, in_out_size = x.shape\n\n    with nn.parameter_scope('plain'):\n        out_plain = F.relu(PF.affine(x, in_out_size, fix_parameters=fix_parameters))\n    with nn.parameter_scope('transform'):\n        out_transform = F.sigmoid(PF.affine(x, in_out_size, fix_parameters=fix_parameters))\n    y = out_plain * out_transform + x * (1 - out_transform)\n    return y\n\n\ndef where(condition: nn.Variable, x:nn.Variable, y: nn.Variable) -> nn.Variable:\n    '''\n    This function returns x if condition is 1, and y if condition is 0.\n    Args:\n        condition (nnabla.Variable): A shape of (batch_size, 1)\n        x (nnabla.Variable): A shape of (batch_size, embedding_size)\n        y (nnabla.Variable): A shape of (batch_size, embedding_size)\n    '''\n    if x.ndim == 1:\n        true_condition = F.reshape(condition, shape=list(condition.shape)+[1])\n    else:\n        true_condition = condition\n    false_condition = F.constant(1, shape=true_condition.shape) - true_condition\n    return true_condition * x + false_condition * y\n","repo_name":"minatosato/nlp-nnabla","sub_path":"common/parametric_functions.py","file_name":"parametric_functions.py","file_ext":"py","file_size_in_byte":5955,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"72976787559","text":"class Solution:\n    def bestClosingTime(self, customers: str) -> int:\n        N = len(customers)\n        prefix_y = [0] * N\n\n        number_of_y = 0\n        for i in range(N-1,-1,-1):\n            if customers[i] == \"Y\":\n                number_of_y += 1\n            prefix_y[i] = number_of_y\n        prefix_y.append(0)\n\n        prefix_n = []\n        number_of_n = 0\n        for i in range(N):\n            prefix_n.append(number_of_n)\n            if 
customers[i] == \"N\":\n number_of_n += 1\n prefix_n.append(number_of_n)\n\n minimum_penality = prefix_n[0] + prefix_y[0]\n hour = 0\n for i in range(1,N+1):\n curr_penality = prefix_n[i] + prefix_y[i]\n if curr_penality < minimum_penality:\n minimum_penality = curr_penality\n hour = i\n\n\n return hour","repo_name":"benj35/competitve-programming-2","sub_path":"2483-minimum-penalty-for-a-shop/2483-minimum-penalty-for-a-shop.py","file_name":"2483-minimum-penalty-for-a-shop.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"725670985","text":"'''\nCreated on 27.03.2021\nChange speed of all files in material and\nsave the edited files to production\n@author: Alvaro Ortiz for Museum fuer Naturkunde Berlin\n'''\nimport sys\nimport glob\nimport argparse\nimport traceback\nimport os\nfrom arg_utils import parse_args\n\n\nclass SoxConverter:\n def outfile_path(self, infile, outfolder, pitch):\n \"\"\"Returns the path to the out file\"\"\"\n outfile = \"%s/%s_%sx.mp3\" % (\n outfolder,\n os.path.basename(infile).rsplit(\".\", 1)[0],\n pitch\n )\n return outfile\n\n def process(self, infile, outfolder, pitch):\n outfile = self.outfile_path(infile, outfolder, pitch)\n command = \"sox %s %s pitch -q %s\" % (\n infile,\n outfile,\n float(pitch) * 1200\n )\n print(command)\n os.system(command)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Chage speed of audio files\n\n command-line arguments:\n @arg --in path to folder with audio files as mounted in container\n relative to notebook-home (e.g. material)\n @arg --out path to folder where audio files will be stored as mounted\n in container relative to notebook-home (e.g. production)\n @arg --pitch shift in octaves\n \"\"\"\n try:\n # Parse command-line arguments\n arg_parser = argparse.ArgumentParser()\n args = parse_args(arg_parser)\n\n # list all audio files in input folder\n file_list = []\n for infolder in args.infolder.split(\",\"):\n file_list += glob.glob(\"%s/*.mp3\" % infolder)\n # draw spectrograms all files in infolder, save to outfolder\n converter = SoxConverter()\n\n for infile in file_list:\n converter.process(infile, args.outfolder, args.pitch)\n\n except Exception as e:\n print(e)\n print(traceback.format_exc())\n arg_parser.print_help()\n sys.exit(1)\n","repo_name":"MfN-Berlin/HIP_Audio_Filters","sub_path":"src/post/pitch.py","file_name":"pitch.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35593200810","text":"import collections\nimport os\nfrom collections import Counter\n\nPATH = '/Users/liangliang/Desktop/AdventofCode2021'\n\ndef read_txt(filename):\n with open(filename) as file:\n lines = file.readlines()\n \n return lines \n\ndef solution1():\n fn = os.path.join(PATH, 'day12', 'input.txt')\n l = read_txt(fn)\n lines = [x.replace('\\n', '') for x in l]\n res = []\n brd = collections.defaultdict(list)\n for x in lines:\n s, e = x.split('-')\n brd[s].append(e)\n brd[e].append(s)\n # traverse brd \n # dfs with backtrack \n def dfs(node, path):\n if node == 'end':\n res.append(path)\n return \n # only lower case can visited at most once\n if not node.isupper() and node != 'start':\n visited.add(node)\n\n # search for neighbour\n for ngh in brd[node]:\n if ngh == 'start': continue \n if ngh not in visited:\n dfs(ngh, path + [ngh])\n\n if not node.isupper() and node != 'start':\n visited.remove(node)\n\n visited = set('start') \n 
dfs('start', ['start'])\n    print(res)\n    print(len(res))  \n\ndef solution2_1():\n    fn = os.path.join(PATH, 'day12', 'input.txt')\n    l = read_txt(fn)\n    lines = [x.replace('\\n', '') for x in l]\n    res = []\n    brd = collections.defaultdict(list)\n    for x in lines:\n        s, e = x.split('-')\n        brd[s].append(e)\n        brd[e].append(s)\n    # traverse brd \n    # dfs with backtrack \n    \n    def dfs(node, path, visited):\n        if node == 'end':\n            res.append(path)\n            return \n        \n        if not node.isupper() and node != 'start':\n            visited += [node]\n\n        # search for neighbour\n        for ngh in brd[node]:\n            if ngh == 'start': continue \n            # one lower-case cave may be visited twice, the rest at most once\n            if ngh not in visited or len(visited) <= len(set(visited)):\n                dfs(ngh, path + [ngh], visited)\n\n        if not node.isupper() and node != 'start':\n            visited.remove(node)\n\n    visited = ['start']\n    dfs('start', ['start'], visited)\n    print(res)\n    print(len(res))  \n\n# TLE\ndef solution2():\n    fn = os.path.join(PATH, 'day12', 'test.txt')\n    l = read_txt(fn)\n    lines = [x.replace('\\n', '') for x in l]\n    res = []\n    brd = collections.defaultdict(set)\n    visited = {}\n    for x in lines:\n        s, e = x.split('-')\n        brd[s].add(e)\n        brd[e].add(s)\n        visited[s] = 0\n        visited[e] = 0\n    # traverse brd \n    # dfs with backtrack \n    def dfs(node, path):\n        if node == 'end' and path not in res:\n            res.append(path)\n            return \n        # count visits to lower-case caves\n        if not node.isupper() and node != 'start':\n            visited[node] += 1\n        # a lower-case cave may be visited up to twice; paths are filtered below \n        # search for neighbour\n        for ngh in brd[node]:\n            if ngh == 'start': continue \n            if 0 <= visited[ngh] < 2:\n                dfs(ngh, path + [ngh])\n\n        if not node.isupper() and node != 'start':\n            visited[node] -= 1\n\n\n    dfs('start', ['start'])\n    ans = 0\n    brd_time = {}\n\n    for r in res:\n        small = 0\n        for b in brd.keys():\n            if not b.isupper():\n                brd_time[b] = 0\n\n        for r_sub in r:\n            if not r_sub.isupper():\n                brd_time[r_sub] += 1\n        print(brd_time)\n        for v in brd_time.keys():\n            if brd_time[v] >= 2:\n                small += 1\n\n        if small <= 1:\n            ans += 1\n\n    print(ans)\n\nsolution2_1()","repo_name":"ZhengLiangliang1996/AdventofCode2021","sub_path":"day12/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"10382824692","text":"'''\r\nYou are given an array of intervals - that is, an array of tuples (start, end).\r\nThe array may not be sorted, and could contain overlapping intervals.\r\nReturn another array where the overlapping intervals are merged.\r\n\r\nFor example:\r\n[(1, 3), (5, 8), (4, 10), (20, 25)]\r\n\r\nThis input should return [(1, 3), (4, 10), (20, 25)] since\r\n(5, 8) and (4, 10) can be merged into (4, 10).\r\n'''\r\n\r\ndef merge(intervals):\r\n    i = 0\r\n    while i < len(intervals):\r\n        curr = intervals[i]\r\n        j = i+1\r\n        merged = False\r\n        while j < len(intervals):\r\n            next = intervals[j]\r\n            # intervals overlap when each one starts before the other ends\r\n            if (curr[1] >= next[0] and next[1] >= curr[0]):\r\n                intervals.pop(j)\r\n                intervals.pop(i)\r\n                left = min(curr[0], next[0])\r\n                right = max(curr[1], next[1])\r\n                intervals.append((left, right))\r\n                # re-check the merged interval against the rest\r\n                merged = True\r\n                break\r\n            j += 1\r\n        if not merged:\r\n            i += 1\r\n    return sorted(intervals)\r\n\r\nprint(merge([(1, 3), (5, 8), (4, 10), (20, 25)]))\r\n# [(1, 3), (4, 10), (20, 25)]\r\n","repo_name":"reedless/dailyinterviewpro_answers","sub_path":"2019_08/daily_question_20190828.py","file_name":"daily_question_20190828.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34767432684","text":"import discord\r\nfrom 
discord import app_commands\r\nfrom discord.ext import commands\r\nfrom core.classes import Cog_Extension\r\nimport json, datetime\r\nfrom typing import Optional\r\n\r\nwith open('setting.json', mode='r', encoding='utf8') as jfile:\r\n jdata = json.load(jfile)\r\nclass AddReaction(Cog_Extension):\r\n @app_commands.command(name=\"add_reaction\", description=\"Add reaction\")\r\n @app_commands.describe(msg_id=\"message id in this channel\", text=\"reactions\")\r\n async def add_reaction(self, interaction: discord.Interaction, msg_id: str, text: Optional[str] = None):\r\n try:\r\n message = await interaction.channel.fetch_message(int(msg_id))\r\n except Exception as e:\r\n print(\"Failed to fetch message, except: \", e)\r\n return\r\n for i in range(len(text)):\r\n try:\r\n await message.add_reaction(text[i])\r\n except discord.NotFound:\r\n continue\r\n except discord.InvalidArgument:\r\n continue\r\n\r\nasync def setup(bot):\r\n await bot.add_cog(AddReaction(bot))","repo_name":"trentyy/pyBot","sub_path":"cogs/add_reaction.py","file_name":"add_reaction.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15664473000","text":"##########################\r\nimport sys\r\nfrom random import choice, randrange\r\nfrom time import sleep\r\n\r\ntry:\r\n from OpenGL.GLUT import *\r\n from OpenGL.GL import *\r\n from OpenGL.GLU import *\r\nexcept:\r\n print ('''\r\nERROR: PyOpenGL not installed properly.\r\n ''')\r\n sys.exit()\r\n###########################\r\n\r\n### 32 Column 40 rows ###\r\n\r\n# variabel global # \r\n(keylocx,keylocy) = (400,430)\r\nambil_kunci = False\r\nlevels = [\"\"]\r\nani = 0\r\n\r\n### Variabel untuk player ###\r\nkoorx = 0\r\nkoory = 0\r\nleft = 0\r\nright = 0\r\nup = 0\r\ndown = 0\r\n\r\n### Variabel untuk ghost ###\r\nleftg = 0\r\nrightg = 0\r\nupg = 0\r\ndowng = 0\r\nkoorxg = 0\r\nkooryg = 0\r\nchange_dir = False\r\ndirg = choice(['up','down','left','right'])\r\n\r\n### Variabel untuk ghost1 ###\r\nleftg1 = 0\r\nrightg1 = 0\r\nupg1 = 0\r\ndowng1 = 0\r\nkoorxg1 = 0\r\nkooryg1 = 0\r\nchange_dir1 = False\r\ndirg1 = choice(['up','down','left','right'])\r\nGhost_alpha = 0\r\n\r\n### Variabel untuk ghost2 ###\r\nleftg2 = 0\r\nrightg2 = 0\r\nupg2 = 0\r\ndowng2 = 0\r\nkoorxg2 = 0\r\nkooryg2 = 0\r\nchange_dir2 = False\r\ndirg2 = choice(['up','down','left','right'])\r\n\r\n### Baterai ###\r\nnyala_baterai = False\r\nbaterai = 90\r\nbaterai_habis = False\r\n\r\n### MAZE ###\r\nscreen_x = 0\r\nscreen_y = 0\r\n\r\nlevel_1 = [\r\n\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\",\r\n\"X XXXXXXXXXXXX X\",\r\n\"X XXXXX XXXXXXXXX XXXXXXXXX XXXXXXXXX XXXXXXXXX\",\r\n\"XX XXXX XXXXXXXX XXX XXXXXXXXX\",\r\n\"XX XXXX XXXXXX XXX XXXXX X XXXXX XXXX\",\r\n\"XX XXX XXXXX XXXXXXX XXXX XXXXXXXX XXXX\",\r\n\"XX XXX XXXX XXXXXX XXXXXXXXX XX XXXX\",\r\n\"XX XXX XXXX XXXXX XXXXXXX XXXXXXXX XXXXX\",\r\n\"XXX XXX XXX XXXXXX XXXXXXXX XXX XXXXXXXXX XXXX\",\r\n\"XX XX XXXX XXXXXXXXX XXXXX XXXX XXXXX\",\r\n\"XXX X XXXXXXX XXXXXX XXXXXXXXX XX XXXXX\",\r\n\"XXX X XXXXXXXXXX XXXXXXXXXXXXXXXX X XXXXX\",\r\n\"XXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX XXXX XXXXX\",\r\n\"XXX XXXXXXXXXXXXXXX XXXXXXXX XXX XXXXX\",\r\n\"X XX X XXXXXX XXXXXXX XX XXXXXXXX\",\r\n\"XX X XXX XXX XXX XXXXXX XXXXXXX XXX XXX\",\r\n\"XX XX XXXXXXXXXX XXXXXXXXX XXX XXXXX XXX\",\r\n\"XX XXX XXX XXXX XXXX XXXXXX XX X\",\r\n\"X XXX XXXXX XXXXXXX XX X\",\r\n\"X XX XXXXX XXXXXX XXX XXX XXXXXXXXXX\",\r\n\"X XX XXXXXX XXX XXXXX XXXXX XXXX 
XXXXXXXXX\",\r\n\"X XXXXX XXXXXX XXXXX XXXXX XX XXXXX\",\r\n\"XX XXXXXXX XXXX XXXXXXXXX XXXXXX XXXXXXXXX\",\r\n\"XX XXXXX XXXXXXXX XXXXXXXXX XXXXXXXX XXXXX XXX\",\r\n\"XX XX XXXXXXXXX XXXXXXXXX XXXXXXXXXX XXX\",\r\n\"XX XX XXXXX XXXXXXXXX XXXXXXX XXXXXX XX\",\r\n\"XX XX XXXXX XX XXXXXX XXX XXXX XX\",\r\n\"XX XXX XXX XXX XXXXXX XXXXX XX XXXXXX XX\",\r\n\"XX XXX XXXX XXX XX XXXXXX XXXXXXXXX X\",\r\n\"X XXXX XXX XXXXX XXXX XXXXXX XXXXXXXXXX XX\",\r\n\"X X XXXXX XXXX XXXX XXXX XXXXXXX XXX\",\r\n\"X XXX XXXXXXXXXXX XX\",\r\n\"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\",\r\n]\r\n\r\nlevels.append(level_1) #menambah level ke list levels\r\nwalls = [] #koordinat dinding\r\n\r\ndef animation(value): #animasi karakter yang dapat bergerak\r\n global ani\r\n if ani == 0:\r\n ani += 2\r\n glutTimerFunc(150,animation,0)\r\n\r\n elif ani== 2:\r\n ani -= 2\r\n glutTimerFunc(150,animation,0)\r\n\r\nclass Draw(): #membuat class draw untuk menggambar\r\n def dot(self,size,red,green,blue,x,y): #point\r\n self.size = size\r\n self.red = red\r\n self.green = green\r\n self.blue = blue\r\n self.x = x\r\n self.y = y\r\n glPointSize(size)\r\n glBegin(GL_POINTS)\r\n glColor3f(red,green,blue)\r\n glVertex2f(x,y)\r\n glEnd()\r\n\r\n def draw_ghost(self,size,red,green,blue,alpha,x,y): #point dengan alpha\r\n self.size = size\r\n self.red = red\r\n self.green = green\r\n self.blue = blue\r\n self.alpha = alpha\r\n self.x = x\r\n self.y = y\r\n glPushMatrix()\r\n glPointSize(size)\r\n glTranslatef(0,ani,0)\r\n glColor4f(red,green,blue,alpha)\r\n glBegin(GL_POINTS)\r\n glVertex2f(x,y)\r\n glEnd()\r\n glPopMatrix()\r\n\r\n def draw_ghostline(self,size,red,green,blue,alpha,x1,y1,x2,y2): #line dengan alpha\r\n self.size = size\r\n self.red = red\r\n self.green = green\r\n self.blue = blue\r\n self.alpha = alpha\r\n self.x1 = x1\r\n self.y1 = y1\r\n self.x2 = x2\r\n self.y2 = y2\r\n glPushMatrix()\r\n glLineWidth(size)\r\n glTranslatef(0,ani,0)\r\n glColor4f(red,green,blue,alpha)\r\n glBegin(GL_LINES)\r\n glVertex2f(x1,y1)\r\n glVertex2f(x2,y2)\r\n glEnd()\r\n glPopMatrix()\r\n\r\n def draw_walls(self,size,red,green,blue): #point untuk dinding\r\n global screen_x,screen_y\r\n self.size = size\r\n self.red = red\r\n self.green = green\r\n self.blue = blue\r\n glPointSize(size)\r\n glBegin(GL_POINTS)\r\n glColor3f(red,green,blue)\r\n for i in walls:\r\n glVertex2f(i[0],i[1])\r\n glEnd()\r\n\r\ndef setup_maze(level): #setup maze dengan parameter level\r\n global walls\r\n for y in range(len(level)): # untuk setiap baris\r\n for x in range(len(level[y])): #setiap koordinat pada baris\r\n character = level[y][x] #variabel karakter dengan isi setiap pasang koordinat\r\n screen_x = 10 + (x*15) #mengasign koordinat x pada setup maze\r\n screen_y = 10 + (y*15) #mengasign koordinat y pada setup maze\r\n if character == \"X\": #jika karakter adalah X\r\n walls.append((screen_x,screen_y)) #menambah koordinat screen_x dan screen_y pada walls\r\n\r\nsetup_maze(levels[1]) #memanggil setup maze\r\n\r\nclass Player(): #membuat kelas player\r\n def playerk(self,x,y): #menggambar player\r\n global koorx,koory\r\n self.koorx = koorx = x\r\n self.koory = koory = y\r\n draw.draw_ghostline(4,0.9,0.8,0.8,1,(koorx-4)+left+right,(koory+2)+up+down,(koorx+4)+left+right,(koory+2)+up+down)\r\n draw.draw_ghostline(5,0.6,0.6,0.6,1,(koorx-4)+left+right,(koory+6)+up+down,(koorx+4)+left+right,(koory+6)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx-4)+left+right,(koory-3)+up+down,(koorx-4)+left+right,(koory+3)+up+down)\r\n 
draw.draw_ghostline(2,0.9,0.8,0.8,1,(koorx-2)+left+right,(koory-7)+up+down,(koorx-2)+left+right,(koory-4)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx-5)+left+right,(koory+2)+up+down,(koorx-5)+left+right,(koory+8)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx-5)+left+right,(koory+8)+up+down,(koorx-3)+left+right,(koory+10)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx-3)+left+right,(koory+10)+up+down,(koorx+3)+left+right,(koory+10)+up+down)\r\n draw.draw_ghostline(3,0.9,0.8,0.9,1,(koorx+4)+left+right,(koory-1)+up+down,(koorx-4)+left+right,(koory-1)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx-5)+left+right,(koory)+up+down,(koorx+4)+left+right,(koory)+up+down)\r\n draw.draw_ghostline(2,0.8,0.1,0.9,1,(koorx+2)+left+right,(koory)+up+down,(koorx-2)+left+right,(koory)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx+4)+left+right,(koory-3)+up+down,(koorx-4)+left+right,(koory-3)+up+down)\r\n draw.draw_ghostline(2,0.6,0.1,0.8,1,(koorx+3)+left+right,(koory-3)+up+down,(koorx-3)+left+right,(koory-3)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx+5)+left+right,(koory+2)+up+down,(koorx+5)+left+right,(koory+8)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx+4)+left+right,(koory-3)+up+down,(koorx+4)+left+right,(koory+3)+up+down)\r\n draw.draw_ghostline(2,0.9,0.8,0.8,1,(koorx+2)+left+right,(koory-7)+up+down,(koorx+2)+left+right,(koory-4)+up+down)\r\n draw.draw_ghostline(1,0.3,0.3,0.3,1,(koorx+5)+left+right,(koory+8)+up+down,(koorx+3)+left+right,(koory+10)+up+down)\r\n\r\n draw.draw_ghost(2,0.9,0.8,0.8,1,(koorx+1)+left+right,(koory+5)+up+down)\r\n draw.draw_ghost(2,0.9,0.8,0.8,1,(koorx-1.5)+left+right,(koory+5)+up+down)\r\n draw.draw_ghostline(1,0,0.7,0.9,1,(koorx+1.5)+left+right,(koory+3)+up+down,(koorx+1.5)+left+right,(koory+6)+up+down)\r\n draw.draw_ghostline(1,0,0.7,0.9,1,(koorx-1.5)+left+right,(koory+3)+up+down,(koorx-1.5)+left+right,(koory+6)+up+down)\r\n\r\n def player_movement(key,x,y): #membuat perilaku dan colision player \r\n global up, down, left, right, koorxg, kooryg, ambil_kunci, Ghost_alpha,nyala_baterai,baterai_habis\r\n ### collision dengan ghost ###\r\n #jika player berada tepat pada ghost atau diatasnya satu kotak\r\n if (koorx+left+right, koory+up+down) == ((koorxg+leftg+rightg, kooryg+upg+downg)) or (koorx+left+right, koory+up+down) == ((koorxg1+leftg1+rightg1, kooryg1+upg1+downg1)):\r\n draw.dot(15,0.9,0.9,0.9,koorx+left+right,koory+up+down) #membuat player berwarna putih untuk animasi mati\r\n print(\"Player tertangkap oleh hantu!!\\nGame Over\") #pemberitahuan di terminal\r\n left -= 1000 #player dipindah keluar dari map\r\n\r\n ### Player Movement ###\r\n if (koorx+left+right, koory+up+down+15) not in walls: #jika koordinat diatas player tidak ada pada walls\r\n if key == GLUT_KEY_UP: #jika menekan tombol up\r\n up += 15\r\n if (koorx+left+right, koory+up+down-15) not in walls:\r\n if key == GLUT_KEY_DOWN:\r\n down -= 15\r\n if (koorx+left+right+15, koory+up+down) not in walls:\r\n if key == GLUT_KEY_RIGHT:\r\n right += 15\r\n if (koorx+left+right-15, koory+up+down) not in walls:\r\n if key == GLUT_KEY_LEFT:\r\n left -= 15\r\n if ambil_kunci == True: #jika mengambil kunci\r\n if (koorx+left+(right), koory+up+down) == (820,25): #jikakoordinat player = pintu keluar\r\n print(\"SELAMAT\\nANDA BERHASIL KELUAR DARI LABIRIN!!\") \r\n right += 45 #mengeluarkan player melalui pintu keluar\r\n\r\n ### Jika baterai > 0###\r\n if baterai >= 0:\r\n baterai_habis = False\r\n ### Kalau hantunya tidak kelihatan ###\r\n if 
Ghost_alpha == 0:\r\n ### Kalau player menekan tombol END ###\r\n if key == GLUT_KEY_END:\r\n if baterai >= 0:\r\n ### nyala baterai jadi True\r\n nyala_baterai = True\r\n ### Transparasi hantu jadi 1 ###\r\n Ghost_alpha += 1\r\n\r\n #jika hantu kelilhatan\r\n elif Ghost_alpha == 1:\r\n if key == GLUT_KEY_END:\r\n nyala_baterai = False\r\n Ghost_alpha -= 1\r\n baterai == 0\r\n\r\n #jika baterai habis\r\n elif baterai == 0 or baterai <= 0:\r\n Ghost_alpha = 0 #hantu akan tidak terlihat\r\n baterai_habis = True\r\n else:\r\n Ghost_alpha = 0\r\n\r\n def minbaterai(value): #logika pengurangan baterai\r\n global baterai, nyala_baterai\r\n if baterai_habis == False: #jika baterai belum habis\r\n if nyala_baterai == True: #jika ghost detector dinyalakan\r\n baterai -= 5 #baterai berkurang\r\n elif nyala_baterai == False: #jika ghost detector tidak dinyalakan\r\n baterai -= 0 #baterai tidak berkurang\r\n elif baterai_habis == True: #jika baterai habis\r\n baterai = 0 #baterai tetap pada angka 0\r\n else:\r\n baterai = 0\r\n glutTimerFunc(500,Player.minbaterai,0) #mengulangi fungsi setiap 0.5 detik\r\n\r\n def gbr_baterai(): #gambar baterai\r\n ### BATERAI ###\r\n glBegin(GL_QUADS)\r\n glColor3f(1,1,0)\r\n glVertex2f(860,460)\r\n glVertex2f(860,480)\r\n glVertex2f(860+baterai,480)\r\n glVertex2f(860+baterai,460)\r\n glVertex2f(848,465)\r\n glVertex2f(848,475)\r\n glVertex2f(856,475)\r\n glVertex2f(856,465)\r\n glEnd()\r\n\r\n glLineWidth(1)\r\n glBegin(GL_LINES)\r\n glColor3f(1,1,0)\r\n glVertex2f(858,482)\r\n glVertex2f(858,458)\r\n glVertex2f(857,482)\r\n glVertex2f(952,482)\r\n glVertex2f(952,482)\r\n glVertex2f(952,458)\r\n glVertex2f(952,458)\r\n glVertex2f(858,458)\r\n glEnd()\r\n\r\nclass Ghost(): #kelas ghost\r\n### Ghost 0 ###\r\n def slender(self,gx,gy,atas,bawah,kanan,kiri): #method untuk menggambar ghost\r\n self.x = x = gx\r\n self.y = y = gy\r\n self.at = at = atas\r\n self.ba = ba = bawah\r\n self.ka = ka = kanan\r\n self.ki = ki = kiri\r\n ### Kepala ###\r\n draw.draw_ghostline(12,0.9,0.9,0.9,Ghost_alpha,(x-4)+ki+ka,(y+20)+at+ba, (x+4)+ki+ka,(y+20)+at+ba)\r\n draw.draw_ghostline(2.5,0,0,0,Ghost_alpha, (x-4)+ki+ka,(y+28)+at+ba, (x+4)+ki+ka,(y+28)+at+ba)\r\n draw.draw_ghost(2,0,0,0,Ghost_alpha, (x-5)+ki+ka, (y+27)+at+ba)\r\n draw.draw_ghost(2,0,0,0,Ghost_alpha, (x+5)+ki+ka, (y+27)+at+ba)\r\n draw.draw_ghostline(2.5,0,0,0,Ghost_alpha, (x-6)+ki+ka,(y+26)+at+ba, (x-6)+ki+ka,(y+14)+at+ba)\r\n draw.draw_ghostline(2.5,0,0,0,Ghost_alpha, (x+6)+ki+ka,(y+26)+at+ba, (x+6)+ki+ka,(y+14)+at+ba)\r\n draw.draw_ghost(2,0,0,0,Ghost_alpha, (x-5)+ki+ka, (y+13)+at+ba)\r\n draw.draw_ghost(2,0,0,0,Ghost_alpha, (x+5)+ki+ka, (y+13)+at+ba)\r\n draw.draw_ghostline(2.5,0,0,0,Ghost_alpha, (x-4)+ki+ka,(y+12)+at+ba, (x+4)+ki+ka,(y+12)+at+ba)\r\n ### Wajah ###\r\n draw.draw_ghostline(3,0,0,0,Ghost_alpha, (x)+ki+ka,(y+20)+at+ba, (x)+ki+ka,(y+15)+at+ba)\r\n draw.draw_ghost(3,0.7,0,0,Ghost_alpha, (x-3)+ki+ka,(y+23)+at+ba)\r\n draw.draw_ghost(3,0.7,0,0,Ghost_alpha, (x+2)+ki+ka,(y+23)+at+ba)\r\n ### Badan ###\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-4)+ki+ka,(y+10)+at+ba, (x+4)+ki+ka,(y+10)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-6)+ki+ka,(y+8)+at+ba, (x+6)+ki+ka,(y+8)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-8)+ki+ka,(y+6)+at+ba, (x+8)+ki+ka,(y+6)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-8)+ki+ka,(y+4)+at+ba, (x+8)+ki+ka,(y+4)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-8)+ki+ka,(y+2)+at+ba, (x+8)+ki+ka,(y+2)+at+ba)\r\n 
draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-8)+ki+ka,(y)+at+ba, (x+8)+ki+ka,(y)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-6)+ki+ka,(y-2)+at+ba, (x+6)+ki+ka,(y-2)+at+ba)\r\n draw.draw_ghost(3.5,0.9,0.9,0.9,Ghost_alpha, (x)+ki+ka, (y+6)+at+ba)\r\n draw.draw_ghostline(2,0,0,0,Ghost_alpha, (x-4)+ki+ka,(y-4)+at+ba, (x+4)+ki+ka,(y-4)+at+ba)\r\n draw.draw_ghost(2,0.9,0.9,0.9,Ghost_alpha, (x-5)+ki+ka, (y)+at+ba)\r\n draw.draw_ghost(2,0.9,0.9,0.9,Ghost_alpha, (x+5)+ki+ka, (y)+at+ba)\r\n ### Dasi ###\r\n draw.draw_ghostline(2,0.8,0,0,Ghost_alpha, (x)+ki+ka,(y+8)+at+ba, (x)+ki+ka,(y)+at+ba)\r\n ### Kaki ###\r\n draw.draw_ghostline(5,0,0,0,Ghost_alpha, (x-3)+ki+ka,(y-2)+at+ba, (x-3)+ki+ka,(y-8)+at+ba)\r\n draw.draw_ghostline(5,0,0,0,Ghost_alpha, (x+3)+ki+ka,(y-2)+at+ba, (x+3)+ki+ka,(y-8)+at+ba)\r\n\r\n def ghost(self): #ghost pertama\r\n global upg,downg,leftg,rightg, koorxg, kooryg,left, Ghost_alpha\r\n self.koorxg = koorxg = 370 #koordinat pertama\r\n self.kooryg = kooryg = 340\r\n self.Ghost_alpha = Ghost_alpha\r\n self.slender(koorxg,kooryg,upg,downg,rightg,leftg) #memanggil fungsi menggambar\r\n if (koorx+left+right, koory+up+down) == ((koorxg+leftg+rightg, kooryg+upg+downg)) or (koorx+left+right, koory+up+down) == ((koorxg+leftg+rightg, kooryg+upg+downg+15)):\r\n #collision dengan player\r\n print(\"Player tertangkap oleh hantu!!\\nGame Over\")\r\n draw.draw_ghost(15,0.9,0.9,0.9,Ghost_alpha,koorx+left+right,koory+up+down)\r\n left -= 1000\r\n\r\n def g_dir(self): #movement ai dari ghost\r\n global change_dir,dirg\r\n self.change_dir = True #pengubah arah\r\n self.dirg = dirg = choice([\"up\",\"down\",\"right\",\"left\"]) #pemilih aras\r\n if (koorx+left+(right), koory+up+down) != (880,25): #jika player belum keluar dari pintu exit\r\n if self.change_dir == True: #pengubah arah = True\r\n upg == 0\r\n downg == 0\r\n leftg == 0\r\n rightg == 0\r\n\r\n if dirg == \"up\": #jika ke atas\r\n self.go_up(0) #memanggil fungsi keatas\r\n elif dirg == \"down\": #dan seterusnya\r\n self.go_down(0)\r\n elif dirg == \"left\":\r\n self.go_left(0)\r\n elif dirg == \"right\":\r\n self.go_right(0)\r\n\r\n def go_up(self,value): #fungsi ke atas\r\n global upg\r\n self.upg = upg \r\n self.g_dir == True #pengubah arah = True\r\n if (koorxg+leftg+rightg, kooryg+upg+downg+15) not in walls: #jika hantu tidak menabrak dinding\r\n upg += 15 #berjalan ke atas\r\n glutTimerFunc(randrange(200,300,100),self.go_up,0) #mengulangi sampai menabrak dinding\r\n else:\r\n self.g_dir() #jika menabrak dinding, maka mencari arah baru\r\n\r\n #fungsi sama dengan go_up\r\n def go_down(self,value):\r\n global downg\r\n self.downg = downg\r\n self.g_dir == True\r\n if (koorxg+leftg+rightg, kooryg+upg+downg-15) not in walls:\r\n downg -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_down,0)\r\n else:\r\n self.g_dir()\r\n\r\n def go_left(self,value):\r\n global leftg\r\n self.leftg = leftg\r\n self.g_dir == True\r\n if (koorxg+leftg+rightg-15, kooryg+upg+downg) not in walls:\r\n leftg -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_left,0)\r\n else:\r\n self.g_dir()\r\n # print(\"change\")\r\n\r\n def go_right(self,value):\r\n global rightg\r\n self.rightg = rightg\r\n self.g_dir == True\r\n if (koorxg+leftg+rightg+15, kooryg+upg+downg) not in walls:\r\n rightg += 15\r\n draw.draw_ghost(8,0,0,0,Ghost_alpha, (koorxg+7)+leftg+rightg, (kooryg+7)+upg+downg)\r\n glutTimerFunc(randrange(200,300,100),self.go_right,0)\r\n else:\r\n self.g_dir()\r\n\r\n### Ghost 1 ###\r\n### Slenderman ###\r\n def ghost1(self):\r\n global 
upg1,downg1,leftg1,rightg1, koorxg1, kooryg1,left, Ghost_alpha\r\n self.koorxg1 = koorxg1 = 610\r\n self.kooryg1 = kooryg1 = 55\r\n self.Ghost_alpha = Ghost_alpha\r\n self.slender(koorxg1,kooryg1,upg1,downg1,rightg1,leftg1)\r\n if (koorx+left+right, koory+up+down) == ((koorxg1+leftg1+rightg1, kooryg1+upg1+downg1)) or (koorx+left+right, koory+up+down) == ((koorxg1+leftg1+rightg1, kooryg1+upg1+downg1+15)):\r\n print(\"Player tertangkap oleh hantu!!\\nGame Over\")\r\n draw.draw_ghost(35,0.9,0.9,0.9,Ghost_alpha,koorx+left+right,koory+up+down)\r\n left -= 990\r\n\r\n def g_dir1(self):\r\n global change_dir1, dirg1\r\n self.change_dir1 = True\r\n self.dirg1 = dirg1 = choice([\"up\",\"down\",\"right\",\"left\"])\r\n if (koorx+left+(right), koory+up+down) != (880,25):\r\n if self.change_dir1 == True:\r\n upg1 == 0\r\n downg1 == 0\r\n leftg1 == 0\r\n rightg1 == 0\r\n if dirg1 == \"up\":\r\n self.go_up1(0)\r\n elif dirg1 == \"down\":\r\n self.go_down1(0)\r\n elif dirg1 == \"left\":\r\n self.go_left1(0)\r\n elif dirg1 == \"right\":\r\n self.go_right1(0)\r\n\r\n def go_up1(self,value):\r\n global upg1\r\n self.upg1 = upg1\r\n self.g_dir1 == True\r\n if (koorxg1+leftg1+rightg1, kooryg1+upg1+downg1+15) not in walls:\r\n upg1 += 15\r\n glutTimerFunc(randrange(200,300,100),self.go_up1,0)\r\n else:\r\n self.g_dir1()\r\n\r\n def go_down1(self,value):\r\n global downg1\r\n self.downg1 = downg1\r\n self.g_dir1 == True\r\n if (koorxg1+leftg1+rightg1, kooryg1+upg1+downg1-15) not in walls:\r\n downg1 -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_down1,0)\r\n else:\r\n self.g_dir1()\r\n\r\n def go_left1(self,value):\r\n global leftg1\r\n self.leftg1 = leftg1\r\n self.g_dir1 == True\r\n if (koorxg1+leftg1+rightg1-15, kooryg1+upg1+downg1) not in walls:\r\n leftg1 -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_left1,0)\r\n else:\r\n self.g_dir1()\r\n\r\n def go_right1(self,value):\r\n global rightg1\r\n self.rightg1 = rightg1\r\n self.g_dir1 == True\r\n if (koorxg1+leftg1+rightg1+15, kooryg1+upg1+downg1) not in walls:\r\n rightg1 += 15\r\n glutTimerFunc(randrange(200,300,100),self.go_right1,0)\r\n else:\r\n self.g_dir1()\r\n\r\n### Ghost 2 ###\r\n def ghost2(self):\r\n global upg2,downg2,leftg2,rightg2, koorxg2, kooryg2,left, Ghost_alpha\r\n self.koorxg2 = koorxg2 = 115\r\n self.kooryg2 = kooryg2 = 55\r\n self.Ghost_alpha = Ghost_alpha\r\n self.slender(koorxg2,kooryg2, upg2,downg2,rightg2,leftg2)\r\n if (koorx+left+right, koory+up+down) == ((koorxg2+leftg2+rightg2, kooryg2+upg2+downg2)) or (koorx+left+right, koory+up+down) == ((koorxg2+leftg2+rightg2, kooryg2+upg2+downg2+15)):\r\n print(\"Player tertangkap oleh hantu!!\\nGame Over\")\r\n draw.draw_ghost(35,0.9,0.9,0.9,Ghost_alpha,koorx+left+right,koory+up+down)\r\n left -= 990\r\n\r\n def g_dir2(self):\r\n global change_dir2, dirg2\r\n self.change_dir2 = True\r\n self.dirg2 = dirg2 = choice([\"up\",\"down\",\"right\",\"left\"])\r\n if (koorx+left+(right), koory+up+down) != (880,25):\r\n if self.change_dir2 == True:\r\n upg2 == 0\r\n downg2 == 0\r\n leftg2 == 0\r\n rightg2 == 0\r\n if dirg2 == \"up\":\r\n self.go_up2(0)\r\n elif dirg2 == \"down\":\r\n self.go_down2(0)\r\n elif dirg2 == \"left\":\r\n self.go_left2(0)\r\n elif dirg2 == \"right\":\r\n self.go_right2(0)\r\n\r\n def go_up2(self,value):\r\n global upg2\r\n self.upg2 = upg2\r\n self.g_dir2 == True\r\n if (koorxg2+leftg2+rightg2, kooryg2+upg2+downg2+15) not in walls:\r\n upg2 += 15\r\n glutTimerFunc(randrange(200,300,100),self.go_up2,0)\r\n else:\r\n self.g_dir2()\r\n\r\n def 
go_down2(self,value):\r\n global downg2\r\n self.downg2 = downg2\r\n self.g_dir2 == True\r\n if (koorxg2+leftg2+rightg2, kooryg2+upg2+downg2-15) not in walls:\r\n downg2 -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_down2,0)\r\n else:\r\n self.g_dir2()\r\n\r\n def go_left2(self,value):\r\n global leftg2\r\n self.leftg2 = leftg2\r\n self.g_dir2 == True\r\n if (koorxg2+leftg2+rightg2-15, kooryg2+upg2+downg2) not in walls:\r\n leftg2 -= 15\r\n glutTimerFunc(randrange(200,300,100),self.go_left2,0)\r\n else:\r\n self.g_dir2()\r\n\r\n def go_right2(self,value):\r\n global rightg2\r\n self.rightg2 = rightg2\r\n self.g_dir2 == True\r\n if (koorxg2+leftg2+rightg2+15, kooryg2+upg2+downg2) not in walls:\r\n rightg2 += 15\r\n glutTimerFunc(randrange(200,300,100),self.go_right2,0)\r\n else:\r\n self.g_dir2()\r\n\r\nclass ExitKey(): #kelas kunci keluar\r\n def exitkey(self,x,y): #method untuk menggambar kunci\r\n global keylocx,keylocy,ambil_kunci,koorx,left,right, koory,up,down\r\n self.keylocx = keylocx = x\r\n self.keylocy = keylocy = y\r\n draw.dot(10,255,0,255,keylocx,keylocy)\r\n draw.dot(4,0,0,0,keylocx,keylocy)\r\n draw.dot(6,255,0,255,keylocx+8,keylocy)\r\n draw.dot(6,255,0,255,keylocx+14,keylocy)\r\n draw.dot(6,255,0,255,keylocx+20,keylocy)\r\n draw.dot(6,255,0,255,keylocx+20,keylocy-3)\r\n\r\n if ambil_kunci == True: #jika player telah mengambil kunci\r\n draw.dot(15,0.45,0.65,0.4,835,25) #membuka pintu keluar\r\n glColor3f(0.7,0.4,0)\r\n ExitKey.text_exit(850,20,\"Exit\") #memberitahu letak pintu keluar\r\n\r\n if (keylocx,keylocy) == (koorx+(left-15)+(right+15), koory+(up+15)+(down-15)): #jika player berada pada letak kunci\r\n ambil_kunci = True #ambil kunci = True\r\n keylocx += 470 #mengubah lokasi kunci ke interface\r\n keylocy -= 90\r\n print(\"Kunci keluar telah diambil\\nCepat! 
Cari jalan keluar!\")\r\n\r\n def text_kunci(xpos, ypos,text): #teks kunci\r\n glRasterPos2i(xpos,ypos)\r\n for i in range(len(text)):\r\n glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24, ord(text[i]))\r\n\r\n def text_exit(xpos, ypos,text): #teks exit\r\n glRasterPos2i(xpos,ypos)\r\n for i in range(len(text)):\r\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(text[i]))\r\n\r\n def petunjuk(xpos, ypos,text): #teks petunjuk\r\n glRasterPos2i(xpos,ypos)\r\n for i in range(len(text)):\r\n glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, ord(text[i]))\r\n\r\n### class instance ###\r\ndraw = Draw()\r\nplayer = Player()\r\nexitkey = ExitKey()\r\nghosts = Ghost()\r\n\r\ndef iterate(): #iterasi\r\n glViewport(0, 0, 1000, 500) #ukuran viewport\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n glOrtho(0.0, 1000, 0.0, 500, 0.0, 1.0)\r\n glMatrixMode (GL_MODELVIEW)\r\n glEnable(GL_BLEND) \r\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) #untuk menyalakan alpha\r\n glLoadIdentity()\r\n\r\n\r\ndef showScreen(): #apa yang ditampilkan di viewport\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) #jenis color dan depth buffer\r\n glClearColor(0.1,0.2,0.3,0.4) #warna latar belakang\r\n glLoadIdentity()\r\n iterate() #memanggil iterasi\r\n draw.draw_walls(15,0.1,0.7,1.9) #menggambar dinding\r\n player.playerk(25,25) #menggambar player\r\n sleep(0.15) # membuat player berjalan lebih lambat dari hantu\r\n glColor3f(0, 0.5, 0.9) #warna teks kunci\r\n ExitKey.text_kunci(850,350,\"kunci\") #teks kunci\r\n glColor3f(0.9,0.9,0)\r\n ExitKey.petunjuk(850,440,\"Baterai Ghost Detector\")\r\n glColor3f(0.9,0,0)\r\n ExitKey.petunjuk(850,100,\"Tekan tombol END\")\r\n ExitKey.petunjuk(850,80,\"untuk aktifkan ghost detector\")\r\n ghosts.ghost() #menggambar ghost 1\r\n ghosts.ghost1() #ghost2\r\n ghosts.ghost2() #ghost3\r\n Player.gbr_baterai() #gambar baterai\r\n exitkey.exitkey(keylocx,keylocy) #kunci exit\r\n glutSwapBuffers()\r\n\r\nglutInit()\r\nglutInitDisplayMode(GLUT_RGBA)\r\nglutInitWindowSize(1000, 500)\r\nglutInitWindowPosition(100,0)\r\nwind = glutCreateWindow(\"Ghost.gl\")\r\nglutDisplayFunc(showScreen)\r\nglutIdleFunc(showScreen)\r\nglutSpecialFunc(Player.player_movement) #fungsi special untuk player\r\nghosts.g_dir() #memanggil ai dari ghost\r\nghosts.g_dir1() #ghost 2\r\nghosts.g_dir2() #ghost3\r\nplayer.minbaterai() #pengurangan baterai saat dinyalakan\r\nanimation(0) #animasi dari player dan hantu\r\nglutMainLoop()","repo_name":"Samyugultom/Ghost.gl---Game","sub_path":"main.py.py","file_name":"main.py.py","file_ext":"py","file_size_in_byte":27358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35799937903","text":"from typing import Tuple\n\nfrom webdnn.graph.attribute import Attribute\nfrom webdnn.graph.operator import Operator\nfrom webdnn.graph.variable import Variable\n\n\nclass Associative(Attribute[Operator]):\n \"\"\"Associative(op, var_keys)\n Associative property\n\n The operator with this attribute satisfies follow conditions.\n\n - The operator is elementwise operator.\n - The operator receives just two input variables.\n - Rearranging the parentheses in sequence of this operation will not change its value.\n\n .. 
math::\n\n \\left( a \\circ b \\right) \\circ c = a \\circ \\left( b \\circ c \\right)\n\n Attributes:\n var_keys (tuple of str): input names which can be swapped.\n \"\"\"\n\n def __init__(self, op: Operator, var_keys: Tuple[str, str]):\n super(Associative, self).__init__(op)\n self.var_keys = var_keys\n\n @property\n def vars(self):\n return tuple(self.base.inputs[key] for key in self.var_keys)\n\n def reorder(self, other_op: Operator):\n \"\"\"\n Reorder operation\n\n .. code-block:: text\n\n (x1 + x2) + x3\n\n x1 -[var1]-+\n +-{op1}- h -[var1]-+\n x2 -[var2]-+ +-{op2}- y -\n |\n x3 --------------------[var2]-+\n\n In this case, :code:`op2_associative_attr.reorder(other_op=op1)` modified the computation graph as follows,\n\n .. code-block:: text\n\n x1 + (x2 + x3)\n\n x1 --------------------[var1]-+\n |\n x2 -[var1]-+ +-{op1}- y -\n +-{op2}- h -[var2]-+\n x3 -[var2]-+\n \"\"\"\n if not isinstance(other_op, self.base.__class__):\n raise TypeError(f\"\"\"\nThe parameter \"other_op\" must be the instance of the same class as base operator:\n (self.base.__class__) = {self.base.__class__}\n (other_op.__class__) = {other_op.__class__}\"\"\")\n\n op2 = self.base\n op1 = other_op\n op1_attr = op1.get_attribute(Associative)[0]\n\n y = list(op2.outputs.values())[0]\n\n if self.vars[0].output_from == other_op:\n \"\"\"\n case A: First operand is created from `other_op`: (x1 + x2) + x3\n\n x1 -[var1]-+\n +-{op1}- h -[var1]-+\n x2 -[var2]-+ +-{op2}- y -\n |\n x3 --------------------[var2]-+\n \"\"\"\n h, x3 = self.vars\n x1, x2 = op1_attr.vars\n\n elif self.vars[1].output_from == other_op:\n \"\"\"\n case B: Second operand is created from `other_op`: x1 + (x2 + x3)\n\n x1 --------------------[var1]-+\n |\n x2 -[var1]-+ +-{op2}- y -\n +-{op1}- h -[var2]-+\n x3 -[var2]-+\n \"\"\"\n x1, h = self.vars\n x2, x3 = op1_attr.vars\n\n else:\n raise ValueError(f\"\"\"\nThe parameter \"other_op\" must be the creator of either one input variable of base operator:\n (var1.output_from) = {self.vars[0].output_from}\n (var2.output_from) = {self.vars[1].output_from}\n (other_op) = {other_op}\"\"\")\n\n if len(h.input_to) > 1:\n raise ValueError(f\"Reordering cannot be performed. 
Intermediate value is used by other operator.\")\n\n op2.replace_input(h, x2, with_assert=False)\n \"\"\"\n case A: (x1+x2)+x3 => x1+(x2+x3)\n\n x1 -[var1]-+\n +-{op1}- h\n x2 -[var2]-+\n\n x2 -[var1]-+\n +-{op2}- y\n x3 -[var2]-+\n\n -------------------------------------------------------\n\n case B: x1+(x2+x3) => (x1+x2)+x3\n\n x1 -[var1]-+\n +-{op2}- y -\n x2 -[var2]-+\n\n x2 -[var2]-+\n +-{op1}- h\n x3 -[var1]-+\n \"\"\"\n\n h_new = Variable(x2.shape, x2.order)\n op1.replace_input(x2, h_new, with_assert=False)\n \"\"\"\n case A: (x1+x2)+x3 => x1+(x2+x3)\n\n x1 -[var1]-+\n +-{op1}- h\n h_new -[var2]-+\n\n x2 -[var1]-+\n +-{op2}- y -\n x3 -[var2]-+\n\n -------------------------------------------------------\n\n case B: x1+(x2+x3) => (x1+x2)+x3\n\n x1 -[var1]-+\n +-{op2}- y -\n x2 -[var2]-+\n\n h_new -[var2]-+\n +-{op1}- h\n x3 -[var1]-+\n \"\"\"\n\n op2.replace_output(y, h_new, with_assert=False)\n \"\"\"\n case A: (x1+x2)+x3 => x1+(x2+x3)\n\n x1 -[var1]-+\n x2 -[var1]-+ +-{op1}- h\n +-{op2}- h_new -[var2]-+\n x3 -[var2]-+\n\n -------------------------------------------------------\n\n case B: x1+(x2+x3) => (x1+x2)+x3\n\n x1 -[var1]-+\n +-{op2}- h_new -[var1]-+\n x2 -[var2]-+ +-{op1}- h\n x3 -[var2]-+\n \"\"\"\n\n op1.replace_output(h, y, with_assert=False)\n \"\"\"\n case A: (x1+x2)+x3 => x1+(x2+x3)\n\n x1 -[var1]-+\n x2 -[var1]-+ +-{op1}- y -\n +-{op2}- h_new -[var2]-+\n x3 -[var2]-+\n\n -------------------------------------------------------\n\n case B: x1+(x2+x3) => (x1+x2)+x3\n\n x1 -[var1]-+\n +-{op2}- h_new -[var1]-+\n x2 -[var2]-+ +-{op1}- y -\n x3 -[var2]-+\n \"\"\"\n","repo_name":"LinXueyuanStdio/hash2face","sub_path":"webdnn/src/graph_transpiler/webdnn/graph/operators/attributes/associative.py","file_name":"associative.py","file_ext":"py","file_size_in_byte":5871,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"27346225621","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\n\nimport os\nimport pytest\nfrom pandas import DataFrame\nfrom psychopy.visual.window import Window\nfrom psychopy.visual.form import Form\nfrom psychopy.visual.text import TextStim\nfrom psychopy.visual.slider import Slider\nfrom psychopy import constants\nimport shutil\nfrom tempfile import mkdtemp\n\n\nclass Test_Form(object):\n \"\"\"Test suite for Form component\"\"\"\n\n def setup_class(self):\n # Create temp files for storing items\n self.temp_dir = mkdtemp()\n self.fileName_xlsx = os.path.join(self.temp_dir, 'items.xlsx')\n self.fileName_csv = os.path.join(self.temp_dir, 'items.csv')\n\n # create some questions\n self.questions = []\n self.genderItem = {\"questionText\": \"What is your gender?\",\n \"questionWidth\": 0.7,\n \"type\": \"radio\",\n \"responseWidth\": 0.3,\n \"options\": \"Male, Female, Other\",\n \"layout\": 'vert',\n \"index\": 0,\n \"questionColor\": \"white\",\n \"responseColor\": \"white\"\n }\n self.questions.append(self.genderItem)\n # then a set of ratings\n items = [\"running\", \"cake\", \"programming\"]\n for idx, item in enumerate(items):\n entry = {\"questionText\": \"How much you like {}\".format(item),\n \"questionWidth\": 0.7,\n \"type\": \"rating\",\n \"responseWidth\": 0.3,\n \"options\":\"Lots, some, Not a lot, Longest Option\",\n \"layout\": 'horiz',\n \"index\": idx+1,\n \"questionColor\": \"white\",\n \"responseColor\": \"white\"\n }\n self.questions.append(entry)\n\n self.win = Window(units='height', allowStencil=True, autoLog=False)\n self.survey = 
Form(self.win, items=self.questions, size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n\n # Create datafiles\n df = DataFrame(self.questions)\n df.to_excel(self.fileName_xlsx, index=False)\n df.to_csv(self.fileName_csv, index=False)\n\n def test_importItems(self):\n wrongFields = [{\"a\": \"What is your gender?\",\n \"b\": 0.7,\n \"c\": \"radio\",\n \"d\": 0.3,\n \"e\": \"Male, Female, Other\",\n \"f\": 'vert',\n \"g\": \"white\",\n \"h\": \"white\"\n }]\n\n wrongOptions = [{\"questionText\": \"What is your gender?\",\n \"questionWidth\": 0.7,\n \"type\": \"radio\",\n \"responseWidth\": 0.3,\n \"options\": \"Other\",\n \"layout\": 'vert',\n \"index\": 0,\n \"questionColor\": \"white\",\n \"responseColor\": \"white\"}]\n\n reducedHeaders = [{\"questionText\": \"What is your gender?\"}]\n\n # Check options for list of dicts\n with pytest.raises(ValueError):\n self.survey = Form(self.win, items=wrongOptions, size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n\n # Check default values are applied\n self.survey = Form(self.win, items=reducedHeaders, size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n\n # Check csv\n self.survey = Form(self.win, items=self.fileName_csv,\n size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n # Check Excel\n self.survey = Form(self.win, items=self.fileName_xlsx,\n size=(1.0, 0.3), pos=(0.0, 0.0), randomize=False, autoLog=False)\n\n def test_randomize_items(self):\n assert self.questions == self.survey.items\n self.survey.randomize = True\n assert self.questions != self.survey.randomizeItems(self.questions)\n\n def test_set_scroll_speed(self):\n items = 2\n for multipliers in [1,2,3,4]:\n assert self.survey.setScrollSpeed([0] * items, multipliers) == items * multipliers\n assert self.survey.setScrollSpeed([0] * items, multipliers) == items * multipliers\n assert self.survey.setScrollSpeed([0] * items, multipliers) == items * multipliers\n\n def test_question_text_wrap(self):\n for size in [.2, .3, .4]:\n assert self.survey._questionTextWrap(size) == size * self.survey.size[0] - (self.survey.itemPadding * 2)\n\n def test_response_text_wrap(self):\n options = ['a', 'b', 'c']\n for size in [.2, .3, .4]:\n item = {\"responseWidth\": size, \"options\": options}\n assert self.survey._responseTextWrap(item) == size * self.survey.size[0] / len(options)\n\n def test_set_questions(self):\n survey = Form(self.win, items=[self.genderItem], size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n textStim, questionHeight, questionWidth = survey._setQuestion(self.genderItem)\n\n assert type(textStim) == TextStim\n assert type(questionHeight) == float\n assert type(questionWidth) == float\n\n def test_set_response(self):\n survey = Form(self.win, items=[self.genderItem], size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n textStim, questionHeight, questionWidth = survey._setQuestion(self.genderItem)\n sliderStim, respHeight = survey._setResponse(self.genderItem, textStim)\n\n assert type(sliderStim) == Slider\n assert type(respHeight) == float\n\n def test_form_size(self):\n assert self.survey.size[0] == (1.0, 0.3)[0] # width\n assert self.survey.size[1] == (1.0, 0.3)[1] # height\n\n def test_aperture_size(self):\n assert self.survey.aperture.size[0] == self.survey.size[0]\n assert self.survey.aperture.size[1] == self.survey.size[1]\n\n def test_border_limits(self):\n survey = self.survey\n assert survey.leftEdge == survey.pos[0] - survey.size[0]/2.0\n assert survey.rightEdge == survey.pos[0] + survey.size[0]/2.0\n assert survey.topEdge == survey.pos[1] + survey.size[1]/2.0\n\n def 
test_text_height(self):\n assert self.survey.textHeight == 0.02\n\n def test_item_padding(self):\n assert self.survey.itemPadding == 0.05\n\n def test_form_units(self):\n assert self.survey.units == 'height'\n\n def test_scroll_offset(self):\n for idx, positions in enumerate([1, 0]): # 1 is start position\n self.survey.scrollbar.markerPos = positions\n posZeroOffset = (self.survey.size[1]\n - self.survey.itemPadding\n + min(self.survey._baseYpositions))\n assert self.survey._getScrollOffset() == [0., posZeroOffset][idx]\n\n def test_screen_status(self):\n assert self.survey._inRange(self.survey.formElements['question'][0])\n if constants.PY3:\n with pytest.raises(AssertionError):\n assert self.survey._inRange(self.survey.formElements['question'][3])\n\n def test_get_data(self):\n self.survey = Form(self.win, items=self.questions, size=(1.0, 0.3), pos=(0.0, 0.0), autoLog=False)\n data = self.survey.getData()\n assert set(data['questions']) == {'What is your gender?',\n 'How much you like running',\n 'How much you like cake',\n 'How much you like programming',}\n assert set(data['ratings']) == {None}\n assert set(data['rt']) == {None}\n assert set(data['itemIndex']) == {0, 1, 2, 3}\n\n def teardown_class(self):\n shutil.rmtree(self.temp_dir)\n self.win.close()\n\n\nif __name__ == \"__main__\":\n test = Test_Form()\n test.setup_class()\n test.teardown_class()\n","repo_name":"soheilbr82/BluegrassWorkingMemory","sub_path":"Python_Engine/Lib/site-packages/psychopy/tests/test_all_visual/test_form.py","file_name":"test_form.py","file_ext":"py","file_size_in_byte":7882,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"26522213984","text":"# coding: UTF-8\r\nfrom PySide import QtCore, QtGui\r\nimport sys\r\nimport pyqtgraph as pg\r\nimport numpy as np\r\nfrom TCI.styles.LabelStyles import *\r\n\r\nclass Slider(pg.GradientEditorItem):\r\n def __init__(self, *args, **kargs):\r\n super(Slider,self).__init__(*args,**kargs)\r\n def tickClicked(self,tick,ev):\r\n pass\r\n\r\nclass Axis(pg.AxisItem):\r\n def __init__(self, *args, **kargs):\r\n super(Axis,self).__init__(*args,**kargs)\r\n\r\nclass SliderWidget(pg.GraphicsLayout):\r\n sigRangeChanged = QtCore.Signal(object)\r\n def __init__(self):\r\n super(SliderWidget,self).__init__(None)\r\n self.setupGUI()\r\n self.slider.sigGradientChanged.connect(self.emitRangeChangedSig)\r\n\r\n def emitRangeChangedSig(self):\r\n self.sigRangeChanged.emit(self)\r\n self.interval()\r\n\r\n def setupGUI(self):\r\n self.setWindowTitle(\"Igor\")\r\n pg.setConfigOption('background', (255, 255, 255))\r\n pg.setConfigOption('foreground', (0, 0, 0))\r\n # self.setGeometry(500, 300, 350, 200)\r\n self.slider = Slider(orientation='top', allowAdd=False)\r\n # self.slider = pg.TickSliderItem(orientation='top', allowAdd=False)\r\n self.addItem(self.slider)\r\n self.slider.tickSizer = 0\r\n self.slider.rectSize = 0\r\n for i in self.slider.ticks:\r\n self.slider.setTickColor(i, QtGui.QColor(150, 150, 150))\r\n self.axis = pg.AxisItem('bottom')\r\n self.axis = Axis('bottom')\r\n self.nextRow()\r\n self.addItem(self.axis)\r\n # self.slider.setMaxDim(5)\r\n self.axis.setStyle(tickTextOffset=TICK_OFFSET)\r\n self.axis.tickFont = TICK_FONT_TYPE\r\n\r\n def setRange(self, min, max):\r\n self.axis.setRange(min, max)\r\n\r\n def axisRange(self):\r\n return self.axis.range\r\n\r\n def interval(self):\r\n r = self.axisRange()\r\n interval = []\r\n for i in self.slider.ticks:\r\n interval.append(self.slider.tickValue(i))\r\n scale = 
self.axisRange()[1]\r\n interval = np.array(sorted(interval))*scale\r\n return interval\r\n\r\n def setInterval(self, interval):\r\n '''\r\n interval list [min, max]\r\n min and max are floats >0 and < 1\r\n '''\r\n for i, tick in enumerate(self.slider.ticks):\r\n self.slider.setTickValue(tick, interval[i])\r\n\r\n\r\nif __name__ == '__main__':\r\n pg.setConfigOption('foreground',(0,0,0))\r\n pg.setConfigOption('background', (255,255,255))\r\n # ya cacas\r\n\r\n App = QtGui.QApplication(sys.argv)\r\n view = pg.GraphicsView()\r\n slider = SliderWidget()\r\n view.setCentralItem(slider)\r\n view.setGeometry(80, 50, 800, 600)\r\n view.show()\r\n App.exec_()\r\n","repo_name":"ishovkun/TCI","sub_path":"base_widgets/Slider.py","file_name":"Slider.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"69921239400","text":"class Solution:\n def sumOddLengthSubarrays(self, arr: List[int]) -> int:\n sum=0\n n=len(arr)\n for i in range(n):\n sum+=((((i+1)*(n-i)+1)//2)*arr[i])\n return sum\n \n \n \n ","repo_name":"dhanashreeg368/DSA-Practice-Questions","sub_path":"Arrays/1588-sum-of-all-odd-length-subarrays/1588-sum-of-all-odd-length-subarrays.py","file_name":"1588-sum-of-all-odd-length-subarrays.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"10693562821","text":"#!/usr/bin/env python\nimport rospy\nfrom Queue import Queue, Empty\nimport time\nimport threading\nimport numpy as np\nimport copy\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.collections import LineCollection\n\n# messages\nfrom exercise3.msg import stitching_points\n\n\nclass PointsViewer:\n def __init__(self):\n self.points_queue = Queue()\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(111, aspect='equal')\n\n self.global_pts, = plt.plot([], [], 'bo')\n self.local_pts, = plt.plot([], [], 'r.', markersize=8)\n self.local_trans_pts, = plt.plot([], [], 'k.')\n self.local_lines = LineCollection([], linewidths=0.5, zorder=0, colors='black')\n self.ax.add_collection(self.local_lines)\n self.robot_pos, = plt.plot([], [], 'ro')\n\n d = 5.75 # laser range\n plt.xlim(-d, d)\n plt.ylim(-d, d)\n self.ani = animation.FuncAnimation(self.fig, self._updatefig,\n interval=100)\n\n def show(self, blocking=False):\n plt.ion() # interactive\n plt.show(block=blocking)\n\n def _updatefig(self, x):\n try:\n msg = self.points_queue.get_nowait()\n except Empty:\n if __name__ == '__main__' and rospy.is_shutdown():\n plt.close() # this will crash tk but oh well\n\n return (self.global_pts, self.local_pts, self.local_trans_pts,\n self.robot_pos, self.local_lines)\n\n #with open('local_map.py', 'w') as f:\n #f.write('cells = {}\\n'.format(new_data))\n #print('local map written to file')\n\n if msg.counter is not None:\n self.ax.set_title('local map {}'.format(msg.counter))\n #plt.draw()\n\n\n self.global_pts.set_xdata(msg.global_xs)\n self.global_pts.set_ydata(msg.global_ys)\n\n self.local_pts.set_xdata(msg.local_xs)\n self.local_pts.set_ydata(msg.local_ys)\n\n self.local_trans_pts.set_xdata(msg.local_trans_xs)\n self.local_trans_pts.set_ydata(msg.local_trans_ys)\n\n local_pts = zip(msg.local_xs, msg.local_ys)\n local_trans_pts = zip(msg.local_trans_xs, msg.local_trans_ys)\n self.local_lines.set_segments(list(zip(local_pts, local_trans_pts)))\n\n self.robot_pos.set_xdata([msg.rx, 
msg.trans_rx])\n self.robot_pos.set_ydata([msg.ry, msg.trans_ry])\n\n return (self.global_pts, self.local_pts, self.local_trans_pts,\n self.robot_pos, self.local_lines)\n\n def update_points(self, msg):\n self.points_queue.put(msg)\n\n\ndef points_callback(msg):\n global pv\n pv.update_points(msg)\n\ndef ros_spin():\n rospy.spin()\n\ndef fake_points_msg():\n\n ps = stitching_points()\n ps.global_xs = [1, 1]\n ps.global_ys = [0, 1]\n ps.local_xs = [2, 2]\n ps.local_ys = [0, 1]\n ps.local_trans_xs = [1.5, 1.5]\n ps.local_trans_ys = [0, 1]\n ps.rx = 0\n ps.ry = 0\n ps.trans_rx = 1\n ps.trans_ry = -1\n points_callback(ps)\n\nif __name__ == '__main__':\n pv = PointsViewer()\n\n #fake_points_msg()\n\n rospy.init_node('points_viewer')\n\n ros_thread = threading.Thread(target=ros_spin)\n ros_thread.daemon = True\n rospy.Subscriber('local_map_points', stitching_points, points_callback)\n ros_thread.start()\n\n\n # non-blocking isn't working great\n block = True\n if block:\n pv.show(blocking=True)\n else:\n pv.show(blocking=False)\n\n while not rospy.is_shutdown():\n pv.fig.canvas.draw()\n #plt.draw()\n time.sleep(0.1)\n\n plt.close()\n","repo_name":"joeski1/Robotics","sub_path":"exercise3/src/utils/point_cloud_viewer.py","file_name":"point_cloud_viewer.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73041660199","text":"from node import Node\nfrom edge import Edge\nfrom queue import PriorityQueue\n\ndef queueContains(queue, costP, node):\n for costQ, pathQ in list(queue.queue):\n if node in pathQ and costP >= costQ:\n return True\n\ndef aStarIsBorn(graph, startnode, goalnode):\n queue = PriorityQueue()\n queue.put((0, [startnode]))\n\n while not queue.empty():\n cost, path = queue.get()\n \n print(path)\n\n lastNode = path[len(path) - 1]\n\n if lastNode == goalnode:\n return path\n\n for edge in lastNode.edges:\n if edge.node not in path:\n totalCost = cost + edge.cost - lastNode.heuristic + edge.node.heuristic\n newPath = path.copy()\n newPath.append(edge.node)\n\n if not queueContains(queue, totalCost, edge.node):\n queue.put((totalCost, newPath))","repo_name":"JoachimVeulemans/PXL-DIGITAL","sub_path":"PXL_DIGITAL_JAAR_2/AI & Robotics/Week 2/Exercises/Solutions/Lesson 2/Exercise 1/aStar.py","file_name":"aStar.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"18"} +{"seq_id":"34451855166","text":"from flask import Blueprint, jsonify, current_app, render_template\n\n\nbp = Blueprint('test', __name__)\n\n@bp.route('/')\ndef index():\n rules_iterator = current_app.url_map.iter_rules()\n return jsonify(\n {rule.endpoint: rule.rule for rule in rules_iterator if rule.endpoint not in ('route_map', 'static')})\n\n\n@bp.route('/test')\ndef test():\n return render_template('test.html')","repo_name":"xlztongxue/xlzwenxue","sub_path":"xlzwx-backend/applet_app/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8957455481","text":"# Recursion\n# Creating a solution to a problem by relying upon earlier solutions.\n\n# The main concept: \n# \"A problem with input of N may be too large and difficult to solve\"\n# \"Maybe we can solve a smaller version of the problem when N gets small enough\"\n# \"We will shrink the problem by calling the function on an argument smaller than N ... 
called X for example. \n# If f(x) is solvable, then maybe we can use that to solve f(n).\"\n\n# The hardest parts of Recursion\n# 1. How to identify when a recursion can be used.\n''' Some Problem Classifications:\n Divide and Conquer Problems\n Tree and Graph Structures\n Nested Structures\n Backtracking\n Dynamic Programming\n'''\n# 2. How to construct a recursive statement\n\n# Given Problem: Add all numbers from 1 to N; where N is a positive integer\n''' \nLet N be 5\n f(5) --> 5 + 4 + 3 + 2 + 1\n f(4) --> 4 + 3 + 2 + 1\n\n therefore, f(5) = 5 + f(4)\n f(4) = 4 + f(3)\n f(3) = 3 + f(2)\n f(2) = 2 + f(1)\n f(1) = 1\n\n then maybe ... f(N) = N + f(N-1)\n'''\n\n# recipes for creating a recursive function\n# 1. Create a base case (the simplest versions of the problem which have simple solutions)\n\n# 2. If the given argument for the function is not one of the base case:\n# 2a. Design how to work towards one of the base cases\n# 2b. Get to the base case by calling the function itself with a smaller \n\ndef r_sum(num):\n # Base Case 1 , num is 0\n if num == 0:\n return 0\n # Base Case 2, num is 1\n elif num == 1:\n return 1\n else:\n return num + r_sum(num-1)\n# end of r_sum\n\nprint(f\"Sum of all numbers from 1 to 10: {r_sum(10)}\")","repo_name":"mrparkonline/ics4u_2023F","sub_path":"video_solution/vid48.py","file_name":"vid48.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"75085391081","text":"\n# coding: utf-8\n\n# In[51]:\n\n# Finish data wrangling and create graphs\n# Stock Sentiment\n# Ethan, Sarah, and Brandon\n\n# Import libraries\nimport pandas as pd\nfrom datetime import datetime\nget_ipython().magic(u'matplotlib inline')\nimport matplotlib as mp\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# In[52]:\n\n# Import data\nStock_Polarity = pd.read_csv(\"Stock_Polarity_Data.csv\") # Index, Symbol, Date, Close, Polarity\nTwitter_Positivity = pd.read_csv(\"Twitter_Positivity_Data.csv\")\nTwitter_Positivity = Twitter_Positivity.drop([\"Unnamed: 0\"], axis=1)\n\n\n# In[68]:\n\n# Compress the stock data into compressed overall data\nunique_dates = pd.unique(Stock_Polarity[\"Date\"])\npositives = {}\ntotals = {}\npercent = {}\nfor date in unique_dates:\n positives[date] = Stock_Polarity[(Stock_Polarity[\"Date\"] == date) & (Stock_Polarity[\"Polarity\"] == 1)].count()[\"Polarity\"]\n totals[date] = Stock_Polarity[(Stock_Polarity[\"Date\"] == date)].count()[\"Polarity\"]\n percent[date] = float(positives[date]) / float(totals[date])\n\n\n# In[54]:\n\n# Create new compressed data frame\ndf1 = pd.DataFrame(positives.items(), columns=[\"Date\",\"PositiveStocks\"])\ndf2 = pd.DataFrame(totals.items(), columns=[\"Date\",\"TotalStocks\"])\ndf3 = pd.DataFrame(percent.items(), columns=[\"Date\",\"StocksPercentPositive\"])\n\nStock_Data = df1.merge(df2).merge(df3, sort=True)\n\n\n# In[55]:\n\nMaster_Data = Stock_Data.merge(Twitter_Positivity, sort=True, how=\"inner\", on=[\"Date\"])\nMaster_Data[\"DateInt64\"] = pd.to_datetime(Master_Data.Date).astype(np.int64)\n\n\n# In[71]:\n\nMaster_Data\n\n\n# In[74]:\n\n# Create a line graph of the data\nline_graph = Master_Data.plot(kind=\"line\", x=\"DateInt64\", y=[\"Percentage\",\"StocksPercentPositive\"], title=\"Twitter Percentage of Positve Tweets to Percent of Increasing Stocks 6/1/09 to 6/5/09\", legend=False)\npatches, default_labels = line_graph.get_legend_handles_labels()\nline_graph.legend(patches, [\"Twitter Percentage Positive\", 
\"Stocks Percentage Positive\"], loc=\"best\")\nline_graph.set_xlabel(\"Date as Int64 Representation\")\nline_graph.set_ylabel(\"Positive Percentage\")\nline_graph.set_xlim(1241136000000000000,1244160000000000000)\n\n\n# In[75]:\n\n# Save the graoh\nline_graph.get_figure().savefig(\"linegraph.png\")\n\n\n# In[72]:\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"egroves01/CMDA","sub_path":"Final Project/Code/Final_Project_Part2.py","file_name":"Final_Project_Part2.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42011024160","text":"from alarmDbConnectionManager import AlarmConnectionManager\nfrom AlarmTimeEntity import AlarmTimeEntity\n\nclass AlarmTimeDao:\n def __init__(self):\n self.conn = (AlarmConnectionManager()).getConnection\n self.cursor = self.conn.cursor()\n\n def select(self, where=\"\", limit=0, join=\"\", columnsPlus=\"\"):\n limitStr = \"\"\n \n if(limit > 0):\n limitStr = \"limit \" + str(limit)\n \n columnsPlus = \",\"+columnsPlus if columnsPlus is not \"\" else columnsPlus\n \n where = \" WHERE \" + where + \" \" if where is not \"\" else where\n \n return self.cursor.execute(\"SELECT Alarm_times.id as Alarm_timesId, Alarm_times.days as Alarm_timesDays, \"\n +\"Alarm_times.times as Alarm_timesTimes, Alarm_times.alarm_id as Alarm_timesAlarm_id\"\n + columnsPlus\n +\" FROM Alarm_times \"\n + join+\" \"\n + where\n + \" order by times asc \"\n + limitStr).fetchall()\n \n def selectNext(self, day, time=-1, id=-1):\n timeClause = \" AND times >= '\"+str(time)+\"' \" if time is not -1 else \"\"\n whereClause = \"(days = '\" + str(day)+\"'\" + str(timeClause) + \") AND Alarm_timesId != '\" + str(id)+\"'\"\n \n join = \" JOIN Alarm ON Alarm_times.alarm_id = Alarm.id \"\n columnsPlus = \"Alarm.id, Alarm.desc\"\n \n resultArray = self.select(whereClause, 1, join, columnsPlus)\n \n if(len(resultArray) > 0):\n return AlarmTimeEntity(resultArray[0])\n \n return None\n\n @property\n def close(self):\n self.conn.close()\n \n","repo_name":"Guervyl/GVPAlarm-manager","sub_path":"AlarmTimeDao.py","file_name":"AlarmTimeDao.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1975049554","text":"#!/usr/bin/env python3\n\"\"\" LeNet-5 (Keras) \"\"\"\nimport tensorflow.keras as K\n\n\ndef lenet5(x):\n \"\"\" builds modified version of LeNet-5 architecture using keras \"\"\"\n conv_lay1 = K.layers.Conv2D(filters=6,\n kernel_size=(5, 5),\n padding='same',\n activation='relu',\n kernel_initializer='he_normal')(x)\n pool_lay1 = K.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))(conv_lay1)\n conv_lay2 = K.layers.Conv2D(filters=16,\n kernel_size=(5, 5),\n padding='valid',\n activation='relu',\n kernel_initializer='he_normal')(pool_lay1)\n pool_lay2 = K.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))(conv_lay2)\n flatten = K.layers.Flatten(input_shape=(28, 28))(pool_lay2)\n full_lay3 = K.layers.Dense(units=120,\n activation='relu',\n kernel_initializer='he_normal')(flatten)\n full_lay4 = K.layers.Dense(units=84,\n activation='relu',\n kernel_initializer='he_normal')(full_lay3)\n softmax = K.layers.Dense(units=10,\n activation='softmax',\n kernel_initializer='he_normal')(full_lay4)\n model = K.Model(inputs=x, outputs=softmax)\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return 
model\n","repo_name":"yasheymateen/holbertonschool-machine_learning","sub_path":"supervised_learning/0x07-cnn/5-lenet5.py","file_name":"5-lenet5.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42380764666","text":"import numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify\nimport datetime as dt\nfrom datetime import datetime\n\n\n#################################################\n# Database Setup\n#################################################\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite?check_same_thread=False\")\n\n# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)\n\n# We can view all of the classes that automap found\nBase.classes.keys()\n\n# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station\n\n# Create our session (link) from Python to the DB\nsession = Session(engine)\n#################################################\n# Flask Setup\n#################################################\napp = Flask(__name__)\n\n\n#################################################\n# Flask Routes\n#################################################\n\n@app.route(\"/\")\ndef home():\n # Creating a landing page\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/start
\"\n f\"/api/v1.0/start/end\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n # Createing a query to retrieve dates and precipitation values\n scores = session.query(Measurement.date, Measurement.prcp).all()\n \n # Converting the query results to a Dictionary\n precipitation = []\n for score in scores:\n precipitation_dict = {}\n precipitation_dict[\"date\"] = score.date\n precipitation_dict[\"precipitation\"] = score.prcp\n precipitation.append(precipitation_dict)\n return jsonify(precipitation)\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n # Createing a query to retrieve station values\n stations_query = session.query(Station.station).all()\n\n # Converting the query results to a Dictionary\n stations = []\n for station_val in stations_query:\n stations_dict = {}\n stations_dict[\"station\"] = station_val.station\n stations.append(stations_dict)\n return jsonify(stations)\n# \n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n # Calculate latest date in the database\n latest_date = session.query(Measurement.date).first()\n # Convert latest_date string to a date object\n date_object = datetime.strptime(str(latest_date), \"('%Y-%m-%d',)\")\n # Calcuate one year ago from the latest date\n year_ago = date_object - dt.timedelta(days=365)\n # Createing a query to retrieve\n temp_obs_querry = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date > year_ago).all()\n \n # Converting the query results to a Dictionary\n temp_obs = []\n for obs in temp_obs_querry:\n temp_obs_dict = {}\n temp_obs_dict[\"date\"] = obs.date\n temp_obs_dict[\"temp_bservation\"] = obs.tobs\n temp_obs.append(temp_obs_dict)\n return jsonify(temp_obs)\n\n\n@app.route(\"/api/v1.0/\")\ndef start_func(start):\n # Createing a query to retrieve max, min and avg temp observations for a givan date range\n start_temp_val = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs))\\\n .filter(Measurement.date >= start).all()\n start_date_temp = []\n \n # Converting the query results to a Dictionary\n for value in start_temp_val:\n start_date_temp_dict = {}\n start_date_temp_dict[\"min_temp\"] = value[0]\n start_date_temp_dict[\"avg_temp\"] = value[1]\n start_date_temp_dict[\"max_temp\"] = value[2]\n start_date_temp.append(start_date_temp_dict)\n return jsonify(start_date_temp)\n \n@app.route(\"/api/v1.0//\")\ndef start_end(start,end):\n # Createing a query to retrieve max, min and avg temp observations for a givan date range\n start_end_temp_val = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n # Converting the query results to a Dictionary \n start_end_date_temp = []\n for val in start_end_temp_val:\n start_end_date_temp_dict = {}\n start_end_date_temp_dict[\"min_temp\"] = val[0]\n start_end_date_temp_dict[\"avg_temp\"] = val[1]\n start_end_date_temp_dict[\"max_temp\"] = val[2]\n start_end_date_temp.append(start_end_date_temp_dict)\n return jsonify(start_end_date_temp)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"nandriychuk/SQLAlchemy","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74814180520","text":"#!python airflow\r\n\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\n\r\nfrom airflow import DAG\r\nfrom 
airflow.operators.dummy_operator import DummyOperator\r\nfrom airflow.operators.bash_operator import BashOperator\r\nfrom airflow.operators.python_operator import PythonOperator\r\n\r\nfrom airflow.utils.dates import days_ago\r\n\r\ndefault_args = {\r\n 'owner': 'athoillah',\r\n 'retries': 5,\r\n 'retry_delay': timedelta(minutes=5)}\r\n\r\nwith DAG(\r\n default_args=default_args,\r\n dag_id='Big_Data_Processing',\r\n description='ETL Using BashOperator and PostgresOperator',\r\n start_date=datetime(2022, 11, 23),\r\n schedule_interval='59 23 * * *'\r\n ) as dag:\r\n\r\n t1 = DummyOperator(\r\n task_id='Start')\r\n\r\n t2 = BashOperator(\r\n task_id='Dump_Data',\r\n bash_command='python3 /home/athoillah/Downloads/Data Engineering_DigitalSkola/Project/Project-5/dump.py',\r\n dag=dag)\r\n\r\n t3 = BashOperator(\r\n task_id='ETL_Mapreduce',\r\n bash_command='python3 /home/athoillah/Downloads/Data Engineering_DigitalSkola/Project/Project-5/mapreduce_etl.py',\r\n dag=dag)\r\n\r\n t4 = DummyOperator(\r\n task_id='Stop')\r\n\r\n t1 >> t2 >> t3 >> t4","repo_name":"Athoillah21/Project-5---Airflow-in-Big-Data","sub_path":"dag_bigdata_processing.py","file_name":"dag_bigdata_processing.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20023125559","text":"LISTE_CHIFFRE = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\nLISTE_MOT = [\"Michel\", \"Robert\", \"Jean\", \"Pierre\", \"Paul\", \"Jacques\", \"Marie\", \"Anne\", \"Sophie\", \"Julie\"]\nLISTE_METIER = [\"Boulanger\", \"Pâtissier\", \"Boucher\", \"Charcutier\", \"Cuisinier\", \"Traiteur\", \"Boucher\", \"Boulanger\",\n \"Pâtissier\", \"Cuisinier\"]\nLISTE_ERREUR = [33, 0, 26.5, 0.28, 1234, 0.4, 0, -1, 29, 0.325]\n\n\ndef liste_totale():\n print(\"Impression d'une liste totale :\")\n for ele_nom in LISTE_MOT:\n print(ele_nom)\n\n\ndef liste_condition():\n print(\"\\n\")\n print(\"Impression de deux listes en parallèle :\")\n for ele_nom in LISTE_MOT:\n for ele_metier in LISTE_METIER:\n if ele_nom == \"Michel\":\n return print(ele_nom, \"est\", ele_metier)\n\n\ndef liste_parallele():\n print(\"\\n\")\n print(\"Impression de deux listes en parallèle avec condition :\")\n for ele_nom in range(len(LISTE_MOT)):\n for ele_metier in range(len(LISTE_METIER)):\n if LISTE_MOT[ele_nom] == \"Michel\" and LISTE_METIER[ele_metier] == \"Boulanger\":\n return print(LISTE_MOT[ele_nom], \"est\", LISTE_METIER[ele_metier])\n\n\ndef correction_donnes():\n print(\"\\nCorrection des données d'une liste\")\n for ele_chiffre in range(len(LISTE_ERREUR)):\n if LISTE_ERREUR[ele_chiffre] < 0 or LISTE_ERREUR[ele_chiffre] > 100:\n LISTE_ERREUR[ele_chiffre] = 0\n elif LISTE_ERREUR[ele_chiffre] < 1:\n LISTE_ERREUR[ele_chiffre] *= 100\n LISTE_ERREUR[ele_chiffre] = int(LISTE_ERREUR[ele_chiffre])\n return LISTE_ERREUR\n\n\ndef main():\n print(\"Avant la correction d'une liste\")\n print(LISTE_ERREUR)\n correction_donnes()\n print(\"Après la correection d'une liste\")\n print(LISTE_ERREUR)\n\n\nmain()\n","repo_name":"lxchub-inst/ALP","sub_path":"Prepa-Examens/Listes.py","file_name":"Listes.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4819267082","text":"\"\"\"\n148. Sort List\nGiven the head of a linked list, return the list after sorting it in ascending order.\n\nFollow up: Can you sort the linked list in O(nlogn) time and O(1) memory (i.e. 
constant space)?\nExample1:\nInput: head = [4, 2 ,1, 3]\nOutput: [1, 2, 3, 4]\n\nExample2:\nInput: head = [-1, 5, 3, 4, 0]\nOutput: [-1, 0, 3, 4, 5]\n\nExample3:\nInput: head = []\nOutput: []\n\"\"\"\n\n\"\"\"\nNote:\n1. Merge sort using recursion: O(nlogn) time | O(logn) space\n2. \n\"\"\"\n\n\n\n\nfrom typing import List\nimport unittest\nclass ListNode:\n def __init__(self, val=0, next=None) -> None:\n self.val = val\n self.next = next\n\n # TEST ONLY\n def __repr__(self):\n if self is None:\n return \"None\"\n nums = [self.val]\n while self.next:\n nums.append(self.next.val)\n self = self.next\n return \"->\".join(str(num) for num in nums)\n\n @classmethod\n def fromArray(cls, arr):\n if arr is None:\n return None\n idx = 0\n length = len(arr)\n dummy = cls(0)\n current = dummy\n while idx < length:\n current.next = cls(arr[idx])\n current = current.next\n idx += 1\n return dummy.next\n\n\nclass Solution:\n def sortList(self, head: ListNode) -> ListNode:\n if not head or not head.next:\n return head\n fast, slow = head.next, head\n while fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n mid = slow.next\n slow.next = None\n left, right = self.sortList(head), self.sortList(mid)\n return self.merge(left, right)\n\n def merge(self, left, right):\n if not left or not right:\n return left or right\n dummy = p = ListNode(0)\n while left and right:\n if left.val < right.val:\n p.next = left\n left = left.next\n else:\n p.next = right\n right = right.next\n p = p.next\n p.next = left or right\n return dummy.next\n\n\n# Unit Tests\n\nfuncs = [Solution().sortList]\n\n\nclass TestSortList(unittest.TestCase):\n def testSortList1(self):\n head = ListNode.fromArray([4, 2, 1, 3])\n for func in funcs:\n self.assertEqual(repr(func(head=head)), '1->2->3->4')\n\n def testSortList2(self):\n head = ListNode.fromArray([-1, 5, 3, 4, 0])\n for func in funcs:\n self.assertEqual(repr(func(head=head)), '-1->0->3->4->5')\n\n def testSortList3(self):\n head = ListNode.fromArray([])\n for func in funcs:\n self.assertEqual(repr(func(head=head)), 'None')\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"tkwang0530/LeetCode","sub_path":"0148.py","file_name":"0148.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"5846252664","text":"\"\"\"\nUCI SPAM classification using knn\n\"\"\"\n\n# #Library\nimport os\nimport pandas as pd\npd.set_option('display.max_colwidth', -1)\n\nscriptdir = os.path.dirname(os.path.abspath(__file__))\n\n# os.chdir(\"C:\\\\NotBackedUp\\\\datafiles\\\\uci\\\\\")\nos.chdir(scriptdir)\n\n# Read in Data\nwith open('SMSSpamCollection', 'r') as f:\n lines = f.readlines()\n\n# print(lines[0:2])\ntext_class = [l.split('\\t')[0] for l in lines]\ntext_msg = [l.split('\\t')[1] for l in lines]\ntext_msg = [t.replace('\\n', '') for t in text_msg]\n\n# Create DataFrame\ndf = pd.DataFrame({'textclass': text_class,\n 'textmessage': text_msg})\n\n\n# Subset dataframe for spam\nspamdf = df[df['textclass'] == 'spam']\nprint(spamdf.textmessage.tolist()[0:10])\n\n# Subset dataframe for ham\nhamdf = df[df['textclass'] == 'ham']\n#print(hamdf.head())\n\n# Checking what is the most common word in spam\nspamwordlist = [word for line in spamdf.textmessage.tolist() for word in line.split()]\nprint(spamwordlist[0:2])\n\n# lowercase\nspamwordlist = [word.lower() for word in spamwordlist]\n\n# excluding stop words\nstopwords = ['i', 'you', 'he',' she', 'it', 'they', 'we', 'a', 'the', 
'is', 'am', 'are', 'was', 'were', 'for', 'to', 'or', 'your']\nspamwordlist = [word for word in spamwordlist if word not in stopwords]\n\nsetspam = set(spamwordlist)\nspamcounter = [spamwordlist.count(word) for word in setspam]\nspamdict = dict(zip(list(setspam), spamcounter))\n\nimport operator\n\nsortedspam = sorted(spamdict.items(), key=operator.itemgetter(1), reverse=True)\nprint((sortedspam[0:20]))\n\n# Checking what is the most common word in ham\nhamwordlist = [word for line in hamdf.textmessage.tolist() for word in line.split()]\nprint(hamwordlist[0:2])\n\n# lowercase\nhamwordlist = [word.lower() for word in hamwordlist]\n\n# excluding stop words\nstopwords2 = ['i', 'you', 'he',' she', 'it', 'they', 'we', 'a', 'the',\n 'is', 'am', 'are', 'was', 'were', 'for', 'to', 'or', 'your',\n ':-):):-):-)']\nhamwordlist = [word for word in hamwordlist if word not in stopwords2]\n\nsetham = set(hamwordlist)\nhamcounter = [hamwordlist.count(word) for word in setspam]\nhamdict = dict(zip(list(setham), hamcounter))\n\nimport operator\n\nsortedham = sorted(hamdict.items(), key=operator.itemgetter(1), reverse=True)\nprint((sortedham[0:20]))\n\n\n\n\n# A lot of call (342) and free (180) in spam word list\n\n\n","repo_name":"maximillianus/machine-learning","sub_path":"uci_spam/uci_sms_spam.py","file_name":"uci_sms_spam.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"18"} +{"seq_id":"28419005610","text":"from __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: snap\n\nshort_description: Manages snaps\n\n\ndescription:\n - \"Manages snaps packages.\"\n\noptions:\n name:\n description:\n - Name of the snap to install or remove. Can be a list of snaps.\n required: true\n state:\n description:\n - Desired state of the package.\n required: false\n default: present\n choices: [ absent, present ]\n classic:\n description:\n - Confinement policy. The classic confinement allows a snap to have\n the same level of access to the system as \"classic\" packages,\n like those managed by APT. 
This option corresponds to the --classic argument.\n This option can only be specified if there is a single snap in the task.\n type: bool\n required: false\n default: False\n channel:\n description:\n - Define which release of a snap is installed and tracked for updates.\n This option can only be specified if there is a single snap in the task.\n type: str\n required: false\n default: stable\n\nauthor:\n - Victor Carceler (@vcarceler) \n - Stanislas Lange (@angristan) \n'''\n\nEXAMPLES = '''\n# Install \"foo\" and \"bar\" snap\n- name: Install foo\n snap:\n name:\n - foo\n - bar\n\n# Remove \"foo\" snap\n- name: Remove foo\n snap:\n name: foo\n state: absent\n\n# Install a snap with classic confinement\n- name: Install \"foo\" with option --classic\n snap:\n name: foo\n classic: yes\n\n# Install a snap with from a specific channel\n- name: Install \"foo\" with option --channel=latest/edge\n snap:\n name: foo\n channel: latest/edge\n'''\n\nRETURN = '''\nclassic:\n description: Whether or not the snaps were installed with the classic confinement\n type: bool\n returned: When snaps are installed\nchannel:\n description: The channel the snaps were installed from\n type: str\n returned: When snaps are installed\ncmd:\n description: The command that was executed on the host\n type: str\n returned: When changed is true\nsnaps_installed:\n description: The list of actually installed snaps\n type: list\n returned: When any snaps have been installed\nsnaps_removed:\n description: The list of actually removed snaps\n type: list\n returned: When any snaps have been removed\n'''\n\nimport operator\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\ndef validate_input_snaps(module):\n \"\"\"Ensure that all exist.\"\"\"\n for snap_name in module.params['name']:\n if not snap_exists(module, snap_name):\n module.fail_json(msg=\"No snap matching '%s' available.\" % snap_name)\n\n\ndef snap_exists(module, snap_name):\n snap_path = module.get_bin_path(\"snap\", True)\n cmd_parts = [snap_path, 'info', snap_name]\n cmd = ' '.join(cmd_parts)\n rc, out, err = module.run_command(cmd, check_rc=False)\n\n return rc == 0\n\n\ndef is_snap_installed(module, snap_name):\n snap_path = module.get_bin_path(\"snap\", True)\n cmd_parts = [snap_path, 'list', snap_name]\n cmd = ' '.join(cmd_parts)\n rc, out, err = module.run_command(cmd, check_rc=False)\n\n return rc == 0\n\n\ndef get_snap_for_action(module):\n \"\"\"Construct a list of snaps to use for current action.\"\"\"\n snaps = module.params['name']\n\n is_present_state = module.params['state'] == 'present'\n negation_predicate = operator.not_ if is_present_state else bool\n\n def predicate(s):\n return negation_predicate(is_snap_installed(module, s))\n\n return [s for s in snaps if predicate(s)]\n\n\ndef get_base_cmd_parts(module):\n action_map = {\n 'present': 'install',\n 'absent': 'remove',\n }\n\n state = module.params['state']\n\n classic = ['--classic'] if module.params['classic'] else []\n channel = ['--channel', module.params['channel']] if module.params['channel'] and module.params['channel'] != 'stable' else []\n\n snap_path = module.get_bin_path(\"snap\", True)\n snap_action = action_map[state]\n\n cmd_parts = [snap_path, snap_action]\n if snap_action == 'install':\n cmd_parts += classic + channel\n\n return cmd_parts\n\n\ndef get_cmd_parts(module, snap_names):\n \"\"\"Return list of cmds to run in exec format.\"\"\"\n is_install_mode = module.params['state'] == 'present'\n has_multiple_snaps = len(snap_names) > 1\n\n cmd_parts = 
get_base_cmd_parts(module)\n has_one_pkg_params = '--classic' in cmd_parts or '--channel' in cmd_parts\n\n if not (is_install_mode and has_one_pkg_params and has_multiple_snaps):\n return [cmd_parts + snap_names]\n\n return [cmd_parts + [s] for s in snap_names]\n\n\ndef run_cmd_for(module, snap_names):\n cmds_parts = get_cmd_parts(module, snap_names)\n cmd = '; '.join(' '.join(c) for c in cmds_parts)\n cmd = 'sh -c \"{0}\"'.format(cmd)\n\n # Actually execute the snap command\n return (cmd, ) + module.run_command(cmd, check_rc=False)\n\n\ndef execute_action(module):\n is_install_mode = module.params['state'] == 'present'\n exit_kwargs = {\n 'classic': module.params['classic'],\n 'channel': module.params['channel'],\n } if is_install_mode else {}\n\n actionable_snaps = get_snap_for_action(module)\n if not actionable_snaps:\n module.exit_json(changed=False, **exit_kwargs)\n\n changed_def_args = {\n 'changed': True,\n 'snaps_{result}'.\n format(result='installed' if is_install_mode\n else 'removed'): actionable_snaps,\n }\n\n if module.check_mode:\n module.exit_json(**dict(changed_def_args, **exit_kwargs))\n\n cmd, rc, out, err = run_cmd_for(module, actionable_snaps)\n cmd_out_args = {\n 'cmd': cmd,\n 'rc': rc,\n 'stdout': out,\n 'stderr': err,\n }\n\n if rc == 0:\n module.exit_json(**dict(changed_def_args, **dict(cmd_out_args, **exit_kwargs)))\n else:\n msg = \"Ooops! Snap installation failed while executing '{cmd}', please examine logs and error output for more details.\".format(cmd=cmd)\n if is_install_mode:\n m = re.match(r'^error: This revision of snap \"(?P\\w+)\" was published using classic confinement', err)\n if m is not None:\n err_pkg = m.group('package_name')\n msg = \"Couldn't install {name} because it requires classic confinement\".format(name=err_pkg)\n module.fail_json(msg=msg, **dict(cmd_out_args, **exit_kwargs))\n\n\ndef main():\n module_args = {\n 'name': dict(type='list', required=True),\n 'state': dict(type='str', required=False, default='present', choices=['absent', 'present']),\n 'classic': dict(type='bool', required=False, default=False),\n 'channel': dict(type='str', required=False, default='stable'),\n }\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True,\n )\n\n validate_input_snaps(module)\n\n # Apply changes to the snaps\n execute_action(module)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ansible-collection-migration/community.general","sub_path":"plugins/modules/packaging/os/snap.py","file_name":"snap.py","file_ext":"py","file_size_in_byte":7215,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"12311059651","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains # scroll to element\nimport platform # определение платформы\nimport traceback\nfrom time import sleep\nimport time\nfrom datetime import datetime\nimport os\nimport random\n# from urllib.request import urlopen\nimport pickle # печеньки\nimport csv\n\ndef driver_start(device=None, proxy=None, useragent=None):\n \n if device == None:\n device = 'iPhone X'\n \n d = { # user agents\n 'iPhone X' : 'Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148', \n 'Pixel 2' : 'Mozilla/5.0 (Linux; Android 8.0; Pixel 2 Build/OPD3.170816.012) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.125 Mobile 
Safari/537.36',\n        'Galaxy S5' : 'Mozilla/5.0 (Linux; Android 5.0; SAMSUNG SM-G900F Build/LRX21T) AppleWebKit/537.36 (KHTML, like Gecko) SamsungBrowser/3.0',\n        }\n\n    if ((platform.platform())[:5]) == \"Linux\":\n        dr = \"/Linux/chromedriver\"\n    elif ((platform.platform())[:7]) == \"Windows\":\n        dr = \"\\\\Windows\\\\chromedriver.exe\"\n    elif ((platform.platform())[:5]) == \"macOS\":\n        dr = \"/MacOS/chromedriver\"\n    \n\n    mobile_emulation = { \"deviceName\": device} # type your device from list\n    opts = Options()\n    opts.add_experimental_option(\"mobileEmulation\", mobile_emulation)\n\n    opts.add_argument(d[device]) # user agent\n    opts.add_argument(\"accept=*/*\")\n\n    driver = webdriver.Chrome(chrome_options=opts, executable_path=r\"driver\" + dr)\n    driver.set_window_size(1000, 1078)\n    driver.set_window_position(0, 0)\n\n    return driver\n\ndef sleepr(min=2, max=6):\n    if min <= 0:\n        min = 1\n    sleep(random.uniform(min,max))\n\ndef authorization(driver):\n    \n    my_login = read_file('login.txt')\n    driver.get('https://www.instagram.com/accounts/login/')\n    sleepr()\n    # try:\n    #     pass\n    #     # Grab the cookies from the file\n    #     cookies = pickle.load(open(\"cookies.pkl\", \"rb\"))\n    #     for cookie in cookies:\n    #         if 'expiry' in cookie:\n    #             del cookie['expiry']\n    #         driver.driver.add_cookie(cookie)\n    #     print(\"Cookies have been read\")\n\n    # except: # cookies not found or another error occurred\n    print(\"No cookies found\")\n    el = driver.element_by_name('username')\n    el.send_keys(my_login[0])\n    el = driver.element_by_name('password')\n    el.send_keys(my_login[1])\n    sleepr()\n\n    el = driver.element_by_xpath('//button[@type=\"submit\"]')\n    \n    # ! Warning from 06.06.21: Instagram now offers to install its app, this extra window must be handled\n    # ! Fix this!\n    \n    if el != None: # ? sometimes automatic login happens without pressing the form submit button?\n        el.click()\n\n        print(\"Login to your account\")\n        sleepr(min=6, max=10)\n        # Code from SMS\n        # In case of confirmation via SMS\n        if driver.driver.current_url != \"https://www.instagram.com/\":\n            pass\n            # print('Enter your code from SMS')\n            while (driver.driver.current_url != \"https://www.instagram.com/\") and (\n                    driver.driver.current_url != 'https://www.instagram.com/accounts/onetap/?next=%2F'):\n                sleep(1)\n\n        # # Creating Cookies\n        # pickle.dump(driver.driver.get_cookies() , open(\"cookies.pkl\",\"wb\"))\n        # print(\"Cookies has been created\")\n\n        # check whether the login succeeded\n        if driver.element_by_xpath(\"//section/nav/div[2]/div/div\") != None:\n            print(\"Login completed successfully\")\n        else:\n            pass\n            # print(\"Login error, check that your username and password are correct\") \n\n    else:\n        print(\"Login to your account\")\n\ndef scroll_block(driver, pause_time, max_users, block=False):\n    iteration = 0\n    while (iteration*12+12) < max_users:\n        print(f'Iteration: {iteration+1}, users: {(iteration+1)*12+12}')\n        el = '//span[@aria-label=\"Load more comments\"]'\n        el = driver.element_by_xpath(el,2)\n        if el == None:\n            el = '//span[@aria-label=\"Загрузить больше комментариев\"]' # ? 
is it?\n el = driver.element_by_xpath(el,1)\n if el == None:\n print('Not scrolling element')\n return None # then exit\n el = el.find_element_by_xpath('..')\n driver.driver.execute_script(\"arguments[0].scrollIntoView();\", el)\n driver.driver.execute_script(\"arguments[0].click();\", el)\n sleepr(pause_time-2, pause_time+2) \n iteration += 1\n\n if iteration % 82 == 0:\n print('Pause')\n timer(60*random.randint(7,13))\n continue\n \n if iteration % 41 == 0:\n print('Pause...')\n timer(60*random.randint(1,2))\n continue\n \n if iteration % 10 == 0:\n print('Pause...')\n sleepr(7,10)\n continue\n\ndef scroll_page(driver, pause_time, max_users):\n last_height = driver.driver.execute_script(\"return document.body.scrollHeight\") # Get scroll height\n i = 0 # iteration count\n number_stop = 0 # num of not load page\n num_users = 24 # start num\n \n while number_stop < 5:\n # Scroll down to bottom\n driver.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n i += 1\n num_users += 12\n # Wait to load page\n sleepr(pause_time-2,pause_time+2)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.driver.execute_script(\"return document.body.scrollHeight\")\n print(f'Iteration: {i}, users: ~{num_users}, height:', new_height)\n\n if new_height == last_height:\n number_stop += 1\n num_users -= 12\n elif number_stop != 0:\n number_stop = 0\n\n last_height = new_height\n\n if num_users >= max_users:\n print('The maximum number of users has been reached')\n break\n\n if i % 82 == 0:\n print('Pause')\n timer(60*random.randint(7,13))\n continue\n \n if i % 41 == 0:\n print('Pause...')\n timer(60*random.randint(1,2))\n continue\n \n if i % 10 == 0:\n print('Pause...')\n sleepr(7,10)\n continue\n\n# def hour_waiting(start_time, current_num, max_num_in_hour):\n# if ((time.time() - start_time) <= 60*60) and (current_num >= max_num_in_hour):\n# print(\"\\nLimit of users per hour reached:\", max_num_in_hour)\n# now = datetime.now()\n# if (now.minute / 10) <= 0:\n# str_ = \"Time: \" + str(now.hour) + \":0\" + str(now.minute)\n# else:\n# str_ = \"Time: \" + str(now.hour) + \":\" + str(now.minute)\n# print(str_)\n# timer(1) # ! 
test mod\n# # timer(60*60 - (time.time() - start_time))\n# current_num = 0\n# start_time = time.time()\n# return None\n# # zero hour\n# if ((time.time() - start_time) >= 60*60):\n# start_time = time.time()\n# return start_time # return new start_time value\n# return None\n\ndef timer(seconds):\n if (seconds/60) <= 1:\n print(\"Wait\", int(seconds), \"sec.\")\n sleep(seconds + random.uniform(0,1))\n else:\n minutes = seconds/60\n seconds = seconds%60\n print(\"Wait\", int(minutes), \"min.\")\n count = 0\n while int(minutes) > count: # add -1\n count += 1\n print(count, \"out of\", int(minutes), \"min.\")\n sleep(60)\n sleep(seconds + random.uniform(0,1))\n print(\"Works on\")\n\n# colors for console\nclass Terminal():\n __HEADER = '\\033[95m'\n __OKBLUE = '\\033[94m'\n __OKCYAN = '\\033[96m'\n __OKGREEN = '\\033[92m'\n __WARNING = '\\033[93m'\n __FAIL = '\\033[91m'\n __BOLD = '\\033[1m'\n __UNDERLINE = '\\033[4m'\n __ENDC = '\\033[0m'\n\n def warning(self, string):\n print(f'{self.__FAIL}' + string + f'{self.__ENDC}')\n def ok(self, string):\n print(f'{self.__OKGREEN}' + string + f'{self.__ENDC}')\n\n def print_except(self):\n print(f'{self.__WARNING}\"\\nERROR!\"' + traceback.format_exc() + f'{self.__ENDC}')\n\n\ndef replace_path(file_path):\n if ((platform.platform())[:7]) == \"Windows\":\n file_path = file_path.replace('/', '\\\\')\n\n return file_path\n\ndef read_file(file_path):\n file_path = replace_path(file_path)\n with open(file_path, 'r', encoding=\"utf-8\", errors='ignore') as f:\n lines = [line.rstrip('\\n') for line in f]\n return lines\n\ndef write_file(file_path, lines, mod='a'):\n file_path = replace_path(file_path)\n with open(file_path, mod, encoding=\"utf-8\", errors='ignore') as f:\n for line in lines:\n f.write(line)\n f.write('\\n')\n","repo_name":"DaemonLife/InstaBotPro","sub_path":"lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"11433436394","text":"import random, os, sys\nimport numpy as np\nimport csv\nimport pandas as pd\nfrom tensorflow.keras.callbacks import ModelCheckpoint, Callback\nfrom tensorflow.keras.optimizers import Adam\nimport tensorflow_addons as tfa\nfrom scipy.stats import pearsonr,spearmanr\nfrom model_new import KerasMultiSourceGCNModel_new\nimport hickle as hkl\nimport argparse\nimport codecs\nfrom subword_nmt.apply_bpe import BPE\nfrom sklearn.metrics import accuracy_score,f1_score,recall_score,precision_score\n\n####################################Settings#################################\nparser = argparse.ArgumentParser(description='Drug_response_pre')\nparser.add_argument('-gpu_id', dest='gpu_id', type=str, default='2', help='GPU devices')\nparser.add_argument('-use_mut', dest='use_mut', type=bool, default=True, help='use gene mutation or not')\nparser.add_argument('-use_gexp', dest='use_gexp', type=bool, default=True, help='use gene expression or not')\nparser.add_argument('-use_methy', dest='use_methy', type=bool, default=True, help='use methylation or not')\nparser.add_argument('-use_copy', dest='use_copy', type=bool, default=True, help='use copy number or not')\nparser.add_argument('-israndom', dest='israndom', type=bool, default=False, help='randomlize X and A')\n# hyparameters for GCN\nparser.add_argument('-unit_list', dest='unit_list', nargs='+', type=int, default=[128, 128, 128],\n help='unit list for GCN')\nparser.add_argument('-unit_edge_list', dest='unit_edge_list', nargs='+', type=int, default=[32, 
32, 32],\n help='unit list for edge GCN')\nparser.add_argument('-Max_atoms', dest='Max_atoms', type=int, default=100, help='molecule padding size')\nparser.add_argument('-batch_size_set', dest='batch_size_set', type=int, default=256, help='batch_size_set')\nparser.add_argument('-epoch_set', dest='epoch_set', type=int, default=500, help='max epoch')\nparser.add_argument('-Dropout_rate', dest='Dropout_rate', type=float, default=0.2, help='Dropout_rate')\nparser.add_argument('-activation', dest='activation', type=str, default='gelu', help='activation func')\nparser.add_argument('-use_bn', dest='use_bn', type=bool, default=True, help='use batchnormalization for GCN')\nparser.add_argument('-use_relu', dest='use_relu', type=bool, default=True, help='use relu for GCN')\nparser.add_argument('-use_GMP', dest='use_GMP', type=bool, help='use GlobalMaxPooling for GCN')\nargs = parser.parse_args()\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_id\nuse_mut, use_gexp, use_methy,use_copy = args.use_mut, args.use_gexp, args.use_methy,args.use_copy\nisrandom = args.israndom\nmodel_suffix = ('with_mut' if use_mut else 'without_mut')+'_'+('with_gexp' if use_gexp else 'without_gexp')+'_'+(\n 'with_methy' if use_methy else 'without_methy')+(\n 'with_copy' if use_copy else 'without_copy')\n\nGCN_deploy = '_'.join(map(str, args.unit_list))+'_'+('bn' if args.use_bn else 'no_bn')+'_'+(\n 'relu' if args.use_relu else 'tanh')+'_'+('GMP' if args.use_GMP else 'GAP')\nmodel_suffix = model_suffix+'_'+GCN_deploy\n\n####################################Constants Settings###########################\nTCGA_label_set = [\"ALL\", \"BLCA\", \"BRCA\", \"CESC\", \"DLBC\", \"LIHC\", \"LUAD\",\n \"ESCA\", \"GBM\", \"HNSC\", \"KIRC\", \"LAML\", \"LCML\", \"LGG\",\n \"LUSC\", \"MESO\", \"MM\", \"NB\", \"OV\", \"PAAD\", \"SCLC\", \"SKCM\",\n \"STAD\", \"THCA\", 'COAD/READ']\nDPATH = '../data'\nDrug_info_file = '%s/GDSC/Drug_listMon Apr 10 09_29_53 2023.csv' % DPATH\nCell_line_info_file = '%s/CCLE/Cell_lines_annotations_20181226.txt' % DPATH\nDrug_feature_file = '%s/GDSC/aug_data_2' % DPATH\nCancer_response_exp_file = '%s/CCLE/GDSC_IC50.csv' % DPATH\nGene_info_file='%s/CCLE/Result_StandardScaler.csv' % DPATH\ndef MetadataGenerate(Drug_info_file, Cell_line_info_file, Drug_feature_file,\n Gene_info, filtered):\n # drug_id --> pubchem_id\n reader = csv.reader(open(Drug_info_file, 'r'))\n rows = [item for item in reader]\n drugid2pubchemid = {item[0]: item[5] for item in rows if item[5].isdigit()}\n # map cellline --> cancer type\n cellline2cancertype = {}\n for line in open(Cell_line_info_file).readlines()[1:]:\n cellline_id = line.split('\\t')[1]\n TCGA_label = line.strip().split('\\t')[-1]\n # if TCGA_label in TCGA_label_set:\n cellline2cancertype[cellline_id] = TCGA_label\n\n # load drug features\n drug_pubchem_id_set = []\n drug_feature = {}\n for each in os.listdir(Drug_feature_file):\n drug_pubchem_id_set.append(each.split('.')[0])\n node_features_Mol, lcq_adj, edges_feature, smiles_feature = hkl.load('%s/%s' % (Drug_feature_file, each))\n # features_aug=hkl.load('%s/%s' % (Drug_feature_file, each))\n drug_feature[each.split('.')[0]] = [node_features_Mol, lcq_adj, edges_feature,smiles_feature]\n # drug_feature[each.split('.')[0]] = features_aug\n\n assert len(drug_pubchem_id_set) == len(drug_feature.values())\n experiment_data = pd.read_csv(Cancer_response_exp_file, sep=',', header=0, index_col=[0])\n # filter experiment data\n drug_match_list = [item for item in experiment_data.index if item.split(':')[1] in 
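# Illustrative sketch: the CSV-to-dict idiom MetadataGenerate above relies on
# (read the rows once, keep only rows whose target column is numeric, build a
# lookup in one comprehension). The inline CSV and column layout here are
# hypothetical stand-ins for the GDSC drug list file.
import csv
import io

csv_text = 'id,name,x,y,z,pubchem\nd1,apatinib,-,-,-,11315474\nd2,unknown,-,-,-,n/a\n'
rows = [row for row in csv.reader(io.StringIO(csv_text))]
id_to_pubchem = {row[0]: row[5] for row in rows[1:] if row[5].isdigit()}
print(id_to_pubchem)  # {'d1': '11315474'}; the non-numeric row is dropped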
drugid2pubchemid.keys()]\n res=[item for item in experiment_data.index ]\n experiment_data_filtered = experiment_data.loc[drug_match_list]\n data_idx = []\n gene_info=pd.read_csv(Gene_info_file, sep=',', index_col=[0])\n for each_drug in experiment_data_filtered.index:\n for each_cellline in experiment_data_filtered.columns:\n pubchem_id = drugid2pubchemid[each_drug.split(':')[-1]]\n if str(pubchem_id) in drug_pubchem_id_set and each_cellline in gene_info.columns:\n if not np.isnan(experiment_data_filtered.loc[each_drug, each_cellline]) and each_cellline in cellline2cancertype.keys():\n ln_IC50 = float(experiment_data_filtered.loc[each_drug, each_cellline])\n data_idx.append((each_cellline, pubchem_id, ln_IC50, cellline2cancertype[each_cellline]))\n\n\n\n for each_drug in experiment_data_filtered.index:\n for each_cellline in experiment_data_filtered.columns:\n pubchem_id = drugid2pubchemid[each_drug.split(':')[-1]]\n for aug_data in drug_pubchem_id_set:\n if aug_data.startswith(\"k\"):\n pubchem_id_aug_tmp = aug_data.split(\":\")[0]\n pubchem_id_aug=pubchem_id_aug_tmp.split(\"_\")[1]\n if aug_data.startswith(\"k\") and pubchem_id_aug==str(pubchem_id) and str(pubchem_id) in drug_pubchem_id_set and each_cellline in gene_info.columns:\n if not np.isnan(experiment_data_filtered.loc[each_drug, each_cellline]) and each_cellline in cellline2cancertype.keys():\n ln_IC50 = float(experiment_data_filtered.loc[each_drug, each_cellline])\n data_idx.append((each_cellline, aug_data, ln_IC50, cellline2cancertype[each_cellline]))\n\n\n nb_celllines = len(set([item[0] for item in data_idx]))\n nb_drugs = len(set([item[1] for item in data_idx]))\n print('%d instances across %d cell lines and %d drugs were generated.' % (len(data_idx), nb_celllines, nb_drugs))\n return drug_feature, gene_info, data_idx\n\n# def DataSplit(data_idx, ratio=0.95):\n# data_train_idx, data_test_idx = [], []\n# for each_type in TCGA_label_set:\n# data_subtype_idx = [item for item in data_idx if item[-1] == each_type]\n# train_list = random.sample(data_subtype_idx, int(ratio * len(data_subtype_idx)))\n# test_list = [item for item in data_subtype_idx if item not in train_list]\n# data_train_idx += train_list\n# data_test_idx += test_list\n# assert len(data_test_idx) >= args.batch_size_set\n# print(len(data_train_idx),len(data_test_idx),\"?\")\n# return data_train_idx, data_test_idx\n\ndef split_list(lst, n):\n\n split_index = int(len(lst) * (1/n)) # index of the split point\n aug_list=lst[split_index:]\n result = []\n l=n-1\n for i in range(l):\n result.append(aug_list[i::l])\n\n return lst[:split_index],result\n\ndef DataSplit(data_idx, ratio=0.95):\n data_true_idx,aug_list=split_list(data_idx,3)\n data_train_idx, data_test_idx = [], []\n for each_type in TCGA_label_set:\n data_subtype_idx = [item for item in data_true_idx if item[-1] == each_type]\n selected_indices = random.sample(range(len(data_subtype_idx)), int(ratio * len(data_subtype_idx)))\n train_list = [data_subtype_idx[i] for i in selected_indices]\n test_list = [data_subtype_idx[i] for i in range(len(data_subtype_idx)) if i not in selected_indices]\n data_train_idx += train_list\n data_test_idx += test_list\n for data_subtype_idx_subaug in aug_list:\n data_subtype_idx_each_aug = [item for item in data_subtype_idx_subaug if item[-1] == each_type]\n train_list_aug = [data_subtype_idx_each_aug[i] for i in selected_indices]\n data_train_idx += train_list_aug\n print(len(data_train_idx),len(data_test_idx),\"?\")\n return data_train_idx, data_test_idx\n\n\ndef features_padding(node_feature, 
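# Illustrative sketch: the core of the stratified hold-out in DataSplit above,
# reduced to a self-contained toy. Sampling index positions per label keeps
# class proportions similar in train and test; the records are hypothetical.
import random

def stratified_split(records, ratio=0.95, seed=0):
    random.seed(seed)
    train, test = [], []
    for label in {r[-1] for r in records}:
        group = [r for r in records if r[-1] == label]
        picked = set(random.sample(range(len(group)), int(ratio * len(group))))
        train += [g for i, g in enumerate(group) if i in picked]
        test += [g for i, g in enumerate(group) if i not in picked]
    return train, test

records = [('cell%d' % i, 'drug', 0.1 * i, 'BRCA' if i % 2 else 'LUAD') for i in range(40)]
train, test = stratified_split(records)
print(len(train), len(test))  # 38 2 with the 0.95 ratio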
edges_feature, size):\n node_pad = np.pad(node_feature, ((0, args.Max_atoms-node_feature.shape[0]), (0, 0)), 'constant')\n edge_pad = np.pad(edges_feature,\n ((0, args.Max_atoms-edges_feature.shape[0]), (0, args.Max_atoms-edges_feature.shape[0]), (0, 0)),\n 'constant')\n return [node_pad, edge_pad]\ndef _drug2emb_encoder(smile):\n # vocab_path = \"{}/ESPF/drug_codes_chembl_freq_1500.txt\".format(self.vocab_dir)\n # sub_csv = pd.read_csv(\"{}/ESPF/subword_units_map_chembl_freq_1500.csv\".format(self.vocab_dir))\n vocab_path = \"ESPF/drug_codes_chembl_freq_1500.txt\"\n sub_csv = pd.read_csv(\"ESPF/subword_units_map_chembl_freq_1500.csv\")\n bpe_codes_drug = codecs.open(vocab_path)\n dbpe = BPE(bpe_codes_drug, merges=-1, separator='')\n\n idx2word_d = sub_csv['index'].values\n words2idx_d = dict(zip(idx2word_d, range(0, len(idx2word_d))))\n\n max_d = 50\n t1 = dbpe.process_line(smile).split() # split\n try:\n i1 = np.asarray([words2idx_d[i] for i in t1]) # index\n except:\n i1 = np.array([0])\n\n l = len(i1)\n if l < max_d:\n i = np.pad(i1, (0, max_d-l), 'constant', constant_values=0)\n input_mask = ([1] * l)+([0] * (max_d-l))\n else:\n i = i1[:max_d]\n input_mask = [1] * max_d\n\n return i, np.asarray(input_mask)\ndef FeatureExtract(data_idx, drug_feature, gene_feature):\n cancer_type_list = []\n nb_instance = len(data_idx)\n drug_data = [[] for item in range(nb_instance)]\n target = np.zeros(nb_instance, dtype='float32')\n gene_data=[]\n for idx in range(nb_instance):\n cell_line_id, pubchem_id, ln_IC50, cancer_type = data_idx[idx]\n # modify\n\n feat_mat, adj_list, _ ,smiles_feature= drug_feature[str(pubchem_id)]\n # fill drug data,padding to the same size with zeros\n drug_data[idx] = features_padding(feat_mat, adj_list, args.Max_atoms)\n drug_data[idx].append(smiles_feature[0])\n drug_data[idx].append(smiles_feature[1])\n # randomlize X A\n target[idx] = ln_IC50\n cell_line_info_tmp=gene_feature[cell_line_id].values\n cell_line_info_tmp_afterprocess=[]\n for geneinfo in cell_line_info_tmp:\n cell_line_info_tmp_afterprocess.append(eval(geneinfo))\n gene_data.append(cell_line_info_tmp_afterprocess)\n cancer_type_list.append([cancer_type, cell_line_id, pubchem_id])\n gene_data=np.array(gene_data)\n return drug_data, gene_data, target, cancer_type_list\n\nclass MyCallback(Callback):\n def __init__(self, validation_data, patience):\n self.x_val = validation_data[0]\n self.y_val = validation_data[1]\n self.best_weight = None\n self.patience = patience\n def on_train_begin(self, logs={}):\n self.wait = 0\n self.stopped_epoch = 0\n self.best = -np.Inf\n print(\"begin\")\n return\n def on_train_end(self, logs={}):\n self.model.set_weights(self.best_weight)\n self.model.save('MyBestDeepEAG_%s.h5' % self.best)\n\n if self.stopped_epoch > 0:\n print('Epoch %05d: early stopping' % (self.stopped_epoch+1))\n return\n def on_epoch_begin(self, epoch, logs={}):\n return\n def on_epoch_end(self, epoch, logs={}):\n y_pred_val = self.model.predict(self.x_val, batch_size=args.batch_size_set)\n pcc_val = pearsonr(self.y_val, y_pred_val[:, 0])[0]\n print('pcc-val: %s' % str(round(pcc_val, 4)))\n if pcc_val > self.best:\n self.best = pcc_val\n self.wait = 0\n self.best_weight = self.model.get_weights()\n else:\n self.wait += 1\n if self.wait >= self.patience:\n self.stopped_epoch = epoch\n self.model.stop_training = True\n return\n\ndef generate_batch_data(data_idx, batch_size, drug_feature, gene_feature):\n np.random.shuffle(data_idx)\n while True:\n if len(data_idx) % batch_size == 0:\n times_valid = 
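# Illustrative sketch: right-padding a variable-length token-index sequence to
# a fixed length and building the matching input mask, the same idea as
# _drug2emb_encoder above (which uses max_d = 50). Standalone toy values.
import numpy as np

def pad_with_mask(indices, max_len):
    n = len(indices)
    if n < max_len:
        padded = np.pad(indices, (0, max_len - n), 'constant', constant_values=0)
        mask = [1] * n + [0] * (max_len - n)
    else:
        padded = np.asarray(indices[:max_len])
        mask = [1] * max_len
    return padded, np.asarray(mask)

ids, mask = pad_with_mask([7, 3, 9], max_len=6)
print(ids)   # [7 3 9 0 0 0]
print(mask)  # [1 1 1 0 0 0]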
len(data_idx) // batch_size\n else:\n times_valid = len(data_idx) // batch_size+1\n for step_valid in range(times_valid):\n if (step_valid+1) * batch_size > len(data_idx):\n present_idx = data_idx[len(data_idx)-batch_size:]\n else:\n present_idx = data_idx[step_valid * batch_size:(step_valid+1) * batch_size]\n X_drug_data_train, X_cell_line_test, Y_train, cancer_type_train_list \\\n = FeatureExtract(present_idx, drug_feature, gene_feature)\n X_drug_feat_data_train = [item[0] for item in X_drug_data_train]\n X_drug_adj_data_train = [item[1] for item in X_drug_data_train]\n X_drug_smiles_data_train = [item[2] for item in X_drug_data_train]\n X_drug_smiles_mask_data_train = [item[3] for item in X_drug_data_train]\n\n X_drug_feat_data_train = np.array(X_drug_feat_data_train) # nb_instance * Max_stom * feat_dim\n X_drug_adj_data_train = np.array(X_drug_adj_data_train) # nb_instance * Max_stom * Max_stom\n X_drug_smiles_data_train = np.array(X_drug_smiles_data_train)\n X_drug_smiles_mask_data_train = np.array(X_drug_smiles_mask_data_train)\n\n copy=X_cell_line_test[...,0]\n gexpr=X_cell_line_test[...,1]\n mutation=X_cell_line_test[...,2]\n methy=X_cell_line_test[...,3]\n yield [X_drug_feat_data_train, X_drug_adj_data_train,X_drug_smiles_data_train,X_drug_smiles_mask_data_train, copy,mutation,gexpr,methy], Y_train\n # yield [X_drug_feat_data_train, X_drug_adj_data_train, X_cell_line_test], Y_train\n\n\n\ndef ModelTraining(model, data_train_idx, drug_feature, gene_feature,\n data_test_idx, validation_data, nb_epoch=args.epoch_set, batch_size=args.batch_size_set):\n # optimizer=tfa.optimizers.AdamW(learning_rate=0.001,beta_1=0.9, beta_2=0.999,epsilon=None,weight_decay=0.0,amsgrad=False)\n optimizer = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)\n model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['mse'],run_eagerly=True)\n callbacks = [ModelCheckpoint('best_DeepEAG_%s.h5' % model_suffix, monitor='val_loss', save_best_only=False,\n save_weights_only=False),\n MyCallback(validation_data=validation_data, patience=20)]\n if len(data_train_idx) % args.batch_size_set == 0:\n steps_per_epoch = len(data_train_idx) // args.batch_size_set\n else:\n steps_per_epoch = len(data_train_idx) // args.batch_size_set+1\n if len(data_test_idx) % args.batch_size_set == 0:\n validation_steps = len(data_test_idx) // args.batch_size_set\n else:\n validation_steps = len(data_test_idx) // args.batch_size_set+1\n training_generator = generate_batch_data(data_train_idx, batch_size=args.batch_size_set, drug_feature=drug_feature,gene_feature=gene_feature)\n\n model.fit(x=training_generator, steps_per_epoch=steps_per_epoch, epochs=nb_epoch, verbose=1,callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps,use_multiprocessing=False)\n # model.fit(x=[X_drug_feat_data_train,X_drug_adj_data_train,X_mutation_data_train,X_gexpr_data_train,X_methylation_data_train],y=Y_train,batch_size=1,epochs=nb_epoch,validation_split=0,callbacks=callbacks)\n return model\n\ndef ModelEvaluate(model, X_drug_data_test, X_cell_line_test, Y_test):\n X_drug_feat_data_test = [item[0] for item in X_drug_data_test]\n X_drug_adj_data_test = [item[1] for item in X_drug_data_test]\n X_drug_smiles_data_test = [item[2] for item in X_drug_data_test]\n X_drug_smiles_mask_data_test = [item[3] for item in X_drug_data_test]\n\n X_drug_feat_data_test = np.array(X_drug_feat_data_test) # nb_instance * Max_stom * feat_dim\n X_drug_adj_data_test = 
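# Illustrative sketch: the if/else batch-count arithmetic used above for
# times_valid and steps_per_epoch is a ceiling division, which can also be
# written as a single integer expression.
def num_batches(n_samples, batch_size):
    return -(-n_samples // batch_size)  # == math.ceil(n_samples / batch_size)

assert num_batches(512, 256) == 2  # divides evenly
assert num_batches(513, 256) == 3  # one extra partial batch
print(num_batches(513, 256))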
np.array(X_drug_adj_data_test) # nb_instance * Max_stom * Max_stom\n X_drug_smiles_data_test=np.array(X_drug_smiles_data_test)\n X_drug_smiles_mask_data_test=np.array(X_drug_smiles_mask_data_test)\n\n copy = X_cell_line_test[..., 0]\n gexpr = X_cell_line_test[..., 1]\n mutation = X_cell_line_test[..., 2]\n methy = X_cell_line_test[..., 3]\n\n Y_pred = model.predict([X_drug_feat_data_test, X_drug_adj_data_test,X_drug_smiles_data_test,X_drug_smiles_mask_data_test,\n copy,mutation,gexpr,methy],batch_size=args.batch_size_set)\n import csv\n file_path = \"2.csv\"\n if not os.path.isfile(file_path):\n with open('2.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n for i in range(len(Y_test)):\n writer.writerow([Y_pred[:, 0][i],Y_test[i]])\n overall_pcc = pearsonr(Y_pred[:, 0], Y_test)[0]\n\n from math import sqrt\n mse = np.sum((Y_test - Y_pred[:, 0]) ** 2) / len(Y_test)\n rmse = sqrt(mse)\n spearmanr_pcc=spearmanr(Y_pred[:, 0], Y_test)[0]\n\n print(\"The overall Pearson's correlation is %.4f.\" % overall_pcc)\n print(\"The overall rmse's is %.4f.\" % rmse)\n print(\"The overall spearmanr_pcc's is %.4f.\" % spearmanr_pcc)\n\n\n # with open('result0.1tf2.log', 'a') as f:\n # f.write(str(overall_pcc)+\" overall_pcc\"+\"0.1 tf2\\n\")\n\ndef loadmodel():\n import tensorflow\n from tensorflow import keras\n from layers.graph import GraphConvTest\n import tensorflow_addons as tfa\n from transformer_drug.new_helper import transformer\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\n model = keras.models.load_model(\"MyBestDeepCDREAG_0.9284564799414046.h5\",\n custom_objects={'GraphConvTest': GraphConvTest(units=128, units_edge=32, step=0),'transformer':transformer },\n )\n return model\ndef main():\n random.seed(0)\n #all features with aug\n drug_feature, gene_feature, data_idx = MetadataGenerate\\\n (Drug_info_file,Cell_line_info_file,Drug_feature_file,Gene_info_file,False)\n\n data_train_idx, data_test_idx = DataSplit(data_idx)\n X_drug_data_test, X_cell_line_test, Y_test, cancer_type_test_list=\\\n FeatureExtract(data_test_idx,drug_feature,gene_feature)\n X_drug_feat_data_test = [item[0] for item in X_drug_data_test]\n X_drug_adj_data_test = [item[1] for item in X_drug_data_test]\n X_drug_smiles_data_test = [item[2] for item in X_drug_data_test]\n X_drug_smiles_mask_data_test = [item[3] for item in X_drug_data_test]\n\n\n X_drug_feat_data_test = np.array(X_drug_feat_data_test) # nb_instance * Max_stom * feat_dim\n X_drug_adj_data_test = np.array(X_drug_adj_data_test)\n X_drug_smiles_data_test=np.array(X_drug_smiles_data_test)\n X_drug_smiles_mask_data_test=np.array(X_drug_smiles_mask_data_test)\n\n copy = X_cell_line_test[..., 0]\n gexpr = X_cell_line_test[..., 1]\n mutation = X_cell_line_test[..., 2]\n methy = X_cell_line_test[..., 3]\n validation_data = ([X_drug_feat_data_test, X_drug_adj_data_test,X_drug_smiles_data_test,X_drug_smiles_mask_data_test,copy,mutation,gexpr,methy],Y_test)\n print(Y_test[-1])\n\n model = KerasMultiSourceGCNModel_new(use_mut, use_gexp, use_methy,use_copy).createMaster(X_drug_data_test[0][0].shape[-1],\n X_drug_data_test[0][1].shape[-1],\n X_drug_data_test[0][2].shape[-1],\n X_drug_data_test[0][3].shape[-1],\n X_cell_line_test.shape,\n args.unit_list, args.unit_edge_list,\n args.batch_size_set, args.Dropout_rate,\n args.activation, args.use_relu,\n args.use_bn, args.use_GMP)\n print('Begin training...')\n model = ModelTraining(model, data_train_idx, drug_feature, gene_feature,data_test_idx, validation_data, nb_epoch=args.epoch_set, 
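# Illustrative sketch: the three fit metrics ModelEvaluate reports above
# (Pearson r, RMSE, Spearman rho) computed on a tiny synthetic pair of arrays.
import numpy as np
from scipy.stats import pearsonr, spearmanr

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])

pcc = pearsonr(y_pred, y_true)[0]
rho = spearmanr(y_pred, y_true)[0]
rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
print('pcc=%.4f rmse=%.4f spearman=%.4f' % (pcc, rmse, rho))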
batch_size=args.batch_size_set)\n\n # model = loadmodel()\n # optimizer = tfa.optimizers.AdamW(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, weight_decay=0.0,\n # amsgrad=False)\n # model.compile(optimizer=optimizer, loss='mean_squared_error', metrics=['mse'], run_eagerly=True)\n ModelEvaluate(model, X_drug_data_test, X_cell_line_test, Y_test)\nif __name__ == '__main__':\n main()","repo_name":"zhejiangzhuque/DeepEAG","sub_path":"prog/run_DeepEAG_newaug2.py","file_name":"run_DeepEAG_newaug2.py","file_ext":"py","file_size_in_byte":21770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1682250954","text":"\"\"\"\nhttps://stepik.org/lesson/680263/step/12?unit=678921\n\nBeegeek\nWrite a program that determines:\n\nthe number of lines in which bee occurs as a substring at least twice\nthe number of lines in which geek occurs as a word at least once\nInput format\nThe program receives an arbitrary number of lines, each containing an arbitrary set of characters.\n\nOutput format\nThe program must print two numbers:\n\nfirst: the number of lines in which bee occurs as a substring at least twice\nsecond: the number of lines in which geek occurs as a word at least once\neach on its own line.\n\nNote 1. A word is any contiguous sequence of one or more characters matching \\w.\n\nNote 2. A line may satisfy both conditions at once.\n\nNote 3. In the first line of the first test, bee occurs as a substring 3 times:\n\nbeebee bee\nIn the second line, bee occurs as a substring only once, and the word geek is absent.\n\nIn the third line, bee occurs as a substring 2 times, and geek as a word 1 time:\n\nbee geek bee\n\"\"\"\n\nimport re\nfrom sys import stdin\n\ni = [i.strip('\\n') for i in stdin]\npattern1 = r'.*bee.*bee.*'\npattern2 = r'\\bgeek\\b'\nbee_count = geek_count = 0\nfor string in i:\n if re.search(pattern1, string):\n bee_count += 1\n if re.search(pattern2, string):\n geek_count += 1\n\nprint(bee_count, geek_count, sep='\\n')","repo_name":"dihofrin/stepik","sub_path":"Python for Professionals/11. 
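# Illustrative sketch: the two regex conditions from the solution above on the
# sample lines quoted in the problem statement. '.*bee.*bee.*' demands two
# occurrences of the substring; r'\bgeek\b' matches geek only as a whole word.
import re

lines = ['beebee bee', 'bee geeky', 'bee geek bee']
bee_twice = sum(1 for s in lines if re.search(r'.*bee.*bee.*', s))
geek_word = sum(1 for s in lines if re.search(r'\bgeek\b', s))
print(bee_twice, geek_word, sep='\n')  # 2, then 1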
Regular expressions/part 6/step 12.py","file_name":"step 12.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"26915484079","text":"'''\nCreated on Dec 31, 2017\n\n@author: jghuf\n'''\nimport time \n\ndef sumOfEvenFibonacci(limit):\n fib0 = 1\n fib1 = 1\n fib2 = 0\n sum = 0\n while fib2 < limit:\n fib2 = fib0 + fib1\n if (fib2 % 2) == 0:\n sum = sum + fib2\n fib0 = fib1\n fib1 = fib2\n return sum\n\n\nif __name__ == '__main__':\n limit = 4000000\n start = time.time()\n sumofEven = sumOfEvenFibonacci(limit)\n elapsed = (time.time() - start)\n \n print(\"Sum of even Fibonacci numbers up to \"+ str(limit)+\" is %s , found in %s seconds\" % (sumofEven,elapsed))\n","repo_name":"jghuf/EulerProblems","sub_path":"EulerProblem2/EulerProblem2.py","file_name":"EulerProblem2.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19238960971","text":"#POLYTECH LED WALL TEAM 2019\nimport tkinter as tk\nimport os\nimport sys\nimport time\nimport random\nimport sched\nimport time\n\nLED_WALL_WIDTH = 16\nLED_WALL_HEIGHT = 10\nPIXEL_SIZE = 50\nDRIVER_PATH = \"/tmp/lwfb\"\n\nold_timestamp = 0;\n\nclass Application(tk.Frame):\n def __init__(self, master=None):\n super().__init__(master)\n self._cached_stamp = 0;\n self.master = master\n self.pack()\n self.create_widgets()\n \n def create_widgets(self):\n self.lw_canv = tk.Canvas(self.master, width=16 * PIXEL_SIZE, height=10 * PIXEL_SIZE, bg='#888888')\n self.lw_canv.pack(side=\"top\")\n \n self.refresh_rdm_color()\n \n self.btn_quit = tk.Button(self, text=\"QUIT\", fg=\"red\", command=self.master.destroy)\n self.btn_quit.pack()\n \n self.btn_refresh = tk.Button(self, text=\"REFRESH\")\n self.btn_refresh[\"command\"] = self.refresh_file\n self.btn_refresh.pack()\n\n self.btn_random = tk.Button(self, text=\"RANDOM\")\n self.btn_random[\"command\"] = self.refresh_rdm_color\n self.btn_random.pack()\n\n def refresh_rdm_color(self):\n for i in range(LED_WALL_WIDTH):\n for j in range(LED_WALL_HEIGHT):\n self.lw_canv.create_rectangle(i * PIXEL_SIZE, j * PIXEL_SIZE, i * PIXEL_SIZE + PIXEL_SIZE, j * PIXEL_SIZE + PIXEL_SIZE, fill=(\"#\" + \"%06x\" % random.randint(0, 0xFFFFFF)))\n\n def refresh_file(self):\n idx_color = 0\n file_hdlr = open(DRIVER_PATH, \"rb\")\n bytes_read = file_hdlr.read()\n file_hdlr.close()\n rgb_values = []\n for b in bytes_read:\n rgb_values.append(int(b))\n #Check if the frame is valid\n if (len(rgb_values) != (LED_WALL_WIDTH * LED_WALL_HEIGHT) * 3):\n return -1\n #print(rgb_values)\n for y in range(LED_WALL_HEIGHT):\n for x in range(LED_WALL_WIDTH):\n color = '#%02x%02x%02x' % (rgb_values[idx_color], rgb_values[idx_color + 1], rgb_values[idx_color + 2])\n idx_color += 3\n self.lw_canv.create_rectangle(x * PIXEL_SIZE, y * PIXEL_SIZE, x * PIXEL_SIZE + PIXEL_SIZE, y * PIXEL_SIZE + PIXEL_SIZE, fill=color)\n #print(idx_color)\n return 0\n\nroot = tk.Tk()\nroot.title(\"LED Wall simulator\")\napp = Application(master=root)\n#app.mainloop()\nwhile True:\n current_timestamp = os.path.getmtime(DRIVER_PATH)\n if (old_timestamp == 0 or current_timestamp != old_timestamp):\n app.update()\n old_timestamp = current_timestamp\n app.refresh_file()\n 
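# Illustrative sketch: an alternative to the Fibonacci loop above. Every third
# Fibonacci number is even and the even ones satisfy E(n) = 4*E(n-1) + E(n-2),
# so the sum can skip the odd terms entirely. Same answer for the 4,000,000 cap.
def sum_even_fib(limit):
    a, b = 2, 8  # the first two even Fibonacci numbers
    total = 0
    while a < limit:
        total += a
        a, b = b, 4 * b + a
    return total

print(sum_even_fib(4000000))  # 4613732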
time.sleep(0.01)\n\n","repo_name":"louisthomaspro/LedWallApp","sub_path":"simulator/simulateur_ledwall.py","file_name":"simulateur_ledwall.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"32581592481","text":"import turtle\r\nimport numpy as np\r\n\r\nn=5\r\nturtle.shape('turtle')\r\nfor i in range(n):\r\n\tturtle.forward(70)\r\n\tturtle.left(180-180/n)\r\nturtle.penup()\r\nturtle.left(90)\r\nturtle.forward(200)\r\nturtle.pendown()\r\nn=11\r\nfor i in range(n):\r\n\tturtle.forward(70)\r\n\tturtle.left(180-180/n)\r\n","repo_name":"infa2021Chikalkin/infa_2021_Chikalkin","sub_path":"turtle14.py","file_name":"turtle14.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6060298584","text":"n_islands, n_bridges = map(int, input().split())\n\n\nclass Node:\n def __init__(self, island_number, army_size, nxt):\n self.island_number = island_number\n self.army_size = army_size\n self.nxt = nxt\n\n def __str__(self):\n return f'island: {self.island_number}, \\n army_size: {self.army_size}, \\n next: {str(self.nxt)}'\n\n def add_node(self, nxt_node):\n self.nxt.append(nxt_node)\n\n def set_army(self, size):\n self.army_size = size\n\n\nnodes = [Node(i, 0, []) for i in range(1, n_islands + 1)]\n\nfor i in range(n_bridges):\n a, b = map(int, input().split())\n\n # Add connection\n nodes[a - 1].add_node(nodes[b - 1])\n nodes[b - 1].add_node(nodes[a - 1])\n\nfor j in range(n_islands):\n # Set army values\n nodes[j].set_army(int(input()))\n\n\ndef conquer(head, army_size, nodes, previous):\n for node in head.nxt:\n print(head.island_number, \"->\", node.island_number)\n print(\"possible\", node.nxt)\n if army_size > node.army_size:\n # can only conquer smaller armies\n army_size += node.army_size\n node.size = 0\n\n # move to the next island\n print(head.island_number, \"CONQUERED\", node.island_number)\n print(\"current army size\", army_size)\n return conquer(node, army_size, nodes, head)\n else:\n print(\"END OF TREE\", head.island_number, \"COULDN'T CONQUER\", node.island_number)\n\nsizes = []\nconquerer = nodes[0]\nsize = conquerer.army_size\nconquerer.army_size = 0\nprint(conquerer.nxt)\n\nfor nxt_node in conquerer.nxt:\n print(nxt_node.island_number)\n sizes.append(conquer(nxt_node, size, nodes, conquerer))\n\nprint(sizes)\n\n\n","repo_name":"cadecrandall/sandbox","sub_path":"code/icpc-rockies/icpc_rockies/f.py","file_name":"f.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1046534179","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimg=cv2.imread('pic.jpg',cv2.IMREAD_COLOR)\n\ncv2.line(img,(0,0),(150,150),(255,0,0),15) #to draw a line #255,0,0 for blue - BGR\ncv2.rectangle(img,(50,50),(100,100),(255,255,255),2) #to draw a rectangle\ncv2.circle(img,(500,500),55,(0,0,255),-1) #to draw a circle #-1 to fill in the circle\n\npts=np.array([[10,5],[20,30],[70,20],[50,10]],np.int32) #points of the polygon\ncv2.polylines(img,[pts],True,(0,255,255),3) #to draw on the 
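# Illustrative sketch: the change-detection idiom in the LED wall main loop
# above -- poll a file's mtime and react only when it moves. The path and
# callback are hypothetical; the call is commented out because it never returns.
import os
import time

def watch(path, on_change, interval=0.01):
    last = None
    while True:
        stamp = os.path.getmtime(path)
        if last is None or stamp != last:
            last = stamp
            on_change(path)
        time.sleep(interval)

# watch('/tmp/lwfb', lambda p: print(p, 'changed'))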
polygon\n\ncv2.imshow('img',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","repo_name":"rishabh-malik/openCV-python","sub_path":"drawing_on_images.py","file_name":"drawing_on_images.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19765504260","text":"#!/usr/bin/env python\n\n# First find the highest set bit, then build an all-ones mask, then take the difference\ndef find_complete(num):\n tmp = num\n maxbits = 0\n while tmp != 0:\n tmp = tmp >> 1\n maxbits += 1\n sum = 0\n for i in range(maxbits):\n sum |= 1< 0].min()\r\n np.fill_diagonal(dis, 0.)\r\n embedding = embedding.fit(dis)\r\n X = embedding.embedding_\r\n movies = ['DUNKIRK', 'GRAVITY', 'INTERSTELLAR', 'KILL BILL VOL.1', 'KILL BILL VOL.2', 'THE MARTIAN', 'TITANIC']\r\n\r\n plt.figure(figsize=(17, 6))\r\n plt.scatter(X[:, 0], X[:, 1], alpha=0.)\r\n plt.axis('equal')\r\n plt.axis('off')\r\n c = {'KILL BILL VOL.1': 'red', 'KILL BILL VOL.2': 'red', 'TITANIC': 'blue', 'DUNKIRK': 'blue', 'GRAVITY': 'black',\r\n 'INTERSTELLAR': 'black', 'THE MARTIAN': 'black'}\r\n for film in movies:\r\n i = movies.index(film)\r\n plt.gca().annotate(film, X[i], size=30, ha='center', color=c[film], weight=\"bold\", alpha=0.7)\r\n plt.show()\r\n\r\ndef Sinkhorn(a, b, cost, epsilon, max_iter=200):\r\n K = np.exp(-cost/epsilon)\r\n v = np.ones(b.shape[0])\r\n for i in range(max_iter):\r\n print(K.dot(v))\r\n u = a/K.dot(v)\r\n v = b/K.T.dot(u)\r\n return np.diag(u).dot(K).dot(np.diag(v))\r\n\r\n\r\nif __name__=='__main__':\r\n OT_distances = np.zeros((7, 7))\r\n texts = load_data()\r\n reg = 0.1\r\n for i in range(7):\r\n for j in range(i + 1, 7):\r\n C = costMatrix(i, j, texts)\r\n a = texts[i][1]\r\n b = texts[j][1]\r\n OT_plan = ot.sinkhorn(a, b, C, reg=reg)\r\n OT_distances[i, j] = np.sum(C * OT_plan)\r\n OT_distances[j, i] = OT_distances[i, j]\r\n clustering(OT_distances)\r\n","repo_name":"Ulteraa/Document_Clustering_Optimal_Transport","sub_path":"document_clustering.py","file_name":"document_clustering.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4973304555","text":"#Problem: https://www.hackerrank.com/challenges/s10-binomial-distribution-1/problem\n\n\nfrom itertools import combinations\n\nprob_boy = 1.09 / (1.09 + 1)\nprob_girl = 1 / (1.09 + 1)\n\nsolution = 0\nfor num_boys in range(3, 7): \n bin_dist = \\\n len([*combinations(range(6), num_boys)])\\\n * (prob_boy ** num_boys) * (prob_girl ** (6 - num_boys)) \n solution += bin_dist \nprint(round(solution, 3))\n","repo_name":"IhorVodko/Hackerrank_solutions","sub_path":"10_Days_of_Statistics/Day 4: Binomial Distribution I.py","file_name":"Day 4: Binomial Distribution I.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"73811695720","text":"from configargparse import ArgumentParser\nimport json\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\nfrom base64 import b64encode\nfrom io import BytesIO\nfrom typing import Any, Dict, Iterable, List, Optional, Tuple, cast\nfrom urllib.parse import urldefrag, urljoin, urlparse\n\nimport requests\nimport ruamel.yaml\nimport schema_salad\nfrom wes_client.util import WESClient # type: ignore\nfrom wes_client.util import wes_reponse as wes_response\n\nfrom toil.wdl.utils import get_version as get_wdl_version\n\n\"\"\"\nA CWL runner that submits a workflow to a WES server, waits for it to 
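# Illustrative sketch of the bit trick the truncated find_complete record above
# appears to be building: an all-ones mask as wide as the number, from which
# subtraction (equivalently XOR) flips every bit below the leading one. This is
# a reconstruction under that assumption, not the original file's code.
def complement_below_leading_bit(num):
    mask = (1 << num.bit_length()) - 1  # e.g. num=5 (0b101) -> mask=0b111
    return mask - num                   # same as mask ^ num

assert complement_below_leading_bit(5) == 2   # 0b101 -> 0b010
assert complement_below_leading_bit(10) == 5  # 0b1010 -> 0b0101
print(complement_below_leading_bit(5))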
finish,\nand outputs the results.\n\n\nEnvironment variables:\n+----------------------------------+----------------------------------------------------+\n| TOIL_WES_ENDPOINT | URL to the WES server to use for this WES-based |\n| | CWL runner. |\n+----------------------------------+----------------------------------------------------+\n| TOIL_WES_USER | Username to use with HTTP Basic Authentication to |\n| | log into the WES server. |\n+----------------------------------+----------------------------------------------------+\n| TOIL_WES_PASSWORD | Password to use with HTTP Basic Authentication to |\n| | log into the WES server. |\n+----------------------------------+----------------------------------------------------+\n\n\nExample usage with cwltest:\n\n```\ncwltest --verbose \\\n --tool=toil-wes-cwl-runner \\\n --test=src/toil/test/cwl/spec_v12/conformance_tests.yaml \\\n -n=1-50 \\\n --timeout=2400 \\\n -j2 \\\n -- \\\n --wes_endpoint=http://localhost:8080 \\\n --disableCaching \\\n --clean=always \\\n --logDebug\n```\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_attachment_path_names(paths: List[str]) -> Tuple[str, List[str]]:\n \"\"\"\n Take in a list of path names and return a list of names with the common path\n name stripped out, while preserving the input order. This guarantees that\n there are no relative paths that traverse up.\n\n For example, for the following CWL workflow where \"hello.yaml\" references\n a file \"message.txt\",\n\n ~/toil/workflows/hello.cwl\n ~/toil/input_files/hello.yaml\n ~/toil/input_files/message.txt\n\n This may be run with the command:\n toil-wes-cwl-runner hello.cwl ../input_files/hello.yaml\n\n Where \"message.txt\" is resolved to \"../input_files/message.txt\".\n\n We'd send the workflow file as \"workflows/hello.cwl\", and send the inputs as\n \"input_files/hello.yaml\" and \"input_files/message.txt\".\n\n :param paths: A list of absolute or relative path names. Relative paths are\n interpreted as relative to the current working directory.\n :return: The common path name and a list of minimal path names.\n \"\"\"\n if not paths:\n return os.getcwd(), []\n\n # Make sure we are working with absolute paths\n paths = [os.path.abspath(path) for path in paths]\n\n # Find the common ancestor of the hierarchy so we can drop it for every path\n common_path = os.path.commonpath(paths)\n\n if len(paths) == 1:\n # If we just have one file (probably the main CWL workflow), make sure\n # we don't include the file name as well.\n common_path = os.path.dirname(paths[0])\n\n return common_path, [os.path.relpath(path, common_path) for path in paths]\n\n\nclass WESClientWithWorkflowEngineParameters(WESClient): # type: ignore\n \"\"\"\n A modified version of the WESClient from the wes-service package that\n includes workflow_engine_parameters support.\n\n TODO: Propose a PR in wes-service to include workflow_engine_params.\n \"\"\"\n def __init__(self, endpoint: str, auth: Optional[Tuple[str, str]] = None) -> None:\n \"\"\"\n :param endpoint: The http(s) URL of the WES server. 
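# Illustrative sketch: the commonpath/relpath combination that
# generate_attachment_path_names above uses to strip the shared prefix from
# attachment paths. The absolute paths here are hypothetical.
import os

paths = ['/home/u/toil/workflows/hello.cwl',
         '/home/u/toil/input_files/hello.yaml',
         '/home/u/toil/input_files/message.txt']
base = os.path.commonpath(paths)
print(base)                                       # /home/u/toil
print([os.path.relpath(p, base) for p in paths])  # ['workflows/hello.cwl', ...]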
Must include the\n protocol.\n :param auth: Authentication information that will be attached to every\n request to the WES server.\n \"\"\"\n proto, host = endpoint.split(\"://\")\n super().__init__({\n # TODO: use the auth argument in requests.post so we don't need to encode it ourselves\n \"auth\": {\"Authorization\": \"Basic \" + b64encode(f\"{auth[0]}:{auth[1]}\".encode()).decode(\"utf-8\")} if auth else {},\n \"proto\": proto,\n \"host\": host\n })\n\n def get_version(self, extension: str, workflow_file: str) -> str:\n \"\"\"Determines the version of a .py, .wdl, or .cwl file.\"\"\"\n # TODO: read from the web?\n\n if workflow_file.startswith(\"file://\"):\n workflow_file = workflow_file[7:]\n\n if extension == \"py\":\n # For Toil testing, we only ever run CWL workflows.\n # Just pretend all Python workflows are version 3.8 for now.\n return \"3.8\"\n elif extension == \"cwl\":\n with open(workflow_file) as f:\n return str(ruamel.yaml.safe_load(f)[\"cwlVersion\"])\n elif extension == \"wdl\":\n with open(workflow_file) as f:\n return get_wdl_version(f)\n else:\n raise RuntimeError(f\"Invalid workflow extension: {extension}.\")\n\n def parse_params(self, workflow_params_file: str) -> Dict[str, Any]:\n \"\"\"\n Parse the CWL input file into a dictionary to be attached to the body of\n the WES run request.\n\n :param workflow_params_file: The URL or path to the CWL input file.\n \"\"\"\n loader = schema_salad.ref_resolver.Loader(\n {\"location\": {\"@type\": \"@id\"}, \"path\": {\"@type\": \"@id\"}}\n )\n\n # recursive types may be complicated for MyPy to deal with\n workflow_params: Any\n workflow_params, _ = loader.resolve_ref(workflow_params_file, checklinks=False)\n\n return cast(Dict[str, Any], workflow_params)\n\n def modify_param_paths(self, base_dir: str, workflow_params: Dict[str, Any]) -> None:\n \"\"\"\n Modify the file paths in the input workflow parameters to be relative\n to base_dir.\n\n :param base_dir: The base directory to make the file paths relative to.\n This should be the common ancestor of all attached files, which\n will become the root of the execution folder.\n :param workflow_params: A dict containing the workflow parameters.\n \"\"\"\n\n def replace(field: str, file_obj: Dict[str, str]) -> None:\n \"\"\"\n Given a file object with the \"location\" or \"path\" field, replace it\n to be relative to base_dir.\n \"\"\"\n value = file_obj.get(field, None)\n if isinstance(value, str) and urlparse(value).scheme in (\"file\", \"\"):\n if value.startswith(\"file://\"):\n value = value[7:]\n file_obj[field] = os.path.relpath(value, base_dir)\n\n def replace_paths(obj: Any) -> None:\n for file in obj:\n if isinstance(file, dict) and (\"location\" in file or \"path\" in file):\n replace(\"location\", file)\n replace(\"path\", file)\n\n # recursively find all imported files\n if \"secondaryFiles\" in file:\n replace_paths(file.get(\"secondaryFiles\"))\n elif isinstance(file, dict):\n replace_paths(file.values())\n elif isinstance(file, list):\n replace_paths(file)\n replace_paths(workflow_params.values())\n\n def build_wes_request(\n self,\n workflow_file: str,\n workflow_params_file: Optional[str],\n attachments: Optional[List[str]],\n workflow_engine_parameters: Optional[List[str]] = None\n ) -> Tuple[Dict[str, str], Iterable[Tuple[str, Tuple[str, BytesIO]]]]:\n \"\"\"\n Build the workflow run request to submit to WES.\n\n :param workflow_file: The path or URL to the CWL workflow document.\n Only file:// URL supported at the moment.\n :param workflow_params_file: The 
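# Illustrative sketch: the HTTP Basic Authorization header assembled in
# __init__ above is just "Basic " + base64("user:password"). Toy credentials.
from base64 import b64encode

def basic_auth_header(user, password):
    token = b64encode(f'{user}:{password}'.encode()).decode('utf-8')
    return {'Authorization': 'Basic ' + token}

print(basic_auth_header('alice', 's3cret'))
# {'Authorization': 'Basic YWxpY2U6czNjcmV0'}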
path or URL to the CWL input file.\n :param attachments: A list of local paths to files that will be uploaded\n to the server.\n :param workflow_engine_parameters: A list of engine parameters to set\n along with this workflow run.\n\n :returns: A dictionary of parameters as the body of the request, and an\n iterable for the pairs of filename and file contents to upload\n to the server.\n \"\"\"\n\n local_workflow_file = urlparse(workflow_file).scheme in (\"\", \"file\")\n\n if workflow_file.startswith(\"file://\"):\n workflow_file = workflow_file[7:]\n\n # Read from the workflow_param file and parse it into a dict\n if workflow_params_file:\n workflow_params = self.parse_params(workflow_params_file)\n else:\n workflow_params = {}\n\n # Initialize the basic parameters for the run request\n wf_url, frag = urldefrag(workflow_file)\n\n workflow_type = wf_url.lower().split(\".\")[-1] # Grab the file extension\n workflow_type_version = self.get_version(workflow_type, wf_url)\n data: Dict[str, str] = {\n \"workflow_url\": workflow_file,\n \"workflow_params\": \"\", # to be set after attachments are processed\n \"workflow_type\": workflow_type,\n \"workflow_type_version\": workflow_type_version\n }\n\n # Convert engine arguments into a JSON object\n if workflow_engine_parameters:\n params = {}\n for param in workflow_engine_parameters:\n if '=' not in param: # flags like \"--logDebug\"\n k, v = param, None\n else:\n k, v = param.split('=', 1)\n params[k] = v\n data[\"workflow_engine_parameters\"] = json.dumps(params)\n\n # Deal with workflow attachments\n if attachments is None:\n attachments = []\n\n # Upload the CWL workflow file if it is local\n if local_workflow_file and wf_url not in attachments:\n attachments.append(wf_url)\n\n # Prepare attachments and generate new path names with the common prefix stripped out\n workflow_attachments = []\n common_path, attachment_paths = generate_attachment_path_names(attachments)\n for src, dest in zip(attachments, attachment_paths):\n with open(src, \"rb\") as f:\n workflow_attachments.append((dest, BytesIO(f.read())))\n\n # Make sure we let the server know where the main CWL file is located\n if local_workflow_file:\n data[\"workflow_url\"] = os.path.relpath(workflow_file, common_path)\n\n # Since the input file will be located at the root of the execution\n # folder, we make sure the input files are relative to the root.\n self.modify_param_paths(common_path, workflow_params)\n data[\"workflow_params\"] = json.dumps(workflow_params)\n\n return data, [(\"workflow_attachment\", val) for val in workflow_attachments]\n\n def run_with_engine_options(\n self,\n workflow_file: str,\n workflow_params_file: Optional[str],\n attachments: Optional[List[str]],\n workflow_engine_parameters: Optional[List[str]]\n ) -> Dict[str, Any]:\n \"\"\"\n Composes and sends a post request that signals the WES server to run a\n workflow.\n\n :param workflow_file: The path to the CWL workflow document.\n :param workflow_params_file: The path to the CWL input file.\n :param attachments: A list of local paths to files that will be uploaded\n to the server.\n :param workflow_engine_parameters: A list of engine parameters to set\n along with this workflow run.\n\n :return: The body of the post result as a dictionary.\n \"\"\"\n data, files = self.build_wes_request(workflow_file,\n workflow_params_file,\n attachments,\n workflow_engine_parameters)\n post_result = requests.post(\n urljoin(f\"{self.proto}://{self.host}\", \"/ga4gh/wes/v1/runs\"),\n data=data,\n files=files,\n 
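# Illustrative sketch: the shape of the multipart request that
# run_with_engine_options posts above -- plain fields in `data`, repeated
# ("workflow_attachment", (filename, fileobj)) pairs in `files`. The URL is
# hypothetical and nothing is sent; the request is only prepared.
from io import BytesIO
import requests

data = {'workflow_url': 'hello.cwl', 'workflow_type': 'cwl'}
files = [
    ('workflow_attachment', ('hello.cwl', BytesIO(b'cwlVersion: v1.2'))),
    ('workflow_attachment', ('hello.yaml', BytesIO(b'message: hi'))),
]
req = requests.Request('POST', 'http://localhost:8080/ga4gh/wes/v1/runs',
                       data=data, files=files).prepare()
print(req.headers['Content-Type'])  # multipart/form-data; boundary=...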
headers=self.auth,\n )\n\n return cast(Dict[str, Any], wes_response(post_result))\n\n\ndef get_deps_from_cwltool(cwl_file: str, input_file: Optional[str] = None) -> List[str]:\n \"\"\"\n Return a list of dependencies of the given workflow from cwltool.\n\n :param cwl_file: The CWL file.\n :param input_file: Omit to get the dependencies from the CWL file. If set,\n this returns the dependencies from the input file.\n \"\"\"\n\n option = '--print-input-deps' if input_file else '--print-deps'\n\n args = ['cwltool', option, '--relative-deps', 'cwd', cwl_file]\n if input_file:\n args.append(input_file)\n\n p = subprocess.run(args=args, check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n\n result = p.stdout.decode()\n if not result:\n return []\n\n json_result: Dict[str, Any] = json.loads(result)\n deps = []\n\n def get_deps(obj: Any) -> None:\n \"\"\"\n A recursive function to add file dependencies from the cwltool output to\n the deps list. For directory objects without listing, contents of the\n entire directory will be included.\n \"\"\"\n for file in obj:\n if isinstance(file, dict) and \"location\" in file:\n loc = cast(str, file.get(\"location\"))\n if urlparse(loc).scheme not in [\"\", \"file\"]:\n # Ignore nonlocal files.\n continue\n\n # Check directory\n if file.get(\"class\") == \"Directory\":\n if file.get(\"listing\"):\n get_deps(file.get(\"listing\"))\n else:\n # no listing, so import all files in the directory\n for folder, _, sub_files in os.walk(loc):\n for sub_file in sub_files:\n deps.append(os.path.join(folder, sub_file))\n else:\n deps.append(loc)\n\n # check secondaryFiles\n if \"secondaryFiles\" in file:\n get_deps(file.get(\"secondaryFiles\"))\n\n get_deps(json_result.get(\"secondaryFiles\", []))\n return deps\n\n\ndef submit_run(client: WESClientWithWorkflowEngineParameters,\n cwl_file: str,\n input_file: Optional[str] = None,\n engine_options: Optional[List[str]] = None) -> str:\n \"\"\"\n Given a CWL file, its input files, and an optional list of engine options,\n submit the CWL workflow to the WES server via the WES client.\n\n This function also attempts to find the attachments from the CWL workflow\n and its input file, and attach them to the WES run request.\n\n :param client: The WES client.\n :param cwl_file: The path to the CWL workflow document.\n :param input_file: The path to the CWL input file.\n :param engine_options: A list of engine parameters to set along with this\n workflow run.\n \"\"\"\n # First, get the list of files to attach to this workflow\n attachments = get_deps_from_cwltool(cwl_file)\n\n if input_file:\n attachments.extend(get_deps_from_cwltool(cwl_file, input_file))\n\n run_result: Dict[str, Any] = client.run_with_engine_options(\n cwl_file,\n input_file,\n attachments=attachments,\n workflow_engine_parameters=engine_options)\n return run_result.get(\"run_id\", None)\n\n\ndef poll_run(client: WESClientWithWorkflowEngineParameters, run_id: str) -> bool:\n \"\"\" Return True if the given workflow run is in a finished state.\"\"\"\n status_result = client.get_run_status(run_id)\n state = status_result.get(\"state\")\n\n return state in (\"COMPLETE\", \"CANCELING\", \"CANCELED\", \"EXECUTOR_ERROR\", \"SYSTEM_ERROR\")\n\n\ndef print_logs_and_exit(client: WESClientWithWorkflowEngineParameters, run_id: str) -> None:\n \"\"\"\n Fetch the workflow logs from the WES server, print the results, then exit\n the program with the same exit code as the workflow run.\n\n :param client: The WES client.\n :param run_id: The run_id of the target 
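# Illustrative sketch: capturing a command's stdout and parsing it as JSON,
# the same pattern get_deps_from_cwltool above uses with cwltool. Here
# `python -c` stands in for cwltool so the snippet runs anywhere.
import json
import subprocess
import sys

p = subprocess.run(
    [sys.executable, '-c', 'import json; print(json.dumps({"class": "File"}))'],
    check=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
print(json.loads(p.stdout.decode())['class'])  # File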
workflow.\n \"\"\"\n data = client.get_run_log(run_id)\n\n outputs = json.dumps(data.get(\"outputs\", {}), indent=4)\n exit_code = data.get(\"run_log\", {}).get(\"exit_code\", 1)\n\n sys.stdout.write(outputs)\n sys.exit(exit_code)\n\n\ndef main() -> None:\n parser = ArgumentParser(description=\"A CWL runner that runs workflows through WES.\")\n\n # the first two positional arguments are the CWL file and its input file\n parser.add_argument(\"cwl_file\", type=str)\n parser.add_argument(\"input_file\", type=str, nargs=\"?\", default=None)\n # arguments used by the WES runner\n parser.add_argument(\"--wes_endpoint\",\n default=os.environ.get(\"TOIL_WES_ENDPOINT\", \"http://localhost:8080\"),\n help=\"The http(s) URL of the WES server. (default: %(default)s)\")\n # the rest of the arguments are passed as engine options to the WES server\n options, rest = parser.parse_known_args()\n\n cwl_file = options.cwl_file\n input_file = options.input_file\n\n # Initialize client and run the workflow\n endpoint = options.wes_endpoint\n\n # For security reasons, username and password can only come from environment variables\n wes_user = os.environ.get(\"TOIL_WES_USER\", None)\n wes_password = os.environ.get(\"TOIL_WES_PASSWORD\", None)\n\n client = WESClientWithWorkflowEngineParameters(\n endpoint=endpoint,\n auth=(wes_user, wes_password) if wes_user and wes_password else None)\n\n run_id = submit_run(client, cwl_file, input_file, engine_options=rest)\n assert run_id\n\n done = False\n while not done:\n time.sleep(1)\n done = poll_run(client, run_id)\n\n print_logs_and_exit(client, run_id)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DataBiosphere/toil","sub_path":"src/toil/server/cli/wes_cwl_runner.py","file_name":"wes_cwl_runner.py","file_ext":"py","file_size_in_byte":18389,"program_lang":"python","lang":"en","doc_type":"code","stars":856,"dataset":"github-code","pt":"18"} +{"seq_id":"31056370458","text":"#!/usr/bin/env python\nfrom base import *\n\n\"\"\" A representation of a nagios service dependency\"\"\"\n\nclass Command(BaseObject):\n TYPE = 'command'\n TEMPLATE_CLASS = None\n PARAMS = (\n 'command_name',\n 'command_line'\n )\n\n REQUIRED_PARAMS = PARAMS\n\n @property\n def identity(self):\n return self.command_name\n","repo_name":"johnskopis/naglib","sub_path":"naglib/config/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3162286341","text":"# -*- coding: utf-8 -*-\nfrom multiprocessing import TimeoutError\n\nimport scrapy\nfrom pymongo.mongo_client import MongoClient\nfrom scrapy.spidermiddlewares.httperror import HttpError\nfrom twisted.internet.error import DNSLookupError, TCPTimedOutError\nfrom PyDictionary import PyDictionary\nfrom bs4 import BeautifulSoup\nimport logging\nimport urlparse\nimport datetime\nimport re\n\nimport nltk\nfrom nltk.corpus import wordnet, stopwords\n\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport spacy\nfrom spacy.symbols import VERB\n\nclass CompanyWebSiteSpider(scrapy.Spider):\n\n name = \"website_crawler\"\n\n EMAIL_RE = r'[\\w\\.-]+@[\\w\\.-]+'\n\n speciality = None\n\n mongo_client = None\n gatherer_database = None\n\n nlp = None\n dictionary = PyDictionary()\n\n stop_words = None\n\n words_to_find = None\n\n verbs_to_find = None\n\n def __init__(self, speciality=None, *args, **kwargs):\n super(CompanyWebSiteSpider, self).__init__(*args, **kwargs)\n\n self.logger.logger.setLevel(logging.INFO)\n\n 
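# Illustrative sketch: the submit-then-poll pattern in main() above, with the
# WES client replaced by a hypothetical is_done() callable so the loop can run
# on its own.
import time

def wait_until(is_done, interval=1.0, max_polls=100):
    for _ in range(max_polls):
        if is_done():
            return True
        time.sleep(interval)
    return False

state = {'ticks': 0}
def fake_poll():
    state['ticks'] += 1
    return state['ticks'] >= 3  # "finishes" on the third poll

print(wait_until(fake_poll, interval=0.01))  # True, after three polls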
if speciality is None:\n raise Exception(\"The speciality must be informed\")\n\n self.speciality = speciality\n\n nltk.data.path.append(\n '/Users/xalperte/BigMLDev/company_web_scrapy/webscrawler/nltk_data')\n\n self.stop_words = stopwords.words('english')\n\n self.nlp = spacy.load('en')\n\n def init(self):\n\n self.mongo_client = MongoClient(\n self.settings.get('MONGO_GATHERER_BD_URI'))\n self.gatherer_database = self.mongo_client.get_default_database()\n\n self.prepare_words_to_find()\n self.prepare_verbs_to_find()\n\n def start_requests(self):\n\n self.init()\n\n companies_by_id = self.load_companies()\n company_num = 0\n current_company = None\n try:\n # companies_by_id = self.load_jim_companies()\n\n for company_id, company in companies_by_id.iteritems():\n current_company = company\n\n if 'webpage' in company:\n company_num += 1\n self.logger.info(\"[%d] Launch Home Page Request for %s - %s - %s - %s\" %\n (\n company_num,\n company['webpage'],\n company['company_id'],\n company['company_name'],\n company['webpage']\n ))\n yield scrapy.Request(url=company['webpage'],\n meta={\n 'url': company['webpage'],\n 'company_id': company['company_id'],\n 'company_name': company['company_name'],\n 'company_num': company_num,\n 'company_home_page': company['webpage']},\n errback=self.error,\n callback=self.parse_website)\n\n # company_num += 1\n # if company_num == 10:\n # break\n else:\n self.logger.warning(\"The company [%s] \"\n \"doesn't have webpage infomed\" %\n company_id)\n except Exception as e:\n self.logger.error(\"Error processing company at [%d]. Company ID: [%s]. Cause [%s]\" %\n (company_num, current_company['company_id'], repr(e)))\n\n def parse_website(self, response):\n\n try:\n self.logger.info(\"[%d] Parsing Home Page from %s - %s - %s - %s\" % (\n response.meta['company_num'],\n response.url,\n response.meta['company_id'],\n response.meta['company_name'],\n response.meta['company_home_page']))\n\n self.update_company_page(response)\n\n home_url = urlparse.urlparse(response.url)\n home_netloc = home_url.netloc.lower()\n\n # Following only the links to the company website. Forget about the\n # links to other websites.\n processed_links = set()\n requested_links = 0\n for link_data in self.get_links(self.guess_root(response.url), response.body):\n if link_data[1] not in processed_links:\n processed_links.add(link_data[1])\n\n link_url = urlparse.urlparse(link_data[1])\n link_netloc = link_url.netloc.lower()\n\n if home_netloc == link_netloc:\n if self.follow_link(link_data):\n requested_links += 1\n\n # Only X links to follow\n if requested_links > 10:\n break\n\n yield scrapy.Request(url=link_data[1],\n meta={\n 'url_name': link_data[0],\n 'url': link_data[1],\n 'company_id': response.meta['company_id'],\n 'company_name': response.meta['company_name'],\n 'company_home_page': response.meta['company_home_page']},\n errback=self.error,\n callback=self.parse_internal_page)\n\n self.logger.debug(\"\\t Link to follow: [%s] - [%s]\" %\n (link_data[0], link_data[1]))\n else:\n self.logger.debug(\n \"\\t NOT Link to follow (not words in the link): [%s] - [%s].\" %\n (link_data[0], link_data[1]))\n else:\n self.logger.debug(\n \"\\t NOT Link to follow (out of the company website): [%s] - [%s]. \"\n \"Home Netloc [%s] - Link Netloc [%s]\" %\n (link_data[0], link_data[1], home_netloc, link_netloc))\n\n except Exception as e:\n self.logger.error(\"Home page parsing exception. URL [%s]. 
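# Illustrative sketch: the same-site test parse_website above applies before
# following a link -- two URLs belong to the same site when their lowercased
# netlocs match. Shown with Python 3's urllib.parse, the equivalent of the
# Python 2 urlparse module the spider imports.
from urllib.parse import urlparse

def same_site(home_url, link_url):
    return urlparse(home_url).netloc.lower() == urlparse(link_url).netloc.lower()

print(same_site('http://example.com/', 'http://example.com/about'))  # True
print(same_site('http://example.com/', 'http://blog.example.com/'))  # False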
Cause [%s]\" %\n (response.url, repr(e)))\n\n def parse_internal_page(self, response):\n\n try:\n self.logger.debug(\"Parsing Internal Page from %s - %s - %s - %s - %s\" % (\n response.meta['url_name'],\n response.url,\n response.meta['company_id'],\n response.meta['company_name'],\n response.meta['company_home_page']))\n\n self.update_company_page(response,\n url_name=response.meta['url_name'],\n is_home=False)\n\n except Exception as e:\n self.logger.error(\"Internal page parsing exception. URL [%s]. Cause [%s]\" %\n (response.url), repr(e))\n\n\n\n def follow_link(self, link_data):\n for word in self.words_to_find:\n if word in link_data[0] or \\\n word in link_data[2] or \\\n word in link_data[3]:\n return True\n return False\n\n\n def error(self, failure):\n # log all failures\n self.logger.error(\"Request Error!\")\n self.logger.error(repr(failure))\n\n # in case you want to do something special for some errors,\n # you may need the failure's type:\n if failure.check(HttpError):\n # these exceptions come from HttpError spider middleware\n # you can get the non-200 response\n response = failure.value.response\n self.logger.error('HttpError on %s', response.url)\n self.write_wrong_website('HTTP',\n response.meta['company_id'], response.meta['company_name'],\n response.url,\n repr(failure))\n\n elif failure.check(DNSLookupError):\n # this is the original request\n request = failure.request\n self.logger.error('DNSLookupError on %s', request.url)\n self.write_wrong_website('DNSLookup',\n request.meta['company_id'],\n request.meta['company_name'],\n request.url,\n repr(failure))\n\n elif failure.check(TimeoutError, TCPTimedOutError):\n request = failure.request\n self.logger.error('TimeoutError on %s', request.url)\n self.write_wrong_website('Timeout',\n request.meta['company_id'],\n request.meta['company_name'],\n request.url,\n repr(failure))\n\n def get_links(self, root, html_doc):\n soup = BeautifulSoup(html_doc, 'html.parser')\n return self.resolve_links(root, soup.find_all('a', href=True))\n\n\n def guess_root(self, base_url):\n if base_url.startswith('http'):\n parsed_link = urlparse.urlparse(base_url)\n scheme = parsed_link.scheme + '://'\n netloc = parsed_link.netloc\n return scheme + netloc\n\n\n def resolve_links(self, root, links):\n for link in links:\n link_title = link.get_text()\n link_href = link['href']\n if not link_href.startswith('http'):\n link_href = urlparse.urljoin(root, link_href)\n\n # Bad urls (email attached to the url)\n match = re.findall(self.EMAIL_RE, link_href)\n if match and len(match) > 0:\n for email in match:\n link_href = link_href.replace(email, '')\n\n if link_href.endswith('/admin'):\n link_href = link_href.replace('/admin', '')\n\n yield (link_title, link_href,\n urlparse.urlparse(link_href).path,\n urlparse.urlparse(link_href).query)\n\n\n def prepare_verbs_to_find(self):\n\n base_verbs = [ 'give', 'offer', 'contribute', 'administer',\n 'bring', 'provide', 'supply', 'manufacture', 'produce',\n 'automate', 'commodity', 'sell', 'solve', 'build' ]\n\n self.verbs_to_find = set()\n for word in base_verbs:\n self.verbs_to_find.add(word)\n for idx, synset in enumerate(wordnet.synsets(word)):\n for synonym in synset.lemma_names():\n self.verbs_to_find.add(synonym.replace('_', ' '))\n\n # hypernyms = [l.lemma_names() for l in synset.hypernyms()]\n # for hypernym in hypernyms:\n # for word in hypernym:\n # self.verbs_to_find.add(word.replace('_', ' '))\n #\n # hyponyms = [l.lemma_names() for l in synset.hyponyms()]\n # for hyponym in hyponyms:\n # for 
word in hyponym:\n # self.verbs_to_find.add(word.replace('_', ' '))\n\n\n stop_verbs = set(['get', 'have', 'be', 'add', 'work',\n 'reach', 'open', 'create', 'take', 'break'])\n\n self.verbs_to_find = self.verbs_to_find.difference(stop_verbs)\n\n def prepare_words_to_find(self):\n\n all_speciality_words = set()\n for idx, synset in enumerate(wordnet.synsets(self.speciality)):\n for synonym in synset.lemma_names():\n all_speciality_words.add(synonym.replace('_', ' '))\n\n # hypernyms = [l.lemma_names() for l in synset.hypernyms()]\n # for hypernym in hypernyms:\n # for word in hypernym:\n # all_speciality_words.add(word.replace('_', ' '))\n #\n # hyponyms = [l.lemma_names() for l in synset.hyponyms()]\n # for hyponym in hyponyms:\n # for word in hyponym:\n # all_speciality_words.add(word.replace('_', ' '))\n\n # Words related to the maket\n market_words = ['mechanics','unfolding','marketplace','deploy','give',\n 'contribute','administer', 'bring', 'service', 'result',\n 'technology','market','use', 'compose', 'prepare',\n 'provide','make','support','business','supply',\n 'manufacture','product','robotics','ability','form',\n 'automate','produce','about','resource', 'commodity',\n 'vend','wholesale','work','solution','duty','retail',\n 'display', 'mission', 'vision']\n\n all_market_words = set()\n for word in market_words:\n all_market_words.add(word)\n for idx, synset in enumerate(wordnet.synsets(word)):\n for synonym in synset.lemma_names():\n all_market_words.add(synonym.replace('_', ' '))\n\n # hypernyms = [l.lemma_names() for l in synset.hypernyms()]\n # for hypernym in hypernyms:\n # for word in hypernym:\n # all_market_words.add(word.replace('_', ' '))\n #\n # hyponyms = [l.lemma_names() for l in synset.hyponyms()]\n # for hyponym in hyponyms:\n # for word in hyponym:\n # all_market_words.add(word.replace('_', ' '))\n\n communication_words = ['disclosure','communication','article',\n 'announcement','story','record', 'blog',\n 'intelligence', 'journal', 'advice', 'diary',\n 'news','forum']\n\n all_communication_words = set()\n for word in communication_words:\n all_communication_words.add(word)\n for idx, synset in enumerate(wordnet.synsets(word)):\n for synonym in synset.lemma_names():\n all_communication_words.add(synonym.replace('_', ' '))\n\n # hypernyms = [l.lemma_names() for l in synset.hypernyms()]\n # for hypernym in hypernyms:\n # for word in hypernym:\n # all_communication_words.add(word.replace('_', ' '))\n #\n # hyponyms = [l.lemma_names() for l in synset.hyponyms()]\n # for hyponym in hyponyms:\n # for word in hyponym:\n # all_communication_words.add(word.replace('_', ' '))\n\n special_words = set('rss')\n\n self.logger.debug(\"Speciality Words to find: [%s]\" %\n ','.join(all_speciality_words))\n self.logger.debug(\"Communication Words to find: [%s]\" %\n ','.join(all_communication_words))\n self.logger.debug(\"Market Words to find: [%s]\" %\n ','.join(all_market_words))\n self.logger.debug(\"Commons Words between sections: [%s]\" %\n ','.join(set.intersection(\n all_speciality_words,\n all_market_words,\n all_communication_words)))\n\n self.words_to_find = set.union(all_speciality_words,\n all_market_words,\n all_communication_words,\n special_words)\n\n def write_wrong_website(self, type, company_id, company_name, company_url, error):\n with open('error-%s.txt' % type, 'ab') as f:\n f.write(\"\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n\" %\n (company_id, company_name, company_url, error))\n\n def write_wrong_specialty(self, company_id, company_name, company_url):\n 
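# Illustrative sketch: the WordNet expansion used by prepare_verbs_to_find and
# prepare_words_to_find above -- collect lemma names across every synset of a
# seed word. Requires the NLTK wordnet corpus to be available locally.
from nltk.corpus import wordnet

def expand(word):
    synonyms = {word}
    for synset in wordnet.synsets(word):
        for lemma in synset.lemma_names():
            synonyms.add(lemma.replace('_', ' '))
    return synonyms

print(sorted(expand('offer'))[:5])  # a few of the collected synonyms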
with open('wrong-specialty-%s.txt' % self.speciality, 'ab') as f:\n f.write(\"\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"\\n\" %\n (company_id, company_name, company_url))\n\n\n def update_company_page(self, response, url_name='Home', is_home=True):\n\n companies_pages = \\\n self.gatherer_database['company_webpages']\n\n soup = BeautifulSoup(response.body)\n\n # Remove the script and style tags\n [x.extract() for x in soup.findAll('script')]\n [x.extract() for x in soup.findAll('style')]\n [x.extract() for x in soup.select('[style*=\"visibility:hidden\"]')]\n [x.extract() for x in soup.select('[style*=\"display:none\"]')]\n\n page_text = soup.get_text()\n\n # strip empty lines\n page_text = \"\".join([s for s in page_text.strip().splitlines(True) if s.strip()])\n\n if self.speciality not in page_text and is_home:\n self.write_wrong_specialty(response.meta['company_id'],\n response.meta['company_name'],\n response.url)\n\n keywords = self.get_page_meta('keywords', response.body)\n if keywords is not None:\n keywords = [keyword.strip() for keyword in keywords.split(',')]\n\n title = self.get_page_meta('title', response.body)\n title_bags_of_words = self.decompose_sentences([title] if title is not None else None)\n\n description = self.get_page_meta('description', response.body)\n description_bags_of_words = self.decompose_sentences([description] if description is not None else None)\n\n abstract = self.get_page_meta('abstract', response.body)\n abstract_bags_of_words = self.decompose_sentences([abstract] if abstract is not None else None)\n\n sentences = self.find_sentences(page_text)\n sentences = self.decompose_sentences(sentences)\n\n companies_pages.update({\n 'company_id': response.meta['company_id'],\n 'url': response.url\n }, {\n '$setOnInsert': {\n 'company_id': response.meta['company_id'],\n 'url': response.url,\n 'created': datetime.datetime.now()\n },\n\n '$set': {\n 'updated': datetime.datetime.now(),\n 'url_name': url_name.strip(),\n 'company_name': response.meta['company_name'],\n 'specialty': self.speciality,\n 'title': title,\n 'description': description,\n 'abstract': abstract,\n 'keywords': keywords,\n 'is_home': is_home,\n 'content': soup.prettify(),\n 'content_plain_text': page_text,\n 'sentences': sentences,\n 'bags_of_words_in_meta': {\n 'title': title_bags_of_words,\n 'description': description_bags_of_words,\n 'abstract': abstract_bags_of_words,\n 'keywords': keywords\n }\n }\n }, upsert=True)\n\n\n def find_sentences(self, page_content):\n # http://stackoverflow.com/questions/36610179/how-to-get-the-dependency-tree-with-spacy/36612605\n\n # page_content = page_content.lower()\n\n # lines = page_content.split('\\n')\n\n # remove lines with less than 4 words\n # processed_text = \"\"\n # for line in page_content.split('\\n'):\n # if line.split(' ') >= 4:\n # processed_text += line\n\n if isinstance(page_content, str):\n page_content = page_content.decode('utf-8')\n\n doc = self.nlp(page_content.replace('\\n','.\\n'))\n\n sents = set()\n\n for sent in doc.sents:\n for token in sent:\n\n # Phasal verb?\n if token.dep_ == \"prt\" and token.head.pos_ == \"VERB\" :\n verb = token.head.orth_\n particle = token.orth_\n phrasal_verb = ' '.join([verb, particle])\n if phrasal_verb in self.verbs_to_find:\n sents.add(sent.string)\n\n elif token.pos == VERB and \\\n token.lemma_ in self.verbs_to_find:\n sents.add(sent.string)\n\n return list(sents) if len(list(sents)) > 0 else None\n # for token in sent:\n # if token.is_alpha:\n #\n # sentences_list = []\n # for line in lines:\n # 
sentences_list.extend(word_tokenize(sentence) for sentence in sent_tokenize(line))\n #\n # parser = nltk.ChartParser(gro)\n # for sentence in sentences_list:\n #\n\n # return sentences_list\n\n def decompose_sentences(self, sentences):\n \"\"\"\n For each sentence we are going to create different bags of words based\n on the kind of the meaning/content:\n\n entities: {\n persons:\n organizations:\n locations:\n products:\n events:\n work_of_art:\n languages:\n },\n\n noun_chunks: {\n }\n\n nouns: {\n }\n\n verbs: {\n }\n\n Supported entities\n\n PERSON\tPeople, including fictional.\n NORP\tNationalities or religious or political groups.\n FACILITY\tBuildings, airports, highways, bridges, etc.\n ORG\tCompanies, agencies, institutions, etc.\n GPE\tCountries, cities, states.\n LOC\tNon-GPE locations, mountain ranges, bodies of water.\n PRODUCT\tObjects, vehicles, foods, etc. (Not services.)\n EVENT\tNamed hurricanes, battles, wars, sports events, etc.\n WORK_OF_ART\tTitles of books, songs, etc.\n LANGUAGE\tAny named language.\n\n :param sentences: the list of sentences\n :return: a dictionary with the different types of bags of words\n \"\"\"\n\n sentences_list = []\n\n if sentences is not None:\n for sentence in sentences:\n\n sentence_data = {\n 'sentence': sentence,\n 'bags_of_words': None\n }\n\n bags_of_words = {\n 'all': [],\n 'entities': {\n 'PERSON': [],\n 'NORP': [],\n 'FACILITY': [],\n 'ORG': [],\n 'GPE': [],\n 'LOC': [],\n 'PRODUCT': [],\n 'EVENT': [],\n 'WORK_OF_ART': [],\n 'LANGUAGE': []\n },\n 'noun_chunks': [],\n 'VERB': [],\n 'NOUN': []\n }\n\n doc = self.nlp(sentence if isinstance(sentence, unicode) else sentence.decode('utf-8'))\n\n # process the entities of the sentence\n for entity in doc.ents:\n if entity.label_ in ['PERSON', 'NORP', 'FACILITY', 'ORG',\n 'GPE', 'LOC', 'PRODUCT', 'EVENT',\n 'WORK_OF_ART', 'LANGUAGE']:\n entity_val = self.clean_stopwords(entity.string.lower())\n if len(entity_val.strip()) > 0:\n synonyms = self.get_synonyms(sentence, entity_val)\n bags_of_words['entities'][entity.label_].append((entity_val, synonyms))\n bags_of_words['all'].append((entity_val, synonyms))\n\n # process the noun chunks of the sentence\n for noun_chunk in doc.noun_chunks:\n # Lemmatizing the nouns (steamming)\n noun_chunk_val = self.clean_stopwords(noun_chunk.lemma_.lower())\n if len(noun_chunk_val.strip()) > 0:\n synonyms = self.get_synonyms(sentence, noun_chunk_val)\n\n bags_of_words['noun_chunks'].append((noun_chunk_val, synonyms))\n bags_of_words['all'].append((noun_chunk_val, synonyms))\n\n # process verbs and nouns of the sentence\n for word in doc:\n if word.pos_ in ['VERB', 'NOUN']:\n # Lemmatizing the words (stemming)\n word_val = self.clean_stopwords(word.lemma_.lower())\n if len(word_val.strip()) > 0:\n synonyms = self.get_synonyms(sentence, word_val)\n\n bags_of_words[word.pos_].append((word_val, synonyms))\n bags_of_words['all'].append((word_val, synonyms))\n\n sentence_data['bags_of_words'] = bags_of_words\n sentences_list.append(sentence_data)\n\n return sentences_list\n\n def clean_stopwords(self, text):\n return ' '.join([w for w in text.split(' ') if w.lower() not in self.stop_words])\n\n\n def get_synonyms(self, sentence, word):\n from pywsd.lesk import simple_lesk\n\n synonyms = set()\n\n if isinstance(sentence, str):\n sentence = sentence.decode('utf-8')\n\n if isinstance(word, str):\n word = word.decode('utf-8')\n\n\n synset = simple_lesk(sentence, word)\n if synset is not None:\n for synonym in synset.lemma_names():\n synonyms.add(synonym.replace('_', ' 
'))\n\n # for idx, synset in enumerate(wordnet.synsets(word)):\n # for synonym in synset.lemma_names():\n # synonyms.add(synonym.replace('_', ' '))\n\n return list(synonyms)\n\n def get_page_meta(self, meta_name, page_html):\n soup = BeautifulSoup(page_html)\n\n value = \"\"\n for meta in soup.findAll(\"meta\"):\n metaname = meta.get('name', '').lower()\n metaprop = meta.get('property', '').lower()\n if meta_name == metaname or metaprop.find(meta_name) > 0:\n if 'content' in meta.__dict__['attrs']:\n try:\n value = ' '.join([value, meta['content'].strip().encode('utf-8')])\n except:\n self.logger.error(\"Error looking for [%s] in the metadata. Meta: [%s]\" % (meta_name, meta))\n raise Exception(\"Error looking for [%s] in the metadata. Meta: [%s]\" % (meta_name, meta))\n\n return value.strip() if value != \"\" else None\n\n\n def load_jim_companies(self):\n companies_collection = \\\n self.gatherer_database['consolidated_company']\n\n companies_by_id = {}\n\n with open('wp.txt', 'rb') as file:\n for i, line in enumerate(file):\n values = line.split(',')\n company_id = values[0].replace('\"', '')\n webpage = values[1].replace('\"', '')\n if len(webpage.strip()) > 0:\n if webpage.startswith('http//'):\n webpage = webpage.replace('http//', 'http://')\n if webpage.startswith('https//'):\n webpage = webpage.replace('https//', 'https://')\n if not webpage.startswith('http://') and not webpage.startswith('https://'):\n webpage = \"http://%s\" % webpage\n\n companies_by_id[company_id] = {\n 'company_id': company_id,\n 'webpage': webpage,\n 'top_speciality': self.speciality,\n }\n\n # Adding the website information from the consolidated_company collection\n companies_domain = companies_collection.find(\n {\n \"company_id\": {\n '$in': [company['company_id'] for (company_id, company) in\n companies_by_id.iteritems()]},\n \"webpage\": {'$exists': True},\n },\n {\"company_id\": 1, \"company_name\": 1})\n\n for company in companies_domain:\n companies_by_id[company['company_id']]['company_name'] = company['company_name']\n\n self.logger.info(\n \"Companies with website informed: [%d]\" % len(companies_by_id))\n\n return companies_by_id\n\n def load_companies(self):\n\n companies_collection = \\\n self.gatherer_database['consolidated_company']\n companies_top_speciality_collection = \\\n self.gatherer_database['company_specialities']\n\n # Looking for all the companies in Machine Learning\n companies = companies_top_speciality_collection.find(\n {\"tf_idf.top_tag\": self.speciality},\n {\"company_id\": 1, \"tf_idf.top_tag\": 1})\n\n companies_by_id = {}\n\n for company in companies:\n companies_by_id[company['company_id']] = {\n 'company_id': company['company_id'],\n 'top_speciality': company['tf_idf']['top_tag']\n }\n\n # Adding the website information from the consolidated_company collection\n companies_domain = companies_collection.find(\n {\n \"company_id\": {\n '$in': [company['company_id'] for (company_id, company) in\n companies_by_id.iteritems()]},\n \"webpage\": {'$exists': True},\n },\n {\"company_id\": 1, \"company_name\": 1, \"webpage\": 1})\n\n for company in companies_domain:\n companies_by_id[company['company_id']]['company_name'] = company['company_name']\n webpage = company['webpage']\n if webpage.startswith('http//'):\n webpage = webpage.replace('http//', 'http://')\n if webpage.startswith('https//'):\n webpage = webpage.replace('https//', 'https://')\n if not webpage.startswith('http://') and not webpage.startswith(\n 'https://'):\n webpage = \"http://%s\" % webpage\n\n 
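# keep the normalized URL on the in-memory record so the later crawl request has a valid scheme\n 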
companies_by_id[company['company_id']]['webpage'] = webpage\n\n self.logger.info(\n \"Companies with website informed: [%d]\" % len(companies_by_id))\n\n return companies_by_id","repo_name":"preseries/company_web_scrapy","sub_path":"webscrawler/webscrawler/spiders/website_crawler.py","file_name":"website_crawler.py","file_ext":"py","file_size_in_byte":30614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"9776743474","text":"import torch\nfrom torch import nn\nfrom model_no_cord import UNetModel\nfrom ny_ladda_data import ladda\nfrom loss_function import distance_loss, binary_loss\nfrom real_loss import real_loss\n\ndevice = torch.device('mps')\n\nx = UNetModel(4, 3, 20, 64).to(device)\n\n\n\n\nloss = 0\nnum_epochs = 1\nbatch_size = 1\noptim = torch.optim.SGD(x.parameters(), lr=0.00000001, momentum=0.9)\nx.train()\nloss = 0\ntrue_loss = 0\nclassification_loss = 0\n\nnum_divide_data = 50\n\n\nfor epoch in range(num_epochs):\n percent_fetched = 0\n for data_round in range(num_divide_data):\n print('Collect Data') \n train_data = ladda(typ='träning', seed=2, validation_round=1, data_divided_into=num_divide_data, data_pass=data_round)\n print('Data Collected')\n for i in range(len(train_data)):\n output_cord = x(train_data[i][0].to(device))\n\n\n loss += distance_loss(output_cord, train_data[i][1].to(device))\n\n\n\n if i % (batch_size) == (batch_size-1) or i == (len(train_data)-1):\n print(data_round,i,'Cord.loss:', loss/batch_size)\n\n optim.zero_grad()\n loss.backward()\n optim.step()\n loss = 0\n true_loss = 0\n classification_loss = 0\n\ntorch.save(x.state_dict(), \"landmark_model.pth\")","repo_name":"hedbergare/IIS_Signe_Project","sub_path":"LE/train_no_cord.py","file_name":"train_no_cord.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"23314824758","text":"from nsds_lab_to_nwb.metadata.resources import read_metadata_resource\n\n\ndef check_stimulus_name(stim_name_input):\n stim_directory = read_metadata_resource('list_of_stimuli')\n\n if stim_name_input in stim_directory.keys():\n # if there is a matching key, just read the corresponding entry\n return stim_name_input, stim_directory[stim_name_input]\n # if stim_name does not match any key, try the alternative names\n for key, stim_info in stim_directory.items():\n for alt_name in stim_info['alt_names']:\n if stim_name_input == alt_name:\n return key, stim_info\n raise ValueError(f\"Stimulus type '{stim_name_input}' not found.\")\n\n\ndef get_stimulus_metadata(stim_name_input):\n stim_name, stim_info = check_stimulus_name(stim_name_input)\n stim_info_full = {'name': stim_name}\n stim_info_full.update(**stim_info)\n return stim_info_full\n","repo_name":"BouchardLab/nsds_lab_to_nwb","sub_path":"nsds_lab_to_nwb/metadata/stim_name_helper.py","file_name":"stim_name_helper.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"4003766754","text":"from tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops.distributions import kullback_leibler\nfrom tensorflow.python.ops.distributions import normal\nfrom tensorflow.python.platform import test\n\n# pylint: disable=protected-access\n_DIVERGENCES = kullback_leibler._DIVERGENCES\n_registered_kl = kullback_leibler._registered_kl\n\n# pylint: 
enable=protected-access\n\n\nclass KLTest(test.TestCase):\n\n def testRegistration(self):\n\n class MyDist(normal.Normal):\n pass\n\n # Register KL to a lambda that spits out the name parameter\n @kullback_leibler.RegisterKL(MyDist, MyDist)\n def _kl(a, b, name=None): # pylint: disable=unused-argument,unused-variable\n return name\n\n a = MyDist(loc=0.0, scale=1.0)\n self.assertEqual(\"OK\", kullback_leibler.kl_divergence(a, a, name=\"OK\"))\n\n @test_util.run_deprecated_v1\n def testDomainErrorExceptions(self):\n\n class MyDistException(normal.Normal):\n pass\n\n # Register KL to a lambda that spits out the name parameter\n @kullback_leibler.RegisterKL(MyDistException, MyDistException)\n # pylint: disable=unused-argument,unused-variable\n def _kl(a, b, name=None):\n return array_ops.identity([float(\"nan\")])\n\n # pylint: disable=unused-argument,unused-variable\n\n with self.cached_session():\n a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=False)\n kl = kullback_leibler.kl_divergence(a, a, allow_nan_stats=False)\n with self.assertRaisesOpError(\n \"KL calculation between .* and .* returned NaN values\"):\n self.evaluate(kl)\n with self.assertRaisesOpError(\n \"KL calculation between .* and .* returned NaN values\"):\n a.kl_divergence(a).eval()\n a = MyDistException(loc=0.0, scale=1.0, allow_nan_stats=True)\n kl_ok = kullback_leibler.kl_divergence(a, a)\n self.assertAllEqual([float(\"nan\")], self.evaluate(kl_ok))\n self_kl_ok = a.kl_divergence(a)\n self.assertAllEqual([float(\"nan\")], self.evaluate(self_kl_ok))\n cross_ok = a.cross_entropy(a)\n self.assertAllEqual([float(\"nan\")], self.evaluate(cross_ok))\n\n def testRegistrationFailures(self):\n\n class MyDist(normal.Normal):\n pass\n\n with self.assertRaisesRegex(TypeError, \"must be callable\"):\n kullback_leibler.RegisterKL(MyDist, MyDist)(\"blah\")\n\n # First registration is OK\n kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)\n\n # Second registration fails\n with self.assertRaisesRegex(ValueError, \"has already been registered\"):\n kullback_leibler.RegisterKL(MyDist, MyDist)(lambda a, b: None)\n\n def testExactRegistrationsAllMatch(self):\n for (k, v) in _DIVERGENCES.items():\n self.assertEqual(v, _registered_kl(*k))\n\n def _testIndirectRegistration(self, fn):\n\n class Sub1(normal.Normal):\n\n def entropy(self):\n return \"\"\n\n class Sub2(normal.Normal):\n\n def entropy(self):\n return \"\"\n\n class Sub11(Sub1):\n\n def entropy(self):\n return \"\"\n\n # pylint: disable=unused-argument,unused-variable\n @kullback_leibler.RegisterKL(Sub1, Sub1)\n def _kl11(a, b, name=None):\n return \"sub1-1\"\n\n @kullback_leibler.RegisterKL(Sub1, Sub2)\n def _kl12(a, b, name=None):\n return \"sub1-2\"\n\n @kullback_leibler.RegisterKL(Sub2, Sub1)\n def _kl21(a, b, name=None):\n return \"sub2-1\"\n\n # pylint: enable=unused-argument,unused_variable\n\n sub1 = Sub1(loc=0.0, scale=1.0)\n sub2 = Sub2(loc=0.0, scale=1.0)\n sub11 = Sub11(loc=0.0, scale=1.0)\n\n self.assertEqual(\"sub1-1\", fn(sub1, sub1))\n self.assertEqual(\"sub1-2\", fn(sub1, sub2))\n self.assertEqual(\"sub2-1\", fn(sub2, sub1))\n self.assertEqual(\"sub1-1\", fn(sub11, sub11))\n self.assertEqual(\"sub1-1\", fn(sub11, sub1))\n self.assertEqual(\"sub1-2\", fn(sub11, sub2))\n self.assertEqual(\"sub1-1\", fn(sub11, sub1))\n self.assertEqual(\"sub1-2\", fn(sub11, sub2))\n self.assertEqual(\"sub2-1\", fn(sub2, sub11))\n self.assertEqual(\"sub1-1\", fn(sub1, sub11))\n\n def testIndirectRegistrationKLFun(self):\n 
self._testIndirectRegistration(kullback_leibler.kl_divergence)\n\n def testIndirectRegistrationKLSelf(self):\n self._testIndirectRegistration(\n lambda p, q: p.kl_divergence(q))\n\n def testIndirectRegistrationCrossEntropy(self):\n self._testIndirectRegistration(\n lambda p, q: p.cross_entropy(q))\n\n def testFunctionCrossEntropy(self):\n self._testIndirectRegistration(kullback_leibler.cross_entropy)\n\n\nif __name__ == \"__main__\":\n test.main()\n","repo_name":"tensorflow/tensorflow","sub_path":"tensorflow/python/kernel_tests/distributions/kullback_leibler_test.py","file_name":"kullback_leibler_test.py","file_ext":"py","file_size_in_byte":4520,"program_lang":"python","lang":"en","doc_type":"code","stars":178918,"dataset":"github-code","pt":"18"} +{"seq_id":"38445359392","text":"import testenv; testenv.configure_for_tests()\nimport unittest\n\nimport inheritance.alltests as inheritance\nimport sharding.alltests as sharding\n\ndef suite():\n modules_to_test = (\n 'orm.attributes',\n 'orm.query',\n 'orm.lazy_relations',\n 'orm.eager_relations',\n 'orm.mapper',\n 'orm.expire',\n 'orm.selectable',\n 'orm.collection',\n 'orm.generative',\n 'orm.lazytest1',\n 'orm.assorted_eager',\n\n 'orm.naturalpks',\n 'orm.sessioncontext',\n 'orm.unitofwork',\n 'orm.session',\n 'orm.cascade',\n 'orm.relationships',\n 'orm.association',\n 'orm.merge',\n 'orm.pickled',\n 'orm.memusage',\n\n 'orm.cycles',\n\n 'orm.entity',\n 'orm.compile',\n 'orm.manytomany',\n 'orm.onetoone',\n 'orm.dynamic',\n )\n alltests = unittest.TestSuite()\n for name in modules_to_test:\n mod = __import__(name)\n for token in name.split('.')[1:]:\n mod = getattr(mod, token)\n alltests.addTest(unittest.findTestCases(mod, suiteClass=None))\n alltests.addTest(inheritance.suite())\n alltests.addTest(sharding.suite())\n return alltests\n\n\nif __name__ == '__main__':\n testenv.main(suite())\n","repo_name":"arianepaola/tg2jython","sub_path":"sqlalchemy/test/orm/alltests.py","file_name":"alltests.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"2069546392","text":"from sys import version\r\nimport numpy as np\r\nfrom numpy.random.mtrand import multinomial \r\nimport pandas as pd\r\nfrom sklearn.datasets import fetch_openml\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom PIL import Image\r\nimport PIL.ImageOps\r\n\r\nX = np.load('image.npz')['arr_0']\r\ny = pd.read_csv(\"lables.csv\")[\"labels\"]\r\n\r\nprint(pd.Series(y).value_counts())\r\n\r\nclasses = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\r\n\r\nnclasses = len(classes)\r\n\r\nX_train,X_test,y_train,y_test = train_test_split(X,y,random_state = 9,train_size=3500,test_size = 500)\r\nX_train_scaled = X_train / 255.0\r\nX_test_scaled = X_test / 255.0\r\nclf = LogisticRegression(solver=\"saga\",multi_class=\"multinomial\").fit(X_train_scaled,y_train)\r\n\r\ndef getPrediction(image):\r\n im_open = Image.open(image)\r\n im_bw = im_open.convert(\"L\")\r\n im_bwrz = im_bw.resize((28,28),Image.ANTIALIAS)\r\n pixelFilter = 20\r\n minPixel = np.percentile(im_bwrz,pixelFilter)\r\n im_bwrzinv = np.clip(im_bwrz-minPixel,0,255)\r\n maxPixel = np.max(im_bwrz)\r\n im_bwrzinv = np.asarray(im_bwrzinv)/maxPixel\r\n testSample = np.array(im_bwrzinv).reshape(1,784)\r\n testPredict = clf.predict(testSample)\r\n return testPredict[0]\r\n\r\n 
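\r\n# usage sketch (hypothetical file name): getPrediction(open('A.png', 'rb')) should return a letter label such as 'A'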
\r\n\r\n\r\n","repo_name":"YASHBITU1/CLASS-125-MODEL","sub_path":"predictalpha.py","file_name":"predictalpha.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"42630100764","text":"import json\nimport logging\nimport traceback\nimport flask\nimport sys\nfrom flask import Flask\nfrom flask import request\nimport argparse\nfrom flask_restful import reqparse\n\nsys.path.append('../')\nfrom classifier.search_rules import all_rules\nfrom classifier.rule_insertion import main\nfrom classifier.rule_testing import test_new_rule\n\napp = Flask(__name__)\n\ndef arguments():\n\n parser = reqparse.RequestParser(bundle_errors=True)\n parser.add_argument('Error_Type', type=str, default=\"None\",choices=['non DCI','DCI'],help='Error label')\n parser.add_argument('Job_ID', type=str, default=\"0\")\n parser.add_argument('Stage_of_Failure', type=str, default=\"0\",help='Stage_of_Failure')\n parser.add_argument('Error_Message', type=str, default=\"0\",help='Error content')\n parser.add_argument('Is_user_text', type=int,choices=[0,1],default=0, help='user_text.yml in failed bucket')\n parser.add_argument('Is_SUT', type=int,choices=[0,1],default=0, help='SUT.yml in failed bucket')\n parser.add_argument('Is_install', type=int,choices=[0,1],default=0, help='install.yml in failed bucket')\n parser.add_argument('Is_logs', type=int,choices=[0,1],default=0, help='logs.yml in failed bucket')\n parser.add_argument('Is_dci_rhel_cki', type=int,choices=[0,1],default=0, help='Failed task dci-rhel-cki')\n\n arg = parser.parse_args()\n return arg\n\n@app.route('/rules', methods=['GET'])\ndef show_all_rules():\n rules,flag = all_rules()\n \n if(flag == False):\n return flask.Response(json.dumps(rules), 400, content_type='application/json')\n else:\n return flask.Response(json.dumps(rules), 200, content_type='application/json')\n\n\n@app.route('/rules', methods=['POST'])\ndef create_new_rule():\n args = arguments()\n try:\n response,flag=main(args)\n res = flask.request.json\n if(flag == False):\n return flask.Response(json.dumps(response), 400, content_type='application/json') \n else:\n return flask.Response(json.dumps(res), 201, content_type='application/json')\n \n except Exception as err:\n return flask.Response(json.dumps({\"message\": \"Rule creation failed\"}), 400, content_type='application/json')\n\n@app.route('/rules/test', methods=['POST'])\ndef test_rule():\n args = arguments()\n try:\n response,flag = test_new_rule(args)\n if (flag == False):\n return flask.Response(json.dumps(response), 400, content_type='application/json')\n else:\n res = flask.request.json\n return flask.Response(json.dumps(res),200)\n \n except Exception as err:\n return flask.Response(json.dumps({\"message\":\"Rule testing failed\"}), 400, content_type='application/json')\n \n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=1234, use_reloader=True)\n","repo_name":"ShubhangiJ01/dci-error-log-classification","sub_path":"classifier/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19336575694","text":"'''\nWe use EOF handling when\nwe do not know how many lines of input we will receive!\n\n'''\n\n\ns=''\nwhile True:\n try:\n line=input()\n s += line\n except EOFError:\n break\narr = list(map(int, 
s.split(',')))\nprint(sum(arr))","repo_name":"Youngseo-Jeon0313/baekjoon","sub_path":"백준10823.py","file_name":"백준10823.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"18"} +{"seq_id":"1973567192","text":"from assert_helpers import assert_difference, assert_no_difference\nfrom ekklesia_portal.datamodel import ArgumentRelation, ArgumentVote\nfrom ekklesia_portal.enums import ArgumentType\nfrom webtest_helpers import assert_deform\n\n\ndef test_argumentrelation(client, argument_relation):\n proposition = argument_relation.proposition\n argument = argument_relation.argument\n res = client.get(f\"/p/{proposition.id}/a/{argument.id}\")\n assert 'argument_vote_btn' not in res, 'vote button not present'\n html = res.html\n proposition_title_link = html.find(class_=\"proposition_title\").find(\"a\")\n assert proposition_title_link.text == proposition.title\n assert str(proposition.id) in proposition_title_link[\"href\"]\n assert html.find(class_=\"argument_title\").find(\"a\").text == argument.title\n assert html.find(class_=\"argument_abstract\").text == argument.abstract\n assert html.find(class_=\"argument_details_extended\").text == argument.details\n\n\ndef test_argumentrelation_with_logged_in_user(client, argument_relation, logged_in_user):\n proposition = argument_relation.proposition\n argument = argument_relation.argument\n res = client.get(f\"/p/{proposition.id}/a/{argument.id}\")\n assert 'argument_vote_btn' in res, 'vote button present'\n\n\ndef test_new(client, logged_in_user, proposition):\n res = client.get(f'/p/{proposition.id}/a/+new?relation_type={ArgumentType.PRO.name}')\n\n expected = {'proposition_id': proposition.id, 'relation_type': ArgumentType.PRO.name}\n assert_deform(res, expected)\n\n\ndef test_create(db_query, client, logged_in_user, proposition):\n data = {\n 'proposition_id': proposition.id,\n 'relation_type': ArgumentType.PRO.name,\n 'title': 'test title',\n 'abstract': 'test abstract',\n 'details': 'test details'\n }\n\n with assert_difference(db_query(ArgumentRelation).count, 1):\n client.post(f\"/p/{proposition.id}/a/\", data, status=302)\n\n\ndef test_does_not_create_without_title(db_query, client, logged_in_user, proposition):\n data = {\n 'proposition_id': proposition.id,\n 'relation_type': ArgumentType.PRO.name,\n 'abstract': 'test abstract',\n 'details': 'test details'\n }\n\n with assert_no_difference(db_query(ArgumentRelation).count):\n client.post(f\"/p/{proposition.id}/a/\", data, status=200)\n\n\ndef test_vote(db_query, client, logged_in_user, argument_relation):\n url = f\"/p/{argument_relation.proposition_id}/a/{argument_relation.argument_id}/vote\"\n client.post(url, {'weight': 1}, status=302)\n qq = db_query(ArgumentVote).filter_by(member_id=logged_in_user.id, relation_id=argument_relation.id).one\n vote = qq()\n assert vote.weight == 1\n\n client.post(url, {'weight': 0}, status=302)\n vote = qq()\n assert vote.weight == 0\n\n client.post(url, {'weight': -1}, status=302)\n vote = qq()\n assert vote.weight == -1\n\n client.post(url, {'weight': -2}, status=400)\n vote = qq()\n assert vote.weight == -1\n","repo_name":"edemocracy/ekklesia-portal","sub_path":"tests/concepts/argument_relation/test_argument_relation.py","file_name":"test_argument_relation.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"18"} +{"seq_id":"32795073568","text":"# -*- coding: utf-8 
-*-\n\"\"\"\nCreated on Mon Jan 9 22:44:49 2023\n\n@author: USER\n\"\"\"\n\n# Problem Description\n# You are given a string A which is a serialized string. You have to restore the original array of strings.\n# The string in the output array should only have lowercase english alphabets.\n# Serialization: Scan each element in a string, calculate its length and append it with a string and a element separator or deliminator (the deliminator is ~). We append the length of the string so that we know the length of each element.\n# For example, for a string 'interviewbit', its serialized version would be 'interviewbit12~'.\n\n# Problem Constraints\n# 1 <= |A| <= 106\n\n# Input Format\n# The first argument is the string A.\n# Output Format\n# Return an array of strings which are deserialized.\n\n# Example Input\n# Input 1:\nA = 'scaler6~academy7~'\n# Input 2:\nA = 'interviewbit12~'\n\n\n# Example Output\n# Output 1:\n# ['scaler', 'academy']\n# Output 2:\n\n# ['interviewbit']\n# Example Explanation\n# Explanation 1:\n# Length of 'scaler' is 6 and academy is 7. So, the resulting string is scaler6~academy7~.\n# We hve to reverse the process.\n# Explanation 2:\n\n# Explained in the description above.\n\n# class Solution:\n # @param A : string\n # @return a list of strings\n # def deserialize(self, A):\n \n# def serialize(A):\n# n_A = len(A)\n# result = \"\"\n# for i in range(n_A):\n# n_curr_i = len(A[i])\n# # result = result + \"~\" + A[i] + str(n_curr_i)\n# result = result + A[i] + str(n_curr_i) + \"~\"\n# # n_result = len(result)\n# # return result[1:]\n# return result\n# serialize(A)\n\ndef deserialize(A):\n import string\n # n_A = len(A)\n result = []\n counter = 0\n lower_chars = string.ascii_lowercase\n # for i in range(n_A):\n # ix = 0\n while (counter > -1):\n counter = A.find(\"~\")\n # print(counter)\n curr_ele = ''\n # for i in range(ix, ix + counter):\n for i in range(counter):\n if A[i] in lower_chars:\n curr_ele = curr_ele + A[i]\n # ix += 1 \n # print(curr_ele)\n result.append(curr_ele)\n try: \n A = A[counter+1:]\n except:\n break\n n_result = len(result)\n return(result[:n_result-1]) \n \ndeserialize(A) \n\n\n# Editorial:\n# class Solution:\n# # @param A : string\n# # @return a list of strings\n# def deserialize(self, A):\n# i=j=0\n# ans=[]\n# while (j Solution::deserialize(string A) {\n# vector ans;\n# int i = -1;\n# int j = 0;\n# while(j < A.size()){\n# while(A[j] <= 122 && A[j] >= 97){\n# j++;\n# }\n# ans.push_back(A.substr(i + 1, j - i - 1));\n# while(j < A.size() && A[j] != '~'){\n# j++;\n# }\n# i = j++;\n# } \n# return ans;\n# }\n \n# /**\n# * @input A : String\n# * \n# * @Output string array.\n# */\n# func deserialize(A string ) ([]string) {\n# words := make([]string,0)\n# word := \"\"\n# first := true\n# for i:=0; i 122){\n# if first == true {\n# words = append(words, word)\n# first = false\n# }\n# word = \"\"\n# continue;\n# }\n# first = true\n# word += string(A[i])\n# }\n# return words\n# }\n\n\n","repo_name":"dmtwong/Brush_up","sub_path":"Python/string_deserialize.py","file_name":"string_deserialize.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"6374999164","text":"import timeit\nfrom timeit import Timer\n\ndef fibs1(number):\n if number <= 1:\n return number\n else:\n return int(fibs1(number-1))+int(fibs1(number-2))\n\n\nresultlist = {0:0, 1:1}\ndef fibs2(number):\n if number in resultlist:\n return resultlist[number]\n\n result = fibs2(number-1) + fibs2(number-2)\n 
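# memoization: cache the result so each value is computed only once\n 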
resultlist[number] = result\n return result\n\n\ndef main():\n\n t1 = Timer(stmt=\"fibs1(30)\", setup=\"from __main__ import fibs1\")\n print (\"fibs1:\", t1.timeit(number=1), \"seconds\")\n t2 = Timer(stmt=\"fibs2(30)\", setup=\"from __main__ import fibs2\")\n print (\"fibs2:\", t2.timeit(number=1), \"seconds\")\n\nif __name__ == '__main__':\n main()","repo_name":"clelandgt/Python","sub_path":"Assignments/Recursion/Fibonacci.py","file_name":"Fibonacci.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"20737124362","text":"'''\nReturn the largest sum of num consecutive numbers (sliding window)\n'''\n\ndef maxSubArray(arr, num):\n if len(arr) < num:\n return None\n # make a max value\n maxSum = 0\n # seed the window: sum the first num elements of the array\n for i in range(0, num, 1):\n maxSum += arr[i]\n tempSum = maxSum\n for i in range(num, len(arr), 1):\n print(arr[i], '/', arr[i - num])\n tempSum = tempSum - arr[i - num] + arr[i]\n maxSum = max(maxSum, tempSum)\n\n # slide the window one step at a time, keeping the best sum seen\n # when we get to the end of the array, return the largest window sum\n return maxSum\n\nprint(maxSubArray([1,2,3,4,5],3))\nprint(maxSubArray([100,200,300,400], 2))","repo_name":"irisjitomo/HackerRankStudy","sub_path":"Section4-OptionalChallenges/maxSubArray.py","file_name":"maxSubArray.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"12734514143","text":"import re\nfrom pyspark import SparkConf, SparkContext\n\n\ndef normalizeWords(text):\n return re.compile(r'\\W+', re.UNICODE).split(text.lower())\n\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"WordCount\")\nsc = SparkContext(conf = conf)\n\ninput = sc.textFile(\"file:///sparkcourse/book.txt\")\nwords = input.flatMap(normalizeWords)\n\n# (x, 1) extra 1 counts up the number of data\n# x key is a word, and x + y counts up\nwordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)\n\n# Sort by the count in an ascending order\n# x[1] is count, x[0] is a word\nwordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey()\nresults = wordCountsSorted.collect()\n\nfor result in results:\n count = str(result[0])\n word = result[1].encode('ascii', 'ignore')\n if (word):\n print(word.decode() + \":\\t\\t\" + count)\n","repo_name":"yukikitayama/spark","sub_path":"activity/word-count-better-sorted.py","file_name":"word-count-better-sorted.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"17871617905","text":"\"\"\"\nThis tests a simple example of using elm-py.\n\"\"\"\n\nimport sys\nsys.path.append('../src')\n\nfrom Elm import F, wrap, pipe\nfrom Kernel import toElm, toPy\nimport List\nimport Tuple\n\n# We could do this F-wrapping more automatically, but\n# here we do it explicitly.\nList.indexedMap = F(List.indexedMap)\nList.sortBy = F(List.sortBy)\nList.map = F(List.map)\n\n@wrap(toElm, toPy)\ndef ranks(lst):\n \"\"\"\n This returns a list of integers representing the\n rank of each element in the input list. 
The rank of 0\n is for the \"lowest\" element in the list, and so on.\n \"\"\"\n return pipe(\n lst, [\n (List.indexedMap) (Tuple.pair),\n (List.sortBy) (Tuple.second),\n (List.map) (Tuple.first),\n (List.indexedMap) (Tuple.pair),\n (List.sortBy) (Tuple.second),\n (List.map) (Tuple.first),\n ])\n\nif __name__ == '__main__':\n res = ranks([77, 0, 66, 22, 55, 99, 44, 33, 11, 88])\n assert res == [7, 0, 6, 2, 5, 9, 4, 3, 1, 8]\n\n","repo_name":"showell/elm-py","sub_path":"tests/testRanks.py","file_name":"testRanks.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"18"} +{"seq_id":"74281606759","text":"################################################################################\n# Install gspread using pip : python -m pip install gpread\n# Add the Google API for Python : python -m pip google-api-python-client\n# Google token 'C:/Learn/API_Scripting_Python/google_api/clientsecret.json'\n# Spreadsheet in Google drive ServiceAccountTest\n# Link https://gspread.readthedocs.io/en/latest/\n################################################################################\n\n\n# Import gspread\nimport gspread\n\n\n# Set up the authentication import the ServiceAccountCredentials() module\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n#Set up the credentials\n\n# Scope - tells what we are interacting with\nscope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n\n# Create credentials using the Google API token\ncreds = ServiceAccountCredentials.from_json_keyfile_name('C:/Learn/API_Scripting_Python/google_api/clientsecret.json',scope)\n\n\n# Create client using the credentials\nclient = gspread.authorize(creds)\n\n# Create sheet\n# client.open() tells the client to open the spreadsheet, sheet1\nsheet = client.open('ServiceAccountTest').sheet1\n# Update the spreadsheet e.g. Adds 'test' to cell 1,1\nsheet.update_cell(1,1,\"test\")\n\n# Read data from the sheet e.g. from ServiceAccountTest row 1\n# sheet.row_values(1)\nprint(sheet.row_values(1))\n\n# Read all values from the spreadsheet\n# sheet.get_all_values()\nprint(sheet.get_all_values())\n\n# Complex Data using enumerate() function\n# Write 1,2, and 3 in cells 11, 12 and 13\n# 4,5, and 6 in cells 21 22 and 23, respectively\n# 1. Loops over the list\n# 2. 
Gives the value at each point plus the index that corresponds to it\n# row_index is 0 or 1 (two rows of data), values are 1..6\n# my_data[0][0] -> cell (1,1) = 1 my_data[1][0] -> cell (2,1) = 4\n# my_data[0][1] -> cell (1,2) = 2 my_data[1][1] -> cell (2,2) = 5\n# my_data[0][2] -> cell (1,3) = 3 my_data[1][2] -> cell (2,3) = 6\n#\n\nmy_data = [[1,2,3],[4,5,6]]\nfor row_index, row in enumerate(my_data):\n for col_index, value in enumerate(row):\n sheet.update_cell(row_index+1, col_index+1, value)\n \n\n\n","repo_name":"greenwarrior/AutomationTesting_PYTHON","sub_path":"API_Scripting_Python/2_6_Google_Sheets_API.py","file_name":"2_6_Google_Sheets_API.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"9314052640","text":" # sort keys\nd = {5:'pen',10:'pencil',2:'eraser'}\nout = sorted(d.keys())\nprint(out)\n# sort values\nd = {5:'pen',10:'pencil',2:'eraser'}\nout = sorted(d.values())\nprint(out)\n# sort items\n\nd = {5:'pen',10:'pencil',2:'eraser'}\nout = sorted(d.items())\nprint(out)\n","repo_name":"palakkatara14/python-program","sub_path":"sort items ,key ,values in dictionary.py","file_name":"sort items ,key ,values in dictionary.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"34904273197","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Utilities used in testing.\"\"\"\n\nfrom glob import glob\nimport inspect\nimport logging\nimport numbers\nimport os\nimport random\nimport re\nimport string\nimport sys\nfrom gmusicapi.compat import unittest\n\nfrom gmusicapi.api import Api\nfrom gmusicapi.exceptions import CallFailure, NotLoggedIn\nfrom gmusicapi.protocol.metadata import md_expectations\n\nlog = logging.getLogger(__name__)\n\n#A regex for the gm id format, eg:\n#c293dd5a-9aa9-33c4-8b09-0c865b56ce46\nhex_set = \"[0-9a-f]\"\ngm_id_regex = re.compile((\"{h}{{8}}-\" +\n (\"{h}{{4}}-\" * 3) +\n \"{h}{{12}}\").format(h=hex_set))\n\n#Test files are located in the same directory as this file.\ncwd = os.getcwd()\nos.chdir(os.path.dirname(sys.argv[0]))\n\naudio_filenames = glob(u'audiotest*')\nmp3_filenames = [fn for fn in audio_filenames if fn.endswith('.mp3')]\nsmall_mp3 = u'audiotest_small.mp3'\nimage_filename = 'imagetest_10x10_check.png'\n\nos.chdir(cwd)\n\n#Get the full path of the test files.\n#Can't use abspath since this is relative to where _this_ file is,\n# not necessarily the calling curdir.\npath = os.path.realpath(__file__)\nreal_path = lambda lp: path[:string.rfind(path, os.sep)] + os.sep + lp\n\nmp3_filenames = map(real_path, mp3_filenames)\naudio_filenames = map(real_path, audio_filenames)\nimage_filename = real_path(image_filename)\nsmall_mp3 = real_path(small_mp3)\n\n\nclass NoticeLogging(logging.Handler):\n \"\"\"A log handler that, if asked to emit, will set\n ``self.seen_message`` to True.\n \"\"\"\n\n def __init__(self):\n logging.Handler.__init__(self) # cannot use super in py 2.6; logging is still old-style\n self.seen_message = False\n\n def emit(self, record):\n self.seen_message = True\n\n\ndef new_test_api(**kwargs):\n \"\"\"Make an instance of a return-verified Api, login and return it.\n\n kwargs are passed through to api.login().\n \"\"\"\n\n api = UnitTestedApi(debug_logging=True)\n api.login(**kwargs)\n\n return api\n\n\ndef modify_md(md_name, val):\n \"\"\"Returns a value of the same type as val that will not equal val.\"\"\"\n\n #Check for metadata that must get specific values.\n if md_expectations[md_name].allowed_values is not None:\n 
#Assume old_val is a possible value, and return\n # the value one modulus index after it.\n\n possible = md_expectations[md_name].allowed_values\n val_i = 0\n try:\n val_i = possible.index(val)\n except ValueError:\n log.warning(\"non-allowed metadata value '%s' for key %s\", val, md_name)\n\n return possible[(val_i + 1) % len(possible)]\n\n #Generic handlers for other data types.\n if isinstance(val, basestring):\n return val + \"_mod\"\n\n #Need to check for bool first, bools are instances of Number for some reason.\n elif isinstance(val, bool):\n return not val\n elif isinstance(val, numbers.Number):\n return val + 1\n else:\n raise TypeError(\"modify expects only strings, numbers, and bools\")\n\n\ndef md_entry_same(entry_name, s1, s2):\n \"\"\"Returns (s1 and s2 have the same value for entry_name?, message).\"\"\"\n\n s1_val = s1[entry_name]\n s2_val = s2[entry_name]\n\n return (s1_val == s2_val, \"(\" + entry_name + \") \" + repr(s1_val) + \", \" + repr(s2_val))\n\n\ndef is_gm_id(s):\n \"\"\"Returns True if the given string is in Google Music id form.\"\"\"\n return re.match(gm_id_regex, s) is not None\n\n\ndef is_song(d):\n \"\"\"Returns True is the given dict is a GM song dict.\"\"\"\n #Not really precise, but should be good enough.\n return is_gm_id(d[\"id\"])\n\n\ndef is_song_list(lst):\n return all(map(is_song, lst))\n\n\ndef is_id_list(lst):\n \"\"\"Returns True if the given list is made up of all strings in GM id form.\"\"\"\n return all(map(is_gm_id, lst))\n\n\ndef is_id_pair_list(lst):\n \"\"\"Returns True if the given list is made up of all (id, id) pairs.\"\"\"\n a, b = zip(*lst)\n return is_id_list(a+b)\n\n\nclass enforced(object):\n \"\"\"A callable that enforces the return of a function with a predicate.\"\"\"\n def __init__(self, pred):\n self.pred = pred\n\n def __call__(self, f):\n def wrapped_f(*args, **kwargs):\n res = f(*args, **kwargs)\n if not self.pred(res):\n raise AssertionError # bad return format\n return res\n return wrapped_f\n\n#Return information for most api member functions.\nreturns_id = (\"change_playlist_name\",\n \"create_playlist\",\n \"delete_playlist\",\n \"copy_playlist\",\n \"change_playlist\")\n\nreturns_id_list = (\"change_song_metadata\",\n \"delete_songs\")\n\nreturns_songs = (\"get_all_songs\",\n \"get_playlist_songs\")\n\nreturns_id_pairs = (\"add_songs_to_playlist\",\n \"remove_songs_from_playlist\")\nfname_to_pred = {}\n\nfor fnames, pred in ((returns_id, is_gm_id),\n (returns_id_list, is_id_list),\n (returns_songs, is_song_list),\n (returns_id_pairs, is_id_pair_list)):\n for fname in fnames:\n fname_to_pred[fname] = pred\n\n\n#TODO this needs to go.\n\nclass UnitTestedApi(Api):\n \"\"\"An Api, with most functions wrapped to assert a proper return.\"\"\"\n\n def __getattribute__(self, name):\n orig = object.__getattribute__(self, name)\n #Enforce any name in the lists above with the right pred.\n if name in fname_to_pred:\n return enforced(fname_to_pred[name])(orig)\n else:\n return orig\n\n\nclass BaseTest(unittest.TestCase):\n \"\"\"Abstract class providing some useful features for testing the api.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"Init and log in to an api, then get the library and playlists.\"\"\"\n\n cls.api = new_test_api()\n\n if not cls.api.is_authenticated():\n raise NotLoggedIn\n\n #These are assumed to succeed, but errors here will prevent further testing.\n cls.library = cls.api.get_all_songs()\n\n #I can't think of a way to test auto playlists and instant mixes.\n cls.playlists = 
cls.api.get_all_playlist_ids()['user']\n\n @classmethod\n def tearDownClass(cls):\n \"\"\"Log out of the api.\"\"\"\n\n cls.api.logout()\n\n def setUp(self):\n \"\"\"Get a random song id.\"\"\"\n\n #This will fail if we have no songs.\n self.r_song_id = random.choice(self.library)['id']\n\n #---\n # Utility functions:\n #---\n\n def collect_steps(self, prefix):\n \"\"\"Yields the steps of a monolithic test in name-sorted order.\"\"\"\n\n methods = inspect.getmembers(self, predicate=inspect.ismethod)\n\n #Sort functions based on name.\n for name, func in sorted(methods, key=lambda m: m[0]):\n if name.startswith(prefix):\n yield name, func\n\n def run_steps(self, prefix):\n \"\"\"Run the steps defined by this prefix in order.\"\"\"\n\n for name, step in self.collect_steps(prefix):\n try:\n step()\n\n #Only catch exceptions raised from _our_ test code.\n #Other kinds of exceptions may be raised inside the code\n # being tested; those should be re-raised so we can trace them.\n except CallFailure as f:\n raise self.fail(\"test {} step {} failure: {}\".format(prefix, step, f))\n except AssertionError:\n raise # reraise so we can track down what went wrong\n","repo_name":"TimSimpsonR/Unofficial-Google-Music-API","sub_path":"gmusicapi/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"11215939440","text":"#!/usr/local/bin/python\n# coding: utf-8\n\nimport sys\nimport os\nimport subprocess\nimport traceback\n\nLOG = os.path.abspath(os.path.dirname(__file__)) + '/scanner.log'\n\nclass Scanner:\n def __init__(self, target):\n self.warnings = set()\n self.threshold_cnt = 66\n self.target = target\n self.keywords = [\n 'base64_decode',\n 'eval',\n 'create_function',\n '$_POST'\n ]\n self.whitelist = [\n 'ionCube Loader',\n 'Google+ embeds',\n 'WORDFENCE_VERSION',\n 'Wordfence',\n 'wfDateLocalization'\n ]\n self.logged = False\n self.is_suspect = False\n self.logfile = LOG\n self.log_fh = open(self.logfile, 'a')\n\n def run(self):\n self.inspect()\n if len(self.warnings) > 0:\n self.check_keyword()\n\n def _any(self, line, whitelist):\n for w in whitelist:\n if w in line:\n return True\n return False\n\n def inspect(self):\n wtb = {}\n is_include_pattern=False\n f = open(self.target, 'r')\n linenum = 0\n for line in f:\n if linenum == 10:\n break\n line = line.rstrip()\n if len(line) > 500:\n self.warnings.add('long line')\n if ('@include' in line) and ('.ico\";' in line or '\\\\x69co\";' in line or 'i\\\\x63o\";' in line \\\n or '\\\\x69c\\\\x6f\";' in line or 'ic\\\\x6f\";' in line or '\\\\x2eico' in line):\n self.warnings.add('early suspect strings')\n is_include_pattern = True\n if linenum == 0 and \"DOCTYPE html PUBLIC\" in line:\n break\n if self._any(line, self.whitelist):\n break\n if self.is_multibyte(line):\n continue\n if line[:1] in ['*', '#']:\n continue\n for chr_ in line:\n if chr_ == \" \": continue\n if not chr_ in wtb.keys():\n wtb[chr_] = 0\n else:\n wtb[chr_] += 1\n linenum += 1\n f.close()\n if len(wtb) > self.threshold_cnt:\n self.warnings.add('many ascii characters.')\n self.debug(\"SUSPICIOUS : %s has %d ascii characters\" % (self.target, len(wtb)))\n if is_include_pattern:\n self.warnings.add('suspect include pattern.')\n self.debug(\"SUSPICIOUS : %s matched include pattern\" % self.target)\n\n def check_keyword(self):\n for keyword in self.keywords:\n p = subprocess.Popen(['grep', '-q', keyword, self.target])\n p.communicate()\n if p.returncode == 0:\n self.warnings.add('suspect keyword 
pattern')\n self.debug(\"SUSPICIOUS : %s has %s\" % (self.target, keyword))\n\n def is_multibyte(self, string):\n try:\n for ch in string:\n ch.encode('ascii', 'strict')\n except UnicodeDecodeError:\n return True\n return False\n\n def debug(self, message):\n print(message)\n\n def logger(self):\n if not self.logged:\n cmd1 = 'ls -lc %s' % (self.target)\n p1=subprocess.Popen(cmd1, stdout=subprocess.PIPE, shell=True)\n out1, _ = p1.communicate()\n cmd2 = 'head -10 %s' % (self.target)\n p2=subprocess.Popen(cmd2, stdout=subprocess.PIPE, shell=True)\n out2, _ = p2.communicate()\n self.log_fh.write(\"###START SCAN\\n\")\n self.log_fh.write(\"###FILE\\n\")\n self.log_fh.write(\"%s\\n\" % (self.target))\n self.log_fh.write(\"###STAT\\n\")\n self.log_fh.write(out1.split(\"\\n\")[0] + \"\\n\")\n self.log_fh.write(\"###HEAD\\n\")\n self.log_fh.write(out2 + \"\\n\")\n self.log_fh.write(\"###END SCAN\\n\")\n self.log_fh.close()\n self.logged = True\n\ndef get_files(userid):\n path = '/usr/home/%s/html' % (userid)\n return [os.path.join(d, file) \\\n for (d, _, files) in os.walk(path) \\\n for file in files if file.endswith('.php')]\n\nif __name__ == '__main__':\n userid = sys.argv[1]\n if os.path.exists(LOG):\n os.unlink(LOG)\n for line in get_files(userid):\n line = line.rstrip('\\r\\n')\n scanner = Scanner(line)\n scanner.run()\n","repo_name":"kentatogashi/wp_simple_scanner","sub_path":"wp_simple_scanner.py","file_name":"wp_simple_scanner.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"10366553105","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 25 07:35:37 2022\r\n\r\n@author: Hina\r\n\"\"\"\r\nimport numpy as np \r\nimport matplotlib.pyplot as plt\r\ndef incident(path, filename):\r\n file = open(path + '/' + filename + '_out_ang.txt' )\r\n file.readline()\r\n Cont_nu, incident, trans, DiffOut, net_trans, reflec, total, reflin, outlin, lineID, cont, nLine = np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([]),np.array([])\r\n for line in file:\r\n line = line.rstrip('\\n')\r\n line = line.split('\\t')\r\n Cont_nu = np.append(Cont_nu, float(line[0]))\r\n incident = np.append(incident, float(line[1]))\r\n trans = np.append(trans, float(line[2]))\r\n DiffOut = np.append(DiffOut, float(line[3]))\r\n net_trans = np.append(net_trans, float(line[4]))\r\n reflec = np.append(reflec, float(line[5]))\r\n total = np.append(total, float(line[6]))\r\n reflin = np.append(reflin, float(line[7]))\r\n outlin = np.append(outlin, float(line[8]))\r\n lineID = np.append(lineID, line[9])\r\n cont = np.append(cont, line[10])\r\n nLine = np.append(nLine, float(line[11]))\r\n return Cont_nu, incident, total, net_trans\r\n\r\ndef inc_vs_keV(name_index, Cont_nu, incident):\r\n fig, ax = plt.subplots(figsize=(8, 6))\r\n #ax.set(title='AGN incident continuum vs energy', xlabel='Energy [KeV]', ylabel='$4\\pi \\\\nu$ $J_{\\\\nu} $ [erg $cm^{-2} s^{-1}$ ]')\r\n ax.tick_params(axis='x', labelsize= 18) \r\n ax.tick_params(axis='y', labelsize= 18)\r\n\r\n plt.title(str(name_index[3]) + \" incident continuum vs energy at $\\\\xi$=5\", fontsize=18)\r\n plt.xlabel('log$_{10}$(E/[KeV])', fontsize=18)\r\n plt.ylabel('log$_{10}(4\\pi \\\\nu$ $J_{\\\\nu} $/[erg $cm^{-2} s^{-1}$ ])', fontsize=18)\r\n #plt.ylabel(r'log($\\frac{E}{KeV}$)', fontsize=14)\r\n plt.plot(np.log10(Cont_nu), np.log10(incident), label='a(x)=-1.0', 
linewidth=3)\r\n #plt.scatter(Cont_nu[n]/13.6, incident[n])\r\n #plt.text(Cont_nu[n]/1e6, incident[n], str(Cont_nu[n]/1e6) + ',' + str(incident[n]))\r\n #plt.xlim(-3, 6)\r\n #plt.xlim(1e-3, 1e6)\r\n #plt.ylim(1e-4, 1e2)\r\n plt.xlim(-3, 6)\r\n #plt.xscale('log')\r\n #plt.yscale('log')\r\n return ax\r\n \r\ndef inc_vs_keV_3(name_index, Cont_nu1, incident1, Cont_nu2, incident2, Cont_nu3, incident3):\r\n fig, ax = plt.subplots(figsize=(8, 6))\r\n ax.tick_params(axis='x', labelsize= 18) \r\n ax.tick_params(axis='y', labelsize= 18)\r\n \r\n plt.title(str(name_index[3]) + \" total continuum vs energy at $\\\\xi$=5\", fontsize=18)\r\n plt.xlabel('log$_{10}$(E/[KeV])', fontsize=18)\r\n plt.ylabel('log$_{10}(4\\pi \\\\nu$ $J_{\\\\nu} $/[erg $cm^{-2} s^{-1}$ ])', fontsize=18)\r\n plt.plot(np.log10(Cont_nu1), np.log10(incident1), label=str(name_index[0]), linewidth=3)\r\n plt.plot(np.log10(Cont_nu2), np.log10(incident2), label=str(name_index[1]), linewidth=3)\r\n plt.plot(np.log10(Cont_nu3), np.log10(incident3), label=str(name_index[2]), linewidth=3)\r\n #plt.scatter(Cont_nu[n]/13.6, incident[n])\r\n #plt.text(Cont_nu[n]/1e6, incident[n], str(Cont_nu[n]/1e6) + ',' + str(incident[n]))\r\n #plt.xlim(-3, 6)\r\n plt.xlim(1e-3, 1e6)\r\n #plt.ylim(1e-4, 1e2)\r\n plt.xlim(.5, 1)\r\n #plt.xscale('log')\r\n #plt.yscale('log')\r\n plt.legend(fontsize = 17, frameon=False)\r\n return ax\r\n\r\ndef trans(path, filename):\r\n file = open(path + '/' + filename + '_out_ang.txt' )\r\n file.readline()\r\n Cont_nu, trans = np.array([]),np.array([])\r\n for line in file:\r\n line = line.rstrip('\\n')\r\n line = line.split('\\t')\r\n Cont_nu = np.append(Cont_nu, float(line[0]))\r\n trans = np.append(trans, float(line[1]))\r\n return Cont_nu, trans","repo_name":"hina0830g/Summer2022_NASA_Goddard","sub_path":"Continuum_keV.py","file_name":"Continuum_keV.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14564147470","text":"import copy\n\nCOMMANDS = ['SPEED', 'SLOW', 'JUMP', 'WAIT', 'UP', 'DOWN']\n\n#def get_score():\n\n\ndef evaluate_bike(bike_count, bike_required, grid, speed_init, bike_data, chromosome):\n finished = False\n index = 0\n x = 0\n speed = speed_init\n bridge_length = len(grid[0])\n sim_bike_data = [copy.deepcopy(bike_data)]\n while not finished and index < len(chromosome):\n #print(bike_data, bridge_length, file=sys.stderr, flush=True)\n gene = chromosome[index]\n dy = 1 if (gene == 5 and bike_data[-1][1] < 3) else -1 if (gene == 4 and bike_data[0][1] > 0) else 0\n speed += 1 if gene == 0 else -1 if (gene == 1 and speed > 0) else 0\n jumping = gene == 2\n for i in range(len(bike_data)):\n if bike_data[i][2] == 1:\n y = bike_data[i][1]\n # See if bike fell in hole\n in_hole = False\n if not jumping:\n x_max = min(bridge_length, x + speed - 1)\n for xx in range(x + 1, x_max):\n in_hole |= grid[y][xx] == '0'\n if abs(dy) > 0:\n in_hole |= grid[y + dy][xx] == '0'\n if x + speed < bridge_length:\n in_hole |= grid[y + dy][x + speed] == '0'\n if in_hole:\n bike_data[i][2] = 0\n bike_data[i][0] += speed\n bike_data[i][1] += dy\n x += speed\n finished = x >= bridge_length\n sim_bike_data.append(copy.deepcopy(bike_data))\n index += 1\n number_at_end = sum([bike_data[i][2] for i in range(bike_count)])\n score = 10 * (bike_required - number_at_end) + max(0, bridge_length - x)\n return [score, sim_bike_data]\n\n\ndef solve_bridge(bike_count, bike_required, grid, speed, bike_data):\n # Do something\n i = 
0\n\n\n\n","repo_name":"HarryMountain/Codingame","sub_path":"TheBridge/bridge_crosser.py","file_name":"bridge_crosser.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"37819087256","text":"# encoding=utf-8\nimport wx\nimport wx.adv\nimport wx.lib.masked as masked\nfrom wx.adv import CalendarCtrl\nimport MyPopupctl\n\n\nclass PopControl(MyPopupctl.MyPopupControl):\n def __init__(self, parent, mode, data, *_args, **_kwargs):\n MyPopupctl.MyPopupControl.__init__(self, *_args, **_kwargs)\n self.win = wx.Window(self, -1, pos=(0, 0), style = 0)\n self.parent = parent\n self.mode = mode\n self.pop_obj = None\n self.selected_items = list()\n if mode == 1:\n self.pop_obj = CalendarCtrl(self.win, -1, pos=(0, 0))\n self.pop_obj.Bind(wx.adv.EVT_CALENDAR, self.on_calendar)\n elif mode == 2:\n if data is not None:\n self.pop_obj = wx.CheckListBox(self.win, -1, (0, 0), wx.DefaultSize, data)\n self.pop_obj.Bind(wx.EVT_CHECKLISTBOX, self.on_checklist_selected)\n self.parent.Bind(wx.EVT_LEFT_UP, self.on_checklist)\n elif mode == 3:\n if data is not None:\n spin = wx.SpinButton(self.win, -1, wx.DefaultPosition, (-1, 20), wx.SP_VERTICAL)\n self.pop_obj = masked.TimeCtrl(\n self.win,\n -1,\n name=\"time_picker\",\n fmt24hr=True,\n display_seconds=True,\n spinButton = spin\n )\n self.pop_obj.SetValue('00:00:00')\n\n if self.pop_obj is not None:\n bz = self.pop_obj.GetBestSize()\n self.win.SetSize(bz)\n self.SetPopupContent(self.win)\n\n def on_checklist(self, event):\n self.PopDown()\n event.Skip()\n\n\n def on_checklist_selected(self, event):\n self.SetValue(','.join(self.pop_obj.CheckedStrings))\n event.Skip()\n\n def on_calendar(self, evt):\n self.PopDown()\n date = self.pop_obj.GetDate()\n self.SetValue('%04d%02d%02d' % (date.GetYear(),\n date.GetMonth()+1,\n date.GetDay()))\n evt.Skip()\n\n def FormatContent(self):\n if self.mode == 1:\n txtValue = self.GetValue()\n didSet = False\n if len(txtValue) > 0:\n y = int(txtValue[:4])\n m = int(txtValue[4:6]) - 1\n d = int(txtValue[6:8])\n if d > 0 and d < 31:\n if m >= 0 and m < 12:\n if y > 1000:\n self.pop_obj.SetDate(wx.DateTime.FromDMY(d, m, y))\n didSet = True\n\n if not didSet:\n self.pop_obj.SetDate(wx.DateTime.Today())\n elif self.mode == 2:\n txtValue = self.GetValue()\n if txtValue == '':\n self.pop_obj.Checked = []\n else:\n try:\n self.pop_obj.Checked = [self.pop_obj.GetItems().index(x) for x in txtValue.split(',')]\n except Exception as e:\n self.pop_obj.Checked = []","repo_name":"imsaux/dmp","sub_path":"PopupControl.py","file_name":"PopupControl.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"31068650614","text":"#!/usr/bin/python3\r\n# template.py by Barron Stone\r\n# This is an exercise file from Python GUI Development with Tkinter on lynda.com\r\n\r\nfrom tkinter import *\r\n \r\n \r\nroot = Tk()\r\n\r\nmyText = Text(root, width=40, height=10)\r\nmyText.pack()\r\n\r\n\r\nroot.mainloop()\r\n","repo_name":"SarahAdmin/My-Python-Projects","sub_path":"Exercise 8.py","file_name":"Exercise 8.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40618075294","text":"from .._unit import Unit\nfrom .mixing import *\nfrom .splitting import *\nfrom .phase_equilibrium import *\nfrom .molecular_sieve import *\nfrom .vacuum_system import *\nfrom 
._pump import *\nfrom .heat_exchange import *\nfrom .tank import *\nfrom .distillation import *\nfrom ._duplicator import *\nfrom ._junction import *\nfrom ._scaler import *\nfrom ._balance import *\nfrom ._diagram_only_units import *\nfrom ._flash import *\nfrom ._multi_effect_evaporator import *\nfrom .solids_separation import *\nfrom ._batch_bioreactor import *\nfrom ._batch_crystallizer import *\nfrom ._fermentation import *\nfrom ._enzyme_treatment import *\nfrom ._clarifier import *\nfrom ._screw_feeder import *\nfrom ._magnetic_separator import *\nfrom ._conveying_belt import *\nfrom ._vent_scrubber import *\nfrom ._vibrating_screen import *\nfrom ._carbon_capture import *\nfrom .stirred_tank_reactor import *\nfrom .compressor import *\nfrom .turbine import *\nfrom .valve import *\nfrom .drying import *\nfrom .size_reduction import *\nfrom .size_enlargement import *\nfrom .liquid_liquid_extraction import *\nfrom .adsorption import *\nfrom .auxiliary import *\nfrom .agitator import *\nfrom .stripping import * \n\nfrom . import (\n _flash, \n _pump, \n _multi_effect_evaporator, \n _magnetic_separator,\n _conveying_belt, \n _vent_scrubber,\n _vibrating_screen,\n _junction,\n _scaler,\n _fermentation, \n _enzyme_treatment, \n _clarifier, \n _balance, \n _screw_feeder,\n stirred_tank_reactor,\n molecular_sieve,\n vacuum_system,\n adsorption,\n size_reduction, \n size_enlargement,\n drying,\n distillation, \n tank,\n liquid_liquid_extraction, \n mixing, \n splitting, \n phase_equilibrium,\n heat_exchange, \n solids_separation,\n decorators, \n design_tools, \n _duplicator,\n _diagram_only_units, \n _batch_bioreactor,\n _batch_crystallizer,\n _carbon_capture,\n compressor,\n turbine,\n valve,\n auxiliary,\n agitator,\n stripping,\n)\n\n\n__all__ = ('Unit',\n *molecular_sieve.__all__,\n *liquid_liquid_extraction.__all__,\n *_diagram_only_units.__all__,\n *_flash.__all__,\n *mixing.__all__,\n *splitting.__all__,\n *phase_equilibrium.__all__,\n *_pump.__all__,\n *heat_exchange.__all__,\n *_multi_effect_evaporator.__all__,\n *distillation.__all__,\n *tank.__all__,\n *stirred_tank_reactor.__all__,\n *_conveying_belt.__all__,\n *_vent_scrubber.__all__,\n *_vibrating_screen.__all__,\n *_junction.__all__,\n *_scaler.__all__,\n *solids_separation.__all__,\n *_fermentation.__all__, \n *_enzyme_treatment.__all__,\n *_clarifier.__all__,\n *size_reduction.__all__,\n *size_enlargement.__all__,\n *_balance.__all__, \n *_screw_feeder.__all__,\n *_magnetic_separator.__all__,\n *_duplicator.__all__,\n *_batch_bioreactor.__all__,\n *_batch_crystallizer.__all__,\n *_carbon_capture.__all__,\n *drying.__all__,\n *adsorption.__all__,\n *compressor.__all__,\n *turbine.__all__,\n *valve.__all__,\n *vacuum_system.__all__,\n *auxiliary.__all__,\n *agitator.__all__,\n *stripping.__all__,\n 'adsorption',\n 'drying',\n 'tank',\n 'mixing',\n 'splitting',\n 'phase_equilibrium',\n 'distillation',\n 'decorators',\n 'design_tools',\n 'heat_exchange',\n 'solids_separation',\n 'liquid_liquid_extraction',\n 'size_reduction',\n 'size_enlargement',\n 'stripping',\n)\n\n\n\n","repo_name":"BioSTEAMDevelopmentGroup/biosteam","sub_path":"biosteam/units/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"30"} +{"seq_id":"468109078","text":"from direct.directbase.DirectStart import *\nfrom direct.actor.Actor import Actor\nfrom panda3d.core import * \nfrom pandac.PandaModules import * \nfrom direct.gui.DirectGui 
import * \nfrom direct.interval.IntervalGlobal import *\nfrom direct.task.Task import Task\nfrom direct.showbase.ShowBase import ShowBase\nfrom direct.showbase.DirectObject import DirectObject \nimport random, sys, math\n\n\nclass Env(ShowBase):\n def __init__(self):\n ShowBase.__init__(self)\n \n base.setFrameRateMeter(True)\n \n #\n self.balls = {}\n \n # load a environment\n cube = self.loader.loadModel(\"models/cube\")\n cube.setPos(0, 0, 0)\n cube.reparentTo(self.render)\n cube.setScale(0.5, 0.5, 0.5)\n \n # cam\n self.cam.setPos(15, 15, 15)\n light = PointLight('light')\n self.render.setLight(self.cam.attachNewNode(light))\n self.cam.lookAt(cube)\n \n # to handler collisions\n #self.traverser = CollisionTraverser()\n #self.rayHandler = CollisionHandlerQueue()\n \n self.sphere = loader.loadModel(\"models/ball\")\n self.sphere.reparentTo(render)\n self.sphere.setPos(5, 5, 5)\n self.sphere.setScale(.5)\n \n\n def addball(self):\n pos = Vec3(random.uniform(-5, 5), random.uniform(-5, 5), random.uniform(-5, 5))\n f = loader.loadModel(\"models/ball\")\n f.setPos(pos)\n f.setScale(.3)\n f.reparentTo(render)\n f.setCollideMask(0)\n return pos \n\n\n def drawLine(self,startPoint,endPoint,color=None,thickness=None):\n if color is None: color = (100,100,100,100)\n if thickness is None: thickness = .4\n linesegs = LineSegs(\"lines\")\n linesegs.setColor(color)\n linesegs.setThickness(thickness)\n linesegs.moveTo(startPoint)\n linesegs.drawTo(endPoint) \n node = linesegs.create(False) \n nodePath = render.attachNewNode(node)\n \n \n\nclass Application(DirectObject):\n def __init__(self):\n e = Env()\n center =(5,5,5)\n list_count = 10\n for i in range(list_count):\n #.................. \n position = e.addball()\n \n #e.drawLine(center,position,(0,0,0,0),.8)\n e.drawLine(center,position,(0,255,0,1),.8)\n \n\nw = Application()\nrun()","repo_name":"SaraQamar/FYP","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"32845043146","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport os\nfrom pathlib import Path\n\nfrom kaggle.api.kaggle_api_extended import KaggleApi\n\n\ndef download_data(competition, train_data, test_data,\n output_dir='./train/raw', credentials='.kaggle/kaggle.json'):\n \"\"\"Download raw dataset from Kaggle\"\"\"\n credentials = Path.home().joinpath(credentials)\n output_dir = Path(output_dir).resolve()\n\n assert (os.path.isfile(credentials)), FileNotFoundError(credentials)\n assert (os.path.isdir(output_dir)), NotADirectoryError(output_dir)\n\n api = KaggleApi()\n api.authenticate()\n\n api.competition_download_file(competition, train_data, path=output_dir)\n api.competition_download_file(competition, test_data, path=output_dir)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--competition', dest='competition',\n required=True, help='Kaggle competition to download')\n parser.add_argument('-tr', '--train_data', dest='train_data',\n required=True, help='Train CSV data file')\n parser.add_argument('-te', '--test_data', dest='test_data',\n required=True, help='Test CSV data file')\n parser.add_argument('-o', '--output_dir', dest='output_dir',\n required=False, help='Output directory')\n\n args = parser.parse_args()\n args.output_dir = Path(args.output_dir).resolve()\n\n train_path = args.output_dir.joinpath(args.train_data)\n test_path = args.output_dir.joinpath(args.test_data)\n\n 
download_data(args.competition, args.train_data, args.test_data, output_dir=args.output_dir)\n","repo_name":"truocpham-agilityio/kaggle-house-prices-dvc","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"17067302838","text":"import math\nimport random\n\nclass City:\n def __init__(self, x=None, y=None, name=None):\n self.x = None\n self.y = None\n self.name = None\n if x is not None:\n self.x = x\n else:\n self.x = int(random.random() * 200)\n if y is not None:\n self.y = y\n else:\n self.y = int(random.random() * 200)\n if name is not None:\n self.name = name\n else:\n self.name = \"City\" + str(int(random.random() * 200))\n\n def get_x(self):\n return self.x\n\n def get_y(self):\n return self.y\n\n def dist(self, city):\n pi = math.pi\n y1 = self.get_y()\n x1 = self.get_x()\n y2 = city.get_y()\n x2 = city.get_x()\n R = 3958.76\n y1 *= pi/180.0\n x1 *= pi/180.0\n y2 *= pi/180.0\n x2 *= pi/180.0\n if(y1 == y2 and x1 == x2):\n return 0\n return math.acos( math.sin(y1)*math.sin(y2) + math.cos(y1)*math.cos(y2)*math.cos(x2-x1) ) * R\n\n def __repr__(self):\n return self.name + \": (\" + str(self.get_x()) + \", \" + str(self.get_y()) + \")\"\n\nclass TourManager:\n destinationCities = []\n\n def addCity(self, city):\n self.destinationCities.append(city)\n\n def get_city(self, index):\n return self.destinationCities[index]\n\n def tour_size(self):\n return len(self.destinationCities)\n\nclass Tour:\n def __init__(self, tourmanager, tour=None):\n self.tourmanager = tourmanager\n self.tour = []\n self.fitness = 0.0\n self.dist = 0\n if tour is not None:\n self.tour = tour\n else:\n for i in range(0, self.tourmanager.tour_size()):\n self.tour.append(None)\n\n def __len__(self):\n return len(self.tour)\n\n def __getitem__(self, index):\n return self.tour[index]\n\n def __setitem__(self, key, value):\n self.tour[key] = value\n\n def __repr__(self):\n geneString = \"|\"\n for i in range(0, self.ts()):\n geneString += str(self.get_city(i)) + \"| \"\n return geneString\n\n def generateIndividual(self):\n for cityIndex in range(0, self.tourmanager.tour_size()):\n self.set_city(cityIndex, self.tourmanager.get_city(cityIndex))\n random.shuffle(self.tour)\n\n def get_city(self, tpition):\n return self.tour[tpition]\n\n def set_city(self, tpition, city):\n self.tour[tpition] = city\n self.fitness = 0.0\n self.dist = 0\n\n def get_fit(self):\n if self.fitness == 0:\n self.fitness = 1/float(self.get_d())\n return self.fitness\n\n def get_d(self):\n if self.dist == 0:\n td = 0\n for cityIndex in range(0, self.ts()):\n fromCity = self.get_city(cityIndex)\n destinationCity = None\n if cityIndex+1 < self.ts():\n destinationCity = self.get_city(cityIndex+1)\n else:\n destinationCity = self.get_city(0)\n td += fromCity.dist(destinationCity)\n self.dist = td\n return self.dist\n\n def ts(self):\n return len(self.tour)\n\n def contians_city(self, city):\n return city in self.tour\n\nclass Population:\n def __init__(self, tourmanager, size_population, initialise):\n self.tours = []\n for i in range(0, size_population):\n self.tours.append(None)\n\n if initialise:\n for i in range(0, size_population):\n newTour = Tour(tourmanager)\n newTour.generateIndividual()\n self.save(i, newTour)\n\n def __setitem__(self, key, value):\n self.tours[key] = value\n\n def __getitem__(self, index):\n return self.tours[index]\n\n def save(self, index, tour):\n 
self.tours[index] = tour\n\n def get_tour(self, index):\n return self.tours[index]\n\n def get_fittest(self):\n fittest = self.tours[0]\n for i in range(0, self.size_population()):\n if fittest.get_fit() <= self.get_tour(i).get_fit():\n fittest = self.get_tour(i)\n return fittest\n\n def size_population(self):\n return len(self.tours)\n\nclass GA:\n def __init__(self, tourmanager):\n self.tourmanager = tourmanager\n self.mutationRate = 0.25\n self.tournamentSize = 5\n self.elitism = True\n\n def evolve(self, pop):\n newPopulation = Population(self.tourmanager, pop.size_population(), False)\n elitismOffset = 0\n if self.elitism:\n newPopulation.save(0, pop.get_fittest())\n elitismOffset = 1\n\n for i in range(elitismOffset, newPopulation.size_population()):\n parent1 = self.select(pop)\n parent2 = self.select(pop)\n child = self.crossover(parent1, parent2)\n newPopulation.save(i, child)\n\n for i in range(elitismOffset, newPopulation.size_population()):\n self.mutate(newPopulation.get_tour(i))\n\n return newPopulation\n\n def crossover(self, parent1, parent2):\n child = Tour(self.tourmanager)\n\n startPos = int(random.random() * parent1.ts())\n endPos = int(random.random() * parent1.ts())\n\n for i in range(0, child.ts()):\n if startPos < endPos and i > startPos and i < endPos:\n child.set_city(i, parent1.get_city(i))\n elif startPos > endPos:\n if not (i < startPos and i > endPos):\n child.set_city(i, parent1.get_city(i))\n\n for i in range(0, parent2.ts()):\n if not child.contians_city(parent2.get_city(i)):\n for j in range(0, child.ts()):\n if child.get_city(j) == None:\n child.set_city(j, parent2.get_city(i))\n break\n\n return child\n\n def mutate(self, tour):\n for tp1 in range(0, tour.ts()):\n if random.random() < self.mutationRate:\n tp2 = int(tour.ts() * random.random())\n\n city1 = tour.get_city(tp1)\n city2 = tour.get_city(tp2)\n\n tour.set_city(tp2, city1)\n tour.set_city(tp1, city2)\n\n def select(self, pop):\n tournament = Population(self.tourmanager, self.tournamentSize, False)\n for i in range(0, self.tournamentSize):\n randomId = int(random.random() * pop.size_population())\n tournament.save(i, pop.get_tour(randomId))\n fittest = tournament.get_fittest()\n return fittest\n\nd = dict()\ntourmanager = TourManager()\n\nopen_names = open(\"rrNodeCity.txt\", \"r\")\nfor n in open_names:\n n = n.strip(\"\\n\")\n arr = n.split(\" \")\n idc = arr[0]\n name = arr[1]\n # print(str(idc) +\":\"+ str(name))\n d[idc] = name\n\n# print(d)\nopen_names.close()\nopen_nodes = open(\"rrNodes.txt\", \"r\")\nfor n in open_nodes:\n n = n.strip(\"\\n\")\n arr = n.split(\" \")\n idc = arr[0]\n y = float(arr[1])\n x = float(arr[2])\n if(idc in d):\n city = City(x, y, d[idc])\n tourmanager.addCity(city)\nopen_nodes.close()\n\npop = Population(tourmanager, 50, True)\n\n\nprint(\"Initial distance: \" + str(pop.get_fittest().get_d()))\nprint(\"Intial solution:\")\nprint(pop.get_fittest())\nprint(\"\")\nga = GA(tourmanager)\npop = ga.evolve(pop)\nfor i in range(0, 100):\n pop = ga.evolve(pop)\nprint(\"Finished\")\nprint(\"\")\nprint(\"Final distance: \" + str(pop.get_fittest().get_d()))\nprint(\"Final Solution:\")\nprint(pop.get_fittest())\n","repo_name":"clankur/Development","sub_path":"ai/tsp/tsp_4_MishraAnkur.py","file_name":"tsp_4_MishraAnkur.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"4878939825","text":"#!/usr/bin/env python2\n\nimport csv\nimport sys\nfrom afap.models import 
ConstituencyGroup\nfrom afap.models import Application\nfrom afap.models import Allocation\n\ndef gen_report():\n# f = open('report.csv', 'wb')\n w = csv.writer(sys.stdout)\n w.writerow(['Organization', 'Account Number', 'Allocation', 'Appeal'])\n constituencies = ConstituencyGroup.objects.order_by('name')\n for c in constituencies:\n apps = Application.objects.filter(year=2011, organization__constituency_group=c).order_by('organization__name')\n if len(apps) is 0:\n continue\n w.writerow([c.name])\n for a in apps:\n allocs = [alloc.amount for alloc in Allocation.objects.filter(application=a).order_by('-description')]\n if len(allocs) is 0:\n continue\n w.writerow([a.organization.name, a.organization.account_number] + allocs)\n w.writerow([])\n \n\n return\n\nif __name__ == '__main__': gen_report()\n","repo_name":"bkgood/misc-code","sub_path":"afapsite/util/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"23465594268","text":"from . import common as cmmn\nimport uuid\nimport random\nimport time\nimport imagesize\nimport logging\nimport pprint\n\nfrom pathlib import Path\nfrom dataclasses import dataclass, field\nfrom typing import Optional, List, Union\n\nfrom ..helpers import get_image_type\nfrom instauto.api.structs import PostLocation\nfrom instauto.api.constants import DEFAULT_DEVICE_PROFILE\n\nlogger = logging.getLogger(__name__)\n\n#####################################\n# DATACLASSES\n#####################################\n\n\n@dataclass\nclass Location:\n \"\"\"Contains all information about the location. This can be used to set the location tag for an Instagram post.\"\"\"\n name: str = \"\"\n address: str = \"\"\n lat: float = None\n lng: float = None\n external_source: str = \"\"\n facebook_places: str = \"\"\n facebook_places_id: str = \"\"\n\n def __repr__(self):\n return pprint.pformat(self.__dict__)\n\n\n@dataclass\nclass Device:\n \"\"\"Contains information about the device that is used to post the image. This defaults to the same info used in\n the DeviceProfile of the ApiClient class.\"\"\"\n manufacturer: str\n model: str\n android_version: int\n android_release: str\n\n def __repr__(self):\n return pprint.pformat(self.__dict__)\n\n\n@dataclass\nclass Edits:\n \"\"\"Contains information about how the image was edited (zooming and positioning). Defaults to no edits.\"\"\"\n crop_original_size: List[int]\n crop_center: List[float] = field(default_factory=lambda: [0.0, 0.0])\n crop_zoom: float = 1.0\n\n def __repr__(self):\n return pprint.pformat(self.__dict__)\n\n\n@dataclass\nclass Extra:\n \"\"\"Contains information about the image uploaded. 
Defaults to the actual size of the image.\"\"\"\n source_width: int\n source_height: int\n\n def __repr__(self):\n return pprint.pformat(self.__dict__)\n\n\n#####################################\n# STRUCTS\n#####################################\n\nclass _Base(cmmn.Base):\n \"\"\"Contains values that are pretty much shared across all API requests.\"\"\"\n radio_type: str = ''\n is_carousel_bumped_post: str = 'False'\n container_module: str = None\n media_id: str = None\n feed_position: str = None\n\n def __init__(self, media_id: str, feed_position: str = None, container_module: str = None,\n delivery_class: str = 'organic', is_carousel_bumped_post: str = 'False', *args, **kwargs):\n self.media_id = media_id\n self.container_module = container_module\n self.feed_position = feed_position\n self.delivery_class = delivery_class\n self.is_carousel_bumped_post = is_carousel_bumped_post\n self.radio_type = 'wifi-none'\n super().__init__(*args, **kwargs)\n self._exempt.append('media_id')\n\n\nclass Unlike(_Base):\n action = 'unlike'\n\n def __init__(self, media_id: str, container_module: str = \"something\", *args, **kwargs):\n super().__init__(media_id=media_id, container_module=container_module, *args, **kwargs)\n\n\nclass Like(_Base):\n action = 'like'\n\n def __init__(self, media_id: str, container_module: str = \"something\", *args, **kwargs):\n super().__init__(media_id=media_id, container_module=container_module, *args, **kwargs)\n\n\nclass Save(_Base):\n action = 'save'\n\n def __init__(self, media_id: str, container_module: str = \"something\", *args, **kwargs):\n super().__init__(media_id=media_id, container_module=container_module, *args, **kwargs)\n\n\nclass Comment(_Base):\n action = 'save'\n\n def __init__(self, media_id: str, comment_text: str, container_module: str = \"something\", *args, **kwargs):\n self.comment_text = comment_text\n self.idempotence_token: str = str(uuid.uuid4())\n super().__init__(media_id=media_id, container_module=container_module, *args, **kwargs)\n\n\nclass UpdateCaption(_Base):\n action = 'edit_media'\n\n def __init__(self, media_id: str, caption_text: Optional[str] = None, location: Optional[Location] = None, container_module: str = \"something\", *args, **kwargs):\n self.caption_text = caption_text\n self.location = location\n super().__init__(media_id=media_id, container_module=container_module, *args, **kwargs)\n\n\nclass _PostBase(cmmn.Base):\n def __init__(self, path: Union[str, Path], source_type: PostLocation, edits: Optional[Edits],\n extra: Optional[Extra], device: Optional[Device], *args, **kwargs):\n self.upload_id = str(time.time()).split('.')[0]\n self.timezone_offset = str(time.localtime().tm_gmtoff)\n self.scene_capture_type = ''\n self.media_folder = 'Pictures'\n self.x_fb_waterfall_id = str(uuid.uuid4())\n self.entity_name = f'{self.upload_id}_0_{random.randint(1000000000, 9999999999)}'\n\n self.source_type = source_type.value\n\n image_type = get_image_type(path)\n # See issue #65\n if image_type not in ['jpg', 'jpeg']:\n raise ValueError(\"Instagram only accepts jpg/jpeg images\")\n\n self.entity_type = f'image/{image_type}'\n self.image_path = path\n\n with open(path, 'rb') as f:\n f.seek(0, 2)\n self.entity_length = f.tell()\n\n if edits is not None and extra is None:\n self.extra = Extra(edits.crop_original_size[0], edits.crop_original_size[1])\n elif extra is not None and edits is None:\n self.edits = Edits([extra.source_width, extra.source_height])\n elif extra is None and edits is None:\n if hasattr(self, 'size'):\n size = 
self.size\n else:\n size = imagesize.get(self.image_path)\n self.edits = Edits(size)\n self.extra = Extra(*size)\n\n self.device = device or Device(\n DEFAULT_DEVICE_PROFILE['manufacturer'],\n DEFAULT_DEVICE_PROFILE['model'],\n DEFAULT_DEVICE_PROFILE['android_sdk_version'],\n DEFAULT_DEVICE_PROFILE['android_release']\n )\n super().__init__(*args, **kwargs)\n\n\nclass PostFeed(_PostBase):\n \"\"\"Contains all information about a post, that is necessary to upload it to Instagram.\"\"\"\n device_id: str = None\n\n def __init__(self, path: Union[str, Path], caption: str,\n location: Optional[Location] = None, edits: Optional[Edits] = None,\n extra: Optional[Extra] = None, device: Optional[Device] = None, *args, **kwargs):\n self.suggested_venue_position = -1\n self.multi_sharing = '-1'\n self.caption = caption\n self.location = location\n self.size = imagesize.get(path)\n super().__init__(path, PostLocation.Feed, edits, extra, device, *args, **kwargs)\n\n\nclass PostStory(_PostBase):\n _csrftoken: str = None\n _uid: str = None\n _uuid: str = None\n device_id: str = None\n\n def __init__(self, path: Union[str, Path], edits: Optional[Edits] = None,\n extra: Optional[Extra] = None, device: Optional[Device] = None, *args, **kwargs):\n self.camera_session_id = str(uuid.uuid4())\n self.creation_surface = 'camera'\n current_time = time.time()\n self.imported_taken_at = str(current_time - random.randint(10000, 200000)).split('.')[0]\n self.client_timestamp = str(current_time - 3).split('.')[0]\n self.client_shared_at = str(current_time + 1).split('.')[0]\n self.capture_type = 'normal'\n self.configure_mode = '1'\n self.supported_capabilities_new = \"[{\\\"name\\\":\\\"SUPPORTED_SDK_VERSIONS\\\",\\\"value\\\":\\\"66.0,67.0,68.0,69.0,70.0,71.0,72.0,73.0,74.0,75.0,76.0,77.0,78.0,79.0,80.0,81.0,82.0,83.0,84.0,85.0,86.0,87.0,88.0,89.0,90.0,91.0,92.0\\\"},{\\\"name\\\":\\\"FACE_TRACKER_VERSION\\\",\\\"value\\\":\\\"14\\\"},{\\\"name\\\":\\\"segmentation\\\",\\\"value\\\":\\\"segmentation_enabled\\\"},{\\\"name\\\":\\\"COMPRESSION\\\",\\\"value\\\":\\\"ETC2_COMPRESSION\\\"},{\\\"name\\\":\\\"world_tracker\\\",\\\"value\\\":\\\"world_tracker_enabled\\\"},{\\\"name\\\":\\\"gyroscope\\\",\\\"value\\\":\\\"gyroscope_enabled\\\"}]\"\n super().__init__(path, PostLocation.Story, edits, extra, device, *args, **kwargs)\n self._datapoint_from_client['device_id'] = lambda client: client.state.android_id\n\n\nclass RetrieveByUser(cmmn.Base):\n def __init__(self, user_id: str, exclude_comment: str = 'true', only_fetch_first_carousel_media: str = 'false', *args, **kwargs):\n self.user_id = user_id\n self.max_id: Optional[str] = None\n self.exclude_comment = exclude_comment\n self.only_fetch_first_carousel_media = only_fetch_first_carousel_media\n self.page = 0\n super().__init__(*args, **kwargs)\n","repo_name":"juhas96/instauto","sub_path":"instauto/api/actions/structs/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":8497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"30"} +{"seq_id":"2374856321","text":"# MAKING OF BLACKJACK\r\nimport os\r\nimport random\r\n\r\n\r\ndef clearConsole():\r\n command = 'clear'\r\n if os.name in ('nt', 'dos'): # If Machine is running on Windows, use cls\r\n command = 'cls'\r\n os.system(command)\r\n\r\n\r\nprint(\"\"\"\r\n.------. _ _ _ _ _ \r\n|A_ _ |. | | | | | | (_) | | \r\n|( \\/ ).-----. 
| |__ | | __ _ ___| | ___ __ _ ___| | __\r\n| \\ /|K /\\ | | '_ \\| |/ _` |/ __| |/ / |/ _` |/ __| |/ /\r\n| \\/ | / \\ | | |_) | | (_| | (__| <| | (_| | (__| < \r\n`-----| \\ / | |_.__/|_|\\__,_|\\___|_|\\_\\ |\\__,_|\\___|_|\\_\\\\\r\n | \\/ K| _/ | \r\n `------' |__/ \r\n\"\"\"\r\n )\r\n\r\ncards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\r\n\r\n\r\ndef card_choice():\r\n random_num = int(random.random() * len(cards))\r\n card = cards[random_num]\r\n return card\r\n\r\n\r\ndef play():\r\n user_cards = []\r\n computer_cards = []\r\n\r\n # user_cards.append(card_choice())\r\n # user_cards.append(card_choice())\r\n # computer_cards.append(card_choice())\r\n # computer_cards.append(card_choice())\r\n # print(user_cards, computer_cards)\r\n\r\n for number in range(2):\r\n user_cards.append(card_choice())\r\n computer_cards.append(card_choice())\r\n print(user_cards, computer_cards)\r\n\r\n # user_cards = [11, 10]\r\n\r\n def calculate_score(list):\r\n sum = 0\r\n for num in range(len(list)):\r\n sum += int(list[num])\r\n # 0 will represent a blackjack in our game.\r\n if sum == 21 and len(list) == 2:\r\n return 0\r\n elif 11 in list and sum > 21:\r\n list.remove(11)\r\n list.append(1)\r\n sum = 0\r\n for i in range(len(list)):\r\n sum += int(list[i])\r\n return sum\r\n else:\r\n return sum\r\n\r\n print(calculate_score(user_cards))\r\n\r\n while calculate_score(computer_cards) < 17:\r\n computer_cards.append(card_choice())\r\n\r\n if calculate_score(user_cards) == 21 or calculate_score(computer_cards) == 21:\r\n print(\"Game Ends\")\r\n elif calculate_score(user_cards) > 21:\r\n print(\"Game Ends\")\r\n elif calculate_score(computer_cards) <= 21 and calculate_score(user_cards) <= 21 and calculate_score(computer_cards) == calculate_score(user_cards):\r\n print(\"IT's a DRAW\")\r\n else:\r\n if input(\"Type yes to draw another card else type no: \\n\") == \"yes\":\r\n user_cards.append(card_choice())\r\n calculate_score(user_cards)\r\n else:\r\n print(\"Game Ends\")\r\n\r\n\r\nplay()\r\nagain = input(\"Type yes to play again else type no\")\r\n\r\nwhile again == \"yes\":\r\n play()\r\n again = input(\"Type yes to play again else type no\")\r\n","repo_name":"vipul-vaishnav/MY-PYTHON","sub_path":"day11.py","file_name":"day11.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"44953647821","text":"import copy\nimport inspect\nimport logging\nimport os\nimport random\nimport sys\nimport types\nfrom collections import Counter\nfrom typing import Callable\nfrom typing import Counter as CounterType\n\nimport networkx as nx\nimport torch\nfrom networkx.algorithms.dag import lexicographical_topological_sort\nfrom path import Path\nfrom torch import nn\n\nfrom ...utils.common import AttrDict\nfrom .primitives import AbstractPrimitive, Identity\n\n\ndef log_formats(x):\n if isinstance(x, torch.Tensor):\n return x.shape\n if isinstance(x, dict):\n return {k: log_formats(v) for k, v in x.items()}\n else:\n return x\n\n\ndef _find_caller():\n \"\"\"\n Returns:\n str: module name of the caller\n tuple: a hashable key to be used to identify different callers\n \"\"\"\n frame = sys._getframe(2) # pylint: disable=protected-access\n while frame:\n code = frame.f_code\n if os.path.join(\"utils\", \"logger.\") not in code.co_filename:\n mod_name = frame.f_globals[\"__name__\"]\n if mod_name == \"__main__\":\n mod_name = \"detectron2\"\n return mod_name, (code.co_filename, frame.f_lineno, code.co_name)\n frame = frame.f_back\n\n\n_LOG_COUNTER: CounterType 
= Counter()\n_LOG_TIMER: dict = {}\n\n\ndef log_first_n(lvl, msg, n=1, *, name=None, key=\"caller\"):\n \"\"\"\n Log only for the first n times.\n Args:\n lvl (int): the logging level\n msg (str):\n n (int):\n name (str): name of the logger to use. Will use the caller's module by default.\n key (str or tuple[str]): the string(s) can be one of \"caller\" or\n \"message\", which defines how to identify duplicated logs.\n For example, if called with `n=1, key=\"caller\"`, this function\n will only log the first call from the same caller, regardless of\n the message content.\n If called with `n=1, key=\"message\"`, this function will log the\n same content only once, even if they are called from different places.\n If called with `n=1, key=(\"caller\", \"message\")`, this function\n will not log only if the same caller has logged the same message before.\n \"\"\"\n if isinstance(key, str):\n key = (key,)\n assert len(key) > 0\n\n caller_module, caller_key = _find_caller()\n hash_key = ()\n if \"caller\" in key:\n hash_key = hash_key + caller_key\n if \"message\" in key:\n hash_key = hash_key + (msg,)\n\n _LOG_COUNTER[hash_key] += 1\n if _LOG_COUNTER[hash_key] <= n:\n logging.getLogger(name or caller_module).log(lvl, msg)\n\n\ndef iter_flatten(iterable):\n \"\"\"\n Flatten a potentially deeply nested python list\n \"\"\"\n # taken from https://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html\n it = iter(iterable)\n for e in it:\n if isinstance(e, (list, tuple)):\n yield from iter_flatten(e)\n else:\n yield e\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Graph(torch.nn.Module, nx.DiGraph):\n \"\"\"\n Base class for defining a search space. Add nodes and edges\n as for a directed acyclic graph in `networkx`. Nodes can contain\n graphs as children, also edges can contain graphs as operations.\n\n Note, if a graph is copied, the shared attributes of its edges are\n shallow copies whereas the private attributes are deep copies.\n\n To differentiate copies of the same graph you can define a `scope`\n with `set_scope()`.\n\n **Graph at nodes:**\n >>> graph = Graph()\n >>> graph.add_node(1, subgraph=Graph())\n\n If the node has more than one input use `set_input()` to define the\n routing to the input nodes of the subgraph.\n\n **Graph at edges:**\n >>> graph = Graph()\n >>> graph.add_nodes_from([1, 2])\n >>> graph.add_edge(1, 2, EdgeData({'op': Graph()}))\n\n **Modify the graph after definition**\n\n If you want to modify the graph e.g. in an optimizer once\n it has been defined already use the function `update_edges()`\n or `update_nodes()`.\n\n **Use as pytorch module**\n If you want to learn the weights of the operations or any\n other parameters of the graph you have to parse it first.\n >>> graph = getFancySearchSpace()\n >>> graph.parse()\n >>> logits = graph(data)\n >>> optimizer.min(loss(logits, target))\n\n To update the pytorch module representation (e.g. after removing or adding\n some new edges), you have to unparse. Beware that this is not fast, so it should\n not be done on each batch or epoch, rather once after discretizising. If you\n want to change the representation of the graph use rather some shared operation\n indexing at the edges.\n >>> graph.update(remove_random_edges)\n >>> graph.unparse()\n >>> graph.parse()\n >>> logits = graph(data)\n\n \"\"\"\n\n \"\"\"\n Usually the optimizer does not operate on the whole graph, e.g. preprocessing\n and post-processing are excluded. 
Scope can be used to define that or to\n    differentiate instances of the \"same\" graph.\n    \"\"\"\n    OPTIMIZER_SCOPE = \"all\"\n\n    \"\"\"\n    Whether the search space has an interface to one of the tabular benchmarks which\n    can then be used to query architecture performances.\n\n    If this is set to true then `query()` should be implemented.\n    \"\"\"\n    QUERYABLE = False\n\n    def __init__(self, name: str = None, scope: str = None):\n        \"\"\"\n        Initialise a graph. The edges are automatically filled with an EdgeData object\n        which defines the default operation as Identity. The default combination operation\n        is set as sum.\n\n        Note:\n            When inheriting from `Graph` note that `__init__()` cannot take any parameters.\n            This is due to the way networkx is implemented, i.e. graphs are reconstructed\n            internally and no parameters for init are considered.\n\n            Our recommended solution is to create static attributes before initialization and\n            then load them dynamically in `__init__()`.\n\n            >>> def __init__(self):\n            >>>     num_classes = self.NUM_CLASSES\n            >>> MyGraph.NUM_CLASSES = 42\n            >>> my_graph_42_classes = MyGraph()\n\n        \"\"\"\n        # super().__init__()\n        nx.DiGraph.__init__(self)\n        torch.nn.Module.__init__(self)\n\n        # Make DiGraph a member and not inherit. This is because when inheriting from\n        # `Graph` note that `__init__()` cannot take any parameters. This is due to\n        # the way networkx is implemented, i.e. graphs are reconstructed internally\n        # and no parameters for init are considered.\n        # Therefore __getattr__ and __iter__ forward the DiGraph methods for straight-forward\n        # usage as if we would inherit.\n\n        # self._nxgraph = nx.DiGraph()\n\n        # Replace the default dicts at the edges with `EdgeData` objects\n        # `EdgeData` can be easily customized and allow shared parameters\n        # across different Graph instances.\n\n        # self._nxgraph.edge_attr_dict_factory = lambda: EdgeData()\n        self.edge_attr_dict_factory = lambda: EdgeData()  # pylint: disable=W0108\n\n        # Replace the default dicts at the nodes to include `input` from the beginning.\n        # `input` is required for storing the results of incoming edges.\n\n        # self._nxgraph.node_attr_dict_factory = lambda: dict({'input': {}, 'comb_op': sum})\n        self.node_attr_dict_factory = lambda: dict({\"input\": {}, \"comb_op\": sum})\n\n        # remember to add all members also in `unparse()`\n        self.name = name\n        self.scope = scope\n        self.input_node_idxs = None\n        self.is_parsed = False\n        self._id = random.random()  # pytorch expects unique modules in `add_module()`\n\n    def __eq__(self, other):\n        return self.name == other.name and self.scope == other.scope\n\n    def __hash__(self):\n        \"\"\"\n        As it is very complicated to compare graphs (i.e. check all edge\n        attributes, do they have shared attributes, ...) 
use just the name\n for comparison.\n\n This is used when determining whether two instances are copies.\n \"\"\"\n h = 0\n h += hash(self.name)\n h += hash(self.scope) if self.scope else 0\n h += hash(self._id)\n return h\n\n def __repr__(self):\n return \"Graph {}-{:.07f}, scope {}, {} nodes\".format(\n self.name, self._id, self.scope, self.number_of_nodes()\n )\n\n def modules_str(self):\n \"\"\"\n Once the graph has been parsed, prints the modules as they appear in pytorch.\n \"\"\"\n if self.is_parsed:\n result = \"\"\n for g in self._get_child_graphs(single_instances=True) + [self]:\n result += \"Graph {}:\\n {}\\n==========\\n\".format(\n g.name, torch.nn.Module.__repr__(g)\n )\n return result\n else:\n return self.__repr__()\n\n def set_scope(self, scope: str, recursively=True):\n \"\"\"\n Sets the scope of this instance of the graph.\n\n The function should be used in a builder-like pattern\n `'subgraph'=Graph().set_scope(\"scope\")`.\n\n Args:\n scope (str): the scope\n recursively (bool): Also set the scope for all child graphs.\n default True\n\n Returns:\n Graph: self with the setted scope.\n \"\"\"\n self.scope = scope\n if recursively:\n for g in self._get_child_graphs(single_instances=False):\n g.scope = scope\n return self\n\n def add_node(self, node_index, **attr):\n \"\"\"\n Adds a node to the graph.\n\n Note that adding a node using an index that has been used already\n will override its attributes.\n\n Args:\n node_index (int): The index for the node. Expect to be >= 1.\n **attr: The attributes which can be added in a dict like form.\n \"\"\"\n assert node_index >= 1, \"Expecting the node index to be greater or equal 1\"\n nx.DiGraph.add_node(self, node_index, **attr)\n\n def copy(self):\n \"\"\"\n Copy as defined in networkx, i.e. a shallow copy.\n\n Just handling recursively nested graphs seperately.\n \"\"\"\n\n def copy_dict(d):\n copied_dict = d.copy()\n for k, v in d.items():\n if isinstance(v, Graph):\n copied_dict[k] = v.copy()\n elif isinstance(v, list):\n copied_dict[k] = [i.copy() if isinstance(i, Graph) else i for i in v]\n elif isinstance(v, torch.nn.Module) or isinstance(v, AbstractPrimitive):\n copied_dict[k] = copy.deepcopy(v)\n return copied_dict\n\n G = self.__class__()\n G.graph.update(self.graph)\n G.add_nodes_from((n, copy_dict(d)) for n, d in self._node.items())\n G.add_edges_from(\n (u, v, datadict.copy())\n for u, nbrs in self._adj.items()\n for v, datadict in nbrs.items()\n )\n G.scope = self.scope\n G.name = self.name\n return G\n\n def set_input(self, node_idxs: list):\n \"\"\"\n Route the input from specific parent edges to the input nodes of\n this subgraph. Inputs are assigned in lexicographical order.\n\n Example:\n - Parent node (i.e. node where `self` is located on) has two\n incoming edges from nodes 3 and 5.\n - `self` has two input nodes 1 and 2 (i.e. nodes without\n an incoming edge)\n - `node_idxs = [5, 3]`\n Then input of node 5 is routed to node 1 and input of node 3\n is routed to node 2.\n\n Similarly, if `node_idxs = [5, 5]` then input of node 5 is routed\n to both node 1 and 2. 
Warning: In this case the output of another\n incoming edge is ignored!\n\n Should be used in a builder-like pattern: `'subgraph'=Graph().set_input([5, 3])`\n\n Args:\n node_idx (list): The index of the nodes where the data is coming from.\n\n Returns:\n Graph: self with input node indices set.\n\n \"\"\"\n num_innodes = sum(self.in_degree(n) == 0 for n in self.nodes)\n assert num_innodes == len(\n node_idxs\n ), \"Expecting node index for every input node. Excpected {}, got {}\".format(\n num_innodes, len(node_idxs)\n )\n self.input_node_idxs = node_idxs # type: ignore[assignment]\n return self\n\n def num_input_nodes(self) -> int:\n \"\"\"\n The number of input nodes, i.e. the nodes without an\n incoming edge.\n\n Returns:\n int: Number of input nodes.\n \"\"\"\n return sum(self.in_degree(n) == 0 for n in self.nodes)\n\n def _assign_x_to_nodes(self, x):\n \"\"\"\n Assign x to the input nodes of self. Depending whether on\n edge or nodes.\n\n Performs also several sanity checks of the input.\n\n Args:\n x (Tensor or dict): Input to be assigned.\n \"\"\"\n # We need dict in case of cell and int in case of motif\n assert isinstance(x, dict) or isinstance(x, torch.Tensor)\n\n if self.input_node_idxs is None:\n assert (\n self.num_input_nodes() == 1\n ), \"There are more than one input nodes but input indeces are not defined.\"\n input_node = [n for n in self.nodes if self.in_degree(n) == 0][0]\n assert (\n len(list(self.predecessors(input_node))) == 0\n ), \"Expecting node 1 to be the parent.\"\n assert (\n \"subgraph\" not in self.nodes[input_node].keys()\n ), \"Expecting node 1 not to have a subgraph as it serves as input node.\"\n assert isinstance(x, torch.Tensor)\n self.nodes[input_node][\"input\"] = {0: x}\n else:\n # assign the input to the corresponding nodes\n assert all(\n [i in x.keys() for i in self.input_node_idxs]\n ), \"got x from an unexpected input edge\"\n if self.num_input_nodes() > len(x):\n # here is the case where the same input is assigned to more than one node\n # this can happen when there are cells with two inputs but at the very first\n # layer of the network, there is just one output (i.e. the data inputed to the\n # makro input node). Handle it and log a Info. This should happen only rarly\n logger.debug(\n f\"We are using the same x for two inputs in graph {self.name}\"\n )\n input_node_iterator = iter(self.input_node_idxs)\n for node_idx in lexicographical_topological_sort(self):\n if self.in_degree(node_idx) == 0:\n self.nodes[node_idx][\"input\"] = {0: x[next(input_node_iterator)]}\n\n def forward(self, x, *args): # pylint: disable=W0613\n \"\"\"\n Forward some data through the graph. This is done recursively\n in case there are graphs defined on nodes or as 'op' on edges.\n\n Args:\n x (Tensor or dict): The input. If the graph sits on a node the\n input can be a dict with {source_idx: Tensor} to be routed\n to the defined input nodes. If the graph sits on an edge,\n x is the feature tensor.\n args: This is only required to handle cases where the graph sits\n on an edge and receives an EdgeData object which will be ignored\n \"\"\"\n logger.debug(f\"Graph {self.name} called. 
Input {log_formats(x)}.\")\n\n # Assign x to the corresponding input nodes\n self._assign_x_to_nodes(x)\n\n for node_idx in lexicographical_topological_sort(self):\n node = self.nodes[node_idx]\n logger.debug(\n \"Node {}-{}, current data {}, start processing...\".format(\n self.name, node_idx, log_formats(node)\n )\n )\n\n # node internal: process input if necessary\n if (\"subgraph\" in node and \"comb_op\" not in node) or (\n \"comb_op\" in node and \"subgraph\" not in node\n ):\n log_first_n(\n logging.WARN, \"Comb_op is ignored if subgraph is defined!\", n=1\n )\n # TODO: merge 'subgraph' and 'comb_op'. It is basicallly the same thing. Also in parse()\n if \"subgraph\" in node:\n x = node[\"subgraph\"].forward(node[\"input\"])\n else:\n if len(node[\"input\"].values()) == 1:\n x = list(node[\"input\"].values())[0]\n else:\n x = node[\"comb_op\"](\n [node[\"input\"][k] for k in sorted(node[\"input\"].keys())]\n )\n node[\"input\"] = {} # clear the input as we have processed it\n\n if (\n len(list(self.neighbors(node_idx))) == 0\n and node_idx < list(lexicographical_topological_sort(self))[-1]\n ):\n # We have more than one output node. This is e.g. the case for\n # auxillary losses. Attach them to the graph, handling must done\n # by the user.\n logger.debug(\n \"Graph {} has more then one output node. Storing output of non-maximum index node {} at graph dict\".format(\n self, node_idx\n )\n )\n self.graph[f\"out_from_{node_idx}\"] = x\n else:\n # outgoing edges: process all outgoing edges\n for neigbor_idx in self.neighbors(node_idx):\n edge_data = self.get_edge_data(node_idx, neigbor_idx)\n # inject edge data only for AbstractPrimitive, not Graphs\n if isinstance(edge_data.op, Graph):\n edge_output = edge_data.op.forward(x)\n elif isinstance(edge_data.op, AbstractPrimitive):\n logger.debug(\n \"Processing op {} at edge {}-{}\".format(\n edge_data.op, node_idx, neigbor_idx\n )\n )\n edge_output = edge_data.op.forward(x)\n else:\n raise ValueError(\n \"Unknown class as op: {}. Expected either Graph or AbstactPrimitive\".format(\n edge_data.op\n )\n )\n self.nodes[neigbor_idx][\"input\"].update({node_idx: edge_output})\n\n logger.debug(f\"Node {self.name}-{node_idx}, processing done.\")\n\n logger.debug(f\"Graph {self.name} exiting. 
Output {log_formats(x)}.\")\n return x\n\n def to_pytorch(self, **kwargs) -> nn.Module:\n return self._to_pytorch(**kwargs)\n\n def _to_pytorch(self, write_out: bool = False) -> nn.Module:\n def _import_code(code: str, name: str):\n module = types.ModuleType(name)\n exec(code, module.__dict__) # pylint: disable=exec-used\n return module\n\n if not self.is_parsed:\n self.parse()\n\n input_node = [n for n in self.nodes if self.in_degree(n) == 0][0]\n input_name = \"x0\"\n self.nodes[input_node][\"input\"] = {0: input_name}\n\n forward_f = []\n used_input_names = [int(input_name[1:])]\n submodule_list = []\n for node_idx in lexicographical_topological_sort(self):\n node = self.nodes[node_idx]\n if \"subgraph\" in node:\n # TODO implementation not checked yet!\n max_xidx = max(used_input_names)\n submodule = node[\"subgraph\"].to_pytorch(write_out=write_out)\n submodule_list.append(submodule)\n _forward_f = f\"x{max_xidx + 1}=self.module_list[{len(submodule_list) - 1}]({node['input']})\"\n input_name = f\"x{max_xidx + 1}\"\n used_input_names.append(max_xidx + 1)\n forward_f.append(_forward_f)\n x = f\"x{max_xidx+1}\"\n else:\n if len(node[\"input\"].values()) == 1:\n x = next(iter(node[\"input\"].values()))\n else:\n max_xidx = max(used_input_names)\n if (\n \"__name__\" in dir(node[\"comb_op\"])\n and node[\"comb_op\"].__name__ == \"sum\"\n ):\n _forward_f = f\"x{max_xidx+1}=sum([\"\n elif isinstance(node[\"comb_op\"], torch.nn.Module):\n submodule_list.append(node[\"comb_op\"])\n _forward_f = f\"x{max_xidx + 1}=self.module_list[{len(submodule_list) - 1}]([\"\n else:\n raise NotImplementedError\n\n for inp in node[\"input\"].values():\n _forward_f += inp + \",\"\n _forward_f = _forward_f[:-1] + \"])\"\n forward_f.append(_forward_f)\n x = f\"x{max_xidx+1}\"\n if int(x[1:]) not in used_input_names:\n used_input_names.append(int(x[1:]))\n node[\"input\"] = {} # clear the input as we have processed it\n if (\n len(list(self.neighbors(node_idx))) == 0\n and node_idx < list(lexicographical_topological_sort(self))[-1]\n ):\n # We have more than one output node. This is e.g. the case for\n # auxillary losses. Attach them to the graph, handling must done\n # by the user.\n raise NotImplementedError\n else:\n # outgoing edges: process all outgoing edges\n for neigbor_idx in self.neighbors(node_idx):\n max_xidx = max(used_input_names)\n edge_data = self.get_edge_data(node_idx, neigbor_idx)\n # inject edge data only for AbstractPrimitive, not Graphs\n if isinstance(edge_data.op, Graph):\n submodule = edge_data.op.to_pytorch(write_out=write_out)\n submodule_list.append(submodule)\n _forward_f = f\"x{max_xidx + 1}=self.module_list[{len(submodule_list) - 1}]({x})\"\n input_name = f\"x{max_xidx + 1}\"\n used_input_names.append(max_xidx + 1)\n forward_f.append(_forward_f)\n elif isinstance(edge_data.op, AbstractPrimitive):\n # edge_data.op.forward = partial( # type: ignore[assignment]\n # edge_data.op.forward, edge_data=edge_data\n # )\n submodule_list.append(edge_data.op)\n _forward_f = f\"x{max_xidx + 1}=self.module_list[{len(submodule_list) - 1}]({x})\"\n input_name = f\"x{max_xidx + 1}\"\n used_input_names.append(max_xidx + 1)\n forward_f.append(_forward_f)\n else:\n raise ValueError(\n \"Unknown class as op: {}. 
Expected either Graph or AbstactPrimitive\".format(\n edge_data.op\n )\n )\n self.nodes[neigbor_idx][\"input\"].update({node_idx: input_name})\n\n forward_f.append(f\"return {x}\")\n\n model_file = \"# Auto generated\\nimport torch\\nimport torch.nn\\n\\nclass Model(torch.nn.Module):\\n\\tdef __init__(self):\\n\"\n model_file += \"\\t\\tsuper().__init__()\\n\"\n model_file += \"\\t\\tself.module_list=torch.nn.ModuleList()\\n\"\n model_file += \"\\n\\tdef set_module_list(self,module_list):\\n\"\n model_file += \"\\t\\tself.module_list=torch.nn.ModuleList(module_list)\\n\"\n model_file += \"\\n\\tdef forward(self,x0,*args):\\n\"\n for forward_lines in forward_f:\n for forward_line in (\n [forward_lines] if isinstance(forward_lines, str) else forward_lines\n ):\n model_file += f\"\\t\\t{forward_line}\\n\"\n\n try:\n module_model = _import_code(model_file, \"model\")\n model = module_model.Model()\n except Exception as e:\n raise Exception(e) from e\n\n model.set_module_list(submodule_list)\n\n if write_out:\n tmp_path = Path(os.path.dirname(os.path.realpath(__file__))) / \"model.py\"\n with open(tmp_path, \"w\", encoding=\"utf-8\") as outfile:\n outfile.write(model_file)\n\n return model\n\n def parse(self):\n \"\"\"\n Convert the graph into a neural network which can then\n be optimized by pytorch.\n \"\"\"\n for node_idx in lexicographical_topological_sort(self):\n if \"subgraph\" in self.nodes[node_idx]:\n self.nodes[node_idx][\"subgraph\"].parse()\n self.add_module(\n f\"{self.name}-subgraph_at({node_idx})\",\n self.nodes[node_idx][\"subgraph\"],\n )\n else:\n if isinstance(self.nodes[node_idx][\"comb_op\"], torch.nn.Module):\n self.add_module(\n f\"{self.name}-comb_op_at({node_idx})\",\n self.nodes[node_idx][\"comb_op\"],\n )\n for neigbor_idx in self.neighbors(node_idx):\n edge_data = self.get_edge_data(node_idx, neigbor_idx)\n if isinstance(edge_data.op, Graph):\n edge_data.op.parse()\n elif edge_data.op.get_embedded_ops():\n for primitive in edge_data.op.get_embedded_ops():\n if isinstance(primitive, Graph):\n primitive.parse()\n self.add_module(\n f\"{self.name}-edge({node_idx},{neigbor_idx})\",\n edge_data.op,\n )\n self.is_parsed = True\n\n def unparse(self):\n \"\"\"\n Undo the pytorch parsing by reconstructing the graph uusing the\n networkx data structures.\n\n This is done recursively also for child graphs.\n\n Returns:\n Graph: An unparsed shallow copy of the graph.\n \"\"\"\n g = self.__class__()\n g.clear()\n\n graph_nodes = self.nodes\n graph_edges = self.edges\n\n # unparse possible child graphs\n # be careful with copying/deepcopying here cause of shared edge data\n for _, data in graph_nodes.data():\n if \"subgraph\" in data:\n data[\"subgraph\"] = data[\"subgraph\"].unparse()\n for _, _, data in graph_edges.data():\n if isinstance(data.op, Graph):\n data.set(\"op\", data.op.unparse())\n\n # create the new graph\n # Remember to add all members here to update. I know it is ugly but don't know better\n g.add_nodes_from(graph_nodes.data())\n g.add_edges_from(graph_edges.data())\n g.graph.update(self.graph)\n g.name = self.name\n g.input_node_idxs = self.input_node_idxs\n g.scope = self.scope\n g.is_parsed = False\n g._id = self._id # pylint: disable=W0212\n g.OPTIMIZER_SCOPE = self.OPTIMIZER_SCOPE\n g.QUERYABLE = self.QUERYABLE\n\n return g\n\n def _get_child_graphs(self, single_instances: bool = False) -> list:\n \"\"\"\n Get all child graphs of the current graph.\n\n Args:\n single_instances (bool): Whether to return multiple instances\n (i.e. copies) of the same graph. 
When changing shared data\n this should be set to True.\n\n Returns:\n list: A list of all child graphs (can be empty)\n \"\"\"\n graphs = []\n for node_idx in lexicographical_topological_sort(self):\n node_data = self.nodes[node_idx]\n if \"subgraph\" in node_data:\n graphs.append(node_data[\"subgraph\"])\n graphs.append(\n node_data[\"subgraph\"]._get_child_graphs() # pylint: disable=W0212\n )\n\n for _, _, edge_data in self.edges.data():\n if isinstance(edge_data.op, Graph):\n graphs.append(edge_data.op)\n graphs.append(edge_data.op._get_child_graphs()) # pylint: disable=W0212\n elif isinstance(edge_data.op, list):\n for op in edge_data.op:\n if isinstance(op, Graph):\n graphs.append(op)\n graphs.append(op._get_child_graphs()) # pylint: disable=W0212\n elif isinstance(edge_data.op, AbstractPrimitive):\n # maybe it is an embedded op?\n embedded_ops = edge_data.op.get_embedded_ops()\n if embedded_ops is not None:\n if isinstance(embedded_ops, Graph):\n graphs.append(embedded_ops)\n graphs.append(\n embedded_ops._get_child_graphs() # pylint: disable=W0212\n )\n elif isinstance(embedded_ops, list):\n for child_op in edge_data.op.get_embedded_ops():\n if isinstance(child_op, Graph):\n graphs.append(child_op)\n graphs.append(\n child_op._get_child_graphs() # pylint: disable=W0212\n )\n else:\n logger.debug(\n \"Got embedded op, but is neither a graph nor a list: {}\".format(\n embedded_ops\n )\n )\n elif inspect.isclass(edge_data.op):\n assert not issubclass(\n edge_data.op, Graph\n ), \"Found non-initialized graph. Abort.\"\n # we look at an uncomiled op\n elif callable(edge_data.op):\n pass\n else:\n raise ValueError(f\"Unknown format of op: {edge_data.op}\")\n\n graphs = [g for g in iter_flatten(graphs)]\n\n if single_instances:\n single: list = []\n for g in graphs:\n if g.name not in [sg.name for sg in single]:\n single.append(g)\n return sorted(single, key=lambda g: g.name)\n else:\n return sorted(graphs, key=lambda g: g.name)\n\n def get_all_edge_data(\n self, key: str, scope=\"all\", private_edge_data: bool = False\n ) -> list:\n \"\"\"\n Get edge attributes of this graph and all child graphs in one go.\n\n Args:\n key (str): The key of the attribute\n scope (str): The scope to be applied\n private_edge_data (bool): Whether to return data from graph copies as well.\n\n Returns:\n list: All data in a list.\n \"\"\"\n assert scope is not None\n result = []\n for graph in self._get_child_graphs(single_instances=not private_edge_data) + [\n self\n ]:\n if (\n scope == \"all\"\n or graph.scope == scope\n or (isinstance(scope, list) and graph.scope in scope)\n ):\n for _, _, edge_data in graph.edges.data():\n if edge_data.has(key):\n result.append(edge_data[key])\n return result\n\n def set_at_edges(self, key, value, shared=False):\n \"\"\"\n Sets the attribute for all edges in this and any child graph\n \"\"\"\n for graph in self._get_child_graphs(single_instances=shared) + [self]:\n logger.debug(f\"Updating edges of graph {graph.name}\")\n for _, _, edge_data in graph.edges.data():\n if not edge_data.is_final():\n edge_data.set(key, value, shared)\n\n def compile(self):\n \"\"\"\n Instanciates the ops at the edges using the arguments specified at the edges\n \"\"\"\n for graph in self._get_child_graphs(single_instances=False) + [self]:\n logger.debug(f\"Compiling graph {graph.name}\")\n for _, v, edge_data in graph.edges.data():\n if not edge_data.is_final():\n attr = edge_data.to_dict()\n op = attr.pop(\"op\")\n\n if isinstance(op, list):\n compiled_ops = []\n for i, o in 
enumerate(op):\n                            if inspect.isclass(o):\n                                # get the relevant parameter if there are more.\n                                a = {\n                                    k: v[i] if isinstance(v, list) else v\n                                    for k, v in attr.items()\n                                }\n                                compiled_ops.append(o(**a))\n                            else:\n                                logger.debug(f\"op {o} already compiled. Skipping\")\n                        edge_data.set(\"op\", compiled_ops)\n                    elif isinstance(op, AbstractPrimitive):\n                        logger.debug(f\"op {op} already compiled. Skipping\")\n                    elif inspect.isclass(op) and issubclass(op, AbstractPrimitive):\n                        # Init the class\n                        if \"op_name\" in attr:\n                            del attr[\"op_name\"]\n                        edge_data.set(\"op\", op(**attr))\n                    elif isinstance(op, Graph):\n                        pass  # This is already covered by _get_child_graphs\n                    else:\n                        raise ValueError(f\"Unknown format of op: {op}\")\n\n    @staticmethod\n    def _verify_update_function(update_func: Callable, private_edge_data: bool):\n        \"\"\"\n        Verify that the update function actually modifies only\n        shared/private edge data attributes based on setting of\n        `private_edge_data`.\n\n        Args:\n            update_func (callable): callable that expects one argument\n                named `current_edge_data`.\n            private_edge_data (bool): Whether the update function is applied\n                to all graph instances including copies or just to one instance\n                per graph\n        \"\"\"\n\n        test = EdgeData()\n        test.set(\"shared\", True, shared=True)\n        test.set(\"op\", [True])\n\n        try:\n            result = test.clone()\n            update_func(current_edge_data=result)\n        except Exception:\n            log_first_n(\n                logging.WARN,\n                \"Update function could not be verified. Be cautious with the \"\n                \"setting of `private_edge_data` in `update_edges()`\",\n                n=5,\n            )\n            return\n\n        assert isinstance(\n            result, EdgeData\n        ), \"Update function does not return the edge data object.\"\n\n        if private_edge_data:\n            assert result._shared == test._shared, (  # pylint: disable=W0212\n                \"The update function changes shared data although `private_edge_data` set to True. \"\n                \"This is not the intended use of `update_edges`. The update function should only modify \"\n                \"private edge data.\"\n            )\n        else:\n            assert result._private == test._private, (  # pylint: disable=W0212\n                \"The update function changes private data although `private_edge_data` set to False. \"\n                \"This is not the intended use of `update_edges`. The update function should only modify \"\n                \"shared edge data.\"\n            )\n\n    def update_edges(\n        self, update_func: Callable, scope=\"all\", private_edge_data: bool = False\n    ):\n        \"\"\"\n        This updates the edge data of this graph and all child graphs.\n        This is the preferred way to manipulate the edges after the definition\n        of the graph, e.g. by optimizers who want to insert their own op.\n        `update_func(current_edge_data)`. This way optimizers\n        can initialize and store necessary information at edges.\n\n        Note that edges marked as 'final' will not be updated here.\n\n        Args:\n            update_func (callable): Function which accepts one argument called `current_edge_data`\n                and returns the modified EdgeData object.\n            scope (str or list(str)): Can be \"all\" or list of scopes to be updated.\n            private_edge_data (bool): If set to true, this means update_func will be\n                applied to all edges. THIS IS NOT RECOMMENDED FOR SHARED\n                ATTRIBUTES. Shared attributes should be set only once, we\n                take care it is synchronized across all copies of this graph.\n\n                The only use case for setting it to true is when actually changing\n                `op` during the initialization of the optimizer (e.g. 
replacing it\n with MixedOp or SampleOp)\n \"\"\"\n Graph._verify_update_function(update_func, private_edge_data)\n assert scope is not None\n for graph in self._get_child_graphs(single_instances=not private_edge_data) + [\n self\n ]:\n if (\n scope == \"all\"\n or scope == graph.scope\n or (isinstance(scope, list) and graph.scope in scope)\n ):\n logger.debug(f\"Updating edges of graph {graph.name}\")\n for u, v, edge_data in graph.edges.data():\n if not edge_data.is_final():\n edge = AttrDict(head=u, tail=v, data=edge_data)\n update_func(edge=edge)\n self._delete_flagged_edges()\n\n def update_nodes(\n self, update_func: Callable, scope=\"all\", single_instances: bool = True\n ):\n \"\"\"\n Update the nodes of the graph and its incoming and outgoing edges by iterating over the\n graph and applying `update_func` to each of it. This is the\n preferred way to change the search space once it has been defined.\n\n Note that edges marked as 'final' will not be updated here.\n\n Args:\n update_func (callable): Function that accepts three incoming parameters named\n `node, in_edges, out_edges`.\n - `node` is a tuple (int, dict) containing the\n index and the attributes of the current node.\n - `in_edges` is a list of tuples with the index of\n the tail of the edge and its EdgeData.\n - `out_edges is a list of tuples with the index of\n the head of the edge and its EdgeData.\n scope (str or list(str)): Can be \"all\" or list of scopes to be updated. Only graphs\n and child graphs with the specified scope are considered\n single_instance (bool): If set to false, this means update_func will be\n applied to nodes of all copies of a graphs. THIS IS NOT RECOMMENDED FOR SHARED\n ATTRIBUTES, i.e. when manipulating the shared data of incoming or outgoing edges.\n Shared attributes should be set only once, we take care it is syncronized across\n all copies of this graph.\n\n The only usecase for setting it to true is when actually changing\n `op` during the initialization of the optimizer (e.g. 
replacing it\n with MixedOp or SampleOp)\n \"\"\"\n assert scope is not None\n for graph in self._get_child_graphs(single_instances) + [self]:\n if (\n scope == \"all\"\n or graph.scope == scope\n or (isinstance(scope, list) and graph.scope in scope)\n ):\n logger.debug(f\"Updating nodes of graph {graph.name}\")\n for node_idx in lexicographical_topological_sort(graph):\n node = (node_idx, graph.nodes[node_idx])\n in_edges = list(graph.in_edges(node_idx, data=True)) # (v, u, data)\n in_edges = [\n (v, data) for v, u, data in in_edges if not data.is_final()\n ] # u is same for all\n out_edges = list(graph.out_edges(node_idx, data=True)) # (v, u, data)\n out_edges = [\n (u, data) for v, u, data in out_edges if not data.is_final()\n ] # v is same for all\n update_func(node=node, in_edges=in_edges, out_edges=out_edges)\n self._delete_flagged_edges()\n\n def _delete_flagged_edges(self):\n \"\"\"\n Delete edges whose associated EdgeData is flagged as deleted.\n \"\"\"\n for graph in self._get_child_graphs(single_instances=False) + [\n self\n ]: # we operate on shallow copies\n to_remove = []\n for u, v, edge_data in graph.edges.data():\n if edge_data.is_deleted():\n to_remove.append((u, v))\n if to_remove:\n # logger.info(\"Removing edges {} from graph {}\".format(to_remove, graph))\n graph.remove_edges_from(to_remove)\n\n def clone(self):\n \"\"\"\n Deep copy of the current graph.\n\n Returns:\n Graph: Deep copy of the graph.\n \"\"\"\n return copy.deepcopy(self)\n\n def reset_weights(self, inplace: bool = False):\n \"\"\"\n Resets the weights for the 'op' at all edges.\n\n Args:\n inplace (bool): Do the operation in place or\n return a modified copy.\n Returns:\n Graph: Returns the modified version of the graph.\n \"\"\"\n\n def weight_reset(m):\n if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):\n m.reset_parameters()\n\n if inplace:\n graph = self\n else:\n graph = self.clone()\n\n graph.apply(weight_reset)\n\n return graph\n\n def prepare_discretization(self):\n \"\"\"\n In some cases the search space is manipulated before the final\n discretization happens, e.g. DARTS. In such cases this should\n be defined in the search space, so all optimizers can call it.\n \"\"\"\n\n def prepare_evaluation(self):\n \"\"\"\n In some cases the evaluation architecture does not match the searched\n one. An example is where the macro model is extended to increase the\n number of parameters. This is done here.\n \"\"\"\n\n def get_dense_edges(self):\n \"\"\"\n Returns the edge indices (i, j) that would make a fully connected\n DAG without cycles such that i < j and i != j. Assumes nodes are\n already created.\n\n Returns:\n list: list of edge indices.\n \"\"\"\n edges = []\n nodes = sorted(list(self.nodes()))\n for i in nodes:\n for j in nodes:\n if i != j and j > i:\n edges.append((i, j))\n return edges\n\n def add_edges_densly(self):\n \"\"\"\n Adds edges to get a fully connected DAG without cycles\n \"\"\"\n self.add_edges_from(self.get_dense_edges())\n\n\nclass EdgeData:\n \"\"\"\n Class that holds data for each edge.\n Data can be shared between instances of the graph\n in which the edge lives.\n\n Also defines the default key 'op', which is `Identity()`. It must\n always be private.\n\n Items can be accessed directly as attributes with `.key` or\n in a dict-like fashion with `[key]`. 
To set a new item use `.set()`.\n    \"\"\"\n\n    def __init__(self, data: dict = None):\n        \"\"\"\n        Initializes a new EdgeData object.\n        'op' is set as Identity() and private by default\n\n        Args:\n            data (dict): Inject some initial data. Will always be private.\n        \"\"\"\n        if data is None:\n            data = {}\n        self._private = {}\n        self._shared = {}\n\n        # set internal attributes\n        self._shared[\"_deleted\"] = False\n        self._private[\"_final\"] = False\n\n        # set defaults and potential input\n        self.set(\"op\", Identity(), shared=False)\n        for k, v in data.items():\n            self.set(k, v, shared=False)\n\n    def has(self, key: str):\n        \"\"\"\n        Checks whether `key` exists.\n\n        Args:\n            key (str): The key to check.\n\n        Returns:\n            bool: True if key exists, False otherwise.\n\n        \"\"\"\n        assert not key.startswith(\"_\"), \"Access to private keys not allowed!\"\n        return key in self._private.keys() or key in self._shared.keys()\n\n    def __getitem__(self, key: str):\n        assert not str(key).startswith(\"_\"), \"Access to private keys not allowed!\"\n        return self.__getattr__(str(key))\n\n    def get(self, key: str, default):\n        try:\n            return self.__getattr__(key)\n        except AttributeError:\n            return default\n\n    def __getattr__(self, key: str):\n        if key.startswith(\"__\"):  # Required for deepcopy, not sure why\n            raise AttributeError(key)\n        assert not key.startswith(\"_\"), \"Access to private keys not allowed!\"\n        if key in self._private:\n            return self._private[key]\n        elif key in self._shared:\n            return self._shared[key]\n        else:\n            raise AttributeError(f\"Cannot find field '{key}' in the given EdgeData!\")\n\n    def __setattr__(self, name: str, val):\n        if name.startswith(\"_\"):\n            super().__setattr__(name, val)\n        else:\n            raise ValueError(\"Not allowed. Use set().\")\n\n    def __str__(self):\n        return f\"private: <{str(self._private)}>, shared: <{str(self._shared)}>\"\n\n    def __repr__(self):\n        return self.__str__()\n\n    def update(self, data):\n        \"\"\"\n        Update the data of this EdgeData object. If the data is added as dict,\n        then all variables will be handled as private.\n\n        Args:\n            data (EdgeData or dict): If dict, then values will be set as\n                private. If EdgeData then all entries will be replaced.\n        \"\"\"\n        if isinstance(data, dict):\n            for k, v in data.items():\n                self.set(k, v)\n        elif isinstance(data, EdgeData):\n            # TODO: do update and not replace!\n            self.__dict__.update(data.__dict__)\n        else:\n            raise ValueError(f\"Unsupported type {data}\")\n\n    def remove(self, key: str):\n        \"\"\"\n        Removes an item from the EdgeData\n\n        Args:\n            key (str): The key for the item to be removed.\n        \"\"\"\n        if key in self._private:\n            del self._private[key]\n        elif key in self._shared:\n            del self._shared[key]\n        else:\n            raise KeyError(f\"Tried to delete unknown key {key}\")\n\n    def copy(self):\n        \"\"\"\n        When a graph is copied to get multiple instances (e.g. when\n        reusing subgraphs at more than one location) then\n        this function will be called for all edges.\n\n        It will create a deep copy for the private entries but\n        only a shallow copy for the shared entries. E.g. 
architectural\n        weights should be shared, but the parameters of a 3x3 convolution should not.\n\n        Therefore 'op' must always be private.\n\n        Returns:\n            EdgeData: A new EdgeData object with independent private\n                items, but shallow shared items.\n        \"\"\"\n        new_self = EdgeData()\n        new_self._private = copy.deepcopy(self._private)  # pylint: disable=W0212\n        new_self._shared = self._shared  # pylint: disable=W0212\n\n        # we need to handle copies of graphs separately\n        for k, v in self._private.items():\n            if isinstance(v, Graph):\n                new_self._private[k] = v.copy()  # pylint: disable=W0212\n            elif isinstance(v, list):\n                new_self._private[k] = [  # pylint: disable=W0212\n                    i.copy() if isinstance(i, Graph) else i for i in v\n                ]\n\n        return new_self\n\n    def set(self, key: str, value, shared=False):\n        \"\"\"\n        Used to assign a new item to the EdgeData object.\n\n        Args:\n            key (str): The key.\n            value (object): The value to store\n            shared (bool): Default: False. Whether the item should\n                be a shallow copy between different instances of EdgeData\n                (and consequently between different instances of Graph).\n        \"\"\"\n        assert isinstance(key, str), \"Accepting only string keys, got {}\".format(\n            type(key)\n        )\n        assert not key.startswith(\"_\"), \"Access to private keys not allowed!\"\n        assert not self.is_final(), \"Trying to change finalized edge!\"\n        if shared:\n            if key in self._private:\n                raise ValueError(f\"Key {key} already defined as non-shared\")\n            else:\n                self._shared[key] = value\n        else:\n            if key in self._shared:\n                raise ValueError(f\"Key {key} already defined as shared\")\n            else:\n                self._private[key] = value\n\n    def clone(self):\n        \"\"\"\n        Return a true deep copy of EdgeData. Even shared\n        items are not shared anymore.\n\n        Returns:\n            EdgeData: New independent instance.\n        \"\"\"\n        return copy.deepcopy(self)\n\n    def delete(self):\n        \"\"\"\n        Flag the edge this instance is attached to for deletion.\n        \"\"\"\n        self._shared[\"_deleted\"] = True\n\n    def is_deleted(self):\n        \"\"\"\n        Returns true if the edge is flagged to be deleted\n        \"\"\"\n        return self._shared[\"_deleted\"]\n\n    def finalize(self):\n        \"\"\"\n        Sets this edge as final. 
This means it cannot be changed\n        anymore and will also not appear in the update functions\n        of the graph.\n        \"\"\"\n        self._private[\"_final\"] = True\n        return self\n\n    def is_final(self):\n        \"\"\"\n        Returns:\n            bool: True if the edge was finalized, False otherwise\n        \"\"\"\n        return self._private[\"_final\"]\n\n    def to_dict(self, subset=\"all\"):\n        if subset == \"shared\":\n            return {k: v for k, v in self._shared.items() if not k.startswith(\"_\")}\n        elif subset == \"private\":\n            return {k: v for k, v in self._private.items() if not k.startswith(\"_\")}\n        elif subset == \"all\":\n            d = self.to_dict(\"private\")\n            d.update(self.to_dict(\"shared\"))\n            return d\n        else:\n            raise ValueError(f\"Unknown subset {subset}\")\n","repo_name":"automl/neps","sub_path":"src/neps/search_spaces/architecture/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":50567,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"30"}
+{"seq_id":"16673610503","text":"import random\r\nimport math\r\nimport numpy as np\r\nimport time\r\n\r\ndef dist(x,y,dimension):\r\n    distance=0\r\n    x=x.split()\r\n    y=y.split()\r\n    for i in range(0,dimension):\r\n        xtal=float(x[i])\r\n        ytal=float(y[i])\r\n        distance+=(xtal-ytal)**2\r\n    return math.sqrt(distance)\r\n\r\ndef closest_node_dist(node, nodes):\r\n    #print(node, nodes)\r\n    nodes = np.asarray(nodes)\r\n    deltas = nodes - node\r\n    dist_2 = np.einsum('ij,ij->i', deltas, deltas)\r\n    #print(dist_2)\r\n    return math.sqrt(min(dist_2))\r\n\r\n#If we want to calculate the sum of the actual distances to the closest center and not just the assigned one\r\ndef actualcost(data, facilities):\r\n    sum1=0.0\r\n    for x in data:\r\n        sum1+=closest_node_dist(x,facilities)\r\n    return sum1\r\n\r\ndef meyerson(data, dimension, f,facil,overcount):\r\n    data= np.random.permutation(data)\r\n    #print(data[0:10])\r\n    #setfacil = set(facil)\r\n    #print('agurk')\r\n    #print(setfacil)\r\n    #print(type(setfacil))\r\n    facilities= []\r\n    cost=0\r\n    counter=0\r\n    numberofcenters=0\r\n    for point in data:\r\n        #print(point)\r\n        counter=counter+1\r\n        #if counter % 100==0:\r\n            #print(counter)\r\n        #find nearest facility\r\n        if numberofcenters>0:\r\n            nearest = closest_node_dist(point,facilities)\r\n            #print(nearest)\r\n        else:\r\n            nearest = f+1\r\n            #print('hej')\r\n        if random.uniform(0,1)*fhowlong:\r\n                lastfacil,lastcost,holder,overcount=meyersonmanytimes(currentdata,dimension,f,timesrecompute,currentfacil,overcount)\r\n                howlong=4*lastcost/f\r\n                TotalNumberofCentersOpened+=holder\r\n                #print(howlong)\r\n                lasttime=i\r\n                currentcost=lastcost\r\n                TotalRecompute+=1\r\n                currentfacil=lastfacil\r\n            else:\r\n                #print(data[i-1],currentfacil)\r\n                currentcost-=closest_node_dist(data[i-1],currentfacil)\r\n                nearest=closest_node_dist(data[i+window-1],currentfacil)\r\n                if nearestmenu-->>\\033[1;0m\\033[1;36m\\033[1;4m $ \\033[1;0m')\n\n    if usr_pas == 'menu':\n        menu.menu()\n    elif usr_pas == 'help':\n        colores()\n        cls()\n        helper()\n        colores()\n        creacion()\n    \n    elif usr_pas == 'set fb-id':\n        os.system('python2 modulos/ID-F-BOOK.py')\n        creacion()\n    elif usr_pas == 'clear':\n        cls()\n        colores()\n        creacion()\n    elif usr_pas == 'use force_brute':\n        colores()\n        print('..........')\n        time.sleep(0.50)\n        print(' ..........')\n        time.sleep(0.50)\n        print('-----abriendo modulo-----')\n        colores()\n        time.sleep(0.50)\n        print('..........')\n        time.sleep(0.50)\n        print('..........')\n        time.sleep(0.50)\n        cls()\n        os.system('python3 modulos/mod_force.py')\n        colores()\n        creacion()\n    elif usr_pas == 'install fluxion':\n        
fluxion()\n        creacion()\n    elif usr_pas == 'open msfconsole':\n        print('please wait ... executing msfconsole \\n\\n')\n        os.system('msfconsole')\n        creacion()\n    elif usr_pas == 'create automated payload':\n        os.system('bash modulos/modsh/modulo_payload.sh')\n        colores()\n        creacion()\n    elif usr_pas == 'use Brute':\n        brutedum()\n        cls()\n        creacion()\n    elif usr_pas == 'use check-pass':\n        os.system('python3 modulos/checkpass.py')\n        input('\\n\\nPress enter para continuar\\n')\n        cls()\n        creacion()\n    elif usr_pas == 'set locate':\n        iplocate()\n        creacion()\n    else:\n        # any other input is passed to the shell\n        os.system(usr_pas)\n        colores()\n        creacion()\n\ncreacion()\n","repo_name":"anonymous-sys19/fthV2","sub_path":"partials/linux.py","file_name":"linux.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"71144475284","text":"import warnings\n\ntry:\n    import matplotlib.pyplot as plt\n    MATPLOTLIB = True\nexcept ImportError:\n    MATPLOTLIB = False\n\ndef matplotlib_available():\n    if MATPLOTLIB:\n        return True\n    warnings.warn(\"Matplotlib not available. Plotting will not be available\")\n    return False\n\ndef plot_array(A, xsize=None, ysize=None, cmap=None):\n    \"\"\"\n    A: a 2D numpy array\n    xsize, ysize: the \"realworld\" size of the array\n    \"\"\"\n    Y, X = A.shape\n    if xsize is None:\n        xsize = X\n    if ysize is None:\n        ysize = Y\n    if matplotlib_available():\n        plt.imshow(A, origin='upper', extent=[0, xsize, 0, ysize], cmap=cmap)","repo_name":"gesellkammer/lambdasim","sub_path":"lambdasim/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"8236503582","text":"#!/usr/bin/env python\n\nimport os\nimport datetime\n\ntargets = []\n\n# zero-pad the day so the 8-digit date matches the site's file names\nday = '%02d' % datetime.datetime.now().day\ndate = '201505%s' % day\n\npage_template = 'http://hzdaily.hangzhou.com.cn/cb/page/1681/2015-05/%s' %day + '/%(page)s/%(date)s%(page)s_pdf.pdf'\n\n# page 01 is already seeded in pdf_files below, so start from page 2\npages = range(2, 25)\n\npdf_files = ['%s01_pdf.pdf' % date]\n\nfor page in pages:\n    return_code = os.system('wget %s' % page_template % {'page': '%02d' % page, 'date': date })\n    if return_code == 0:\n        pdf_files.append('%s%02d_pdf.pdf' % (date,page))\n\nprint(pdf_files)\nos.system('pdftk %s cat output cb_issue_%s.pdf' % (' '.join(pdf_files), date))\n\nfor pdf_file in pdf_files:\n    os.system('rm %s' % pdf_file)\n","repo_name":"sebastian-code/ideas_sueltas","sub_path":"bin/cb_download.py","file_name":"cb_download.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"18202332198","text":"\r\n#code here\r\narr=[5,8,7,6,2,3,1,0,8,4]\r\nprint(arr)\r\nN=10\r\ncount=1\r\nl=[]\r\nl1=[]\r\nl2=[]\r\nfor i in range(N):\r\n    l1.append(arr[i])\r\nl2=sorted(set(l1)) # deduplicate so repeated values do not break a consecutive run\r\nprint(l2)\r\n \r\n \r\nfor i in range(len(l2)-1):\r\n    if(l2[i]+1==l2[i+1]):\r\n        count=count+1\r\n    else:\r\n        l.append(count)\r\n        count=1\r\nl.append(count) # close the final run\r\n \r\n \r\nl.sort()\r\nprint(l[-1])\r\nprint(l)\r\n\r\n","repo_name":"SamratRode/JAVA_DSA_SOLUTIONS","sub_path":"potd.py","file_name":"potd.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"25862502707","text":"from py_ecc import bn128 as bn\n\n\nclass PubKey:\n    \"\"\" The format of public 
key\"\"\"\n\n def __init__(self, pk):\n if isinstance(pk, type(bn.G1)):\n self.pk = pk # An element of G1 field\n else:\n raise TypeError(\"require G1 type for pk!\")\n\n def __str__(self):\n return str(self.pk)\n\n\nclass PriKey:\n \"\"\" The format of private key\"\"\"\n\n def __init__(self, sk):\n if isinstance(sk, int):\n self.sk = sk # An integer for G1 field\n else:\n raise TypeError(\"require int type for sk!\")\n\n def __str__(self):\n return str(self.sk)\n\n\nclass Key:\n \"\"\" The format of user's key\"\"\"\n\n def __init__(self, prikey):\n if isinstance(prikey, PriKey):\n self.prikey = prikey\n pk = bn.multiply(bn.G1, self.prikey.sk) # pk = g^sk\n self.pubkey = PubKey(pk)\n else:\n raise TypeError(\"require PubKey and PriKey type!\")\n\n def __str__(self):\n return \"===== pubkey =====\\n\" + str(self.pubkey) + \"\\n===== prikey =====\\n\" + str(self.prikey)\n\n\nclass TrapKey:\n \"\"\" The format of trapdoor \"\"\"\n\n def __init__(self, tk):\n if isinstance(tk, type(bn.G2)):\n self.tk = tk\n else:\n raise TypeError(\"require G2 type for tk!\")\n\n def __str__(self):\n return str(self.tk)\n\n\nclass ReKey:\n \"\"\" The format of rekey \"\"\"\n\n def __init__(self, rk):\n if isinstance(rk, type(bn.G1)):\n self.rk = rk\n else:\n raise TypeError(\"require G1 type for rk!\")\n\n def __str__(self):\n return str(self.rk)\n\n\nclass Token:\n \"\"\" The format of token\"\"\"\n\n def __init__(self, tk):\n if isinstance(tk, type(bn.G2)):\n self.tk = tk\n else:\n raise TypeError(\"require G2 type for tk!\")\n\n def __str__(self):\n return str(self.tk)\n\n\nclass Cipher:\n \"\"\" The abstract of ciphertext \"\"\"\n pass\n\n\nclass Srecord:\n \"\"\" The format of hash table element\"\"\"\n\n def __init__(self, sid, hc):\n if isinstance(sid, int) and isinstance(hc, int):\n self.sid = sid\n self.hc = hc\n else:\n raise TypeError(\"require int type for sid and int type for hc!\")\n def __str__(self):\n return \"===== Srecord =====\\n\" + \"sid: \" + str(self.sid) + \"\\nhc: \" + str(\n self.hc)\n\n\n\nclass CipherII(Cipher):\n \"\"\" The format of second-level ciphertext \"\"\"\n\n def __init__(self, c1, c2, c3):\n b1 = isinstance(c1, type(bn.G1))\n b2 = isinstance(c2, type(bn.G2))\n b3 = isinstance(c3, bn.FQ12)\n if b1 and b2 and b3:\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n else:\n raise TypeError(\"require (G1,G2,FQ12) for second-level ciphertext!\")\n\n def __str__(self):\n return \"c1: \" + str(self.c1) + \"\\nc2: \" + str(self.c2) + \"\\nc3: \" + str(self.c3)\n\n\nclass CipherI(Cipher):\n \"\"\" The format of first-level ciphertext \"\"\"\n\n def __init__(self, c1, c2, c3, c4):\n b1 = isinstance(c1, type(bn.G1))\n b2 = isinstance(c2, type(bn.G2))\n b3 = isinstance(c3, bn.FQ12)\n b4 = isinstance(c4, bn.FQ12)\n if b1 and b2 and b3 and b4:\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n self.c4 = c4\n else:\n raise TypeError(\"require (G1,G2,FQ12,FQ12) for second-level ciphertext!\")\n\n def __str__(self):\n return str(self.c1) +' '+ str(self.c2) +' '+ str(self.c3) +' '+ str(self.c4)\n","repo_name":"Devil-fire/Data-Trade","sub_path":"Server/Mine/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"23883300082","text":"import json\n\nfrom apischema.json_schema import deserialization_schema, JsonSchemaVersion\n\nfrom optunaz.config.optconfig import OptimizationConfig\nfrom optunaz.utils.schema import (\n replacekey,\n addsibling,\n delsibling,\n 
copytitle,\n replaceenum,\n addtitles,\n)\n\n\ndef patch_schema_generic(schema):\n\n addtitles(schema)\n\n # Replace singleton enums with const.\n # For some reason, this was not needed in AZDock. A mystery.\n schema = replaceenum(schema)\n\n # Replace \"anyOf\" with \"oneOf\".\n schema = replacekey(schema)\n\n # Add \"type\": \"object\" to any elements that contain \"oneOf\": [...].\n schema = addsibling(schema)\n\n # Delete \"type\": \"string\" for \"enum\".\n schema = delsibling(schema, {\"enum\": \"type\"})\n\n # Delete most of the stuff for \"const\".\n schema = delsibling(schema, {\"const\": \"type\"})\n schema = delsibling(schema, {\"const\": \"default\"})\n schema = delsibling(schema, {\"const\": \"title\"})\n\n # Copy title from $refs into oneOf.\n schema = copytitle(schema, schema)\n\n return schema\n\n\ndef patch_schema_optunaz(schema):\n (\n schema.get(\"$defs\", {})\n .get(\"MolData\", {})\n .get(\"properties\", {})\n .get(\"file_path\", {})\n )[\"format\"] = \"uri\"\n\n # Dataset\n (\n schema.get(\"$defs\", {})\n .get(\"Dataset\", {})\n .get(\"properties\", {})\n .get(\"save_intermediate_files\", {})\n )[\"const\"] = True\n (\n schema.get(\"$defs\", {})\n .get(\"Dataset\", {})\n .get(\"properties\", {})\n .get(\"intermediate_training_dataset_file\", {})\n )[\"const\"] = \"{{run.path}}/intermediate_training_dataset_file.csv\"\n # (\n # schema.get(\"$defs\", {})\n # .get(\"Dataset\", {})\n # .get(\"properties\", {})\n # .pop(\"test_dataset_file\", None)\n # )\n # (\n # schema.get(\"$defs\", {})\n # .get(\"Dataset\", {})\n # .get(\"properties\", {})\n # .get(\"training_dataset_file\", {})\n # )[\"format\"] = \"file\"\n\n (\n schema.get(\"$defs\", {})\n .get(\"MolData\", {})\n .get(\"properties\", {})\n .get(\"file_path\", {})\n )[\"format\"] = \"uri\"\n\n # Root OptimizationConfig\n (\n schema.get(\"$defs\", {})\n .get(\"OptimizationConfig\", {})\n .get(\"properties\", {})\n .pop(\"mode\", None)\n )\n (\n schema.get(\"$defs\", {})\n .get(\"OptimizationConfig\", {})\n .get(\"properties\", {})\n .pop(\"visualization\", None)\n )\n # (\n # schema.get(\"$defs\", {})\n # .get(\"OptimizationConfig\", {})\n # .get(\"properties\", {})\n # )[\"mode\"] = {\n # \"$ref\": \"#/$defs/ModelMode\",\n # \"title\": \"Mode mode: regression or classification\",\n # \"default\": \"regression\"\n # }\n\n drop_algs = {\"PLS\", \"RandomForest\", \"XGBregressor\"}\n drop_refs = {f\"#/$defs/{alg}\" for alg in drop_algs}\n alg_items = (\n schema.get(\"$defs\", {})\n .get(\"OptimizationConfig\", {})\n .get(\"properties\", {})\n .get(\"algorithms\", {})\n .get(\"items\", {})\n )\n algs = alg_items.get(\"anyOf\", {})\n alg_items[\"anyOf\"] = [alg for alg in algs if alg[\"$ref\"] not in drop_refs]\n\n (\n schema.get(\"$defs\", {})\n .get(\"Settings\", {})\n .get(\"properties\", {})\n .pop(\"mode\", None)\n )\n\n (schema.get(\"$defs\", {}).get(\"Settings\", {}).get(\"properties\", {}))[\"n_jobs\"] = {\n \"const\": -1\n }\n\n (schema.get(\"$defs\", {}).get(\"Settings\", {}).get(\"properties\", {}))[\n \"track_to_mlflow\"\n ] = {\"const\": False}\n\n (schema.get(\"$defs\", {}).get(\"Settings\", {}).get(\"properties\", {}))[\n \"optuna_storage\"\n ] = {\"const\": \"sqlite:///{{run.path}}/optuna_storage.sqlite\"}\n\n (\n schema.get(\"$defs\", {})\n .get(\"Settings\", {})\n .get(\"properties\", {})\n .pop(\"shuffle\", None)\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"Settings\", {})\n .get(\"properties\", {})\n .pop(\"direction\", None)\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"Settings\", {})\n 
.get(\"properties\", {})\n .pop(\"scoring\", None)\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"Settings\", {})\n .get(\"properties\", {})\n .pop(\"tracking_rest_endpoint\", None)\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"ScaledDescriptorParameters\", {})\n .get(\"properties\", {})\n .pop(\"scaler\", None)\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"PhyschemDescriptors\", {})\n .get(\"properties\", {})\n .pop(\"parameters\", {})\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"Stratified\", {})\n .get(\"properties\", {})\n .pop(\"bins\", {})\n )\n\n (\n schema.get(\"$defs\", {})\n .get(\"Dataset\", {})\n .get(\"properties\", {})\n .get(\"training_dataset_file\", {})\n )[\"format\"] = \"uri\"\n\n (\n schema.get(\"$defs\", {})\n .get(\"MolDescriptor\", {})\n .get(\"anyOf\", [])\n .remove({\"$ref\": \"#/$defs/PrecomputedDescriptorFromFile\"})\n )\n\n\n return schema\n\n\ndef main():\n schema = deserialization_schema(\n OptimizationConfig, all_refs=True, version=JsonSchemaVersion.DRAFT_2019_09\n )\n schema = patch_schema_optunaz(schema)\n schema = patch_schema_generic(schema)\n print(json.dumps(schema, indent=2))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"MolecularAI/Qptuna","sub_path":"optunaz/schemagen.py","file_name":"schemagen.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"30"} +{"seq_id":"27900427716","text":"from bs4 import BeautifulSoup as soup\nfrom urllib.request import urlopen as uReq\n\nmy_url = 'https://www.newegg.com/p/pl?d=graphic+card&cm_sp=KeywordRelated-_-graphics%20card-_-graphic%20card-_-INFOCARD'\n\n# opening up a connection, grabbing the page before closing the connection\nuClient = uReq(my_url)\npage_html = uClient.read()\nuClient.close()\n\n# HTML parsing\npage_soup = soup(page_html, \"html.parser\")\n\n# grab each product\ncells = page_soup.findAll(\"div\", {\"class\": \"item-cell\"})\n\nfilename = \"products.csv\"\n\n# loop through every product for their name, price and shipping fee before\n# writing them into a csv file\nwith open(filename, \"w\") as f:\n headers = \"product_name, brand, shipping_price\\n\"\n f.write(headers)\n\n for cell in cells:\n title_container = cell.findAll(\"a\", {\"class\": \"item-title\"})\n\n try:\n product_name = title_container[0].text.strip()\n except IndexError:\n product_name = \"N/A\"\n\n try:\n brand = cell.div.div.a.img[\"title\"]\n except TypeError:\n brand = \"N/A\"\n except AttributeError:\n brand = \"N/A\"\n\n shipping_container = cell.findAll(\"li\", {\"class\": \"price-ship\"})\n try:\n shipping_price = shipping_container[0].text.strip()\n except IndexError:\n shipping_price = \"N/A\"\n\n f.write(product_name.replace(\",\", \"|\") + \", \" + brand + \", \" + shipping_price + \", \" + \"\\n\")\n","repo_name":"zJunKitz/CodeInPlace-FinalProject","sub_path":"NewEgg_Scrap.py","file_name":"NewEgg_Scrap.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2922405823","text":"from argparse import ArgumentParser\nimport random\nimport sys\nimport warnings\nimport os\n\nimport pytorch_lightning as pl\nimport torch\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nimport spacetimeformer as stf\n\n_MODELS = [\"spacetimeformer\", \"mtgnn\", \"lstm\", \"lstnet\", \"linear\", \"s4\"]\n\n_DSETS = [\n \"asos\",\n \"metr-la\",\n \"pems-bay\",\n \"exchange\",\n \"precip\",\n \"toy1\",\n \"toy2\",\n 
\"solar_energy\",\n \"mnist\",\n \"cifar\",\n \"copy\",\n \"crypto\",\n \"stock_index\",\n]\n\n\ndef create_parser():\n model = sys.argv[1]\n dset = sys.argv[2]\n\n # Throw error now before we get confusing parser issues\n assert (\n model in _MODELS\n ), f\"Unrecognized model (`{model}`). Options include: {_MODELS}\"\n assert dset in _DSETS, f\"Unrecognized dset (`{dset}`). Options include: {_DSETS}\"\n\n parser = ArgumentParser()\n parser.add_argument(\"model\")\n parser.add_argument(\"dset\")\n\n if dset == \"precip\":\n stf.data.precip.GeoDset.add_cli(parser)\n stf.data.precip.CONUS_Precip.add_cli(parser)\n elif dset == \"metr-la\" or dset == \"pems-bay\":\n stf.data.metr_la.METR_LA_Data.add_cli(parser)\n elif dset == \"mnist\":\n stf.data.image_completion.MNISTDset.add_cli(parser)\n elif dset == \"cifar\":\n stf.data.image_completion.CIFARDset.add_cli(parser)\n elif dset == \"copy\":\n stf.data.copy_task.CopyTaskDset.add_cli(parser)\n else:\n stf.data.CSVTimeSeries.add_cli(parser)\n stf.data.CSVTorchDset.add_cli(parser)\n stf.data.DataModule.add_cli(parser)\n\n if model == \"lstm\":\n stf.lstm_model.LSTM_Forecaster.add_cli(parser)\n stf.callbacks.TeacherForcingAnnealCallback.add_cli(parser)\n elif model == \"lstnet\":\n stf.lstnet_model.LSTNet_Forecaster.add_cli(parser)\n elif model == \"mtgnn\":\n stf.mtgnn_model.MTGNN_Forecaster.add_cli(parser)\n elif model == \"spacetimeformer\":\n stf.spacetimeformer_model.Spacetimeformer_Forecaster.add_cli(parser)\n elif model == \"linear\":\n stf.linear_model.Linear_Forecaster.add_cli(parser)\n elif model == \"s4\":\n stf.s4_model.S4_Forecaster.add_cli(parser)\n\n stf.callbacks.TimeMaskedLossCallback.add_cli(parser)\n\n parser.add_argument(\"--null_value\", type=float, default=None)\n parser.add_argument(\"--wandb\", action=\"store_true\")\n parser.add_argument(\"--plot\", action=\"store_true\")\n parser.add_argument(\"--attn_plot\", action=\"store_true\")\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--run_name\", type=str, required=True)\n parser.add_argument(\"--accumulate\", type=int, default=1)\n parser.add_argument(\"--val_check_interval\", type=float, default=1.0)\n parser.add_argument(\"--limit_val_batches\", type=float, default=1.0)\n parser.add_argument(\n \"--trials\", type=int, default=1, help=\"How many consecutive trials to run\"\n )\n\n if len(sys.argv) > 3 and sys.argv[3] == \"-h\":\n parser.print_help()\n sys.exit(0)\n\n return parser\n\n\ndef create_model(config):\n x_dim, yc_dim, yt_dim = None, None, None\n if config.dset == \"metr-la\":\n x_dim = 2\n yc_dim = 207\n yt_dim = 207\n elif config.dset == \"pems-bay\":\n x_dim = 2\n yc_dim = 325\n yt_dim = 325\n elif config.dset == \"precip\":\n x_dim = 2\n yc_dim = 49\n yt_dim = 49\n elif config.dset == \"asos\":\n x_dim = 6\n yc_dim = 6\n yt_dim = 6\n elif config.dset == \"solar_energy\":\n x_dim = 6\n yc_dim = 137\n yt_dim = 137\n elif config.dset == \"exchange\":\n x_dim = 6\n yc_dim = 8\n yt_dim = 8\n elif config.dset == \"toy1\":\n x_dim = 6\n yc_dim = 20\n yt_dim = 20\n elif config.dset == \"toy2\":\n x_dim = 6\n yc_dim = 20\n yt_dim = 20\n elif config.dset == \"mnist\":\n x_dim = 1\n yc_dim = 1\n yt_dim = 1\n elif config.dset == \"cifar\":\n x_dim = 1\n yc_dim = 3\n yt_dim = 3\n elif config.dset == \"copy\":\n x_dim = 1\n yc_dim = config.copy_vars\n yt_dim = config.copy_vars\n elif config.dset == \"crypto\":\n x_dim = 6\n yc_dim = 18\n yt_dim = 18\n elif config.dset == \"stock_index\":\n x_dim = 6\n yc_dim = 8\n yt_dim = 8\n \n assert 
x_dim is not None\n assert yc_dim is not None\n assert yt_dim is not None\n\n if config.model == \"lstm\":\n forecaster = stf.lstm_model.LSTM_Forecaster(\n # encoder\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n time_emb_dim=config.time_emb_dim,\n hidden_dim=config.hidden_dim,\n n_layers=config.n_layers,\n dropout_p=config.dropout_p,\n # training\n learning_rate=config.learning_rate,\n teacher_forcing_prob=config.teacher_forcing_start,\n l2_coeff=config.l2_coeff,\n loss=config.loss,\n linear_window=config.linear_window,\n )\n elif config.model == \"mtgnn\":\n forecaster = stf.mtgnn_model.MTGNN_Forecaster(\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n context_points=config.context_points,\n target_points=config.target_points,\n gcn_depth=config.gcn_depth,\n dropout_p=config.dropout_p,\n node_dim=config.node_dim,\n dilation_exponential=config.dilation_exponential,\n conv_channels=config.conv_channels,\n subgraph_size=config.subgraph_size,\n skip_channels=config.skip_channels,\n end_channels=config.end_channels,\n residual_channels=config.residual_channels,\n layers=config.layers,\n propalpha=config.propalpha,\n tanhalpha=config.tanhalpha,\n learning_rate=config.learning_rate,\n kernel_size=config.kernel_size,\n l2_coeff=config.l2_coeff,\n time_emb_dim=config.time_emb_dim,\n loss=config.loss,\n linear_window=config.linear_window,\n )\n elif config.model == \"lstnet\":\n forecaster = stf.lstnet_model.LSTNet_Forecaster(\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n context_points=config.context_points,\n hidRNN=config.hidRNN,\n hidCNN=config.hidCNN,\n hidSkip=config.hidSkip,\n CNN_kernel=config.CNN_kernel,\n skip=config.skip,\n dropout_p=config.dropout_p,\n output_fun=config.output_fun,\n learning_rate=config.learning_rate,\n l2_coeff=config.l2_coeff,\n loss=config.loss,\n linear_window=config.linear_window,\n )\n elif config.model == \"spacetimeformer\":\n forecaster = stf.spacetimeformer_model.Spacetimeformer_Forecaster(\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n start_token_len=config.start_token_len,\n attn_factor=config.attn_factor,\n d_model=config.d_model,\n n_heads=config.n_heads,\n e_layers=config.enc_layers,\n d_layers=config.dec_layers,\n d_ff=config.d_ff,\n dropout_emb=config.dropout_emb,\n dropout_token=config.dropout_token,\n dropout_attn_out=config.dropout_attn_out,\n dropout_qkv=config.dropout_qkv,\n dropout_ff=config.dropout_ff,\n global_self_attn=config.global_self_attn,\n local_self_attn=config.local_self_attn,\n global_cross_attn=config.global_cross_attn,\n local_cross_attn=config.local_cross_attn,\n performer_kernel=config.performer_kernel,\n performer_redraw_interval=config.performer_redraw_interval,\n post_norm=config.post_norm,\n norm=config.norm,\n activation=config.activation,\n init_lr=config.init_lr,\n base_lr=config.base_lr,\n warmup_steps=config.warmup_steps,\n decay_factor=config.decay_factor,\n initial_downsample_convs=config.initial_downsample_convs,\n intermediate_downsample_convs=config.intermediate_downsample_convs,\n embed_method=config.embed_method,\n l2_coeff=config.l2_coeff,\n loss=config.loss,\n linear_window=config.linear_window,\n class_loss_imp=config.class_loss_imp,\n time_emb_dim=config.time_emb_dim,\n null_value=config.null_value,\n )\n elif config.model == \"linear\":\n forecaster = stf.linear_model.Linear_Forecaster(\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n context_points=config.context_points,\n learning_rate=config.learning_rate,\n l2_coeff=config.l2_coeff,\n loss=config.loss,\n linear_window=config.linear_window,\n )\n elif config.model == 
\"s4\":\n forecaster = stf.s4_model.S4_Forecaster(\n context_points=config.context_points,\n target_points=config.target_points,\n d_state=config.d_state,\n d_model=config.d_model,\n d_x=x_dim,\n d_yc=yc_dim,\n d_yt=yt_dim,\n layers=config.layers,\n time_emb_dim=config.time_emb_dim,\n channels=config.channels,\n dropout_p=config.dropout_p,\n learning_rate=config.learning_rate,\n l2_coeff=config.l2_coeff,\n loss=config.loss,\n linear_window=config.linear_window,\n )\n\n return forecaster\n\n\ndef create_dset(config):\n INV_SCALER = lambda x: x\n SCALER = lambda x: x\n NULL_VAL = None\n PLOT_VAR_IDXS = None\n PLOT_VAR_NAMES = None\n\n if config.dset == \"metr-la\" or config.dset == \"pems-bay\":\n if config.dset == \"pems-bay\":\n assert (\n \"pems_bay\" in config.data_path\n ), \"Make sure to switch to the pems-bay file!\"\n data = stf.data.metr_la.METR_LA_Data(config.data_path)\n DATA_MODULE = stf.data.DataModule(\n datasetCls=stf.data.metr_la.METR_LA_Torch,\n dataset_kwargs={\"data\": data},\n batch_size=config.batch_size,\n workers=config.workers,\n )\n INV_SCALER = data.inverse_scale\n SCALER = data.scale\n NULL_VAL = 0.0\n\n elif config.dset == \"precip\":\n dset = stf.data.precip.GeoDset(dset_dir=config.dset_dir, var=\"precip\")\n DATA_MODULE = stf.data.DataModule(\n datasetCls=stf.data.precip.CONUS_Precip,\n dataset_kwargs={\n \"dset\": dset,\n \"context_points\": config.context_points,\n \"target_points\": config.target_points,\n },\n batch_size=config.batch_size,\n workers=config.workers,\n )\n NULL_VAL = -1.0\n elif config.dset in [\"mnist\", \"cifar\"]:\n if config.dset == \"mnist\":\n config.target_points = 28 * 28 - config.context_points\n datasetCls = stf.data.image_completion.MNISTDset\n else:\n config.target_points = 32 * 32 - config.context_points\n datasetCls = stf.data.image_completion.CIFARDset\n DATA_MODULE = stf.data.DataModule(\n datasetCls=datasetCls,\n dataset_kwargs={\"context_points\": config.context_points},\n batch_size=config.batch_size,\n workers=config.workers,\n )\n elif config.dset == \"copy\":\n # set these manually in case the model needs them\n config.context_points = config.copy_length + int(\n config.copy_include_lags\n ) # seq + lags\n config.target_points = config.copy_length\n DATA_MODULE = stf.data.DataModule(\n datasetCls=stf.data.copy_task.CopyTaskDset,\n dataset_kwargs={\n \"length\": config.copy_length,\n \"copy_vars\": config.copy_vars,\n \"lags\": config.copy_lags,\n \"mask_prob\": config.copy_mask_prob,\n \"include_lags\": config.copy_include_lags,\n },\n batch_size=config.batch_size,\n workers=config.workers,\n )\n else:\n data_path = config.data_path\n if config.dset == \"asos\":\n if data_path == \"auto\":\n data_path = \"./data/temperature-v1.csv\"\n target_cols = [\"ABI\", \"AMA\", \"ACT\", \"ALB\", \"JFK\", \"LGA\"]\n elif config.dset == \"solar_energy\":\n if data_path == \"auto\":\n data_path = \"./data/solar_AL_converted.csv\"\n target_cols = [str(i) for i in range(137)]\n elif \"toy\" in config.dset:\n if data_path == \"auto\":\n if config.dset == \"toy1\":\n data_path = \"./data/toy_dset1.csv\"\n elif config.dset == \"toy2\":\n data_path = \"./data/toy_dset2.csv\"\n else:\n raise ValueError(f\"Unrecognized toy dataset {config.dset}\")\n target_cols = [f\"D{i}\" for i in range(1, 21)]\n elif config.dset == \"exchange\":\n if data_path == \"auto\":\n data_path = \"./data/exchange_rate_converted.csv\"\n target_cols = [\n \"Australia\",\n \"United Kingdom\",\n \"Canada\",\n \"Switzerland\",\n \"China\",\n \"Japan\",\n \"New 
Zealand\",\n \"Singapore\",\n ]\n elif config.dset == \"stock_index\":\n if data_path == \"auto\":\n data_path = \"./data/stock_index.csv\"\n target_cols = [\n \"SP500\",\n \"DOW30\",\n \"NASDAQ\",\n \"SSE\",\n \"SZI\",\n \"KOSPI\",\n \"DAX30\",\n \"CAC40\",\n ]\n elif config.dset == \"crypto\":\n if data_path == \"auto\":\n data_path = \"./data/crypto_dset.csv\"\n target_cols = [\n \"ETH_open\",\n \"ETH_high\",\n \"ETHT_low\",\n \"ETH_close\",\n \"Volume BTC\",\n \"Volume USDT\",\n \"ETH_tradecount\",\n \"BTC_open\",\n \"BTC_high\",\n \"BTC_low\",\n \"BTC_close\",\n \"BTC_tradecount\",\n \"LTCUSDT_open\",\n \"LTCUSDT_high\",\n \"LTCUSDT_low\",\n \"LTCUSDT_close\",\n \"Volume LTC\",\n \"LTCUSDT_tradecount\",\n ]\n # only make plots of a few vars\n PLOT_VAR_NAMES = [\"ETH_close\", \"BTC_close\", \"ETH_high\", \"BTC_high\"]\n PLOT_VAR_IDXS = [target_cols.index(x) for x in PLOT_VAR_NAMES]\n\n dset = stf.data.CSVTimeSeries(\n data_path=data_path,\n target_cols=target_cols,\n ignore_cols=\"all\",\n )\n DATA_MODULE = stf.data.DataModule(\n datasetCls=stf.data.CSVTorchDset,\n dataset_kwargs={\n \"csv_time_series\": dset,\n \"context_points\": config.context_points,\n \"target_points\": config.target_points,\n \"time_resolution\": config.time_resolution,\n },\n batch_size=config.batch_size,\n workers=config.workers,\n )\n INV_SCALER = dset.reverse_scaling\n SCALER = dset.apply_scaling\n NULL_VAL = None\n\n return DATA_MODULE, INV_SCALER, SCALER, NULL_VAL, PLOT_VAR_IDXS, PLOT_VAR_NAMES\n\n\ndef create_callbacks(config):\n saving = pl.callbacks.ModelCheckpoint(\n dirpath=f\"./data/stf_model_checkpoints/{config.run_name}_{''.join([str(random.randint(0,9)) for _ in range(9)])}\",\n monitor=\"val/mse\",\n mode=\"min\",\n filename=f\"{config.run_name}\" + \"{epoch:02d}-{val/mse:.2f}\",\n save_top_k=1,\n )\n callbacks = [saving]\n\n callbacks.append(\n pl.callbacks.early_stopping.EarlyStopping(\n monitor=\"val/loss\",\n patience=10,\n )\n )\n\n if config.wandb:\n callbacks.append(pl.callbacks.LearningRateMonitor())\n\n if config.model == \"lstm\":\n callbacks.append(\n stf.callbacks.TeacherForcingAnnealCallback(\n start=config.teacher_forcing_start,\n end=config.teacher_forcing_end,\n steps=config.teacher_forcing_anneal_steps,\n )\n )\n if config.time_mask_loss:\n callbacks.append(\n stf.callbacks.TimeMaskedLossCallback(\n start=config.time_mask_start,\n end=config.target_points,\n steps=config.time_mask_anneal_steps,\n )\n )\n return callbacks\n\n\ndef main(args):\n if args.wandb:\n import wandb\n\n project = os.getenv(\"STF_WANDB_PROJ\")\n entity = os.getenv(\"STF_WANDB_ACCT\")\n log_dir = os.getenv(\"STF_LOG_DIR\")\n if log_dir is None:\n log_dir = \"./data/STF_LOG_DIR\"\n print(\n \"Using default wandb log dir path of ./data/STF_LOG_DIR. 
This can be adjusted with the environment variable `STF_LOG_DIR`\"\n )\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n assert (\n project is not None and entity is not None\n ), \"Please set environment variables `STF_WANDB_ACCT` and `STF_WANDB_PROJ` with \\n\\\n your wandb user/organization name and project title, respectively.\"\n experiment = wandb.init(\n project=project,\n entity=entity,\n config=args,\n dir=log_dir,\n reinit=True,\n )\n config = wandb.config\n wandb.run.name = args.run_name\n wandb.run.save()\n logger = pl.loggers.WandbLogger(\n experiment=experiment, save_dir=\"./data/stf_LOG_DIR\"\n )\n logger.log_hyperparams(config)\n\n # Dset\n (\n data_module,\n inv_scaler,\n scaler,\n null_val,\n plot_var_idxs,\n plot_var_names,\n ) = create_dset(args)\n\n # Model\n args.null_value = null_val\n forecaster = create_model(args)\n forecaster.set_inv_scaler(inv_scaler)\n forecaster.set_scaler(scaler)\n forecaster.set_null_value(null_val)\n\n # Callbacks\n callbacks = create_callbacks(args)\n test_samples = next(iter(data_module.test_dataloader()))\n\n if args.wandb and args.plot:\n callbacks.append(\n stf.plot.PredictionPlotterCallback(\n test_samples,\n var_idxs=plot_var_idxs,\n var_names=plot_var_names,\n total_samples=min(8, args.batch_size),\n )\n )\n\n if args.wandb and args.dset in [\"mnist\", \"cifar\"] and args.plot:\n callbacks.append(\n stf.plot.ImageCompletionCallback(\n test_samples,\n total_samples=min(16, args.batch_size),\n )\n )\n\n if args.wandb and args.dset == \"copy\" and args.plot:\n callbacks.append(\n stf.plot.CopyTaskCallback(\n test_samples,\n total_samples=min(16, args.batch_size),\n )\n )\n\n if args.wandb and args.model == \"spacetimeformer\" and args.attn_plot:\n\n callbacks.append(\n stf.plot.AttentionMatrixCallback(\n test_samples,\n layer=0,\n total_samples=min(16, args.batch_size),\n )\n )\n\n trainer = pl.Trainer(\n gpus=args.gpus,\n callbacks=callbacks,\n logger=logger if args.wandb else None,\n accelerator=\"dp\",\n gradient_clip_val=args.grad_clip_norm,\n gradient_clip_algorithm=\"norm\",\n overfit_batches=20 if args.debug else 0,\n accumulate_grad_batches=args.accumulate,\n sync_batchnorm=True,\n val_check_interval=args.val_check_interval,\n limit_val_batches=args.limit_val_batches,\n )\n\n # Train\n trainer.fit(forecaster, datamodule=data_module)\n\n # Test\n trainer.test(datamodule=data_module, ckpt_path=\"best\")\n\n # Predict (only here as a demo and test)\n forecaster.to(\"cuda\")\n xc, yc, xt, _ = test_samples\n yt_pred = forecaster.predict(xc, yc, xt)\n\n if args.wandb:\n experiment.finish()\n\n\nif __name__ == \"__main__\":\n # CLI\n parser = create_parser()\n args = parser.parse_args()\n\n for trial in range(args.trials):\n main(args)\n","repo_name":"AZZMM/stf","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":20357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"21612512535","text":"import json\nimport re\n\nfrom scrapy.spiders import SitemapSpider\n\nfrom locations.hours import OpeningHours\nfrom locations.items import Feature\n\nHOURS_RE = re.compile(r\"(?P\\w+) (?P\\S+) - (?P\\S+)\")\n\n\nclass EatnParkSpider(SitemapSpider):\n name = \"eatnpark\"\n item_attributes = {\"brand\": \"Eat'n Park\", \"brand_wikidata\": \"Q5331211\"}\n sitemap_urls = [\"https://locations.eatnpark.com/robots.txt\"]\n sitemap_rules = [(r\"/restaurants-\", \"parse\")]\n\n def parse(self, response):\n ldjson = 
response.xpath('//script[@type=\"application/ld+json\"]/text()')\n [data] = json.loads(ldjson.get())\n\n opening_hours = OpeningHours()\n for m in HOURS_RE.finditer(data[\"openingHours\"]):\n g = m.groupdict()\n opening_hours.add_range(g[\"day\"], g[\"open_time\"], g[\"close_time\"])\n\n properties = {\n \"ref\": re.search(r\"-(\\d+)\\.html\", response.url).group(1),\n \"website\": response.url,\n \"name\": response.css(\"span.location-name::text\").get(),\n \"lat\": data[\"geo\"][\"latitude\"],\n \"lon\": data[\"geo\"][\"longitude\"],\n \"street_address\": data[\"address\"][\"streetAddress\"],\n \"city\": data[\"address\"][\"addressLocality\"],\n \"state\": data[\"address\"][\"addressRegion\"],\n \"postcode\": data[\"address\"][\"postalCode\"],\n \"opening_hours\": opening_hours.as_opening_hours(),\n \"phone\": data[\"address\"][\"telephone\"],\n }\n yield Feature(**properties)\n","repo_name":"alltheplaces/alltheplaces","sub_path":"locations/spiders/eatnpark.py","file_name":"eatnpark.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":488,"dataset":"github-code","pt":"30"} +{"seq_id":"23823808205","text":"import math\nNama = input(\"Nama : \")\nn = float(input(\"Panjang persegi nametag (cm) : \"))\nm = float(input(\"Panjang trapesium nametag (cm) : \"))\nbanyak_nametag = float(input(\"Banyak nametag : \"))\n\nSetengah_lingkaran = round((math.pi * ((n /2)** 2) / 2), 2)\nPersegi = round((n * n), 2)\nSegitiga = round(((n * n) / 2), 2)\nTrapesium = round(((n + m) * n / 2), 2)\n\nSetengah_lingkaran = float(Setengah_lingkaran)\nPersegi = float(Persegi)\nSegitiga = float(Segitiga)\nTrapesium = float(Trapesium)\n\nLuas_1_nametag = (Setengah_lingkaran + Persegi + Segitiga + Trapesium)\nLuas_total_nametag = round((banyak_nametag * Luas_1_nametag), 2)\nUang = Luas_total_nametag * 4 / 10\nUang = math.ceil(Uang/1000) * 1000\n\nprint ()\nprint (\"Halo\", Nama, \"! 
Berikut informasi terkait nametag kamu :\")\nprint ()\nprint (\"Luas 1 nametag :\", Luas_1_nametag, \"cm^2\")\nprint (\"Luas total nametag :\", Luas_total_nametag, \"cm^2\")\nprint (\"Uang yang diperlukan : Rp\", Uang)\n","repo_name":"Haramchu/Sem-1","sub_path":"DDP-1/Python/F_NET_2206082114_ClementSamuelMarly_Lab01.py","file_name":"F_NET_2206082114_ClementSamuelMarly_Lab01.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"3920212901","text":"from django.contrib import admin\nfrom mqtt.models import Device,DevicePowerLog,DeviceTimerLog\n\n\n# Register your models here.\nclass DeviceAdminView(admin.ModelAdmin):\n list_display=[\n \"id\",\n \"deviceName\",\n \"status\",\n \"power\",\n \"voltage\",\n \"current\",\n \"unit\",\n \"powerRate\",\n \"updatedAt\",\n \"registeredAt\"\n ]\n\n\nclass DevicePowerLogAdminView(admin.ModelAdmin):\n list_display=[\n \"id\",\n \"logId\",\n \"power\",\n \"createdAt\"\n ]\n\nclass DeviceTimerLogAdminView(admin.ModelAdmin):\n list_display=[\n \"deviceId\",\n \"startTime\",\n \"endTime\",\n \"averagePower\"\n ]\n\nadmin.site.register(Device,DeviceAdminView)\nadmin.site.register(DevicePowerLog,DevicePowerLogAdminView)\nadmin.site.register(DeviceTimerLog,DeviceTimerLogAdminView)","repo_name":"karthidev2021/Energy_Monitor_Mini_Pro","sub_path":"backend/powerMon/mqtt/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"8402296352","text":"#!/usr/bin/env python3\n\n\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\nimport time\nimport configparser\nimport logging\nfrom pushover import PushOver\n\nconf = configparser.ConfigParser()\nconf.read(['doorbell.ini','doorbell_local.ini'])\n\npushover=PushOver(conf['Pushover']['Key'], conf['Pushover']['Token'])\t\t\n\nlog = logging.getLogger(__name__)\nlogging.basicConfig(\n level=conf[\"LOG\"][\"LEVEL\"],\n format=\"%(levelname)s %(module)s.%(funcName)s %(message)s\",\n)\nlog.info(f\"Starting service loglevel={conf['LOG']['LEVEL']} \")\n\n\nclass Doorbell:\n\tdef __init__(self):\n\t\tself.last_pressed=time.time()\n\t\tGPIO.setup(7, GPIO.IN)\n\t\tself.stateChangeTime=time.time()\n\t\n\tdef check_knap(self):\n\t\twhile True:\n\t\t\tlog.debug(\"venter\")\n\t\t\tedge=GPIO.wait_for_edge(7, GPIO.RISING)\n\t\t\ttime.sleep(0.005) # debounce for 5mSec\n # only show valid edges\n\t\t\tif GPIO.input(7) == 1:\n\t\t\t\tlog.debug(\"True Rising\")\n\t\t\t\tlast_stateChangeTime=self.stateChangeTime \n\t\t\t\tself.stateChangeTime=time.time()\n\t\t\t\tlog.debug(\"Ringklokke trykket\" \" efter \" + str(self.stateChangeTime-last_stateChangeTime))\t\n\t\t\t\tself.tryk(0)\n\t\t\telse:\n\t\t\t\tlog.debug(\"False Rising\")\n\n\n\t\t\t\n\tdef tryk(self,tryktid):\n\t\tif time.time()-self.last_pressed > 5: # kun et tryk pr 5 sekunder\n\t\t\tlog.info(\"Ringklokke trykket\")\n\t\t\trv = pushover.send(conf['Pushover']['MessageBody'],conf['Pushover']['MessageTitle'],conf['Pushover']['MessageSound'])\n\t\t\tlog.debug(rv)\n\t\tself.last_pressed=time.time()\n\n\n\t\t\ndef main():\n doorbell=Doorbell()\n doorbell.check_knap()\n\nif __name__ == \"__main__\":\n main()\n\n\n\t\t\t\n\n\n","repo_name":"avzdk/doorbell","sub_path":"doorbell.py","file_name":"doorbell.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} 
+{"seq_id":"7205302931","text":"import cv2\nimport json\n\n# Read json files that contains shapes detail\nwith open('shapes.json') as json_file:\n shapes = json.load(json_file)\n\n# Read a test image and convert that to gray scale\nimage = cv2.imread('test/someshapes.jpg')\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Show preview image\ncv2.imshow('Identifying Shapes', image)\ncv2.waitKey(0)\n\n# Find the edge using Canny algorithm\nedged = cv2.Canny(gray, 10, 50)\n\n# Find the binary threshold\n_, thresh = cv2.threshold(edged, 127, 255, cv2.THRESH_BINARY)\n\n# Find contours of the image with list retrieval\n# Chain the points continuously with CHAIN_APPROX_NONE\ncontours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\nfor contour in contours:\n\n # Epsilon is accuracy parameter for maximum distance from\n # to approximated contour\n epsilon = 0.01 * cv2.arcLength(contour, closed=True) # Set accuracy to 1% to get detail contours\n approx = cv2.approxPolyDP(contour, epsilon, closed=True)\n\n # Approximate the shape's number of corners\n n_corners = len(approx)\n\n # Try to get name of shapes from json\n try:\n name = shapes[\"name\"][str(n_corners - 3)]\n except KeyError:\n name = \"\"\n\n # Get the centroid of the X and Y axes\n # https://en.wikipedia.org/wiki/Centroid\n M = cv2.moments(contour)\n cx = int(M['m10'] / M['m00'])\n cy = int(M['m01'] / M['m00'])\n\n # Draw contours if shape data is in json\n if n_corners in range(3, 7):\n cv2.drawContours(image, [contour], 0, shapes[\"color\"][str(n_corners - 3)], -1)\n\n # Distinguish shapes with four corners into squares and rectangles\n if n_corners == 4:\n x, y, weight, height = cv2.boundingRect(contour)\n if abs(weight - height) <= 3:\n name = \"Square\"\n\n # Assign name to Polygon\n elif 7 <= n_corners < 15:\n cv2.drawContours(image, [contour], 0, (255, 0, 0), -1)\n name = \"Polygon\"\n\n # Approximate the circle\n elif n_corners >= 15:\n cv2.drawContours(image, [contour], 0, (255, 0, 196), -1)\n name = \"Circle\"\n\n # Put text in center of the shape\n cv2.putText(image, name, (cx - 50 - len(name), cy), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)\n\n cv2.imshow('Identifying Shapes', image)\n cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","repo_name":"dark-hermes/OpenCV-Shape-Identifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"23866024902","text":"import torch.nn\nfrom scipy.spatial import distance_matrix\nimport numpy as np\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader, Subset\nimport tqdm\nfrom keras.models import Model\n\nfrom query_strategies.query_strategy import QueryMethod as QueryMethod\nfrom query_strategies.query_strategy import get_unlabeled_idx as get_unlabeled_idx\n\n\nclass CoreSetSampling(QueryMethod):\n \"\"\"\n An implementation of the greedy core set query strategy.\n \"\"\"\n\n def __init__(self, model, model_type, n_pool, embedding_shape, init_lb, dataset_name, model_name, gpu=None, **kwargs):\n\n super(CoreSetSampling, self).__init__(model, model_type, n_pool)\n self.strategy_name = \"coreset\"\n self.dataset_name = dataset_name\n self.model_name = model_name\n self.embeding_shape = embedding_shape\n self.lb_idxs = init_lb\n if gpu is None:\n self.device = torch.device(\"cpu\")\n elif type(gpu) == str:\n self.device = torch.device(\"cuda:{}\".format(gpu))\n self.kwargs = 
kwargs\n\n def greedy_k_center(self, labeled, unlabeled, amount):\n\n greedy_indices = []\n\n # get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):\n min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n for j in range(1, labeled.shape[0], 1000):\n if j + 1000 < labeled.shape[0]:\n dist = distance_matrix(labeled[j:j+1000, :], unlabeled)\n else:\n dist = distance_matrix(labeled[j:, :], unlabeled)\n min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))\n min_dist = np.min(min_dist, axis=0)\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n\n # iteratively insert the farthest index and recalculate the minimum distances:\n farthest = np.argmax(min_dist)\n greedy_indices.append(farthest)\n for i in range(amount-1):\n dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled)\n min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))\n min_dist = np.min(min_dist, axis=0)\n min_dist = min_dist.reshape((1, min_dist.shape[0]))\n farthest = np.argmax(min_dist)\n greedy_indices.append(farthest)\n\n return np.array(greedy_indices)\n\n def get_embedding_model(self):\n # tensorflow\n if self.task_model_type == \"tensorflow\":\n embedding_model = Model(input=self.task_model.input, output=self.task_model.get_layer('softmax').input)\n else:\n # pytorch version\n embedding_model = torch.nn.Sequential(*list(self.task_model.children())[:-1])\n return embedding_model\n\n def get_embedding(self, trainset):\n embedding_model = self.get_embedding_model()\n loader = DataLoader(trainset, shuffle=False, **self.kwargs['loader_te_args'])\n embedding_model.to(self.device)\n embedding_model.eval()\n\n train_num = len(trainset.targets)\n batch_size = self.kwargs['loader_te_args']['batch_size']\n embedding = np.zeros((train_num, self.embeding_shape))\n with torch.no_grad():\n for idx, (x, y) in enumerate(loader):\n x, y = x.to(self.device), y.to(self.device)\n out = embedding_model(x)\n p = out.view(out.shape[0], -1)\n embedding[idx*batch_size:(idx+1)*batch_size] = p.cpu().numpy()\n return embedding\n\n def query(self, embedding, amount):\n labeled_idx = self.lb_idxs\n\n unlabeled_idx = get_unlabeled_idx(embedding.shape[0], labeled_idx)\n\n # use the learned representation for the k-greedy-center algorithm:\n new_indices = self.greedy_k_center(embedding[labeled_idx, :], embedding[unlabeled_idx, :], amount)\n # return np.hstack((labeled_idx, unlabeled_idx[new_indices]))\n scores = np.ones_like(new_indices)\n return unlabeled_idx[new_indices], scores\n\n def update_lb_idxs(self, new_indices):\n self.lb_idxs = new_indices\n\n def train(self, total_epoch, task_model, complete_dataset):\n\n \"\"\"\n Only train samples from labeled dataset\n :return:\n \"\"\"\n print(\"[Training] labeled and unlabeled data\")\n\n task_model.to(self.device)\n # setting idx_lb\n idx_lb_train = self.lb_idxs\n train_dataset = Subset(complete_dataset, idx_lb_train)\n train_loader = DataLoader(train_dataset, batch_size=self.kwargs['loader_tr_args']['batch_size'], shuffle=True, num_workers=self.kwargs['loader_tr_args']['num_workers'])\n optimizer = optim.SGD(\n task_model.parameters(), lr=self.kwargs['optimizer_args']['lr'], momentum=self.kwargs['optimizer_args']['momentum'], weight_decay=self.kwargs['optimizer_args']['weight_decay']\n )\n criterion = torch.nn.CrossEntropyLoss(reduction='none')\n 
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=total_epoch)\n\n\n for epoch in range(total_epoch):\n task_model.train()\n\n total_loss = 0\n n_batch = 0\n acc = 0\n\n for inputs, targets in train_loader:\n n_batch += 1\n inputs, targets = inputs.to(self.device), targets.to(self.device)\n\n optimizer.zero_grad()\n outputs = task_model(inputs)\n loss = criterion(outputs, targets)\n loss = torch.mean(loss)\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n predicted = outputs.argmax(1)\n b_acc = 1.0 * (targets == predicted).sum().item() / targets.shape[0]\n acc += b_acc\n\n total_loss /= n_batch\n acc /= n_batch\n\n if epoch % 50 == 0 or epoch == total_epoch-1:\n print('==========Inner epoch {:d} ========'.format(epoch))\n print('Training Loss {:.3f}'.format(total_loss))\n print('Training accuracy {:.3f}'.format(acc*100))\n scheduler.step()\n del self.task_model\n self.task_model = task_model\n\n def predict(self, testset):\n\n loader_te = DataLoader(testset, shuffle=False, **self.kwargs['loader_te_args'])\n self.task_model.to(self.device)\n self.task_model.eval()\n\n test_num = len(testset.targets)\n batch_size = self.kwargs['loader_te_args']['batch_size']\n pred = np.zeros(test_num, dtype=np.long)\n with torch.no_grad():\n for idx, (x, y) in enumerate(loader_te):\n x, y = x.to(self.device), y.to(self.device)\n out = self.task_model(x)\n p = out.argmax(1)\n pred[idx*batch_size:(idx+1)*batch_size] = p.cpu().numpy()\n return pred\n\n def test_accu(self, testset):\n pred = self.predict(testset)\n label = np.array(testset.targets)\n return np.sum(pred == label) / float(label.shape[0])\n\n\nif __name__ == \"__main__\":\n a = np.random.rand(200, 10)\n b = np.random.rand(300, 10)\n tot = np.concatenate((a, b), axis=0)\n model = None\n strategy = CoreSetSampling(model, \"pytorch\", 500)\n # query 20 new samples from unlabeled data\n new_idx = strategy.query(tot, np.arange(200), 20)\n\n","repo_name":"xianglinyang/ActiveLearning","sub_path":"query_strategies/coreset.py","file_name":"coreset.py","file_ext":"py","file_size_in_byte":7501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"325009144","text":"# -*- coding:utf-8 -*-\nimport heapq\n\n\ndef solution(jobs):\n answer = 0\n time = 0\n count = 0\n condition = [False] * len(jobs)\n q = []\n while count != len(jobs):\n for i in range(len(jobs)):\n s, t = jobs[i][0], jobs[i][1]\n if not condition[i] and s <= time:\n heapq.heappush(q, [t, s])\n condition[i] = True\n if q:\n count += 1\n t, s = heapq.heappop(q)\n time += t\n answer += (time - s)\n else:\n time += 1\n\n return answer // len(jobs)\n\nprint(solution([[0, 3], [1, 9], [2, 6]]))","repo_name":"CodingTestStudy/cote_repo","sub_path":"프로그래머스 복습(python)/힙(Heap)/디스크 컨트롤러.py","file_name":"디스크 컨트롤러.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"17679199828","text":"sim = \"sim\"\nnão = \"não\"\n\nprint(\"Bem-vindo(a) a caixa ATM!\")\ninput(\"Numero do cartão: \")\ninput(\"Senha do cartão: \")\nprint(\"Bem-vindo(a) a sua conta! \")\nsaldo = input(\"Quer ver seu saldo? \")\nif saldo == \"sim\":\n print(\"Seu saldo é de 50000\")\n retirada = input(\"Deseja retirar? \")\n if retirada == \"sim\":\n quantidade = int(input(\"Quanto? 
\"))\n print(\"Seu saldo restante é de\",50000-quantidade)","repo_name":"alicefmc/projeto_100","sub_path":"C_100/Atm.py","file_name":"Atm.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"34720460110","text":"\"\"\"\r\n泡芙加速器 v1.0\r\n\r\n任务:签到 刷视频\r\n\r\ncookie填到变量 pfjsq 中, 多账户&间隔\r\nexport pfjsq=\"\"\r\n\r\ncron: 16 9,14 * * *\r\nconst $ = new Env(\"泡芙加速器\");\r\n\"\"\"\r\n\r\nimport requests\r\nimport time\r\nimport os\r\nimport sys\r\n\r\nclass PuffAccelerator:\r\n def __init__(self, pfjsq):\r\n # 检测账户变量\r\n self.pfjsq = pfjsq\r\n\r\n # 授权密钥\r\n self.headers = {\r\n \"Host\": \"api-admin-js.paofujiasu.com\",\r\n \"Connection\": \"keep-alive\",\r\n \"User-Agent\": \"Mozilla/5.0 (Linux; Android 13; M2007J1SC Build/TKQ1.221114.001; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/107.0.5304.141 Mobile Safari/537.36 XWEB/5075 MMWEBSDK/20230405 MMWEBID/8380 MicroMessenger/8.0.35.2360(0x2800235B) WeChat/arm64 Weixin NetType/WIFI Language/zh_CN ABI/arm64 MiniProgramEnv/android\",\r\n \"content-type\": \"application/json\",\r\n \"token\": self.pfjsq,\r\n \"tokenType\": \"applet\",\r\n \"Sec-Fetch-Dest\": \"empty\",\r\n \"Referer\": \"https://servicewechat.com/wx5bf04507567e9d72/14/page-frame.html\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\"\r\n }\r\n\r\n # 查询用户信息\r\n def get_pfjsq_acceleration_time(self):\r\n url = 'https://api-admin-js.paofujiasu.com/api/v1/user/gw/userinfo'\r\n response = requests.post(url, headers=self.headers)\r\n if response.status_code == 200:\r\n data = response.json()\r\n if data['info'] == '查询成功':\r\n accelerate_time = data['data']['remain_accelerate_time']\r\n result = f'✅加速时间 | {accelerate_time}'\r\n return result\r\n else:\r\n return '⚠️cookie过期'\r\n else:\r\n return '⚠️cookie过期'\r\n \r\n # 查询用户信息\r\n def get_pfjsq_user(self):\r\n url = 'https://api-admin-js.paofujiasu.com/api/v1/user/gw/userinfo'\r\n response = requests.post(url, headers=self.headers)\r\n if response.status_code == 200:\r\n data = response.json()\r\n if data['info'] == '查询成功':\r\n user_account = data['data']['user_account']\r\n return user_account\r\n else:\r\n return '⚠️cookie过期'\r\n else:\r\n return '⚠️cookie过期'\r\n\r\n # 查询用户金币信息\r\n def get_pfjsq_coins(self):\r\n url = 'https://api-admin-js.paofujiasu.com/client/api/v1/virtual_currency/species_quantity'\r\n response = requests.get(url, headers=self.headers)\r\n if response.status_code == 200:\r\n data = response.json()\r\n if data['info'] == '请求成功':\r\n user_coins = data['data']['remaining_quantity']\r\n result = f'✅当前金币 | {user_coins}'\r\n return result\r\n else:\r\n return '⚠️cookie过期'\r\n else:\r\n return '⚠️cookie过期'\r\n\r\n # 用户签到\r\n def get_pfjsq_check(self):\r\n url = 'https://api-admin-js.paofujiasu.com/client/api/v1/virtual_currency/sign_in_for_species'\r\n data = {'res_type': 1}\r\n response = requests.post(url, headers=self.headers, json=data)\r\n if response.status_code == 200:\r\n data = response.json()\r\n if data['info'] == '请求成功':\r\n return '✅签到成功'\r\n else:\r\n return '⚠️cookie过期'\r\n elif response.status_code == 400:\r\n data = response.json()\r\n if data['info'] == '每天最多签到1次哦~':\r\n return '✅今日已签到'\r\n elif data['info'] == '最多拥有5个金币哦~':\r\n return '⚠️金币已上限'\r\n else:\r\n return '⚠️cookie过期'\r\n else:\r\n return '⚠️cookie过期'\r\n\r\n # 刷视频\r\n def get_pfjsq_video(self):\r\n url = 'https://api-admin-js.paofujiasu.com/client/api/v1/virtual_currency/look_ad_for_species'\r\n data = {'res_type': 1}\r\n response 
= requests.post(url, headers=self.headers, json=data)\r\n if response.status_code == 200:\r\n data = response.json()\r\n if data['info'] == '请求成功':\r\n return '✅刷视频成功'\r\n else:\r\n return '⚠️cookie过期'\r\n elif response.status_code == 400:\r\n data = response.json()\r\n if data['info'] == '每天最多3次看广告激励哦~':\r\n return '✅刷视频已上限'\r\n else:\r\n return '⚠️cookie过期'\r\n else:\r\n return '⚠️cookie过期'\r\n\r\n # 主程序\r\n def run(self):\r\n # 任务列表\r\n tasks = [\r\n (\"每日签到\", self.get_pfjsq_check),\r\n (\"第一次刷视频\", self.get_pfjsq_video),\r\n (\"第二次刷视频\", self.get_pfjsq_video),\r\n (\"第三次刷视频\", self.get_pfjsq_video),\r\n (\"查询时间\", self.get_pfjsq_acceleration_time),\r\n (\"查询金币\", self.get_pfjsq_coins)\r\n ]\r\n # 执行任务\r\n for task_name, task_function in tasks:\r\n if self.get_pfjsq_user() == '⚠️cookie过期':\r\n print(self.get_pfjsq_user())\r\n break\r\n print(f'🔁{self.get_pfjsq_user()} | 正在执行任务 | {task_name}')\r\n result = task_function()\r\n if result == '⚠️cookie过期' or result == '⚠️金币已上限':\r\n print(result)\r\n break\r\n print(result)\r\n time.sleep(5)\r\n print('*****************************************')\r\n\r\nif __name__ == '__main__':\r\n print('🔔泡芙加速器 | 开始')\r\n #检测账户变量\r\n pfjsq = os.environ.get(\"pfjsq\") \r\n if not pfjsq:\r\n sys.exit(\"⚠️未发现有效账号,退出程序!\") \r\n #分割账户\r\n if \"&\" not in pfjsq:\r\n accounts = [pfjsq]\r\n else:\r\n accounts = pfjsq.split(\"&\")\r\n # 遍历账户列表 | 为每个账户创建一个类实例并执行任务\r\n for account in accounts:\r\n paofujiasu_client = PuffAccelerator(account)\r\n paofujiasu_client.run()","repo_name":"Love-Lyu/Cute-wool","sub_path":"pfjsq.py","file_name":"pfjsq.py","file_ext":"py","file_size_in_byte":6248,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"30"} +{"seq_id":"11280919241","text":"import PySimpleGUI as sg\r\nfrom amiibo_functions import *\r\nfrom character_dictionary import CharacterDictionary\r\nimport os\r\nimport requests\r\n\r\n# Menu key variables for GUI\r\n# Transplant Tab\r\ncharacter_key = '_character_'\r\nsave_location_key = '_save_location_'\r\nsubmitted_key = '_transplant-submission_'\r\nbin_name_key = '_bin-name_'\r\nsuccess_text_key = '_transplant-success_'\r\nrandomize_sn_key = '_randomize-sn_'\r\nfolder_location_key = '_selected-folder_'\r\nbrowsed_key = '_browse-submission_'\r\npwd_key = '_current-directory_'\r\n# Serial Swapper Tab\r\nbrowse1_key = '_browse1_'\r\nbrowse2_key = '_browse2_'\r\ndonor_browse_key = '_donor-browse_'\r\nreceiver_browse_key = '_receiver-browse_'\r\ndonor_box_key = '_donor-box_'\r\nreceiver_box_key = '_receiver-box_'\r\nswapper_save_key = '_active-swap-save_'\r\nswap_save_location_key = '_swap-save-location_'\r\ndisplay_dir1_key = '_swap_dir1_display_'\r\ndisplay_dir2_key = '_swap_dir2_display_'\r\nsuccess_swap_key = '_swap-completed_'\r\n# Serial Shuffle\r\nbrowse3_key = '_browse3_'\r\nfolder_location2_key = '_selected-folder2_'\r\npwd2_key = '_current-directory2_'\r\nshuffle_save_key = '_save-shuffle_'\r\nshuffle_save_location_key = '_shuffle-save-location_'\r\nshuffle_name_key = '_shuffle-name_'\r\nsuccess_shuffle_key = '_shuffle-success_'\r\n# Character.xml Modify Tab\r\nadd_character_key = '_add-character_'\r\ncharacter_edit_box_key = '_character-edit-box_'\r\ndelete_character_key = '_delete-character_'\r\nchange_enable_key = '_enable-status_'\r\n\r\n# I know global variables are bad but I'm doing this until I work this into a class\r\nname_formatter = {}\r\n\r\n\r\ndef no_selection_error(error_phrase):\r\n sg.popup(\"ERROR:\", error_phrase)\r\n\r\n\r\ndef found_bins(directory):\r\n 
found_bins = []\r\n for bin_file in os.listdir(directory):\r\n if bin_file[-3:] == \"bin\":\r\n if bin_file != 'unfixed-info.bin' and bin_file != 'locked-secret.bin':\r\n # To catch powersaves dumb format\r\n if '[' not in bin_file:\r\n found_bins.append(bin_file[:-4])\r\n name_formatter[bin_file[:-4]] = bin_file\r\n else:\r\n found_bins.append(bin_file.split('[')[1].split(']')[0])\r\n name_formatter[bin_file.split('[')[1].split(']')[0]] = bin_file\r\n\r\n return found_bins\r\n\r\n\r\ndef update_all_listboxes(window, char_dict, directory1, directory2):\r\n if directory1 != \"\":\r\n updated_dir1 = found_bins(directory1)\r\n\r\n window[bin_name_key].update(updated_dir1)\r\n window[donor_box_key].update(updated_dir1)\r\n window[shuffle_name_key].update(updated_dir1)\r\n\r\n if directory2 != \"\":\r\n updated_dir2 = found_bins(directory2)\r\n window[receiver_box_key].update(updated_dir2)\r\n\r\n window[character_key].update(char_dict.get_list(insert_random=True))\r\n\r\n window[character_edit_box_key].update(char_dict.print_contents())\r\n\r\n # refresh window to show updates\r\n window.refresh()\r\n\r\n\r\ndef main():\r\n # Initialize variables that get used throughout the program\r\n version_number = \"2.1.0\"\r\n\r\n if not ((os.path.exists(\"Brain_Transplant_Assets/unfixed-info.bin\") and os.path.exists(\"Brain_Transplant_Assets/locked-secret.bin\")) or os.path.exists(\"Brain_Transplant_Assets/key_retail.bin\")):\r\n no_selection_error(\"You are missing the encryption/decryption keys for amiibo.\\nPlease place them in the Brain_Transplant_Assets Folder.\")\r\n os._exit(0)\r\n if not os.path.exists(\"Brain_Transplant_Assets/characters.xml\"):\r\n try:\r\n xml = requests.get(\"https://raw.githubusercontent.com/MiDe-S/amiibo_transplant/master/Brain_Transplant_Assets/characters.xml\")\r\n with open(\"Brain_Transplant_Assets/characters.xml\", \"w+\") as xml_file:\r\n xml_file.write(xml.text)\r\n xml_file.close()\r\n except Exception:\r\n no_selection_error(\"Something is wrong with characters.xml.\\nTry deleting and reinstalling the program or just grab character.xml from the github repository.\")\r\n os._exit(0)\r\n\r\n sg.theme(\"Dark Blue 12\")\r\n\r\n # gets current key_directory\r\n directory1 = os.path.dirname(os.path.realpath(__file__))\r\n\r\n char_dict = CharacterDictionary()\r\n\r\n transplanter = BinManager(char_dict)\r\n\r\n # To format names to fix powersaves formatting\r\n located_bins1 = found_bins(directory1)\r\n\r\n # transplant tab\r\n characters = char_dict.get_list(insert_random=True)\r\n transplant_layout = [[sg.FolderBrowse(target=browsed_key, key=folder_location_key, enable_events=True), sg.Text(\"Currently looking at:\"), sg.Text(directory1, key=pwd_key, auto_size_text=True)],\r\n [sg.Input(key=submitted_key, enable_events=True, visible=False), sg.Input(key=browsed_key, enable_events=True, visible=False)],\r\n [sg.Listbox(located_bins1, sg.LISTBOX_SELECT_MODE_SINGLE, size=(40, 10), key=bin_name_key), sg.VerticalSeparator(),\r\n sg.Listbox(characters, sg.LISTBOX_SELECT_MODE_SINGLE, size=(30, 10), key=character_key)],\r\n [sg.FileSaveAs(\"Transplant\", target=submitted_key, key=save_location_key, file_types=(('Bin Files', '*.bin'),), default_extension=\".bin\"),\r\n sg.Checkbox(\"Randomize Serial Number\", key=randomize_sn_key, enable_events=True, default=True)],\r\n [sg.Text(key=success_text_key, size=(10, 1), visible=False)]]\r\n\r\n # serial swap tab\r\n directory2 = directory1\r\n located_bins2 = found_bins(directory2)\r\n serial_swapper_layout = 
[[sg.Input(key=browse1_key, enable_events=True, visible=False), sg.Input(key=browse2_key, enable_events=True, visible=False), sg.Input(key=swapper_save_key, enable_events=True, visible=False)],\r\n                             [sg.FolderBrowse(\"Donor\", key=donor_browse_key, target=browse1_key, enable_events=True),\r\n                             sg.Text(directory1, key=display_dir1_key, auto_size_text=True),\r\n                             sg.FolderBrowse(\"Receiver\", key=receiver_browse_key, target=browse2_key, enable_events=True),\r\n                             sg.Text(directory2, key=display_dir2_key, auto_size_text=True)],\r\n                             [sg.Listbox(located_bins1, sg.LISTBOX_SELECT_MODE_SINGLE, key=donor_box_key, size=(40, 10)), sg.VerticalSeparator(), sg.Listbox(located_bins2, sg.LISTBOX_SELECT_MODE_SINGLE, key=receiver_box_key, size=(40, 10))],\r\n                             [sg.FileSaveAs(\"Transplant Figure Metadata\", target=swapper_save_key, key=swap_save_location_key, file_types=(('Bin Files', '*.bin'),), default_extension=\".bin\")],\r\n                             [sg.Text(key=success_swap_key, size=(10, 1), visible=False)],\r\n                             [sg.Text(\"Donor is a bin from the figure itself, Receiver is the bin that has the training data you want to put on the figure.\")],]\r\n\r\n    # serial shuffle tab\r\n    serial_shuffle_layout = [[sg.Input(key=browse3_key, enable_events=True, visible=False), sg.Input(key=shuffle_save_key, enable_events=True, visible=False), sg.FolderBrowse(target=browse3_key, key=folder_location2_key, enable_events=True), sg.Text(\"Currently looking at:\"), sg.Text(directory1, key=pwd2_key, auto_size_text=True)], [sg.Listbox(located_bins1, sg.LISTBOX_SELECT_MODE_SINGLE, size=(70, 11), key=shuffle_name_key)], [sg.Text(key=success_shuffle_key, size=(10, 1), visible=False)], [sg.FileSaveAs(target=shuffle_save_key, key = shuffle_save_location_key)]]\r\n\r\n    # character list tab\r\n    dict_contents = char_dict.print_contents()\r\n    character_list_editor_layout = [[sg.Listbox(dict_contents, sg.LISTBOX_SELECT_MODE_SINGLE, size=(50, 13), key=character_edit_box_key, pad=(5, 5))],\r\n                                    [sg.Button(\"Add\", key=add_character_key), sg.Button(\"Enable/Disable\", key=change_enable_key), sg.Button(\"Delete\", key=delete_character_key)]]\r\n\r\n    # about tab\r\n    about_layout = [[sg.Text(\"Version Number {}\".format(version_number))],\r\n                    [sg.Text(\"If you encounter issues raise an issue on github or dm MiDe#9934 on discord\")],\r\n                    [sg.Text(\"The transplant tab changes the character of the bin.\")],\r\n                    [sg.Text(\"The Figure Metadata transplant tab copies the SN of the donor on to the receiver.\")],\r\n                    [sg.Text(\"*In order to put a bin onto a figure, you have to perform the metadata transplant.\")],\r\n                    [sg.Text(\"*then put the transplanted bin on a powertag, save it in powersaves, THEN it will appear in the restore tab.\")],\r\n                    [sg.Text(\"*POWERSAVES will say the restoration failed, but it actually didn't.\")],\r\n                    [sg.Text(\"The Characters.xml Editor tab lets you add new amiibo ID's to the Character box.\")],\r\n                    [sg.Text(\"Shoutouts to the amiibo homies at USAC: https://discord.gg/2SEqk9p\", tooltip=\"I'm too lazy to make this an actual link for now\")]]\r\n\r\n    tabs = [[sg.TabGroup([[sg.Tab(\"Transplant\", transplant_layout),\r\n                           sg.Tab(\"Figure Metadata Transplant\", serial_swapper_layout, element_justification='center'),\r\n                           sg.Tab('Serial Number Shuffle', serial_shuffle_layout, element_justification='center'),\r\n                           sg.Tab(\"Characters.xml Editor\", character_list_editor_layout, element_justification='center'),\r\n                           sg.Tab(\"About\", about_layout)]])]]\r\n\r\n    window = sg.Window('MiDe\'s Brain Transplant Service v{}'.format(version_number), tabs, 
element_justification='center')\r\n\r\n    while True:\r\n        event, values = window.read()\r\n        print(event, values)\r\n        # Transplant Tab\r\n        if event == submitted_key:\r\n            bins_to_transplant = values[bin_name_key]\r\n            selected_characters = values[character_key]\r\n            # If an option wasn't selected\r\n            if len(selected_characters) == 0 or len(bins_to_transplant) == 0:\r\n                no_selection_error(\"Please select a bin and a character to transplant.\")\r\n            # If only 1 character and 1 bin were chosen do the single transplant\r\n            elif len(selected_characters) == 1 and len(bins_to_transplant) == 1:\r\n                # If save as menu is closed or cancelled do nothing\r\n                if values[save_location_key] != '':\r\n                    chosen_character = transplanter.transplant(r\"\\\\\".join([directory1, name_formatter[bins_to_transplant[0]]]), selected_characters[0], values[save_location_key], values[randomize_sn_key])\r\n                    if chosen_character is None:\r\n                        no_selection_error(\"Not an amiibo bin\")\r\n                    else:\r\n                        # Prints message showing a successful transplant\r\n                        success_message = \"{} bin was saved at {}\".format(chosen_character, values[save_location_key])\r\n                        window[success_text_key].update(success_message, visible=True)\r\n                        window[success_text_key].set_size((len(success_message), 1))\r\n                        update_all_listboxes(window, char_dict, directory1, directory2)\r\n        elif event == browsed_key:\r\n            directory1 = values[folder_location_key]\r\n            if len(directory1) != 0:\r\n                # changes bin list when new folder is picked\r\n                update_all_listboxes(window, char_dict, directory1, directory2)\r\n                window[pwd_key].update(directory1)\r\n                window.refresh()\r\n        # Serial Swap Tab\r\n        elif event == browse1_key:\r\n            directory1 = values[donor_browse_key]\r\n            if len(directory1) != 0:\r\n                # changes bin list when new folder is picked\r\n                update_all_listboxes(window, char_dict, directory1, directory2)\r\n                window[display_dir1_key].update(directory1)\r\n                window.refresh()\r\n        elif event == browse2_key:\r\n            directory2 = values[receiver_browse_key]\r\n            if len(directory2) != 0:\r\n                # changes bin list when new folder is picked\r\n                update_all_listboxes(window, char_dict, directory1, directory2)\r\n                window[display_dir2_key].update(directory2)\r\n                window.refresh()\r\n\r\n        elif event == browse3_key:\r\n            directory1 = values[folder_location2_key]\r\n            if len(directory1) != 0:\r\n                # changes bin list when new folder is picked\r\n                update_all_listboxes(window, char_dict, directory1, directory2)\r\n                window[pwd2_key].update(directory1)\r\n                window.refresh()\r\n\r\n        elif event == shuffle_save_key:\r\n            if values[shuffle_save_location_key] != '':\r\n                if len(values[shuffle_name_key]) == 0:\r\n                    no_selection_error(\"Please select a bin to shuffle the serial number.\")\r\n                else:\r\n                    to_shuffle = r\"\\\\\".join([directory1, name_formatter[values[shuffle_name_key][0]]])\r\n                    name = values[shuffle_name_key][0]\r\n                    transplanter.randomize_sn(bin_location=to_shuffle, save_to_location=values[shuffle_save_location_key])\r\n                    success_message = \"{} received a new SN and was successfully saved at {}\".format(name, values[shuffle_save_location_key])\r\n                    window[success_shuffle_key].update(success_message, visible=True)\r\n                    window[success_shuffle_key].set_size((len(success_message), 1))\r\n                    update_all_listboxes(window, char_dict, directory1, directory2)\r\n\r\n        elif event == swapper_save_key:\r\n            # If save as menu is closed or cancelled do nothing\r\n            if values[swap_save_location_key] != '':\r\n                if len(values[donor_box_key]) == 0 or len(values[receiver_box_key]) == 0:\r\n                    no_selection_error(\"Please select a donor and a receiver to transplant a serial number.\")\r\n                else:\r\n                    donor_bin = r\"\\\\\".join([directory1, name_formatter[values[donor_box_key][0]]])\r\n                    receiver_bin = r\"\\\\\".join([directory2, name_formatter[values[receiver_box_key][0]]])\r\n                    success_check = transplanter.serial_swapper(donor_bin, receiver_bin, values[swap_save_location_key])\r\n                    if success_check is None:\r\n                        no_selection_error(\"Not an amiibo bin\")\r\n                    else:\r\n                        success_message = \"{} received a new SN and was successfully saved at {}\".format(receiver_bin, values[swap_save_location_key])\r\n                        window[success_swap_key].update(success_message, visible=True)\r\n                        window[success_swap_key].set_size((len(success_message), 1))\r\n                        update_all_listboxes(window, char_dict, directory1, directory2)\r\n\r\n        # Character Dictionary Tab\r\n        # Add Character\r\n        elif event == add_character_key:\r\n            # Addition menu\r\n            add_name_key = \"_name_\"\r\n            add_ID_key = \"_id_\"\r\n            add_enable_key = \"_enable_\"\r\n            add_cancel_key = \"_cancel_\"\r\n            add_confirm_key = \"_confirm_\"\r\n\r\n            add_layout = [[sg.InputText(key=add_name_key)],\r\n                          [sg.InputText(key=add_ID_key)],\r\n                          [sg.Checkbox(\"Enabled\", key=add_enable_key, default=True)],\r\n                          [sg.Submit(\"Confirm\", key=add_confirm_key), sg.Cancel(key=add_cancel_key)]]\r\n            add_window = sg.Window(\"Add a character\", add_layout)\r\n            while True:\r\n                event, values = add_window.read()\r\n                print(event, values)\r\n                # Add character confirmation\r\n                if event == add_confirm_key:\r\n                    if len(values[add_ID_key]) == 16:\r\n                        char_dict.add_character(values[add_name_key], values[add_ID_key], values[add_enable_key])\r\n                        char_dict.save_XML()\r\n                        update_all_listboxes(window, char_dict, directory1, directory2)\r\n                        break\r\n                    else:\r\n                        sg.Popup(\"Invalid Length!\", \"Amiibo character IDs are always 16 characters long\", \"Try Again!\")\r\n\r\n                # Window Closed\r\n                elif event == sg.WIN_CLOSED or event == add_cancel_key or event == \"Quit\":\r\n                    break\r\n            add_window.close()\r\n        # Remove / enable or disable character\r\n        elif event == delete_character_key or event == change_enable_key:\r\n            if len(values[character_edit_box_key]) == 0:\r\n                no_selection_error(\"Please select a character to modify.\")\r\n            else:\r\n                # This long expression grabs the character name\r\n                character = values[character_edit_box_key][0].split(',')[0]\r\n                if event == delete_character_key:\r\n                    char_dict.remove_character(character)\r\n                else:\r\n                    char_dict.enable(character)\r\n                char_dict.save_XML()\r\n                update_all_listboxes(window, char_dict, directory1, directory2)\r\n\r\n        # Window Closed\r\n        elif event == sg.WIN_CLOSED:\r\n            break\r\n\r\n    window.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"MiDe-S/amiibo_transplant","sub_path":"brain_transplant.py","file_name":"brain_transplant.py","file_ext":"py","file_size_in_byte":16980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"28336686616","text":"def heapify(arr, n, i):\n    largest = i  # 初始化最大元素的索引为父节点 i\n    left = 2 * i + 1  # 左子节点索引\n    right = 2 * i + 2  # 右子节点索引\n\n    # 如果左子节点存在且大于根节点,则更新最大元素的索引\n    if left < n and arr[left] > arr[largest]:\n        largest = left\n\n    # 如果右子节点存在且大于根节点,则更新最大元素的索引\n    if right < n and arr[right] > arr[largest]:\n        largest = right\n\n    # 如果最大元素的索引不是父节点 i,则交换父节点与最大元素\n    \"\"\"\n    为什么这里能够保证当前子树的最大值节点保持在父节点就不用再往下了?\n    因为我们在建立最大堆的时候,是从最后一个非叶子节点出发开始创建的,当前子树的后序子树们都已经保持了最大堆\n    如果当前子树的左右节点都比当前的根节点小,那么根节点一定大于所有的后序子节点\n    \"\"\"\n    if largest != i:\n        arr[i], arr[largest] = arr[largest], arr[i]\n\n        # 对交换后的子树递归进行堆化操作\n        heapify(arr, n, largest)\n\n
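# Added note: the helper below is the alternative top-down construction - it sifts each element up past its parent until the max-heap property holds (O(n log n)); heap_sort instead builds the heap bottom-up with heapify, which is O(n) overall.\ndef 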
nums_to_maxheap(nums):\n    for i in range(len(nums)):\n        # 将给定数组逐个和其父节点相比,如果比父节点大,则持续向上走\n        while nums[i] > nums[int((i-1)/2)]:\n            tmp = nums[i]\n            nums[i] = nums[int((i-1)/2)]\n            nums[int((i-1)/2)] = tmp\n            i = int((i-1)/2)\n\ndef heap_sort(arr):\n    n = len(arr)\n    # 构建最大堆,从最后一个非叶节点开始进行堆化操作\n    for i in range(n // 2 - 1, -1, -1):\n        heapify(arr, n, i)\n\n    # nums_to_maxheap(arr)\n    # 逐个将堆顶元素(最大值)与当前未排序部分的末尾元素交换,并对剩余元素进行堆化操作\n    for i in range(n - 1, 0, -1):\n        arr[i], arr[0] = arr[0], arr[i]  # 交换堆顶元素与末尾元素\n        heapify(arr, i, 0)  # 对剩余元素进行堆化操作\n    \n    return arr\n\n\n# 测试示例\narr = [12, 11, 13, 5, 6, 7]\nsorted_arr = heap_sort(arr)\nprint(sorted_arr)\n\n\n","repo_name":"fzx401/py-study","sub_path":"data_structure_and_algorithm/Sort/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"30639146308","text":"import json\nfrom json import JSONEncoder\nimport dateutil.parser\nimport babel\nimport flask\nimport datetime\nfrom datetime import datetime, date, timedelta\nfrom flask import Flask, render_template, jsonify, request, Response, flash, redirect, url_for, send_from_directory, session\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate, MigrateCommand\nfrom sqlalchemy import desc\n\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\n\nclass Exercice(db.Model): \n    __tablename__ = 'exercice'\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String())\n    difficulty = db.Column(db.Integer)\n    muscles = db.Column(db.String())\n    requirements = db.Column(db.String())\n    likes = db.Column(db.Integer, default=0)\n    video_path = db.Column(db.String())\n\n    def delete(self):\n        db.session.delete(self)\n        db.session.commit()\n\n    def format(self):\n        return {\n            'id': self.id,\n            'name': self.name,\n            'difficulty': self.difficulty,\n            'muscles': self.muscles,\n            'requirements': self.requirements,\n            'likes': self.likes,\n            'video_path': self.video_path\n        }\n\n\n\nclass Instructor(db.Model): \n    __tablename__ = 'instructor'\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String())\n    age = db.Column(db.Integer())\n    profile_pic_path = db.Column(db.String())\n\n\n    def delete(self):\n        db.session.delete(self)\n        db.session.commit()\n\n    def format(self):\n        return {\n            'id': self.id,\n            'name': self.name,\n            'age': self.age,\n            'profile_pic_path': self.profile_pic_path\n        }","repo_name":"LeopoldKink/capstone","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"19855896717","text":"from django import forms\nfrom .cash_models import CashReceipt, CashReceiptItems\nfrom sales.creditsales import SalesInvoice\n\n\nclass CashReceiptForm(forms.ModelForm):\n    class Meta:\n        model = CashReceipt\n        fields = ['customer', 'description']\n        widgets = {\n            'customer': forms.Select(attrs={\n                'id': 'customer'\n            }),\n            'description': forms.TextInput\n        }\n\n\nclass ReceiptItemsForm(forms.ModelForm):\n    class Meta:\n        model = CashReceiptItems\n        fields = [\n            'invoice', 'description', 'amount'\n        ]\n        widgets = {\n            'invoice': forms.Select(attrs={\n                'id': 'related-invoice'\n            }),\n            'description': forms.TextInput(attrs={\n                'id': 'receipt-description'\n            }),\n            'amount': forms.NumberInput(attrs={\n                'id': 'receipt-amount'\n            })\n        }\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.fields['invoice'].queryset = 
SalesInvoice.objects.none()\n","repo_name":"njaugodfrey/mauzo_store","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"12667668043","text":"import json\nimport ast\nimport collections \n\ndef getPath(filename):\n '''Takes in a student file of a list of dictionaries that specify their actions. \n Returns a list of tuples, (event_type,event), that maps the student's path'''\n oneFile=json.load(open(filename))\n paths=[]\n for i in oneFile:\n if i['event_type']=='pause_video' or i['event_type']=='play_video':\n d = json.loads(ast.literal_eval(i['event']))\n videoDict=dict((k,v) for (k,v) in d.items())\n paths.append((i['event_type'],videoDict['code']))\n elif 'goto_position' in i['event_type']:\n d2 = json.loads(ast.literal_eval(i['event']))\n videoDict2=dict((k,v) for (k,v) in d2.items())\n paths.append((i['event_type'],'position: ' + str(videoDict2['POST']['position']))) # position refers to the tab the user clicked on\n else:\n paths.append((i['event_type'],'N/A'))\n return paths\n \ndef count(listOfTuples):\n '''Returns a dictionary where the keys are event types and\n values are the number of times that event has occurred for one student'''\n return collections.Counter(listOfTuples)","repo_name":"dav-lab/Video-Data","sub_path":"Old_Code/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73578593364","text":"import numpy as np\nimport scipy.io\nimport imageio\nimport h5py\nimport os\nfrom torch.utils.data import Dataset\nimport matplotlib\nimport matplotlib.colors\nimport skimage.transform\nimport random\nimport torchvision\nimport torch\nfrom utils.utils import image_h, image_w\n\nimg_dir_train_file = './data/img_dir_train.txt'\ndepth_dir_train_file = './data/depth_dir_train.txt'\nlabel_dir_train_file = './data/label_train.txt'\nimg_dir_train_file = './data/img_dir_train.txt'\ndepth_dir_train_file = './data/depth_dir_train.txt'\nlabel_dir_train_file = './data/label_train.txt'\nimg_dir_test_file = './data/img_dir_test.txt'\ndepth_dir_test_file = './data/depth_dir_test.txt'\nlabel_dir_test_file = './data/label_test.txt'\ndir_files = \"./data/{field}_dir_{split}.txt\"\n\n\nclass SUNRGBD(Dataset):\n def __init__(\n self, transform=None, phase=\"train\", data_dir=None,\n train_subset_file=None, val_subset_file=None, test_subset_file=None, debug_mode=False,\n ):\n \"\"\"\n phase (str): train, val, test\n \"\"\"\n\n self.phase = phase\n self.transform = transform\n self.all_dirs = {\n \"train\": {\"img\": [], \"depth\": [], \"label\": []},\n \"val\": {\"img\": [], \"depth\": [], \"label\": []},\n \"test\": {\"img\": [], \"depth\": [], \"label\": []},\n }\n\n try:\n for dir_split in self.all_dirs:\n for field in self.all_dirs[dir_split]:\n with open(dir_files.replace(\"{field}\", field).replace(\"{split}\", dir_split), 'r') as f:\n for line in f:\n self.all_dirs[dir_split][field].append(line.strip().replace(\"SUNRGBD_v1//\", \"SUNRGBD_v1/\"))\n except:\n if data_dir is None:\n data_dir = '/path/to/SUNRGB-D'\n SUNRGBDMeta_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/Metadata/SUNRGBDMeta.mat')\n allsplit_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/traintestSUNRGBD/allsplit.mat')\n SUNRGBD2Dseg_dir = os.path.join(data_dir, 'SUNRGBDtoolbox/Metadata/SUNRGBD2Dseg.mat')\n self.SUNRGBD2Dseg = 
h5py.File(SUNRGBD2Dseg_dir, mode='r', libver='latest')\n\n SUNRGBDMeta = scipy.io.loadmat(SUNRGBDMeta_dir, squeeze_me=True,\n struct_as_record=False)['SUNRGBDMeta']\n split = scipy.io.loadmat(allsplit_dir, squeeze_me=True, struct_as_record=False)\n split_train = split['trainvalsplit'].train\n split_val = split['trainvalsplit'].val\n\n seglabel = self.SUNRGBD2Dseg['SUNRGBD2Dseg']['seglabel']\n\n for i, meta in enumerate(SUNRGBDMeta):\n meta_dir = '/'.join(meta.rgbpath.split('/')[:-2])\n real_dir = meta_dir.replace('/n/fs/sun3d/data', data_dir)\n depth_bfx_path = os.path.join(real_dir, 'depth_bfx/' + meta.depthname)\n rgb_path = os.path.join(real_dir, 'image/' + meta.rgbname)\n\n label_path = os.path.join(real_dir, 'label/label.npy')\n\n if not os.path.exists(label_path):\n os.makedirs(os.path.join(real_dir, 'label'), exist_ok=True)\n label = np.array(self.SUNRGBD2Dseg[seglabel[i][0]]).transpose(1, 0)\n np.save(label_path, label)\n\n if meta_dir in split_train:\n dir_split = \"train\"\n elif meta_dir in split_val:\n dir_split = \"val\"\n else:\n dir_split = \"test\"\n self.all_dirs[dir_split][\"img\"] = np.append(self.all_dirs[dir_split][\"img\"], rgb_path)\n self.all_dirs[dir_split][\"depth\"] = np.append(self.all_dirs[dir_split][\"depth\"], depth_bfx_path)\n self.all_dirs[dir_split][\"label\"] = np.append(self.all_dirs[dir_split][\"label\"], label_path)\n\n local_file_dir = '/'.join(dir_files.split('/')[:-1])\n if not os.path.exists(local_file_dir):\n os.mkdir(local_file_dir)\n for dir_split in self.all_dirs:\n for field in self.all_dirs[dir_split]:\n with open(dir_files.replace(\"{field}\", field).replace(\"{split}\", dir_split), 'w') as f:\n f.write('\\n'.join(self.all_dirs[dir_split][field]))\n\n self.subsets = {}\n if train_subset_file is not None:\n self.subsets[\"train\"] = set(open(train_subset_file).read().splitlines())\n if val_subset_file is not None:\n self.subsets[\"val\"] = set(open(val_subset_file).read().splitlines())\n if test_subset_file is not None:\n self.subsets[\"test\"] = set(open(test_subset_file).read().splitlines())\n for split in self.subsets:\n # keep items based on whether or not they're in the split...\n subset_img_dir = []\n subset_depth_dir = []\n subset_label_dir = []\n for idx, img_dir in enumerate(self.all_dirs[split][\"img\"]):\n if img_dir in self.subsets[split]:\n subset_img_dir.append(img_dir)\n subset_depth_dir.append(self.all_dirs[split][\"depth\"][idx])\n subset_label_dir.append(self.all_dirs[split][\"label\"][idx])\n self.all_dirs[split][\"img\"] = subset_img_dir\n self.all_dirs[split][\"depth\"] = subset_depth_dir\n self.all_dirs[split][\"label\"] = subset_label_dir\n\n if debug_mode:\n for split in self.all_dirs:\n for field in self.all_dirs[split]:\n self.all_dirs[split][field] = self.all_dirs[split][field][:100]\n\n def __len__(self):\n return len(self.all_dirs[self.phase][\"img\"])\n\n def __getitem__(self, idx):\n img_dir = self.all_dirs[self.phase][\"img\"]\n depth_dir = self.all_dirs[self.phase][\"depth\"]\n label_dir = self.all_dirs[self.phase][\"label\"]\n\n label = np.load(label_dir[idx])\n depth = imageio.imread(depth_dir[idx])\n image = imageio.imread(img_dir[idx])\n\n sample = {'image': image, 'depth': depth, 'label': label, 'id': img_dir[idx]}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\n\nclass RandomHSV(object):\n \"\"\"\n Args:\n h_range (float tuple): random ratio of the hue channel,\n new_h range from h_range[0]*old_h to h_range[1]*old_h.\n s_range (float tuple): random ratio of the saturation 
channel,\n new_s range from s_range[0]*old_s to s_range[1]*old_s.\n v_range (int tuple): random bias of the value channel,\n new_v range from old_v-v_range to old_v+v_range.\n Notice:\n h range: 0-1\n s range: 0-1\n v range: 0-255\n \"\"\"\n\n def __init__(self, h_range, s_range, v_range):\n assert isinstance(h_range, (list, tuple)) and \\\n isinstance(s_range, (list, tuple)) and \\\n isinstance(v_range, (list, tuple))\n self.h_range = h_range\n self.s_range = s_range\n self.v_range = v_range\n\n def __call__(self, sample):\n img = sample['image']\n img_hsv = matplotlib.colors.rgb_to_hsv(img)\n img_h, img_s, img_v = img_hsv[:, :, 0], img_hsv[:, :, 1], img_hsv[:, :, 2]\n h_random = np.random.uniform(min(self.h_range), max(self.h_range))\n s_random = np.random.uniform(min(self.s_range), max(self.s_range))\n v_random = np.random.uniform(-min(self.v_range), max(self.v_range))\n img_h = np.clip(img_h * h_random, 0, 1)\n img_s = np.clip(img_s * s_random, 0, 1)\n img_v = np.clip(img_v + v_random, 0, 255)\n img_hsv = np.stack([img_h, img_s, img_v], axis=2)\n img_new = matplotlib.colors.hsv_to_rgb(img_hsv)\n\n return {'image': img_new, 'depth': sample['depth'], 'label': sample['label']}\n\n\nclass scaleNorm(object):\n def __call__(self, sample):\n image, depth, label = sample['image'], sample['depth'], sample['label']\n\n # Bi-linear\n image = skimage.transform.resize(image, (image_h, image_w), order=1,\n mode='reflect', preserve_range=True)\n # Nearest-neighbor\n depth = skimage.transform.resize(depth, (image_h, image_w), order=0,\n mode='reflect', preserve_range=True)\n label = skimage.transform.resize(label, (image_h, image_w), order=0,\n mode='reflect', preserve_range=True)\n\n return {'image': image, 'depth': depth, 'label': label}\n\n\nclass RandomScale(object):\n def __init__(self, scale):\n self.scale_low = min(scale)\n self.scale_high = max(scale)\n\n def __call__(self, sample):\n image, depth, label = sample['image'], sample['depth'], sample['label']\n\n target_scale = random.uniform(self.scale_low, self.scale_high)\n # (H, W, C)\n target_height = int(round(target_scale * image.shape[0]))\n target_width = int(round(target_scale * image.shape[1]))\n # Bi-linear\n image = skimage.transform.resize(image, (target_height, target_width),\n order=1, mode='reflect', preserve_range=True)\n # Nearest-neighbor\n depth = skimage.transform.resize(depth, (target_height, target_width),\n order=0, mode='reflect', preserve_range=True)\n label = skimage.transform.resize(label, (target_height, target_width),\n order=0, mode='reflect', preserve_range=True)\n\n return {'image': image, 'depth': depth, 'label': label}\n\n\nclass RandomCrop(object):\n def __init__(self, th, tw):\n self.th = th\n self.tw = tw\n\n def __call__(self, sample):\n image, depth, label = sample['image'], sample['depth'], sample['label']\n h = image.shape[0]\n w = image.shape[1]\n i = random.randint(0, h - self.th)\n j = random.randint(0, w - self.tw)\n\n return {'image': image[i:i + image_h, j:j + image_w, :],\n 'depth': depth[i:i + image_h, j:j + image_w],\n 'label': label[i:i + image_h, j:j + image_w]}\n\n\nclass RandomFlip(object):\n def __call__(self, sample):\n image, depth, label = sample['image'], sample['depth'], sample['label']\n if random.random() > 0.5:\n image = np.fliplr(image).copy()\n depth = np.fliplr(depth).copy()\n label = np.fliplr(label).copy()\n\n return {'image': image, 'depth': depth, 'label': label}\n\n\n# Transforms on torch.*Tensor\nclass Normalize(object):\n def __call__(self, sample):\n image, depth = 
sample['image'], sample['depth']\n        image = image / 255\n        image = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                                 std=[0.229, 0.224, 0.225])(image)\n        depth = torchvision.transforms.Normalize(mean=[19050],\n                                                 std=[9650])(depth)\n        sample['image'] = image\n        sample['depth'] = depth\n\n        return sample\n\n\nclass ToTensor(object):\n    \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n    def __call__(self, sample):\n        image, depth, label = sample['image'], sample['depth'], sample['label']\n\n        # Generate different label scales\n        label2 = skimage.transform.resize(label, (label.shape[0] // 2, label.shape[1] // 2),\n                                          order=0, mode='reflect', preserve_range=True)\n        label3 = skimage.transform.resize(label, (label.shape[0] // 4, label.shape[1] // 4),\n                                          order=0, mode='reflect', preserve_range=True)\n        label4 = skimage.transform.resize(label, (label.shape[0] // 8, label.shape[1] // 8),\n                                          order=0, mode='reflect', preserve_range=True)\n        label5 = skimage.transform.resize(label, (label.shape[0] // 16, label.shape[1] // 16),\n                                          order=0, mode='reflect', preserve_range=True)\n\n        # swap color axis because\n        # numpy image: H x W x C\n        # torch image: C X H X W\n        image = image.transpose((2, 0, 1))\n        depth = np.expand_dims(depth, 0).astype(np.float64)\n        return {'image': torch.from_numpy(image).float(),\n                'depth': torch.from_numpy(depth).float(),\n                'label': torch.from_numpy(label).float(),\n                'label2': torch.from_numpy(label2).float(),\n                'label3': torch.from_numpy(label3).float(),\n                'label4': torch.from_numpy(label4).float(),\n                'label5': torch.from_numpy(label5).float()}\n","repo_name":"belindal/LaMPP","sub_path":"image_segmentation/RedNet_data.py","file_name":"RedNet_data.py","file_ext":"py","file_size_in_byte":12652,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"30"} +{"seq_id":"30961389643","text":"# https://www.acmicpc.net/problem/11053\n\nn = int(input())\narr = list(map(int, input().split()))\n\nans = [1] * n\n\nfor i in range(n):\n    for j in range(n):\n        if arr[j] < arr[i]:\n            ans[i] = max(ans[i], ans[j] + 1)\nprint(max(ans))\n","repo_name":"hi6724/algorithm_study","sub_path":"5.25/LIS.py","file_name":"LIS.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"29316621424","text":"import sys\nfrom pprint import pprint\nfrom itertools import count\nimport time\n\nif __name__ == \"__main__\":\n    filename = sys.argv[1]\n\n    contents = open(filename).readlines()\n\n    timestamp = int(contents[0])\n    bus_ids = [int(bus_id) for bus_id in contents[1].split(\",\") if bus_id != \"x\"]\n\n    offsets = [\n        idx for idx, bus_id in enumerate(contents[1].split(\",\")) if bus_id != \"x\"\n    ]\n\n    first = bus_ids[0]\n    step = count(1)\n    offsets[0] = first\n\n    while True:\n        # We only need to check multiples of our first bus_id\n        i = first * next(step)\n        if all(\n            bus_id - i % bus_id == offset for bus_id, offset in zip(bus_ids, offsets)\n        ):\n            print(f\"timestamp={i}\")\n            break\n\n    # Part 1\n    # departs = [id for id in ids if round % id == 0]\n    # if round >= timestamp and len(departs) > 0:\n    #     waits = round - timestamp\n    #     print(f\"{departs=} {round=} waits={waits} {departs[0]*waits}\")\n    #     break\n","repo_name":"cemathey/aoc_2020","sub_path":"day13/day13 copy.py","file_name":"day13 copy.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"21001971215","text":"# -*- coding: utf-8 -*-\r\n# 
@Time : 2019/5/24 14:58\r\n# @Author : YuYi\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.contrib import slim\r\nimport numpy as np\r\nfrom utils.layers_utils import conv2d\r\n\r\n\r\nclass Model:\r\n    def __init__(self, num_class=20, weight_decay=1e-4):\r\n        self.num_class = num_class\r\n        self.weight_decay = weight_decay\r\n\r\n    def forward(self, inputs, is_training=False):\r\n        # set batch norm params\r\n        batch_norm_params = {\r\n            'decay': 0.999,\r\n            'epsilon': 1e-05,\r\n            'scale': True,\r\n            'is_training': is_training,\r\n            'fused': None,  # Use fused batch norm if possible.\r\n        }\r\n        with slim.arg_scope([slim.conv2d],\r\n                            normalizer_fn=slim.batch_norm,\r\n                            normalizer_params=batch_norm_params,\r\n                            biases_initializer=None,\r\n                            activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=0.1),\r\n                            weights_regularizer=slim.l2_regularizer(self.weight_decay)):\r\n            with tf.variable_scope('darknet_body'):\r\n                net = darknet53_body(inputs)\r\n            net = slim.conv2d(net, self.num_class, 3,\r\n                              stride=1, normalizer_fn=None,\r\n                              activation_fn=None,\r\n                              biases_initializer=tf.zeros_initializer())\r\n            with tf.name_scope('global_avg_pool'):\r\n                logits = tf.reduce_mean(net, axis=[1, 2])\r\n        return logits\r\n\r\n    def probability(self, logits):\r\n        return tf.nn.sigmoid(logits)\r\n\r\n    def prediction(self, probability, threshold):\r\n        '''\r\n        if probability is greater than the threshold, it is an object\r\n        :param probability: a np.array which shape is [batch_size, 20]\r\n        :param threshold: np.float32\r\n        :return:\r\n            prediction_list:[batch_size], prediction_list[i] is a set\r\n        '''\r\n        prediction_list = []\r\n        for i in range(probability.shape[0]):\r\n            prediction_list.append(set(np.where(probability[i] > threshold)[0]))\r\n        return prediction_list\r\n\r\n    def correct_sample(self, prediction_list, label_list):\r\n        '''\r\n        :param prediction_list: [batch_size], prediction_list[i] is a set\r\n        :param label_list: a list which shape is [batch_size],label_list[i] is a set\r\n        :return:\r\n        '''\r\n        assert len(prediction_list) == len(label_list)\r\n        number_correct = 0\r\n        correct_sample = []\r\n        for i in range(len(prediction_list)):\r\n            if prediction_list[i].issubset(label_list[i]):\r\n                number_correct += 1\r\n                correct_sample.append(1)\r\n            else:\r\n                correct_sample.append(0)\r\n        return number_correct, correct_sample\r\n\r\n    def compute_loss(self, logits, labels):\r\n        with tf.name_scope('loss'):\r\n            # loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels,\r\n            #                                        logits=logits,\r\n            #                                        reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)\r\n            loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=labels,\r\n                                                   logits=logits)\r\n        return loss\r\n\r\n\r\ndef darknet53_body(inputs):\r\n    def res_block(inputs, filters):\r\n        shortcut = inputs\r\n        net = conv2d(inputs, filters * 1, 1)\r\n        net = conv2d(net, filters * 2, 3)\r\n\r\n        net = net + shortcut\r\n\r\n        return net\r\n\r\n    # first two conv2d layers\r\n    net = conv2d(inputs, 32, 3, strides=1)\r\n    net = conv2d(net, 64, 3, strides=2)\r\n\r\n    # res_block * 1\r\n    net = res_block(net, 32)\r\n\r\n    net = conv2d(net, 128, 3, strides=2)\r\n\r\n    # res_block * 2\r\n    for i in range(2):\r\n        net = res_block(net, 64)\r\n\r\n    net = conv2d(net, 256, 3, strides=2)\r\n\r\n    # res_block * 2\r\n    for i in range(2):\r\n        net = res_block(net, 128)\r\n\r\n    # route_1 = net\r\n    net = conv2d(net, 512, 3, strides=2)\r\n\r\n    # res_block * 2\r\n    for i in range(2):\r\n        net = res_block(net, 256)\r\n\r\n    # route_2 = net\r\n    net = conv2d(net, 1024, 3, strides=2)\r\n\r\n    # res_block * 2\r\n    for i in range(2):\r\n        net = res_block(net, 
512)\r\n out = net\r\n\r\n return out\r\n\r\n","repo_name":"yujack333/extra_work","sub_path":"Multi_label_Image_Classification/code/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"3023380423","text":"import locale\nimport os\nimport platform\nimport subprocess\nimport uuid\nfrom contextlib import contextmanager\n\nimport pytest\n\nfrom dvc.path_info import URLInfo\nfrom tests import PY39, PYARROW_NOT_AVAILABLE\n\nfrom .base import Base\n\n\nclass HDFS(Base, URLInfo): # pylint: disable=abstract-method\n @contextmanager\n def _hdfs(self):\n import pyarrow.fs\n\n conn = pyarrow.fs.HadoopFileSystem(self.host, self.port)\n yield conn\n\n def is_file(self):\n with self._hdfs() as _hdfs:\n import pyarrow.fs\n\n file_info = _hdfs.get_file_info(self.path)\n return file_info.type == pyarrow.fs.FileType.File\n\n def is_dir(self):\n with self._hdfs() as _hdfs:\n import pyarrow.fs\n\n file_info = _hdfs.get_file_info(self.path)\n return file_info.type == pyarrow.fs.FileType.Directory\n\n def exists(self):\n with self._hdfs() as _hdfs:\n import pyarrow.fs\n\n file_info = _hdfs.get_file_info(self.path)\n return file_info.type != pyarrow.fs.FileType.NotFound\n\n def mkdir(self, mode=0o777, parents=False, exist_ok=False):\n assert mode == 0o777\n assert parents\n assert not exist_ok\n\n with self._hdfs() as _hdfs:\n # NOTE: fs.create_dir creates parents by default\n _hdfs.create_dir(self.path)\n\n def write_bytes(self, contents):\n with self._hdfs() as _hdfs:\n with _hdfs.open_output_stream(self.path) as fobj:\n fobj.write(contents)\n\n def write_text(self, contents, encoding=None, errors=None):\n if not encoding:\n encoding = locale.getpreferredencoding(False)\n assert errors is None\n self.write_bytes(contents.encode(encoding))\n\n def read_bytes(self):\n with self._hdfs() as _hdfs:\n with _hdfs.open_input_stream(self.path) as fobj:\n return fobj.read()\n\n def read_text(self, encoding=None, errors=None):\n if not encoding:\n encoding = locale.getpreferredencoding(False)\n assert errors is None\n return self.read_bytes().decode(encoding)\n\n\n@pytest.fixture(scope=\"session\")\ndef hadoop(test_config):\n test_config.requires(\"hdfs\")\n\n if platform.system() != \"Linux\":\n pytest.skip(\"only supported on Linux\")\n\n if PY39:\n pytest.skip(PYARROW_NOT_AVAILABLE)\n\n import wget\n from appdirs import user_cache_dir\n\n hadoop_name = \"hadoop-2.7.2.tar.gz\"\n java_name = \"openjdk-7u75-b13-linux-x64-18_dec_2014.tar.gz\"\n\n base_url = \"https://s3-us-east-2.amazonaws.com/dvc-public/dvc-test/\"\n hadoop_url = base_url + hadoop_name\n java_url = base_url + java_name\n\n (cache_dir,) = (user_cache_dir(\"dvc-test\", \"iterative\"),)\n dname = os.path.join(cache_dir, \"hdfs\")\n\n java_tar = os.path.join(dname, java_name)\n hadoop_tar = os.path.join(dname, hadoop_name)\n\n java_home = os.path.join(dname, \"java-se-7u75-ri\")\n hadoop_home = os.path.join(dname, \"hadoop-2.7.2\")\n\n def _get(url, tar, target):\n if os.path.isdir(target):\n return\n\n if not os.path.exists(tar):\n wget.download(url, out=tar)\n assert os.system(f\"tar -xvf {tar} -C {dname}\") == 0\n assert os.path.isdir(target)\n\n os.makedirs(dname, exist_ok=True)\n _get(hadoop_url, hadoop_tar, hadoop_home)\n _get(java_url, java_tar, java_home)\n\n os.environ[\"JAVA_HOME\"] = java_home\n os.environ[\"HADOOP_HOME\"] = hadoop_home\n os.environ[\"PATH\"] += f\":{hadoop_home}/bin:{hadoop_home}/sbin\"\n\n # NOTE: 
must set CLASSPATH to connect using pyarrow.fs.HadoopFileSystem\n result = subprocess.run(\n [f\"{hadoop_home}/bin/hdfs\", \"classpath\", \"--glob\"],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n check=False,\n )\n os.environ[\"CLASSPATH\"] = result.stdout\n\n\n@pytest.fixture(scope=\"session\")\ndef hdfs_server(hadoop, docker_compose, docker_services):\n import pyarrow.fs\n\n port = docker_services.port_for(\"hdfs\", 8020)\n web_port = docker_services.port_for(\"hdfs\", 50070)\n\n def _check():\n try:\n # NOTE: just connecting or even opening something is not enough,\n # we need to make sure that we are able to write something.\n conn = pyarrow.fs.HadoopFileSystem(\"hdfs://127.0.0.1\", port)\n with conn.open_output_stream(str(uuid.uuid4())) as fobj:\n fobj.write(b\"test\")\n return True\n except (pyarrow.ArrowException, OSError):\n return False\n\n docker_services.wait_until_responsive(timeout=30.0, pause=5, check=_check)\n\n return {\"hdfs\": port, \"webhdfs\": web_port}\n\n\n@pytest.fixture\ndef hdfs(hdfs_server):\n port = hdfs_server[\"hdfs\"]\n url = f\"hdfs://127.0.0.1:{port}/{uuid.uuid4()}\"\n yield HDFS(url)\n\n\nclass WebHDFS(Base, URLInfo): # pylint: disable=abstract-method\n @contextmanager\n def _webhdfs(self):\n from hdfs import InsecureClient\n\n client = InsecureClient(f\"http://{self.host}:{self.port}\", self.user)\n yield client\n\n def is_file(self):\n with self._webhdfs() as _hdfs:\n return _hdfs.status(self.path)[\"type\"] == \"FILE\"\n\n def is_dir(self):\n with self._webhdfs() as _hdfs:\n return _hdfs.status(self.path)[\"type\"] == \"DIRECTORY\"\n\n def exists(self):\n with self._webhdfs() as _hdfs:\n return _hdfs.status(self.path, strict=False) is not None\n\n def mkdir(self, mode=0o777, parents=False, exist_ok=False):\n assert mode == 0o777\n assert parents\n assert not exist_ok\n\n with self._webhdfs() as _hdfs:\n # NOTE: hdfs.makekdirs always creates parents\n _hdfs.makedirs(self.path, permission=mode)\n\n def write_bytes(self, contents):\n with self._webhdfs() as _hdfs:\n with _hdfs.write(self.path, overwrite=True) as writer:\n writer.write(contents)\n\n def write_text(self, contents, encoding=None, errors=None):\n if not encoding:\n encoding = locale.getpreferredencoding(False)\n assert errors is None\n self.write_bytes(contents.encode(encoding))\n\n def read_bytes(self):\n with self._webhdfs() as _hdfs:\n with _hdfs.read(self.path) as reader:\n return reader.read()\n\n def read_text(self, encoding=None, errors=None):\n if not encoding:\n encoding = locale.getpreferredencoding(False)\n assert errors is None\n return self.read_bytes().decode(encoding)\n\n\n@pytest.fixture\ndef webhdfs(hdfs_server):\n port = hdfs_server[\"webhdfs\"]\n url = f\"webhdfs://127.0.0.1:{port}/{uuid.uuid4()}\"\n yield WebHDFS(url)\n","repo_name":"genarocoronel/python-dvc","sub_path":"tests/remotes/hdfs.py","file_name":"hdfs.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"3323880411","text":"from typing import List\n\nfrom crud import AbstractCRUD\nfrom database import get_session\nfrom models import Conference, ConferenceParticipant\n\n\nclass ConferenceCRUD(AbstractCRUD):\n def create(self, conference: Conference) -> Conference:\n with get_session() as session:\n session.add(conference)\n session.commit()\n return conference\n\n def read(self, conference_id: str, *args, **kwargs) -> Conference:\n with get_session() as session:\n conference = 
session.query(Conference).filter(Conference.id == conference_id).first()\n            return conference\n\n    def read_all_by_creator_id(self, creator_id) -> List[Conference]:\n        with get_session() as session:\n            conferences = session.query(Conference).filter(Conference.creator_id == creator_id).all()\n            return conferences\n\n    def update(self, updated_conference: Conference, *args, **kwargs):\n        with get_session() as session:\n            conference = self.read(updated_conference.id)\n            conference.is_joining_allowed = updated_conference.is_joining_allowed\n            conference.is_finished = updated_conference.is_finished\n            conference.finished = updated_conference.finished\n            session.add(conference)\n            session.commit()\n            return conference\n\n    def delete(self, pk, *args, **kwargs):\n        pass\n\n\nconference_crud = ConferenceCRUD()\n\n\nclass ConferenceParticipantCRUD(AbstractCRUD):\n    def create(self, participant: ConferenceParticipant) -> ConferenceParticipant:\n        with get_session() as session:\n            session.add(participant)\n            session.commit()\n            return participant\n\n    def read(self, conference_id: str, user_id: int, *args, **kwargs) -> ConferenceParticipant:\n        with get_session() as session:\n            participant = session.query(ConferenceParticipant).filter(\n                ConferenceParticipant.conference_id == conference_id,\n                ConferenceParticipant.user_id == user_id\n            ).first()\n            return participant\n\n    def read_all(self, conference_id: str) -> List[ConferenceParticipant]:\n        with get_session() as session:\n            participants = session.query(ConferenceParticipant).filter(\n                ConferenceParticipant.conference_id == conference_id\n            ).all()\n            return participants\n\n    def read_user_conferences(self, user_id) -> List[str]:\n        with get_session() as session:\n            conferences = session.query(ConferenceParticipant.conference_id).filter(\n                ConferenceParticipant.user_id == user_id\n            ).all()\n            return [conference[0] for conference in conferences]\n\n    def update(self, updated_participant: ConferenceParticipant, *args, **kwargs) -> ConferenceParticipant:\n        with get_session() as session:\n            participant = self.read(updated_participant.conference_id, updated_participant.user_id)\n            participant.is_banned = updated_participant.is_banned\n            session.add(participant)\n            session.commit()\n            return participant\n\n    def delete(self, pk, *args, **kwargs):\n        pass\n\n\nconference_participant_crud = ConferenceParticipantCRUD()\n","repo_name":"MatweyL/VideoConferenceBackend","sub_path":"service/conference/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2166061368","text":"import pymysql\nfrom decimal import Decimal\nfrom datetime import datetime\n\n\nclass MySQL:\n    _ip = \"127.0.0.1\"\n    _user = \"root\"\n    _passwd = \"yujiahao\"\n    _port = 3306\n    _database = \"kbao\"\n\n    def __init__(self, db):\n        self.db = db\n        self.cursor = db.cursor()\n\n    @classmethod\n    def connect(cls, ip=None, user=None, passwd=None, port=None, database=None):\n        params = dict()\n        params['host'] = MySQL._ip if ip is None else ip\n        params['user'] = MySQL._user if user is None else user\n        params['passwd'] = MySQL._passwd if passwd is None else passwd\n        params['port'] = MySQL._port if port is None else int(port)\n        params['db'] = MySQL._database if database is None else database\n        params['charset'] = 'utf8'\n        db = pymysql.connect(**params)\n        ret = cls(db)\n        return ret\n\n    def run(self, sql):\n        print('execute sql:', sql, sep='\\n')\n        try:\n            self.cursor.execute(sql)\n            self.db.commit()\n            return True\n        except 
pymysql.MySQLError as e:\n            print(e)\n            self.db.rollback()\n            return False\n        finally:\n            self.db.close()\n\n    def execute(self, sql):\n        print('execute sql:', sql, sep='\\n')\n        try:\n            self.cursor.execute(sql)\n            self.db.commit()\n            return True\n        except pymysql.MySQLError as e:\n            print(e)\n            self.db.rollback()\n            return False\n\n    def select(self, sql: str, hold: bool = False):\n        try:\n            self.cursor.execute(sql)\n            while 1:\n                row = self.cursor.fetchone()\n                if row is not None:\n                    yield row\n                else:\n                    break\n        except pymysql.MySQLError as e:\n            print(e)\n        finally:\n            if not hold:\n                self.db.close()\n\n    def count(self, sql: str, hold: bool = False) -> int:\n        cnt = 0\n        for row in self.select(sql, hold):\n            cnt += 1\n        return cnt\n\n    def close(self):\n        self.db.close()\n\n    @classmethod\n    def make_insert_sql(cls, db, table, inputdata):\n        \"\"\"输入表名、一条list记录,返回insert SQL\"\"\"\n        sql = f\"insert into {db}.{table} values(\"\n        i = 1\n        for data in inputdata:\n            col = \"''\"\n            if data is None:\n                col = 'null'\n            elif isinstance(data, int) or isinstance(data, float) or isinstance(\n                    data, Decimal):\n                col = f\"{data}\"\n            elif isinstance(data, str):\n                col = f\"'{data}'\"\n            elif isinstance(data, datetime):\n                col = f\"'%s'\" % data.strftime('%Y-%m-%d %H:%M:%S')\n            sql = sql + col\n            if i != len(inputdata):\n                sql = sql + ','\n            i += 1\n        sql = sql + ');'\n        return sql\n","repo_name":"myccy123/kbao","sub_path":"utils/dbutil.py","file_name":"dbutil.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"27688601141","text":"command_list = [\"coding\", \"dog\", \"cat\", \"movie\"]\ncoffee = 0\nwhile True:\n    multiply = 1\n    commands = input()\n    if commands == \"END\":\n        break\n    if commands.isupper():\n        multiply = 2\n\n    commands = commands.lower()\n    if commands in command_list:\n        coffee += 1 * multiply\n\nif coffee > 5:\n    print(\"You need extra sleep\")\nelse:\n    print(coffee)\n","repo_name":"StanDobrev11/Python_Fundamentals","sub_path":"01_Basic_Syntax_Conditional_Statements_and_Loops_-_Exercise/08_How_Much_Coffee.py","file_name":"08_How_Much_Coffee.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15536313160","text":"from graphviz import Digraph\nfrom graphviz import Graph\n\ng = Graph(name='G', engine = 'fdp') # sfdp is not called.\ng.attr('node', shape = 'box')\ng.node('e')\n\nwith g.subgraph(name = 'ClusterA') as a:\n    a.edge('a', 'b')\n    with a.subgraph(name = 'ClusterC') as c:\n        c.edge('C', 'D')\n\nwith g.subgraph(name = \"ClusterB\") as b:\n    b.edge('d', 'f')\n\ng.edge('e', 'ClusterB')\ng.edge('d', 'D')\ng.edge('ClusterC', 'ClusterB')\n\ng.render(view = True)\n","repo_name":"wonkim0512/BDP","sub_path":"lectures/25more.py","file_name":"25more.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15752500560","text":"\"\"\"\n\tModule for the Asset information display Panel.\t\n\n\t@Author: Peter Tiegs\n\tNascentia Corporation 2007 (Some Rights Reserved).\n\t\"\"\"\n\t\n__author__=\"Peter Tiegs\"\n\nimport wx.html\nfrom webview import infoTemplate\n\nclass infoPanel(wx.Panel):\n\t\"\"\"The Panel containing the info block HTML window.\"\"\"\n\tdef __init__(self, parent=None, id=-1, size=wx.DefaultSize):\n\t\t\"\"\"Panel Constructor.\n\t\t\t@size: the size of the panel\n\t\t\"\"\"\n\t\tp = wx.PrePanel()\n\t\tif parent is not None:\n\t\t\tp.Create(parent, id, size=size)\n\t\tself.PostCreate(p)\n\t\tself.Bind(wx.EVT_WINDOW_CREATE, 
self.OnCreate)\n\t\tself.Bind(wx.EVT_SIZE, self.OnSize)\n\tdef OnCreate(self, evt):\n\t\tif self is evt.GetEventObject():\n\t\t\tself.infoText=wx.html.HtmlWindow(self, -1, size=self.GetSize())\n\t\t\tself.sizer = wx.BoxSizer(wx.VERTICAL)\n\t\t\tself.SetSizer(self.sizer)\n\t\t\tself.sizer.Add(self.infoText, 1)\n\t\tevt.Skip()\n\n\tdef OnSize(self, evt):\n\t\tif hasattr(self, 't'):\n\t\t\tsz= self.GetSize()\n\t\t\tw, h = self.t.GetTextExtent(self.t.GetLabel())\n\t\t\tself.t.SetPosition(((sz.width-w)/2, (sz.height-h)/2))\n\n\tdef displayInfo(self, model):\n\t\t\"\"\"Display the information about the model.\n\t\t\t@model: The model to display.\n\t\t\"\"\"\n\t\ttaglist = []\n\t\tfor tag in model.tags:\n\t\t\ttaglist.append(tag.tagname)\n\t\tself.infoText.SetPage(infoTemplate.render(model=model, tags= ','.join(taglist)))\n\nclass InfoFrame(wx.MiniFrame):\n\t\"\"\"A mini frame to display the info panel in a seperate window.\"\"\"\n\tdef __init__(self, parent):\n\t\t\"\"\"Frame Constructor.\"\"\"\n\t\twx.MiniFrame.__init__(self, parent, -1,\"Info\", pos=wx.DefaultPosition, size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE)\n\t\tself.info =infoPanel(self, -1, self.GetSize())\n\t\tself.Bind(wx.EVT_CLOSE, self.OnClose)\n\tdef OnClose(self, event):\n\t\t\"\"\"Capture wx.EVT_CLOSE event to preserve the handle to the frame.\n\t\t\"\"\"\n\t\tself.Show(False)\n","repo_name":"pgtiegs/dmilo","sub_path":"dmilo/infopanel.py","file_name":"infopanel.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16643920834","text":"import argparse\nimport logging\nimport os\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nlogger = logging.getLogger(__name__)\n\n\ndef upload_image_get_metadata(image_path, bucket_name, s3_path=\"\", session=None):\n if not session:\n session = boto3._get_default_session()\n s3 = session.resource('s3')\n bucket = s3.Bucket(bucket_name)\n\n file_dir, file_name = os.path.split(image_path)\n # make sure s3_path ends in a slash\n if s3_path and not s3_path.endswith(\"/\"):\n s3_path += \"/\"\n\n # extend to handle in-memory later, once we have file upload on an application\n # https://thecodinginterface.com/blog/aws-s3-python-boto3/\n # bytes_data =\n # obj = s3.Object(bucket_name, f\"{s3_path}{file_name}\")\n # obj.put(Body=bytes_data)\n\n key = f\"{s3_path}{file_name}\" if s3_path else file_name\n\n # try to upload it\n try:\n bucket.upload_file(Filename=image_path, Key=key)\n return key\n except ClientError as e:\n logging.error(e)\n return False\n\n\ndef upload_directory(path, bucket_name, session=None):\n if not session:\n session = boto3._get_default_session()\n s3 = session.resource('s3')\n bucket = s3.Bucket(bucket_name)\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n full_path = os.path.join(subdir, file)\n with open(full_path, 'rb') as data:\n bucket.put_object(Key=full_path[len(path) + 1 :], Body=data)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dir\", \"-d\", help=\"set directory\")\n parser.add_argument(\"--bucket\", \"-b\", help=\"set bucket\")\n parser.add_argument(\"--file\", \"-f\", help=\"set single file for upload\")\n parser.add_argument(\"--s3path\", help=\"Optional S3 path\")\n args = parser.parse_args()\n\n if not args.bucket:\n print(\"Please set a bucket which you can use in the current AWS session\")\n\n if args.dir and args.bucket:\n upload_directory(args.dir, args.bucket)\n\n if 
args.file and args.bucket:\n        response = upload_image_get_metadata(\n            args.file, args.bucket, s3_path=args.s3path\n        )\n        print(response)\n","repo_name":"martimpassos/iiif-ingest-service","sub_path":"src/IIIFingest/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"8411982664","text":"from django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_GET\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom qa.models import Question, Answer\nfrom django.core.paginator import Paginator\nfrom qa.forms import AskForm, AnswerForm\nfrom datetime import timedelta\n\ndef test(request, *args, **kwargs):\n\treturn HttpResponse('OK')\n\ndef question_details(request, id):\n\tquestion = get_object_or_404(Question, id=id)\n\tanswers = Answer.objects.filter(question=question)\n\tif request.method == \"POST\":\n\t\tform = AnswerForm(request.POST)\n\t\tif form.is_valid(): \n\t\t\tform.save(request.user)\n\t\t\turl = '/question/' + str(id) + '/'\n\t\t\treturn HttpResponseRedirect(url)\n\telse:\n\t\tform = AnswerForm(initial={'question': question.id})\n\treturn render(request, 'question_details.html', {\n\t\t'question': question,\n\t\t'answers': answers,\n\t\t'form': form,\n})\n\n@require_GET\ndef question_new(request):\n\tquestions = Question.objects.new()\n\tlimit = request.GET.get('limit', 10)\n\tpage = request.GET.get('page', 1)\n\tpaginator = Paginator(questions, limit)\n\tpaginator.baseurl = '/?page='\n\tpage = paginator.page(page)\n\treturn render(request, 'question_new.html', {\n\t\t'questions': page.object_list,\n\t\t'paginator': paginator,\n\t\t'page': page,\n\t})\n\n@require_GET\ndef question_popular(request):\n\tquestions = Question.objects.popular()\n\tlimit = request.GET.get('limit', 10)\n\tpage = request.GET.get('page', 1)\n\tpaginator = Paginator(questions, limit)\n\tpaginator.baseurl = '/?page='\n\tpage = paginator.page(page)\n\treturn render(request, 'question_popular.html', {\n\t\t'questions': page.object_list,\n\t\t'paginator': paginator,\n\t\t'page': page,\n\t})\n\ndef ask(request):\n\tif request.method == \"POST\":\n\t\tform = AskForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tq = form.save(request.user)\n\t\t\turl = '/question/' + str(q) + '/'\n\t\t\treturn HttpResponseRedirect(url)\n\telse:\n\t\tform = AskForm()\n\treturn render(request, 'AskAnswerForms.html', {'form': form})\n\n","repo_name":"leonid-invalid/web","sub_path":"ask/qa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"682090198","text":"\"\"\"A simple function to sum up a list.\"\"\"\n\n\ndef sum_algo(xs: list[int]) -> int:\n    \"\"\"Summation of input list is returned.\"\"\"\n    total: int = 0\n    i: int = 0\n    # while i < len(xs):\n    #     total += xs[i]\n    #     i += 1 \n\n    for e in xs:\n        total += e\n    return total\n\ndef sum_two_lists(xs: list[int], ys: list[int]) -> list[int]:\n    result: list[int] = []\n    for i in range(0, len(xs)):\n        result.append(xs[i] + ys[i])\n    return result\n    # i: int = 0\n    # while i < len(xs):\n    #     result.append(xs[i] + ys[i])\n    #     i += 1\n","repo_name":"sjiang13/comp110-21ss1-workspace","sub_path":"lessons/ls15.py","file_name":"ls15.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"18"} +{"seq_id":"12169536135","text":"from flask 
import Flask, jsonify, request\n\nfrom config import config\n\nfrom routes import Usuarios\n\napp = Flask(__name__)\n\ndef page_not_found(error):\n    return \"error de algun tipo
\", 404\n\nif __name__ == '__main__':\n app.config.from_object(config['development'])\n \n app.register_blueprint(Usuarios.main, url_prefix = '/usuarios')\n\n app.register_error_handler(404,page_not_found)\n app.run()\n","repo_name":"OmarOporto/Diplomado_Flask_Res_Api","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"15458601750","text":"import os\nimport subprocess\nimport signal\nimport sys\nimport time\nimport pkg_resources\nimport argparse\n\n## A bug can appear with MacOSX if matplotlib is not set to a non-interactive mode\n#issue: https://github.com/matplotlib/matplotlib/issues/14304/\nimport matplotlib\nmatplotlib.use('agg')\n\nCONFIG_FILE_CONTENT = \"\"\"\n# This file have been automatically generated, please do not modify it. \n# You can remove it once done with the application.\n[DEFAULT]\nagents_dir={agents_dir}\nenv_dir={env_dir}\nn_cores={n_cores}\n# This file will be re generated to each call of \"python -m grid2viz.main\"\n\"\"\"\n\nARG_AGENTS_PATH_DESC = 'The path where the log of the Agents experiences are stored.' \\\n ' (default to None to study the example agents provided with the package)'\nARG_ENV_PATH_DESC = 'The path where the environment config is stored.' \\\n ' (default to None to use the provided default environment)'\nARG_PORT_DESC = 'The port to serve grid2viz on.'\\\n ' (default to 8050)'\nARG_DEBUG_DESC = 'Enable debug mode for developers.' \\\n ' (default to False)'\nARG_N_CORES_DESC = 'The number of cores to use for the first loading of the best agents of each scenario'\n\nARG_CACHE_DESC = 'True if you want to build all the cache data for all agents at once before relaunching grid2viz'\n\ndef main():\n parser_main = argparse.ArgumentParser(description='Grid2Viz')\n parser_main.add_argument('--agents_path', default=None,\n required=False, type=str,\n help=ARG_AGENTS_PATH_DESC)\n parser_main.add_argument('--env_path', default=None,\n required=False, type=str,\n help=ARG_ENV_PATH_DESC)\n parser_main.add_argument('--port', default=8050,\n required=False, type=int,\n help=ARG_PORT_DESC)\n parser_main.add_argument('--debug', action='store_true',\n help=ARG_DEBUG_DESC)\n\n parser_main.add_argument('--n_cores', default=2, type=int,\n help=ARG_N_CORES_DESC)\n parser_main.add_argument('--cache', default=False, type=bool,\n help=ARG_CACHE_DESC)\n\n\n args = parser_main.parse_args()\n\n pkg_root_dir = os.path.dirname(os.path.abspath(__file__))\n os.environ[\"GRID2VIZ_ROOT\"] = pkg_root_dir\n config_path = os.path.join(pkg_root_dir, \"config.ini\")\n\n if args.agents_path is not None:\n agents_dir = os.path.abspath(args.agents_path)\n else: \n agents_dir = os.path.join(pkg_root_dir, \"data\", \"agents\")\n print(\"Using default agents logs at {}\".format(agents_dir))\n\n if args.env_path is not None:\n env_dir = os.path.abspath(args.env_path)\n else:\n env_dir = os.path.join(pkg_root_dir, \"data\", \"env_conf\")\n print(\"Using default environment at {}\".format(env_dir))\n\n n_cores = args.n_cores\n\n with open(config_path, \"w\") as f:\n f.write(CONFIG_FILE_CONTENT.format(agents_dir=agents_dir,\n env_dir=env_dir,\n n_cores=n_cores))\n\n is_makeCache_only=args.cache\n\n # Inline import to load app only now\n if(is_makeCache_only):\n make_cache()\n else:\n from grid2viz.app import app_run\n app_run(args.port, args.debug)\n\n\ndef make_cache():\n from grid2viz.src.manager import (scenarios, agents,\n 
make_episode_without_decorate,\n                                      n_cores, retrieve_episode_from_disk,\n                                      save_in_ram_cache, cache_dir)\n\n    from pathos.multiprocessing import ProcessPool\n    if not os.path.exists(cache_dir):\n        print('Starting Multiprocessing for reading the best agent of each scenario')\n\n        ##TO DO: not all agents have necessarily run on exactly the same scenarios\n        # Avoid an error if an agent has not run on a scenario\n        agent_scenario_list=[(agent,scenario) for agent in agents for scenario in scenarios]\n\n        agents_data=[]\n        if(n_cores==1):#no multiprocess useful for debug if needed\n            i = 0\n            for agent_scenario in agent_scenario_list:\n                agents_data.append(make_episode_without_decorate(agent_scenario[0],agent_scenario[1]))\n                i+=1\n        else:\n            pool = ProcessPool(n_cores)\n            agents_data = list(pool.imap(make_episode_without_decorate,\n                                         [agent_scenario[0] for agent_scenario in agent_scenario_list],#agents\n                                         [agent_scenario[1] for agent_scenario in agent_scenario_list]))#scenarios #we go over all agents and all scenarios for each agent\n            pool.close()\n        print('Multiprocessing done')\n\n        #####\n        #saving data on disk\n        i=0\n        for agent_scenario in agent_scenario_list:\n            print(i)\n            agent=agent_scenario[0]\n            episode_name=agent_scenario[1]\n            agent_episode = agents_data[i]\n            if agent_episode is not None:\n                episode_data = retrieve_episode_from_disk(\n                    agent_episode.episode_name, agent_episode.agent)\n\n                agent_episode.decorate(episode_data)\n                save_in_ram_cache(agent_episode.episode_name,\n                                  agent_episode.agent,\n                                  agent_episode)\n            i+=1\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","repo_name":"Artelys-RTE/grid2viz","sub_path":"grid2viz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16741914843","text":"\"\"\"\nThe lookup process resolves URNs to Item objects, and maintains the lookuptables\nin db_files/lookuptables/\n\nTODO Add a caching mechanism to this. Or at least allow multiple things inside\n    the same file to be replaced/added/removed at once\n\"\"\"\n\nfrom Item import *\n\nclass LookupP:\n    def __init__(self, lut_path, logP, next_URN):\n        \"\"\"\n        :param lut_path: Path to the lookup tables for the database\n        :param logP: The database's LogP object\n        :param next_URN: The next URN that has not been used in the LUT\n        \"\"\"\n        self.logger = logP\n        self.next_URN = next_URN\n        self.lut_path = lut_path\n\n        if lut_path[-1] != '/':\n            self.lut_path += '/'\n\n        logP.log(\"lookupP initialized with lookup table directory \" + str(self.lut_path))\n\n\n\n\n    def get_items(self, URN_list):\n        \"\"\"Returns item objects with all of the URNs requested in the URN_list\n        :param URN_list: The list of integer URNs to resolve. 
This should be sorted and all URNs\n        should be in bounds (less than current max URN)\n        :throws Exception if a URN in URN_list is out of bounds\n        \"\"\"\n        if URN_list == []:\n            return []\n\n\n        items = [] # List of Item objects to be returned\n        current_filename = \"\" # Which .lut file is open currently\n        file = None\n\n        for URN in URN_list:\n            if int(URN) >= self.next_URN:\n                self.logger.log(\"Tried to access out of bounds URN: \"+str(URN))\n                raise Exception(\"URN \" + str(URN) + \" Out of Bounds\")\n\n            needed_filename = self.compute_file(URN)\n\n            # Open the proper file for the next URN to grab\n            if current_filename != needed_filename:\n                if current_filename != \"\":\n                    file.close()\n                current_filename = needed_filename\n                file = open(current_filename, 'r')\n                lines = file.readlines()\n\n            items.append(Item.generate_item_from_string(lines[URN % 1000]))\n\n        if file is not None:\n            file.close()\n\n        return items\n\n\n\n    def add_item(self, item):\n        \"\"\"\n\n        :param item: An Item object. If the Item has a defined URN, it will be ignored and\n            replaced with the next available URN\n        :return:\n        \"\"\"\n\n        item.URN = self.next_URN\n        self.next_URN += 1\n\n        item_string = item.string()\n\n        # Open and append the new item to the appropriate .lut file\n        # (compute_file already returns the full path, including lut_path)\n        needed_file = self.compute_file(item.URN)\n        file = open(needed_file, 'a+')\n        file.write(item_string + \"\\n\")\n        file.close()\n\n\n    def remove_item(self, urn):\n        \"\"\"\n        Remove an item from the lookup table.\n        :param urn: Integer URN to be removed\n        :return: True if the item was removed, False if it was not\n        \"\"\"\n        needed_lut_file = self.compute_file(urn)\n\n        # Read the contents of the file\n        file = open(needed_lut_file, 'r')\n        lines = file.readlines()\n\n        # Remove the desired URN\n        remove_index = self.find_item_index(lines, urn)\n        if remove_index >= 0:\n            del lines[remove_index]\n        else:\n            return False\n\n        file.close()\n\n        # Write it back to the file\n        file = open(needed_lut_file, 'w')\n        file.writelines(lines)\n        file.close()\n\n        return True\n\n\n    def replace_item(self, to_replace, new_item):\n        \"\"\"\n        Replaces the LUT entry corresponding to a given URN with a new item.\n        :param to_replace: The URN of the entry to be replaced\n        :param new_item: The Item object of the replacement item. 
If the replacement item already has\n            a URN it is ignored and overwritten.\n        :return: True if the item was replaced, False if it was not\n        \"\"\"\n        # Find and read the needed .lut file\n        needed_lut_file = self.compute_file(to_replace)\n        file = open(needed_lut_file, 'r')\n        lines = file.readlines()\n        file.close()\n\n        # Edit the new Item's fields\n        new_item.URN = to_replace\n\n        # Find and replace the desired entry\n        replace_index = self.find_item_index(lines, to_replace)\n        if replace_index >= 0:\n            lines[replace_index] = new_item.string()\n        else:\n            return False\n\n        file = open(needed_lut_file, 'w')\n        file.writelines(lines)\n        file.close()\n        return True\n\n\n\n\n    def compute_file(self, urn):\n        return self.lut_path + str(urn // 1000) + \".lut\"\n\n\n\n    def find_item_index(self, user_list, urn):\n        \"\"\"Search the user list for an entry beginning with urn\n        :param user_list: The list of users to search\n        :param urn: Integer URN\n        :return: The index of the urn in the user_list if the urn is located, -1 if it is not\n        \"\"\"\n        for i in range(0, len(user_list)):\n            user = user_list[i]\n            if str(urn) == user[1:user.find(':')]:\n                return i\n\n        return -1\n\n\n\n    def close(self):\n        #export the next urn back to the database\n        return self.next_URN\n\n","repo_name":"joshkirkham/tag-manager","sub_path":"LookupP.py","file_name":"LookupP.py","file_ext":"py","file_size_in_byte":5186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"24281739881","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 10 17:33:52 2023\n\n@author: giannidiarbi\n\n    Gianni Diarbi\n    DS2000\n    Spring 2023\n    HW 4 Problem 1\n    suffrage.py\n    \n\"\"\"\n\nVOTER_FILE = \"1920_women_voters.csv\"\n\nAGE_POSITION = 10\n\nTWENTIES_MIN = 20\nTHIRTIES_MIN = 30\nFORTIES_MIN = 40\nFIFTIES_MIN = 50\nOVER_FIFTY_MIN = 60\n\n\nimport matplotlib.pyplot as plt\n\ndef main():\n    \n    # Collect Data - Open file and read in the header\n    with open(VOTER_FILE, \"r\") as infile:\n        header = infile.readline()\n    \n        # Create an empty list to append age data to\n        ages = []\n    \n        # Use a for loop to read in voter info\n        for line in infile:\n        \n            # Turn the string line into a list of strings\n            age_lst = line.split(\",\")\n        \n            # Define the age data\n            age_data = age_lst[AGE_POSITION]\n        \n            # Check if an age exists in each line, and append the age data to\n            # the ages list\n            if age_data != \"\":\n                ages.append(int(age_data))\n    \n    # Create separate lists for each age range\n    twenties = []\n    thirties = []\n    forties = []\n    fifties = []\n    over_fifty = []\n    \n    # Append the age data to the appropriate age range list\n    for i in ages:\n        if i >= TWENTIES_MIN and i < THIRTIES_MIN:\n            twenties.append(i)\n        elif i >= THIRTIES_MIN and i < FORTIES_MIN:\n            thirties.append(i)\n        elif i >= FORTIES_MIN and i < FIFTIES_MIN:\n            forties.append(i)\n        elif i >= FIFTIES_MIN and i < OVER_FIFTY_MIN:\n            fifties.append(i)\n        else:\n            over_fifty.append(i)\n    \n    # Communication - create a bar chart representing the number of \n    # women in each age category\n    plt.bar(\"twenties\", len(twenties), color = \"palevioletred\")\n    plt.bar(\"thirties\", len(thirties), color = \"teal\")\n    plt.bar(\"forties\", len(forties), color = \"darkslateblue\")\n    plt.bar(\"fifties\", len(fifties), color = \"darkgoldenrod\")\n    plt.bar(\"over fifty\", len(over_fifty), color = \"darkred\")\n    \n    # Add titles and axes labels to the axes\n    plt.xlabel(\"Age Group\")\n    plt.ylabel(\"Number of Women\")\n    plt.title(\"Number of Women Voters in 1920, by 
Age\")\n \nmain()\n\n","repo_name":"giannidiarbi/suffrage.py","sub_path":"suffrage.py","file_name":"suffrage.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"18917390373","text":"from builtins import str\nfrom geocal_swig import *\nimport subprocess\n\ndef gdal_to_erdas_file(infname, outfname):\n '''This converts a GDAL file to ERDAS. We include the calculation of\n statistics and an image pyramid'''\n subprocess.check_call([\"gdal_translate\", \n \"-of\", \"hfa\",\n \"-a_nodata\", \"0\",\n \"-co\", \"STATISTICS=YES\",\n \"-co\", \"COMPRESSED=YES\",\n infname, outfname])\n # Older versions of gdal required the levels. As of GDAL 2.3 this\n # isn't needed, and can in fact create errors. We'll assume that\n # we have the newer version of GDAL, but leave the old code in if\n # we need to go back to an older version of GDAL\n if True:\n cmd = [\"gdaladdo\", outfname]\n else:\n # The default blocksize of ERDAS is 64x64. Since we just\n # created the file, we know this is the blocksize.\n blocksize = 64\n cmd = [\"gdaladdo\", outfname, \"2\"]\n i = 2\n # We want to make image pyramids down to a single block\n infile = GdalRasterImage(infname)\n while(infile.number_line // i > blocksize and\n infile.number_sample // i > blocksize):\n i *= 2\n cmd.append(str(i))\n subprocess.check_call(cmd)\n\n__all__ = [\"gdal_to_erdas_file\"]\n","repo_name":"Cartography-jpl/geocal","sub_path":"python/lib/gdal_to_erdas_file.py","file_name":"gdal_to_erdas_file.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"74602043559","text":"import pybullet as p\nimport os\n\n\nclass Plane:\n def __init__(self, client):\n f_name = os.path.join(os.path.dirname(__file__), 'simpleplane.urdf')\n self.planeID = p.loadURDF(fileName=f_name,\n basePosition=[0, 0, 0],\n physicsClientId=client,\n useFixedBase=True)\n \n def get_ids(self):\n return self.planeID\n\n","repo_name":"emenriquez/Humanoid_PyBullet_PPO","sub_path":"Humanoid_Basic_Env/resources/plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"18"} +{"seq_id":"80052062","text":"#Напишите программу, которая будет преобразовывать десятичное число в двоичное.\n\nprint('Введите число:')\nN = int(input())\na = []\nx = 0\nwhile N >= 1:\n a.append(N % 2)\n N = N // 2\nprint(*list(reversed(a)), sep='')","repo_name":"Anpchernova/Python_HW","sub_path":"Task11.py","file_name":"Task11.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"5400906951","text":"\n\ndef createError(name,defaultDescription):\n\tdef wrapper(description=None,context={}):\n\t\tclass ExceptionTemplate(Exception):\n\t\t\tname = None\n\t\t\tdescription = None\n\t\t\tcontext = None\n\t\t\tdef __init__(self, description):\n\t\t\t\tsuper().__init__(description)\n\t\t\t\tself.name = name\n\t\t\t\tself.description = description\n\t\t\t\tself.context = context\n\t\tif description is None: description = defaultDescription\n\t\tExceptionTemplate.__name__ = name\n\t\treturn ExceptionTemplate(description)\n\treturn wrapper\n\nWrongOperationType = createError(\n\tname=\"WrongOperationType\",\n\tdefaultDescription=\"The given operation type doesn't exist\"\n)\n\nNoArgsGiven = 
createError(\n\tname=\"NoArgsGiven\",\n\tdefaultDescription=\"No arguments were given for the operation\"\n)\n\nMissingOperationType = createError(\n\tname=\"MissingOperationType\",\n\tdefaultDescription=\"No operation type was provided\"\n)\n\nNoInputGiven = createError(\n\tname=\"NoInputGiven\",\n\tdefaultDescription=\"No input was provided to the given operation. You probably forgot to add a collection parameter\"\n)\n","repo_name":"abdulbahajaj/embeddedAnalytics","sub_path":"py_process/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25297693870","text":"import tensorflow as tf\n\nmeta_path = '/home/jupyter/repo/tensorflow-yolo-v3/saved_model/model.ckpt.meta' # Your .meta file\noutput_node_names = ['output:0']    # Output nodes\n\nwith tf.Session() as sess:\n    \n    # Restore the graph\n    saver = tf.train.import_meta_graph(meta_path)\n\n    # Load weights\n    saver.restore(sess,tf.train.latest_checkpoint('/home/jupyter/repo/tensorflow-yolo-v3/saved_model/'))\n\n    # Freeze the graph\n    frozen_graph_def = tf.graph_util.convert_variables_to_constants(\n        sess,\n        sess.graph_def,\n        output_node_names)\n\n    # Save the frozen graph\n    with open('/home/jupyter/repo/tensorflow-yolo-v3/output_graph.pb', 'wb') as f:\n        f.write(frozen_graph_def.SerializeToString())","repo_name":"mk-hasan/OpenImage-Data-convert-YOLOV3","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"18"} +{"seq_id":"6269341346","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport warnings\n\nimport astropy.units as u\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import QTable\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom regions.core import PixCoord, RegionMeta, Regions\nfrom regions.core.registry import RegionsRegistry\nfrom regions.io.fits.core import FITSParserError, shape_map\n\n__all__ = []\n\n\n@RegionsRegistry.register(Regions, 'parse', 'fits')\ndef _parse_fits(region_table):\n    \"\"\"\n    Parse a FITS region table.\n\n    Parameters\n    ----------\n    region_table : `~astropy.table.Table`\n        The table contents of a FITS region file.\n\n    Returns\n    -------\n    regions : `regions.Regions`\n        A `Regions` object containing a list of `~regions.Region`\n        objects.\n    \"\"\"\n    regions = parse_table(region_table)\n    return Regions(regions)\n\n\n@RegionsRegistry.register(Regions, 'read', 'fits')\ndef _read_fits(filename, cache=False):\n    \"\"\"\n    Read a FITS region file, converting a FITS regions table to a list\n    of `~regions.Region` objects.\n\n    Parameters\n    ----------\n    filename : str\n        The FITS region filename. The first \"REGION\" FITS extension will\n        be used.\n\n    cache : bool or 'update', optional\n        Whether to cache the contents of remote URLs. 
If 'update', check\n the remote URL for a new version but store the result in the\n cache.\n\n Returns\n -------\n regions : list\n A list of `~regions.Region` objects.\n \"\"\"\n with fits.open(filename, cache=cache) as hdul:\n for hdu in hdul:\n # use the first 'REGION' HDU\n if hdu.name == 'REGION':\n region_table = QTable.read(hdu)\n regions = _parse_fits(region_table)\n return regions\n\n raise FITSParserError('An extension with name (EXTNAME) \"REGION\" '\n 'was not found')\n\n\ndef get_shape(region_row):\n include = 1\n shape_key = 'SHAPE'\n if shape_key not in region_row.colnames:\n shape = 'point'\n return shape, include\n\n shape = region_row[shape_key].lower()\n if shape[0] == '!':\n include = 0\n shape = shape[1:]\n\n supported_shapes = list(shape_map.keys())\n unsupported_shapes = ['pie', 'sector', 'diamond', 'rhombus',\n 'rotdiamond', 'rotrhombus']\n valid_shapes = supported_shapes + unsupported_shapes\n\n if shape not in valid_shapes:\n raise FITSParserError(f'{shape!r} is not a valid FITS region shape')\n if shape not in supported_shapes:\n warnings.warn(f'{shape!r} is not supported by the regions package, '\n 'skipping.', AstropyUserWarning)\n shape = None\n\n return shape, include\n\n\ndef get_column_values(region_row, colname):\n index = None\n if colname[-1].isdigit():\n index = int(colname[-1])\n colname = colname[:-1]\n\n value = np.atleast_1d(region_row[colname])\n if isinstance(value, u.Quantity) and value.unit == u.pixel:\n value = value.value # remove pixel units\n\n if index is None: # polygon uses all values\n return value\n\n try:\n return value[index]\n except IndexError:\n raise FITSParserError(f'The {colname!r} column must have more '\n f'than {index!r} values for the region.')\n\n\ndef get_shape_params(shape, region_row, shape_columns):\n values = []\n for column in shape_columns:\n values.append(get_column_values(region_row, column))\n\n if 'rectangle' in shape:\n (xmin, xmax, ymin, ymax) = values[0:4]\n xcenter = 0.5 * (xmin + xmax)\n ycenter = 0.5 * (ymin + ymax)\n xsize = xmax - xmin\n ysize = ymax - ymin\n\n shape_params = [PixCoord(xcenter, ycenter), xsize, ysize]\n if shape == 'rotrectangle':\n shape_params.append(values[-1]) # angle\n\n return shape_params\n\n # center (or polygon) coordinates for all other regions\n shape_params = [PixCoord(values[0], values[1])]\n\n # shape params\n if shape == 'ellipse':\n # FITS uses semi-axis lengths;\n # the last value is always the rotation angle\n values[2:-1] = list(np.array(values[2:-1]) * 2.)\n\n shape_params.extend(values[2:])\n\n return shape_params\n\n\ndef parse_row(region_row):\n shape, include = get_shape(region_row)\n if shape is None:\n return None\n\n region_cls, shape_columns = shape_map[shape]\n\n for column in shape_columns:\n if column[-1].isdigit():\n column = column[:-1]\n if column not in region_row.colnames:\n warnings.warn(f'Table columns are missing for {shape!r} shape, '\n 'skipping.', AstropyUserWarning)\n return None\n\n shape_params = get_shape_params(shape, region_row, shape_columns)\n region = region_cls(*shape_params)\n\n meta = {}\n if include == 0:\n meta = {'include': include}\n region.meta = RegionMeta(meta)\n\n shape_key = 'COMPONENT'\n if shape_key in region_row.colnames:\n component = int(region_row[shape_key])\n meta = {'component': component}\n\n if meta:\n region.meta = RegionMeta(meta)\n\n return region\n\n\ndef parse_table(region_table):\n valid_columns = ('X', 'Y', 'SHAPE', 'R', 'ROTANG', 'COMPONENT')\n\n for column in region_table.colnames:\n if column not in 
valid_columns:\n            raise FITSParserError(f'{column!r} is not a valid column name')\n\n    regions = []\n    for row in region_table:\n        region = parse_row(row)\n        if region is not None:\n            regions.append(region)\n\n    return regions\n","repo_name":"astropy/regions","sub_path":"regions/io/fits/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":5641,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"18"} +{"seq_id":"43154293067","text":"import os\nimport glob\n\n#===============Config Settings =====================\n\nconfig_file = \"BuildConfig.mk\"\n\nCFG_PARAM_BOARD_FIELD = \"BOARD\"\nCFG_PARAM_TARGET_FIELD = \"MK_TARGET\"\n\nconfig_params = {\n    CFG_PARAM_BOARD_FIELD : None,\n    CFG_PARAM_TARGET_FIELD : None\n}\n\n#====================================================\n\nstm32_boards = [\"stm32f103\"]\n\nboards = stm32_boards\n\nbuild_configs = [\n    {\n        CFG_PARAM_TARGET_FIELD : \"STM32\",\n        CFG_PARAM_BOARD_FIELD : stm32_boards\n    }\n]\n\ndef board_select():\n    print(\"Please select the board:\")\n\n    for board in boards:\n        print(\"\\t{} - {}\".format(boards.index(board), board))\n\n    board = int(input(\"Board: \"))\n\n    if board < len(boards):\n        config_params[CFG_PARAM_BOARD_FIELD] = boards[board]\n        for config in build_configs:\n            if boards[board] in config[CFG_PARAM_BOARD_FIELD]:\n                config_params[CFG_PARAM_TARGET_FIELD] =\\\n                    config[CFG_PARAM_TARGET_FIELD]\n                break\n\n        print(config_params)\n\n    else:\n        print(\"Error: invalid board id!\")\n\n        y = input(\"Do you want to reselect? y/n: \")\n\n        if \"y\" == y:\n            board_select()\n\ndef change_config():\n    buffer = []\n    with open(config_file, \"r\") as config:\n        for line in config.readlines():\n            print(line)\n            config_param = line[:line.find(\"=\")]\n            if config_param in config_params:\n                buffer.append(\"{}={}\".format(config_param,\n                                             config_params[config_param]))\n            else:\n                buffer.append(\"{}=\".format(config_param))\n\n    print(buffer)\n\n    with open(config_file, \"w\") as config:\n        for line in buffer:\n            config.write(line)\n            config.write(\"\\n\")\n\nif __name__ == \"__main__\":\n    board_select()\n    change_config()\n\n","repo_name":"SrMrBurchick/GreenHouseModule","sub_path":".setup_config.py","file_name":".setup_config.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"3238259936","text":"import os\n\nimport openai\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\nresponse = openai.Completion.create(\n    model=\"text-davinci-002\",\n    prompt=\"Product description: A home milkshake maker\\nSeed words: fast, healthy, compact.\\nProduct names: HomeShaker, Fit Shaker, QuickShake, Shake Maker\\n\\nProduct description: A pair of shoes that can fit any foot size.\\nSeed words: adaptable, fit, omni-fit.\",\n    temperature=0.8,\n    max_tokens=60,\n    top_p=1.0,\n    frequency_penalty=0.0,\n    presence_penalty=0.0,\n)\n\nopenai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n\n@app.route(\"/\", methods=(\"GET\", \"POST\"))\ndef index():\n    if request.method == \"POST\":\n        animal = request.form[\"animal\"]\n        response = openai.Completion.create(\n            model=\"text-davinci-002\",\n            prompt=generate_prompt(animal),\n            temperature=0.6,\n        )\n        return redirect(url_for(\"index\", result=response.choices[0].text))\n\n    result = request.args.get(\"result\")\n    return render_template(\"index.html\", result=result)\n\n\ndef generate_prompt(animal):\n    return \"\"\"Suggest three names for an animal that is a superhero.\nAnimal: Cat\nNames: Captain Sharpclaw, Agent 
Fluffball, The Incredible Feline\nAnimal: Dog\nNames: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot\nAnimal: {}\nNames:\"\"\".format(\n        animal.capitalize()\n    )\n","repo_name":"elagasteratou/athena-round2","sub_path":"back/core/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"10538702212","text":"\"\"\"\nFollowing is the implementation of heap_sort algorithm.\nIt will sort the input array with time complexity O(N*logN)\nN being the size of array.\nSpace complexity is O(1).\n\"\"\"\n\n\ndef left_child_index(i):\n    \"\"\"\n    :param i: int\n        Index of node in array (that is organized as heap)\n    :return: int\n        Position in array of left child of node\n    \"\"\"\n\n    return 2 * (i + 1) - 1\n\n\ndef right_child_index(i):\n    \"\"\"\n    :param i: int\n        Index of node in array (that is organized as heap)\n    :return: int\n        Position in array of right child of node\n    \"\"\"\n\n    return left_child_index(i) + 1\n\n\ndef get_children_of_node(a, i, size):\n    \"\"\"\n    :param a: list\n        List organized as heap, i.e for each node i:\n        left children is at position 2(i + 1) - 1\n        right children is at position 2(i + 1))\n    :param i: int\n        Index of parent node in array (that is organized as heap)\n    :param size: int\n        Number of items in the active part of the heap\n    :return: (left children index, left children node) and\n        (right children index, right children node)\n    \"\"\"\n\n    l = left_child_index(i)  # index and value of children\n    l_val = a[l] if 0 <= l < size else None\n    r = right_child_index(i)\n    r_val = a[r] if 0 <= r < size else None\n    return (l, l_val), (r, r_val)\n\n\ndef find_largest_in_family(a, i, size):\n    \"\"\"\n    :param a: list\n        List organized as heap, i.e for each node i:\n        left children is at position 2(i + 1) - 1\n        right children is at position 2(i + 1))\n    :param i: int\n        Index of parent node in array (that is organized as heap)\n    :param size: int\n        Number of items in the active part of the heap\n    :return: index of largest value among parent and children\n    \"\"\"\n\n    (l, l_val), (r, r_val) = get_children_of_node(a, i, size)\n    largest_i = i  # find index of largest value among a[i] and its children\n    if l_val is not None and l_val > a[i]:\n        largest_i = l\n\n    if r_val is not None and r_val > a[largest_i]:\n        largest_i = r\n\n    return largest_i\n\n\ndef max_heapify(a, i, size):\n    \"\"\"\n    :param a: list\n        List organized as heap, i.e for each node i:\n        left children is at position 2(i + 1) - 1\n        right children is at position 2(i + 1))\n    :param i: int\n        Index of array where to start checking for max-heap condition from\n    :param size: int\n        Number of items in the active part of the heap\n    :return: list\n        List organized as max-heap.\n    \"\"\"\n\n    largest_i = find_largest_in_family(a, i, size)  # find largest node\n    if largest_i != i:  # swap only if it's needed\n        a[largest_i], a[i] = a[i], a[largest_i]  # swap\n        return max_heapify(a, largest_i, size)\n\n    return a\n\n\ndef heap_sort(a):\n    \"\"\"\n    :param a: list\n        List of objects with an order\n    :return: list\n        Sorted list with heap sort algorithm\n    \"\"\"\n\n    for i in range(len(a) // 2 - 1, -1, -1):\n        a = max_heapify(a, i, len(a))  # create max-heap over the whole list\n\n    for i in range(len(a) - 1, 0, -1):\n        a[0], a[i] = a[i], a[0]  # move current max behind the shrinking heap\n        a = max_heapify(a, 0, i)  # restore max-heap on the first i items\n\n    return a\n\n\ndef main():\n    \"\"\"\n    :return: void\n        Sorts sample list with heap sort algorithm,\n        then prints sorted list\n    \"\"\"\n\n    unsorted_list = [\n        437230, 851821, 184681, 173673, 13306, 768361, 431982, 956700, 65143,\n        556681, 198208, 983511, 170469, 313978, 552536, 334818, 527289,\n        959491, 303675, 532988\n    ]\n\n    sorted_list = heap_sort(unsorted_list)\n    print(sorted_list)\n\n\nif __name__ == '__main__':\n    
main()\n","repo_name":"iiitv/algos","sub_path":"heap_sort/heap_sort.py","file_name":"heap_sort.py","file_ext":"py","file_size_in_byte":3357,"program_lang":"python","lang":"en","doc_type":"code","stars":1064,"dataset":"github-code","pt":"18"} +{"seq_id":"75087016041","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('eguide', '0003_institution_quota'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='country',\n            name='cedaw_ratified',\n            field=models.CharField(help_text=b'Leave blank if the answer is NO. e.g. Yes (since 24 July 1985)', max_length=250, null=True, verbose_name=b'CEDAW ratified', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='country',\n            name='cedaw_signatory',\n            field=models.CharField(help_text=b'Leave blank if the answer is NO. e.g. Yes (since 24 July 1980)', max_length=250, null=True, verbose_name=b'CEDAW signatory', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='country',\n            name='hdi_position',\n            field=models.IntegerField(default=0, verbose_name=b'Human Development Index (HDI) Position'),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='country',\n            name='sigi',\n            field=models.CharField(help_text=b'22nd out of 86 non-OECD countries (latest rankings are from 2012)', max_length=250, null=True, verbose_name=b'Social Institutions and Gender Index', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='election',\n            name='female_elected',\n            field=models.IntegerField(default=0, help_text=b'Only enter a number of female elected officials such as parliament members, presidents, members of the assembly, etc.', verbose_name=b'Number of female elected leaders'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"acprsadmin/electionguide-testing","sub_path":"eguide/migrations/0004_auto_20150302_0906.py","file_name":"0004_auto_20150302_0906.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"73531574121","text":"import ops\nimport iopc\nimport sys\n\nTARBALL_FILE=\"qemu-2.12.0.tar.xz\"\nTARBALL_DIR=\"qemu-2.12.0\"\nINSTALL_DIR=\"qemu-bin\"\npkg_path = \"\"\noutput_dir = \"\"\ntarball_pkg = \"\"\ntarball_dir = \"\"\ninstall_dir = \"\"\ninstall_tmp_dir = \"\"\ncc_host = \"\"\ntmp_include_dir = \"\"\ndst_include_dir = \"\"\ndst_lib_dir = \"\"\ndst_usr_local_lib_dir = \"\"\n\ndef set_global(args):\n    global pkg_path\n    global output_dir\n    global tarball_pkg\n    global install_dir\n    global install_tmp_dir\n    global tarball_dir\n    global cc_host\n    global tmp_include_dir\n    global dst_include_dir\n    global dst_lib_dir\n    global dst_usr_local_lib_dir\n    global dst_usr_local_libexec_dir\n    global dst_usr_local_share_dir\n    global src_pkgconfig_dir\n    global dst_pkgconfig_dir\n    global dst_bin_dir\n    global dst_etc_dir\n    global install_test_utils\n    pkg_path = args[\"pkg_path\"]\n    output_dir = args[\"output_path\"]\n    tarball_pkg = ops.path_join(pkg_path, TARBALL_FILE)\n    install_dir = ops.path_join(output_dir, INSTALL_DIR)\n    install_tmp_dir = ops.path_join(output_dir, INSTALL_DIR + \"-tmp\")\n    tarball_dir = ops.path_join(output_dir, TARBALL_DIR)\n    cc_host_str = ops.getEnv(\"CROSS_COMPILE\")\n    cc_host = cc_host_str[:len(cc_host_str) - 1]\n    tmp_include_dir = ops.path_join(output_dir, ops.path_join(\"include\",args[\"pkg_name\"]))\n    dst_include_dir = 
ops.path_join(\"include\",args[\"pkg_name\"])\n dst_lib_dir = ops.path_join(install_dir, \"lib\")\n dst_bin_dir = ops.path_join(install_dir, \"bin\")\n dst_etc_dir = ops.path_join(install_dir, \"etc\")\n dst_usr_local_lib_dir = ops.path_join(install_dir, \"usr/local/lib\")\n dst_usr_local_libexec_dir = ops.path_join(install_dir, \"usr/local/libexec\")\n dst_usr_local_share_dir = ops.path_join(install_dir, \"usr/local/share\")\n src_pkgconfig_dir = ops.path_join(pkg_path, \"pkgconfig\")\n dst_pkgconfig_dir = ops.path_join(install_dir, \"pkgconfig\")\n if ops.getEnv(\"INSTALL_TEST_UTILS\") == 'y':\n install_test_utils = True\n else:\n install_test_utils = False\n\n\ndef MAIN_ENV(args):\n set_global(args)\n\n ops.exportEnv(ops.setEnv(\"CC\", ops.getEnv(\"CROSS_COMPILE\") + \"gcc\"))\n ops.exportEnv(ops.setEnv(\"CXX\", ops.getEnv(\"CROSS_COMPILE\") + \"g++\"))\n ops.exportEnv(ops.setEnv(\"CROSS\", ops.getEnv(\"CROSS_COMPILE\")))\n ops.exportEnv(ops.setEnv(\"DESTDIR\", install_tmp_dir))\n\n return False\n\ndef MAIN_EXTRACT(args):\n set_global(args)\n\n ops.unTarXz(tarball_pkg, output_dir)\n ops.copyto(ops.path_join(pkg_path, \"qemu-ifup\"), output_dir)\n\n return True\n\ndef MAIN_PATCH(args, patch_group_name):\n set_global(args)\n for patch in iopc.get_patch_list(pkg_path, patch_group_name):\n if iopc.apply_patch(tarball_dir, patch):\n continue\n else:\n sys.exit(1)\n\n return True\n\ndef MAIN_CONFIGURE(args):\n set_global(args)\n\n extra_conf = []\n extra_conf.append(\"--target-list=x86_64-softmmu,x86_64-linux-user\")\n\n cflags = iopc.get_includes()\n libs = iopc.get_libs()\n \n extra_conf.append(\"--extra-cflags=\" + cflags)\n extra_conf.append(\"--extra-ldflags=\" + libs)\n #extra_conf.append(\"--enable-sdl\")\n\n iopc.configure(tarball_dir, extra_conf)\n\n return True\n\ndef MAIN_BUILD(args):\n set_global(args)\n\n ops.mkdir(install_dir)\n ops.mkdir(install_tmp_dir)\n iopc.make(tarball_dir)\n iopc.make_install(tarball_dir)\n\n ops.mkdir(install_dir)\n ops.mkdir(dst_lib_dir)\n ops.mkdir(dst_bin_dir)\n ops.mkdir(dst_usr_local_lib_dir)\n\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/ivshmem-client\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/ivshmem-server\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-ga\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-img\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-io\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-nbd\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-pr-helper\"), dst_bin_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-system-x86_64\"), dst_bin_dir)\n if install_test_utils:\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/bin/qemu-x86_64\"), dst_bin_dir)\n\n ops.mkdir(dst_usr_local_libexec_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/libexec/qemu-bridge-helper\"), dst_usr_local_libexec_dir)\n\n ops.mkdir(dst_usr_local_share_dir)\n ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/share/qemu\"), dst_usr_local_share_dir)\n\n ops.mkdir(dst_etc_dir)\n ops.copyto(ops.path_join(output_dir, \"qemu-ifup\"), dst_etc_dir)\n #ops.mkdir(tmp_include_dir)\n #ops.copyto(ops.path_join(install_tmp_dir, \"usr/local/include/.\"), tmp_include_dir)\n\n #ops.mkdir(dst_pkgconfig_dir)\n #ops.copyto(ops.path_join(src_pkgconfig_dir, '.'), dst_pkgconfig_dir)\n\n return True\n\ndef MAIN_INSTALL(args):\n 
set_global(args)\n\n iopc.installBin(args[\"pkg_name\"], ops.path_join(ops.path_join(install_dir, \"lib\"), \".\"), \"lib\")\n iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_bin_dir, \".\"), \"bin\")\n iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_usr_local_lib_dir, \".\"), \"usr/local/lib\")\n iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_usr_local_libexec_dir, \".\"), \"usr/local/libexec\")\n iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_usr_local_share_dir, \".\"), \"usr/local/share\")\n iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_etc_dir, \".\"), \"etc\")\n #iopc.installBin(args[\"pkg_name\"], ops.path_join(tmp_include_dir, \".\"), dst_include_dir)\n #iopc.installBin(args[\"pkg_name\"], ops.path_join(dst_pkgconfig_dir, '.'), \"pkgconfig\")\n\n return False\n\ndef MAIN_SDKENV(args):\n set_global(args)\n\n return False\n\ndef MAIN_CLEAN_BUILD(args):\n set_global(args)\n\n return False\n\ndef MAIN(args):\n set_global(args)\n\n","repo_name":"YuanYuLin/qemu","sub_path":"Package/CONFIG.py","file_name":"CONFIG.py","file_ext":"py","file_size_in_byte":5898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"26056717624","text":"import pandas as pd\r\nimport bs4 as bs\r\nimport urllib.request\r\nimport datetime\r\nimport time\r\n\r\n'''\r\n==================\r\nADD OFSTED COLUMNS\r\n==================\r\n'''\r\n\r\n\r\ndef get_school_url(url):\r\n global soup\r\n\r\n url_exist = True\r\n\r\n try:\r\n source = urllib.request.urlopen(url).read()\r\n except urllib.request.HTTPError as err:\r\n url_exist = False\r\n print('HTTP Error:', err.code)\r\n return url_exist\r\n\r\n soup = bs.BeautifulSoup(source, 'lxml')\r\n\r\n return url_exist\r\n\r\n\r\ndef get_rating():\r\n global soup\r\n\r\n inspect_date = ''\r\n rating = ''\r\n\r\n school_details = soup.find(class_='timeline')\r\n\r\n for s in school_details.find_all(['time', 'strong']):\r\n if s.name == 'time':\r\n inspect_date = str(datetime.datetime.strptime(s.text, '%d %B %Y'))[:10].replace('-', '')\r\n elif s.name == 'strong':\r\n rating = s.text\r\n break\r\n\r\n if rating == '':\r\n inspect_date = ''\r\n\r\n return inspect_date, rating\r\n\r\n\r\ndef main():\r\n start_time = time.time()\r\n\r\n for f in ['england_ks2final.csv', 'england_ks4final.csv', 'england_ks5final.csv']:\r\n df = pd.read_csv(f, engine='python')\r\n\r\n tot_rows = df.shape[0]\r\n ofsted_rating = []\r\n inspect_date = []\r\n\r\n for n, r in enumerate(range(0, len(df)), start=1):\r\n elapsed_time = time.time() - start_time\r\n print('-'*80)\r\n print(datetime.timedelta(seconds=elapsed_time), \":\", f, '[', n, '/', tot_rows, ']')\r\n\r\n urn = df['URN'][r]\r\n sch = df['SCHNAME'][r]\r\n\r\n ofsted_url = 'http://www.ofsted.gov.uk/oxedu_providers/full/(urn)/' + str(urn).split('.')[0]\r\n\r\n print(sch, ofsted_url)\r\n\r\n url_found = get_school_url(ofsted_url)\r\n if not url_found:\r\n ofsted_rating.append('')\r\n inspect_date.append('')\r\n print('*** Not Found ***')\r\n else:\r\n insp_date, rating = get_rating()\r\n ofsted_rating.append(rating)\r\n inspect_date.append(insp_date)\r\n print(urn, insp_date, rating)\r\n\r\n df['OFSTEDRATING'] = ofsted_rating\r\n df['INSPECTIONDT'] = inspect_date\r\n df.to_csv(f, index=False, encoding='utf-8')\r\n\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","repo_name":"waiky8/eng-schools","sub_path":"step2_get_ofsted_rating.py","file_name":"step2_get_ofsted_rating.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"1215743155","text":"import pygame\nfrom imagenes import BTPLAY\nfrom imagenes import BTSETT\nfrom imagenes import BTQUIT\nfrom imagenes import SALIDA\nfrom imagenes import ROCK\nfrom imagenes import SISSORS\nfrom imagenes import PAPER\nfrom imagenes import LIZZARD\nfrom imagenes import SPOK\nfrom settings import SIZEBOTONP\nfrom settings import SIZEBOTONS\nfrom settings import SIZEBOTONQ\nfrom settings import SIZEBOTONSA\nfrom settings import SIZEBOTONRO\nfrom settings import SIZEBOTONSI\nfrom settings import SIZEBOTONPA\nfrom settings import SIZEBOTONLI\nfrom settings import SIZEBOTONSP\n\n\nclass Boton(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n \n\nclass PlayBoton(Boton):\n def __init__(self, botonplay, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONP)\n self.rect = self.surface.get_rect()\n self.botonplay = botonplay\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(BTPLAY, self.rect)\n\nclass SettBoton(Boton):\n def __init__(self, botonplay, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONS)\n self.rect = self.surface.get_rect()\n self.botonplay = botonplay\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(BTSETT, self.rect)\n\n\nclass QuitBoton(Boton):\n \n def __init__(self, botonquit, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONQ)\n self.rect = self.surface.get_rect()\n self.botonquit = botonquit\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(BTQUIT, self.rect)\n\nclass SalidaBoton(Boton):\n def __init__(self, botonquit, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONSA)\n self.rect = self.surface.get_rect()\n self.botonquit = botonquit\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(SALIDA, self.rect)\n\nclass RockBoton(Boton):\n def __init__(self, botonrock, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONRO)\n self.rect = self.surface.get_rect()\n self.botonrock = botonrock\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(ROCK, self.rect)\n\nclass SissorsBoton(Boton):\n def __init__(self, botonsissors, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONSI)\n self.rect = self.surface.get_rect()\n self.botonsissors = botonsissors\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n surface.blit(SISSORS, self.rect)\n\nclass PaperBoton(Boton):\n def __init__(self, botonpaper, pos_x, pos_y):\n super().__init__()\n self.surface = pygame.Surface(SIZEBOTONPA)\n self.rect = self.surface.get_rect()\n self.botonpaper = botonpaper\n self.pos_x = pos_x\n self.pos_y = pos_y\n \n\n def draw(self, surface):\n self.rect.x = self.pos_x\n self.rect.y = self.pos_y\n 
surface.blit(PAPER, self.rect)\n\nclass LizzardBoton(Boton):\n    def __init__(self, botonlizard, pos_x, pos_y):\n        super().__init__()\n        self.surface = pygame.Surface(SIZEBOTONLI)\n        self.rect = self.surface.get_rect()\n        self.botonlizard = botonlizard\n        self.pos_x = pos_x\n        self.pos_y = pos_y\n    \n\n    def draw(self, surface):\n        self.rect.x = self.pos_x\n        self.rect.y = self.pos_y\n        surface.blit(LIZZARD, self.rect)\n\nclass SpokBoton(Boton):\n    def __init__(self, botonspok, pos_x, pos_y):\n        super().__init__()\n        self.surface = pygame.Surface(SIZEBOTONSP)\n        self.rect = self.surface.get_rect()\n        self.botonspok = botonspok\n        self.pos_x = pos_x\n        self.pos_y = pos_y\n    \n\n    def draw(self, surface):\n        self.rect.x = self.pos_x\n        self.rect.y = self.pos_y\n        surface.blit(SPOK, self.rect)","repo_name":"Pedroff83/ProyectoBtcamp","sub_path":"botones.py","file_name":"botones.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"8327037918","text":"import random\n#this method reads the file, and returns a two-dimensional array\ndef readMatrix(filename):\n    txt=open(filename,'r+')\n    line=txt.readline()\n    count=0\n    matrix=[]\n    while len(line)>0:\n        attrs=line.strip(\"\\n\").split('\\t')\n        matrix.append(attrs)\n        count+=1\n        line=txt.readline()\n    txt.close()\n    return matrix\n\n\n#this method gets the training and testing dataset from 'rec_log_train.txt'\ndef splitTrain_Test (filename):\n    txt = open(filename, 'r+')\n    testing = open('../data/testing.txt', 'w')\n    training = open('../data/training.txt', 'w')\n    line = txt.readline()\n    count = 0\n    train_matrix = []\n    test_matrix = []\n    while len(line) > 0 and count<100:\n        #attrs=line.strip(\"\\n\").split('\\t')\n        if(random.random()>=0.7):\n            testing.write(line)\n            #test_matrix.append(attrs)\n        else:\n            training.write(line)\n            #train_matrix.append(attrs)\n        count += 1\n        line = txt.readline()\n    txt.close()\n    #return [train_matrix, test_matrix]","repo_name":"YoufuLi/twbpredict","sub_path":"twbpredict/file_operation.py","file_name":"file_operation.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"74408666279","text":"# rawIMUVals.py\n# Collects raw accelerometer and gyroscope readings and saves them to data.csv\n\nimport sys\nimport time\nimport math\nimport IMU\nimport datetime\nimport os\nimport numpy as np\nimport csv\n\nRAD_TO_DEG = 57.29578\nM_PI = 3.14159265358979323846\nG_GAIN = 0.070\t\t # [deg/s/LSB] If you change the dps for gyro, you need to update this value accordingly\nAA = 0.40\t\t\t # Complementary filter constant\nMAG_LPF_FACTOR = 0.4\t# Low pass filter constant magnetometer\nACC_LPF_FACTOR = 0.4\t# Low pass filter constant for accelerometer\nACC_MEDIANTABLESIZE = 9\t\t # Median filter table size for accelerometer. Higher = smoother but a longer delay\nMAG_MEDIANTABLESIZE = 9\t\t # Median filter table size for magnetometer. 
Higher = smoother but a longer delay\n\n\n# Compass Calibration offset\n# Values found by running calibrateBerryIMU.py\nmagXmin = -22661\nmagYmin = 32736\nmagZmin = 3873\nmagXmax = -22584\nmagYmax = 32767\nmagZmax = 4260\n\n#Kalman filter variables\nQ_angle = 0.02\nQ_gyro = 0.0015\nR_angle = 0.005\ny_bias = 0.0\nx_bias = 0.0\nXP_00 = 0.0\nXP_01 = 0.0\nXP_10 = 0.0\nXP_11 = 0.0\nYP_00 = 0.0\nYP_01 = 0.0\nYP_10 = 0.0\nYP_11 = 0.0\nKFangleX = 0.0\nKFangleY = 0.0\n\n\na = datetime.datetime.now()\n\nIMU.detectIMU()\t #Detect if BerryIMU is connected.\nif(IMU.BerryIMUversion == 99):\n\tprint(\" No BerryIMU found... exiting \")\n\tsys.exit()\nIMU.initIMU()\t #Initialise the accelerometer, gyroscope and compass\n\n\naxs = []\nays = []\naxz = []\ngxs = []\ngys = []\ngzs = []\n\nlist_limit = 30\n\nt = 0\n# one minute: 1200\nwhile t < 600:\n\n\t#Read the accelerometer,gyroscope and magnetometer values\n\tACCx = IMU.readACCx()\n\tACCy = IMU.readACCy()\n\tACCz = IMU.readACCz()\n\tGYRx = IMU.readGYRx()\n\tGYRy = IMU.readGYRy()\n\tGYRz = IMU.readGYRz()\n\tMAGx = IMU.readMAGx()\n\tMAGy = IMU.readMAGy()\n\tMAGz = IMU.readMAGz()\n\n\taxs.append(ACCx)\n\tays.append(ACCy)\n\taxz.append(ACCz)\n\tgxs.append(GYRx)\n\tgys.append(GYRy)\n\tgzs.append(GYRz)\n\n\toutputString = \"\"\n\n\tif 0:\t\t\t\t\t #Change to '1' to show the angles from the accelerometer\n\t\toutputString += \"# ACCX Angle %5.2f ACCY Angle %5.2f # \" % (ACCx, ACCy)\n\n\tif 0:\t\t\t\t\t #Change to '1' to show the angles from the gyro\n\t\toutputString +=\"\\t# GRYX Angle %5.2f GYRY Angle %5.2f GYRZ Angle %5.2f # \" % (GYRx,GYRy,GYRz)\n\n\t#print(outputString)\n\n\t#slow program down a bit, makes the output more readable\n\ttime.sleep(0.05)\n\tt += 1\n\nprint(\"Data collected, writing to data.csv\")\ndata = np.matrix((axs,ays,axz,gxs,gys,gzs)).T\nnp.savetxt('data.csv', data, delimiter=',')\nprint(\"File saved\")\n# Sample length is 0.05*600 seconds, i.e. 30 seconds","repo_name":"icd107/180DA-WarmUp","sub_path":"rawIMUVals.py","file_name":"rawIMUVals.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"19835035458","text":"import networkx as nx\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers, Model\n\n# Load data\nG = nx.karate_club_graph()\nadj_matrix = nx.adjacency_matrix(G).toarray().astype(np.float32)\nfeatures = np.eye(adj_matrix.shape[0]).astype(np.float32)\nlabels = np.array([0 if G.nodes[i]['club']=='Mr. 
Hi' else 1 for i in range(G.number_of_nodes())]).astype(np.float32)\n\n# Preprocess data\nlabels = tf.keras.utils.to_categorical(labels)\n\n# Define GCN model\nclass GCN(Model):\n def __init__(self, input_dim, hidden_dim, output_dim):\n super(GCN, self).__init__()\n self.dense1 = layers.Dense(hidden_dim, activation='relu')\n self.dense2 = layers.Dense(output_dim, activation='softmax')\n self.dropout = layers.Dropout(0.5)\n\n def call(self, inputs):\n x, A = inputs\n x = self.dropout(x)\n x = self.dense1(tf.matmul(A, x))\n x = self.dropout(x)\n x = self.dense2(tf.matmul(A, x))\n return x\n\n# Define training loop\ndef train(model, x_train, y_train, epochs):\n optimizer = tf.keras.optimizers.Adam()\n loss_fn = tf.keras.losses.CategoricalCrossentropy()\n\n for epoch in range(epochs):\n with tf.GradientTape() as tape:\n logits = model(x_train)\n loss_value = loss_fn(y_train, logits)\n \n gradients = tape.gradient(loss_value, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_acc = np.mean(np.argmax(logits, axis=1) == np.argmax(y_train, axis=1))\n \n print(f\"Epoch {epoch+1}, loss: {loss_value:.4f}, train accuracy: {train_acc:.4f}\")\n\n# Initialize and train model\ninput_dim = features.shape[1]\nhidden_dim = 16\noutput_dim = labels.shape[1]\nmodel = GCN(input_dim, hidden_dim, output_dim)\ntrain(model, (features, adj_matrix), labels, epochs=200)\n","repo_name":"chirag2796/Pastry-Metis-Standalone","sub_path":"cora_partition/FederatedML/IndividualScripts/Karate/gnn.py","file_name":"gnn.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"25465038676","text":"from mygrad.tensor_creation.funcs import full\n\n\ndef constant(*shape, value=0.0, dtype=None, constant=None):\n \"\"\"Initialize a :class:`mygrad.Tensor` of shape `shape` with a constant value.\n\n This function is a thin wrapper around ``mygrad.full``.\n\n Parameters\n ----------\n shape : Sequence[int]\n The output shape.\n\n value : Real, optional (default=0)\n The value with which to fill the tensor.\n\n dtype : data-type, optional (default=None)\n The data type of the output tensor, or None to match ``value``.\n\n constant : bool, optional (default=False)\n If ``True``, the returned tensor is a constant (it\n does not back-propagate a gradient).\n\n Returns\n -------\n Tensor\n A Tensor of ``value`` with the given shape and dtype.\n\n Examples\n ----------\n >>> import mygrad as mg\n >>> mg.nnet.initializers.constant(2, 3, value=1)\n Tensor([[1, 1, 1],\n [1, 1, 1]])\n\n >>> mg.nnet.initializers.constant((3, 3), value=7.1)\n Tensor([[7.1, 7.1, 7.1],\n [7.1, 7.1, 7.1],\n [7.1, 7.1, 7.1]])\n\n >>> mg.nnet.initializers.constant(4)\n Tensor([0., 0., 0., 0.])\n \"\"\"\n if len(shape) == 1:\n shape = shape[0]\n\n return full(shape, value, dtype=dtype, constant=constant)\n","repo_name":"rsokl/MyGrad","sub_path":"src/mygrad/nnet/initializers/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":181,"dataset":"github-code","pt":"18"} +{"seq_id":"40254615635","text":"#-*-coding=utf8-*-\n#coding = utf8\n#pymongo api: http://api.mongodb.org/python/current/tutorial.html\n\nimport StringIO\nimport glob\nimport os\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\ndef stats_all_file(dirname):\n stats = {}\n filelist = glob.glob(dirname + '*.txt')\n for filename in filelist:\n with open(filename) as 
f:\n for line in f:\n terms = line.split('\\t')\n terms = [term.strip() for term in terms]\n if len(terms) != 2:\n continue\n warc_file = terms[0][terms[0].find('.')+1:]\n #in windows\n warc_file = warc_file.replace('/','\\\\')\n count = int(terms[1])\n stats[warc_file] = count\n return stats\n\ndef get_doc_num(process_filename, dirname):\n stats = stats_all_file(dirname) \n for key,value in stats.items():\n if process_filename.endswith(key):\n return value\n return 0\n\nif __name__ == '__main__':\n dir = 'D:\\\\users\\\\yueming_shuo\\\\record_count\\\\'\n stats_all_file(dir)\n","repo_name":"RominYue/SearchEngineDemo","sub_path":"warc_to_mongo/test_count.py","file_name":"test_count.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"43160254202","text":"from typing import List\nfrom copy import deepcopy\n\nwith open('day3_input.txt') as f:\n input_data = [int(input.strip()) for input in f]\n\n#### PART 1 ####\ndef calc_gamma(t: List, bit_len: int) -> int:\n gamma = 0\n for i in range(0,bit_len):\n # Get each bit in the given pos\n bits = list(map(lambda n: 1 if (n >> i) & 0x1 == 1 else 0, t))\n gamma |= (1 << i) if bits.count(1) > bits.count(0) else 0\n return gamma\n\ndef calc_epsilon(gamma: int, bit_len: int) -> int:\n return ~gamma + 2**bit_len\n\ngamma = calc_gamma(input_data, 12)\nepsilon = calc_epsilon(gamma, 12)\nprint(gamma * epsilon)\n\n#### PART 2 ####\ndef calc_oxy(t: List, start: int) -> list:\n # Get each bit in the given pos\n bits = list(map(lambda n: 1 if (n >> start) & 0x1 == 1 else 0, t))\n\n # if there are more 1's keep them, otherwise keep the 0s\n to_keep = 1 if bits.count(1) >= bits.count(0) else 0\n filtered_list = [item for item in t if ((item >> start) & 0x1) == to_keep]\n return filtered_list\n\ndef calc_c02(t: List, start: int) -> list:\n # Get each bit in the given pos\n bits = list(map(lambda n: 1 if (n >> start) & 0x1 == 1 else 0, t))\n # if there are more 1's keep the 0s, otherwise keep the 1s\n to_keep = 0 if bits.count(1) >= bits.count(0) else 1\n filtered_list = [item for item in t if ((item >> start) & 0x1) == to_keep]\n return filtered_list\n\noxygen_list = deepcopy(input_data)\nco2_list = deepcopy(input_data)\n\nfor i in range(11,-1,-1):\n if len(oxygen_list) != 1:\n oxygen_list = calc_oxy(oxygen_list,i)\n if len(co2_list) != 1:\n co2_list = calc_c02(co2_list, i)\n\nprint(f'{oxygen_list[0]} * {co2_list[0]} = {oxygen_list[0] * co2_list[0]}')\n\n\n\n\n","repo_name":"agueo/AOC_solutions","sub_path":"2021/day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"37721889269","text":"import json\nfrom PIL import Image\n\nAPPNAME = \"ben_skyblock_collection_stream\"\nAUTHOR = \"io.github.aws1313\"\nCACHE_DIR = \"/data/cache\"\nDATA_DIR = \"/data/data\"\ndefault_img = Image.new(\"RGBA\", (750,150))\n\n\ndef save_to_json(filename, data):\n with open(filename, 'w') as file:\n json.dump(data, file, indent=4)\n\n\ndef read_from_json(filename):\n with open(filename) as f:\n return json.loads(f.read())\n","repo_name":"aws1313/ben_skyblock_collection_stream","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"16063216991","text":"import MeCab\nfrom dai4syou import No30\nfrom collections 
import Counter\n\nm = No30.mapping_MeCab(\"neko.txt.mecab\")\n\nnouns = []\nnoun = []\nc = Counter()\n\nfor morphemes in m:\n for morpheme in morphemes:\n c.update([morpheme[\"surface\"]])\n\nprint(c)","repo_name":"take9999/knock100","sub_path":"dai4syou/No35.py","file_name":"No35.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"17857984987","text":"def bullengulfing(open,close,high,low,time):\n if len(open)<2:\n pass\n elif len(open)>5:\n if close[1]0:\n return time[0]\n else:\n return 0\n elif close[1] dict:\n \"\"\"\n Parse k=v string into an object\n\n >>> key_value_to_obj(b'foo=bar&baz=qux&zap=zazzle')\n {b'foo': b'bar', b'baz': b'qux', b'zap': b'zazzle'}\n \"\"\"\n obj = {}\n for pair in key_value_str.split(b'&'):\n [key, value] = pair.split(b'=')\n obj[key] = value\n return obj\n\ndef profile_for(email: bytes, uid: int = 10) -> bytes:\n \"\"\"\n Generate user profile information with uid and role given an email. Eat the metacharacters &, =. \n\n >>> profile_for(b'foo@bar.com')\n b'email=foo@bar.com&uid=10&role=user'\n >>> profile_for(b'foo@bar.com&role=admin')\n b'email=foo@bar.comroleadmin&uid=10&role=user'\n \"\"\"\n email = email.replace(b'&', b'')\n email = email.replace(b'=', b'')\n encoded_profile = f'email={email.decode()}&uid={uid}&role=user'.encode()\n return encoded_profile\n\ndef encrypt_user_profile(encoded_profile: bytes, key: bytes = None) -> typing.Tuple[bytes, bytes]:\n \"\"\"\n Encrypts user profile using AES-128-ECB with a randomly generated key\n \"\"\"\n if key is None:\n key = random_aes_key()\n cipher = AES.new(key, AES.MODE_ECB)\n plaintext = pkcs7_padding(encoded_profile, len(encoded_profile) + 16 - len(encoded_profile) % 16)\n return key, cipher.encrypt(plaintext)\n\ndef decrypt_user_profile(key: bytes, ciphertext: bytes) -> bytes:\n \"\"\"\n Decrypts user profile encrypted in AES-128-ECB. Converts resulting text to a user profile object. \n \"\"\"\n cipher = AES.new(key, AES.MODE_ECB)\n return key_value_to_obj(cipher.decrypt(ciphertext))\n\ndef get_admin_profile() -> dict:\n \"\"\"\n Creates an admin profile using only the profile_for function.\n \"\"\"\n # get the 'email=...uid=...' ciphertext blocks\n email1 = b'f' + b'o' * 4 + b'@bar.com'\n key, ciphertext1 = encrypt_user_profile(profile_for(email1))\n\n # get the 'admin\\x04\\x04...' 
ciphertext block\n email2 = b'a' * 9 + b'@admin' + b'\\x04' * 11\n key, ciphertext2 = encrypt_user_profile(profile_for(email2), key)\n\n # concatenate and decrypt\n admin_profile = decrypt_user_profile(key, ciphertext1[:32] + ciphertext2[16:32])\n\n # strip padding\n admin_profile[b'role'] = admin_profile[b'role'][:5]\n return admin_profile\n\ndef main() -> None:\n doctest.testmod()\n admin_profile = get_admin_profile()\n assert(admin_profile[b'role'] == b'admin')\n\nif __name__ == \"__main__\":\n main()","repo_name":"benjaminwu16/cryptopals-solutions","sub_path":"set2/s2c13.py","file_name":"s2c13.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"32654122786","text":"from __future__ import annotations\nfrom typing import List\nimport math\n\n\nclass Complex:\n def __init__(self, real: float = 0, img: float = 0) -> None:\n self.real = real\n self.img = img\n\n @property\n def real(self) -> int | float:\n return self._real\n\n @real.setter\n def real(self, value) -> None:\n if abs(value - int(value)) < 1e-5:\n if value <= 0:\n value = math.ceil(value)\n else:\n value = math.floor(value)\n self._real = value\n\n @property\n def img(self) -> int | float:\n return self._img\n\n @img.setter\n def img(self, value) -> None:\n if abs(value - int(value)) < 1e-5:\n if value <= 0:\n value = math.ceil(value)\n else:\n value = math.floor(value)\n self._img = value\n\n \n @property\n def polar(self) -> List[int | float]:\n radius = (self.real ** 2 + self.img ** 2) ** 0.5\n if radius == abs(self.img): # Means that the real part is 0\n if radius == self.img:\n theta = math.pi / 2\n else:\n theta = 270 * math.pi / 180\n elif radius == abs(self.real):\n if radius == self.real:\n theta = 0\n else:\n theta = math.pi\n else:\n theta = math.atan(self.img / self.real)\n\n return [radius, theta]\n\n def __repr__(self) -> str:\n return f\"Complex(real={self.real}, img={self.img}, polar={self.polar})\"\n\n def __str__(self) -> str:\n if self.img > 0:\n if self.real != 0:\n return f\"{self.real} + {self.img}i\"\n if self.img == 1:\n return \"i\"\n return f\"{self.img}i\"\n if self.img == 0:\n return f\"{self.real}\"\n if self.real == 0:\n if self.img == -1:\n return \"-i\"\n return f\"{self.img}i\"\n return f\"{self.real} - {abs(self.img)}i\"\n\n def __add__(self, other) -> Complex:\n if isinstance(other, Complex):\n return Complex(real=self.real + other.real, img=self.img + other.img)\n elif isinstance(other, (int, float)):\n return Complex(real=self.real + other, img=self.img)\n else:\n raise TypeError(\n \"Addition with complex numbers only supported by int, float, or Complex\"\n )\n\n def __iadd__(self, other) -> Complex:\n if isinstance(other, Complex):\n self.real += other.real\n self.img += other.img\n\n elif isinstance(other, (int, float)):\n self.real += other\n\n else:\n raise TypeError(\n \"Addition with complex numbers only supported by int, float, or Complex\"\n )\n\n return self\n\n def __neg__(self) -> Complex:\n return Complex(real=-self.real, img=-self.img)\n\n def __pos__(self) -> Complex:\n return Complex(real=abs(self.real), img=abs(self.img))\n\n def __rsub__(self, other) -> Complex:\n return -self + other\n\n def __eq__(self, other) -> bool:\n if isinstance(other, Complex):\n return self.real == other.real and self.img == other.img\n return False\n\n def __ne__(self, other) -> bool:\n if isinstance(other, Complex):\n return self.real != other.real or self.img != 
other.img\n return False\n\n def __sub__(self, other) -> Complex:\n if isinstance(other, Complex):\n return Complex(real=self.real - other.real, img=self.img - other.img)\n elif isinstance(other, (int, float)):\n return Complex(real=self.real - other, img=self.img)\n else:\n raise TypeError(\n \"Subtraction with complex numbers only supported with int, float, or Complex\"\n )\n\n def __isub__(self, other) -> Complex:\n if isinstance(other, Complex):\n self.real -= other.real\n self.img -= other.img\n\n elif isinstance(other, (int, float)):\n self.real -= other\n\n else:\n raise TypeError(\n \"Subtraction with complex numbers only supported with int, float, or Complex\"\n )\n\n return self\n\n def __mul__(self, other) -> Complex:\n if isinstance(other, Complex):\n real = self.real * other.real - self.img * other.img\n img = self.real * other.img + self.img * other.real\n return Complex(real=real, img=img)\n\n elif isinstance(other, (int, float)):\n return Complex(real=other * self.real, img=other * self.img)\n\n else:\n raise TypeError(\n \"Multiplication with complex numbers only supported by int, float, or Complex\"\n )\n\n def __truediv__(self, other) -> Complex:\n \n if isinstance(other, Complex):\n conjugate = other.conjugate()\n divisor = (other * conjugate).real\n numerator = self * conjugate\n numerator.real /= divisor\n numerator.img /= divisor\n return numerator\n\n elif isinstance(other, (int, float)):\n return Complex(real=self.real / other, img=self.img / other)\n\n else:\n raise TypeError(\n \"Division with complex numbers only supported with int, float, or complex\"\n )\n\n def __rtruediv__(self, other):\n conjugate = self.conjugate()\n if isinstance(other, (Complex, int, float)):\n divisor = (self * conjugate).real\n\n numerator = other * conjugate\n numerator.real /= divisor\n numerator.img /= divisor\n\n return numerator\n\n else:\n raise TypeError(\n \"Division with complex numbers only supported with int, float, or complex\"\n )\n\n def __rfloordiv__(self, other) -> Complex:\n result = other / self\n result.real //= 1\n result.img //= 1\n return result\n\n def __floordiv__(self, other):\n result = self / other\n result.real //= 1\n result.img //= 1\n return result\n\n def __pow__(self, power) -> Complex:\n if isinstance(power, Complex):\n if power.img != 0:\n polar_form = self.polar\n complex_log = Complex(real=math.log(polar_form[0]), img=polar_form[1])\n exponent = power * complex_log\n real_part = math.exp(exponent.real)\n imaginary = Complex(\n real=math.cos(exponent.img), img=math.sin(exponent.img)\n )\n return imaginary * real_part\n else:\n power = power.real\n if isinstance(power, (int, float)):\n polar = self.polar\n polar[0] **= power\n polar[1] *= power\n return Complex(\n real=polar[0] * math.cos(polar[1]), img=polar[0] * math.sin(polar[1])\n )\n\n if self.img == 0:\n return self.real ** power\n\n def __rpow__(self, other) -> Complex:\n real_part = pow(other, self.real)\n e = math.log(other) * self.img\n imaginary = Complex(real=math.cos(e), img=math.sin(e)) # Euler's formula\n return imaginary * real_part\n\n def __abs__(self):\n return self.polar[0]\n\n def conjugate(self) -> Complex:\n return Complex(real=self.real, img=-self.img)\n\n __radd__ = __add__\n __rmul__ = __mul__\n","repo_name":"CoderN-P/Custom-Python-Types","sub_path":"complex.py","file_name":"complex.py","file_ext":"py","file_size_in_byte":7480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"35010517125","text":"import numpy as 
np\n\n\ndef check_board(board):\n for i in range(5):\n if np.all(board[i, :] == -1):\n return True\n if np.all(board[:, i] == -1):\n return True\n return False\n\n\ndef check_last_board(board, boards):\n counter = 0\n winner = False\n for b in boards:\n if b[0, 0] == -2:\n counter += 1\n if counter == len(boards) - 1:\n winner = True\n\n for i in range(5):\n if np.all(board[i, :] == -1):\n if winner:\n return True\n board.fill(-2)\n if np.all(board[:, i] == -1):\n if winner:\n return True\n board.fill(-2)\n return False\n\n\ndef main():\n result = []\n\n boards = []\n with open(\"input.txt\", \"r\") as f:\n nums = np.fromstring(f.readline(), dtype=int, sep=\",\")\n lines = f.readlines()\n line_count = sum(1 for line in lines)\n for i in range(0, line_count, 6):\n boards.append(np.loadtxt(lines[i : i + 6], dtype=int))\n\n # Part 1\n boards_1 = boards.copy()\n loop = False\n for num in nums:\n for board in boards_1:\n board[board == num] = -1\n if check_board(board):\n board_sum = board[board != -1].sum()\n print(\n f\"Winner!\\n{board}\\nnumber: {num}\\nsum: {board_sum}\\nresult: {board_sum * num}\\n\"\n )\n loop = True\n break\n if loop:\n break\n\n result.append(board_sum * num)\n\n # Part 2\n boards_2 = boards.copy()\n loop = False\n for num in nums:\n for board in boards_2:\n board[board == num] = -1\n if check_last_board(board, boards_2):\n board_sum = board[board != -1].sum()\n print(\n f\"Last winner!\\n{board}\\nnumber: {num}\\nsum: {board_sum}\\nresult: {board_sum * num}\\n\"\n )\n loop = True\n break\n if loop:\n break\n\n result.append(board_sum * num)\n\n with open(\"output.txt\", \"w\") as f:\n for i in result:\n f.write(f\"{i}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sjerzykiewicz/advent_of_code","sub_path":"2021/Day_04/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"} +{"seq_id":"71426996840","text":"data_from = \"kaggle\"\n\nif data_from == \"colab\":\n\n from google.colab import files\n\n files.upload()\n \n import zipfile\n\n def unzip(path):\n\n with zipfile.ZipFile(path,\"r\") as z:\n\n z.extractall('.')\n\n train_zip = \"/content/cactus/data/train\"\n\n unzip(train_zip+\".zip\")\n\n test_zip = \"/content/cactus/data/test\"\n\n unzip(test_zip+\".zip\")\n\n train_dir = \"/content/train\"\n\n test_dir = \"/content/test\"\n\n train_labels_path = \"/content/cactus/data/train.csv\"\n\n data_folder_path = \"/content\"\n\n csv_path = \"/content/cactus/data/sample_submission.csv\" \n\nelif data_from == \"local\":\n\n data_folder_path = \"aerial-cactus-identification\"\n\n train_dir = \"aerial-cactus-identification/train\"\n\n test_dir = \"aerial-cactus-identification/test\"\n\n train_labels_path = \"aerial-cactus-identification/train.csv\"\n\n csv_path = \"aerial-cactus-identification/sample_submission.csv\" \n\nelif data_from == \"kaggle\":\n\n import zipfile\n\n def unzip(path):\n\n with zipfile.ZipFile(path,\"r\") as z:\n\n z.extractall('.')\n\n train_zip = \"../input/aerial-cactus-identification/train\"\n\n unzip(train_zip+\".zip\")\n\n test_zip = \"../input/aerial-cactus-identification/test\"\n\n unzip(test_zip+\".zip\")\n\n train_dir = \"/kaggle/working/train\"\n\n test_dir = \"/kaggle/working/test\"\n\n train_labels_path = \"../input/aerial-cactus-identification/train.csv\"\n\n data_folder_path = \"/kaggle/working\"\n\n csv_path = \"../input/aerial-cactus-identification/sample_submission.csv\" \n\n \nimport os\n\nfrom datetime 
import datetime\n\n\n\nimport pandas as pd\n\nimport torch\n\nfrom torch import nn\n\nfrom torch.utils.data import Dataset, DataLoader, SubsetRandomSampler\n\nfrom tqdm.notebook import tqdm\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom torchvision import transforms, models\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\ntorch.manual_seed(100)\n\nnp.random.seed(100)\nclass CactusDataset2(Dataset):\n\n def __init__(self, labels_csv_path, root_dir, transform=None):\n\n \"\"\"\n\n Inputs:\n\n labels_csv_path (string): path to csv containing images' names and labels\n\n root_dir (string): path to directory with images\n\n transform (callable, optional): Optional transform to be applied on images\"\"\"\n\n\n\n df = pd.read_csv(labels_csv_path)\n\n self.labels = df['has_cactus']\n\n # Read data to memory\n\n self.images = []\n\n print(\"Reading images to memory\")\n\n for _, img_name in tqdm(df['id'].items()):\n\n img_path = os.path.join(root_dir, img_name)\n\n image = plt.imread(img_path)\n\n self.images.append(image)\n\n \n\n if transform is None:\n\n self.transform = transforms.Compose([transforms.ToTensor(),\n\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n else:\n\n self.transform = transform\n\n\n\n def __len__(self):\n\n return len(self.labels)\n\n\n\n def __getitem__(self, idx):\n\n if torch.is_tensor(idx):\n\n idx = idx.item()\n\n label = self.labels.iloc[idx].astype(np.float32).reshape(-1)\n\n label = torch.tensor(label).long()\n\n image = self.images[idx]\n\n \n\n image = self.transform(image)\n\n return image, label\nresize = 128\n\ntransform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(resize),\n\n transforms.RandomHorizontalFlip(), transforms.RandomVerticalFlip(),\n\n transforms.ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25),\n\n transforms.RandomAffine(degrees=10, translate=(0.05, 0.05), scale=(1, 1.1)),\n\n transforms.ToTensor(),\n\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\ntest_transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(resize), transforms.ToTensor(),\n\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n\n\ndataset_train = CactusDataset2(train_labels_path, train_dir, transform=transform)\n\ndataset_val = CactusDataset2(train_labels_path, train_dir, transform=test_transform)\n\ntestset = CactusDataset2(csv_path, test_dir, transform=test_transform)\n\n\n\n# Split training dataset to train and validation\n\n\n\nbatch_size = 64\n\n\n\nnum_train = len(dataset_train)\n\nvalid_percent = 0.01\n\nvalid_size = round(valid_percent * num_train)\n\ntrain_size = num_train - valid_size\n\nindices = list(range(num_train))\n\nnp.random.shuffle(indices)\n\ntrain_idx, valid_idx = indices[:train_size], indices[train_size:]\n\ntrain_sampler = SubsetRandomSampler(train_idx)\n\nvalid_sampler = SubsetRandomSampler(valid_idx)\n\n\n\n\n\ntrain_loader = DataLoader(dataset_train, batch_size=batch_size, sampler=train_sampler, num_workers=4)\n\nvalid_loader = DataLoader(dataset_val, batch_size=batch_size, sampler=valid_sampler, num_workers=4)\n\ntest_loader = DataLoader(testset, batch_size=batch_size, shuffle=False)\n# Test the augmentation\n\nindices = np.random.choice(list(range(len(dataset_val))), 16)\n\nfig, axes = plt.subplots(4, 4, figsize=(6, 6))\n\nfor i, idx in enumerate(indices):\n\n img, label = dataset_val[idx]\n\n img = 0.22*img + 0.45\n\n img = img.numpy().transpose(1, 2, 0)\n\n axes.flatten()[i].imshow(img)\n\n label = 
\"Cactus\" if label==1 else \"Garbage\"\n\n axes.flatten()[i].set_title(str(label))\n\n axes.flatten()[i].set_axis_off()\n\nfig.suptitle(\"Validation Sample\")\n\n# plt.tight_layout()\n\n\n\nindices = np.random.choice(list(range(len(dataset_train))), 16)\n\nfig, axes = plt.subplots(4, 4, figsize=(6, 6))\n\nfor i, idx in enumerate(indices):\n\n img, label = dataset_train[idx]\n\n img = 0.22*img + 0.45\n\n img = img.numpy().transpose(1, 2, 0)\n\n axes.flatten()[i].imshow(img)\n\n label = \"Cactus\" if label==1 else \"Garbage\"\n\n axes.flatten()[i].set_title(str(label))\n\n axes.flatten()[i].set_axis_off()\n\nfig.suptitle(\"Training Sample\")\n\n# plt.tight_layout()\n# Credit fast.ai https://github.com/fastai/fastai/blob/master/fastai/layers.py#L176\n\nclass AdaptiveConcatPool2d(nn.Module):\n\n \"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`.\"\n\n def __init__(self, sz=None):\n\n super().__init__()\n\n \"Output will be 2*sz or 2 if sz is None\"\n\n self.output_size = sz or 1\n\n self.ap = nn.AdaptiveAvgPool2d(self.output_size)\n\n self.mp = nn.AdaptiveMaxPool2d(self.output_size)\n\n\n\n def forward(self, x): return torch.cat([self.mp(x), self.ap(x)], 1)\n\n \n\nclass TransferHead(nn.Module):\n\n def __init__(self, in_features):\n\n \"\"\"in_features is the number of channels in the last conv layer of the base model\"\"\"\n\n super().__init__()\n\n self.avg = AdaptiveConcatPool2d()\n\n self.in_features = in_features\n\n self.layer1 = nn.Sequential(nn.BatchNorm1d(2*in_features), nn.Dropout(0.25),\n\n nn.Linear(2*in_features, 512), nn.ReLU(inplace=True))\n\n self.layer2 = nn.Sequential(nn.BatchNorm1d(512), nn.Dropout(0.5),\n\n nn.Linear(512, 2))\n\n \n\n def forward(self, x):\n\n out = self.avg(x)\n\n out = out.view(-1, self.in_features*2)\n\n out = self.layer1(out)\n\n out = self.layer2(out)\n\n return out\n\n \n\nclass TransCactusTron(nn.Module):\n\n def __init__(self, freeze=True):\n\n super().__init__()\n\n base_arch = models.densenet161(pretrained=True)\n\n n_channels = base_arch.classifier.in_features\n\n self.body = nn.Sequential(base_arch.features, nn.ReLU(inplace=True))\n\n self.head = TransferHead(n_channels)\n\n \n\n for x in self.body.parameters():\n\n x.requires_grad = False\n\n for x in self.body.modules():\n\n if isinstance(x, nn.modules.batchnorm._BatchNorm):\n\n x.bias.requires_grad = True\n\n x.weight.requires_grad = True\n\n x.reset_running_stats()\n\n \n\n \n\n def forward(self, x):\n\n out = self.body(x)\n\n return self.head(out)\n\n \n\n def predict(self, loader, device=torch.device('cpu')):\n\n y_pred = torch.tensor([])\n\n with torch.no_grad():\n\n for data in loader:\n\n img = data[0].to(device)\n\n curr_pred = self(img)\n\n y_pred = torch.cat((y_pred, curr_pred.cpu().detach()))\n\n softmax = nn.Softmax(dim=1)\n\n y_pred = softmax(y_pred)\n\n return y_pred[:, 1].numpy()\n\n \n\n def unfreeze(self):\n\n for x in self.body.parameters():\n\n x.requires_grad = True\n\n \n\n\n\nfind_lr = True\n\n\n\nif find_lr:\n\n # According to https://arxiv.org/abs/1506.01186\n\n num_steps = len(train_loader)\n\n lrs = np.logspace(-8, 0, num=num_steps)\n\n device = torch.device(\"cuda:0\" if torch.cuda.is_available else \"cpu\")\n\n criterion = nn.CrossEntropyLoss(reduction='mean')\n\n model = TransCactusTron()\n\n model.train()\n\n model.to(device)\n\n\n\n optimizer = torch.optim.Adam(model.parameters() ,lr=lrs[0])\n\n train_loss = []\n\n best_loss = np.inf\n\n i = 0\n\n\n\n model.train()\n\n for img, label in tqdm(train_loader):\n\n img, label = img.to(device), 
label.to(device).squeeze()\n\n optimizer.zero_grad()\n\n predicted_label = model(img)\n\n loss = criterion(predicted_label, label)\n\n loss.backward()\n\n optimizer.step()\n\n train_loss.append(loss.item())\n\n if loss.item() < best_loss:\n\n best_loss = loss.item()\n\n\n\n if loss.item() > 20*best_loss:\n\n print(\"Loss diverged\")\n\n break\n\n\n\n optimizer.param_groups[0]['lr'] = lrs[i]\n\n i += 1\n\n\n\n # exponential smoothing\n\n train_loss = pd.Series(train_loss)\n\n train_loss_smooth = train_loss.ewm(alpha=0.02).mean()\n\n plt.plot(lrs[:len(train_loss)], train_loss_smooth)\n\n plt.grid()\n\n plt.xscale('log')\n####################\n\n# HYPER PARAMETERS #\n\n####################\n\n\n\nlr = 3e-2\n\nepochs = 10\n\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ncriterion = nn.CrossEntropyLoss(reduction='mean')\n\n\n\ntensorboard = False\n\n\n\n############\n\n# Training #\n\n############\n\n\n\nif tensorboard == True:\n\n time = datetime.now().strftime(\"%d%m-%H%M\")\n\n train_id = f\"{time}_lr={lr}_epochs={epochs}_Trans_difflr_Adam\"\n\n writer = SummaryWriter(log_dir=f\"runs/{train_id}\")\n\n \n\n\n\nmodel = TransCactusTron()\n\nmodel.to(device)\n\n\n\nglobal_step = 0\n\n\n\nprint(f'\\nSTART TRAINING\\n')\n\nparameters = [{'params': model.body.parameters()},\n\n {'params': model.head.parameters()}]\n\noptimizer = torch.optim.Adam(parameters, lr=lr)\n\nscheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=[lr/10, lr],\n\n epochs=epochs, steps_per_epoch=len(train_loader))\n\n\n\nfor epoch in tqdm(range(epochs), desc='Epochs'):\n\n train_loss = 0.0\n\n valid_loss = 0.0\n\n start_time = datetime.now()\n\n\n\n #########\n\n # Train #\n\n #########\n\n i = 0\n\n model.train()\n\n for img, label in tqdm(train_loader, desc=\"Iteration\"):\n\n img, label = img.to(device), label.to(device).squeeze()\n\n optimizer.zero_grad()\n\n predicted_label = model(img)\n\n loss = criterion(predicted_label, label)\n\n loss.backward()\n\n optimizer.step()\n\n train_loss += loss.item()\n\n scheduler.step()\n\n\n\n ############\n\n # Validate #\n\n ############\n\n\n\n model.eval()\n\n with torch.no_grad():\n\n for img, label in valid_loader:\n\n img, label = img.to(device), label.to(device).squeeze()\n\n predicted_label = model(img)\n\n loss = criterion(predicted_label, label)\n\n valid_loss += loss.item()\n\n\n\n ########### \n\n # Logging #\n\n ###########\n\n\n\n avg_train_loss = train_loss/len(train_loader)\n\n avg_valid_loss = valid_loss/len(valid_loader)\n\n delta = datetime.now()-start_time\n\n print(f\"Epoch: {epoch}\\tTrain Loss: {avg_train_loss:.6f}\\tVal Loss: {avg_valid_loss:.6f}\\t Time:{delta}\")\n\n if tensorboard == True:\n\n writer.add_scalar(\"Loss/Train\", avg_train_loss, global_step=global_step)\n\n writer.add_scalar(\"Loss/Validation\", avg_valid_loss, global_step=global_step)\n\n writer.add_scalars(\"Loss/Cross\",{\"Train\": avg_train_loss,\n\n \"Validation\": avg_valid_loss},\n\n global_step=global_step)\n\n global_step += 1\n\n torch.save(model.state_dict(), f'model.pt')\n\n\n\n\n\nprint(\"FINISHED TRAINING\")\n\n\n\nif tensorboard == True:\n\n writer.close()\ndf = pd.read_csv(csv_path)\n\ny_pred = np.zeros(len(testset))\n\nmodel.eval()\n\ny_pred = model.predict(test_loader, device)\n\n\n\ndf['has_cactus'] = y_pred\n\ndf.to_csv(\"submission.csv\", index=False)\n\n\n\ndf.head(15)\nif data_from == \"kaggle\":\n\n import shutil\n\n shutil.rmtree('/kaggle/working/train')\n\n 
shutil.rmtree('/kaggle/working/test')\n","repo_name":"aorursy/new-nb-3","sub_path":"hadayo_finalcactustron.py","file_name":"hadayo_finalcactustron.py","file_ext":"py","file_size_in_byte":13029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"71283904681","text":"#!/usr/bin/env python\r\n# encoding:utf-8\r\n# Created by Andy @ 2020/8/24\r\n\r\n\r\nfrom django.contrib.auth.models import User\r\nfrom rest_framework import serializers\r\n\r\nfrom api.models import UserProfile\r\n\r\n\r\nclass UserSerializer(serializers.Serializer):\r\n user = serializers.CharField(read_only=True)\r\n avatar = serializers.ImageField(required=False)\r\n gender = serializers.CharField(source=\"get_gender_display\")\r\n phone = serializers.CharField(required=False)\r\n status = serializers.CharField(source=\"get_status_display\")\r\n\r\n def to_internal_value(self, data):\r\n # Process the incoming data; e.g. if the status is passed in as Chinese text, convert it to 1 or 0 here before storing it in the database\r\n user = data.get('user')\r\n user_obj = User.objects.filter(username=user).first()\r\n data['user'] = user_obj\r\n if not data.get('avatar'):\r\n data['avatar'] = \"/media/avatar/default_male_avatar.png\"\r\n return data\r\n\r\n def to_representation(self, instance):\r\n # Handle fields that are allowed to be empty\r\n\r\n if not instance.avatar:\r\n instance.avatar = \"\"\r\n\r\n if not instance.phone:\r\n instance.phone = \"未绑定\"\r\n\r\n return super().to_representation(instance)\r\n\r\n def create(self, validated_data):\r\n user_name = validated_data.get('user')\r\n user = User.objects.filter(username=user_name).first()\r\n avatar = validated_data.get('avatar')\r\n gender = validated_data.get('gender')\r\n phone = validated_data.get('phone')\r\n status = validated_data.get('status')\r\n return UserProfile.objects.create(\r\n user=user,\r\n avatar=avatar,\r\n gender=gender,\r\n phone=phone,\r\n status=status\r\n )\r\n\r\n def update(self, instance, validated_data):\r\n for attr, value in validated_data.items():\r\n setattr(instance, attr, value)\r\n instance.save()\r\n return instance\r\n","repo_name":"Andy963/drf_demo","sub_path":"api/sers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"}
+{"seq_id":"33884343344","text":"# Python Program to Reverse a Number using While loop \n \nNumber = int(input(\"Please Enter any Number: \")) \noriginalno = Number \nReverse = 0 \nwhile(Number > 0): \n Reminder = Number %10 \n Reverse = (Reverse *10) + Reminder \n Number = Number //10 \n \nprint(\"\\n Reverse of entered number is = %d\" %Reverse) # -*- coding: utf-8 -*-\n\nif (originalno == Reverse):\n print('its a palindrome')\nelse:\n print('its not a palindrome')\n'''\nNumber = 123\n\n1st time loop\n\nRemainder = 123%10 = 3\nReverse = (0*10) + 3\nNumber = 123//10 = 12\n\n\n2nd loop\n12 >0\n\nRemainder = 12%10 = 2\n\nReverse = 3*10+2 = 32\nNumber = 12//10 = 1\n3rd loop\n1>0\n\nremainder = 1%10 = 1\nReverse = 32*10 + 1\n\n'''","repo_name":"raghuprasadks/pythontutoriallatest","sub_path":"assignment/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"18"}
+{"seq_id":"40755345609","text":"from itertools import permutations\n\n\ndef solution(numbers) :\n numb = list(numbers)\n p_list = []\n for i in range(len(numb)): # 0~1\n p_list += (list(permutations(numb, i + 1)))\n # digit permutations\n s_list = set()\n for i in p_list:\n j = 0\n s_make = ''\n while j < len(i):\n s_make += i[j]\n j 
+= 1\n s_list.add(int(s_make))\n s_list = list(s_list)\n\n answer = []\n for i in s_list:\n if i == 1 or i == 0:\n continue\n cnt = 0\n for x in range(1, i + 1):\n if i % x == 0:\n cnt += 1\n if cnt == 2:\n answer.append(i)\n return len(answer)\n","repo_name":"hcw3737/algorithms","sub_path":"programmers/lv_2/소수찾기.py","file_name":"소수찾기.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"18"} +{"seq_id":"31531115338","text":"from numpy import *\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport urllib.parse\nimport urllib.request\nimport json\nfrom time import sleep\n\ndef loadDataSet(filename):\n dataMat = []\n fr = open(filename)\n for line in fr.readlines():\n curLine = line.strip().split('\\t')\n fltLine = list(map(float, curLine))\n dataMat.append(fltLine)\n return dataMat\n\n#calculate the distance\ndef distEclud(vecA, vecB):\n return sqrt(sum(power(vecA-vecB, 2)))\n\ndef randCent(dataSet, k):\n n = shape(dataSet)[1]\n centroids = mat(zeros((k, n)))\n for j in range(n):\n minJ = min(dataSet[:, j])\n rangeJ = float(max(dataSet[:, j]) - minJ)\n centroids[:, j] = minJ + rangeJ * random.rand(k, 1)\n return centroids\n\ndef kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):\n m = shape(dataSet)[0]\n clusterAssment = mat(zeros((m, 2))) #index of the cluster and the error\n centroids = createCent(dataSet, k)\n clusterChanged = True\n while clusterChanged:\n clusterChanged = False\n for i in range(m):\n minDist = inf\n minIndex = -1\n for j in range(k):\n distJI = distMeas(centroids[j, :], dataSet[i, :])\n if distJI < minDist:\n minDist = distJI\n minIndex = j\n if clusterAssment[i, 0] != minIndex:\n clusterChanged = True\n clusterAssment[i, :] = minIndex, minDist ** 2\n print(centroids)\n for cent in range(k):\n a = nonzero(clusterAssment[:, 0].A == cent) #nonzero return a 2-D ndarray\n b = dataSet[2]\n ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]\n centroids[cent, :] = mean(ptsInClust, axis=0)\n return centroids, clusterAssment\n\ndef biKmeans(dataSet, k, distMeas=distEclud):\n m = shape(dataSet)[0]\n clusterAssment = mat(zeros((m, 2)))\n centroid0 = mean(dataSet, axis=0).tolist()[0]\n centList = [centroid0] #Initialy create one cluster\n for j in range(m):\n clusterAssment[j, 1] = distMeas(mat(centroid0), dataSet[j, :]) ** 2\n while (len(centList) < k):\n lowestSSE = inf #sum of squared error\n for i in range(len(centList)): #Try splitting every cluster\n ptsInCurrCluster = dataSet[nonzero(clusterAssment[:, 0].A == i)[0], :]\n centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)\n sseSplit = sum(splitClustAss[:, 1])\n sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:, 0].A != i)[0], 1])\n print(\"sseSplit, and notSplit: \", sseSplit, sseNotSplit)\n if (sseSplit+ sseNotSplit) < lowestSSE:\n bestCentToSplit = i\n bestNewCents = centroidMat\n bestClustAss = splitClustAss.copy()\n lowestSSE = sseSplit + sseNotSplit\n bestClustAss[nonzero(bestClustAss[:, 0].A == 1)[0], 0] = len(centList) #update the cluster assignments\n bestClustAss[nonzero(bestClustAss[:, 0].A == 0)[0], 0] = bestCentToSplit\n print('the bestCentToSplit is: ', bestCentToSplit)\n print('the len of bestClustAss is: ', len(bestClustAss))\n centList[bestCentToSplit] = bestNewCents[0, :]\n centList.append(bestNewCents[1, :])\n clusterAssment[nonzero(clusterAssment[:, 0].A == bestCentToSplit)[0], :] = bestClustAss\n return centList, clusterAssment\n\ndef geoGrab(strAddress, city):\n 
apiStem = 'http://where.yahooapis.com/geocode?'\n params = {}\n params['flags'] = 'J'\n params['appid'] = 'ppp68N8t'\n params['location'] = '%s %s' % (strAddress, city)\n url_params = urllib.parse.urlencode(params)\n yahooApi = apiStem + url_params\n print(yahooApi)\n c = urllib.request.urlopen(yahooApi)\n return json.loads(c.read())\n\ndef massPlaceFind(fileName):\n fw = open('places.txt', 'w')\n for line in open(fileName).readlines():\n line = line.strip()\n lineArr = line.split('\\t')\n retDict = geoGrab(lineArr[1], lineArr[2])\n if retDict['ResultSet']['Error'] == 0:\n lat = float(retDict['ResultSet']['Results'][0]['latitude'])\n lng = float(retDict['ResultSet']['Results'][0]['longitude'])\n print('%s\\t%f\\t%f' % (lineArr[0], lat, lng))\n fw.write('%s\\t%f\\t%f\\n' % (line, lat, lng))\n else:\n print('error fetching')\n sleep(1)\n fw.close()\n\n\nif __name__ == '__main__':\n # datMat = mat(loadDataSet('testSet2 _kmeans.txt'))\n # centList, myNewAssments = biKmeans(datMat,3)\n # print(centList)\n # myCentroids, clustAssing = kMeans(datMat, 4)\n # print(randCent(datMat, 2))\n geoResult = geoGrab('1 VA Center', 'Augusta, ME')\n print(geoResult)","repo_name":"shilx544/python","sub_path":"daily/kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"39321769757","text":"import logging\nfrom dotenv import load_dotenv\n\nload_dotenv()\nfrom docai.db.init_db import init_db\nfrom docai.db.session import SessionLocal\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef init() -> None:\n db = SessionLocal()\n init_db(db)\n\n\ndef main() -> None:\n logger.info(\"Creating initial data\")\n init()\n logger.info(\"Initial data created\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Chronicles-of-AI/gcp-docai-pyservice","sub_path":"service/initial_data.py","file_name":"initial_data.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"13895660947","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\n\ndir = r'C:\\Users\\kevin\\Downloads\\shga_sample_750k\\p.json'\n\nf = open(dir, encoding=\"utf8\")\nline = f.readline()\nwhile line:\n print(line)\n json_d = json.loads(line)\n line = f.readline()\n\nf.close()\n","repo_name":"peerelss/kevin_py","sub_path":"shga_reader/shga_reader.py","file_name":"shga_reader.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"37211889620","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport time\nimport math\nfrom multiprocessing import Process, Queue\n\n\ndef prepareDatabseForMachineLearning(data, orderOfMatrix, R0=100.0, filename = False, giveUpdates = True):\n \"\"\"\n A function which takes a generated dataframe and prepares it for machine learning by making the proximity matrices and calculating their eigenvalues.\n \n data is the dataframe (as generated by makeRandomDatabase) for which the eigenvalues of the proximity matrices should be calculated.\n orderOfMatrix is a list of the 'order' (power to which the relative distance is taken) for each proximity matrix.\n filename is the name of the file the database gets exported to. The .json file extension gets added in the code itself. 
If the type of filename is not a string no file will be saved.\n giveUpdates is a boolean which determines if updates about the progress of the database preparation get printed.\n \"\"\"\n \n timeStart = time.time()\n data = data.reset_index()\n numberOfDatapoints = len(data['particleCoordinates'])\n eigenvalues = []\n relativeDistances = []\n \n for a in range(0, numberOfDatapoints):\n # Loop through each datapoint\n coordinates = data['particleCoordinates'][a]\n dimension = len(coordinates[0])\n widthOfCell = data['widthOfCell'][a]\n depthOfSurroundingCells = math.ceil(R0 / widthOfCell + 1)\n \n eigenvaluesRow = []\n relativeDistancesRow = [widthOfCell]\n \n for order in orderOfMatrix:\n # For each order of eigenvalues wanted a matrix is constructed.\n matrix = np.zeros((len(coordinates), len(coordinates)))\n relativeDistancesRowOrder = []# The relative distances are also calculated and saved.\n \n for i in range(0, len(coordinates)):\n for j in range(i, len(coordinates)):\n # Loop through all matrix elements and define their proximity and relative distances.\n sumOfProximity = 0\n \n if (not i==j):\n # This part calculates the relative distance inputs.\n vectorA = coordinates[i]\n vectorB = coordinates[j]\n differenceVector = vectorA - vectorB\n relativeDistancesRowOrder.append(np.sqrt(differenceVector.dot(differenceVector))**(-order))\n \n cellCoordinates = [-depthOfSurroundingCells for k in range(dimension)]\n \n while True:\n # The while loop is to make sure that each coordinate in any amount of dimensions is taken into account.\n for k in range(len(cellCoordinates)):\n if cellCoordinates[k]>depthOfSurroundingCells:\n # If a cellCoordinate is larger than the depth of the surrounding cells then make it minimal again and increase the next coordinate by one. 
Thus you will loop through each possible permutation.\n cellCoordinates[k + 1] = cellCoordinates[k + 1] + 1\n cellCoordinates[k] = -depthOfSurroundingCells\n \n if not (i == j and sum([0==cellCoordinate for cellCoordinate in cellCoordinates])==dimension):\n # Calculate the addition to the proximity matrix element.\n vectorA = coordinates[i]\n vectorB = coordinates[j] + widthOfCell * np.array(cellCoordinates)# Also take into account the mirror images in the other surrounding cells.\n differenceVector = vectorA - vectorB\n distance = np.sqrt(differenceVector.dot(differenceVector))\n \n if distance < R0:\n sumOfProximity = sumOfProximity + ((R0 / distance) - (distance / R0))**(-order)\n \n if sum(cellCoordinates)==dimension*depthOfSurroundingCells:\n # If all cell coordinates are maximal the sum of it should be the dimension times the depth of surrounding cells and then the while loop should be stopped.\n break\n \n cellCoordinates[0] = cellCoordinates[0] + 1# Iterate the first cell coordinate.\n \n matrix[i][j] = sumOfProximity\n \n for i in range(0, len(coordinates)):\n for j in range(0, i):\n # Since the matrix is symmetric make sure that you don't do the same calculation twice.\n matrix[i][j] = matrix[j][i]\n \n eigenvalue, eigenVector = np.linalg.eig(matrix)\n [eigenvaluesRow.append(i) for i in sorted(eigenvalue)]\n #[eigenvaluesRow.append(i) for i in eigenvalue]\n \n [relativeDistancesRow.append(i) for i in sorted(relativeDistancesRowOrder)]\n \n #eigenvalues.append(sorted(eigenvaluesRow))\n eigenvalues.append(eigenvaluesRow)\n relativeDistances.append(relativeDistancesRow)\n \n if giveUpdates:\n expectedTimeLeft = (numberOfDatapoints - 1 - a) / ((a + 1) / (time.time() - timeStart))\n print(str(math.ceil(100 * (a + 1) / (numberOfDatapoints))).rjust(3, ' '), '% done, expected time left', math.ceil(expectedTimeLeft), 'seconds,', math.ceil(time.time() - timeStart), 'seconds since start.')\n \n data['eigenvalues'] = eigenvalues\n data['relativeDistances'] = relativeDistances\n \n if type(filename) == str:\n # If wanted, save the data to a json file.\n data.to_json(filename + '.json', orient='columns')\n \n return data\n\n\ndef prepareDatabseForMachineLearningSingleQueue(q, data, orderOfMatrix, R0, giveUpdates):\n q.put(prepareDatabseForMachineLearning(data, orderOfMatrix, R0, False, giveUpdates))\n\n\ndef prepareDatabseForMachineLearningMultiprocessing(data, orderOfMatrix, R0=4.0, filename = False, amountOfProcesses = 5):\n \"\"\"\n A function which implements multiprocessing for the prepareDatabseForMachineLearning function.\n \"\"\"\n q = Queue()\n processes = []\n splitData = np.array_split(data, amountOfProcesses)\n print('Using multiprocessing only the first process gives updates')\n \n processes.append(Process(target = prepareDatabseForMachineLearningSingleQueue, args = (q, splitData[0], orderOfMatrix, R0, True)))\n \n for i in range(1, amountOfProcesses):\n processes.append(Process(target = prepareDatabseForMachineLearningSingleQueue, args = (q, splitData[i], orderOfMatrix, R0, False)))\n \n for i in processes:\n i.start()\n \n dataDF = pd.concat([q.get() for i in range(0, amountOfProcesses)], ignore_index = True, sort = False)\n \n if type(filename) == str:\n # If wanted, save the data to a json file.\n dataDF.to_json(filename + '.json', orient='columns')\n \n return 
dataDF","repo_name":"TimHeiszwolf/Quantum_energy_ML","sub_path":"prepareDatabaseForMachineLearning.py","file_name":"prepareDatabaseForMachineLearning.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"8994394869","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.datasets import fetch_openml\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.model_selection import train_test_split\r\nfrom PIL import Image\r\nimport PIL.ImageOps\r\n\r\nx,y = fetch_openml('mnist_784',version = 1,return_X_y= True)\r\nxTrain,xTest,yTrain,yTest = train_test_split(x,y,train_size=7500,test_size=2500)\r\nxTrainScale = xTrain/255.0\r\nxTestScale = xTest/255.0\r\nmodel = LogisticRegression(solver = 'saga', multi_class='multinomial').fit(xTrainScale,yTrain)\r\n\r\ndef getPrediction(img):\r\n im_pil = Image.open(img)\r\n image_bw = im_pil.convert('L')\r\n image_bw_resized = image_bw.resize((28,28), Image.ANTIALIAS)\r\n\r\n pixel_filter = 20\r\n min_pixel = np.percentile(image_bw_resized, pixel_filter)\r\n image_bw_resized_inverted_scaled = np.clip(image_bw_resized-min_pixel, 0, 255)\r\n max_pixel = np.max(image_bw_resized)\r\n image_bw_resized_inverted_scaled = np.asarray(image_bw_resized_inverted_scaled)/max_pixel\r\n test_sample = np.array(image_bw_resized_inverted_scaled).reshape(1,784)\r\n test_pred = model.predict(test_sample)\r\n return test_pred[0]\r\n","repo_name":"Shaurya16002/c-125","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"2641594104","text":"# predict.py\r\n# This file is part of the Ptolemy Layer for Google Earth project.\r\n# It provides a common way to run the prediction part against any\r\n# of our models.\r\n\r\nimport os\r\nimport sys\r\nimport argparse\r\n\r\nimport common\r\n\r\nXCOLS = ['ptol_%s' % s for s in ('lat', 'lon')]\r\nYCOLS = [s.replace('ptol', 'modern') for s in XCOLS]\r\n\r\n\r\ndef main(filename, model, places):\r\n known, unknown = common.split_places(places)\r\n knownx = known.loc[:, XCOLS]\r\n knowny = known.loc[:, YCOLS]\r\n model.fit(knownx, knowny)\r\n unknownx = unknown.loc[:, XCOLS]\r\n unknowny = model.predict(unknownx)\r\n unknown.loc[:, YCOLS] = unknowny\r\n title = ' '.join(os.path.basename(filename)[0:-4].split('_'))\r\n common.write_kml_file(filename, None, known, unknown)\r\n common.write_csv_file(filename[0:-4] + '.csv', known, unknown)\r\n common.write_map_file(filename[0:-4] + '.pdf', known, unknown, 30, 24, 300, 'ptol_name', title)\r\n common.write_map_file(filename[0:-4] + '.png', known, unknown, 30, 24, 300, 'ptol_name', title)\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(\r\n description='Predict unknown Ptolemy places.')\r\n parser.add_argument('--model', help='prediction model to use')\r\n parser.add_argument('--sgdb', help='read from sgdb with given prefix')\r\n parser.add_argument('--xlsx', help='xlsx to read from instead of sgdb')\r\n parser.add_argument('--output', help='output filename')\r\n\r\n args = parser.parse_args()\r\n model = common.construct_model(args.model)\r\n\r\n if args.sgdb:\r\n places = common.read_places(args.sgdb)\r\n elif args.xlsx:\r\n places = common.read_places_xlsx(args.xlsx)\r\n else:\r\n sys.stderr.write('must specify one of --sgdb or --xlsx')\r\n exit(1)\r\n\r\n if args.output:\r\n output = args.output\r\n else:\r\n 
output = os.path.join(common.PTOL_HOME, 'Data', '%s.kml' % args.model)\r\n\r\n main(output, model, places)\r\n","repo_name":"claudiusptolemy/ptolemy","sub_path":"python/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"43707494492","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nGaudet, Vernon\r\nASSIGNMENT 2: LEVEL 0: \"A NEW LANGUAGE\"\r\n\"\"\"\r\nimport numpy as np\r\n\r\nglobal name\r\nname = 'VERNON'\r\n\r\n# Print Print Exercises\r\ndef printex(name):\r\n print(\"PRINT EXERCISES\\nQ1-2) \\n\")\r\n # Print name char by char\r\n for char in name:\r\n print(char)\r\n print(\"\\n\\n\")\r\n\r\n return name\r\n\r\n# Operation Exercises: \r\ndef opex():\r\n \r\n #Example operations\r\n opex1 = (5/2 , 5.0/2.0)\r\n opex2 = 5 % 2\r\n opex3 = (2 ** 3, 4 // 3)\r\n opex4 = 2 + 6 + 4 * 4 / 2\r\n \r\n return opex1, opex2, opex3, opex4\r\n\r\n# Print results from operation exercises\r\ndef printopex(ex2):\r\n print(\"OPERATION EXERCISES\\n Q1-4\")\r\n for q, part in enumerate(ex2):\r\n print(('P' + str(q+1) + ') '), part)\r\n print('\\n')\r\n \r\n return\r\n\r\n\r\n# Variable Exercises\r\ndef varex():\r\n \r\n # Name letter variables\r\n letter1 = \"V\"\r\n letter2 = \"E\"\r\n letter3 = \"R\"\r\n letter4 = \"N\"\r\n letter5 = \"O\"\r\n letterX = letter1\r\n letter1 = \"Z\"\r\n \r\n return letter1, letter2, letter3, letter4, letter5, letterX\r\n\r\n# Print results from variable exercises\r\ndef printvarex(ex3):\r\n print(\"VARIABLE EXERCISES\\nQ1-6) \\n\")\r\n for part in ex3:\r\n print(part)\r\n print('\\n')\r\n return\r\n\r\n# Boolean Exercises:\r\ndef bolex():\r\n \r\n bolex1a = 1 == 1.0 \r\n bolex1b = \"1\" == \"1.0\"\r\n bolex2a = 5 == (3+2)\r\n \r\n # 5 different options to return true\r\n bolex3a = ([\r\n (1 == 1.0) and (\"1\" == \"1.0\") or (5 == (3+2)),\r\n\t\t(1 == 1.0) or (\"1\" == \"1.0\") or (5 == (3+2)),\r\n\t\t(1 == 1.0) and not (\"1\" == \"1.0\") or (5 == (3+2)),\r\n\t\t(1 == 1.0) or not (\"1\" == \"1.0\") or (5 == (3+2)),\r\n\t\t(1 == 1.0) and not (\"1\" == \"1.0\") or not (5 == (3+2))\r\n ])\r\n \r\n return bolex1a, bolex1b, bolex2a, bolex3a\r\n\r\n# Print results from boolean exercises\r\ndef printbolex(ex4):\r\n print(\"BOOLEAN EXERCISES\\nQ1-3\")\r\n for q, part in enumerate(ex4):\r\n print(('P' + str(q+1) + ') '), part)\r\n print('\\n')\r\n return\r\n\r\n# List Exercises\r\ndef listex():\r\n \r\n # Odd number list and list of integers 1-99 (between 0 and 100)\r\n oddlist= [1,3,5,7,9]\r\n intlist= list(range(1, 100))\r\n oddlen = len(oddlist)\r\n oddtype = type(oddlist)\r\n \r\n return oddlist, oddlen, oddtype, intlist\r\n\r\n# Print results from list exercises\r\ndef printlistex(ex5):\r\n print(\"LIST EXERCISES\\nQ1-6\")\r\n for q, part in enumerate(ex5):\r\n print(('P' + str(q+1) + ') '), part)\r\n print('\\n')\r\n return\r\n \r\n# Dictionary Exercises\r\ndef dictex():\r\n \r\n # Dictionary with personal information\r\n about_me = {\r\n 'name': \"Vernon Gaudet\",\r\n 'age': 24.7,\r\n 'ystudy': 4,\r\n 'ffoods': [\"pumpkin pie\", \"dumpling chicken soup\", \"tonkotsu ramen\"]\r\n }\r\n metype = type(about_me)\r\n melen = len(about_me)\r\n \r\n return about_me, metype, melen\r\n\r\n# Print results from dictionary exercises\r\ndef printdictex(ex6):\r\n print(\"DICTIONARY EXERCISES\\nQ1-3\")\r\n for q, part in enumerate(ex6):\r\n print(('P' + str(q+1) + ') '), part)\r\n print('\\n')\r\n return\r\n\r\n# Array Exercises\r\ndef arrex():\r\n \r\n # 
Mixed int and float array\r\n mixnums = np.array([ 1, 2, 3, 1.0, 2.0, 3.0])\r\n \r\n # Mixed type array (int, float, str)\r\n mixtypes = np.array([1, 2, 1.0, 2.0, \"1\", \"2\" ])\r\n \r\n # Odd number array from 1 to 100\r\n oddarray = np.arange(1, 100, 2)\r\n \r\n # Log array from 1 to 5 evenly spaced for 16 numbers.\r\n logarray = np.array(np.logspace(np.log10(1), np.log10(5), 16))\r\n \r\n return mixnums, mixtypes, oddarray, logarray\r\n\r\n# Print results from array exercises\r\ndef printarrex(ex7):\r\n print(\"ARRAY EXERCISES\\nQ1-4\")\r\n for q, part in enumerate(ex7):\r\n print(('P' + str(q+1) + ') '), part)\r\n print('\\n')\r\n return\r\n\r\n# Print menu for selecting exercise results to print\r\ndef selectionprint(ex1, ex2, ex3, ex4, ex5, ex6, ex7):\r\n \r\n options = ['1','2','3','4','5','6', '7', 'e']\r\n while True:\r\n \r\n print(\"SELECT OPTION TO PRINT:\\n\",\r\n \"1. PRINT EXERCISES\\n\", \r\n \"2. OPERATION EXERCISES\\n\",\r\n \"3. VARIABLE EXERCISES\\n\",\r\n \"4. BOOLEAN EXERCISES\\n\",\r\n \"5. LIST EXERCISES\\n\",\r\n \"6. DICTIONARY EXERCISES\\n\",\r\n \"7. ARRAY EXERCISES\\n\",\r\n \"Press 'e' for exit.\\n\"\r\n )\r\n select = input() \r\n print(\"\\n\")\r\n \r\n if select in options:\r\n if select == 'e':\r\n print(\"Goodbye!\\n\\n\")\r\n break\r\n elif select == '1':\r\n printex(ex1)\r\n elif select == '2':\r\n printopex(ex2)\r\n elif select == '3':\r\n printvarex(ex3)\r\n elif select == '4':\r\n printbolex(ex4)\r\n elif select == '5':\r\n printlistex(ex5)\r\n elif select == '6':\r\n printdictex(ex6)\r\n elif select == '7':\r\n printarrex(ex7)\r\n else:\r\n print(\"Invalid or empty entry. Please try again.\\n\")\r\n \r\n \r\n select = input(\"Press any key to continue...\\n\")\r\n\r\n print(\"\\n\\n\")\r\n \r\n return\r\n\r\n# Main function executing all exercises\r\ndef main():\r\n\r\n # Execute exercise sections in the following order: Print, Operations, Variables, \r\n # Booleans, Lists, Dictionaries, Arrays\r\n \r\n ex1 = name\r\n ex2 = opex()\r\n ex3 = varex()\r\n ex4 = bolex()\r\n ex5 = listex()\r\n ex6 = dictex()\r\n ex7 = arrex() \r\n \r\n # Display print menu\r\n selectionprint(ex1, ex2, ex3, ex4, ex5, ex6, ex7)\r\n \r\n print(\"Closing...\")\r\n \r\n return ex1, ex2, ex3, ex4, ex5, ex6, ex7\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"vgaudet/Psych-403","sub_path":"Assignment2/yourname.py","file_name":"yourname.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
+{"seq_id":"33364673215","text":"# It helps to know in advance what kinds of messages the remote SMTP server will accept; for example, most servers limit the allowed message size, and this can be checked ahead of time\n# Get this information from EHLO\nimport smtplib, socket, sys\n\nmessage_template = \"\"\"To: {}\nFrom: {}\nSubject: Test Message from simple.py\n\nHello,\n\nWhat a wonderful world!\n\"\"\"\n\ndef main():\n if len(sys.argv) < 4:\n name = sys.argv[0]\n print(\"usage: {} server fromaddr toaddr [toaddr...]\".format(name))\n sys.exit(2)\n\n server, fromaddr, toaddrs = sys.argv[1], sys.argv[2], sys.argv[3:]\n message = message_template.format(', '.join(toaddrs), fromaddr)\n\n try:\n connection = smtplib.SMTP(server)\n report_on_message_size(connection, fromaddr, toaddrs, message)\n except (socket.gaierror, socket.error, socket.herror,\n smtplib.SMTPException) as e:\n print(\"Your message may not have been sent!\")\n print(e)\n sys.exit(1)\n else:\n s = '' if len(toaddrs) == 1 else 's'\n print(\"Message sent to {} recipient{}\".format(len(toaddrs), s))\n connection.quit()\n\ndef report_on_message_size(connection, 
fromaddr, toaddrs, message):\n # Clients that support ESMTP use the EHLO command; both EHLO and HELO return a sequence whose first item is the numeric result code sent back by the remote server\n # 200-299 means success\n code = connection.ehlo()[0]\n uses_esmtp = (200 <= code <= 299)\n # If ESMTP is not supported, use the HELO command to start the session instead\n if not uses_esmtp:\n code = connection.helo()[0]\n if not (200 <= code <= 299):\n print(\"Remote server refused HELO; code:\", code)\n sys.exit(1)\n # has_extn() returns True if the given name is among the SMTP service extensions reported by the server\n if uses_esmtp and connection.has_extn('size'):\n print(\"Maximum message size is\", connection.esmtp_features['size'])\n if len(message) > int(connection.esmtp_features['size']):\n print(\"Message too large; aborting.\")\n sys.exit(1)\n\n connection.sendmail(fromaddr, toaddrs, message)\n\nif __name__ == '__main__':\n main()","repo_name":"liuhaoze22/Foundations-of-Python-Network-Programming","sub_path":"Python-Network-Programming/SMTP/ehlo.py","file_name":"ehlo.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"72891480085","text":"\"\"\"\n\nariel@oapd\n15/06/2022\n\nCreates a grid of models and fits them\n\n\"\"\"\n\nimport bagpipes as pipes\nimport numpy as np\nfrom astropy.table import Table\nimport itertools\nfrom hst_pipeutils import load_data_models as load_data\nimport matplotlib.pyplot as plt\n\ndata_dir = '/Data/'\ntest_id = '5err_doublescreen_eta_vary_oldmass10'\n\nfilter_files = [data_dir + 'filters/HST_WFC3_UVIS2.F275W.dat',\n data_dir + 'filters/HST_WFC3_UVIS2.F336W.dat',\n data_dir + 'filters/HST_WFC3_UVIS2.F606W.dat',\n data_dir + 'filters/HST_WFC3_UVIS2.F680N.dat',\n data_dir + 'filters/HST_WFC3_UVIS2.F814W.dat']\n\nplot_dir = '/BAGPIPES/plots/Models/'\ntable_rows = []\nmodel_dict = {}\nmodel_ages = [0.005, 0.015, 0.025, 0.05, 0.075, 0.1, 0.15, 0.2, 0.25]\nmodel_Avs = [0.0, 0.1, 0.2, 0.4, 0.6]\n\ncombinations = [(model_age, model_Av) for model_age, model_Av in itertools.product(model_ages, model_Avs)]\n\nfor i in range(len(combinations)):\n\n model_age, model_Av = combinations[i]\n print(i+1, model_age, model_Av)\n\n exp_young = {}\n exp_young[\"age\"] = model_age\n exp_young[\"tau\"] = 2.5\n exp_young[\"massformed\"] = 5\n exp_young[\"metallicity\"] = 0.5\n\n exp_old = {}\n exp_old[\"age\"] = 10\n exp_old[\"tau\"] = 3\n exp_old[\"massformed\"] = 10\n exp_old[\"metallicity\"] = 0.5\n\n dust = {}\n dust[\"type\"] = \"Cardelli\"\n dust[\"Av\"] = model_Av\n dust[\"eta\"] = 2.\n\n nebular = {}\n nebular[\"logU\"] = -2.5\n\n model_components = {}\n model_components[\"redshift\"] = 0.04\n model_components[\"delayed1\"] = exp_young\n model_components[\"delayed2\"] = exp_old\n model_components[\"dust\"] = dust\n model_components[\"t_bc\"] = 0.02\n model_components[\"veldisp\"] = 200.\n model_components[\"nebular\"] = nebular\n\n model = pipes.model_galaxy(model_components, filt_list=filter_files)\n\n model_dict[str(model_age) + '_' + str(model_Av) + '_' + test_id] = model\n\nfor i in range(len(combinations)):\n\n model_age, model_Av = combinations[i]\n print(i+1, model_age, model_Av)\n\n model_id = str(model_age) + '_' + str(model_Av) + '_' + test_id\n\n blob_to_fit = pipes.galaxy(model_id, catalog=model_dict, load_data=load_data,\n filt_list=filter_files, phot_units=model.phot_units, spectrum_exists=False)\n\n exp_young_fit = {}\n exp_young_fit[\"age\"] = (0.0, 1.0)\n exp_young_fit[\"tau\"] = (0, 5)\n exp_young_fit[\"massformed\"] = (0., 10.)\n exp_young_fit[\"metallicity\"] = (0.005, 2.5)\n\n exp_old_fit = {}\n exp_old_fit[\"age\"] = (4, 14)\n exp_old_fit[\"tau\"] = (0, 15)\n 
exp_old_fit[\"massformed\"] = (0, 12)\n exp_old_fit[\"metallicity\"] = 0.5\n\n dust = {}\n dust[\"type\"] = \"Cardelli\"\n dust[\"Av\"] = (0, 3)\n\n nebular = {}\n nebular[\"logU\"] = -2.5\n dust[\"eta\"] = (1, 2)\n # dust[\"eta\"] = 2\n\n fit_instructions = {}\n fit_instructions[\"redshift\"] = 0.04\n fit_instructions[\"delayed1\"] = exp_young_fit\n fit_instructions[\"delayed2\"] = exp_old_fit\n fit_instructions[\"dust\"] = dust\n fit_instructions[\"nebular\"] = nebular\n\n fit = pipes.fit(blob_to_fit, fit_instructions)\n\n fit.fit(verbose=True)\n\n table_rows.append([model_age, model_Av, np.median(fit.posterior.samples['delayed1:age']),\n np.median(fit.posterior.samples['dust:Av']),\n np.median(fit.posterior.samples['delayed1:tau']),\n np.median(fit.posterior.samples['dust:eta']),\n np.median(fit.posterior.samples['delayed1:massformed']),\n np.median(fit.posterior.samples['delayed1:metallicity'])])\n\n fig = fit.plot_spectrum_posterior(save=False, show=False)\n plt.savefig(plot_dir + '/spec_' + model_id + '.png', dpi=100)\n fig = fit.plot_sfh_posterior(save=False, show=False)\n plt.savefig(plot_dir + '/sfh_' + model_id + '.png', dpi=100)\n fig = fit.plot_corner(save=False, show=False)\n plt.savefig(plot_dir + '/corner_' + model_id + '.png', dpi=100)\n\n plt.close('all')\n\n\ntable = Table(rows=table_rows, names=['model_age', 'model_Av', 'age', 'Av', 'tau', 'eta', 'massformed', 'metallicity'])\ntable.write('/home/ariel/Workspace/GASP/HST/Data/models/' + test_id + '.fits', overwrite=True)\n\n# exp_old_fit = {}\n# exp_old_fit[\"age\"] = (8.0, 14.)\n# exp_old_fit[\"tau\"] = (0.0, 30.)\n# exp_old_fit[\"massformed\"] = (0., 12.)\n# exp_old_fit[\"metallicity\"] = (0.005, 2.5)\n\n# exp_old = {}\n# exp_old[\"age\"] = 10.\n# exp_old[\"tau\"] = 4.\n# exp_old[\"massformed\"] = 5\n# exp_old[\"metallicity\"] = 0.5\n","repo_name":"arielwrl/HST_blobs","sub_path":"Archive/proof_of_concept.py","file_name":"proof_of_concept.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74439905364","text":"import numpy as np\r\n\r\nclass Hessian:\r\n \"\"\"Computes the Hessian numerically if given a geometry and potential function.\r\n The potential function should return the energy only.\r\n \"\"\"\r\n def __init__(self, potential_function):\r\n self.potential_function = potential_function\r\n\r\n def potential_wrapper(self, geometry, shape):\r\n return self.potential_function(geometry.reshape(shape))\r\n\r\n def evaluate(self, geometry, epsilon=1e-6):\r\n geometry = geometry.flatten()\r\n N = geometry.size\r\n hessian = np.zeros((N,N))\r\n df_0 = self.potential_wrapper(geometry, (-1, 3))[1].flatten()\r\n for i in range(N):\r\n xx0 = geometry[i]\r\n geometry[i] = xx0 + epsilon\r\n df_1 = self.potential_wrapper(geometry, (-1, 3))[1].flatten()\r\n hessian[i,:] = (df_1 - df_0)/epsilon\r\n geometry[i] = xx0\r\n return hessian\r\n\r\nif __name__ == '__main__':\r\n from Fragments import Fragments\r\n from Potential import *\r\n from MBE_Potential import MBE_Potential\r\n import sys\r\n\r\n try:\r\n ifile = sys.argv[1]\r\n except:\r\n print(\"Didn't get an xyz file.\")\r\n sys.exit(1)\r\n \r\n fragments = Fragments(ifile)\r\n ttm21f = TTM(21)\r\n mbe_ff = MBE_Potential(6, fragments, ttm21f)\r\n\r\n geometry = np.vstack(fragments.fragments)\r\n hessian_calculator = 
Hessian(mbe_ff.evaluate_on_geometry)\r\n","repo_name":"heindelj/pyMD","sub_path":"py_MD/Compute_Hessian.py","file_name":"Compute_Hessian.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"24587394295","text":"\"\"\"\n\nThis is an attempt at automating data formats common among Ralph Group Instruments.\nCurrently supported are UtilSweep, UtilMOKE, PPMSsweep, and PPMS (QD) file formats.\nAll methods take a file of a given type and return a pandas dataframe. Instancing \nthe dataloader class is meant to remove the hassle of having to append the directory\neach time.\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom DRRA import constants\n\n\n\nclass dataloader:\n\t\n\tdef __init__(self, workingdir = None):\n\t\tif workingdir is None:\n\t\t\tpass\n\t\telse:\n\t\t\tself.wkdir = workingdir\n\n\n\n\tdef load_tab_delimited(self, filename,header='infer',names=None):\n\t\t\n\t\ttemp = pd.read_csv(self.wkdir + filename, sep = '\\t',header=header, names=names)\n\t\t\n\t\treturn temp\n\n\tdef load_QD(self, filename):# rdata = False):\n\t\t\"\"\"\n\t\tUses code prototype gleaned from http://gb119.github.io/Stoner-PythonCode/_modules/Stoner/FileFormats.html for loading QD file\n\t\tformats. Empirically, Pandas has trouble with QD files so one needs to go more primitive with numpy.\n\t\tReturns a data frame with the Field, Moment, and Temperature data. \n\t\t\"\"\"\n\t\ttemp = np.genfromtxt(self.wkdir+filename, dtype = 'float', delimiter = ',', invalid_raise = False, skip_header = 23)\n\t\tfield = []\n\t\tmoment = []\n\t\ttemperature = []\n\n\t\tfor item in temp:\n\t\t\tfield.append(item[3])\n\t\t\tmoment.append(item[4])\n\t\t\ttemperature.append(item[2])\n\t\tdf = pd.DataFrame({'B':np.array(field),'M':np.array(moment),'T':np.array(temperature)})\n\n\t\treturn df\n\n\tdef load_UtilSweep(self, filename,verbose = False):\n\t\t\"\"\"\n\t\tUses load_tab_delimited method to load data from UtilSweep. Returns pandas DataFrame\n\t\twith shortened keys for easy df.KEY access. Code should be smart enough to handle any \n\t\tnumber of selected instruments. Currently only up to two lockins are supported\n\t\t\"\"\"\n\t\ttemp = self.load_tab_delimited(filename)\n\t\tkeys = temp.keys()\n\t\tutildict = {}\n\t\tfor item in keys:\n\t\t\tif item == 'LockinOnex':\n\t\t\t\tutildict['l1x'] = temp[item].values\n\t\t\tif item == 'LockinOney':\n\t\t\t\tutildict['l1y'] = temp[item].values\n\t\t\tif item == 'LockinAnotherOnex':\n\t\t\t\tutildict['l2x'] = temp[item].values\n\t\t\tif item == 'LockinAnotherOney':\n\t\t\t\tutildict['l2y'] = temp[item].values\n\t\t\tif item == 'Field(nominal)':\n\t\t\t\tutildict['field'] = temp[item].values\n\t\t\tif item == 'Azimuthnominal':\n\t\t\t\tutildict['azimuth'] = temp[item].values\n\t\t\tif item == 'PolarNominal':\n\t\t\t\tutildict['polar'] = temp[item].values\n\t\treturn pd.DataFrame(utildict)\n\n\tdef load_PPMSsweep(self, filename):\n\t\t\"\"\"\n\t\tUses load_tab_delimited method to load data from PPMSSweep. Returns pandas DataFrame\n\t\twith shortened keys for easy df.KEY access. Code should be smart enough to handle any \n\t\tnumber of selected instruments. 
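Keys map to l1x, l1y, l2x, l2y, field, temp and position. 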
Currently only up to two lockins are supported\n\t\t\"\"\"\n\t\ttemp = self.load_tab_delimited(filename)\n\t\tkeys = temp.keys()\n\t\tppmsdict = {}\n\t\tfor item in keys:\n\t\t\tif item == 'Lockin1x Volts':\n\t\t\t\tppmsdict['l1x'] = temp[item].values\n\t\t\tif item == 'Lockin1y Volts':\n\t\t\t\tppmsdict['l1y'] = temp[item].values\n\t\t\tif item == 'Lockin2x Volts':\n\t\t\t\tppmsdict['l2x'] = temp[item].values\n\t\t\tif item == 'Lockin2y Volts':\n\t\t\t\tppmsdict['l2y'] = temp[item].values\n\t\t\tif item == 'Field (Oe)':\n\t\t\t\tppmsdict['field'] = temp[item].values\n\t\t\tif item == 'Temp (K)':\n\t\t\t\tppmsdict['temp'] = temp[item].values\n\t\t\tif item == 'Position (Degrees)':\n\t\t\t\tppmsdict['position'] = temp[item].values\n\t\treturn pd.DataFrame(ppmsdict)\n\n\tdef load_UtilMOKE(self, filename, channel=1,shift=0):\n\t\t\"\"\"\n\t\tUses load_tab_delimited method to load all the standard instruments from a normal SHE MOKE\n\t\texperiment. The output is a dataframe that contains the Mirrorline values and the sum and\n\t\tdifference as plus and dif. \n\n\t\tChannel kwarg allows selection of which SR 7270 channel to load as data, X1 by default.\n\t\t\"\"\"\n\t\tmoketemp = self.load_tab_delimited(filename)\n\t\ti=0\n\t\ttempline=[]\n\t\ttempx1=[]\n\t\ttempx2=[]\n\t\ttempdc = []\n\t\twhile i<len(moketemp.DC_volts):\n\t\t\tif i>=len(moketemp.DC_volts)//2:\n\t\t\t\tif not (i-shift)>=len(moketemp.DC_volts)//2:\n\t\t\t\t\ttempline.append(moketemp.MirrorLine.iloc[i])\n\t\t\t\t\ttempx1.append(moketemp.X1.iloc[i])\n\t\t\t\t\ttempx2.append(moketemp.X2.iloc[i])\n\t\t\t\t\ttempdc.append(moketemp.DC_volts.iloc[i])\n\t\t\t\telse:\n\t\t\t\t\ttempline.append(moketemp.MirrorLine.iloc[i-shift])\n\t\t\t\t\ttempx1.append(moketemp.X1.iloc[i-shift])\n\t\t\t\t\ttempx2.append(moketemp.X2.iloc[i-shift])\n\t\t\t\t\ttempdc.append(moketemp.DC_volts.iloc[i-shift])\n\t\t\telse:\n\t\t\t\ttempline.append(moketemp.MirrorLine.iloc[i])\n\t\t\t\ttempx1.append(moketemp.X1.iloc[i])\n\t\t\t\ttempx2.append(moketemp.X2.iloc[i])\n\t\t\t\ttempdc.append(moketemp.DC_volts.iloc[i])\n\t\t\ti+=1\n\t\tplt.plot(templine,moketemp.DC_volts)\n\t\tplt.show()\n\t\t\n\t\ttemp = pd.DataFrame({'MirrorLine':templine,'X1':tempx1,'X2':tempx2,'Field':moketemp.Field.values,'DC':tempdc})\n\t\tif channel == 1:\n\t\t\ttemp = pd.concat([temp.MirrorLine[:-1],temp.X1,temp.Field,temp.DC],axis=1)\n\t\telse:\n\t\t\ttemp = pd.concat([temp.MirrorLine[:-1],temp.X2,temp.Field,temp.DC],axis=1)\n\t\ttempplus = temp[temp['Field'].isin([temp.Field[0]])]\n\t\ttempdif = temp[temp['Field'].isin([temp.Field[len(temp)-1]])]\n\t\ti=0\n\t\tdif = np.array([2]*len(tempdif),dtype='float64')\n\t\tplus = np.array([2]*len(tempdif),dtype='float64')\n\t\t# sum and difference of the selected channel between the two field polarities\n\t\twhile i<len(tempdif):\n\t\t\tif channel == 1:\n\t\t\t\tplus[i] = tempplus.X1.values[i]+tempdif.X1.values[i]\n\t\t\t\tdif[i] = tempplus.X1.values[i]-tempdif.X1.values[i]\n\t\t\telse:\n\t\t\t\tplus[i] = tempplus.X2.values[i]+tempdif.X2.values[i]\n\t\t\t\tdif[i] = tempplus.X2.values[i]-tempdif.X2.values[i]\n\t\t\ti+=1\n\t\treturn pd.DataFrame({'MirrorLine':tempdif.MirrorLine.values,'plus':plus,'dif':dif})\n\n\tdef load_DC_Bias_UtilSweep(self,max=False,lowerbound = 100,upperbound = 100,points_from_zero = 30):\n\t\t\"\"\"\n\t\tUses load_UtilSweep method to load data from a dc biased stfmr experiment, keeping only the negative fields\n\t\tand trimming the data to a region presumably only around the resonance.\n\t\tReturns pandas DataFrame with shortened keys for easy df.KEY access.\n\t\t\"\"\"\n\t\ti=0\n\t\talldatadict = {}\n\t\tfor item in (os.listdir(self.wkdir)):\n\t\t\tif item.split('_')[0] == 'azimuth':\n\t\t\t\tnparray = self.load_UtilSweep(item)[self.load_UtilSweep(item).field<0]\n\t\t\t\tcurr = float(item.split('_')[9])\n\t\t\t\tfreq = float(item.split('_')[7])\n\t\t\t\tnparray = nparray.reset_index()\n\t\t\t\tif max:\n\t\t\t\t\tnegmax = nparray[:-points_from_zero].l1x.argmax()\n\t\t\t\tif not max:\n\t\t\t\t\tnegmax = nparray[:-points_from_zero].l1x.argmin()\n\t\t\t\tupper = negmax+upperbound\n\t\t\t\tif upper>len(nparray):\n\t\t\t\t\tupper = len(nparray)-1\n\t\t\t\tlower = negmax-lowerbound\n\t\t\t\tif lower<0:\n\t\t\t\t\tlower = 0\n\t\t\t\tnparray = nparray[lower:upper]\n\t\t\t\tnparray = pd.DataFrame({'l1x':nparray.l1x.values*1e6,'field':nparray.field.values})\n\t\t\t\tif not curr in alldatadict:\n\t\t\t\t\talldatadict[curr]={}\n\t\t\t\talldatadict[curr][freq]=nparray\n\t\t\t\ti=i+1\n\t\treturn alldatadict\n\n\tdef load_DC_Bias_UtilSweep_pos(self,max=False,lowerbound = 100,upperbound = 100,points_from_zero = 30):\n\t\t\"\"\"\n\t\tUses load_UtilSweep method to load data from a dc biased stfmr experiment, keeping only the positive fields\n\t\tand trimming the data to a region presumably only around the resonance.\n\t\tReturns pandas DataFrame with shortened keys for easy df.KEY access. 
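Filenames are assumed to be underscore-delimited and to start with 'azimuth', with the drive frequency at split index 7 and the dc bias current at split index 9. 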
\n\t\t\"\"\"\n\t\ti=0\n\t\talldatadict = {}\n\t\tfor item in (os.listdir(self.wkdir)):\n\t\t\tif item.split('_')[0] == 'azimuth':\n\t\t\t\tnparray = self.load_UtilSweep(item)[self.load_UtilSweep(item).field>0]\n\t\t\t\tcurr = float(item.split('_')[9])\n\t\t\t\tfreq = float(item.split('_')[7])\n\t\t\t\tnparray = nparray.reset_index()\n\t\t\t\tif max:\n\t\t\t\t\tnegmax = nparray[:-points_from_zero].l1x.argmax()\n\t\t\t\tif not max:\n\t\t\t\t\tnegmax = nparray[:-points_from_zero].l1x.argmin()\n\t\t\t\tupper = negmax+upperbound\n\t\t\t\tif upper>len(nparray):\n\t\t\t\t\tupper = len(nparray)-1\n\t\t\t\tlower = negmax-lowerbound\n\t\t\t\tif lower<0:\n\t\t\t\t\tlower = 0\n\t\t\t\tnparray = nparray[lower:upper]\n\t\t\t\tnparray = pd.DataFrame({'l1x':nparray.l1x.values*1e6,'field':nparray.field.values})\n\t\t\t\tif not curr in alldatadict:\n\t\t\t\t\talldatadict[curr]={}\n\t\t\t\talldatadict[curr][freq]=nparray\n\t\t\t\ti=i+1\n\t\treturn alldatadict\n\n\n\n","repo_name":"ndr37/DRRA","sub_path":"dataloadingprocedures.py","file_name":"dataloadingprocedures.py","file_ext":"py","file_size_in_byte":7994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"18199876228","text":"from functools import partial\nfrom django.shortcuts import render\nfrom rest_framework.response import Response\nfrom .models import Book\nfrom .serializers import BookSerializer\nfrom rest_framework import status\nfrom rest_framework.views import APIView\n\nclass BookAPI(APIView):\n    def get(self,request,pk=None,format=None):\n        id = pk\n        if id is not None:\n            book = Book.objects.get(id=id)\n            serializer = BookSerializer(book)\n            return Response(serializer.data)\n        \n        book = Book.objects.all()\n        serializer = BookSerializer(book,many=True)\n        return Response(serializer.data)\n    \n    def post(self,request,format=None):\n        serializer = BookSerializer(data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response({'msg':'Data created'},status=status.HTTP_201_CREATED)\n        return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    \n    def put(self,request,pk,format=None):\n        id = pk\n        book = Book.objects.get(pk=id)\n        serializer = BookSerializer(book,data=request.data)\n        if serializer.is_valid():\n            serializer.save()\n            return Response({'msg':'Complete data updated'})\n        return Response(serializer.errors,status=status.HTTP_400_BAD_REQUEST)\n    \n    def patch(self,request,pk,format=None):\n        id = pk\n        book = Book.objects.get(pk=id)\n        serializer = BookSerializer(book,data=request.data,partial=True)\n        if serializer.is_valid():\n            serializer.save()\n            return Response({'msg':'Partial data updated'})\n        return Response(serializer.errors)\n    \n    def delete(self,request,pk,format=None):\n        id = pk\n        book = Book.objects.get(pk=id)\n        book.delete()\n        return Response({'msg':'Data deleted'})\n    \n    ","repo_name":"samratiam/Python-Django","sub_path":"restapi/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"851087064","text":"import numpy as np\nfrom scipy.stats import binom\nfrom numba import jit\nfrom ..tinyhouse import ProbMath\n\ndef performAssignement(assignInfo, assignment, args = None):\n\n    if args.runtype in [\"opp\", \"both\"] :\n        createOppHomozygoteAssignment(assignInfo, assignment, args)\n    if args.runtype in [\"likelihood\", \"both\"] :\n        createProbAssignment(assignInfo, assignment, args)\n\ndef createOppHomozygoteAssignment(assignInfo, 
assignment, args):\n child = assignment.ind\n altParent = assignment.alternativeParent\n potentialParents = assignment.potentialParents\n\n childGeno = child.genotypes\n if childGeno is None: childGeno = callGenotypes(assignInfo.getGenotypes(child))\n\n if altParent is not None: \n if altParent.genotypes is not None: altGeno = altParent.genotypes\n else: altGeno = callGenotypes(assignInfo.getGenotypes(altParent))\n else: altGeno = None\n\n for cont in potentialParents.values():\n\n parentGeno = cont.ind.genotypes\n if parentGeno is None: parentGeno = callGenotypes(assignInfo.getGenotypes(cont.ind))\n\n if altGeno is None:\n cont.opp, cont.numLoci = evaluateDifferenceNoAlt(childGeno, parentGeno)\n cont.oppWithoutOtherParent = cont.opp\n else:\n cont.opp, cont.numLoci = evaluateDifferenceAlt(childGeno, parentGeno, altGeno)\n cont.oppWithoutOtherParent = evaluateDifferenceNoAlt(childGeno, parentGeno)[0]\n if cont.numLoci > 0:\n cont.pValue = binom.logsf(cont.opp, cont.numLoci, args.error)\n else: \n cont.pValue = 1 \n\ndef callGenotypes(genoProbs) :\n combined = np.array([genoProbs[0,:], genoProbs[1,:] + genoProbs[2,:], genoProbs[3,:]])\n maxVals = combined.max(axis = 0)\n genotypes = np.argmax(combined, 0)\n genotypes[maxVals < .9] = 9\n return genotypes\n\ndef createProbAssignment(assignInfo, assignment, args = None):\n child = assignment.ind\n alternativeParent = assignment.alternativeParent\n potentialParents = assignment.potentialParents\n\n\n baseEstimates = generateNullDistributions(child, alternativeParent, assignInfo, args)\n \n for parentId, cont in potentialParents.items():\n parentGenotype = assignInfo.getGenotypes(cont.ind)\n cont.score, cont.probabilitySummaries = evaluateParent(parentGenotype, baseEstimates)\n\n score = cont.probabilitySummaries[0] - cont.probabilitySummaries[1]\n # print(\" \") \n # sireDist = [evaluateSimulatedParent(cont.ind, baseEstimates[0], baseEstimates) for i in range(10)]\n # fullDist = [evaluateSimulatedParent(cont.ind, baseEstimates[1], baseEstimates) for i in range(10)]\n # cont.score = 0\n # if all(score > fullDist) and any(score > sireDist): cont.score = 100\n\n return(potentialParents)\n\ndef evaluateSimulatedParent(realParent, targetProbabilities, nullDistributions) :\n\n gError = 0.01\n seqerror = 0.001\n nLoci = targetProbabilities.shape[1]\n\n reducedProbs = np.array([targetProbabilities[0,:],\n targetProbabilities[1,:] + targetProbabilities[2,:],\n targetProbabilities[3,:] ], dtype=np.float32)\n cumProbs = np.cumsum(reducedProbs, axis = 0)\n random = np.random.rand(nLoci)\n\n cumProbs = cumProbs - random[None,:]\n trueGenotypes = np.argmax(cumProbs > 0, axis = 0)\n #Bad code\n if realParent.genotypes is None:\n genotypes = None\n else:\n genotypes = trueGenotypes.copy()\n genotypes[realParent.genotypes == 9] = 9 \n\n if realParent.reads is None:\n reads = None\n else:\n readCounts = realParent.reads[0] + realParent.reads[1]\n refCounts = np.random.binomial(readCounts, p = .5)\n altCounts = readCounts - refCounts\n reads = getReads(refCounts, altCounts, trueGenotypes)\n\n simulatedGenotype = ProbMath.getGenotypeProbabilities(nLoci, genotypes, reads, gError, seqerror)\n score, probabilitySummaries = evaluateParent(simulatedGenotype, nullDistributions)\n \n return probabilitySummaries[0] - probabilitySummaries[1]\n\n@jit(nopython=True)\ndef getReads(refCounts, altCounts, genotypes) :\n nLoci = len(genotypes)\n read0 = np.full(nLoci, 0, dtype = np.int16)\n read1 = np.full(nLoci, 0, dtype = np.int16)\n\n for i in range(nLoci) :\n if genotypes[i] 
== 0:\n read0[i] = refCounts[i] + altCounts[i]\n if genotypes[i] == 1:\n read0[i] = refCounts[i]\n read1[i] = altCounts[i]\n if genotypes[i] == 2:\n read1[i] = refCounts[i] + altCounts[i]\n return (read0, read1)\n\ndef generateNullDistributions(child, alternativeParent, assignInfo, args):\n nullGenotypes = assignInfo.getMaf()\n\n segregation = ProbMath.generateSegregation(partial=True)\n\n childGeno = assignInfo.getGenotypes(child)\n altGeno = assignInfo.getGenotypes(alternativeParent)\n\n parentGenotypes = np.einsum(\"bi, ci, abc -> ai\", altGeno, childGeno, segregation) \n \n grandParentGenotypes = np.einsum(\"ci, abc -> abi\", parentGenotypes, segregation) \n \n if args.usemaf: grandParentGenotypes = np.einsum(\"abi, ai, bi -> abi\", grandParentGenotypes, nullGenotypes, nullGenotypes) \n\n grandSireGenotypes = np.einsum(\"abi, bi -> ai\", grandParentGenotypes, nullGenotypes)\n\n fullSibGenotypes = np.einsum(\"abi, abc -> ci\", grandParentGenotypes, segregation) \n halfSibGenotypes = np.einsum(\"ai, bi, abc -> ci\", grandSireGenotypes, nullGenotypes, segregation) \n\n if args.usemaf: parentGenotypes = parentGenotypes*nullGenotypes\n\n parentGenotypes = parentGenotypes/np.sum(parentGenotypes, 0)\n fullSibGenotypes = fullSibGenotypes/np.sum(fullSibGenotypes, 0)\n halfSibGenotypes = halfSibGenotypes/np.sum(halfSibGenotypes, 0)\n\n # print(parentGenotypes)\n # print(fullSibGenotypes)\n # print(halfSibGenotypes)\n \n # import sys\n # sys.exit()\n return((parentGenotypes, fullSibGenotypes, halfSibGenotypes, nullGenotypes))\n\n\n# def createProbAssignment_OLD(ped, child, potentialParents, alternativeParent=None, args = None):\n# nRegions = 1000\n# regions = getRegions(nRegions, ped.nLoci)\n# baseEstimates = generateNullDistributions(child, alternativeParent, ped)\n \n# childGenotypes = getGenotypeProbabilities(child, ped)\n# alternativeGenotypes = getGenotypeProbabilities(alternativeParent, ped, useMafWhenNull = True)\n\n# for parentId, cont in potentialParents.items():\n# parentGenotype = getGenotypeProbabilities(cont.ind, ped)\n# subLoci = None\n# if nRegions > ped.nLoci:\n# subLoci = None\n# elif args.subsample == \"coverage\":\n# subLoci = subsample(childGenotypes, parentGenotype, alternativeGenotypes, ped.maf, regions)\n# elif args.subsample == \"midpoint\":\n# subLoci = [ int((region[0] + region[1])/2) for region in regions]\n# cont.score, cont.probabilitySummaries, tmp = evaluateParent(parentGenotype, baseEstimates, subLoci)\n# return(potentialParents)\n\n# def getRegions(nRegions, nLoci) :\n# regions = []\n# regionSize = nLoci/(nRegions -1)\n\n# for i in range(nRegions) :\n# start = i*regionSize\n# stop = min( (i+1)*regionSize, nLoci-1)\n# regions.append( (int(start), int(stop)))\n\n# return regions\n\ndef evaluateParent(indGenotypes, targets) :\n scores = []\n for targetGenotypes in targets :\n combined = np.sum(indGenotypes*targetGenotypes, 0)\n # combined = np.einsum(\"ai, ai -> i\", indGenotypes, targetGenotypes)\n # combinedSquared = np.einsum(\"ai, ai -> i\", indGenotypes, targetGenotypes**2)\n score = np.sum(np.log(combined))\n scores.append(score)\n\n score, scoresArray = collapseScores(scores)\n return (score, scoresArray)\n\ndef collapseScores(scoresList) :\n scores = np.array(scoresList)\n scores -= np.max(scores)\n\n diff = logSum(scores) - logSum(scores[1:])\n\n return(diff, scores)\n\ndef logSum(llArray) :\n maxVal = np.max(llArray)\n llArray = llArray.copy() - maxVal\n\n retVal = maxVal + np.sum(np.exp(llArray))\n return retVal\n\n\n@jit(nopython=True)\ndef 
evaluateDifferenceNoAlt(childGeno, parentGeno) :\n nLoci = 0\n nOpp = 0\n for i in range(len(childGeno)):\n if childGeno[i] != 9 and parentGeno[i] != 9:\n nLoci += 1\n if (childGeno[i] == 0 and parentGeno[i] == 2) or (childGeno[i] == 2 and parentGeno[i] == 0):\n nOpp += 1\n return (nOpp, nLoci)\n\n@jit(nopython=True)\ndef evaluateDifferenceAlt(childGeno, parentGeno, altGeno) :\n nLoci = 0\n nOpp = 0\n for i in range(len(childGeno)):\n if childGeno[i] != 9 and parentGeno[i] != 9:\n nLoci += 1\n c = childGeno[i]\n if c == 1 and altGeno[i] == 0:\n c = 2\n if c == 1 and altGeno[i] == 2:\n c = 0\n if (c == 0 and parentGeno[i] == 2) or (c == 2 and parentGeno[i] == 0):\n nOpp += 1\n\n return (nOpp, nLoci)\n\n\ndef readInAssignments(fileName, findSire, pedigree) :\n assignments = []\n with open(fileName) as f:\n lines = f.readlines()\n for line in lines:\n parts = line.strip().split()\n idx = parts[0]\n potentialSires = parts[1:]\n sires = [pedigree.individuals[idx] for idx in potentialSires]\n \n if findSire: altParent = pedigree.individuals[idx].dam\n else: altParent = pedigree.individuals[idx].sire\n\n assignments.append(AssignmentHolder(pedigree.individuals[idx], altParent, sires, findSire))\n return assignments\n\n\nclass ScoreContainer(object) :\n\n def __init__(self, ind) :\n self.ind = ind\n\n # Probability Values\n self.score = None\n self.probabilitySummaries = None\n\n #Opposing Homozygote Values\n self.numLoci = None\n self.opp = None\n self.oppWithoutOtherParent = None\n self.pValue = None\n\n self.chosen=False\n\n\nclass AssignmentHolder(object):\n def __init__(self, ind, altParent, potentialSires, findSire = True):\n self.ind = ind\n\n self.findSire = findSire\n\n self.potentialParents = {ind.idx:ScoreContainer(ind) for ind in potentialSires}\n \n self.chosen = None\n self.top = None\n self.alternativeParent = altParent\n\n def chooseSire(self, threshold, p_threshold, runtype):\n\n if runtype == \"opp\" :\n scores = np.array([container.pValue for container in self.potentialParents.values()])\n containers = np.array([container for container in self.potentialParents.values()])\n\n bestInd = containers[np.argmax(scores)]\n self.top = bestInd.ind\n if bestInd.pValue > p_threshold :\n self.chosen = bestInd.ind\n\n if runtype in [\"likelihood\", \"both\"]:\n if runtype == \"likelihood\":\n containers = np.array([container for container in self.potentialParents.values()])\n if runtype == \"both\":\n containers = np.array([container for container in self.potentialParents.values() if container.pValue > p_threshold])\n\n scores = np.array([container.score for container in containers ])\n bestInd = containers[np.argmax(scores)]\n self.top = bestInd.ind\n\n if bestInd.score > threshold :\n self.chosen = bestInd.ind\n\n def writeLine(self, args) :\n\n chosen = self.chosen\n line = \"\" \n for pidx, item in self.potentialParents.items() :\n wasChosen = 0\n if self.chosen is not None: wasChosen = int(item.ind.idx == chosen.idx)\n if self.alternativeParent is None: alt = \"0\"\n else: alt = self.alternativeParent.idx\n header = f\"{self.ind.idx} {item.ind.idx} {alt} {wasChosen} \"\n \n prob = \"\"\n if args.runtype in [\"likelihood\", \"both\"]:\n score = item.score\n summary = \" \".join(str(e) for e in item.probabilitySummaries)\n prob = f\"{score} {summary} \"\n \n opp = \"\"\n if args.runtype in [\"opp\", \"both\"]:\n opp = \" \".join([str(item.opp), str(item.oppWithoutOtherParent), str(item.numLoci), str(item.pValue)])\n\n line += header + prob + opp + \"\\n\"\n return line\n\n\n def 
updatePedigree(self, pedigree, useTop = False):\n        if self.findSire: parent = self.ind.sire\n        else: parent = self.ind.dam\n\n        if useTop:\n            newSire = self.top\n\n        else:\n            newSire = self.chosen\n        if newSire is not None:\n            if self.findSire: self.ind.sire = newSire\n            if not self.findSire: self.ind.dam = newSire\n        else:\n            if self.findSire: self.ind.sire = None\n            if not self.findSire: self.ind.dam = None\n\ndef createAssignInfo(pedigree, args) :\n    nLoci = pedigree.nLoci\n    \n    assignInfo = assignInformation(nInd=pedigree.maxIdn, nLoci=nLoci)\n    \n    for ind in pedigree: \n\n        geno = ind.genotypes\n        reads = ind.reads\n        if reads is not None:\n            geno = None\n        assignInfo.penetrance[ind.idn,:,:] = ProbMath.getGenotypeProbabilities(assignInfo.nLoci, geno, reads, args.error, args.seqerror)\n    assignInfo.pentranceSetup = True\n    \n    #I think we bypass pedigree maf, just because we want to use the sequence data too.\n\n    if args.usemaf:\n        maf = np.full(nLoci, 0, dtype = np.float32)\n        for ind in pedigree:\n            maf += assignInfo.getDosages(ind)\n        maf = maf/(2*assignInfo.nInd)\n        assignInfo.setMaf(maf)\n        np.savetxt(\"maf.txt\", maf)\n    return assignInfo\n\nclass assignInformation(object):\n    def __init__(self, nInd, nLoci):\n        self.nInd = nInd\n        self.nLoci = nLoci\n\n        self.pentranceSetup = False\n        self.penetrance = np.full((nInd, 4, nLoci), .25, dtype=np.float32)\n        self.mafGenotypes = np.full((4, nLoci), .25, dtype = np.float32)\n\n    def getMaf(self):\n        return self.mafGenotypes\n\n    def setMaf(self, maf) :\n        self.mafGenotypes[0,:] = maf**2\n        self.mafGenotypes[1,:] = maf*(1-maf)\n        self.mafGenotypes[2,:] = (1-maf)*maf\n        self.mafGenotypes[3,:] = (1-maf)**2\n\n    def getGenotypes(self, ind) :\n        if ind is None: return self.mafGenotypes\n        if self.pentranceSetup :\n            return self.penetrance[ind.idn,:,:]\n        else:\n            return ProbMath.getGenotypeProbabilities(self.nLoci, ind.genotypes, ind.reads, args.error, args.seqerror)\n\n    def getDosages(self, ind):\n        genoProbs = self.getGenotypes(ind)\n        return np.dot(np.array([0,1,1,2]), genoProbs)\n\n\n","repo_name":"AlphaGenes/AlphaAssign","sub_path":"src/tinyassign/Assign/assignEvaluate.py","file_name":"assignEvaluate.py","file_ext":"py","file_size_in_byte":14612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"24111759252","text":"def solution(money):\n    dp1= [0]*len(money)\n    dp2= [0]*len(money)\n    \n    # case 1: always rob the first house, skip the last\n    dp1[0] = money[0]\n    dp1[1] = max(money[0], money[1])\n    \n    for i in range(2, len(money)-1):\n        dp1[i] = max(dp1[i-1], money[i]+dp1[i-2])\n    \n    # case 2: always rob the last house, skip the first\n    dp2[0] = 0\n    dp2[1] = money[1]\n\n    for i in range(2, len(money)):\n        dp2[i] = max(dp2[i-1], money[i]+dp2[i-2])\n    \n    answer = max(max(dp1), max(dp2))\n    \n    return answer\n","repo_name":"jungyeji/Programmers-Algorithm","sub_path":"Dynamic-Programming/도둑질.py","file_name":"도둑질.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5298090242","text":"from Plasma import *\nfrom PlasmaTypes import *\nfrom PlasmaConstants import *\n\nfumerol1Resp = ptAttribResponder(1, \"fumerol 1 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 'MuffledBlastOnly'])\nfumerol2Resp = ptAttribResponder(2, \"fumerol 2 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 'MuffledBlastOnly'])\nfumerol3Resp = ptAttribResponder(3, \"fumerol 3 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 
'MuffledBlastOnly'])\nfumerol4Resp = ptAttribResponder(4, \"fumerol 4 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 'MuffledBlastOnly'])\nfumerol5Resp = ptAttribResponder(5, \"fumerol 5 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 'MuffledBlastOnly'])\nfumerol6Resp = ptAttribResponder(6, \"fumerol 6 responder\",['Opening','Closing','Rumble1','Rumble2','Rumble3','Rumble4','BlastOpen', 'MuffledBlastOnly'])\nfumerol1Det = ptAttribActivator(7, \"detector fumerol 1\")\nfumerol2Det = ptAttribActivator(8, \"detector fumerol 2\")\nfumerol3Det = ptAttribActivator(9, \"detector fumerol 3\")\nfumerol4Det = ptAttribActivator(10, \"detector fumerol 4\")\nfumerol5Det = ptAttribActivator(11, \"detector fumerol 5\")\nfumerol6Det = ptAttribActivator(12, \"detector fumerol 6\")\nfumerol1BlastResp = ptAttribResponder(13,\"fumerol 1 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol2BlastResp = ptAttribResponder(14,\"fumerol 2 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol3BlastResp = ptAttribResponder(15,\"fumerol 3 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol4BlastResp = ptAttribResponder(16,\"fumerol 4 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol5BlastResp = ptAttribResponder(17,\"fumerol 5 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol6BlastResp = ptAttribResponder(18,\"fumerol 6 Blast responder\",['Smoke','Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nrockPuzBlast = ptAttribResponder(19,\"fumerol 1 blast special\",netForce=1)\nclothPuzBlast = ptAttribResponder(20,\"fumerol 6 blast special\",netForce=1)\nclothJumpBeh = ptAttribBehavior(22,\"cloth jump behavior\",netForce=1)\nrockJumpBeh = ptAttribBehavior(23,\"rock jump behavior\",netForce=1)\nfumerol1Act = ptAttribActivator(24, \"Activator fumerol 1\")\nfumerol2Act = ptAttribActivator(25, \"Activator fumerol 2\")\nfumerol3Act = ptAttribActivator(26, \"Activator fumerol 3\")\nfumerol4Act = ptAttribActivator(27, \"Activator fumerol 4\")\nfumerol5Act = ptAttribActivator(28, \"Activator fumerol 5\")\nfumerol6Act = ptAttribActivator(29, \"Activator fumerol 6\")\nfumerol01JumpResp = ptAttribResponder(30,\"fumerol 1 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\nfumerol02JumpResp = ptAttribResponder(31,\"fumerol 2 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\nfumerol03JumpResp = ptAttribResponder(32,\"fumerol 3 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\nfumerol04JumpResp = ptAttribResponder(33,\"fumerol 4 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\nfumerol05JumpResp = ptAttribResponder(34,\"fumerol 5 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\nfumerol06JumpResp = ptAttribResponder(35,\"fumerol 6 avatar resp\",['Level1','Level2','Level3','Level4','Level5','Level6'],netForce=1)\n\nfumerol01SteamTrig01 = ptAttribActivator(36,\"fumerol 1 trigger 1\")\nfumerol01SteamTrig02 = ptAttribActivator(37,\"fumerol 1 trigger 2\")\nfumerol01SteamTrig03 = ptAttribActivator(38,\"fumerol 1 trigger 3\")\nfumerol01SteamTrig04 = ptAttribActivator(39,\"fumerol 1 trigger 4\")\nfumerol01SteamTrig05 = ptAttribActivator(40,\"fumerol 1 trigger 5\")\nfumerol01SteamTrig06 = 
ptAttribActivator(41,\"fumerol 1 trigger 6\")\n\nfumerol02SteamTrig01 = ptAttribActivator(42,\"fumerol 2 trigger 1\")\nfumerol02SteamTrig02 = ptAttribActivator(43,\"fumerol 2 trigger 2\")\nfumerol02SteamTrig03 = ptAttribActivator(44,\"fumerol 2 trigger 3\")\nfumerol02SteamTrig04 = ptAttribActivator(45,\"fumerol 2 trigger 4\")\nfumerol02SteamTrig05 = ptAttribActivator(46,\"fumerol 2 trigger 5\")\nfumerol02SteamTrig06 = ptAttribActivator(47,\"fumerol 2 trigger 6\")\n\nfumerol03SteamTrig01 = ptAttribActivator(48,\"fumerol 3 trigger 1\")\nfumerol03SteamTrig02 = ptAttribActivator(49,\"fumerol 3 trigger 2\")\nfumerol03SteamTrig03 = ptAttribActivator(50,\"fumerol 3 trigger 3\")\nfumerol03SteamTrig04 = ptAttribActivator(51,\"fumerol 3 trigger 4\")\nfumerol03SteamTrig05 = ptAttribActivator(52,\"fumerol 3 trigger 5\")\nfumerol03SteamTrig06 = ptAttribActivator(53,\"fumerol 3 trigger 6\")\n\nfumerol04SteamTrig01 = ptAttribActivator(54,\"fumerol 4 trigger 1\")\nfumerol04SteamTrig02 = ptAttribActivator(55,\"fumerol 4 trigger 2\")\nfumerol04SteamTrig03 = ptAttribActivator(56,\"fumerol 4 trigger 3\")\nfumerol04SteamTrig04 = ptAttribActivator(57,\"fumerol 4 trigger 4\")\nfumerol04SteamTrig05 = ptAttribActivator(58,\"fumerol 4 trigger 5\")\nfumerol04SteamTrig06 = ptAttribActivator(59,\"fumerol 4 trigger 6\")\n\nfumerol05SteamTrig01 = ptAttribActivator(60,\"fumerol 5 trigger 1\")\nfumerol05SteamTrig02 = ptAttribActivator(61,\"fumerol 5 trigger 2\")\nfumerol05SteamTrig03 = ptAttribActivator(62,\"fumerol 5 trigger 3\")\nfumerol05SteamTrig04 = ptAttribActivator(63,\"fumerol 5 trigger 4\")\nfumerol05SteamTrig05 = ptAttribActivator(64,\"fumerol 5 trigger 5\")\nfumerol05SteamTrig06 = ptAttribActivator(65,\"fumerol 5 trigger 6\")\n\nfumerol06SteamTrig01 = ptAttribActivator(66,\"fumerol 6 trigger 1\")\nfumerol06SteamTrig02 = ptAttribActivator(67,\"fumerol 6 trigger 2\")\nfumerol06SteamTrig03 = ptAttribActivator(68,\"fumerol 6 trigger 3\")\nfumerol06SteamTrig04 = ptAttribActivator(69,\"fumerol 6 trigger 4\")\nfumerol06SteamTrig05 = ptAttribActivator(70,\"fumerol 6 trigger 5\")\nfumerol06SteamTrig06 = ptAttribActivator(71,\"fumerol 6 trigger 6\")\n\nfumerolSteamEmit01 = ptAttribResponder(72,\"fumerol 1 steam emitter\",['On','Off'])\nfumerolSteamEmit02 = ptAttribResponder(73,\"fumerol 2 steam emitter\",['On','Off'])\nfumerolSteamEmit03 = ptAttribResponder(74,\"fumerol 3 steam emitter\",['On','Off'])\nfumerolSteamEmit04 = ptAttribResponder(75,\"fumerol 4 steam emitter\",['On','Off'])\nfumerolSteamEmit05 = ptAttribResponder(76,\"fumerol 5 steam emitter\",['On','Off'])\nfumerolSteamEmit06 = ptAttribResponder(77,\"fumerol 6 steam emitter\",['On','Off'])\n\nfumerolJCClickable = ptAttribActivator(78,\"fumerol JC clickable\")\n\nfumerol01SteamSfx = ptAttribResponder(79,\"fumerol 1 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol02SteamSfx = ptAttribResponder(80,\"fumerol 2 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol03SteamSfx = ptAttribResponder(81,\"fumerol 3 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol04SteamSfx = ptAttribResponder(82,\"fumerol 4 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol05SteamSfx = ptAttribResponder(83,\"fumerol 5 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\nfumerol06SteamSfx = ptAttribResponder(84,\"fumerol 6 sfx responder\",['Blast1','Blast2','Blast3','Blast4','Blast5','Blast6'])\n\nfumerol01topAct = ptAttribActivator(85, 
\"fumerol 1 on top rgn\")\nfumerol02topAct = ptAttribActivator(86, \"fumerol 2 on top rgn\")\nfumerol03topAct = ptAttribActivator(87, \"fumerol 3 on top rgn\")\nfumerol04topAct = ptAttribActivator(88, \"fumerol 4 on top rgn\")\nfumerol05topAct = ptAttribActivator(89, \"fumerol 5 on top rgn\")\nfumerol06topAct = ptAttribActivator(90, \"fumerol 6 on top rgn\")\n\n\ninFumerol1 = 0\ninFumerol2 = 0\ninFumerol3 = 0\ninFumerol4 = 0\ninFumerol5 = 0\ninFumerol6 = 0\nkJumpConst = 5000\n\nonFumerol1 = 0\nonFumerol2 = 0\nonFumerol3 = 0\nonFumerol4 = 0\nonFumerol5 = 0\nonFumerol6 = 0\n\nclass GiraSteam(ptResponder):\n\n def __init__(self):\n ptResponder.__init__(self)\n self.id = 53628\n self.version = 3\n\n def OnServerInitComplete(self):\n ageSDL = PtGetAgeSDL()\n avatar = PtGetLocalAvatar()\n \n for x in range(6):\n var = \"giraSteamvent0\" + str(x + 1) + \"Open\"\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tsetting up for SDL var: \",var)\n ageSDL.setFlags(var, 1, 1)\n ageSDL.sendToClients(var)\n \n open = ageSDL[\"giraSteamvent01Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 1 open = \",open)\n if (open):\n fumerol1Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol1BlastResp, avatar)\n else:\n fumerol1Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol1Resp, avatar)\n \n open = ageSDL[\"giraSteamvent02Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 2 open = \",open)\n if (open):\n fumerol2Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol2BlastResp, avatar)\n else:\n fumerol2Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol2Resp, avatar)\n \n open = ageSDL[\"giraSteamvent03Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 3 open = \",open)\n if (open):\n fumerol3Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol3BlastResp, avatar)\n else:\n fumerol3Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol3Resp, avatar)\n \n open = ageSDL[\"giraSteamvent04Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 4 open = \",open)\n if (open):\n fumerol4Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol4BlastResp, avatar)\n else:\n fumerol4Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol4Resp, avatar)\n \n open = ageSDL[\"giraSteamvent05Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 5 open = \",open)\n if (open):\n fumerol5Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol5BlastResp, avatar)\n else:\n fumerol5Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol5Resp, avatar)\n \n open = ageSDL[\"giraSteamvent06Open\"][0]\n PtDebugPrint(\"GiraSteam.OnServerInitComplete():\\tfumerol 6 open = \",open)\n if (open):\n fumerol6Resp.run(self.key,state='Opening',avatar=avatar,fastforward=True)\n self.SetSteam(fumerol6BlastResp, avatar)\n else:\n fumerol6Resp.run(self.key,state='Closing',avatar=avatar,fastforward=True)\n self.SetRumble(fumerol6Resp, avatar)\n \n \n def SetRumble(self,resp, theavatar):\n numClosed = self.GetNumClosed()\n PtDebugPrint(\"GiraSteam.SetRumble():\\tnumClosed = \",numClosed)\n PtDebugPrint(\"GiraSteam.SetRumble():\\tresponder = \",resp.id)\n if (numClosed == 1):\n 
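# a single closed vent only produces the muffled blast; two to five closed vents step through Rumble1-Rumble4 below\n            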
resp.run(self.key,state='MuffledBlastOnly',avatar=theavatar)\n elif (numClosed == 2):\n ##PtDebugPrint(\"running rumble 1\")\n resp.run(self.key,state='Rumble1',avatar=theavatar)\n elif(numClosed == 3):\n ##PtDebugPrint(\"running rumble 2\")\n resp.run(self.key,state='Rumble2',avatar=theavatar)\n elif(numClosed == 4):\n ##PtDebugPrint(\"running rumble 3\")\n resp.run(self.key,state='Rumble3',avatar=theavatar)\n elif(numClosed == 5):\n ##PtDebugPrint(\"running rumble 4\")\n resp.run(self.key,state='Rumble4',avatar=theavatar)\n if (resp.id == fumerol1Resp.id):\n fumerol1BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit01.run(self.key,state='On',avatar=theavatar) \n if (resp.id == fumerol2Resp.id):\n fumerol2BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit02.run(self.key,state='On',avatar=theavatar) \n if (resp.id == fumerol3Resp.id):\n fumerol3BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit03.run(self.key,state='On',avatar=theavatar) \n if (resp.id == fumerol4Resp.id):\n fumerol4BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit04.run(self.key,state='On',avatar=theavatar) \n if (resp.id == fumerol5Resp.id):\n fumerol5BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit05.run(self.key,state='On',avatar=theavatar) \n if (resp.id == fumerol6Resp.id):\n fumerol6BlastResp.run(self.key,state='Smoke',avatar=theavatar)\n fumerolSteamEmit06.run(self.key,state='On',avatar=theavatar) \n return\n \n def SetSteam(self,resp, theavatar):\n numClosed = self.GetNumClosed()\n PtDebugPrint(\"GiraSteam.SetSteam():\\tnumClosed = \",numClosed)\n PtDebugPrint(\"GiraSteam.SetSteam():\\tresponder = \",resp.id)\n\n if (numClosed == 0):\n ##PtDebugPrint(\"running steam blast 2\")\n resp.run(self.key,state='Blast1',avatar=theavatar)\n elif (numClosed == 1):\n ##PtDebugPrint(\"running steam blast 3\")\n resp.run(self.key,state='Blast2',avatar=theavatar)\n elif (numClosed == 2):\n ##PtDebugPrint(\"running steam blast 4\")\n resp.run(self.key,state='Blast3',avatar=theavatar)\n elif (numClosed == 3):\n ##PtDebugPrint(\"running steam blast 5\")\n resp.run(self.key,state='Blast4',avatar=theavatar)\n elif (numClosed == 4):\n ##PtDebugPrint(\"running steam blast 6\")\n resp.run(self.key,state='Blast5',avatar=theavatar)\n elif (numClosed == 5):\n ##PtDebugPrint(\"running steam blast 6\")\n resp.run(self.key,state='Blast6',avatar=theavatar)\n \n if (resp.id == fumerol1Resp.id):\n fumerolSteamEmit01.run(self.key,state='Off',avatar=theavatar) \n if (resp.id == fumerol2Resp.id):\n fumerolSteamEmit02.run(self.key,state='Off',avatar=theavatar) \n if (resp.id == fumerol3Resp.id):\n fumerolSteamEmit03.run(self.key,state='Off',avatar=theavatar) \n if (resp.id == fumerol4Resp.id):\n fumerolSteamEmit04.run(self.key,state='Off',avatar=theavatar) \n if (resp.id == fumerol5Resp.id):\n fumerolSteamEmit05.run(self.key,state='Off',avatar=theavatar) \n if (resp.id == fumerol6Resp.id):\n fumerolSteamEmit06.run(self.key,state='Off',avatar=theavatar) \n \n \n def JumpAvatar(self,resp, theavatar):\n numClosed=self.GetNumClosed()\n PtDebugPrint(\"GiraSteam.JumpAvatar():\\tnumClosed = \",numClosed)\n PtDebugPrint(\"GiraSteam.JumpAvatar():\\tresponder = \",resp.id)\n if (numClosed == 0):\n ##PtDebugPrint(\"blast level 2\")\n resp.run(self.key,state='Level1',avatar=theavatar)\n if (numClosed == 1):\n ##PtDebugPrint(\"blast level 3\")\n resp.run(self.key,state='Level2',avatar=theavatar)\n if (numClosed == 2):\n ##PtDebugPrint(\"blast 
level 4\")\n resp.run(self.key,state='Level3',avatar=theavatar)\n if (numClosed == 3):\n ##PtDebugPrint(\"blast level 5\")\n resp.run(self.key,state='Level4',avatar=theavatar)\n if (numClosed == 4):\n ##PtDebugPrint(\"blast level 6\")\n resp.run(self.key,state='Level5',avatar=theavatar)\n if (numClosed == 5):\n ##PtDebugPrint(\"blast level 6\")\n resp.run(self.key,state='Level6',avatar=theavatar)\n \n def PlayBlastSfx(self,resp, theavatar):\n numClosed = self.GetNumClosed()\n if (numClosed == 0):\n #PtDebugPrint(\"blast sfx 1\")\n resp.run(self.key,state='Blast1',avatar=theavatar)\n if (numClosed == 1):\n #PtDebugPrint(\"blast sfx 2\")\n resp.run(self.key,state='Blast2',avatar=theavatar)\n if (numClosed == 2):\n #PtDebugPrint(\"blast sfx 3\")\n resp.run(self.key,state='Blast3',avatar=theavatar)\n if (numClosed == 3):\n #PtDebugPrint(\"blast sfx 4\")\n resp.run(self.key,state='Blast4',avatar=theavatar)\n if (numClosed == 4):\n #PtDebugPrint(\"blast sfx 5\")\n resp.run(self.key,state='Blast5',avatar=theavatar)\n if (numClosed == 5):\n #PtDebugPrint(\"blast sfx 6\")\n resp.run(self.key,state='Blast6',avatar=theavatar)\n\n\n def BeginAgeUnLoad(self, avObj):\n pass\n \n def OnTimer(self,id):\n PtDebugPrint(\"GiraSteam.OnTimer():\\tid = \",id)\n global onFumerol1\n global onFumerol2\n global onFumerol3\n global onFumerol4\n global onFumerol5\n global onFumerol6\n\n if (id == 0):\n #trigger failsafe here\n ##PtDebugPrint(\"opening all valves\")\n ageSDL = PtGetAgeSDL()\n open = ageSDL[\"giraSteamvent01Open\"][0]\n if (not open):\n fumerol1Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent01Open\"] = (1,)\n fumerol1BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n open = ageSDL[\"giraSteamvent02Open\"][0]\n if (not open):\n fumerol2Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent02Open\"] = (1,)\n fumerol2BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n open = ageSDL[\"giraSteamvent03Open\"][0]\n if (not open):\n fumerol3Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent03Open\"] = (1,)\n fumerol3BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n open = ageSDL[\"giraSteamvent04Open\"][0]\n if (not open):\n fumerol4Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent04Open\"] = (1,)\n fumerol4BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n open = ageSDL[\"giraSteamvent05Open\"][0]\n if (not open):\n fumerol5Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent05Open\"] = (1,)\n fumerol5BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n open = ageSDL[\"giraSteamvent06Open\"][0]\n if (not open):\n fumerol6Resp.run(self.key,state='BlastOpen',avatar=PtGetLocalAvatar())\n ageSDL[\"giraSteamvent06Open\"] = (1,)\n fumerol6BlastResp.run(self.key,state='Blast2',avatar=PtGetLocalAvatar())\n\n if onFumerol1:\n fumerol01JumpResp.run(self.key, state = 'Level4', avatar = PtGetLocalAvatar())\n onFumerol1 = 0\n elif onFumerol2:\n fumerol02JumpResp.run(self.key, state = 'Level4', avatar = PtGetLocalAvatar())\n onFumerol2 = 0\n elif onFumerol3:\n fumerol03JumpResp.run(self.key, state = 'Level4', avatar = PtGetLocalAvatar())\n onFumerol3 = 0\n elif onFumerol4:\n fumerol04JumpResp.run(self.key, state = 'Level4', avatar = PtGetLocalAvatar())\n onFumerol4 = 0\n elif onFumerol5:\n fumerol05JumpResp.run(self.key, state = 'Level4', avatar = 
PtGetLocalAvatar())\n onFumerol5 = 0\n elif onFumerol6:\n fumerol06JumpResp.run(self.key, state = 'Level4', avatar = PtGetLocalAvatar())\n onFumerol6 = 0\n \n fumerol1Act.enable()\n fumerol2Act.enable()\n fumerol3Act.enable()\n fumerol4Act.enable()\n fumerol5Act.enable()\n fumerol6Act.enable()\n\n elif id == 99:\n PtSetGlobalClickability(1)\n \n def GetNumClosed(self):\n \n ageSDL = PtGetAgeSDL()\n numClosed = 0\n if (ageSDL[\"giraSteamvent01Open\"] == (0,)):\n numClosed = numClosed + 1\n if (ageSDL[\"giraSteamvent02Open\"] == (0,)):\n numClosed = numClosed + 1\n if (ageSDL[\"giraSteamvent03Open\"] == (0,)):\n numClosed = numClosed + 1\n if (ageSDL[\"giraSteamvent04Open\"] == (0,)):\n numClosed = numClosed + 1\n if (ageSDL[\"giraSteamvent05Open\"] == (0,)):\n numClosed = numClosed + 1\n if (ageSDL[\"giraSteamvent06Open\"] == (0,)):\n numClosed = numClosed + 1\n if (numClosed == 6):\n fumerol1Act.disable()\n fumerol2Act.disable()\n fumerol3Act.disable()\n fumerol4Act.disable()\n fumerol5Act.disable()\n fumerol6Act.disable()\n \n #PtDebugPrint(\"num closed \",numClosed)\n return numClosed\n\n def OnNotify(self,state,id,events):\n #PtDebugPrint(\"GiraSteam.OnNotify():\\tstate = %d, id = %s\" % (state,id))\n global inFumerol1\n global inFumerol2\n global inFumerol3\n global inFumerol4\n global inFumerol5\n global inFumerol6\n global kJumpConst\n\n global onFumerol1\n global onFumerol2\n global onFumerol3\n global onFumerol4\n global onFumerol5\n global onFumerol6\n \n avatar = PtFindAvatar(events)\n local = PtGetLocalAvatar()\n numClosed = self.GetNumClosed()\n ageSDL = PtGetAgeSDL()\n ##PtDebugPrint(\"id \",id)\n\n entry = False\n if avatar == local:\n for event in events:\n if (event[0] == kCollisionEvent and event[1]):\n # entered a region\n entry = True\n\n wasLocalNotify = PtWasLocallyNotified(self.key)\n if id == fumerol01topAct.id and wasLocalNotify:\n onFumerol1 = entry\n return\n elif id == fumerol02topAct.id and wasLocalNotify:\n onFumerol2 = entry\n return\n elif id == fumerol03topAct.id and wasLocalNotify:\n onFumerol3 = entry\n return\n elif id == fumerol04topAct.id and wasLocalNotify:\n onFumerol4 = entry\n return\n elif id == fumerol05topAct.id and wasLocalNotify:\n onFumerol5 = entry\n return\n elif id == fumerol06topAct.id and wasLocalNotify:\n onFumerol6 = entry\n return\n \n if (id == fumerol1Resp.id or id == fumerol2Resp.id or \\\n id == fumerol3Resp.id or id == fumerol4Resp.id or \\\n id == fumerol5Resp.id or id == fumerol6Resp.id):\n ##PtDebugPrint(\"responder callback\")\n if (numClosed == 6):\n ##PtDebugPrint(\"running release mechanism\")\n PtAtTimeCallback(self.key,1,0)\n else:\n #set rumble / steam for all fumerols\n if (ageSDL[\"giraSteamvent01Open\"][0]):\n self.SetSteam(fumerol1BlastResp, avatar)\n else:\n self.SetRumble(fumerol1Resp, avatar)\n if (ageSDL[\"giraSteamvent02Open\"][0]):\n self.SetSteam(fumerol2BlastResp, avatar)\n else:\n self.SetRumble(fumerol2Resp, avatar)\n if (ageSDL[\"giraSteamvent03Open\"][0]):\n self.SetSteam(fumerol3BlastResp, avatar)\n else:\n self.SetRumble(fumerol3Resp, avatar)\n if (ageSDL[\"giraSteamvent04Open\"][0]):\n self.SetSteam(fumerol4BlastResp, avatar)\n else:\n self.SetRumble(fumerol4Resp, avatar)\n if (ageSDL[\"giraSteamvent05Open\"][0]):\n self.SetSteam(fumerol5BlastResp, avatar)\n else:\n self.SetRumble(fumerol5Resp, avatar)\n if (ageSDL[\"giraSteamvent06Open\"][0]):\n self.SetSteam(fumerol6BlastResp, avatar)\n else:\n self.SetRumble(fumerol6Resp, avatar)\n \n if (id == fumerol1Act.id and state):\n fumerol1Act.disable()\n 
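# read the vent's current SDL state, then flip it and run the matching open/close responder\n            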
open = ageSDL[\"giraSteamvent01Open\"][0]\n if (open):\n ageSDL[\"giraSteamvent01Open\"] = (0,)\n fumerol1Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol1BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol1 = False\n \n else:\n ageSDL[\"giraSteamvent01Open\"] = (1,)\n fumerol1Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol2Act.id and state):\n fumerol2Act.disable()\n open = ageSDL[\"giraSteamvent02Open\"][0]\n if (open):\n ageSDL[\"giraSteamvent02Open\"] = (0,)\n fumerol2Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol2BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol2 = False\n \n else:\n ageSDL[\"giraSteamvent02Open\"] = (1,)\n fumerol2Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol3Act.id and state):\n fumerol3Act.disable()\n open = ageSDL[\"giraSteamvent03Open\"][0]\n if (open):\n ageSDL[\"giraSteamvent03Open\"] = (0,)\n fumerol3Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol3BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol3 = False\n \n else:\n ageSDL[\"giraSteamvent03Open\"] = (1,)\n fumerol3Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol4Act.id and state):\n fumerol4Act.disable()\n open = ageSDL[\"giraSteamvent04Open\"][0]\n if (open):\n ageSDL[\"giraSteamvent04Open\"] = (0,)\n fumerol4Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol4BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol4 = False\n \n else:\n ageSDL[\"giraSteamvent04Open\"] = (1,)\n fumerol4Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol5Act.id and state):\n fumerol5Act.disable()\n open = ageSDL[\"giraSteamvent05Open\"][0]\n PtDebugPrint(\"GiraSteam.OnNotify():\\tNotify from fumerol05Act; open = %d\" % (open))\n if (open):\n ageSDL[\"giraSteamvent05Open\"] = (0,)\n fumerol5Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol5BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol5 = False\n \n else:\n ageSDL[\"giraSteamvent05Open\"] = (1,)\n fumerol5Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol6Act.id and state):\n fumerol6Act.disable()\n open = ageSDL[\"giraSteamvent06Open\"][0]\n PtDebugPrint(\"GiraSteam.OnNotify():\\tNotify from fumerol06Act; open = %d\" % (open))\n if (open):\n ageSDL[\"giraSteamvent06Open\"] = (0,)\n fumerol6Resp.run(self.key,state='Closing',avatar=avatar)\n fumerol6BlastResp.run(self.key,state='Smoke',avatar=avatar)\n inFumerol6 = False\n \n else:\n ageSDL[\"giraSteamvent06Open\"] = (1,)\n fumerol6Resp.run(self.key,state='Opening',avatar=avatar)\n \n elif (id == fumerol1Det.id):\n #PtDebugPrint(\"1 entry \",entry)\n inFumerol1 = entry\n\n elif (id == fumerol2Det.id):\n #PtDebugPrint(\"2 entry \",entry)\n inFumerol2 = entry\n \n elif (id == fumerol3Det.id):\n #PtDebugPrint(\"3 entry \",entry)\n inFumerol3 = entry\n \n elif (id == fumerol4Det.id):\n #PtDebugPrint(\"4 entry \",entry)\n inFumerol4 = entry\n \n elif (id == fumerol5Det.id):\n PtDebugPrint(\"5 entry \",entry)\n inFumerol5 = entry\n \n elif (id == fumerol6Det.id):\n PtDebugPrint(\"6 entry \",entry)\n inFumerol6 = entry\n \n elif (id == rockJumpBeh.id):\n for event in events:\n if event[0] == kMultiStageEvent and event[1] == 0 and event[2] == kEnterStage:\n #rockPuzBlast.run(self.key,avatar=avatar)\n pass\n elif event[0] == kMultiStageEvent and event[1] == 0 and event[2] == kAdvanceNextStage:\n fumerol1Det.enable()\n \n elif (id == clothJumpBeh.id):\n for event in events:\n if event[0] == 
kMultiStageEvent and event[1] == 0 and event[2] == kEnterStage:\n #clothPuzBlast.run(self.key,avatar=avatar)\n pass\n elif event[0] == kMultiStageEvent and event[1] == 0 and event[2] == kAdvanceNextStage:\n #fumerolJCClickable.enable()\n PtAtTimeCallback(self.key, 1, 99)\n fumerol6Det.enable()\n #PtDebugPrint(\"enabled jc\")\n \n elif ((id == fumerol01SteamTrig01.id or id == fumerol01SteamTrig02.id or\\\n id == fumerol01SteamTrig03.id or id == fumerol01SteamTrig04.id or\\\n id == fumerol01SteamTrig05.id or id == fumerol01SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol01SteamSfx, avatar)\n if (inFumerol1):\n #inFumerol1 = False\n if (numClosed < 5):\n self.JumpAvatar(fumerol01JumpResp, avatar)\n elif (numClosed == 5):\n inFumerol1 = False\n fumerol1Det.disable()\n rockJumpBeh.run(avatar)\n \n elif ((id == fumerol02SteamTrig01.id or id == fumerol02SteamTrig02.id or\\\n id == fumerol02SteamTrig03.id or id == fumerol02SteamTrig04.id or\\\n id == fumerol02SteamTrig05.id or id == fumerol02SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol02SteamSfx, avatar)\n if (inFumerol2):\n #inFumerol2 = False\n self.JumpAvatar(fumerol02JumpResp, avatar)\n \n elif ((id == fumerol03SteamTrig01.id or id == fumerol03SteamTrig02.id or\\\n id == fumerol03SteamTrig03.id or id == fumerol03SteamTrig04.id or\\\n id == fumerol03SteamTrig05.id or id == fumerol03SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol03SteamSfx, avatar)\n if (inFumerol3):\n #inFumerol3 = False\n self.JumpAvatar(fumerol03JumpResp, avatar)\n \n elif ((id == fumerol04SteamTrig01.id or id == fumerol04SteamTrig02.id or\\\n id == fumerol04SteamTrig03.id or id == fumerol04SteamTrig04.id or\\\n id == fumerol04SteamTrig05.id or id == fumerol04SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol04SteamSfx, avatar)\n if (inFumerol4):\n #inFumerol4 = False\n self.JumpAvatar(fumerol04JumpResp, avatar)\n \n elif ((id == fumerol05SteamTrig01.id or id == fumerol05SteamTrig02.id or\\\n id == fumerol05SteamTrig03.id or id == fumerol05SteamTrig04.id or\\\n id == fumerol05SteamTrig05.id or id == fumerol05SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol05SteamSfx, avatar)\n if id == fumerol05SteamTrig01.id:\n PtDebugPrint(\"notify from fumerol05SteamTrig01; inFumerol5 = \",inFumerol5)\n if (inFumerol5):\n #inFumerol5 = False\n self.JumpAvatar(fumerol05JumpResp, avatar)\n \n elif ((id == fumerol06SteamTrig01.id or id == fumerol06SteamTrig02.id or\\\n id == fumerol06SteamTrig03.id or id == fumerol06SteamTrig04.id or\\\n id == fumerol06SteamTrig05.id or id == fumerol06SteamTrig06.id) and state):\n self.PlayBlastSfx(fumerol06SteamSfx, avatar)\n if id == fumerol06SteamTrig01.id:\n PtDebugPrint(\"notify from fumerol06SteamTrig01; inFumerol6 = \",inFumerol6)\n if (inFumerol6):\n #inFumerol6=False\n if (numClosed < 5):\n self.JumpAvatar(fumerol06JumpResp, avatar)\n elif (numClosed == 5):\n inFumerol6 = False\n PtSetGlobalClickability(0)\n fumerol6Det.disable()\n clothJumpBeh.run(avatar)\n\n \n \n","repo_name":"H-uru/Plasma","sub_path":"Scripts/Python/GiraSteam.py","file_name":"GiraSteam.py","file_ext":"py","file_size_in_byte":33398,"program_lang":"python","lang":"en","doc_type":"code","stars":191,"dataset":"github-code","pt":"30"} +{"seq_id":"71913614804","text":"import os \nimport json\nfrom HMMTrigram import HmmTrigram \nfrom sklearn.metrics import confusion_matrix\nimport numpy as np\n\n\ndef LoadRawData(path):\n print(\"Loading data from: %s\" % os.path.abspath(path))\n f = open(path, 'r')\n \n lines = f.readlines()\n\n return lines\n\ndef 
LoadJsonTokens(xTrain , Tokenize = True):\n parsedJsonTokens = []\n for sentence in xTrain:\n lineJsonTokens = json.loads(sentence)\n if(Tokenize == True):\n lineJsonTokens.insert(0,[\"\",\"\"])\n lineJsonTokens.insert(0,[\"\",\"\"])\n lineJsonTokens.append([\"\",\"\"])\n parsedJsonTokens.append(lineJsonTokens)\n return parsedJsonTokens;\n\ndef LoadJsonTokensTest(xTrain):\n parsedJsonTokens = []\n for sentence in xTrain:\n lineJsonTokens = json.loads(sentence)\n parsedJsonTokens.append(lineJsonTokens)\n return parsedJsonTokens;\n\ndef CalculateAccuracy(model, predictedTags , xDevParsed):\n correctCount = 0\n totalCount = 0\n actual = []\n predicted = []\n for i in range(len(xDevParsed)):\n for j in range(1,len(xDevParsed[i])-1):\n totalCount = totalCount + 1\n actual.append(xDevParsed[i][j][1])\n predicted.append(predictedTags[i][j])\n if( xDevParsed[i][j][1] == predictedTags[i][j]):\n correctCount = correctCount + 1\n return (correctCount/totalCount , confusion_matrix(actual,predicted, labels=model.AllTags))\ndef print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):\n \"\"\"pretty print for confusion matrixes\"\"\"\n columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length\n empty_cell = \" \" * columnwidth\n \n # Begin CHANGES\n fst_empty_cell = (columnwidth-3)//2 * \" \" + \"t/p\" + (columnwidth-3)//2 * \" \"\n \n if len(fst_empty_cell) < len(empty_cell):\n fst_empty_cell = \" \" * (len(empty_cell) - len(fst_empty_cell)) + fst_empty_cell\n # Print header\n print(\" \" + fst_empty_cell, end=\" \")\n # End CHANGES\n \n for label in labels:\n print(\"%{0}s\".format(columnwidth) % label, end=\" \")\n \n print()\n # Print rows\n for i, label1 in enumerate(labels):\n print(\" %{0}s\".format(columnwidth) % label1, end=\" \")\n for j in range(len(labels)):\n cell = \"%{0}.1f\".format(columnwidth) % cm[i, j]\n if hide_zeroes:\n cell = cell if float(cm[i, j]) != 0 else empty_cell\n if hide_diagonal:\n cell = cell if i != j else empty_cell\n if hide_threshold:\n cell = cell if cm[i, j] > hide_threshold else empty_cell\n print(cell, end=\" \")\n print()\n\nxTrainRaw = LoadRawData(\"twt.bonus.json\")\nxTrainParsed = LoadJsonTokens(xTrainRaw)\n\nparam = {}\nparam[\"oovfrequency\"] = 1\nparam[\"usePartialTrainingData\"] = False\nparam[\"lambda2\"] = 0.15\nparam[\"lambda1\"] = 0.8\nparam[\"lambda3\"] = 0.05\nparam[\"smoothingfactor\"] = .0001\nparam[\"debug\"] = True\nif( param[\"usePartialTrainingData\"] == True):\n xTrainParsed = xTrainParsed[:100]\n\nhmmTrigram = HmmTrigram(xTrainParsed, param)\n\nxDevRaw = LoadRawData(\"twt.test.json\")\nxDevParsed = LoadJsonTokensTest(xDevRaw)\nif( param[\"usePartialTrainingData\"] == True):\n xDevParsed = xDevParsed[:10]\n\npredictedtags = hmmTrigram.FindBestTagSequences(xDevParsed, param)\naccuracy,confusionMatrix = CalculateAccuracy(hmmTrigram, predictedtags, xDevParsed)\nallTags = hmmTrigram.AllTags.copy()\n\nprint(\"Accuracy : \", accuracy)\nprint_cm(confusionMatrix, allTags)\n\n","repo_name":"jeeshnair/NLP","sub_path":"HiddenMarkovModels/HMMTrigramStarting.py","file_name":"HMMTrigramStarting.py","file_ext":"py","file_size_in_byte":3540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"40860767096","text":"import u\nfrom shipprovider import SinglePlayerShipProvider, TwoPlayerShipProvider\nfrom ship import Ship\nfrom signal import Signal\n\n\nclass TestShipProviders:\n def test_single_provider(self):\n provider = SinglePlayerShipProvider(4)\n assert 
provider.ships_remaining(u.PLAYER_ZERO) == 4\n for i in range(0, 4):\n assert provider.ships_remaining(u.PLAYER_ZERO) == 4 - i\n items = provider.provide()\n ship = next(s for s in items if isinstance(s, Ship))\n assert ship.position == u.CENTER\n signal = next(s for s in items if isinstance(s, Signal))\n assert signal.signal == u.PLAYER_ZERO\n assert provider.ships_remaining(u.PLAYER_ZERO) == 0\n assert not provider.provide()\n\n def test_single_add(self):\n provider = SinglePlayerShipProvider(4)\n provider.add_ship(u.PLAYER_ZERO)\n assert provider.ships_remaining(u.PLAYER_ZERO) == 5\n\n def test_two_player(self):\n provider = TwoPlayerShipProvider(2)\n assert provider.ships_remaining(u.PLAYER_ZERO) == 2\n assert provider.ships_remaining(u.PLAYER_ONE) == 2\n\n def test_one_ship_for_each(self):\n provider = TwoPlayerShipProvider(1)\n items = provider.provide()\n ship = next(s for s in items if isinstance(s, Ship))\n assert ship.position == u.CENTER\n signal = next(s for s in items if isinstance(s, Signal))\n assert signal.signal == u.PLAYER_ZERO\n items = provider.provide()\n ship = next(s for s in items if isinstance(s, Ship))\n assert ship.position == u.CENTER\n signal = next(s for s in items if isinstance(s, Signal))\n assert signal.signal == u.PLAYER_ONE\n assert not provider.ships_remaining(u.PLAYER_ZERO)\n assert not provider.ships_remaining(u.PLAYER_ONE)\n assert not provider.provide()\n\n def test_add_for_zero(self):\n provider = TwoPlayerShipProvider(1)\n provider.add_ship(u.PLAYER_ZERO)\n results = []\n results.append(self.execute_provider(provider)) # 0\n results.append(self.execute_provider(provider)) # 1\n results.append(self.execute_provider(provider)) # 0\n assert results == [0, 1, 0]\n\n def test_add_for_zone(self):\n provider = TwoPlayerShipProvider(1)\n provider.add_ship(u.PLAYER_ONE)\n results = []\n results.append(self.execute_provider(provider)) # 0\n results.append(self.execute_provider(provider)) # 1\n results.append(self.execute_provider(provider)) # 0\n assert results == [0, 1, 1]\n\n def test_tests_can_set(self):\n provider = SinglePlayerShipProvider(4)\n provider.testing_set_ships_remaining([2])\n assert provider.ships_remaining(0) == 2\n\n def execute_provider(self, provider):\n items = provider.provide()\n assert next(s for s in items if isinstance(s, Ship))\n signal = next(s for s in items if isinstance(s, Signal))\n return signal.signal\n\n","repo_name":"RonJeffries/python-asteroids-1","sub_path":"tests/test_ship_providers.py","file_name":"test_ship_providers.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"30"} +{"seq_id":"16039074544","text":"class GridworldMDP:\n def __init__(self, size, walls, terminal_states, reward, transition_probabilities, discount_rate):\n self.size = size\n self.walls = walls\n self.terminal_states = terminal_states\n self.reward = reward\n self.transition_probabilities = transition_probabilities\n self.discount_rate = discount_rate\n\n @property\n def states(self):\n all_states = [\n (col, row)\n for row in range(1, self.size[1] + 1)\n for col in range(1, self.size[0] + 1)\n if (col, row) not in self.walls\n ]\n return all_states\n\n def S(self):\n return [(col, row) for col in range(1, self.size[0] + 1) for row in range(1, self.size[1] + 1)]\n\n def A(self, state):\n if state in self.walls or state in self.terminal_states:\n return []\n return [(0, -1), (-1, 0), (0, 1), (1, 0)]\n\n def P(self, state, action):\n next_states = {}\n for 
prob_idx, prob in enumerate(self.transition_probabilities):\n next_col, next_row = state[0] + self.A(state)[(self.A(state).index(action) + prob_idx) % 4][0], \\\n state[1] + self.A(state)[(self.A(state).index(action) + prob_idx) % 4][1]\n if not (1 <= next_col <= self.size[0] and 1 <= next_row <= self.size[1]):\n next_col, next_row = state\n elif (next_col, next_row) in self.walls:\n next_col, next_row = state\n next_states[(next_col, next_row)] = prob\n return next_states\n\n def R(self, state, action, next_state):\n if next_state in self.terminal_states:\n return self.terminal_states[next_state]\n return self.reward\n\n def is_terminal(self, state):\n return state in self.terminal_states\n\ndef read_input(file):\n with open(file, 'r') as f:\n lines = f.readlines()\n\n config = {}\n for line in lines:\n if line.startswith('#') or line.strip() == '':\n continue\n key, value = line.strip().split(':', 1)\n config[key.strip()] = value.strip()\n\n return config\n\ndef parse_input(config):\n size = tuple(map(int, config['size'].split()))\n walls = [tuple(map(int, wall.split())) for wall in config['walls'].split(',')]\n walls = [(wall[0], wall[1]) for wall in walls]\n terminal_states = [tuple(map(int, state.split())) for state in config['terminal_states'].split(',')]\n terminal_states = {(state[0], state[1]): state[2] for state in terminal_states}\n reward = float(config['reward'])\n transition_probabilities = list(map(float, config['transition_probabilities'].split()))\n discount_rate = float(config['discount_rate'])\n epsilon = float(config['epsilon'])\n\n return size, walls, terminal_states, reward, transition_probabilities, discount_rate, epsilon\n\ndef Q_value(mdp, state, action, U):\n sum_rewards = 0\n\n # Define the transition probabilities\n transition_probs = [0.8, 0.1, 0, 0.1]\n\n for prob_idx, prob in enumerate(transition_probs):\n next_col, next_row = state[0] + mdp.A(state)[(mdp.A(state).index(action) + prob_idx) % 4][0], \\\n state[1] + mdp.A(state)[(mdp.A(state).index(action) + prob_idx) % 4][1]\n if not (1 <= next_col <= mdp.size[0] and 1 <= next_row <= mdp.size[1]):\n next_col, next_row = state\n elif (next_col, next_row) in mdp.walls:\n next_col, next_row = state\n next_states = (next_col, next_row)\n\n reward = mdp.R(state, action, next_states)\n sum_rewards += prob * (reward + mdp.discount_rate * U[next_states])\n return sum_rewards\n\ndef value_iteration_v2(mdp, epsilon):\n U = {state: 0 for state in mdp.states}\n U_prime = U.copy()\n delta = 0\n iteration = 0\n print(\"################ VALUE ITERATION ###########################\\n\")\n\n while True:\n U = U_prime.copy()\n delta = 0\n\n print(\"iteration:\", iteration)\n for row in reversed(range(1, mdp.size[1] + 1)):\n for col in range(1, mdp.size[0] + 1):\n state = (col, row)\n if state in mdp.walls:\n print(\"--------------\", end=\" \")\n elif state in mdp.terminal_states:\n print(\"0\", end=\" \") # Changed to always print 0 for terminal states\n else:\n print(U[state], end=\" \")\n print()\n print()\n\n for state in mdp.states:\n if mdp.is_terminal(state) or state in mdp.walls:\n continue\n \n max_q_value = float(\"-inf\")\n for action in mdp.A(state):\n q_value = Q_value(mdp, state, action, U)\n if q_value > max_q_value:\n max_q_value = q_value\n\n U_prime[state] = max_q_value\n diff = abs(U_prime[state] - U[state])\n\n if diff > delta:\n delta = diff\n\n if delta < epsilon * (1 - mdp.discount_rate) / mdp.discount_rate:\n print(\"Final Value After Convergence\")\n for row in reversed(range(1, mdp.size[1] + 1)):\n for 
col in range(1, mdp.size[0] + 1):\n state = (col, row)\n if state in mdp.walls:\n print(\"--------------\", end=\" \")\n elif state in mdp.terminal_states:\n print(\"0\", end=\" \") # Changed to always print 0 for terminal states\n else:\n print(U[state], end=\" \")\n print()\n print()\n break\n iteration += 1\n return U\n\ndef print_policy(policy, mdp):\n action_symbols = {(0, -1): 'S', (-1, 0): 'W', (0, 1): 'N', (1, 0): 'E'}\n print()\n for row in reversed(range(1, mdp.size[1] + 1)):\n for col in range(1, mdp.size[0] + 1):\n state = (col, row)\n if state in mdp.walls:\n print(\"-\", end=\" \")\n elif state in mdp.terminal_states:\n print(\"T\", end=\" \")\n else:\n print(action_symbols[policy[state]], end=\" \")\n print()\n print()\n\ndef main():\n config = read_input('mdp_input.txt')\n size, walls, terminal_states, reward, transition_probabilities, discount_rate, epsilon = parse_input(config)\n mdp = GridworldMDP(size, walls, terminal_states, reward, transition_probabilities, discount_rate)\n print(f\"({size[1]}, {size[0]}, [{', '.join([f'x={wall[0]} y={wall[1]}' for wall in walls])}], {({', '.join([f'x={state[0]} y={state[1]}: {value}' for state, value in terminal_states.items()])})}, {reward}, {transition_probabilities}, {discount_rate}, {epsilon})\")\n print()\n U = value_iteration_v2(mdp, epsilon)\n policy = {state: None for state in mdp.states}\n\n for state in mdp.states:\n if mdp.is_terminal(state) or state in mdp.walls:\n continue\n\n max_q_value = float(\"-inf\")\n best_action = None\n for action in mdp.A(state):\n q_value = Q_value(mdp, state, action, U)\n if q_value > max_q_value:\n max_q_value = q_value\n best_action = action\n policy[state] = best_action\n print()\n print(\"Final Policy\")\n print_policy(policy, mdp)\n print(\"################ POLICY ITERATION ###########################\")\n print_policy(policy, mdp)\n\nif __name__ == \"__main__\":\n main()","repo_name":"AdamSWS/Markov-Descion-Process","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":7353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72531737043","text":"import os\n\nfrom datetime import timedelta\nfrom django.utils import timezone\nfrom logging import getLogger\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom utils.datetime import fromtimestamp\n\nlogger = getLogger(__name__)\n\n\nclass Command(BaseCommand):\n\n help = 'clear old ip_stat csv files'\n\n def handle(self, *args, **options):\n now = timezone.now()\n started = now\n if not os.path.exists(settings.META_STAT_ROOT):\n os.mkdir(settings.META_STAT_ROOT)\n deleted_count = 0\n for filename in os.listdir(settings.META_STAT_ROOT):\n if filename.startswith('meta_'):\n file = os.path.join(settings.META_STAT_ROOT, filename)\n stat = os.lstat(file)\n created_time = fromtimestamp(stat.st_ctime)\n if created_time < now - timedelta(days=30):\n os.remove(file)\n deleted_count += 1\n\n logger.info('Clear {} ip_stats files in {}s'.format(\n deleted_count, (timezone.now() - started).total_seconds()))\n","repo_name":"HippyFizz/conduster","sub_path":"collector/management/commands/clean_ip_stat_csv.py","file_name":"clean_ip_stat_csv.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"41279460301","text":"from libs import macro\r\nfrom discord.ext import commands\r\nimport discord\r\nimport random\r\nimport aiohttp\r\nfrom 
asyncurban import UrbanDictionary\r\nimport asyncio\r\nimport googlesearch\r\n\r\ndef foo(a:str): return float(a)\r\n\r\nclass Other(commands.Cog):\r\n def __init__(self, bot:commands.Bot):\r\n self.bot = bot\r\n @commands.command(name=\"say\")\r\n async def say(self, ctx, *, args):\r\n await ctx.message.delete()\r\n await ctx.send(args)\r\n\r\n @commands.command(name=\"8ball\")\r\n async def eightball(self, ctx):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\":8ball: The 8 ball says...\\n``{random.choice(['It is certain', 'It is decidedly so', 'Without a doubt', 'Yes – definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes Signs point to yes', 'Reply hazy', 'try again', 'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again', 'Dont count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful'])}``\"\r\n )\r\n )\r\n @commands.command(name=\"kill\")\r\n async def kill(self, ctx, member:discord.Member):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"{ctx.message.author.mention} has killed {member.mention}! :scream:\"\r\n )\r\n )\r\n @commands.command(name=\"roulette\")\r\n async def roulette(self, ctx):\r\n if random.randint(1,4) == 1:\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"{ctx.message.author.mention} pulled the trigger and... died! :skull:\"\r\n )\r\n )\r\n else:\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"{ctx.message.author.mention} pulled the trigger and... lived! :angel:\"\r\n )\r\n )\r\n @commands.command(name=\"meme-template\")\r\n async def meme_template(self, ctx):\r\n async with aiohttp.ClientSession() as cl:\r\n async with cl.get(\"https://api.imgflip.com/get_memes\") as load:\r\n res = await load.json()\r\n await ctx.send(\r\n embed=await macro.img(\r\n url=random.choice(res['data']['memes']).get(\"url\")\r\n )\r\n )\r\n await cl.close()\r\n @commands.command(name=\"urban\")\r\n async def urban(self, ctx, *, args):\r\n if ctx.channel.is_nsfw():\r\n try:\r\n ud = UrbanDictionary()\r\n word = await ud.get_word(args)\r\n await ctx.send(\r\n embed=await macro.msg(\r\n f\"**{word}**\\n```{word.definition}```\"\r\n )\r\n )\r\n except:\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"Could not find any word under ``{args}``\"\r\n )\r\n )\r\n else:\r\n await ctx.send(\r\n embed=await macro.error(\r\n desc=\"Command must be used in an NSFW channel\"\r\n )\r\n )\r\n @commands.command(name=\"binary\")\r\n async def binary(self, ctx, *, args):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=\"\".join(str(bin(ord(letter))).replace(\"b\", \"\") + \" \" for letter in args)\r\n )\r\n )\r\n @commands.command(name=\"randomurban\")\r\n async def randurban(self, ctx):\r\n if not ctx.channel.is_nsfw():\r\n await ctx.send(\r\n embed=await macro.error(\r\n desc=\"Command must be used in an NSFW channel\"\r\n )\r\n )\r\n else:\r\n ud = UrbanDictionary()\r\n word = await ud.get_random()\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"**{word}**\\n```{word.definition}```\"\r\n )\r\n )\r\n @commands.command(name=\"google\")\r\n async def google(self, ctx, *, args):\r\n if not ctx.channel.is_nsfw:\r\n return await ctx.send(\r\n embed=await macro.error(\r\n desc=\"Command must be used in an NSFW channel\"\r\n )\r\n )\r\n else:\r\n for url in googlesearch.search(args, stop=1):\r\n await ctx.send(url)\r\n break\r\n @commands.command(name=\"roll\")\r\n async def dice(self, ctx, max_value:int=None):\r\n if not max_value: 
max_value = 6\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\":game_die: On a dice from 1 to {max_value}, you rolled a {random.randint(1,max_value)}\"\r\n )\r\n )\r\n @commands.command(name=\"flip\")\r\n async def flip(self, ctx):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"You flipped a coin, and it returned {random.choice(['tails', 'heads'])}!\"\r\n )\r\n )\r\n @commands.command(name=\"color\")\r\n async def color(self, ctx, *args):\r\n for item in args:\r\n if int(item) > 255 or int(item) < 0:\r\n return\r\n await ctx.send(\r\n embed=await macro.img(\r\n desc=f\"**RGB**: {args[0]} {args[1]} {args[2]}\\n**HEX**: {'#%02x%02x%02x' % (args[0],args[1],args[2])}\",\r\n url=f\"https://ice-creme.de/randcolor/?r={args[0]}&g={args[1]}&b={args[2]}\"\r\n )\r\n )\r\n @commands.command(name=\"randomcolor\")\r\n async def randomcolor(self, ctx):\r\n r = random.randint(1,255);g = random.randint(1,255);b = random.randint(1,255)\r\n await ctx.send(\r\n embed=await macro.img(\r\n desc=f\"**RGB**: {r} {g} {b}\\n**HEX**: {'#%02x%02x%02x' % (r,g,b)}\",\r\n url=f\"https://ice-creme.de/randcolor/?r={r}&g={g}&b={b}\"\r\n )\r\n )\r\n @commands.command(name=\"character\")\r\n async def character(self, ctx):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=f\"**Age**: {random.randint(16,60)}\\n\"\r\n f\"**Alignment**: {random.choice(['Chaotic Good', 'Lawful Good', 'Neutral Good','Good Neutral', 'True Neutral', 'Chaotic Neutral', 'Chaotic Evil', 'Lawful Evil', 'Neutral Evil'])}\\n\"\r\n f\"**Color Scheme**: {random.choice(['blue', 'black', 'gray', 'red', 'navy'])} and {random.choice(['yellow', 'green', 'white', 'orange', 'brown'])}\\n\"\r\n f\"**Skin Tone**: {random.choice('Light Dark Mixed'.split())}\\n\"\r\n f\"**Body Size**: {random.choice(['average', 'small', 'large']).title()}\\n\"\r\n f\"**Race**: {random.choice(['human','human','human','human','human','human','human','human','human','human','dwarf','elf','angel/demon'])}\\n\"\r\n f\"**Hairstyle & Color**: {random.choice(['long', 'moderate', 'short'])} length and {random.choice(['white', 'brunette', 'blond', 'grey', 'jet-black','dyed'])}\"\r\n )\r\n )\r\n @commands.command(name=\"poll\")\r\n @commands.has_permissions(kick_members=True)\r\n async def poll(self, ctx, *args):\r\n emojis = [\"\\U0001f34e\", \"\\U0001f352\", \"\\U0001f34d\", \"\\U0001f34a\", \"\\U0001f349\", \"\\U0001f347\", \"\\U0001f34b\"]\r\n used = []\r\n b = \"\"\r\n if len(args) > 7: return await ctx.send(\r\n embed=await macro.error(desc=\"Sorry, but you can only have 7 options\")\r\n )\r\n for item in args:\r\n a = random.choice(emojis)\r\n used.append(a)\r\n emojis.pop(emojis.index(a))\r\n for i in range(0, len(args)):\r\n b += f\"{used[i]}: {args[i]}\\n\"\r\n msg = await ctx.send(\r\n embed=await macro.msg(\r\n desc=b,\r\n title=f\"{ctx.message.author}'s poll\"\r\n )\r\n )\r\n for emoji in used:\r\n await msg.add_reaction(emoji)\r\n #TODO: ADD MORE EMOJIS\r\n @commands.command(name=\"vote\")\r\n async def vote(self, ctx):\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=\"Please vote for me here!\\nhttps://discordbots.org/bot/503962394129596426/vote\"\r\n )\r\n )\r\n @commands.command(name=\"invite\")\r\n async def invite(self, ctx, member:discord.Member=None):\r\n if not member:\r\n member = ctx.message.author\r\n await member.send(\r\n embed=await macro.msg(\r\n desc=\"https://discordapp.com/oauth2/authorize?client_id=503962394129596426&permissions=8&scope=bot\"\r\n )\r\n )\r\n @commands.command(name='fact')\r\n async def fact(self, ctx):\r\n 
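        # Fetch a random fact from the nekos.life JSON API; the `async with` context
        # manager closes the aiohttp session automatically when the block exits.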
async with aiohttp.ClientSession() as cs:\r\n async with cs.get(\"https://nekos.life/api/v2/fact\") as r:\r\n re = await r.json()\r\n await ctx.send(\r\n embed=await macro.msg(\r\n desc=re.get(\"fact\")\r\n )\r\n )\r\n await cs.close()\r\n\r\ndef setup(bot:commands.Bot):\r\n bot.add_cog(Other(bot=bot))\r\n","repo_name":"meestr/yomenai","sub_path":"cogs/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":9036,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"74912780565","text":"import numpy as np\nfrom scipy.stats import norm, t\n\ndef check_p01(data):\n mu=np.mean(data)\n std=np.std(data)\n \n if np.abs(mu-10)/10<0.01 and np.abs(std-1)<0.15 and len(data)==100:\n print('Nice work')\n points=2\n return points\n else:\n print('Whoops, try again')\n points=0\n return points\n \ndef check_p02(mu,conf_interval):\n if np.abs(mu-10)/10<0.01 and np.abs(conf_interval-0.2)/0.2<0.1:\n points=2\n print('Nice work')\n return points\n else:\n points=0\n print('Whoops, try again')\n return points\n \ndef check_p03(data1,data2):\n N1=len(data1)\n N2=len(data2)\n mu1=np.mean(data1); mu2=np.mean(data2)\n sigma1=np.std(data1); sigma2=np.std(data2); \n\n A=np.abs(N1+N2)/(N1*N2*1.0)\n B=((N1-1)*sigma1**2+(N2-1)*sigma2**2)/(N1+N2-2)\n\n tstat=np.abs(mu1-mu2)/np.sqrt(A*B)\n\n print('t=%1.2f'%tstat)\n\n df=N1+N2-2\n if tstat>t.interval(0.95, df)[1] and (mu1-mu2)/mu1<0.1:\n points=2\n print('Nice work')\n return points\n else:\n print('Whoops, try again')\n return points\n \nif __name__=='__main__':\n print('This is the library to check Lab 00 prework')\n","repo_name":"cooperrc/sensors_and_data","sub_path":"ME3263-Lab_00/check_lab00.py","file_name":"check_lab00.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"23626389105","text":"from urllib.parse import parse_qs, urlsplit\n\nfrom django.contrib import admin\nfrom django_audit_fields.admin import audit_fieldset_tuple\nfrom edc_constants.constants import YES\nfrom edc_lab.admin import (\n RequisitionAdminMixin,\n requisition_fieldset,\n requisition_identifier_fieldset,\n requisition_status_fieldset,\n requisition_verify_fieldset,\n)\nfrom edc_model_admin.history import SimpleHistoryAdmin\n\nfrom ..admin_site import meta_subject_admin\nfrom ..forms import SubjectRequisitionForm\nfrom ..models import SubjectRequisition\nfrom .modeladmin import CrfModelAdminMixin\n\n\n@admin.register(SubjectRequisition, site=meta_subject_admin)\nclass SubjectRequisitionAdmin(RequisitionAdminMixin, CrfModelAdminMixin, SimpleHistoryAdmin):\n form = SubjectRequisitionForm\n\n fieldsets = (\n (None, {\"fields\": (\"subject_visit\", \"requisition_datetime\", \"panel\")}),\n requisition_fieldset,\n requisition_status_fieldset,\n requisition_identifier_fieldset,\n requisition_verify_fieldset,\n audit_fieldset_tuple,\n )\n\n radio_fields = {\n \"is_drawn\": admin.VERTICAL,\n \"reason_not_drawn\": admin.VERTICAL,\n \"item_type\": admin.VERTICAL,\n }\n\n def get_search_results(self, request, queryset, search_term):\n queryset, use_distinct = super().get_search_results(request, queryset, search_term)\n path = urlsplit(request.META.get(\"HTTP_REFERER\")).path\n query = urlsplit(request.META.get(\"HTTP_REFERER\")).query\n if \"bloodresult\" in path:\n attrs = parse_qs(query)\n try:\n subject_visit = attrs.get(\"subject_visit\")[0]\n except (TypeError, IndexError):\n pass\n else:\n queryset = 
queryset.filter(subject_visit=subject_visit, is_drawn=YES)\n return queryset, use_distinct\n","repo_name":"meta-trial/meta-edc","sub_path":"meta_subject/admin/subject_requisition_admin.py","file_name":"subject_requisition_admin.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"30"} +{"seq_id":"41504545886","text":"import sqlite3\n\nconnection = sqlite3.connect('skripsi.db')\n\n\n\ncur = connection.cursor()\n\n\n# print(cur.execute(\"DELETE FROM list_cluster\"))\nprint(cur.execute(\"SELECT * FROM list_cluster LIMIT 5\").fetchall())\n# print(cur.fetchall())\n\nconnection.commit()\nconnection.close()","repo_name":"adelataniaaa/info_gempa_cluster","sub_path":"test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"5432700374","text":"def requestHandler(req):\n request = ''.join((line + '\\n') for line in req) \n print(request)\n requestHead, requestBody = request.split('\\n\\n', 1)\n requestHead = requestHead.splitlines()\n\n requestHeadline = requestHead[0]\n requestHeadline = requestHeadline.split()\n\n requestHeaders = dict(x.split(': ', 1) for x in requestHead[1:])\n\n return {'headLine': requestHeadline, 'headers': requestHeaders, 'body': requestBody}","repo_name":"BrunoSBecker/PyServer","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"43229815047","text":"\"\"\"\nFacade Pattern: Provide uniform interface to the users.\nUser can use it to communicate with multiple subsystems.\n\nFacade pattern hides the complexities of the system and provides\nan interface to the client using which the client can access the system.\nThis type of design pattern comes under structural pattern as\nthis pattern adds an interface to existing system to hide its complexities.\n\"\"\"\n\n\nclass PythonConvertor:\n \"\"\"\n Python Convertor converts any file to python file by renaming.\n Doesn't contain main logic, it is just sample class.\n \"\"\"\n def __init__(self):\n self.name = \"Python-Convertor\"\n\n def rename(self, filename):\n \"\"\" Rename the file\n Change extension to .py\n Args:\n filename (str): file name\n Returns:\n str: renamed file name\n \"\"\"\n filename_parts = filename.split(\".\")\n filename_parts[-1] = \"py\"\n renamed_filename = \".\".join(filename_parts)\n return renamed_filename\n\n\nclass JavaConvertor:\n \"\"\"\n Java Convertor converts any file to java file by renaming.\n Doesn't contain main logic, it is just sample class.\n \"\"\"\n def __init__(self):\n self.name = \"Java-Convertor\"\n\n def rename(self, filename):\n \"\"\" Rename the file\n Change extension to .php\n Args:\n filename (str): file name\n Returns:\n str: renamed file name\n \"\"\"\n filename_parts = filename.split(\".\")\n filename_parts[-1] = \"java\"\n renamed_filename = \".\".join(filename_parts)\n return renamed_filename\n\n\nclass PHPConvertor:\n \"\"\"\n PHP Convertor converts any file to php file by renaming.\n Doesn't contain main logic, it is just sample class.\n \"\"\"\n def __init__(self):\n self.name = \"PHP-Convertor\"\n\n def rename(self, filename):\n \"\"\" Rename the file\n Change extension to .php\n Args:\n filename (str): file name\n Returns:\n str: renamed file name\n \"\"\"\n filename_parts = filename.split(\".\")\n 
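        # Swap the final dot-separated component (the extension) for 'php'.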
filename_parts[-1] = \"php\"\n        renamed_filename = \".\".join(filename_parts)\n        return renamed_filename\n\n\nclass FileConvertorFacade:\n    \"\"\"\n    Not actually a converter: it just renames the file.\n\n    It uses the other subsystem classes and provides their features to the user.\n    \"\"\"\n    def convert_file(self, filename, target_extension):\n        if target_extension in [\"py\", \"php\", \"java\"]:\n            convertor = None\n            if target_extension == \"py\":\n                convertor = PythonConvertor()\n            elif target_extension == \"java\":\n                convertor = JavaConvertor()\n            elif target_extension == \"php\":\n                convertor = PHPConvertor()\n            renamed_filename = convertor.rename(filename)\n            print(\"Converted file name : \", renamed_filename)\n        else:\n            print(\"Invalid Extension selected!\")\n            return 0\n\n\nif __name__ == \"__main__\":\n\n    file_convertor = FileConvertorFacade()\n\n    input_file = input(\"Enter input file name : \")\n    target_extension = input(\"File will be converted into which extension: \")\n    file_convertor.convert_file(input_file, target_extension)\n","repo_name":"hasit73/Design-Patterns","sub_path":"facade_pattern.py","file_name":"facade_pattern.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"}
+{"seq_id":"4925829125","text":"#!/usr/local/bin/python3\n# -*- coding: UTF-8 -*-\n\nimport xlrd\nfrom tkinter import messagebox\nimport shutil\nfrom PackPackage import *\n\n# Collect referrer user ids; arguments: Excel path, project path, output path\ndef packPkg(excelFile, projectFile, outputPath):\n    print(\"Referrer Excel path is\", excelFile) #/Users/admin/Desktop/推荐人20180301.xlsx\n    print(\"Project path is\", projectFile) #/Users/admin/ios/fmapp.xcodeproj\n    print(\"IPA output path is\", outputPath) #/Users/admin/Desktop/生成的ipa包\n    # Read the workbook\n    excel_data = xlrd.open_workbook(excelFile)\n    # First sheet\n    table_one = excel_data.sheet_by_index(0)  # fetch the sheet contents by sheet index\n    # Total rows and columns in the sheet\n    lines = table_one.nrows\n    cols = table_one.ncols\n    print(\"Total rows in the sheet:\", lines)\n    print(\"Total columns in the sheet:\", cols)\n    last_output_dir = ''\n    \n    id_list = []\n\n    for i in range(0, lines):\n        # Read one row of the Excel sheet\n        row_values = table_one.row_values(i)\n        tuijianren_id = str(int(row_values[0]))\n        \n        id_list.append(tuijianren_id)\n        \n    print(\"=========== Created list:\", id_list)\n    if len(id_list) == lines:\n        # Invoke the packaging routine\n        beginToPackage(projectFile, outputPath, id_list)\n    else:\n        print(\"Failed to read the Excel file\")\n\n\n\n","repo_name":"CoderXAndZ/Python_Package","sub_path":"Python打包界面化版本1.0/createPhoneList.py","file_name":"createPhoneList.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"}
+{"seq_id":"26028290098","text":"import streamlit as st\nimport pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nimport json\nimport numpy as np\nfrom copy import deepcopy\nst.set_page_config(layout=\"wide\")\n###################################################################################\n### LOADING FILES\n\n# Load data downloaded from FAO\n\n\n\n# LOAD DATAFRAME FUNCTION\ndef load_data(path):\n    df = pd.read_csv(path)\n    return df\n\n\n# LOAD GEOJSON FILE\nwith open(\"./data/countries.geojson\") as response:\n    countries = json.load(response)\n\n# LOAD CLEANED DATA\ndf_raw = load_data(path=\"data/temp_gdppc.csv\")\ndf = deepcopy(df_raw)\n\n# Page layout (wide, with fewer side margins) was configured once at import time\n# above; st.set_page_config must be the first Streamlit call and may only run once.\n\n# Add title and header\nst.title(\"Climate change\")\nst.header(\"Detail per Country\")\n\n## Geographic Maps\n#Temperature standard deviation\n
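# The two choropleths below join df.Area country names to the GeoJSON features via
# featureidkey=\"properties.ADMIN\"; zmax pins the top of each colour scale to the
# column maximum so every figure spans the full colour range.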
fig1 = go.Figure(go.Choroplethmapbox(geojson=countries,\n                                    locations=df.Area,\n                                    z=df['Std_temp'],\n                                    featureidkey=\"properties.ADMIN\",\n                                    colorscale=[[0, 'rgb(255,255,255)'], [0.5, 'rgb(255,136,0)'], [1, 'rgb(255,0,0)']],\n                                    zmax=df['Std_temp'].max(),\n                                    text=df.Area,\n                                    hovertemplate=\"Country: %{text}<br> %{z}\",\n                                    #labels={m_std_df.Value: 'Standard deviation<br>Temperature change'},\n                                    marker_opacity=0.7,\n                                    marker_line_width=0))\n\nfig1.update_layout(mapbox_style=\"carto-positron\",\n                   mapbox_zoom=1)\nfig1.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\n#GDP per capita\nfig2 = go.Figure(go.Choroplethmapbox(geojson=countries,\n                                    locations=df.Area,\n                                    z=df[\"GDPpc\"],\n                                    featureidkey=\"properties.ADMIN\",\n                                    colorscale=[[0, 'rgb(255, 255, 255)'],\n                                                [0.1, 'rgb(13, 255, 174)'],\n                                                [0.3, 'rgb(77, 252, 255)'],\n                                                [0.7, 'rgb(77, 104, 255)'],\n                                                [1, 'rgb(255,0,0)']],\n                                    zmax=df[\"GDPpc\"].max(),\n                                    text=df.Area,\n                                    hovertemplate=\"Country: %{text}<br> %{z}\",\n                                    #labels={m_std_df.Value: 'Standard deviation<br>
Temperature change'},\n marker_opacity=0.7,\n marker_line_width=0))\n\nfig2.update_layout(mapbox_style=\"carto-positron\",\n mapbox_zoom=1)\nfig2.update_layout(margin={\"r\":0,\"t\":0,\"l\":0,\"b\":0})\n\noption = st.selectbox(\n\t'Choose the map to see',\n\t('Temperature changes', 'GDPpc', 'Co2',))\n\nif option == 'Temperature changes':\n\tst.plotly_chart(fig1)\n\nif option == 'GDPpc':\n\tst.plotly_chart(fig2)\n\n# Setting up columns\nc1,c3 = st.columns([1,1])\n\n\nlink = '[To see the code in GitHub ](https://github.com/AdriPerse/climate-change)'\nc3.markdown(link, unsafe_allow_html=True)\n","repo_name":"AdriPerse/climate-change","sub_path":"notebooks/trial/first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73622964244","text":"import datetime\nimport csv\nimport TwitterInteraction \n\ndef parseDateTime(dt):\n dt = dt.split(' ')\n date = dt[0].split('-')\n time = dt[1].split(':')\n ret = datetime.datetime(\n int(date[0]), int(date[1]), int(date[2]),\n hour=(int(time[0]) + 4), minute=int(time[1]))\n return ret\n\ndef parseRow(dictionary, row):\n pOpen = float(row[1])\n pClose = float(row[2])\n date = parseDateTime(row[0])\n dictionary[date] = [pOpen, pClose]\n\ndef parseCSV(dictionary):\n with open(\"marketData.csv\", \"r\") as csvFile :\n reader = csv.reader(csvFile, delimiter=',')\n for row in reader:\n parseRow(dictionary, row)\n\ndef pairTweetToMarket(marketDictionary, tweetDictionary):\n pairs = []\n for date in tweetDictionary:\n temp = date.replace(minute=(date.minute - date.minute % 15), second=0, microsecond=0)\n if temp in marketDictionary:\n print(date)\n print(\n \"Tweet: \" + tweetDictionary[date] +\n \" Price Before: %f, Price After:%f\" % (marketDictionary[temp.replace(hour=13, minute=30)][0],\n marketDictionary[temp.replace(hour=19, minute=45)][1])\n )\n\ndef generateTrainingData():\n marketDictionary = {}\n twit = TwitterInteraction.TwitterAccess()\n tweetDictionary = twit.getElonsTweets()\n parseCSV(marketDictionary)\n pairTweetToMarket(marketDictionary, tweetDictionary)\n\ngenerateTrainingData()\n","repo_name":"d-mooers/ElonBot","sub_path":"src/DataHandler.py","file_name":"DataHandler.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"9444988771","text":"# (C) Datadog, Inc. 
2018\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport click\nimport yaml\n\nfrom ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success, echo_waiting, echo_warning\nfrom ...utils import get_config_files, get_valid_checks\nfrom ....utils import basepath, read_file\n\nFILE_INDENT = ' ' * 8\n\nIGNORE_DEFAULT_INSTANCE = {\n 'ceph',\n 'dotnetclr',\n 'gunicorn',\n 'marathon',\n 'pgbouncer',\n 'process',\n 'supervisord',\n}\n\n\n@click.command(\n context_settings=CONTEXT_SETTINGS,\n short_help='Validate default configuration files'\n)\n@click.argument('check', required=False)\ndef config(check):\n \"\"\"Validate default configuration files.\"\"\"\n if check:\n checks = [check]\n else:\n checks = sorted(get_valid_checks())\n\n files_failed = {}\n files_warned = {}\n num_files = 0\n\n echo_waiting('Validating default configuration files...')\n for check in checks:\n check_display_queue = []\n\n config_files = get_config_files(check)\n for config_file in config_files:\n num_files += 1\n file_display_queue = []\n file_name = basepath(config_file)\n\n try:\n config_data = yaml.safe_load(read_file(config_file))\n except Exception as e:\n files_failed[config_file] = True\n\n # We must convert to text here to free Exception object before it goes out of scope\n error = str(e)\n\n check_display_queue.append(lambda: echo_info('{}:'.format(file_name), indent=True))\n check_display_queue.append(lambda: echo_failure('Invalid YAML -', indent=FILE_INDENT))\n check_display_queue.append(lambda: echo_info(error, indent=FILE_INDENT * 2))\n continue\n\n # Verify there is an `instances` section\n if 'instances' not in config_data:\n files_failed[config_file] = True\n file_display_queue.append(lambda: echo_failure('Missing `instances` section', indent=FILE_INDENT))\n\n # Verify there is a default instance\n else:\n instances = config_data['instances']\n if check not in IGNORE_DEFAULT_INSTANCE and not isinstance(instances, list):\n files_failed[config_file] = True\n file_display_queue.append(lambda: echo_failure('No default instance', indent=FILE_INDENT))\n\n if file_display_queue:\n check_display_queue.append(lambda: echo_info('{}:'.format(file_name), indent=True))\n check_display_queue.extend(file_display_queue)\n\n if check_display_queue:\n echo_success('{}:'.format(check))\n for display in check_display_queue:\n display()\n\n files_failed = len(files_failed)\n files_warned = len(files_warned)\n files_passed = num_files - (files_failed + files_warned)\n\n if files_failed or files_warned:\n click.echo()\n\n if files_failed:\n echo_failure('Files with errors: {}'.format(files_failed))\n\n if files_warned:\n echo_warning('Files with warnings: {}'.format(files_warned))\n\n if files_passed:\n if files_failed or files_warned:\n echo_success('Files valid: {}'.format(files_passed))\n else:\n echo_success('All {} configuration files are valid!'.format(num_files))\n\n if files_failed:\n abort()\n","repo_name":"haiyuanhe/integrations-core","sub_path":"datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"30"} +{"seq_id":"40465269766","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport geojsoncontour\nfrom io import StringIO\nfrom flask import Blueprint, request, jsonify, Response\nfrom pyproj import Transformer\nimport requests\nimport 
agentlogging\nimport json\nimport os\n\nROUTE = \"/getAermodGeoJSON\"\n\nget_aermod_geojson_bp = Blueprint('get_aermod_geojson_bp', __name__)\nlogger = agentlogging.get_logger(\"dev\")\n\n\n@get_aermod_geojson_bp.route(ROUTE, methods=['GET'])\ndef api():\n logger.info(\"Received request to process AERMOD dispersion matrix\")\n aermod_output_url = request.args[\"dispersionMatrix\"]\n srid = request.args[\"srid\"]\n # download file from url\n dispersion_file = requests.get(\n aermod_output_url, auth=requests.auth.HTTPBasicAuth('fs_user', 'fs_pass'))\n\n return get_aermod_geojson(dispersion_file.text, srid)\n\n\n# This script is only valid for a 1 hour simulation because this file shows the maximum concentration at each receptor\ndef get_aermod_geojson(aermod_output, srid):\n aermod_output_buffer = StringIO(aermod_output)\n data = pd.read_csv(aermod_output_buffer, delim_whitespace=True, skiprows=range(0, 8), header=None, names=[\n 'X', 'Y', 'AVERAGE CONC', 'ZELEV', 'ZHILL', 'ZFLAG', 'AVE', 'GRP', 'NUM HRS', 'NET ID'])\n x_all = data['X']\n y_all = data['Y']\n\n x_set = sorted(set(x_all))\n y_set = sorted(set(y_all))\n\n x_matrix = np.empty((len(x_set), len(y_set)))\n y_matrix = np.empty((len(x_set), len(y_set)))\n\n transformer = Transformer.from_crs(\"epsg:\" + srid, \"epsg:4326\")\n\n for i in range(len(x_set)):\n for j in range(len(y_set)):\n lat, lon = transformer.transform(x_set[i], y_set[j])\n x_matrix[i, j] = lon\n y_matrix[i, j] = lat\n\n conc_list = data['AVERAGE CONC']\n conc_matrix = np.empty((len(x_set), len(y_set)))\n\n average_conc = sum(conc_list)/len(conc_list)\n logger.info('Average concentration = ' + str(average_conc))\n\n if (average_conc / 1e5 > 1):\n use_g = True\n else:\n use_g = False\n\n elev_list = data['ZELEV']\n elev_matrix = np.empty((len(x_set), len(y_set)))\n\n for i in range(len(conc_list)):\n x_index = x_set.index(data['X'][i])\n y_index = y_set.index(data['Y'][i])\n if (use_g):\n conc_value = conc_list[i] / 1e6\n else:\n conc_value = conc_list[i]\n conc_matrix[x_index, y_index] = conc_value\n elev_matrix[x_index, y_index] = elev_list[i]\n\n contour_level = 30\n _, ax = plt.subplots()\n\n contourf = ax.contourf(x_matrix, y_matrix, conc_matrix,\n levels=contour_level, cmap=plt.cm.jet)\n\n contourf_elev = ax.contourf(x_matrix, y_matrix, elev_matrix,\n levels=contour_level, cmap=plt.cm.jet)\n\n plt.colorbar(contourf)\n ax.remove()\n if (use_g):\n plt.title(\"Concentration (g/m$^3$)\")\n else:\n plt.title(\"Concentration ($\\mu$g/m$^3$)\")\n plt.savefig(\"colorbar.png\", bbox_inches='tight', transparent=True, dpi=300)\n\n files = {'colorbar': open('colorbar.png', 'rb')}\n\n response = requests.post(os.environ['FILE_SERVER'] + 'colorbar/colorbar.png',\n files=files, auth=requests.auth.HTTPBasicAuth('fs_user', 'fs_pass'))\n\n url = response.headers.get('colorbar')\n logger.info(url)\n\n geojsonstring = geojsoncontour.contourf_to_geojson(\n contourf=contourf, fill_opacity=0.5)\n\n geojsonstring_elev = geojsoncontour.contourf_to_geojson(\n contourf=contourf_elev, fill_opacity=0.5)\n\n response = {'contourgeojson': json.loads(\n geojsonstring), 'colourbar': url, 'contourgeojson_elev': json.loads(geojsonstring_elev)}\n\n return jsonify(response), 200\n","repo_name":"cambridge-cares/TheWorldAvatar","sub_path":"JPS_VIRTUALSENSOR/PythonService/aermod/postprocess_aermod.py","file_name":"postprocess_aermod.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"30"} 
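The route above reduces to one key step: render the concentration grid with matplotlib's contourf, then serialise the contour set to GeoJSON with geojsoncontour. A minimal standalone sketch of that step, using a hypothetical Gaussian field on a small lon/lat grid in place of real AERMOD output:

import numpy as np
import matplotlib.pyplot as plt
import geojsoncontour

# Hypothetical concentration field on a 20x20 lon/lat grid (illustrative values).
lon, lat = np.meshgrid(np.linspace(103.6, 104.0, 20), np.linspace(1.2, 1.5, 20))
conc = np.exp(-((lon - 103.8) ** 2 + (lat - 1.35) ** 2) / 0.005)

_, ax = plt.subplots()
contourf = ax.contourf(lon, lat, conc, levels=30, cmap=plt.cm.jet)

# Same call the service uses to build the web-map layer.
geojson_str = geojsoncontour.contourf_to_geojson(contourf=contourf, fill_opacity=0.5)
print(geojson_str[:120])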
+{"seq_id":"35914841555","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: Cheng Chen\n@email: chengc0611@gmail.com\n@time: 10/29/21 11:07 AM\n\"\"\"\n\nimport open3d as o3\nimport numpy as np\nfrom scipy.spatial.kdtree import KDTree\nimport copy\n\n\ndef main():\n vis = o3.visualization.Visualizer()\n # vis.create_window()\n\n pc_file_path = '/home/cheng/proj/3d/TEASER-plusplus/data/human_models/head_models/model_man/3D_model_face_from_mr.pcd'\n # pc_gt_file_path = '/home/cheng/proj/3d/TEASER-plusplus/data/human_models/head_models/model_man/3D_model_from_mr.pcd'\n pc_original = o3.io.read_point_cloud(pc_file_path)\n # pc_gt = o3.io.read_point_cloud(pc_gt_file_path)\n\n '''init pc'''\n pc = pc_original.voxel_down_sample(1)\n # pc_show = pc_original.voxel_down_sample(20)\n # pc_gt = pc_gt.voxel_down_sample(2)\n\n '''compute'''\n center = pc.get_center()\n\n pc_down = get_rid_of_layer_loop_through_layers(pc)\n\n axis_pcd = o3.geometry.TriangleMesh()\n axis_pcd = axis_pcd.create_coordinate_frame(size=50, origin=center)\n\n o3.visualization.draw_geometries([pc, axis_pcd])\n o3.visualization.draw_geometries([pc_down, axis_pcd])\n\n return 0\n\n\ndef sphere_test():\n mesh = o3.geometry.TriangleMesh()\n pc_1 = o3.geometry.PointCloud()\n pc_2 = o3.geometry.PointCloud()\n\n sphere1 = mesh.create_sphere(radius=30, resolution=90)\n sphere2 = mesh.create_sphere(radius=10, resolution=30)\n axe = mesh.create_coordinate_frame(size=5)\n\n pc_1.points = sphere1.vertices\n # pc_1.points = pc_1.points[int(len(pc_1.points) * 2 / 3):]\n\n pc_2.points = sphere2.vertices\n center = pc_2.get_center()\n\n # pc_2.points = pc_2.points[0: int(len(pc_2.points))]\n\n pc = pc_1 + pc_2\n # pc_down = get_rid_of_inner_loop_through_dot_product(pc, center)\n pc_down = get_rid_of_layer_loop_through_layers(pc)\n\n # o3.visualization.draw_geometries([pc_1, pc_2])\n o3.visualization.draw_geometries([axe, pc])\n o3.visualization.draw_geometries([axe, pc_down])\n\n\ndef get_rid_of_layer_loop_through_layers(pc):\n layer_axis = 2\n layer_step = 20\n\n center = pc.get_center()\n '''cut off part below neck'''\n max_bound, min_bound = o3.geometry.PointCloud.get_max_bound(pc), o3.geometry.PointCloud.get_min_bound(pc)\n max_bound[-1] -= 50\n bounding_box_layer = o3.geometry.AxisAlignedBoundingBox(min_bound, max_bound)\n pc = o3.geometry.PointCloud.crop(pc, bounding_box_layer)\n\n '''normalized and scale head toward sphere shape'''\n sphere_radius = 100.0\n points = np.array(pc.points)\n points -= center\n\n ratio = sphere_radius / (max_bound - center)\n scale_matrix = np.eye(3)\n scale_matrix[0, 0] = ratio[0]\n scale_matrix[1, 1] = ratio[1]\n scale_matrix[2, 2] = ratio[2]\n points = np.matmul(points, scale_matrix)\n\n # points += center\n pc.points = o3.utility.Vector3dVector(points)\n\n # new range\n max_bound, min_bound = o3.geometry.PointCloud.get_max_bound(pc), o3.geometry.PointCloud.get_min_bound(pc)\n\n # vis\n o3.visualization.draw_geometries([pc])\n\n pc_return = o3.geometry.PointCloud()\n\n '''filter points inside layer-wise'''\n for slice_start_in_layer_axis in np.arange(min_bound[layer_axis], max_bound[layer_axis], layer_step):\n '''crop layer point cloud'''\n max_bound_layer, min_bound_layer = copy.deepcopy(max_bound), copy.deepcopy(min_bound)\n max_bound_layer[layer_axis] = slice_start_in_layer_axis + layer_step\n min_bound_layer[layer_axis] = slice_start_in_layer_axis\n bounding_box_layer = o3.geometry.AxisAlignedBoundingBox(min_bound_layer, max_bound_layer)\n layer = o3.geometry.PointCloud.crop(pc, 
bounding_box_layer)\n\n # o3.visualization.draw_geometries([layer])\n\n '''layer points '''\n pc_return += get_rid_of_inner_all_radius(layer, center=(0, 0, 0))\n\n print('total ', len(np.array(pc.points)), ' in input')\n print('total ', len(np.array(pc_return.points)), ' in output')\n return pc_return\n\n\n# def get_rid_of_inner_loop_through_dot_product(pc, center=None):\n# num_points_in_loop = 1000\n# max_distance_to_from_outter_layer = 0.02\n# max_dis_of_dot_product_vector_inline = 0.00008\n#\n# points = np.asarray(pc.points) # (N, 3)\n# np.random.shuffle(points)\n# points_porj_xyplane = copy.deepcopy(points)\n# points_porj_xyplane[:, -1] = 0.0\n# mask_testing = np.ones(points_porj_xyplane.shape[0]).astype(bool) # (N)\n# # print(len(points_porj_xyplane), 'points in count')\n#\n# pc_return = o3.geometry.PointCloud()\n# mask_parallel_and_inside = np.zeros(points_porj_xyplane.shape[0]).astype(bool)\n# # mask_out = np.zeros(points.shape[0]).astype(bool) # (N)\n#\n# if points_porj_xyplane.shape[0] <= 5:\n# return pc_return\n#\n# '''normalize all points: move to center, and rescale to unit length'''\n# if center is None:\n# center = points_porj_xyplane.mean(axis=0)\n# norms = np.expand_dims(np.linalg.norm(points_porj_xyplane - center, axis=-1), axis=-1)\n# points_normalized = (points_porj_xyplane - center) / norms # (N, 3)\n# # index = dot < 0.1\n#\n# '''dot product of all points, points in similar direction has dot product close to one,\n# variant direction has dot product close to zero '''\n#\n#\n# num_iter = int(np.ceil(points_normalized.shape[0] / num_points_in_loop))\n#\n# for i in range(num_iter):\n# # loop start from points not tested yet, ignore points that already tested parallel and inside\n# index_batch_start = i * num_points_in_loop\n# # mask_parallel_and_inside_current = mask_parallel_and_inside[index_start:]\n# points_testing = points_normalized #[np.logical_not(mask_parallel_and_inside_current)]\n# norms_testing = norms #[np.logical_not(mask_parallel_and_inside_current)]\n#\n# # index_batch_end = max((i + 1) * num_points_in_loop, points_testing.shape[0] - 1)\n# # update loop: if loop overed all testing points, break\n# if i >= len(points_testing):\n# break\n#\n# #\n# mask_parallel_and_inside_batch = mask_parallel_and_inside[index_batch_start: min(index_batch_start+num_points_in_loop, len(points_testing))]\n# points_batch = points_testing[index_batch_start: min(index_batch_start+num_points_in_loop, len(points_testing))]\n# norms_batch = norms_testing[index_batch_start: min(index_batch_start+num_points_in_loop, len(points_testing))]#[np.logical_not(mask_parallel_and_inside_current)]\n#\n# dot = np.dot(points_batch, points_testing.T)\n# mask_parallel = np.logical_and(1.0 - max_dis_of_dot_product_vector_inline < dot, dot < 1.0 + max_dis_of_dot_product_vector_inline) # (N_male, N_female)\n#\n# '''mark points in same direction but most distant from center(having biggest norm or close to)'''\n# for k in range(mask_parallel.shape[0]):\n# mask_parallel_ele = mask_parallel[k] # (N)\n# norms_parallel_ele = norms_testing[mask_parallel_ele] # (n_parallel)\n# # no maximum filtering, ignore point far away from points in outer surface\n# if norms_parallel_ele.shape[0] > 0:\n# if norms_batch[k] < norms_parallel_ele.max() - max_distance_to_from_outter_layer or \\\n# norms_batch[k] > norms_parallel_ele.max() + max_distance_to_from_outter_layer:\n# mask_parallel_and_inside_batch[k] = True\n#\n# # dot = np.dot(points_normalized, points_normalized.T) # (N, N)\n# # dot.diagonal -= 1 # (N, N), ignore 
point itself\n# # # print(index.shape)\n# #\n# # '''mark points that has similar direction'''\n# # mask = np.logical_and(dot < 1.0 + max_dis_of_dot_product_vector_inline, dot > 1.0 - max_dis_of_dot_product_vector_inline) # (\n# #\n# # '''mark points in same direction but most distant from center(having biggest norm or close to)'''\n# # for i in range(points.shape[0]):\n# # neighbors_mask = mask[i]\n# # neighbors_norm = norms[neighbors_mask]\n# # # no maximum filtering, ignore point far away from points in outer surface\n# # if neighbors_norm.shape[0] > 0:\n# # if neighbors_norm.max() - max_distance_to_from_outter_layer < norms[i] < neighbors_norm.max() + max_distance_to_from_outter_layer:\n# # mask_out[i] = True\n#\n# points_out = o3.utility.Vector3dVector(points[np.logical_not(mask_parallel_and_inside)])\n# # print(len(points_out), 'points in count')\n#\n#\n# pc_return.points = points_out\n#\n# # print('total comb', points.shape[0] * points.shape[0])\n# # print('comb 0', len(np.where(index)[0]))\n# # print('idnex 0', np.where(index))\n#\n# # print(p_1.shape, p_2.shape)\n# # c = np.cross(p_1, p_2)\n# # d = np.dot(p_1, p_2.T)\n# return pc_return\n\n\n# def get_rid_of_inner_all_dot_product(pc, center=None):\n# max_distance_to_from_outter_layer = 2\n# max_dis_of_dot_product_vector_inline = 0.000001\n#\n# pc_return = o3.geometry.PointCloud()\n# points = np.asarray(pc.points) # (N, 3)\n#\n# points_porj_xyplane = copy.deepcopy(points)\n# points_porj_xyplane[:, -1] = 0.0\n# # np.random.shuffle(points_porj_xyplane)\n#\n# mask_out = np.zeros(points_porj_xyplane.shape[0]).astype(bool) # (N)\n#\n# if points_porj_xyplane.shape[0] < 5:\n# return pc_return\n#\n# '''normalize all points: move to center, and rescale to unit length'''\n# if center is None:\n# center = points_porj_xyplane.mean(axis=0)\n# norms = np.expand_dims(np.linalg.norm(points_porj_xyplane - center, axis=-1), axis=-1)\n# points_normalized = (points_porj_xyplane - center) / norms # (N, 3)\n# # index = dot < 0.1\n#\n# '''dot product of all points, points in similar direction has dot product close to one,\n# variant direction has dot product close to zero '''\n# dot = np.dot(points_normalized, points_normalized.T) # (N, N)\n# dot -= np.eye(dot.shape[0]) # (N, N), ignore point itself\n# # print(index.shape)\n#\n# '''mark points that has similar direction'''\n# mask = dot > 1.0 - max_dis_of_dot_product_vector_inline # (\n#\n# '''mark points in same direction but most distant from center(having biggest norm or close to)'''\n# for i in range(points_porj_xyplane.shape[0]):\n# neighbors_mask = mask[i]\n# neighbors_norm = norms[neighbors_mask]\n# # no maximum filtering, ignore point far away from points in outer surface\n# if neighbors_norm.shape[0] > 0:\n# if neighbors_norm.max() - max_distance_to_from_outter_layer < norms[i]:\n# mask_out[i] = True\n#\n# points_out = o3.utility.Vector3dVector(points[mask_out])\n#\n# # for i in range(points.shape[0]):\n# # if i > 10:\n# # break\n# # dot_i = dot[i, :]\n# # id_ones = np.logical_and(dot_i < 1.2, dot_i > 0.8)\n# #\n# # '''add points'''\n# # points_out = points[i, id_ones]\n# # print(dot_i)\n# # print(np.where(id_ones)[0].shape)\n#\n# pc_return.points = points_out\n#\n# # axis_pcd = o3.geometry.TriangleMesh()\n# # axis_pcd = axis_pcd.create_coordinate_frame(size=5, origin=center)\n# # o3.visualization.draw_geometries([pc, axis_pcd])\n# # o3.visualization.draw_geometries([pc_return])\n#\n# return pc_return\n\ndef get_rid_of_inner_all_radius(pc, center=None):\n radius_shrink_factor = 0.9\n 
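    # Filtering heuristic for the function below: a point is kept only when its
    # distance from the center exceeds
    # radius_shrink_factor * (mean point norm) - max_distance_to_from_outter_layer,
    # which strips points lying on inner surfaces.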
max_distance_to_from_outter_layer = 5.0\n\n pc_return = o3.geometry.PointCloud()\n points = np.asarray(pc.points) # (N, 3)\n\n points_porj_xyplane = copy.deepcopy(points)\n # points_porj_xyplane[:, -1] = 0.0\n # np.random.shuffle(points_porj_xyplane)\n\n mask_out = np.zeros(points_porj_xyplane.shape[0]).astype(bool) # (N)\n\n if points_porj_xyplane.shape[0] < 5:\n return pc_return\n\n '''normalize all points: move to center, and rescale to unit length'''\n if center is None:\n center = points_porj_xyplane.mean(axis=0)\n norms = np.expand_dims(np.linalg.norm(points_porj_xyplane - center, axis=-1), axis=-1)\n\n '''dot product of all points, points in similar direction has dot product close to one,\n variant direction has dot product close to zero '''\n radius = norms.mean() # (N, N)\n # print(index.shape)\n\n '''mark points that has similar direction'''\n mask_out = norms > radius_shrink_factor * radius - max_distance_to_from_outter_layer # (\n mask_out = mask_out.squeeze()\n '''mark points in same direction but most distant from center(having biggest norm or close to)'''\n\n points_out = o3.utility.Vector3dVector(points[mask_out])\n\n pc_return.points = points_out\n\n print(len(points), 'points in ')\n print(len(points_out), ' points out ')\n\n # debug visual\n axis_pcd = o3.geometry.TriangleMesh()\n axis = axis_pcd.create_coordinate_frame(size=10, origin=center)\n sphere_filter = axis_pcd.create_sphere(radius=radius, resolution=1000)\n sphere_filter.translate(center)\n sphere_filter = sphere_filter.sample_points_uniformly(number_of_points=1000)\n\n # o3.visualization.draw_geometries([pc, sphere_filter, axis])\n # o3.visualization.draw_geometries([pc_return])\n\n return pc_return\n\n\nif __name__ == '__main__':\n main()\n # sphere_test()\n # a = np.arange(0, 12).reshape((3, -1))\n # b = np.array([True, False, True])\n # c = a[b]\n # print(c)\n","repo_name":"StrikerCC/slam_learn","sub_path":"pc_utils.py","file_name":"pc_utils.py","file_ext":"py","file_size_in_byte":13663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"73562258323","text":"import sys\nimport nltk.data\nfrom nltk.tokenize import whitespace_tokenize\nimport simple_tokens\n\ninput_file = open(sys.argv[1]).read()\ninput_file = simple_tokens.simple_tokens(input_file)\ninput_file = whitespace_tokenize(input_file)\ninput_file = [w.lower() for w in input_file if w.isalpha()]\nbasic_english = nltk.corpus.words.words('en-basic')\n\nnonsimple_words = tuple(set(input_file) - set(basic_english))\nprint(\"Total number of words: \")\nprint(len(input_file))\nprint(\"Nonsimple words: \")\nprint(nonsimple_words)\n\npercent = 100 - (float(len(nonsimple_words)) / float(len(input_file))) * 100\nprint(\"Percentage simple english: (v0.1)\")\nprint(percent)\n\n","repo_name":"cornelisonc/IsItSimple","sub_path":"simple_english.py","file_name":"simple_english.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"32230423875","text":"from torch.utils.data import Dataset\nfrom tqdm import tqdm\nimport numpy as np\nimport os\nimport torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport pandas as pd\nfrom preprocessing_data_functions import create_pillars , get_feature_tensor\nfrom lidar_processing_functions import *\n\n\nclass LiDARDataSet_PC_fake_data(Dataset):\n \"\"\"Lidar sample dataset.\"\"\"\n\n def __init__(self, sample_dir, 
csv_path, translation, rotation):\n \"\"\"\n Args:\n sample_dir : Directory with all ply-files.\n \"\"\"\n self.sample_dir = sample_dir\n self.list_of_files = os.listdir(self.sample_dir)\n self.length = len(self.list_of_files)\n self.csv_path = csv_path\n self.labels = [random_rigid_transformation(translation, rotation) for x in np.arange(self.length)]\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n # load ply-file\n file_path = os.path.join(self.sample_dir, self.list_of_files[idx])\n pc, global_coord = load_data(file_path, self.csv_path)\n label = self.labels[idx]\n\n # cut-out\n cutout = pc + np.array((label[0], label[1], 0))\n cutout = trim_pointcloud(cutout)\n cutout_pillars, cutout_coordinates = create_pillars(cutout)\n cutout_features, cutout_coordinates = get_feature_tensor(cutout_pillars, cutout_coordinates)\n\n # rotate/translate sweep, create pillars/tensor\n sweep = rotate_point_cloud(pc, label[-1])\n sweep = trim_pointcloud(sweep)\n sweep_pillars, sweep_coordinates = create_pillars(sweep)\n sweep_features , sweep_coordinates = get_feature_tensor(sweep_pillars, sweep_coordinates)\n\n # save labels, sweep + rot/trans-sweep\n training_sample = {'sweep': torch.from_numpy(sweep_features).float(),'sweep_coordinates': sweep_coordinates,\n 'cutout': torch.from_numpy(cutout_features).float(),\n 'cutout_coordinates': cutout_coordinates, 'labels': label}\n\n return training_sample\n\n\nclass LiDARDataSet_PC(Dataset):\n \"\"\"Lidar sample dataset.\"\"\"\n\n def __init__(self, sample_dir, csv_path, grid_csv_path, translation, rotation):\n \"\"\"\n Args:\n sample_dir : Directory with all ply-files.\n \"\"\"\n self.sample_dir = sample_dir\n self.list_of_files = os.listdir(self.sample_dir)\n self.length = len(self.list_of_files)\n self.csv_path = csv_path\n self.labels = [random_rigid_transformation(translation, rotation) for x in np.arange(self.length)]\n\n\n list_of_csv = os.listdir(grid_csv_path)\n sweeps = []\n print('loading all LiDAR detections...')\n for file in tqdm(list_of_csv):\n if 'grid' in file:\n pc = pd.read_csv(os.path.join(grid_csv_path, file))\n sweeps.append(pc)\n self.lidar_points = pd.concat(sweeps)\n print('Done loading detections.')\n del sweeps, pc\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n # load ply-file\n file_path = os.path.join(self.sample_dir, self.list_of_files[idx])\n pc, global_coord = load_data(file_path, self.csv_path)\n label = self.labels[idx]\n\n # map cut-out\n cut_out_coordinates = global_coord[0][:2] + label[:2] # translation x, y\n # we want all coordinates that in trim_range around cut_out_coordinates\n trim_range = 15\n # get all points around the sweep\n cutout = self.lidar_points[self.lidar_points['x'] <= cut_out_coordinates[0]+trim_range]\n cutout = cutout[cutout['x'] >= cut_out_coordinates[0]-trim_range]\n cutout = cutout[cutout['y'] <= cut_out_coordinates[1]+trim_range]\n cutout = cutout[cutout['y'] >= cut_out_coordinates[1]-trim_range]\n # if we want to use occupancy grid, sample points first\n # move all points such that the cut-out-coordinates becomes the origin\n cutout = cutout.values - np.array((cut_out_coordinates[0], cut_out_coordinates[1], 0))\n cutout_pillars, cutout_coordinates = create_pillars(cutout)\n cutout_features, cutout_coordinates = get_feature_tensor(cutout_pillars, cutout_coordinates)\n\n # rotate/translate sweep, create pillars/tensor\n sweep = rotate_point_cloud(pc, label[-1])\n sweep = trim_pointcloud(sweep)\n sweep_pillars, sweep_coordinates = 
create_pillars(sweep)\n sweep_features , sweep_coordinates = get_feature_tensor(sweep_pillars, sweep_coordinates)\n\n # save labels, sweep + rot/trans-sweep\n training_sample = {'sweep': torch.from_numpy(sweep_features).float(),'sweep_coordinates': sweep_coordinates,\n 'cutout': torch.from_numpy(cutout_features).float(),\n 'cutout_coordinates': cutout_coordinates, 'labels': label}\n\n return training_sample\n\n\ndef get_train_loader_pointpillars(batch_size, data_set_path_train, csv_path_train, data_set_path_val, csv_path_val, rotation, translation, kwargs):\n '''\n Create training data loader.\n :param batch_size: batch size when making a forward pass through network\n :param data_set_path: Path to directory with all the folder containing ply-files for each grid over town.\n :param kwargs: use cpu or gpu\n :return: train_loader: data loader\n '''\n grid_csv_path = '/home/annika_lundqvist144/pc_samples/csv_grids/Town01'\n training_data_set = LiDARDataSet_PC(data_set_path_train, csv_path_train, grid_csv_path, translation, rotation)\n #training_data_set = LiDARDataSet_PC_fake_data(data_set_path, csv_path, translation, rotation)\n print('Number of training samples: ', len(training_data_set))\n train_sampler = SubsetRandomSampler(np.arange(len(training_data_set), dtype=np.int64))\n train_loader = torch.utils.data.DataLoader(training_data_set, batch_size=batch_size, sampler=train_sampler, drop_last = True, **kwargs)\n\n grid_csv_path = '/home/annika_lundqvist144/pc_samples/csv_grids/validation'\n validation_data_set = LiDARDataSet_PC(data_set_path_val, csv_path_val, grid_csv_path, translation, rotation)\n #validation_data_set = LiDARDataSet_PC_fake_data(data_set_path, csv_path, translation, rotation)\n print('Number of training samples: ', len(validation_data_set))\n val_sampler = SubsetRandomSampler(np.arange(len(validation_data_set), dtype=np.int64))\n val_loader = torch.utils.data.DataLoader(validation_data_set, batch_size=batch_size, sampler=val_sampler, drop_last = True, **kwargs)\n\n return train_loader, val_loader\n","repo_name":"jtpils/master_thesis","sub_path":"point_cloud_input/DataSetsGenerateOnTheGo.py","file_name":"DataSetsGenerateOnTheGo.py","file_ext":"py","file_size_in_byte":6632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74602114964","text":"import aqt \nfrom aqt.qt import (\n QCursor,\n QDockWidget,\n QMenu,\n QPalette,\n QSize,\n Qt,\n pyqtSignal,\n)\nfrom anki.hooks import addHook\nfrom aqt.webview import AnkiWebView\nfrom aqt.utils import tooltip\n\nfrom .anki_version_detection import anki_point_version\nfrom .config import anki_21_version, gc\nfrom .sidebar_set_contents import update_contents_of_sidebar\n\n\nclass ThinAnkiWebView(AnkiWebView):\n def __init__(self, sidebar):\n AnkiWebView.__init__(self, None)\n self.sidebar = sidebar\n def sizeHint(self):\n return QSize(gc(\"default width\", 200), 100)\n # def contextMenuEvent(self, evt):\n # m = QMenu(self)\n # a = m.addAction(\"Hello\")\n # a.triggered.connect(self.sidebar.on_hello)\n # m.popup(QCursor.pos())\n\n\nclass DockableWithClose(QDockWidget):\n closed = pyqtSignal()\n def closeEvent(self, evt):\n self.closed.emit()\n QDockWidget.closeEvent(self, evt)\n\n\nclass StatsSidebar:\n def __init__(self, mw):\n self.mw = mw\n self.shown = False\n self.night_mode_on = False\n if mw.pm.night_mode():\n self.night_mode_on = True\n addHook(\"showQuestion\", lambda: update_contents_of_sidebar(self))\n addHook(\"deckClosing\", self.hide)\n 
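        # Legacy Anki hook registrations: refresh the sidebar contents whenever a
        # new question is shown, and hide the dock when the deck closes or the
        # reviewer is cleaned up.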
addHook(\"reviewCleanup\", self.hide)\n addHook(\"night_mode_state_changed\", self.refresh)\n if anki_point_version >= 50:\n aqt.gui_hooks.theme_did_change.append(self.setup_style)\n \n def setup_style(self): # theme change in Anki - only in .50+\n if self.shown:\n if aqt.theme.theme_manager.get_night_mode(): # if self.night_mode_on:\n self.set_dark_style()\n else:\n self.set_day_style()\n\n def set_dark_style(self):\n # https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qdockwidget\n # I think I can't style the divider since this like a window border which are\n # owned by the OS?\n # A QSplitter can be styled but I think this would require to\n # change main.py/setupMainWindow which I don't want to do.\n bgcolor = QPalette().window().color().name()\n if self.shown:\n self.shown.setStyleSheet(\"\"\"\n QWidget::title {\n color: white;\n background-color: #272828;\n }\n \"\"\")\n update_contents_of_sidebar(self)\n\n def set_day_style(self):\n bgcolor = QPalette().window().color().name()\n if self.shown:\n self.shown.setStyleSheet(\"\")\n update_contents_of_sidebar(self)\n\n def refresh(self, nm_state):\n self.night_mode_on=nm_state\n if self.shown:\n self._remDockable(self.shown)\n self.shown = None\n if self.mw.state == \"review\":\n self.show()\n \n def _addDockable(self, title, w):\n dock = DockableWithClose(title, self.mw)\n dock.setObjectName(title)\n dock.setAllowedAreas(Qt.DockWidgetArea.LeftDockWidgetArea | Qt.DockWidgetArea.RightDockWidgetArea)\n dock.setFeatures(QDockWidget.DockWidgetFeature.DockWidgetClosable)\n dock.setWidget(w)\n if self.mw.width() < 600:\n self.mw.resize(QSize(600, self.mw.height()))\n self.mw.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, dock)\n if anki_point_version <= 49:\n if aqt.theme.theme_manager.get_night_mode(): # if self.night_mode_on:\n # https://doc.qt.io/qt-5/stylesheet-examples.html#customizing-qdockwidget\n # I think I can't style the divider since this like a window border which are\n # owned by the OS?\n # A QSplitter can be styled but I think this would require to\n # change main.py/setupMainWindow which I don't want to do.\n dock.setStyleSheet(\"\"\"\n QWidget::title {\n color: white;\n background-color: #272828;\n }\n \"\"\")\n return dock\n\n def _remDockable(self, dock):\n if anki_point_version >= 50:\n aqt.gui_hooks.theme_did_change.remove(self.setup_style)\n self.mw.removeDockWidget(dock)\n\n def show(self):\n if not self.shown:\n self.web = ThinAnkiWebView(self)\n self.shown = self._addDockable(\"\", self.web)\n if anki_point_version >= 50:\n self.setup_style()\n self.shown.closed.connect(self._onClosed)\n self.web.onBridgeCmd = self.myLinkHandler\n update_contents_of_sidebar(self)\n\n def hide(self):\n if self.shown:\n self._remDockable(self.shown)\n self.shown = None\n\n def toggle(self):\n if self.shown:\n self.hide()\n else:\n self.show()\n\n def _onClosed(self):\n # schedule removal for after evt has finished\n self.mw.progress.timer(100, self.hide, False)\n \n def myLinkHandler(self, url):\n if url.startswith(\"BrowserSearch#\"):\n out = url.replace(\"BrowserSearch#\", \"\").split(\"#\", 1)[0]\n self.openBrowser(\"cid:\" + out)\n \n def openBrowser(self, searchterm):\n # https://ankiweb.net/shared/info/861864770\n # Open 'Added Today' from Reviewer\n # Copyright (c) 2013 Steve AW\n # Copyright (c) 2016-2017 Glutanimate\n browser = aqt.dialogs.open(\"Browser\", self.mw)\n browser.form.searchEdit.lineEdit().setText(searchterm)\n browser.onSearchActivated()\n if anki_21_version < 45:\n if u'noteCrt' in 
browser.model.activeCols:\n col_index = browser.model.activeCols.index(u'noteCrt')\n browser.onSortChanged(col_index, True)\n browser.form.tableView.selectRow(0)\n else:\n pass\n","repo_name":"ijgnd/anki__reviewer_deck_and_card_info_sidebar","sub_path":"src/sidebar_base.py","file_name":"sidebar_base.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"43173228281","text":"#!/usr/bin/env python\n\n\"\"\"\nRemoves crosstalk from PAPER data by modeling it over the course of a day\nas a static per-channel cross-coupling that rises and falls uniformly across\nthe band with changing input amplitude. Steps for crosstalk removal are:\nrun \"xtalk3.py -o\" on 1 day of UV files (which should be flagged for RFI and,\nif possible, have a sky model removed) to\ngenerate *.xtalk.npz files. Then run \"xtalk3.py -r\" on the same UV files to\nreprocess *.xtalk.npz files, separating them into static shape/uniform gain\nterms that are stored in *.xtalk.rep.npz files. Finally, run \"xtalk3.py -i\" on\nUV files from the same JD (but which need not have RFI flagged or a sky model\nremoved) to use the *.xtalk.rep.npz model to remove crosstalk from the visibility\ndata.\n\nAuthor: Aaron Parsons\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nimport aipy as a, numpy as np, sys, os, optparse\n\no = optparse.OptionParser()\no.set_usage('xtalk3.py [options] *.uv')\no.set_description(__doc__)\no.add_option('-i', '--infile', dest='infile', action='store_true',\n help='Apply xtalk calibrations generated with the -o option.')\no.add_option('-o', '--outfile', dest='outfile', action='store_true',\n help='Rather than apply the calibrations to the data, store them in a file (named by JD) to apply to a different file with the same JD.')\no.add_option('-r', '--reprocess', dest='reprocess', action='store_true',\n help='Reprocess xtalk files for the specified UV files to generate an average crosstalk profile and a time-dependent gain for scaling that profile. The results will be saved to *.xtalk.rep.npz files which will have precedence over *.xtalk.npz files.')\no.add_option('-c', '--chan', dest='chan', default='160_720',\n help='Channel range to use for normalization when reprocessing.')\nopts,args = o.parse_args(sys.argv[1:])\n\nassert(not opts.infile or not opts.outfile)\n\nif opts.reprocess:\n chans = list(map(int, opts.chan.split('_')))\n xsum, cnt, gain, times = {}, {}, {}, []\n for filename in args:\n uv = a.miriad.UV(filename)\n (uvw,jd,(i,j)),d,f = uv.read(raw=True)\n xfile = '%f.xtalk.npz' % jd\n print(xfile)\n times.append(jd)\n if not os.path.exists(xfile):\n print(xfile, 'does not exist. Skipping...')\n continue\n xtalk = np.load(xfile)\n for bl in xtalk.files:\n dat = np.array(xtalk[bl])\n adat = np.ma.masked_equal(np.abs(dat), 0)\n if bl not in gain: gain[bl] = []\n gain[bl].append(np.average(adat[chans[0]:chans[1]]))\n dat /= gain[bl][-1]\n xsum[bl] = xsum.get(bl, 0) + dat\n cnt[bl] = cnt.get(bl, 0) + np.logical_not(adat.mask).astype(np.int_)\n for bl in xsum: xsum[bl] /= np.where(cnt[bl] == 0, 1, cnt[bl])\n for c, jd in enumerate(times):\n repfile = '%f.xtalk.rep.npz' % jd\n xtalk = {}\n for bl in xsum: xtalk[bl] = gain[bl][c] * xsum[bl]\n print('Writing', repfile)\n np.savez(repfile, **xtalk)\n sys.exit(0)\n\nguess, cnt, xtalk = {}, {}, {}\nfor filename in args:\n print(filename,'->',filename+'x')\n if not opts.outfile and os.path.exists(filename+'x'):\n print(filename+'x', 'exists. Skipping...')\n continue\n uv = a.miriad.UV(filename)\n uv.select('auto',0, 0, include=False)\n (uvw,jd,(i,j)),d,f = uv.read(raw=True)\n uv.rewind()\n if opts.infile:\n xfile = '%f.xtalk.rep.npz' % jd\n if not os.path.exists(xfile): xfile = '%f.xtalk.npz' % jd\n if not os.path.exists(xfile):\n print(xfile, 'does not exist. Skipping...')\n continue\n print(' using', xfile)\n xtalk = np.load(xfile)\n else:\n guess, cnt, xtalk = {}, {}, {}\n for (uvw,t,(i,j)),d,f in uv.all(raw=True):\n bl = str(a.pol.ijp2blp(i,j,uv['pol']))\n if bl not in guess: guess[bl], cnt[bl] = 0, 0\n guess[bl] += np.where(f, 0, d)\n cnt[bl] += np.logical_not(f)\n del(uv)\n for bl in guess: xtalk[bl] = guess[bl] / np.clip(cnt[bl], 1, np.inf)\n if opts.outfile:\n xfile = '%f.xtalk.npz' % jd\n print('Writing', xfile)\n np.savez(xfile, **xtalk)\n else:\n def mfunc(uv, p, d, f):\n uvw,t,(i,j) = p\n bl = str(a.pol.ijp2blp(i,j,uv['pol']))\n try: return p, d - xtalk[bl], f\n except(KeyError): return p, d, f\n uvi = a.miriad.UV(filename)\n uvo = a.miriad.UV(filename+'x', status='new')\n uvo.init_from_uv(uvi)\n uvo.pipe(uvi, mfunc=mfunc, append2hist='XTALK:'+' '.join(sys.argv)+'\\n', raw=True)\n","repo_name":"HERA-Team/aipy","sub_path":"scripts/xtalk3.py","file_name":"xtalk3.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"30"} +{"seq_id":"13466189075","text":"\"\"\"The module contains functions that permit creating networks of neurons and writing connections to a .txt file.\nThis last one can be used by the cAER NETPARSER module to implement them in DYNAP-se\n\"\"\"\n\nfrom DYNAPSETools.classes.DevicePopulation import DevicePopulation\nfrom DYNAPSETools.classes.DeviceConnections import DeviceConnections\n\n### ===========================================================================\ndef write_connections(*connections_lists, fileName = \"network.txt\"):\n \"\"\"Write on a .txt file the network connections, ready to be uploaded to the final device.\n\nParameters:\n *connection_lists (list of connections): Contains the list of all the connections lists that must be written in the output file\n fileName (string): Name of the output .txt file\n\nNote:\n This function writes for each connection a header containing:\n\n - A separator: '#!======================================== '\n - A string with the name of the two populations that are connected\n\n These strings will be printed in the log when loading the network inside Dynap-se. For this reason it is highly recommended\n to write meaningful names for the populations.\n\n Remember to put the * on the list when calling the function (see example).\n\nExample:\n - Write a list of 2 connections to the out .txt file::\n\n _conn1 = DeviceConnections(...) # First connections\n _conn2 = DeviceConnections(...) # Second connections\n allConnections = (_conn1,\n _conn2)\n fileName = \"workingNetwork.txt\"\n write_connections(*allConnections, fileName = fileName)\n\"\"\"\n\n with open(fileName, 'w') as f:\n # Sweep over all the dictionaries containing the connections\n for connections in connections_lists:\n # Write header\n f.write('#!======================================== ') # Separator\n f.write('Connecting {}->{}\\n'.format(connections.sourcePop.name, connections.targetPop.name)) # Title of the connections\n\n # Write connections\n # In case you specified a connection type, use that. Otherwise, use the neuron type\n if connections.connTypes is not None:\n for srcNeuron, destNeuron, connType, weight in zip(connections.sourceNeurons,\n connections.targetNeurons,\n connections.connTypes,\n connections.weights):\n f.write('{}->{}-{}-{}\\n'.format(srcNeuron.create_neuron_string(),\n connType,\n int(weight),\n destNeuron.create_neuron_string()))\n else:\n for srcNeuron, destNeuron, weight in zip(connections.sourceNeurons,\n connections.targetNeurons,\n connections.weights):\n f.write('{}->{}-{}-{}\\n'.format(srcNeuron.create_neuron_string(),\n srcNeuron.neuronType,\n int(weight),\n destNeuron.create_neuron_string()))","repo_name":"sanfans/dynap-se-tools","sub_path":"dynapseNetGenerator.py","file_name":"dynapseNetGenerator.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"2479612991","text":"#!/usr/bin/env python3\n\nimport rclpy\nfrom rclpy.node import Node\nfrom std_msgs.msg import String\n\n\nclass HelloWorldSubscriber(Node):\n def __init__(self):\n super().__init__(\"hello_world_sub_node\")\n\n # The `create_subscription` function takes four parameters:\n # - The message type\n # - The topic name\n # - A callback function\n # - QoS profile (subscriber's history depth)\n self.sub = self.create_subscription(\n String, \"hello_world\", self.subscriber_callback, 10)\n\n def subscriber_callback(self, msg):\n \"\"\"\n Callback function for messages received over the \"hello_world\" topic\n \"\"\"\n print(f\"Received: {msg.data}\")\n\n\ndef main():\n # Initialise ROS client library\n # It can take commandline arguments or a context name\n # as input parameters, which we will not use currently.\n rclpy.init()\n\n my_sub = HelloWorldSubscriber()\n\n print(\"Waiting for data to be published over topic\")\n\n try:\n # The `spin` function will keep the function from exiting (I assume\n # because it's all asynchronous now), until a KeyboardInterrupt.\n rclpy.spin(my_sub)\n\n except KeyboardInterrupt:\n # Kill the node\n my_sub.destroy_node()\n\n # Shutdown and disconnect the client library\n rclpy.shutdown()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"XDGFX/udemy_ros2","sub_path":"udemy_ros2_pkg/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"71245922006","text":"import numpy as np\n\ndef print_layer_information(rl_net):\n for i, submodule in enumerate(rl_net.net.modules()):\n print(i, submodule)\n\ndef
convert_single_layer(ind,activations):\n shp=activations[ind][0].shape\n t=len(activations[ind])\n ts=np.zeros((np.prod(shp),t))\n for j in range(0,t):\n ts[:,j]=activations[ind][j].flatten()\n return ts\n\ndef convert_multiple_layers(inds,activations):\n t=len(activations[inds[0]])\n ts=np.zeros((1,t))\n for j in inds:\n ts_=convert_single_layer(j,activations)\n ts=np.vstack((ts,ts_))\n ts=ts[1:,:]\n return ts\n\ndef one_hot_action(actions_record,action_label):\n action_one_hot=[]\n for j in actions_record:\n if j==action_label:\n action_one_hot.append(1)\n else:\n action_one_hot.append(0)\n return action_one_hot\n\ndef one_hot_action_arr(actions_record):\n action_one_hot_arr=[]\n for j in range(0,5):\n action_one_hot_arr.append(one_hot_action(actions_record,j))\n return np.array(action_one_hot_arr)\n","repo_name":"mariakesa/UdacityMachineLearningEngineerNanoDegree","sub_path":"dev/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"6880601013","text":"import pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\n\n'''There are several learning rate techniques, the main ones are:\n1. Power scheduling - n(t) = n0/(1+t/s)^C, basically it decreases the learning rate each step; the schedule first drops the lr quickly and then more slowly\n2. Exponential scheduling - n(t) = n0 * 0.1^(t/s), the learning rate will drop by a factor of 10 every s steps; it decreases differently than the power schedule\n3. Piecewise scheduling - Use a constant learning rate for a number of epochs, then change the learning rate and change how many epochs this\nnew lr will be used. In general it is very good but also very hard to tune the hyperparameters\n4. 1cycle Scheduling - It starts by increasing the learning rate n0 linearly to a value n1 halfway through the training, then it starts decreasing the lr\nlinearly down to n0 again during the second half of training, finishing training by dropping the rate down several orders of magnitude (still linearly).\nThis approach is best.'''\n\n# implementing power scheduling in keras is the easiest option\n# decay is the inverse of s (number of steps it takes to divide the learning rate by one more unit)\noptimizer = keras.optimizers.SGD(lr=0.01, decay=1e-4)\n\n# exponential scheduling and piecewise scheduling\n# first a function that takes the current epoch and returns the learning rate (example for exponential)\ndef exponential_decay(lr0, s):\n def exponential_decay_fn(epoch):\n return lr0 * 0.01**(epoch/s)\n return exponential_decay_fn\n\nexponential_decay_fn = exponential_decay(lr0=0.01, s=20)\n\n# now we create a LearningRateScheduler callback, giving it the schedule function\nlr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)\n# history = model.fit(X_train, y_train, [...], callbacks=[lr_scheduler])\n\n# when we load a model and start training it more, the epoch it starts at is zero, so\n# if we are using a learning rate scheduler that takes the epoch as argument, we can\n# damage the weights of the dnn due to a way too big lr; to solve this, in the fit() method\n# we can use the initial_epoch= argument, so it starts counting the epochs right and the lr\n# does not compromise the already trained parameters\n\n# performance scheduler\nlr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)\n# this means that every time the best validation loss does not improve during 5 consecutive\n# epochs, the learning rate will be multiplied by 0.5 (divided by 2)\n\n# keras offers the option of updating the learning rate every step and not only every epoch\n# example with exponential schedule\n\ns = 20 * len('X_train') // 32 # number of steps in 20 epochs (batchsize = 32)\nlearning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)\noptimizer = keras.optimizers.SGD(learning_rate)\n# when we use this we don't need to worry about reloading the model and the new epoch counting\n# messing with the lr, since the model will save the state of the learning schedule\n# this only works with tensorflow keras, it's not a keras standard implementation\n\n\n# to sum up, exponential decay, performance scheduling and 1cycle can considerably speed\n# up convergence","repo_name":"Raaulsthub/DeepLearningStudies","sub_path":"Neural Nets Aurelien Geron/very_deep_nets/learning_rate_scheduling.py","file_name":"learning_rate_scheduling.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"72425148564","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 7 07:48:09 2020\n\n@author: horst\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\n\nimport json\n# from area import area\nfrom matplotlib import path\n\nfrom sklearn.preprocessing import OneHotEncoder\n\n\n# dictionary of the files to be downloaded\nmonth_url = {'april_data' :'uber-raw-data-apr14.csv', 'may_data' :'uber-raw-data-may14.csv',\n 'june_data' :'uber-raw-data-jun14.csv', 'july_data' :'uber-raw-data-jul14.csv',\n 'august_data' :'uber-raw-data-aug14.csv', 'september_data' :'uber-raw-data-sep14.csv'}\n\n# path to store the downloaded data\nSAVING_PATH =
os.path.join('datasets')\n\ndata_dict = {}\n\ndef load_csv_data(csv_path):\n return pd.read_csv(csv_path)\n\ndef load_data(month):\n csv_filename = month_url[month]\n csv_path = os.path.join(SAVING_PATH, csv_filename)\n\n return load_csv_data(csv_path)\n\n # pd_filename = month\n # data_dict.update({pd_filename : load_csv_data(csv_path)})\n # return data_dict[month]\n\n\ndef gentestsets(month):\n # load the data and generate a DataFrame\n x = load_data(month)\n\n # Add the label attribute to the data\n x['Pickups'] = 1\n\n # Add new parameter Neighborhood to cluster the longitude and latitude into spatial clusters\n x_add = x.copy()\n\n x_add['Neighborhood'] = np.zeros(len(x_add))\n\n geofile = json.load(open(\"NY_neighborhoods.geojson\"))\n geo_points = list(zip(x_add['Lon'], x_add['Lat']))\n for feature in geofile['features']:\n coords = feature['geometry']['coordinates'][0]\n p = path.Path(coords)\n inds = p.contains_points(geo_points)\n list_neighborhoods = [str(feature['properties']['neighborhood'])]*np.sum(inds)\n x_add.loc[x_add.index[inds], 'Neighborhood'] = list_neighborhoods\n\n # Remove all non-matching entries\n x_add = x_add[x_add['Neighborhood'] != 0]\n\n # Convert Date/Time into a time series format\n x_timeseries = x_add.copy()\n x_timeseries.index = pd.to_datetime(x_timeseries['Date/Time'])\n x_timeseries.sort_index(inplace=True)\n x_timeseries.drop(labels=['Date/Time'], axis=1, inplace=True)\n\n # Delete unnecessary parameters\n x_neighbor = x_timeseries.drop(['Lat', 'Lon', 'Base'], axis = 1)\n\n # Cluster the data within time intervals of one hour\n x_cluster = x_neighbor.resample('H').agg({'Pickups' : 'sum', 'Neighborhood': 'nunique'})\n\n # Extend the data by weekdays, weekend check and hours of the day\n x_cluster['Weekday'] = x_cluster.index.weekday\n x_cluster['Is_weekend'] = x_cluster.index.map(lambda x: 1 if x.weekday() > 4 else 0)\n x_cluster['Hour_of_day'] = x_cluster.index.hour\n\n # Split labels from training set\n X_train = x_cluster.drop('Pickups', axis = 1)\n y_train = x_cluster['Pickups'].copy()\n\n # Categorical attributes\n map_dict_weekday = {0: \"Mon\", 1: \"Tue\", 2: \"Wed\", 3: \"Thu\", 4: \"Fri\", 5: \"Sat\", 6: \"Sun\"}\n X_train['Day_of_week'] = X_train['Weekday'].map(map_dict_weekday)\n X_train.drop(labels=['Weekday'], axis=1, inplace=True)\n\n map_dict_hour = {0: \"H_1\", 1: \"H_2\", 2: \"H_3\", 3: \"H_4\", 4: \"H_5\", 5: \"H_6\", 6: \"H_7\", 7: \"H_8\", 8: \"H_9\", 9: \"H_10\", 10: \"H_11\", 11: \"H_12\", 12: \"H_13\", 13: \"H_14\", 14: \"H_15\", 15: \"H_16\", 16: \"H_17\", 17: \"H_18\", 18: \"H_19\", 19: \"H_20\", 20: \"H_21\", 21: \"H_22\", 22: \"H_23\", 23: \"H_24\"}\n\n X_train['Hours'] = X_train['Hour_of_day'].map(map_dict_hour)\n X_train.drop(labels=['Hour_of_day'], axis=1, inplace=True)\n\n X_train_prep = X_train[['Is_weekend', 'Day_of_week', 'Hours']].copy()\n\n X_train_prep = X_train_prep.reset_index(drop=True)\n\n # One-Hot-Encoder\n one_hot_encoder = OneHotEncoder(sparse=False)\n\n X_train_cat_days = X_train_prep[['Day_of_week']]\n\n X_train_cat_days_1hot = one_hot_encoder.fit_transform(X_train_cat_days)\n\n X_train_cat_hours = X_train_prep[['Hours']]\n\n X_train_cat_hours_1hot = one_hot_encoder.fit_transform(X_train_cat_hours)\n\n Is_weekend_array = np.expand_dims(np.asarray(X_train_prep['Is_weekend']), axis=1)\n\n result = np.concatenate((Is_weekend_array, X_train_cat_days_1hot, X_train_cat_hours_1hot), axis=1)\n\n X_train_prep = pd.DataFrame(result)\n\n return X_train_prep, y_train\n","repo_name":"faberfred/uber-pickup-prediction","sub_path":"Gentestset.py","file_name":"Gentestset.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"74582816405","text":"import json\nfrom flask import Flask, render_template, request\napp = Flask(__name__, template_folder='view')\n\n\n@app.route('/')\ndef second():\n test_dic = {\n 'a1': 100,\n 'a2': '문자열' # '문자열' is Korean for 'string'; kept to demonstrate non-ASCII handling\n }\n\n json_data = json.dumps(test_dic, indent=4, ensure_ascii=False)\n # indent sets the indentation, ensure_ascii=False keeps the Korean text readable\n\n print(json_data)\n\n python_data = json.loads(json_data)\n print(python_data)\n print(type(json_data))\n print(type(python_data))\n return json_data\n\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1', port=80, debug=True)\n","repo_name":"junu126/FlaskWebStudy","sub_path":"6. 템플릿 사용하기/jsonTest.py","file_name":"jsonTest.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"13408935040","text":"from confluent_kafka import Consumer\n\n\nc = Consumer({\n 'bootstrap.servers': 'b-1.demo-cluster-1.hu0has.c6.kafka.us-east-2.amazonaws.com:9092',\n 'group.id': 'mygroup2',\n 'auto.offset.reset': 'earliest'\n})\n\nc.subscribe(['greetings'])\n\nwhile True:\n # poll the broker for new data, waiting up to 1 second\n msg = c.poll(1.0)\n\n if msg is None:\n continue\n if msg.error():\n print(\"Consumer error: {}\".format(msg.error()))\n continue\n\n print('Received message: {}'.format(msg.value().decode('utf-8')))\n\nc.close()\n","repo_name":"nodesense/cts-data-engineering-feb-2022","sub_path":"kafka/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"30"} +{"seq_id":"42682628703","text":"# Download a novel and save it to a specified directory\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport time\nimport gzip\nfrom urllib.error import HTTPError,URLError\nimport random\n\n#req_header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.92 Safari/537.36'}\n'''\nreq_header={\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Cookie':'_abcde_qweasd=0; __guid=270189431.88115020722726830.1582038501555.4824; _abcde_qweasd=0; bdshare_firstime=1582038502825; Hm_lvt_169609146ffe5972484b0957bd1b46d6=1582038503; monitor_count=3; Hm_lpvt_169609146ffe5972484b0957bd1b46d6=1582038564',\n 'Host':'www.xbiquge.la',\n 'Upgrade-Insecure-Requests':'1',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n}# headers for www.xbiquge.la (Xinbiquge)\n'''\nreq_header={\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate',\n 'Accept-Language':'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Cookie':'__guid=130189705.926614980802380000.1582362419987.4084; clickbids=34538; Hm_lvt_6dfe3c8f195b43b8e667a2a2e5936122=1582362452; Hm_lvt_c979821d0eeb958aa7201d31a6991f34=1582362421,1582362456; monitor_count=4; Hm_lpvt_6dfe3c8f195b43b8e667a2a2e5936122=1582362481; Hm_lpvt_c979821d0eeb958aa7201d31a6991f34=1582362482',\n 'Host':'www.biquge.info',\n 'Upgrade-Insecure-Requests':'1',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n}# headers for www.biquge.info (Biquge)\n\n#iplist=['123.163.96.80:9999','58.247.127.145:53281','183.196.168.194:9000','123.139.56.238:9999','119.57.108.109:53281']\n\n# Open the URL and return the whole page\ndef url_open(url):\n '''\n proxy_support=urllib.request.ProxyHandler({'http':random.choice(iplist)})\n opener=urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n '''\n request = urllib.request.Request(url,headers=req_header)\n response = urllib.request.urlopen(request)\n content = gzip.decompress(response.read())\n data = content.decode('utf-8','ignore')\n\n return data\n\nclass novel_catch(object):\n\n def __init__(self,url):# __init__ has two underscores on each side\n self.url = url\n self.chapter_names = []# holds the chapter names\n self.chapter_hrefs = []# holds the chapter links\n\n # Get the chapter names and chapter URLs\n def get_catalogue(self):\n response=url_open(self.url)\n soup = BeautifulSoup(response, \"html.parser\")\n #chapter_hrefs=re.findall('/\\d\\d/\\d\\d\\d\\d\\d/\\d\\d\\d\\d\\d\\d\\d\\.html',soup)\n a=soup.find('div',id='list')\n #print(a)\n for i in a.dl:\n if i.name=='dd':\n self.chapter_names.append(i.string)\n self.chapter_hrefs.append('http://www.biquge.info/34_34538/'+i.a['href'])\n self.novel_name=soup.find('div',id='info').h1.string\n\n # Get the content of the given chapter\n def get_chapter_text(self,url):\n try:\n response=url_open(url)\n soup = BeautifulSoup(response, \"lxml\")\n a=soup.find('div',id='content')\n text=''\n for i in a:\n if i.name!='img' and i.name!='a' and i.name!='p' and i.name!='br' and i.name!='strong' and i.name!='h1' and i.name!='script' and i.name!='div' and i.name!='table' and i.name!='td' and i.name!='tr':\n text+=(i.string+'\\n\\n')\n\n return text\n except HTTPError as e:\n print(e.code)\n print(\"Failed to get the chapter content, retrying in 1 second!\")\n time.sleep(1)\n return self.get_chapter_text(url)\n except URLError as e:\n print(e.reason)\n print(\"Failed to get the chapter content, retrying in 1 second!\")\n time.sleep(1)\n return self.get_chapter_text(url)\n except Exception as e:\n print(e)\n print(\"Failed to get the chapter content, retrying in 1 second!\")\n time.sleep(1)\n return self.get_chapter_text(url)\n\n # Write to the txt file\n def writer(self,name,path,text1):\n with open(path,'a',encoding='utf-8') as f:\n f.write(name+'\\n')\n f.write(text1)\n f.write('\\n\\n')\n\n\nif __name__ == \"__main__\":\n novel_1=novel_catch('http://www.biquge.info/34_34538/')\n\n novel_1.get_catalogue()\n\n for i in range(len(novel_1.chapter_names)):\n name=novel_1.chapter_names[i]\n text=str(novel_1.get_chapter_text(novel_1.chapter_hrefs[i]))\n novel_1.writer(name,'F:\학습자료\프로그램설계\python练习代码\\\\'.replace('학습자료\프로그램설계','学习资料\程序设计')+novel_1.novel_name+'.txt',text)\n print(name+'\tdownload complete',i)\n time.sleep(random.choice([0.3,1,1.5,2]))\n\n print('successful')\n # Open the book's table of contents to get the chapter names and link addresses\n #catalogue_catch()\n","repo_name":"Jasoncnnn/test","sub_path":"test/novel_catch1.py","file_name":"novel_catch1.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"14013929490","text":"#!python\n\ndef scons():\n Import('env')\n\n libs = ['daos', 'daos_common', 'daos_tier', 'daos_hl', 'crt',\n 'mpi', 'uuid', 'cmocka', 'pmem']\n\n denv = env.Clone()\n\n denv.Append(CPPPATH = ['#/src/tests/'])\n test = denv.Program('daos_hl_test', Glob('*.c'), LIBS = libs)\n denv.Install('$PREFIX/bin/', test)\n\nif __name__ == 'SCons.Script':\n scons()\n","repo_name":"mchaarawi/daos_hl","sub_path":"src/tests/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":371,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"39995423679","text":"import wolframalpha\r\n#Enter your API Key\r\nclient = wolframalpha.Client(\"Your API Key from Wolframalpha\")\r\n\r\nimport wikipedia\r\n\r\nimport PySimpleGUI as ai\r\nimport pyttsx3\r\n#import PyPDF2\r\n#from tkinter.filedialog import *\r\n\r\n#window color scheme\r\nai.theme('DarkTeal')\r\n#window layout\r\nlayout = [ [ai.Text('J.A.S.S.I : what would you like me to do ?')],\r\n [ai.Text('A.I User'), ai.InputText()],\r\n [ai.Button('Search'), ai.Button('Exit')] ]\r\n\r\n\r\n#window execution\r\nwindow = ai.Window('J.A.S.S.I - Created by Rajveer Narang ', layout)\r\n# Event Loop to process \"events\" and get the \"values\" of the inputs\r\nwhile True:\r\n event, values = window.read()\r\n if event in (None, 'Cancel'):\r\n break\r\n res = client.query(values[0])\r\n wolfram_res = next(res.results).text\r\n wiki_res = wikipedia.summary(values[0], sentences=2)\r\n audioplayer = pyttsx3.init()\r\n audioplayer.say(wolfram_res+wiki_res)\r\n ai.PopupNonBlocking(\"Result from Wolfram :\"+wolfram_res, \"Result from Wikipedia: \"+wiki_res)\r\n audioplayer.runAndWait()\r\n print (values[0])\r\n\r\nwindow.close()\r\n","repo_name":"RajveerNarang/Python-Mini-Projects","sub_path":"J.A.S.S.I_ai/Jassi_ai.py","file_name":"Jassi_ai.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"25469095431","text":"#import needed modules\nimport pandas as pd\nimport sqlite3\n\ndef get_db_connection():\n conn = sqlite3.connect('patients.db')\n conn.row_factory = sqlite3.Row\n return conn\n\n#getting the connection to the database and using a sql command to grab\n#all the data from that table, storing it in the variable 'patientListSql'\ndb = get_db_connection()\npatientListSql = db.execute('SELECT * FROM patient_table').fetchall()\npatientListSql\n\n# saves the data to a dataframe using pandas\ndf = pd.DataFrame(patientListSql)\ndf\n\n# renames the columns to the names given in the original file\ndf.columns = ['mrn', 'firstname', 'lastname', 'dob', 'gender', 'insurance', 'ssn', 'phone']\ndf","repo_name":"Shad47486/flask-with-db","sub_path":"connectdb.py","file_name":"connectdb.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"33490931051","text":"def fruitmand_maken(fruit):\n fruitmand = {}\n\n for i in range(len(fruit)):\n fruitmand[len(fruit[i])] = fruit[i]\n\n return fruitmand\n\n\ndef fruitmand_inpakken(fruitstukken):\n fruitmand = []\n\n while len(fruitstukken) != 0:\n fruitmand.append(fruitstukken[min(fruitstukken)])\n fruitstukken.pop(min(fruitstukken))\n\n return fruitmand\n","repo_name":"Dieterdemuynck/Informatica5","sub_path":"13 - Dictionaries/Fruitmand (met strik).py","file_name":"Fruitmand (met strik).py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"15065485538","text":"from data import question_data\nfrom question_model import Question\nfrom quiz_brain import QuizBrain\n\nquestion_bank = list()\n\nfor question in question_data:\n q_text = question[\"question\"]\n q_ans = question[\"correct_answer\"]\n new_q = Question(q_text,q_ans)\n question_bank.append(new_q)\n\nnew_quiz = QuizBrain(question_bank)\nwhile new_quiz.still_has_question():\n new_quiz.next_question()\n\nprint(\"You've completed the quiz.\")\nprint(f\"Your
final score was: {new_quiz.score}/{len(new_quiz.question_list)}\")","repo_name":"iamlohit/100daysofPython","sub_path":"Day 17/quiz-game/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"40097070797","text":"#====================== BEGIN GPL LICENSE BLOCK ======================\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n#======================= END GPL LICENSE BLOCK ========================\n\n# \n\nimport bpy\n\nfrom ...base_generate import SubstitutionRig\n\nfrom .limb_rigs import BaseLimbRig\n\nfrom . import arm, leg, paw\n\n\nRIGS = { 'arm': arm.Rig, 'leg': leg.Rig, 'paw': paw.Rig }\n\n\nclass Rig(SubstitutionRig):\n def substitute(self):\n return [ self.instantiate_rig(RIGS[self.params.limb_type], self.base_bone) ]\n\n\ndef add_parameters(params):\n items = [\n ('arm', 'Arm', ''),\n ('leg', 'Leg', ''),\n ('paw', 'Paw', '')\n ]\n\n params.limb_type = bpy.props.EnumProperty(\n items = items,\n name = \"Limb Type\",\n default = 'arm'\n )\n\n BaseLimbRig.add_parameters(params)\n\n\ndef parameters_ui(layout, params):\n r = layout.row()\n r.prop(params, \"limb_type\")\n\n RIGS[params.limb_type].parameters_ui(layout, params)\n\n\ndef create_sample(obj):\n arm.create_sample(obj, limb=True)\n","repo_name":"MARUI-PlugIn/BlenderXR","sub_path":"blender/release/scripts/addons/rigify/rigs/limbs/super_limb.py","file_name":"super_limb.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"30"} +{"seq_id":"2499174665","text":"import torch\nimport torchvision.transforms as transforms\n\nDEVICE = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\nTRAIN_DIR = \"result/train/\"\nVAL_DIR = \"result/val/\"\nBATCH_SIZE = 4\nLEARNING_RATE = 2e-4\nLAMBDA_IDENTITY = 1\nLAMBDA_CYCLE = 10\nIMG_SIZE = 256\nNUM_WORKERS = 0\nEPOCHS = 400\nLOAD_MODEL = True\nSAVE_MODEL = True\nCHECKPOINT_GEN_xy = \"checkpoint/gen_xy.pth\"\nCHECKPOINT_GEN_yx = \"checkpoint/gen_yx.pth\"\nCHECKPOINT_CRITIC_x = \"checkpoint/critic_x.pth\"\nCHECKPOINT_CRITIC_y = \"checkpoint/critic_y.pth\"\n\ntransform = transforms.Compose([\n transforms.Resize((IMG_SIZE, IMG_SIZE)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n])\n","repo_name":"hank891008/cyclegan","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"} +{"seq_id":"36807959381","text":"# -*- coding: utf-8 -*-\nfrom odoo import api, fields, models\n\nclass Partner(models.Model):\n _inherit = 'res.partner'\n\n instructor = fields.Boolean(default=False)\n session_ids = 
fields.Many2many('openacademy.session', string='Attended Sessions',\n readonly=True)\n\n level = fields.Integer(compute='_get_level', string=\"Level\", store=True)\n\n @api.depends('category_id', 'category_id.name')\n def _get_level(self):\n for partner in self:\n level = []\n for category in partner.category_id:\n if \"Teacher Level\" in category.name:\n level.append(int(category.name.split(' ')[-1]))\n partner.level = max(level) if level else 0\n","repo_name":"svs-odoo/learning","sub_path":"openacademy/models/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"30"} +{"seq_id":"12566752690","text":"import json\nresult = 0\nnumberOfQuestion = 0\nname = input(\"Enter yout name: \")\nwith open('q.json') as json_file:\n data = json.load(json_file)\n for question in data:\n print(question+\": \")\n answer = input()\n if answer == data[question]:\n result = result + 1\n numberOfQuestion = numberOfQuestion + 1\n\noutputFile = open(\"result.txt\", \"w\")\noutputFile.write(name+\": \" +str(result)+\"/\" + str(numberOfQuestion))\noutputFile.close()\n","repo_name":"hanabouz/h1","sub_path":"Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"30"}
<table>
  <thead>
    <tr>
      <th>Name</th>
      <th>Bio</th>
      <th>Last tweet</th>
      <th>Days since last tweet</th>
      <th>Twitter client</th>
      <th>Tweets</th>
      <th>Followers</th>
      <th>Friends</th>
      <th>Days since joined</th>
      <th>Tweets per day</th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>
        {{ name }}
        @{{ screen_name }}
      </td>
      <td>{{ bio }}</td>
      <td>{{ latest_tweet_text }}</td>
      <td>{{ days_since_latest_tweet }}</td>
      <td>{{ twitter_source }}</td>
      <td>{{ tweets_count }}</td>
      <td>{{ followers_count }}</td>
      <td>{{ friends_count }}</td>
      <td>{{ days_since_joined }}</td>
      <td>{{ tweets_per_day }}</td>
    </tr>
  </tbody>
</table>