diff --git "a/1453.jsonl" "b/1453.jsonl" new file mode 100644--- /dev/null +++ "b/1453.jsonl" @@ -0,0 +1,676 @@ +{"seq_id":"65255134","text":"# ---------------------------------------------------------------------\n# Brocade.CER-ADV.get_version\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2013 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# Python modules\nimport re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetversion import IGetVersion\n\n\nclass Script(BaseScript):\n \"\"\"\n Brocade.CER-ADV.get_version\n \"\"\"\n\n name = \"Brocade.CER-ADV.get_version\"\n interface = IGetVersion\n rx_sw_ver = re.compile(\"IronWare\\\\s:\\\\sVersion\\\\s(?P\\\\S+)\", re.MULTILINE | re.DOTALL)\n rx_hw_ver = re.compile(\"System:\\\\sNetIron\\\\s(?P\\\\S+)\", re.MULTILINE | re.DOTALL)\n rx_snmp_ver = re.compile(\n \"Brocade\\\\sNetIron\\\\s(?P\\\\S+)\\\\,.*Version\\\\s+V(?P\\\\S+).+$\"\n )\n\n def execute(self):\n if self.has_snmp():\n try:\n v = self.snmp.get(\"1.3.6.1.2.1.1.1.0\")\n match = self.re_search(self.rx_snmp_ver, v)\n return {\n \"vendor\": \"Brocade\",\n \"platform\": match.group(\"platform\"),\n \"version\": match.group(\"version\"),\n }\n except self.snmp.TimeOutError:\n pass\n\n v = self.cli(\"show version\")\n match1 = self.re_search(self.rx_sw_ver, v)\n match2 = self.re_search(self.rx_hw_ver, v)\n return {\n \"vendor\": \"Brocade\",\n \"platform\": match2.group(\"version\"),\n \"version\": match1.group(\"version\"),\n }\n","sub_path":"sa/profiles/Brocade/CER-ADV/get_version.py","file_name":"get_version.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"352735469","text":"import time\nfrom urllib.request import urlopen\nfrom multiprocessing import Process\n\n\ndef get_urls_to_crawl():\n urls_list = list()\n urls_list.append('http://www.cnn.com/')\n urls_list.append('https://www.foxnews.com/')\n urls_list.append('https://www.bbc.com/')\n urls_list.append('https://www.cnbc.com')\n urls_list.append('https://www.dawn.com')\n return urls_list\n\n\ndef crawl_one_url(url):\n html = urlopen(url)\n txt = html.read()\n\n\nif __name__ == \"__main__\":\n urls_to_crawl = get_urls_to_crawl()\n start = time.time()\n\n processes = []\n for url in urls_to_crawl:\n processes.append(Process(target=crawl_one_url, args=(url,)))\n\n for process in processes:\n process.start()\n\n for process in processes:\n process.join()\n\n elapsed = time.time() - start\n print(f\"\\nURLs downloaded in {elapsed:.2f}s\")\n","sub_path":"asyncio_module/web_crawler_example_multiprocess.py","file_name":"web_crawler_example_multiprocess.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226788604","text":"import math\n\nfrom rx import Observable\nfrom rx.subjects import Subject\n\n# import needed for to_backpressure operator\nimport rxbackpressure\n\n# time value samples recorded by some device\ntime_value_record = Subject()\n\n# separate time and value\ntime = time_value_record.map(lambda pair: pair[0])\nsignal = time_value_record.map(lambda pair: pair[1])\n\n# timebase synchronization\nsync1 = Subject()\nsync2 = Subject()\n\n# synchronize time samples to two timebases\ntime_sync2_bp = time.to_backpressure().zip(sync2.repeat_first(), lambda t, sync_time: t + 
sync_time)\ntime_sync1_bp = time.to_backpressure().zip(sync1.repeat_first(), lambda t, sync_time: t + sync_time)\n\n# pairing time value observables\ntime_sync1_bp.to_observable().zip(signal, lambda t, v: (t, v)).unsafe_subscribe()\ntime_sync2_bp.to_observable().zip(signal, lambda t, v: (t, v)).unsafe_subscribe(print, on_completed=lambda: print('completed'))\n\n# emulating hot observable\nObservable.range(0,100) \\\n .map(lambda v: (float(v)+3.2)/1000) \\\n .map(lambda t: (t, math.sin(t/0.05*2*math.pi))) \\\n .unsafe_subscribe(time_value_record)\nObservable.just(-3.2/1000).unsafe_subscribe(sync1)\nObservable.just(2/1000).unsafe_subscribe(sync2)\n","sub_path":"examples/timevaluepairs.py","file_name":"timevaluepairs.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614232827","text":"\n# 문제 121\n입력저장 = input(\"영문 입력 : \")\nif 입력저장.islower(): #소문자이면 True 아니면 False\n print(입력저장.upper()) #대문자로 변환\nelse:\n print(입력저장.lower()) #소문자로 변환\n\n# 문제 122\n점수 = int(input(\"정수 입력 : \"))\nif 점수 >= 81:\n print(\"A 등급\")\nelif 점수 >= 61:\n print(\"B 등급\")\nelif 점수 >= 41:\n print(\"C 등급\")\nelif 점수 >= 21:\n print(\"D 등급\")\nelse:\n print(\"E 등급\")\n# 문제 123\n환율 = {\"달러\" : 1167, \"앤\" : 1.096, \"유로\" : 1268, \"위안\" : 171}\n금액저장 = input(\"금액과 통화명 입력 : \")\n금액 , 통화명 = 금액저장.split(\" \")\nprint(int(금액) * 환율[통화명], \"원\")\n# 문제 124\n숫자1 = int(input(\"숫자1 입력 : \"))\n숫자2 = int(input(\"숫자2 입력 : \"))\n숫자3 = int(input(\"숫자3 입력 : \"))\n\nprint(max(숫자1, 숫자2, 숫자3))\n# 문제 125\n핸드폰번호 = input(\"핸드폰번호 입력 : \")\n통신사번호 = 핸드폰번호.split(\"-\")[0]\nif 통신사번호 == \"010\":\n print(\"당신은 SKT 입니다\")\nelif 통신사번호 == \"016\":\n print(\"당신은 KT 입니다\")\nelif 통신사번호 == \"019\":\n print(\"당신은 LGU 입니다\")\nelse:\n print(\"알수 없습니다\")\n# 문제 126\n우편번호 = input(\"우편번호 입력 : \")\n우편번호 = 우편번호[0:3]\nif 우편번호 in [\"010\", \"011\", \"012\"]:\n print(\"경북구\")\nelif 우편번호 in [\"014\", \"015\", \"016\"]:\n print(\"도봉구\")\nelse :\n print(\"노원구\")\n# 문제 127\n주민번호 = input(\"주민등록번호 : \")\n주민번호 = 주민번호.split(\"-\")[1]\nif 주민번호[0] == \"1\" or 주민번호[0] == \"3\":\n print(\"남자\")\nelse:\n print(\"여자\")\n# 문제 128\n주민번호 = input(\"주민등록번호 : \")\n뒷자리 = 주민번호.split(\"-\")[1]\nif 0 <= int(뒷자리[1:3]) <= 8:\n print(\"서울입니다\")\nelse:\n print(\"부산입니다\")\n# 문제 129\n주민번호 = input(\"주민등록번호 : \")\n계산1 = int(주민번호[0]) * 2 + \\\n int(주민번호[1]) * 3 + \\\n int(주민번호[2]) * 4 + \\\n int(주민번호[3]) * 5 + \\\n int(주민번호[4]) * 6 + \\\n int(주민번호[5]) * 7 + \\\n int(주민번호[7]) * 8 + \\\n int(주민번호[8]) * 9 + \\\n int(주민번호[9]) * 2 + \\\n int(주민번호[10]) * 3 + \\\n int(주민번호[11]) * 4 + \\\n int(주민번호[12]) * 5\n계산2 = 11 - (계산1%11)\n계산3 = str(계산2)\n\nif 주민번호[-1] == 계산3[-1]:\n print(\"유효한 주민등록번호 입니다\")\nelse:\n print(\"유효하지 않는 주민등록번호 입니다\")\n","sub_path":"문제300/문제 121~130.py","file_name":"문제 121~130.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31206942","text":"#!/usr/bin/python3\n# -*-:coding=utf-8-*-\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport os\nimport re\nimport time\n\ntry:\n from urllib.parse import urlparse\nexcept:\n from urlparse import urlparse\n\nimport pdfkit\nimport requests\nfrom bs4 import BeautifulSoup\n\nhtml_template = \"\"\"\n<!DOCTYPE html>\n<html lang=\"en\">\n<head>\n <meta charset=\"UTF-8\">\n</head>\n<body>\n{content}\n</body>\n</html>\n\"\"\"\n\nclass Crawler(object):\n name = None\n\n def __init__(self, name, url):\n self.name = name\n self.url = url\n self.domain = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(self.url))\n\n def crawl(self, url):\n 
print(url)\n response = requests.get(url)\n return response\n\n def parse_menu(self, response):\n raise NotImplementedError\n\n def parse_body(self, response):\n raise NotImplementedError\n\n def run(self):\n start = time.time()\n options = {\n 'page-size' : 'A4',\n 'margin-top': '0.75in',\n 'margin-right': '0.5in',\n 'margin-bottom': '0.75in',\n 'margin-left': '0.5in',\n 'encoding': 'utf-8',\n 'custom-header': [\n \t('Accept-Encoding', 'gzip')\n ],\n 'outline-depth': 10,\n 'cookie' : [\n \t('cookie-name1', 'cookie-value1'),\n \t('cookie-name2', 'cookie-value2')\n ]\n }\n\n htmls = []\n for index, sub_url in enumerate(self.parse_menu(self.crawl(self.url))) :\n html = self.parse_body(self.crawl(sub_url))\n f_name = '.'.join([str(index), \"html\"])\n with open(f_name, 'wb') as f:\n f.write(html)\n htmls.append(f_name)\n pdfkit.from_file(htmls, self.name + '.pdf', options = options)\n for html in htmls:\n os.remove(html)\n total_time = time.time() - start\n print(u'耗时:%f 秒' % total_time)\n\nclass MyCrawler(Crawler):\n def parse_menu(self, response):\n soup = BeautifulSoup(response.content, 'html.parser')\n menu_tag = soup.find_all(class_ = \"uk-nav uk-nav-side\")[1]\n for li in menu_tag.find_all('li'):\n url = li.a.get('href')\n if not url.startswith(\"http\"):\n url = \"\".join([self.domain, url])\n yield url\n\n def parse_body(self, response):\n try:\n soup = BeautifulSoup(response.content, 'html.parser')\n body = soup.find_all(class_ = 'x-wiki-content')[0]\n\n title = soup.find('h4').get_text()\n center_tag = soup.new_tag(\"center\")\n title_tag = soup.new_tag(\"h1\")\n title_tag.string = title\n center_tag.insert(1, title_tag)\n body.insert(1, center_tag)\n\n html = str(body)\n pattern = \"(.+?)--\d+.html'\n _TESTS = [\n {\n 'url': 'http://www.telemadrid.es/programas/telenoticias-fin-de-semana/cerca-lejos-nuevo-Klapisch-2-2169403070--20191020083747.html',\n 'info_dict': {\n 'id': '6096258464001',\n 'ext': 'mp4',\n 'title': '\\'Tan cerca, tan lejos\\', lo nuevo de Klapisch',\n 'description': 'md5:10ac0514bdbdeeea9de495ae0720c6ff',\n 'thumbnail': r're:^https?://images.telemadrid.es/2019/10/20/programas/telenoticias-fin-de-semana/cerca-lejos-nuevo-Klapisch_2169403070_7342970_1300x813.png$',\n 'timestamp': 1571596692,\n 'upload_date': '20191020'\n }\n },\n {\n 'url': 'http://www.telemadrid.es/programas/telenoticias-fin-de-semana/Doce-detenidos-altercados-registrados-Madrid-2-2169403042--20191020100828.html',\n 'info_dict': {\n 'id': '6096226698001',\n 'ext': 'mp4',\n 'title': 'Doce detenidos en los altercados registrados en el centro de Madrid',\n 'description': 'md5:77a37a10cfe8b8cd595d11bf762e90da',\n 'thumbnail': r're:^https?://images.telemadrid.es/2019/10/20/programas/telenoticias-fin-de-semana/Doce-detenidos-altercados-registrados-Madrid_2169403042_7342376_4000x2666.jpg$',\n 'timestamp': 1571573534,\n 'upload_date': '20191020'\n }\n },\n {\n 'url': 'http://www.telemadrid.es/programas/120-minutos/minutos-Parte-uno-2-2165803426--20191008033109.html',\n 'info_dict': {\n 'id': '6093135605001',\n 'ext': 'mp4',\n 'title': '120 minutos 08.10.2019 (Parte 1)',\n 'description': 'md5:eea20844c4aef07638b53d8f40fe8e23',\n 'thumbnail': r're:^https?://images.telemadrid.es/2019/10/08/programas/120-minutos/minutos-Parte-uno_2165803426_7312298_1920x1080.jpg$',\n 'timestamp': 1570541701,\n 'upload_date': '20191008'\n }\n }\n ]\n\n _VIDEO_BASE = 'http://c.brightcove.com/services/mobile/streaming/index/master.m3u8?videoId='\n\n def _real_extract(self, url):\n display_id = re.match(self._VALID_URL, 
url).groups()\n webpage = self._download_webpage(url, display_id)\n\n video_figure = self._search_regex(r'<figure[^>]+class=\\\"media-video\\\"[^>]+itemtype=\\\"http://schema\\.org/VideoObject\\\"*>(.*?)</figure>', webpage, 'video_figure', flags=re.DOTALL)\n video_id = self._search_regex(r'', video_figure, 'video_id', flags=re.DOTALL)\n name = self._search_regex(r'', video_figure, 'name', flags=re.DOTALL)\n description = self._html_search_regex(r'', video_figure, 'description', flags=re.DOTALL)\n thumbnail = self._search_regex(r'', video_figure, 'thumbnail', flags=re.DOTALL)\n timestamp = self._search_regex(r'', video_figure, 'timestamp', flags=re.DOTALL)\n\n formats = self._extract_m3u8_formats(self._VIDEO_BASE + video_id, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)\n\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'title': name,\n 'description': description,\n 'thumbnail': thumbnail,\n 'timestamp': int_or_none(timestamp),\n 'formats': formats\n }\n","sub_path":"youtube_dl/extractor/telemadrid.py","file_name":"telemadrid.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597400907","text":"#Greeter\n\ndef greet(name):\n #Takes in a name and output a greeting\n print(\"Hi %s\" % name)\n print(\"Nice to see you, %s\" % name)\n print(\"Have a nice day, %s\\n\" % name)\n\npeople = []\n\nfor num in range(0,3):\n name = input(\"Who do you want to greet? \")\n people.append(name)\n greet(name)","sub_path":"ex_6/greeter.py","file_name":"greeter.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396208503","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\ntime = np.asarray([])\ntemperature = np.asarray([])\nmass_fraction = np.asarray([])\nCH2O = np.asarray([])\nCO = np.asarray([])\nHCN = np.asarray([])\nN2O = np.asarray([])\nNO = np.asarray([])\nNO2 = np.asarray([])\nH2O = np.asarray([])\nCO2 = np.asarray([])\n\nwith open('./output_TGA/mole_fractions_gas.txt','r') as File:\n lines = File.readlines()\n \n header = lines[0].split()\n \n iCH2O = header.index('CH2O')\n iCO = header.index('CO')\n iHCN = header.index('HCN')\n iN2O = header.index('N2O')\n iNO = header.index('NO')\n iNO2 = header.index('NO2')\n iH2O = header.index('H2O')\n iCO2 = header.index('CO2')\n \n new_lines = lines[1::3]\n #temp = lines[70::100]\n \n #new_lines.extend(temp)\n\n for line in new_lines:\n time = np.append(time,float(line.split()[0]))\n temperature = np.append(temperature,float(line.split()[1])-273.0)\n mass_fraction = np.append(mass_fraction,100.0*float(line.split()[2]))\n CH2O = np.append(CH2O,float(line.split()[iCH2O]))\n CO = np.append(CO,float(line.split()[iCO]))\n HCN = np.append(HCN,float(line.split()[iHCN]))\n N2O = np.append(N2O,float(line.split()[iN2O]))\n NO = np.append(NO,float(line.split()[iNO]))\n NO2 = np.append(NO2,float(line.split()[iNO2]))\n H2O = np.append(H2O,float(line.split()[iH2O]))\n CO2 = np.append(CO2,float(line.split()[iCO2]))\n\nHMXc = np.asarray([])\nTc = np.asarray([])\nwith open('./output_TGA/mass_fractions_liquid.txt','r') as File:\n lines = File.readlines()\n \n header = lines[0].split()\n \n iHMX = header.index('HMX')\n for line in lines[1:]:\n Tc = np.append(Tc,float(line.split()[1])-273.0)\n HMXc = 
np.append(HMXc,float(line.split()[iHMX])*100.0)\n\n#---------------------------------------------------------------------------------------------\nTexp = np.asarray([])\nm1 = np.asarray([])\nm2 = np.asarray([])\nm3 = np.asarray([])\nm4 = np.asarray([])\nm5 = np.asarray([])\nm6 = np.asarray([])\nm7 = np.asarray([])\nm8 = np.asarray([])\nm9 = np.asarray([])\n\ndsc1 = np.asarray([])\ndsc2 = np.asarray([])\ndsc3 = np.asarray([])\ndsc4 = np.asarray([])\ndsc5 = np.asarray([])\ndsc6 = np.asarray([])\ndsc7 = np.asarray([])\ndsc8 = np.asarray([])\ndsc9 = np.asarray([])\n\nstart = 1\nstep = 1\n\npath_mass_loss = './mass_loss_data'\n\nwith open(os.path.join(path_mass_loss,'HMX_5KPM_1198ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n Texp = np.append(Texp,float(line.split()[0]))\n dsc1 = np.append(dsc1,float(line.split()[2]))\n m1 = np.append(m1,float(line.split()[3])-15.0)\n\nwith open(os.path.join(path_mass_loss,'HMX_5KPM_1180ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc2 = np.append(dsc2,float(line.split()[2]))\n m2 = np.append(m2,float(line.split()[3])-1.0)\n \nwith open(os.path.join(path_mass_loss,'HMX_5KPM_1104ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc3 = np.append(dsc3,float(line.split()[2]))\n m3 = np.append(m3,float(line.split()[3])-8.0)\n\nwith open(os.path.join(path_mass_loss,'HMX_10KPM_1050ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc4 = np.append(dsc4,float(line.split()[2])-2.5)\n m4 = np.append(m4,float(line.split()[3]))\n\nwith open(os.path.join(path_mass_loss,'HMX_10KPM_1024ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc5 = np.append(dsc5,float(line.split()[2])-2.2)\n m5 = np.append(m5,float(line.split()[3])-7.0)\n \nwith open(os.path.join(path_mass_loss,'HMX_10KPM_1094ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc6 = np.append(dsc6,float(line.split()[2])-2.1)\n m6 = np.append(m6,float(line.split()[3])-6.0)\n \n#with open(os.path.join(path_mass_loss,'HMX_15KPM_1176ug.txt'),'r') as File:\n# lines = File.readlines()\n# lines = lines[start::step]\n# for line in lines:\n# dsc7 = np.append(dsc7,float(line.split()[2]))\n# m7 = np.append(m7,float(line.split()[3])+3.0)\n\nwith open(os.path.join(path_mass_loss,'HMX_15KPM_1158ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc8 = np.append(dsc8,float(line.split()[2])-7.2)\n m8 = np.append(m8,float(line.split()[3])+1.0)\n \nwith open(os.path.join(path_mass_loss,'HMX_15KPM_1142ug.txt'),'r') as File:\n lines = File.readlines()\n lines = lines[start::step]\n for line in lines:\n dsc9 = np.append(dsc9,float(line.split()[2])-7.2)\n m9 = np.append(m9,float(line.split()[3])+3.0)\n\nm5_average = np.mean([m1,m2,m3],axis=0)\nm10_average = np.mean([m4,m5,m6],axis=0)\nm15_average = np.mean([m8,m9],axis=0)\n\ndsc5_average = np.mean([dsc1,dsc2,dsc3],axis=0)\ndsc10_average = np.mean([dsc4,dsc5,dsc6],axis=0)\ndsc15_average = np.mean([dsc8,dsc9],axis=0)\n#---------------------------------------------------------------------------------------------\n\nT_start = 260.0\nT_end = 300.0\n\nfig, ax1 = 
plt.subplots()\nax1.plot(temperature,100.0*CH2O,label='CH2O',marker=\">\")\nax1.plot(temperature,100.0*CO,label='CO',marker=\"p\")\nax1.plot(temperature,100.0*HCN,label='HCN',marker=\"<\")\nax1.plot(temperature,100.0*N2O,label='N2O',marker=\"D\")\nax1.plot(temperature,100.0*NO,label='NO',marker=\"^\")\nax1.plot(temperature,100.0*NO2,label='NO2',marker=\"o\")\nax1.plot(temperature,100.0*H2O,label='H2O',marker=\"s\")\nax1.plot(temperature,100.0*CO2,label='CO2',marker=\"d\")\n\nax1.ticklabel_format(style='sci',axis='y',scilimits=(-1,1))\nax1.set_xlim(T_start,T_end)\nax1.set_xlabel('Temperature($^o$C)')\nax1.set_ylabel('Mole fraction (%)')\nax1.legend(loc='upper center',bbox_to_anchor=(0.5,1.15),ncol=4)\n\n#ax2 = ax1.twinx()\n#ax2.plot(temperature,mass_fraction,label='TGA',color='black')\n\n#ax2.plot(Texp,m5_average,label='TGA',marker=\"*\",color='black')\n#ax2.plot(Texp,m10_average,label='TGA',marker=\"*\",color='black')\n#ax2.plot(Texp[::4],m15_average[::4],label='TGA',marker='*',color='black')\n\n#ax2.plot(Tc,HMXc,color='magenta')\n#ax2.legend()\n#ax2.set_ylabel('Condensed-phase mass (%)')\nplt.savefig('./output_TGA/Gases.pdf')\n\nfig, ax1 = plt.subplots()\nax1.plot(time,temperature)\nax1.set_ylabel('Temperature($^o$C)')\nax1.set_xlabel('time (s)')\nplt.savefig('./output_TGA/Temperature.pdf')\n","sub_path":"plots_TGA.py","file_name":"plots_TGA.py","file_ext":"py","file_size_in_byte":6558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343918637","text":"class Solution:\n def firstUniqChar(self, s: str) -> int:\n if not s: return -1\n dic ={}\n for char in s:\n if char in dic:\n dic[char] += 1\n else:\n dic[char] = 1\n for key in dic:\n if dic[key] == 1: return s.index(key)\n return -1\n","sub_path":"Week_06/387. 字符串中的第一个唯一字符.py","file_name":"387. 
字符串中的第一个唯一字符.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"29916402","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nimport math\nwidth = 600\nheight = 600\nlistPoint = []\n\n\ndef put4pixel(xc, yc, x, y):\n glVertex3i(x+xc, y+yc, 0)\n glVertex3i(x+xc, -y+yc, 0)\n glVertex3i(xc-x, yc-y, 0)\n glVertex3i(xc-x, yc+y, 0)\n\n\ndef ElipMidPoint(xc, yc, a, b):\n a2, b2 = a*a, b*b\n x, y = 0, b\n p = b2-a2*b+(1/4)*a2\n x0 = (int)(a2/math.sqrt(a2+b2))\n y0 = (int)(b2/math.sqrt(a2+b2))\n glBegin(GL_POINTS)\n while x <= x0:\n put4pixel(xc, yc, x, y)\n if p < 0:\n p += (2*x+3)*b2\n else:\n p += (2*x+3)*b2-2*a2*(y-1)\n y -= 1\n x += 1\n\n x, y = a, 0\n p = a2-a*b2+(1/4)*b2\n while(y <= y0):\n put4pixel(xc, yc, x, y)\n if p < 0:\n p += a2*(2*y+3)\n else:\n p += (2*y+3)*a2-2*b2*(x-1)\n x -= 1\n y += 1\n glEnd()\n\n\ndef myDisplay():\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n if len(listPoint) == 3:\n x1, y1 = listPoint[0][0], listPoint[0][1]\n x2, y2 = listPoint[1][0], listPoint[1][1]\n x3, y3 = listPoint[2][0], listPoint[2][1]\n a = math.sqrt((x2-x1)**2+(y2-y1)**2)\n b = math.sqrt((x3-x1)**2+(y3-y1)**2)\n ElipMidPoint(listPoint[0][0], listPoint[0][1], (int)(a), (int)(b))\n glFlush()\n\n\ndef MouseEventHandler(button, state, x, y):\n if button == GLUT_LEFT_BUTTON and state == GLUT_UP:\n if len(listPoint) == 3:\n listPoint.clear()\n listPoint.append([(int)(x-width/2), (int)(height/2-y)])\n print(listPoint)\n glutPostRedisplay()\n\n\nif __name__ == \"__main__\":\n glutInit()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(width, height)\n glutInitWindowPosition(10, 10)\n glutCreateWindow(\"Lab1-ElipBres\")\n gluOrtho2D(-width/2, height/2, -height/2, width/2)\n glutDisplayFunc(myDisplay)\n glutMouseFunc(MouseEventHandler)\n glutMainLoop()\n","sub_path":"lab1_le_van_tan_19it049/ElipMidPoint.py","file_name":"ElipMidPoint.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334599157","text":"#coding=utf-8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n# Copyright 2008 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport re\nimport wsgiref.handlers\nfrom google.appengine.api import xmpp\nfrom google.appengine.api.labs import taskqueue\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import xmpp_handlers\nfrom django.utils import simplejson\nfrom models import Channel\nfrom models import Log\nfrom models import Person\nfrom models import Room\n\n\nclass XmppController(xmpp_handlers.CommandHandler):\n \"\"\"Handler class for all XMPP activity.\"\"\"\n\n _CHANNEL_SIZE_LIMIT = 100 # Max people in a channel.\n _LIST_LIMIT = 20 # Max channels to show for /list command.\n _NAME_LIMIT = 20 # Max names to show for /name command.\n\n def Broadcast(self, channel, message, 
system=False, exclude_self=True):\n \"\"\"Queues a broadcast message.\n\n Args:\n channel: The target channel.\n message: The message to broadcast.\n system: Whether this is a system message (i.e. not a chat).\n exclude_self: Whether to exclude self.person from the broadcast.\n \"\"\"\n if system:\n lines = ['* ' + l for l in message.split('\\n')]\n message = '\\n'.join(lines)\n params = {\n 'channel': channel.name,\n 'message': message,\n }\n if exclude_self:\n params['skip'] = self.person.jid()\n taskqueue.Task(url='/task/broadcast', params=params).add('chats')\n self.Log(channel, message)\n\n def Log(self, channel, body, person=None, system=False):\n \"\"\"Log a message.\n\n Args:\n channel: The channel object the message was sent to.\n body: The body of the message.\n person: The person who sent the message. Defaults to self.person.\n system: Whether this is a system log (i.e. not a chat).\n\n You should not use person and system at the same time.\n \"\"\"\n if person and system:\n raise RuntimeError('You can\\'t use person and system here')\n if not system:\n if not person: person = self.person\n log = Log(channel=channel.name,\n user=person.user.email(),\n body=body)\n else:\n log = Log(channel=channel.name,\n system=True,\n body=body)\n log.put()\n\n def help_command(self, msg):\n channel_rx = '/#' + Channel.CHANNEL_NAME_REGEX + '/'\n lines = [\n '* 支援的命令如下:',\n '* /help * 取得本說明文件',\n '* /newroom * 建立新房間物件',\n '* /join # * 加入頻道,若無該頻道則創建一個新的',\n '* /look [who] * 看週遭或是人',\n '* /gossip YOUR MESSAGE * 送訊息到頻道',\n '* /nickname [NICKNAME] * 取得或是設定暱稱',\n '* /leave * 離開頻道',\n '* /list * 列出所有頻道名稱',\n '* /who [#] * 看同頻道內有誰,有點像 /look',\n '* /me * 顯示我的資訊',\n '* ',\n ('* 頻道名稱大抵是用英文,語法要符合 %s; 別忘了前面要加 #' %\n channel_rx),\n ]\n msg.reply(u'\\n'.join(lines))\n\n def newroom_command(self, msg):\n json_decoder = simplejson.decoder.JSONDecoder()\n msg.reply(u'%s 創建了新房間 %s' % msg.arg)\n #room_json = json_decoder.decode(msg.arg)\n #msg.reply(room_json)\n\n def look_command(self, msg):\n channel = self.person.channel\n if not channel:\n msg.reply(u'* 您應該先進頻道內再用此一語法查詢.')\n return\n if not msg.arg:\n q = Person.all().filter('channel =', channel)\n else:\n q = Person.all().filter('channel =', channel).filter('name =', msg.arg)\n people = q.fetch(self._NAME_LIMIT + 1)\n lines = []\n for p in people:\n lines.append(u'*** %s email 是 %s' % (p.name, p.user.email()))\n msg.reply(u'\\n'.join(lines))\n\n def join_command(self, msg):\n m = re.match(r'^#(?P<channel>' + Channel.CHANNEL_NAME_REGEX + ')$',\n msg.arg)\n if not m:\n msg.reply(u'* /join 語法錯誤')\n return\n name = m.group('channel')\n if self.person.channel and (self.person.channel.name == name):\n msg.reply(u'* 你已經在頻道 #%s 中!' 
% name)\n return\n\n # Leave the existing channel, and tell them about it.\n if self.person.channel:\n old = self.person.channel\n message = '%s has left %s' % (self.person, old)\n self.Broadcast(old, message, system=True)\n self.Log(old, message, system=True)\n self.person.channel = None\n taskqueue.Task(url='/task/update-channel-stats',\n params={'channel': old.name}).add('stats')\n\n channel = Channel.ChannelByName(name, create=True)\n if channel.num_members >= self._CHANNEL_SIZE_LIMIT:\n msg.reply(u'* 抱歉,頻道 %s 內已經達到容量上限(%d/%d)人' %\n (channel, channel.num_members, self._CHANNEL_SIZE_LIMIT))\n return\n self.person.channel = channel\n self.person.put()\n msg.reply(u'* 你已經加入 %s' % channel)\n message = (u'%s 已經加入 %s' % (self.person, channel))\n self.Broadcast(channel, message, system=True)\n self.Log(channel, message, system=True)\n taskqueue.Task(url='/task/update-channel-stats',\n params={'channel': channel.name}).add('stats')\n\n def leave_command(self, msg):\n if not self.person.channel:\n msg.reply(u'* 你並不在頻道內!')\n else:\n message = (u'%s 已經離開 %s' % (self.person, self.person.channel))\n self.Broadcast(self.person.channel, message, system=True)\n self.Log(self.person.channel, message, system=True)\n\n name = self.person.channel.name\n self.person.channel = None\n self.person.put()\n msg.reply(u'* 你已經離開 #%s' % name)\n taskqueue.Task(url='/task/update-channel-stats',\n params={'channel': name}).add('stats')\n\n def list_command(self, msg):\n \"\"\"Handle /list commands.\"\"\"\n lines = []\n q = Channel.all().order('-num_members').filter('num_members >', 0)\n channels = q.fetch(self._LIST_LIMIT + 1)\n if not len(channels):\n msg.reply(u'* 沒有任何頻道!')\n return\n if len(channels) <= self._LIST_LIMIT:\n # Show all, sorted by channel name.\n channels.sort(key=lambda c: c.name)\n lines.append('* 所有頻道清單如下:')\n else:\n # Show the top N channels, sorted by num_members.\n channels.pop()\n lines.append('* 頻道數超過 %d; 底下是最受歡迎的清單:' %\n self._LIST_LIMIT)\n for c in channels:\n if c.num_members == 1:\n count = '1 個人'\n else:\n count = '%d 個人' % c.num_members\n s = '* - %s (%s)' % (c, count)\n lines.append(s)\n msg.reply(u'\\n'.join(lines))\n\n def who_command(self, msg):\n m = re.match(r'^(#(?P<channel>' + Channel.CHANNEL_NAME_REGEX + '))?$',\n msg.arg)\n if not m:\n msg.reply(u'* /who 後面的頻道名稱似乎有誤')\n return\n if m.group('channel'):\n channel = Channel.ChannelByName(m.group('channel'), create=False)\n if not channel:\n msg.reply(u'* 沒有您要查的頻道: #%s' % m.group('channel'))\n return\n else:\n channel = self.person.channel\n if not channel:\n msg.reply(u'* 您應該先進頻道內再用此一語法查詢.')\n return\n q = Person.all().filter('channel =', channel)\n people = q.fetch(self._NAME_LIMIT + 1)\n if len(people) <= self._NAME_LIMIT:\n people = people[0:self._NAME_LIMIT]\n names = sorted([p.name for p in people])\n msg.reply(u'* 在 %s 頻道內的人有: %s' % (channel, ', '.join(names)))\n else:\n msg.reply(u'* 在頻道 %s 的人數超過 %d' % (channel, self._NAME_LIMIT))\n\n def me_command(self, msg):\n msg.reply(u'*** 您的暱稱是 %s, email 是 %s' % (self.person.name, self.person.user.email()))\n\n def nickname_command(self, msg):\n if msg.arg:\n self.person.setName(msg.arg)\n msg.reply(u'* 您設定新暱稱為 %s' % msg.arg)\n else:\n msg.reply(u'* 您的暱稱是 %s' % self.person.name)\n\n def gossip_command(self, msg):\n # gossip to your channel, but only if you're in a channel.\n channel = self.person.channel\n if channel:\n self.Broadcast(channel, u'%s gossip: %s' % (self.person, msg.arg))\n else:\n msg.reply(u'* 您需要先加入(/join)頻道才能送訊息.')\n\n def text_message(self, msg):\n \"\"\"Handle plain 
messages.\"\"\"\n # Chat, but only if you're in a channel.\n channel = self.person.channel\n if channel:\n self.Broadcast(channel, u'%s 說: %s' % (self.person.name, msg.body))\n else:\n msg.reply(u'* 你必須在某頻道內說話才有意義.')\n\n def message_received(self, msg):\n \"\"\"Handle all messages; overrides CommandHandlerMixin.\"\"\"\n logging.debug('%s sent \"%s\"', msg.sender, msg.body)\n\n match = re.match(r'^([^/]+)(/.*)?$', msg.sender)\n if not match:\n msg.reply(u'* 您正在用奇怪的帳號!')\n return\n self.person = Person.PersonByEmail(match.group(1))\n if not self.person:\n msg.reply(u'* 抱歉,您是誰?')\n return\n\n super(XmppController, self).message_received(msg)\n\n\ndef main():\n app = webapp.WSGIApplication([\n ('/_ah/xmpp/message/chat/', XmppController),\n ], debug=True)\n wsgiref.handlers.CGIHandler().run(app)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"controllers/xmpp.py","file_name":"xmpp.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192740277","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This file is part of Python Challenge Solutions\n# https://github.com/scorphus/PythonChallengeSolutions\n\n# Licensed under the BSD-3-Clause license:\n# https://opensource.org/licenses/BSD-3-Clause\n# Copyright (c) 2018, Pablo S. Blum de Aguiar \n\n# http://www.pythonchallenge.com/pc/return/mozart.html\n\nfrom PIL import Image # pip install pillow\nfrom base64 import encodebytes\nfrom urllib.request import Request, urlopen\n\nurl = 'http://www.pythonchallenge.com/pc/return/mozart.gif'\nauth = encodebytes(b'huge:file').decode().rstrip()\nheaders = {'Authorization': f'Basic {auth}'}\n\nimage = Image.open(urlopen(Request(url=url, headers=headers))).convert('RGB')\nnew_image = Image.new(image.mode, (2 * image.width, image.height))\n\nfor y in range(image.height):\n X = iter(range(image.width))\n row = list()\n for x in X:\n row.append(image.getpixel((x, y)))\n if row[-1][0] == row[-1][2] == 255 and row[-1][1] == 0:\n threshold = image.width - x\n break\n for x, pixel in enumerate(row):\n new_image.putpixel((threshold + x, y), pixel)\n for x in X:\n new_image.putpixel((threshold + x, y), image.getpixel((x, y)))\n\nnew_image.save('16-mozart.png', 'PNG')\nprint('Open 16-mozart.png')\n","sub_path":"16-mozart.py","file_name":"16-mozart.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"17218757","text":"import unittest\nfrom tempoAutomationTest import parse_bom_line\n\nclass TestMPN(unittest.TestCase):\n\n def test_format_1(self):\n bom_line = \"TSR-1002:Panasonic:A1,D2\"\n self.assertEqual(parse_bom_line(bom_line, {\"MPN\":'TSR-1002','Manufacturer':'Panasonic','ReferenceDesignators':['A1','D2']}))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"tempo/tempoTest.py","file_name":"tempoTest.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"516130954","text":"__author__ = 'Joe Linn'\n\nimport pylastica.index\n\n\nclass Percolator(object):\n\n def __init__(self, index):\n \"\"\"\n\n @param index:\n @type index: pylastica.index.Index\n \"\"\"\n assert isinstance(index, pylastica.index.Index), \"index must be an instance of Index: %r\" % index\n self._index = index\n\n def register_query(self, name, query):\n \"\"\"\n Register a percolator query\n @param name: query name\n @type name: str\n @param 
query:\n @type query: str or pylastica.query.Query or pylastica.query.AbstractQuery\n @return:\n @rtype: pylastica.response.Response\n \"\"\"\n path = '%s/.percolator/%s' % (self._index.name, name)\n query = pylastica.query.Query.create(query)\n return self._index.client.request(path, pylastica.request.Request.PUT, query.to_dict())\n\n def unregister_query(self, name):\n \"\"\"\n Remove a percolator query\n @param name: name of the query\n @type name: str\n @return:\n @rtype: pylastica.response.Response\n \"\"\"\n path = '_percolator/%s/%s' % (self._index.name, name)\n return self._index.client.request(path, pylastica.request.Request.DELETE)\n\n def match_doc(self, doc, query=None):\n \"\"\"\n Match a document to percolator queries\n @param doc:\n @type doc: pylastica.document.Document\n @param query:\n @type query: str or pylastica.query.Query or pylastica.query.AbstractQuery\n @return:\n @rtype: pylastica.response.Response\n \"\"\"\n path = \"%s/type/_percolate\" % self._index.name\n data = {'doc': doc.data}\n if query is not None:\n query = pylastica.query.Query.create(query)\n data['query'] = query.query\n response = self._index.client.request(path, data=data)\n data = response.data\n return data['matches'] if 'matches' in data else []\n\n @property\n def index(self):\n \"\"\"\n\n @return:\n @rtype: pylastica.index.Index\n \"\"\"\n return self._index\n","sub_path":"pylastica/percolator.py","file_name":"percolator.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339747495","text":"import tensorflow as tf\nimport numpy as npy\nimport pickle\nimport datetime\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Flatten, Reshape\nfrom keras.layers import Conv2D, Conv2DTranspose, UpSampling2D,MaxPooling2D\nfrom keras.layers import LeakyReLU, Dropout\nfrom keras.layers import BatchNormalization\nfrom keras.optimizers import Adam, RMSprop, SGD\n\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom PIL import Image\n\nclass DCGAN(object):\n\n def __init__(self,img_rows=32, img_cols=32, img_chanl=3):\n self.image_rows = img_rows\n self.image_columns = img_cols\n self.image_channels = img_chanl\n self.Discriminator = None\n self.Generator = None\n self.Discriminator_Model = None\n self.Adversarial_Model = None\n self.Compressor_Model = None\n\n def generator(self):\n if self.Generator:\n return self.Generator\n self.Generator = Sequential()\n dropout = 0.4\n dimen = 8\n depth = 96\n self.Generator.add(Dense(dimen*dimen*depth,input_dim=3072))\n self.Generator.add(LeakyReLU(alpha=0.05))\n # self.Generator.add(Activation('relu'))\n self.Generator.add(Reshape((dimen, dimen, depth)))\n self.Generator.add(Dropout(dropout))\n\n self.Generator.add(UpSampling2D())\n self.Generator.add(Conv2DTranspose(int(depth/2), 5, padding='same'))\n self.Generator.add(LeakyReLU(alpha=0.05))\n # self.Generator.add(Activation('relu'))\n\n self.Generator.add(UpSampling2D())\n self.Generator.add(Conv2DTranspose(int(depth/4), 5, padding='same'))\n self.Generator.add(LeakyReLU(alpha=0.05))\n # self.Generator.add(Activation('relu'))\n\n self.Generator.add(Conv2DTranspose(int(depth/8), 5, padding='same'))\n self.Generator.add(LeakyReLU(alpha=0.05))\n \n self.Generator.add(Conv2DTranspose(int(depth/16), 5, padding='same'))\n self.Generator.add(LeakyReLU(alpha=0.05))\n # self.Generator.add(Activation('relu'))\n\n 
self.Generator.add(Conv2DTranspose(int(depth/32), 5, padding='same'))\n# self.Generator.add(LeakyReLU(alpha=0.05))\n \n self.Generator.add(Activation('sigmoid'))\n print(\"Generator Summary\")\n self.Generator.summary()\n return self.Generator\n\n\n def discriminator(self):\n if self.Discriminator:\n return self.Discriminator\n self.Discriminator = Sequential()\n depth = 64\n dropout = 0.4\n input_shape = (self.image_rows, self.image_columns, self.image_channels)\n\n self.Discriminator.add(Conv2D(6,5,strides=(2,2),padding='same',input_shape=input_shape))\n self.Discriminator.add(LeakyReLU(alpha=0.05))\n self.Discriminator.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))\n self.Discriminator.add(Dropout(0.25))\n self.Discriminator.add(Conv2D(6,5,strides=(2,2),padding='same'))\n self.Discriminator.add(LeakyReLU(alpha=0.05))\n self.Discriminator.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))\n self.Discriminator.add(Flatten())\n self.Discriminator.add(Dense(120))\n self.Discriminator.add(LeakyReLU(alpha=0.05))\n self.Discriminator.add(Dense(84))\n self.Discriminator.add(LeakyReLU(alpha=0.05))\n self.Discriminator.add(Dense(1))\n self.Discriminator.add(Activation('sigmoid'))\n print(\"Discriminator Summary\")\n self.Discriminator.summary()\n return self.Discriminator\n# \n\n def discriminator_model(self):\n if self.Discriminator_Model:\n return self.Discriminator_Model\n optimizer = RMSprop(lr=0.0002, decay=6e-8) \n self.Discriminator_Model = Sequential()\n self.Discriminator_Model.add(self.discriminator())\n self.Discriminator_Model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return self.Discriminator_Model\n\n def adversarial_model(self):\n if self.Adversarial_Model:\n return self.Adversarial_Model\n optimizer = RMSprop(lr=0.0001, decay=3e-8) \n self.Adversarial_Model = Sequential()\n self.Adversarial_Model.add(self.generator())\n self.Adversarial_Model.add(self.discriminator())\n self.Adversarial_Model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return self.Adversarial_Model\n\n\nclass CIFAR(object):\n\n def __init__(self):\n self.img_rows = 32\n self.img_cols = 32\n self.img_chanl = 3\n\n# self.x_train = input_data.read_data_sets(\"mnist\", one_hot=True).train.images\n# self.x_train = self.x_train.reshape(-1, self.img_rows, self.img_cols, 1).astype(npy.float32)\n airfile = open('./Automobile/automobile_train.pickle', 'rb') \n self.x_train = pickle.load(airfile)\n airfile.close()\n self.x_train = self.x_train.astype(npy.float32)\n\n self.DCGAN = DCGAN()\n self.generator = self.DCGAN.generator()\n self.discriminator = self.DCGAN.discriminator_model()\n self.adversary = self.DCGAN.adversarial_model()\n\n def train(self, train_steps=5000, batch_size=256):\n\n # noise_input = npy.random.uniform(-1,1,size=[16, 784])\n for i in range(train_steps):\n images_train = self.x_train[npy.random.randint(0,self.x_train.shape[0], size=batch_size), :, :, :]\n# print(\"Images train\",npy.shape(images_train))\n if(i==0):\n print(npy.shape(images_train))\n img = images_train[0]/255\n# image = npy.reshape(images_train[0], [self.img_rows, self.img_cols,self.img_chanl])\n# print(images_train[0])\n plt.imshow(img)\n plt.show()\n noise = npy.random.uniform(-1.0, 1.0, size=[batch_size, 3072]) #changed\n images_gen = self.generator.predict(noise)\n# print(\"Images Gen\",npy.shape(images_gen))\n x = npy.concatenate((images_train, images_gen))\n y = npy.ones([2*batch_size, 1])\n y[batch_size:, :] = 0\n d_loss = 
self.discriminator.train_on_batch(x,y)\n\n y = npy.ones([batch_size, 1])\n noise = npy.random.uniform(-1.0, 1.0, size=[batch_size, 3072]) #changed\n a_loss = self.adversary.train_on_batch(noise, y)\n\n log_mesg = \"%d: [D loss: %f, acc: %f]\" % (i, d_loss[0], d_loss[1])\n log_mesg = \"%s [A loss: %f, acc: %f]\" % (log_mesg, a_loss[0], a_loss[1])\n print(log_mesg)\n\n if (i%100==0):\n noisy = npy.random.uniform(-1.0, 1.0, size=[1, 3072]) #changed\n images = self.generator.predict(noisy)\n image = npy.reshape(images, [self.img_rows, self.img_cols,self.img_chanl])\n plt.imshow(image)\n plt.axis('off')\n plt.imsave('/content/drive/My Drive/smai_cifar/autos_gen_cifar.png',image)\n plt.tight_layout()\n plt.show()\n self.generator.save_weights('/content/drive/My Drive/smai_cifar/autos_generator_weights.h5')\n self.discriminator.save_weights('/content/drive/My Drive/smai_cifar/autos_discriminator_weights.h5')\n print('Weights last saved at :')\n print(datetime.datetime.now())\n \n \n \n \n #Saving weights of model\n self.generator.save_weights('/content/drive/My Drive/smai_cifar/autos_generator_weights.h5')\n self.discriminator.save_weights('/content/drive/My Drive/smai_cifar/autos_discriminator_weights.h5')\n\n\nif __name__ == \"__main__\":\n print('Program started at:')\n print(datetime.datetime.now())\n cifar_dcgan = CIFAR()\n cifar_dcgan.train(train_steps=30001, batch_size=256)\n print('Program ended at :')\n print(datetime.datetime.now())","sub_path":"Automobile_model.py","file_name":"Automobile_model.py","file_ext":"py","file_size_in_byte":7950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300548269","text":"import sys\nimport time\nimport logging\n\nfrom server import repository, scrape\n\ndef _parse_sysargv():\n \"\"\"Naively parse command line args into dict.\"\"\"\n return dict(arg.split('=') for arg in sys.argv[1:])\n\ndef _config_logging(opts):\n lvl = getattr(logging, opts.get('--loglevel', 'warn').upper()) \n filename = opts.get('--logfile', 'scrape.log')\n logging.basicConfig(filename=filename, level=lvl)\n\n__opts = _parse_sysargv()\n_config_logging(__opts)\n\n\ndef full_scrape():\n \"\"\"Full scrape of top currencies and their historical prices.\"\"\"\n logging.info('============ Starting full scrape ============')\n scrape.scrape_top_currency_prices()\n time.sleep(5)\n # Now for all the fetched currencies, get their historical prices\n for currency in repository.get_all_currency_ids():\n logging.info('Fetching historical prices for: %s', currency['symbol'])\n scrape.scrape_historical_prices(currency)\n time.sleep(5)\n logging.info('============ Full scrape complete! ============')\n\ndef daily_scrape():\n \"\"\"Partial scrape of top currencies and their most recent prices.\"\"\"\n logging.info('============ Starting daily scrape ============')\n scrape.scrape_top_currency_prices()\n logging.info('============ Daily scrape complete! 
============')\n\n\nif __name__ == '__main__':\n scrape_type = __opts.get('--scrape', 'daily') \n if scrape_type == 'full':\n full_scrape()\n else:\n daily_scrape()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"7405854","text":"#\n# ICRAR - International Centre for Radio Astronomy Research\n# (c) UWA - The University of Western Australia, 2014\n# Copyright by UWA (in the framework of the ICRAR)\n# All rights reserved\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\n\"\"\"\nModule containing the NodeManager, which directly manages DROP instances, and\nthus represents the bottom of the DROP management hierarchy.\n\"\"\"\n\nimport importlib\nimport inspect\nimport logging\nimport os\nimport sys\n\nfrom dfms import droputils\nfrom dfms.lifecycle.dlm import DataLifecycleManager\nfrom dfms.manager import repository\nfrom dfms.manager.drop_manager import DROPManager\nfrom dfms.manager.session import Session\n\n\nlogger = logging.getLogger(__name__)\n\ndef _functionAsTemplate(f):\n args, _, _, defaults = inspect.getargspec(f)\n\n # 'defaults' might be shorter than 'args' if some of the arguments\n # are not optional. In the general case anyway the optional\n # arguments go at the end of the method declaration, and therefore\n # a reverse iteration should yield the correct match between\n # arguments and their defaults\n defaults = list(defaults) if defaults else []\n defaults.reverse()\n argsList = []\n for i, arg in enumerate(reversed(args)):\n if i >= len(defaults):\n # mandatory argument\n argsList.append({'name':arg})\n else:\n # optional with default value\n argsList.append({'name':arg, 'default':defaults[i]})\n\n return {'name': inspect.getmodule(f).__name__ + \".\" + f.__name__, 'args': argsList}\n\nclass NodeManager(DROPManager):\n \"\"\"\n A DROPManager that creates and holds references to DROPs.\n\n A NodeManager is the ultimate responsible of handling DROPs. It does so not\n directly, but via Sessions, which represent and encapsulate separate,\n independent DROP graph executions. 
All DROPs created by the\n different Sessions are also given to a common DataLifecycleManager, which\n takes care of expiring them when needed and replicating them.\n\n Since a NodeManager can handle more than one session, in principle only one\n NodeManager is needed for each computing node, thus its name.\n \"\"\"\n\n def __init__(self, useDLM=True, dfmsPath=None, host=None, error_listener=None,\n enable_luigi=False):\n self._dlm = DataLifecycleManager() if useDLM else None\n self._sessions = {}\n self._host = host\n\n # dfmsPath contains code added by the user with possible\n # DROP applications\n if dfmsPath:\n dfmsPath = os.path.expanduser(dfmsPath)\n if os.path.isdir(dfmsPath):\n if logger.isEnabledFor(logging.INFO):\n logger.info(\"Adding %s to the system path\" % (dfmsPath))\n sys.path.append(dfmsPath)\n\n # Error listener used by users to deal with errors coming from specific\n # Drops in whatever way they want\n if error_listener:\n if isinstance(error_listener, basestring):\n try:\n parts = error_listener.split('.')\n module = importlib.import_module('.'.join(parts[:-1]))\n except:\n logger.exception('Creating the error listener')\n raise\n error_listener = getattr(module, parts[-1])()\n if not hasattr(error_listener, 'on_error'):\n raise ValueError(\"error_listener doesn't contain an on_error method\")\n self._error_listener = error_listener\n\n self._enable_luigi = enable_luigi\n\n def createSession(self, sessionId):\n if sessionId in self._sessions:\n raise Exception('A session already exists for sessionId %s' % (str(sessionId)))\n self._sessions[sessionId] = Session(sessionId, self._host, self._error_listener, self._enable_luigi)\n if logger.isEnabledFor(logging.INFO):\n logger.info('Created session %s' % (sessionId))\n\n def getSessionStatus(self, sessionId):\n return self._sessions[sessionId].status\n\n def quickDeploy(self, sessionId, graphSpec):\n self.createSession(sessionId)\n self.addGraphSpec(sessionId, graphSpec)\n return self.deploySession(sessionId)\n\n def linkGraphParts(self, sessionId, lhOID, rhOID, linkType):\n self._sessions[sessionId].linkGraphParts(lhOID, rhOID, linkType)\n\n def addGraphSpec(self, sessionId, graphSpec):\n self._sessions[sessionId].addGraphSpec(graphSpec)\n\n def getGraphStatus(self, sessionId):\n return self._sessions[sessionId].getGraphStatus()\n\n def getGraph(self, sessionId):\n return self._sessions[sessionId].getGraph()\n\n def deploySession(self, sessionId, completedDrops=[]):\n session = self._sessions[sessionId]\n session.deploy(completedDrops=completedDrops)\n roots = session.roots\n\n # We register the new DROPs with the DLM if there is one\n if self._dlm:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug('Registering new DROPs with the DataLifecycleManager')\n droputils.breadFirstTraverse(roots, lambda drop: self._dlm.addDrop(drop))\n\n # Finally, we also collect the Pyro URIs of our DROPs and return them\n uris = {}\n droputils.breadFirstTraverse(roots, lambda drop: uris.__setitem__(drop.uid, drop.uri))\n return uris\n\n def destroySession(self, sessionId):\n session = self._sessions.pop(sessionId)\n session.destroy()\n\n def getSessionIds(self):\n return self._sessions.keys()\n\n def getGraphSize(self, sessionId):\n session = self._sessions[sessionId]\n return len(session._graph)\n\n def getTemplates(self):\n\n # TODO: we currently have a hardcoded list of functions, but we should\n # load these repositories in a different way, like in this\n # commented code\n #tplDir = os.path.expanduser(\"~/.dfms/templates\")\n #if 
not os.path.isdir(tplDir):\n # logger.warning('%s directory not found, no templates available' % (tplDir))\n # return []\n #\n #templates = []\n #for fname in os.listdir(tplDir):\n # if not os.path.isfile(fname): continue\n # if fname[-3:] != '.py': continue\n #\n # with open(fname) as f:\n # m = imp.load_module(fname[-3:], f, fname)\n # functions = m.list_templates()\n # for f in functions:\n # templates.append(_functionAsTemplate(f))\n\n templates = []\n for f in repository.complex_graph, repository.pip_cont_img_pg, repository.archiving_app:\n templates.append(_functionAsTemplate(f))\n return templates\n\n def materializeTemplate(self, tpl, sessionId, **tplParams):\n # tpl currently has the form \n parts = tpl.split('.')\n module = importlib.import_module('.'.join(parts[:-1]))\n tplFunction = getattr(module, parts[-1])\n\n # invoke the template function with the given parameters\n # and add the new graph spec to the session\n graphSpec = tplFunction(**tplParams)\n self.addGraphSpec(sessionId, graphSpec)\n\n if logger.isEnabledFor(logging.INFO):\n logger.info('Added graph from template %s to session %s with params: %s' % (tpl, sessionId, tplParams))\n","sub_path":"dfms/manager/node_manager.py","file_name":"node_manager.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308514758","text":"\nnumboards = int(input())\n\narr = [int(x) for x in input().split()]\n\narr = tuple(arr)\nheights = []\n\nfor i in range(numboards):\n for q in range(i+1,numboards):\n heights.append(arr[i] + arr[q])\n\nheights = list(set(heights))\nsolheights = [0] * len(heights)\n\n\n\nfor i in range(len(heights)):\n temparr = arr\n temparr = list(temparr)\n temparr.sort() \n while (temparr):\n if (heights[i] - temparr[0] in temparr[1:]):\n solheights[i] += 1 \n temparr.remove(heights[i] - temparr[0])\n del temparr[0]\n else:\n del temparr[0]\n\n\nuniquesolheights = list(set(solheights))\noccur = [0] * len(uniquesolheights)\n\n\nfor i in range(len(uniquesolheights)):\n occur[i] = solheights.count(uniquesolheights[i])\n\nprint(heights)\nprint(solheights)\nprint(max(uniquesolheights), solheights.count(max(uniquesolheights)))\n","sub_path":"Junior/2017/CCC 2017 J5.py","file_name":"CCC 2017 J5.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27302585","text":"import asyncio\r\n\r\nimport asyncpg\r\nimport os\r\nimport uvicorn\r\nfrom starlette.applications import Starlette\r\nfrom starlette.middleware import Middleware\r\nfrom starlette.middleware.cors import CORSMiddleware\r\nfrom starlette.requests import Request\r\nfrom starlette.routing import Mount\r\nfrom starlette.templating import Jinja2Templates\r\nfrom starlette.staticfiles import StaticFiles\r\nfrom starlette.responses import JSONResponse\r\n\r\nmiddleware = [\r\n Middleware(CORSMiddleware, allow_origins=['*'])\r\n]\r\n\r\nroutes = [\r\n Mount('/static', StaticFiles(directory='app/static'), name='static')\r\n]\r\n\r\ntemplates = Jinja2Templates(directory='app/templates')\r\n\r\n\r\nasync def startup():\r\n global conn\r\n print(\"connect\")\r\n conn = await asyncpg.connect(\r\n \"postgres://mxqkomkc:w2w1BetfK154mVvEMfJpuNGAYyqlzVyo@john.db.elephantsql.com:5432/mxqkomkc\")\r\n\r\napp = Starlette(middleware=middleware, on_startup=[startup], routes=routes, debug=True)\r\n\r\n\r\n\r\n@app.route(\"/\")\r\nasync def proc_hom(request: Request):\r\n return 
templates.TemplateResponse('index.html',\r\n {'request': request})\r\n\r\n@app.route(\"/villa\", methods=['GET'])\r\nasync def proc_sen(request: Request):\r\n sentence: str = request.query_params['id']\r\n if not sentence:\r\n return JSONResponse({\"error\": \"no id provided\"}, status_code=400)\r\n else:\r\n try:\r\n id = int(sentence)\r\n except ValueError:\r\n return JSONResponse({\"error\": \"id is not a number\"},\r\n status_code=400)\r\n out = await conn.fetch(\"SELECT * FROM villas WHERE villa_number=$1\", id)\r\n base = []\r\n for val in out:\r\n sd = dict()\r\n for key, v in val.items():\r\n sd[key] = v\r\n base.append(sd)\r\n return templates.TemplateResponse('answer.html',{'request': request, 'data': base, 'results': len(base), 'id': id})\r\n@app.route(\"/name\", methods=['GET'])\r\nasync def proc_sen(request: Request):\r\n sentence: str = request.query_params['id']\r\n if not sentence:\r\n return JSONResponse({\"error\": \"no id provided\"}, status_code=400)\r\n else:\r\n try:\r\n id = str(sentence)\r\n except ValueError:\r\n return JSONResponse({\"error\": \"id is not a name\"},\r\n status_code=400)\r\n out = await conn.fetch(\"SELECT * FROM villas WHERE member_name=$1\", id)\r\n base = []\r\n for val in out:\r\n sd = dict()\r\n for key, v in val.items():\r\n sd[key] = v\r\n base.append(sd)\r\n return templates.TemplateResponse('answer.html',{'request': request, 'data': base, 'results': len(base), 'id': id})\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host='0.0.0.0', port=os.getenv(\"PORT\", 5000))\r\n","sub_path":"app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"123190815","text":"\nfrom genericGoal import GenericGoal\n\n\n\nclass EatNearestEnemyGoal(GenericGoal):\n\n def __init__(self, agentIndex, captureAgent, state, foodsToEat=None):\n self.index = agentIndex\n self.agent = captureAgent\n self.initialState = state\n if not foodsToEat:\n foodsToEat = [self.getClosestFoodOnBoard(state)]\n \n self.foodsToEat = foodsToEat #note, this is initial. it does not change.\n self.legal_positions = None #\n\n def getClosestFoodOnBoard(self, state): \n foodList = self.agent.getFood(state).asList()\n myPos = self.agent.getPosition(state)\n \n foodList.sort(key=lambda x:self.agent.getMazeDistance(myPos, x))\n return foodList[0]\n\n def getFoodsToEatFromState(self, state):\n return [f for f in self.foodsToEat if state.hasFood(f[0], f[1])]\n\n def getCompletionTime(self, state):\n time = 0\n myPos = self.agent.getPosition(state)\n for food in self.getFoodsToEatFromState(state):\n time += self.agent.getMazeDistance(myPos, food)\n myPos = food\n \n return time\n \n def getStartTime(self, state):\n myPos = self.agent.getPosition(state)\n foods = self.getFoodsToEatFromState(state)\n return min([self.agent.getMazeDistance(myPos, x) for x in foods])\n \n def chooseAction(self, agent_idx, state):\n \"\"\" Every goal should implement a movement policy for achieving the target\n state. The policy can be learned during initialization of the goal.\n This method returns the action that the agent should take given the state. \"\"\"\n \n #greedily eat the next closest food. this is generally a safe thing to do, since we're scared of bad guys getting there first\n myPos = self.agent.getPosition(state)\n foods = self.getFoodsToEatFromState(state) \n if not foods:\n # we're out of food to eat! 
give us something to eat!\n foods = self.foodsToEat = [self.getClosestFoodOnBoard(state)]\n \n foods.sort(key=lambda x:self.agent.getMazeDistance(myPos, x))\n \n closest_dist = self.agent.getMazeDistance(myPos, foods[0])\n \n for a in state.getLegalActions(agent_idx):\n new_pos = self.agent.getPosition(state.generateSuccessor(agent_idx, a))\n if self.agent.getMazeDistance(new_pos, foods[0]) < closest_dist:\n return a\n\n \n\n\n","sub_path":"final_contest/goals/eatNearestEnemyGoal.py","file_name":"eatNearestEnemyGoal.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"93242589","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport os\nimport utils\n#utils.execShellCommand(\"rm -rf *.txt\")\nalgs = ['LLF','SJF','WRR','OUR']\nsyss = ['sys0','sys1','sys2']\nforms = ['fct_tcp_out_1']\nprojects = ['P0', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7']\ndebug = False\noffline_syss = ['S0','S1','S2']\noffline_time = 60\noffline_when = 30\nutils.execShellCommand(\"mkdir fct_offline_analysis\")\nfor form in forms:\n for alg in algs:\n outpath=\"fct_offline_analysis/fct_offline_analysis_\"+alg+\".txt\"\n fout = open(outpath,\"w+\")\n for offline_sys in offline_syss:\n for project in projects:\n fct_collect = []\n # 拿到三个系统的同一个用户的所有数据\n project_iperf3_file_list = []\n for sys in syss:\n path = \"./\"+alg+\"/\"+sys+\"/\"+form+\"/out/dat/1\"\n # print(path)\n\n raw_file_list = os.listdir(path) # 得到文件夹下的所有文件名称\n # print(raw_file_list)\n\n for raw_file in raw_file_list: # 遍历文件夹\n if not os.path.isdir(raw_file): # 判断是否是文件夹\n if project in raw_file:\n project_iperf3_file_list.append(\n path+\"/\"+raw_file)\n #print(project)\n #print(raw_file)\n\n #print(project_iperf3_file_list)\n for project_iperf3_file in project_iperf3_file_list :\n raw_dat_list =utils.execShellCommand(\"cat \"+project_iperf3_file+\" | grep receiver\").split(\"\\n\")[:-1]\n #print(raw_dat_list)\n dat_list = []\n for raw_dat in raw_dat_list:\n dat_list.append(raw_dat.split(\" \")[2].strip())\n\n #print(dat_list)\n\n time_list = []\n for dat in dat_list:\n time_list.append(float(dat.split(\"-\")[1].strip()))\n #print(time_list)\n \n fct = []\n fct_temp = 0\n offline_flag = False\n for time in time_list:\n if offline_sys in project_iperf3_file and fct_temp > offline_when and offline_flag == False :\n fct_temp += offline_time\n offline_flag = True\n fct_temp += time\n fct.append(fct_temp)\n fct_collect.append(fct_temp)\n\n fout.write(\"=========================================================\\n\")\n fout.write(project_iperf3_file+\"\\n\"+ str(str(fct))+\"\\n\")\n \n fct_collect.sort()\n if len(fct_collect)<50 :\n print(\"ERROR\")\n exit()\n fout.write(\"=========================================================\\n\")\n fout.write(str(fct_collect))\n fout.write(\"\\n-- analysis || \"+\" alg: \"+alg+\" offline_sys: \"+str(offline_sys)+\" offline_when: \"+str(offline_when)+\" project \"+project+\" fct : \"+str(format(fct_collect[-10],'.2f'))+\" , \" +str(format(fct_collect[-9],'.2f'))+\" , \"+str(format(fct_collect[-8],'.2f'))+\" , \"+str(format(fct_collect[-7],'.2f'))+\" , \"+str(format(fct_collect[-6],'.2f'))+\" , \"+str(format(fct_collect[-5],'.2f'))+\" , \"+str(format(fct_collect[-4],'.2f'))+\" , \"+str(format(fct_collect[-3],'.2f'))+\" , \"+str(format(fct_collect[-2],'.2f'))+\" , \"+str(format(fct_collect[-1],'.2f'))+\"\\n\\n\")\n \n if debug:\n 
exit()\n","sub_path":"testbed/fct_offline.py","file_name":"fct_offline.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306425256","text":"\nclass Solution:\n def maxProfit(self, prices: 'List[int]') -> int:\n largestMargin = 0\n for i, price in enumerate(prices):\n buy = price\n sell = max(prices[i+1:], default=0)\n if sell - buy > largestMargin:\n largestMargin = sell - buy\n \n return largestMargin\n\n\n\nsol = Solution()\nsol.maxProfit([7,6,4,3,1])\n\n","sub_path":"leetcode/Easy/BestTimeToBuyAndSellStock/BestTimeToBuyAndSellStock_attempt1.py","file_name":"BestTimeToBuyAndSellStock_attempt1.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104234887","text":"#!/usr/bin/env python\n\nimport rospy\nimport cv2\nimport os\nfrom custom_msgs.srv import EnableModel\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\n\n# Mock the camera by publishing the same image to a topic\nclass DummyImagePublisher:\n\n NODE_NAME = 'test_images'\n CAMERA = 'left'\n IMAGE_TOPIC = '/camera/{}/image_raw'.format(CAMERA)\n\n # Read in the dummy image and other misc. setup work\n def __init__(self):\n self.image_publisher = rospy.Publisher(self.IMAGE_TOPIC, Image, queue_size=10)\n\n path = os.path.dirname(__file__)\n image = cv2.imread(os.path.join(path, '../assets/buoy.jpg'), cv2.IMREAD_COLOR)\n bridge = CvBridge()\n\n self.image_msg = bridge.cv2_to_imgmsg(image, 'bgr8')\n\n # Publish dummy image to topic every few seconds\n def run(self):\n rospy.init_node(self.NODE_NAME)\n\n # Testing enable_model service\n service_name = 'enable_model_{}'.format(self.CAMERA)\n rospy.wait_for_service(service_name)\n enable_model = rospy.ServiceProxy(service_name, EnableModel)\n\n loop_rate = rospy.Rate(1)\n model_enabled = True\n\n count = 0\n while not rospy.is_shutdown():\n self.image_publisher.publish(self.image_msg)\n\n # Testing enable\n if count % 30 == 0:\n enable_model('buoy', model_enabled)\n model_enabled = not model_enabled\n\n count += 1\n loop_rate.sleep()\n\n\nif __name__ == '__main__':\n DummyImagePublisher().run()\n","sub_path":"onboard/catkin_ws/src/cv/scripts/test_images.py","file_name":"test_images.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376714057","text":"# Copyright (c) 2021 Mira Geoscience Ltd.\n#\n# This file is part of geoapps.\n#\n# geoapps is distributed under the terms and conditions of the MIT License\n# (see LICENSE file at the root of this source code package).\n\nfrom uuid import UUID\n\nfrom geoh5py.workspace import Workspace\n\nrequired_parameters = []\ndefaults = {}\n\ndefault_ui_json = {\n \"title\": \"Octree Mesh Creator\",\n \"geoh5\": \"../../assets/FlinFlon.geoh5\",\n \"objects\": {\n \"enabled\": True,\n \"group\": \"1- Core\",\n \"label\": \"Core hull extent\",\n \"main\": True,\n \"meshType\": [\n \"{202C5DB1-A56D-4004-9CAD-BAAFD8899406}\",\n \"{6A057FDC-B355-11E3-95BE-FD84A7FFCB88}\",\n \"{F26FEBA3-ADED-494B-B9E9-B2BBCBE298E1}\",\n ],\n \"value\": \"{656acd40-25de-4865-814c-cb700f6ee51a}\",\n },\n \"u_cell_size\": {\n \"enabled\": True,\n \"group\": \"2- Core cell size\",\n \"label\": \"Easting (m)\",\n \"main\": True,\n \"value\": 25,\n },\n \"v_cell_size\": {\n \"enabled\": True,\n \"group\": \"2- Core cell size\",\n \"label\": \"Northing (m)\",\n \"main\": 
True,\n \"value\": 25,\n },\n \"w_cell_size\": {\n \"enabled\": True,\n \"group\": \"2- Core cell size\",\n \"label\": \"Vertical (m)\",\n \"main\": True,\n \"value\": 25,\n },\n \"horizontal_padding\": {\n \"enabled\": True,\n \"group\": \"3- Padding distance\",\n \"label\": \"Horizontal (m)\",\n \"main\": True,\n \"value\": 1000.0,\n },\n \"vertical_padding\": {\n \"enabled\": True,\n \"group\": \"3- Padding distance\",\n \"label\": \"Vertical (m)\",\n \"main\": True,\n \"value\": 1000.0,\n },\n \"depth_core\": {\n \"enabled\": True,\n \"group\": \"1- Core\",\n \"label\": \"Minimum Depth (m)\",\n \"main\": True,\n \"value\": 500.0,\n },\n \"ga_group_name\": {\n \"enabled\": True,\n \"group\": \"\",\n \"label\": \"Name:\",\n \"value\": \"Octree_Mesh\",\n },\n \"Refinement A Object\": {\n \"enabled\": True,\n \"group\": \"Refinement A\",\n \"label\": \"Object\",\n \"meshType\": [\n \"{202C5DB1-A56D-4004-9CAD-BAAFD8899406}\",\n \"{6A057FDC-B355-11E3-95BE-FD84A7FFCB88}\",\n \"{F26FEBA3-ADED-494B-B9E9-B2BBCBE298E1}\",\n ],\n \"value\": \"{656acd40-25de-4865-814c-cb700f6ee51a}\",\n },\n \"Refinement A Levels\": {\n \"enabled\": True,\n \"group\": \"Refinement A\",\n \"label\": \"Levels\",\n \"value\": \"4,4,4\",\n },\n \"Refinement A Type\": {\n \"choiceList\": [\"surface\", \"radial\"],\n \"enabled\": True,\n \"group\": \"Refinement A\",\n \"label\": \"Type\",\n \"value\": \"radial\",\n },\n \"Refinement A Distance\": {\n \"enabled\": True,\n \"group\": \"Refinement A\",\n \"label\": \"Distance\",\n \"value\": 1000.0,\n },\n \"Refinement B Object\": {\n \"enabled\": True,\n \"group\": \"Refinement B\",\n \"label\": \"Object\",\n \"meshType\": [\n \"{202C5DB1-A56D-4004-9CAD-BAAFD8899406}\",\n \"{6A057FDC-B355-11E3-95BE-FD84A7FFCB88}\",\n \"{F26FEBA3-ADED-494B-B9E9-B2BBCBE298E1}\",\n ],\n \"value\": \"\",\n },\n \"Refinement B Levels\": {\n \"enabled\": True,\n \"group\": \"Refinement B\",\n \"label\": \"Levels\",\n \"value\": \"0,0,2\",\n },\n \"Refinement B Type\": {\n \"choiceList\": [\"surface\", \"radial\"],\n \"enabled\": True,\n \"group\": \"Refinement B\",\n \"label\": \"Type\",\n \"value\": \"surface\",\n },\n \"Refinement B Distance\": {\n \"enabled\": True,\n \"group\": \"Refinement B\",\n \"label\": \"Distance\",\n \"value\": 1000.0,\n },\n \"run_command\": (\"geoapps.create.octree_mesh\"),\n \"monitoring_directory\": \"\",\n \"conda_environment\": \"geoapps\",\n}\n\nrequired_parameters = []\n\nvalidations = {\n \"title\": {\n \"types\": [str],\n },\n \"geoh5\": {\n \"types\": [str, Workspace],\n },\n \"objects\": {\n \"types\": [str, UUID],\n \"uuid\": [],\n },\n \"u_cell_size\": {\n \"types\": [int, float],\n },\n \"v_cell_size\": {\n \"types\": [int, float],\n },\n \"w_cell_size\": {\n \"types\": [int, float],\n },\n \"horizontal_padding\": {\n \"types\": [int, float],\n },\n \"vertical_padding\": {\n \"types\": [int, float],\n },\n \"depth_core\": {\n \"types\": [int, float],\n },\n \"refinement_object\": {\n \"types\": [str, UUID],\n \"uuid\": [],\n },\n \"refinement_levels\": {\n \"types\": [int, float],\n },\n \"refinement_type\": {\n \"types\": [str],\n \"values\": [\"surface\", \"radial\"],\n },\n \"refinement_distance\": {\n \"types\": [int, float],\n },\n \"ga_group_name\": {\n \"types\": [str],\n },\n \"monitoring_directory\": {\n \"types\": [str],\n },\n \"workspace_geoh5\": {\n \"types\": [str, Workspace],\n },\n \"run_command\": {\n \"types\": [str],\n },\n \"run_command_boolean\": {\n \"types\": [bool],\n },\n \"conda_environment\": {\n \"types\": [str],\n },\n 
\"conda_environment_boolean\": {\n \"types\": [bool],\n },\n \"workspace\": {\n \"types\": [str, Workspace],\n },\n}\n","sub_path":"geoapps/io/Octree/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339974725","text":"import torch\nfrom converters import pytorch2savedmodel, savedmodel2tflite\nimport torchvision\nfrom model import QATNet, Net, val_transform\nfrom tflite import get_tflite_outputs\nfrom PIL import Image\nimport os\n\n\n\ndataset = torchvision.datasets.ImageFolder(root='./data', transform=val_transform)\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=16)\n\ndef to_tflite_input(tensor):\n return tensor.numpy().transpose((0, 2, 3, 1))\n\ndef representative_dataset_gen():\n for i, (x, _) in enumerate(dataloader):\n yield [to_tflite_input(x)]\n if i > 200:\n return\n\ndef dequantize_state_dict(state_dict):\n new_dict = {}\n drop_keys = {\n 'weight_fake_quant',\n 'qconfig',\n 'activation_post_process',\n }\n for key, value in state_dict.items():\n keep = True\n for drop in drop_keys:\n if drop in key:\n keep = False\n break\n if keep:\n new_dict[key] = value\n return new_dict\n\nstate_dict = torch.load('./friday_net.pth', map_location='cpu')\n\n#qat_net = QATNet()\n#qat_net.load_state_dict(state_dict)\n\nnet = Net()\nnet.load_state_dict(dequantize_state_dict(state_dict))\nnet.eval()\n#net=torch.quantization.convert(qat_net.eval(), inplace=False)\n\nim = Image.open('./test.jpg')\ndata = val_transform(im).reshape((1, 3, 224, 224))\ntflite_input = to_tflite_input(data)\nprint(tflite_input, tflite_input.mean(), tflite_input.std())\ntorch_output = net(data)\n#torch_quantize_output = qat_net(data)\n\ndummy_input = torch.randn(1, 3, 224, 224)\ninput_names = ['image_array']\noutput_names = ['category']\n\nprint('onnx export')\nonnx_model_path = 'model.onnx'\ntorch.onnx.export(net, dummy_input, onnx_model_path, input_names=input_names, output_names=output_names)\n\nprint('keras export')\nsaved_model_dir = 'saved_model'\npytorch2savedmodel(onnx_model_path, saved_model_dir)\n\nprint('tflite export')\ntflite_model_path = 'model.tflite'\ntflite_model = savedmodel2tflite(saved_model_dir, tflite_model_path, quantize=False)\n\n\nprint('tflite quantized export')\ntflite_quantized_model_path = 'model_quantized.tflite'\ntflite_quantized_model = savedmodel2tflite(saved_model_dir,\n tflite_quantized_model_path, quantize=True,\n representative_dataset=representative_dataset_gen)\n\nprint('edgetpu_compiler')\nos.system(f\"edgetpu_compiler {tflite_quantized_model_path}\")\n\ntflite_output = get_tflite_outputs(tflite_input, tflite_model).reshape(-1, )\ntflite_quantized_output = get_tflite_outputs(tflite_input, tflite_quantized_model).reshape(-1, )\nprint('torch', torch_output)\n#print('torch_quantized', torch_output)\nprint('tflite', tflite_output)\nprint('tflite_quantized', tflite_quantized_output)\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262041196","text":"from room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n\n# You may uncomment the smaller graphs for development and testing purposes.\n# map_file = \"maps/test_line.txt\"\n# map_file = \"maps/test_cross.txt\"\n# map_file 
= \"maps/test_loop.txt\"\n# map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\nplayer = Player(world.starting_room)\n\n# Fill this out with directions to walk\n# traversal_path = ['n', 'n']\n\ntraversal_path = ['n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's', 'n', 's']\n\n\n\n# traversal_path = ['n', 's']\n# traversal_path = []\n\n#my tips\n#que or stack might be counterproductive, since we cant teleport\n#dictionary probably?\n#might not need a class\n#get to every room with least amount of backtracking\n#use loop logic, want to keep running program until it does what it needs to do, and then moves onto something else\n\n\ntime_machine = {'n': 's', 'e': 'w', 's': 'n', 'w': 'e'}\n\nprevious_choice = [None]\ngogo = {} #we got rooms to see\ngot_it = {} #been there done that\n\ndef good_choice(room_number):\n choice = []\n if 'n' in room_graph[room_number][1].keys():\n choice.append('n')\n if 'e' in room_graph[room_number][1].keys():\n choice.append('e')\n if 's' in room_graph[room_number][1].keys():\n choice.append('s')\n if 'w' in room_graph[room_number][1].keys():\n choice.append('w')\n \n return choice\n\nwhile len(got_it) < len(room_graph): #while rooms ive been to is less than total rooms\n roomid = player.current_room.id #You may find the commands `player.current_room.id` useful (directions)\n if roomid not in gogo:\n\n got_it[roomid] = roomid\n\n gogo[roomid] = good_choice(roomid)\n\n if len(gogo[roomid]) < 1: \n previousDirection = previous_choice.pop()\n traversal_path.append(previousDirection)\n\n player.travel(previousDirection)\n\n else:\n nextDirection = gogo[roomid].pop(0) # removes first item in list (index of 0)\n traversal_path.append(nextDirection)\n\n previous_choice.append(time_machine[nextDirection])\n player.travel(nextDirection)\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\n\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"projects/adventure/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530641017","text":"f = open('input3.txt')\ninput_file = 
f.read().rstrip().split('\\n')\nf.close()\nbackpack = []\nsum_weight = 0\nc, n, arr = input_file[0], input_file[1], input_file[2:-1]\nc = int(c)\narr = [x.split() for x in arr]\narr = enumerate(arr)\narr = sorted(arr, key=lambda x: (-int(x[1][0]), int(x[1][1])))\nfor i in arr:\n if sum_weight + int(i[1][1]) <= c:\n backpack.append(int(i[0]))\n sum_weight += int(i[1][1])\nbackpack.sort()\nbackpack = [str(i) for i in backpack]\noutput_file = ' '.join(backpack)\nprint(output_file)\nf = open('output.txt', 'w')\nf.write(output_file)\nf.close()","sub_path":"13_Yandex_Contest 1/D/D_vladilir.py","file_name":"D_vladilir.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362156527","text":"from unittest import TestCase\nfrom settings.settings import PROJECT_ROOT\n\nfrom utils.helpers import get_json_config\nfrom hamcrest import assert_that, raises, calling, contains\n\n\nclass JsonFileTester(TestCase):\n\n def test_successfully_read_config_file(self):\n actual_json = get_json_config(\n file_path='{}/app/tests/fixtures/test_config.json'.format(\n PROJECT_ROOT\n )\n )\n for _ in actual_json:\n assert_that(\n contains('repo', 'config_path', 'tokenize')\n )\n\n def test_raises_io_error_if_fail(self):\n assert_that(\n calling(get_json_config).with_args(file_name='askldfj'),\n raises(TypeError)\n )","sub_path":"app/tests/json_file_tests.py","file_name":"json_file_tests.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"536488335","text":"#!/usr/bin/evn python\n# -*- coding:utf-8 -*-\n\n# FileName Config.py\n# Author: HeyNiu\n# Created Time: 2016/8/22\n\"\"\"\nhttp接口测试框架配置信息解析器\n\"\"\"\nimport configparser\nimport os\n\nimport utils.GlobalList\n\n\nclass Config(object):\n def __init__(self, api_type):\n self.config = configparser.ConfigParser()\n self.conf_path = os.path.join(os.getcwd()[::-1].split('\\\\', 1)[-1][::-1], 'conf', 'config.conf')\n if not os.path.exists(self.conf_path):\n # 持续集成时配置文件目录有改变,需要兼容\n self.conf_path = os.path.join(os.path.dirname(\n os.path.abspath(__file__)[::-1].split('\\\\', 1)[-1][::-1]), 'conf', 'config.conf')\n if not os.path.exists(self.conf_path):\n raise FileNotFoundError(\"请确保配置文件存在!\")\n self.config.read(self.conf_path, encoding='utf-8')\n self.type = api_type\n self.conf = {\n 'tester': '',\n 'project': '',\n 'versionName': '',\n 'versionCode': '',\n 'AppBuild': '',\n 'host': '',\n 'systemType': '2',\n 'DeviceId': 'ffffffff-b3f1-87ad-90ef-ebeb00000000',\n 'Model': 'MI+4LTE',\n 'DeviceOS': '23',\n 'Release': '6.0.1',\n 'getTokenHost': '',\n 'loginHost': '',\n 'loginInfo': '',\n 'SessionsPath': '',\n 'ApiURL': '',\n 'SpecialSessions': '',\n 'SessionsPair': '',\n 'DuplicateSwitch': False\n }\n\n self.__get_conf()\n\n def __get_conf(self):\n print('读取配置文件中...')\n self.conf['tester'] = self.config.get(self.config.sections()[self.type], 'tester')\n self.conf['project'] = self.config.get(self.config.sections()[self.type], 'project')\n self.conf['versionName'] = self.config.get(self.config.sections()[self.type], 'versionName')\n self.conf['versionCode'] = self.config.get(self.config.sections()[self.type], 'versionCode')\n self.conf['AppBuild'] = self.conf['versionCode']\n self.conf['host'] = self.config.get(self.config.sections()[self.type], 'host')\n utils.GlobalList.HOST = self.conf['host']\n self.conf['getTokenHost'] = self.config.get(self.config.sections()[self.type], 
'getTokenHost')\n self.conf['loginHost'] = self.config.get(self.config.sections()[self.type], 'loginHost')\n self.conf['loginInfo'] = self.config.get(self.config.sections()[self.type], 'loginInfo')\n self.conf['SessionsPath'] = self.config.get(self.config.sections()[self.type], 'SessionsPath')\n utils.GlobalList.SESSIONS_PATH = self.conf['SessionsPath']\n self.conf['ApiURL'] = self.config.get(self.config.sections()[self.type], 'ApiURL')\n utils.GlobalList.API_URL = self.conf['ApiURL']\n self.conf['SpecialSessions'] = self.config.get(self.config.sections()[self.type], 'SpecialSessions')\n utils.GlobalList.SPECIAL_SESSIONS = self.conf['SpecialSessions']\n self.conf['SessionsPair'] = self.config.get(self.config.sections()[self.type], 'SessionsPair')\n utils.GlobalList.SESSIONS_PAIR = self.conf['SessionsPair']\n self.conf['DuplicateSwitch'] = self.config.getboolean(self.config.sections()[self.type], 'DuplicateSwitch')\n utils.GlobalList.DUPLICATE_SWITCH = self.conf['DuplicateSwitch']\n self.__init_data()\n utils.GlobalList.CONF = self.conf\n\n def __init_data(self):\n \"\"\"\n 初始化接口对,提取出创建数据接口与删除数据接口\n :return:\n \"\"\"\n for i in eval(self.conf['SessionsPair']):\n session_create_name = i.split(':')[0]\n session_create_parameter = i.split(':')[1].split('|')[0]\n session_delete_name = i.split('|')[-1].split(':')[0]\n session_delete_parameter = i.split(':')[-1]\n utils.GlobalList.CREATE_DICT[session_create_name] = session_create_parameter\n utils.GlobalList.DELETE_DICT[session_delete_name] = session_delete_parameter\n utils.GlobalList.MAPPING_DICT[session_delete_name] = session_create_name\n\n\nif __name__ == '__main__':\n Config(0)\n","sub_path":"conf/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":4201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418823695","text":"import db\nimport os\n\nconn = db.get_connection()\ncursor = conn.cursor(dictionary=True)\ncursor.execute(\"SELECT t.problem_id, p.name AS problem_name, tm.trace_id, tm.energy_autoscorer, tm.s3url \"\n \"FROM tbltrace_metadata tm \"\n \"JOIN tbltrace t ON tm.trace_id = t.id \"\n \"JOIN tblproblem p ON t.problem_id = p.id \"\n \"WHERE tm.energy_autoscorer IS NOT NULL AND tm.energy_autoscorer > 0 AND p.is_lightning = FALSE AND p.dummy_problem = FALSE\")\nbest_traces = dict()\nsecond_best_traces = dict()\nfor trace in cursor:\n prob_name = trace['problem_name']\n if prob_name in best_traces:\n if best_traces[prob_name]['energy_autoscorer'] > trace['energy_autoscorer']:\n second_best_traces[prob_name] = best_traces[prob_name]\n best_traces[prob_name] = trace\n else:\n best_traces[prob_name] = trace\n\nfor key in best_traces.keys():\n if key not in second_best_traces:\n second_best_traces[key] = best_traces[key]\n\nbest_energy = 0\nfor trace in best_traces.values():\n best_energy += trace['energy_autoscorer']\nbest_traces.clear()\n\nsecond_best_energy = 0\nfor trace in second_best_traces.values():\n second_best_energy += trace['energy_autoscorer']\nprint(\"Best: \", best_energy)\nprint(\"Scnd: \", second_best_energy)\nbest_traces = second_best_traces\n\n\nos.mkdir('tmp')\nnames = sorted(best_traces.keys())\nfor name in names:\n trace = best_traces[name]\n if trace['s3url']:\n print('%s: %s (energy=%s)' % (name, trace['s3url'].strip(), trace['energy_autoscorer']))\n os.system('curl -o tmp/%s.nbt %s' % (trace['problem_name'], trace['s3url'].strip()))\n else:\n print('%s: blob(trace_id=%d, energy=%s)' % (name, trace['trace_id'], trace['energy_autoscorer']))\n 
cursor.execute(\"SELECT body FROM tbltrace WHERE id=%s\", (trace['trace_id'],))\n blob = cursor.fetchone()['body']\n with open('tmp/' + trace['problem_name'] + '.nbt', 'bw') as f:\n f.write(blob)\n","sub_path":"scripts/create_second_best_submission.py","file_name":"create_second_best_submission.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233152231","text":"import uuid\nimport threading\nfrom datetime import datetime\nfrom .execution import RunExecution\n\n\nclass RunBackend(object):\n def __init__(self, workspace):\n self.workspace = workspace\n self.db = workspace.db\n self.storage = workspace.storage\n\n def create_run(self, specification):\n run_id = str(uuid.uuid4())\n\n run = {\n \"run_id\": run_id,\n \"status\": \"created\",\n \"created\": datetime.utcnow(),\n \"specification\": specification,\n }\n\n self.db.create_run(run)\n\n run_execution = RunExecution(self.workspace, run_id)\n run_execution_thread = threading.Thread(\n target=run_execution.run, name=f\"RunExecution {run_id}\"\n )\n run_execution_thread.start()\n\n return run\n\n def terminate_run(self, run_id):\n run = self.db.get_run(run_id)\n\n if run[\"status\"] == \"terminated\" or run[\"status\"] == \"run finished\":\n return\n\n run[\"status\"] = \"terminated\"\n run[\"terminated\"] = datetime.now()\n self.db.update_run(run)\n\n def delete_run(self, run_id):\n self.terminate_run(run_id)\n self.db.delete_run(run_id)\n self.storage.delete_logs(run_id)\n self.storage.delete_code(run_id)\n\n def get_run(self, run_id):\n return self.db.get_run(run_id)\n\n def get_run_ids(self):\n return self.db.get_run_ids()\n\n def get_all_runs(self):\n return self.db.get_all_runs()\n","sub_path":"src/datalaunch_server/backend/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439118152","text":"# coding=utf-8\nfrom django.http import HttpRequest, HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth import authenticate, login as l_in, logout as l_out\nfrom django.http import HttpResponseRedirect\nfrom django.http.response import Http404, JsonResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views import View\nimport json\n\nfrom ask_app.forms import *\nfrom .models import *\n\n\nclass LoadView(View):\n def get(self, request):\n start = int(request.GET.get('start'))\n recent_questions = Question.objects.recent_questions()\n questions_for_send = []\n result = recent_questions[start:start + 4]\n for questn in result:\n questions_for_send.append(\n {\n 'text': questn.text,\n 'title': questn.title,\n 'author': questn.author.username,\n 'id': questn.id,\n 'avatar': questn.author.avatar.url,\n # 'tags': list(questn.tags),\n 'number_answers': questn.number_answers,\n 'likes': questn.likes\n }\n )\n return HttpResponse(json.dumps(questions_for_send), content_type='application/json')\n\n\nclass AddAnswerView(View):\n def post(self, request):\n try:\n text = str(request.POST.get('text'))\n userid = int(request.POST.get('user'))\n questionid = int(request.POST.get('question'))\n except:\n return JsonResponse(dict(error='bad data'))\n\n if text:\n new_answer = Answer(author=UserProfile.objects.get(id=userid),\n question=Question.objects.get(id=questionid),\n text=text)\n new_answer.save()\n 
answer_for_send = []\n answer_for_send.append(\n {\n 'text': new_answer.text,\n 'createdate': str(new_answer.create_date.year) + \".\" + str(new_answer.create_date.month) + \".\" +\n str(new_answer.create_date.day),\n 'id': new_answer.id\n # 'text': \"super text for answer\",\n # 'createdate': \"11.23.2017\",\n # 'id': \"3\"\n }\n )\n return HttpResponse(json.dumps(answer_for_send), content_type='application/json')\n else:\n return JsonResponse(dict(error='bad length of text'))\n\n\nclass IndexView(View):\n def get(self, request):\n context = {}\n context = _get_user_context(request, context)\n\n questions = Question.objects.recent_questions()\n questions_for_render = questions[0:20]\n context['objects'] = questions_for_render\n context['enable_modal_ask'] = True\n form = AskForm()\n\n context['form'] = form\n return render(request, 'index.html', context)\n\n def post(self, request):\n context = {}\n context = _get_user_context(request, context)\n\n questions = Question.objects.recent_questions()\n questions_for_render = questions[0:20]\n context['objects'] = questions_for_render\n context['enable_modal_ask'] = True\n form = AskForm(request.POST, UserProfile.objects.get(id=request.user.id))\n if form.is_valid():\n new_question = form.save()\n return redirect('question', new_question.id)\n\n context['form'] = form\n return render(request, 'index.html', context)\n\nclass TagView(View):\n def get(self, request, name):\n context = {}\n context = _get_user_context(request, context)\n questions = Question.objects.questions_by_tag(name)\n questions_for_render = questions[0:20]\n context['objects'] = questions_for_render\n # context['enable_modal_ask'] = True\n # if request.method == 'POST':\n # form = AskForm(request.POST, UserProfile.objects.get(id=request.user.id))\n # if form.is_valid():\n # new_question = form.save()\n # return redirect('question', new_question.id)\n # else:\n # form = AskForm()\n # context['form'] = form\n return render(request, 'tag.html', context)\n\nclass HotView(View):\n def get(self, request):\n context = {}\n context = _get_user_context(request, context)\n questions = Question.objects.questions_with_high_rating()\n questions_for_render = paginate(questions, request)\n context['objects'] = questions_for_render\n return render(request, 'index.html', context)\n\n\nclass QuestionView(View):\n def get(self, request, _id):\n context = {}\n context = _get_user_context(request, context)\n\n try:\n main_question = Question.objects.get_with_tags(_id)\n except Question.DoesNotExist:\n raise Http404()\n\n answers = Answer.objects.get_with_likes(_id)\n answers_for_render = paginate(answers, request)\n form = AnswerForm()\n\n context['form'] = form\n context['question'] = main_question\n context['answers'] = answers_for_render\n return render(request, 'question.html', context)\n\n def post(self, request, _id):\n context = {}\n context = _get_user_context(request, context)\n\n try:\n main_question = Question.objects.get_with_tags(_id)\n except Question.DoesNotExist:\n raise Http404()\n answers = Answer.objects.get_with_likes(_id)\n answers_for_render = paginate(answers, request)\n form = AnswerForm(request.POST, context['user'], main_question)\n if form.is_valid():\n form.save()\n return redirect('question', _id)\n\n context['form'] = form\n context['question'] = main_question\n context['answers'] = answers_for_render\n return render(request, 'question.html', context)\n\n\nclass AskView(View):\n def get(self, request):\n context = {}\n context = _get_user_context(request, context)\n 
form = AskForm()\n\n context['form'] = form\n return render(request, 'ask.html', context)\n\n def post(self, request):\n context = {}\n context = _get_user_context(request, context)\n form = AskForm(request.POST, UserProfile.objects.get(id=request.user.id))\n if form.is_valid():\n new_question = form.save()\n return redirect('question', new_question.id)\n context['form'] = form\n return render(request, 'ask.html', context)\n\n\ndef paginate(objects_list, request, page_objects_num=20):\n paginator = Paginator(objects_list, page_objects_num)\n page = request.GET.get('page')\n\n try:\n objects_page = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n objects_page = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n objects_page = paginator.page(paginator.num_pages)\n return objects_page\n\n\ndef _get_user_context(request, context):\n if request.user.is_authenticated():\n context['user_logged_in'] = True\n # context['user'] = UserProfile.objects.get_or_create(username=request.user.username)\n context['user'] = UserProfile.objects.get(username=request.user.username)\n else:\n context['user_logged_in'] = False\n\n context['enable_modal_ask'] = False\n return context\n\n\nclass LoginView(View):\n def get(self, request):\n if request.user.is_authenticated():\n context = {}\n context = _get_user_context(request, context)\n return render(request, 'index.html', context)\n\n form = LoginForm()\n return render(request, 'login.html', {\n 'form': form\n })\n\n def post(self, request):\n if request.user.is_authenticated():\n context = {}\n context = _get_user_context(request, context)\n return render(request, 'index.html', context)\n\n form = LoginForm(request.POST) # initialize the form with POST data\n if form.is_valid():\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n user_auth = authenticate(username=username, password=password) # try auth\n if user_auth is not None: # if auth is success\n l_in(request, user_auth) # start session\n return HttpResponseRedirect(\"/success\")\n else: # else, auth gone wrong\n form.add_error(None, \"Username or password is incorrect\")\n return render(request, 'login.html', {\n 'form': form\n })\n\n\nclass RegistrationView(View):\n def get(self, request):\n if request.user.is_authenticated():\n context = {}\n context = _get_user_context(request, context)\n return render(request, 'index.html', context)\n\n register_form = RegisterForm()\n return render(request, 'registration.html', {\n 'form': register_form\n })\n\n def post(self, request):\n register_form = RegisterForm(request.POST, request.FILES)\n if register_form.is_valid():\n new_profile = register_form.save()\n l_in(request, new_profile)\n return HttpResponseRedirect(\"/success\")\n return render(request, 'registration.html', {\n 'form': register_form\n })\n\n\nclass SuccessView(View):\n def get(self, request):\n context = {}\n context = _get_user_context(request, context)\n if request.user.is_authenticated():\n context['success'] = True\n return redirect('/')\n else:\n return render(request, 'success.html', {\n 'success': False\n })\n\n\nclass LogoutView(View):\n def get(self, request):\n if request.user.is_authenticated():\n l_out(request)\n return HttpResponseRedirect('/')\n else:\n return HttpResponseRedirect('/')\n\n\nclass SettingsView(View):\n def get(self, request):\n user = request.user\n _profile = UserProfile.objects.filter(user_ptr_id=user.id).last()\n 
print(\"=====================\")\n print(user.id)\n\n init = {\"username\": _profile.username,\n \"email\": _profile.email,\n \"avatar\": _profile.avatar}\n form = ProfileForm(initial=init)\n context = {'form': form}\n context = _get_user_context(request, context)\n return render(request, 'settings.html', context)\n\n def post(self, request):\n user = request.user\n _profile = UserProfile.objects.filter(user_ptr_id=user.id).last()\n print(\"=====================\")\n print(user.id)\n\n form = ProfileForm(request.POST, request.FILES, _profile)\n if form.is_valid():\n _profile.username = form.cleaned_data[\"username\"]\n _profile.email = form.cleaned_data[\"email\"]\n if form.cleaned_data[\"avatar\"]:\n _profile.avatar = form.cleaned_data[\"avatar\"]\n _profile.save()\n context = {'form': form}\n context = _get_user_context(request, context)\n return render(request, 'settings.html', context)\n\n\nclass VoteView(View):\n def post(self, request):\n try:\n qid = int(request.POST.get('qid'))\n except:\n return JsonResponse(dict(error='bad question id'))\n\n _vote = request.POST.get('vote')\n question = Question.objects.get_with_tags(question_id=qid)\n likes = question.likes\n if _vote == \"inc\":\n likes += 1\n else:\n likes -= 1\n return JsonResponse(dict(ok=1, vote=_vote, likes=likes))\n\nclass AnswerView(View):\n def post(self, request):\n try:\n qid = int(request.POST.get('qid'))\n except:\n return JsonResponse(dict(error='bad question id'))\n\n _vote = request.POST.get('vote')\n question = Question.objects.get_with_tags(question_id=qid)\n likes = question.likes\n if _vote == \"inc\":\n likes += 1\n else:\n likes -= 1\n return JsonResponse(dict(ok=1, vote=_vote, likes=likes))","sub_path":"ask_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183215961","text":"\nimport numpy as np\n\nfrom tensorflow_core.python.keras.datasets import imdb\nfrom tensorflow_core.python.keras.preprocessing import sequence\n\nmax_features = 10000\nmaxlen = 500\nbatch_size = 32\n\nprint('Loading data...')\n(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)\nprint(len(input_train), 'train sequences')\nprint(len(input_test), 'test sequences')\n\nprint('Pad sequences (samples x time)')\ninput_train = sequence.pad_sequences(input_train, maxlen=maxlen)\ninput_test = sequence.pad_sequences(input_test, maxlen=maxlen)\nprint('input_train shape:', input_train.shape)\nprint('input_test shape:', input_test.shape)\n# make the training data 80% and testing 20%\nx_train = np.concatenate((input_train, input_test[:15000]))\ninput_test = input_test[15000:]\ny_train = np.concatenate((y_train, y_test[:15000]))\ny_test = y_test[15000:]\n\n\nfrom tensorflow_core.python.keras import models\nfrom tensorflow_core.python.keras import layers\n\nembedding_size = 128\nmodel = models.Sequential()\nmodel.add(layers.Embedding(max_features, embedding_size, input_length=maxlen))\nmodel.add(layers.Bidirectional(layers.LSTM(128, return_sequences=True)))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Bidirectional(layers.LSTM(128)))\nmodel.add(layers.Dropout(0.2))\nmodel.add(layers.Dense(256, activation='relu'))\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(32, activation='relu'))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(16, activation='relu'))\nmodel.add(layers.Dense(1, 
activation='sigmoid'))\n\n\nmodel.compile(optimizer='rmsprop',\n loss='binary_crossentropy',\n metrics=['acc'])\nhis = model.fit(x_train, y_train,\n epochs=4,\n batch_size=64,\n validation_split=0.025)\n\n\nresults = model.evaluate(input_test, y_test)\nprint(results)\n\nprint(\"Done!\")\n\n\n\n","sub_path":"multiple_LSTM.py","file_name":"multiple_LSTM.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82190344","text":"__author__ = 'Jason Ragsdale'\n\nimport time\nimport logging\n\nfrom JOBS_LCD import JOBS_LCD\nfrom JOBS_Temperature import JOBS_Temperature\nfrom JOBS_Keypad import JOBS_Keypad\n\n\nclass JOBS():\n _version = \"0.01\"\n\n bus_number = -1\n address = 0x20\n\n def __init__(self):\n logging.info(\"Starting JOBSv\" + self._version)\n\n self.keypad = JOBS_Keypad()\n\n self.temperature = JOBS_Temperature()\n\n self.lcd = JOBS_LCD(self.address, self.bus_number)\n self.lcd.init()\n self.lcd.setCursor(0, 0)\n self.lcd.message(\" JOBS v\" + self._version)\n time.sleep(5)\n self.menu()\n\n def main(self):\n while True:\n self.lcd.setCursor(0, 1)\n self.lcd.message(time.strftime(\"%Y/%m/%d %H:%M:%S\"))\n temp_c, temp_f = self.temperature.read_temp()\n self.lcd.setCursor(0, 2)\n self.lcd.message(\"Temp C: \" + \"{0:.4g}\".format(temp_c))\n self.lcd.setCursor(0, 3)\n self.lcd.message(\"Temp F: \" + \"{0:.4g}\".format(temp_f))\n time.sleep(.5)\n\n def menu(self):\n self.lcd.clear()\n self.lcd.setCursor(0, 0)\n self.lcd.message(\"1.) Set Timer\")\n self.lcd.setCursor(0, 1)\n self.lcd.message(\"2.) Set Temperature\")\n self.lcd.setCursor(0, 2)\n self.lcd.message(\"3.) Start\")\n\n while True:\n if self.keypad.buttonPressed(1):\n self.setTimer()\n if self.keypad.buttonPressed(2):\n self.setTemperature()\n if self.keypad.buttonPressed(3):\n self.start()\n\n def setTimer(self):\n self.lcd.clear()\n self.lcd.setCursor(0, 0)\n self.lcd.message(\"Enter Time Duration:\")\n self.lcd.setCursor(0, 1)\n temp = ''\n while True:\n key = self.keypad.getKey()\n if key is not None:\n temp = temp + str(key)\n\n self.lcd.message(str(temp))\n\n def setTemperature(self):\n pass\n\n def start(self):\n pass\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='JOBS.log', format='%(asctime)s %(message)s', level=logging.INFO)\n jobs = JOBS()\n","sub_path":"python/JOBS/JOBS.py","file_name":"JOBS.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395939058","text":"from django.conf.urls import url, include\nfrom . 
import views\nfrom django.urls import path\n\napp_name = 'home'\n\nurlpatterns = [\n path('quiz2', views.quiz, name = 'quiz'),\n path('scenario', views.scenario, name = 'scenario'),\n path('quiz', views.quiz2, name='quiz2'),\n path('thankyou', views.resultsPage, name='resultsPage'),\n path('moderator', views.moderator, name='moderator'),\n path('info', views.info, name='info'),\n path('feedback', views.feedback, name='feedback'),\n path('reattempt', views.reattempt, name='reattempt'),\n path('revisit', views.revisit, name='revisit'),\n path('preresults', views.preresults, name='preresults'),\n path('comeback/', views.comeback, name='comeback'),\n path('view_feedback', views.view_feedback, name='view_feedback'),\n path('', views.home, name='home')\n]\n","sub_path":"pls-website/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"208346539","text":"# %load q06_get_unique_matches_count/build.py\n# Default imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\npath = 'data/ipl_matches_small.csv'\n\n# Enter Code Here\ndef get_unique_matches_count():\n data = read_ipl_data_csv(path, dtype=int)\n ipl_matches_array = int(len(set(data[:,0])))\n return ipl_matches_array\n\n","sub_path":"q06_get_unique_matches_count/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89258259","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nfrom sklearn.ensemble import GradientBoostingClassifier\r\nfrom collections import defaultdict\r\n\r\n# Prediction result file: src/step1/ground_truth/test_prediction.csv\r\nnum = []\r\ndef getPrediction():\r\n data_train = os.path.join('input', 'train.csv')\r\n data_test = os.path.join( 'input', 'test.csv')\r\n\r\n converters = defaultdict(int)\r\n ads = pd.read_csv(data_train, converters=converters)\r\n\r\n X = ads.drop('TARGET', axis=1).values\r\n y = ads[\"TARGET\"].values\r\n ads_test = pd.read_csv(data_test)\r\n X_TEST = ads_test.values\r\n y_ID = ads_test['ID'].values\r\n\r\n clf = GradientBoostingClassifier(learning_rate=0.01, n_estimators=600, max_features = 3, subsample = 0.9)\r\n clf.fit(X, y)\r\n y_predictor = clf.predict_proba(X_TEST)\r\n\r\n sub_name = os.path.join('input', 'test_prediction.csv')\r\n with open(sub_name, 'w') as file:\r\n file.write('ID,TARGET\\n')\r\n k = len(y_ID)\r\n for i in range(int(k)):\r\n line = str(y_ID[i]) + ',' + str(y_predictor[i][1])\r\n file.write(line + '\\n')\r\n\r\n\r\n\r\ngetPrediction()","sub_path":"manyidu.py","file_name":"manyidu.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214564267","text":"import numpy as np \nimport matplotlib.pyplot as plt \nimport pandas as pd\n\ndata = []\nwith open('facebook.txt', 'r') as f:\n\tfor line in f:\n\t\tdata.append(line.replace('\\n', '').split(','))\n\nplt.xlabel('age')\nplt.ylabel('count')\nplt.title('facebook user age distribution')\ndel data[1][0]\nclasses = ['18-22', '23-27', '28+']\nbar1 = plt.bar(np.arange(0, 3, 1)-.2, [78,49,21], color='blue', label='yes', width=.3)\nbar2 = plt.bar(np.arange(0, 3, 1)+.1, [4,21,46], color='red', label='no', width=.3, align='center')\nplt.xticks(np.arange(0, 3, 1), ['18-22', '23-27', '28+'])\nplt.legend(loc='upper right', title='facebook user?')\nplt.show()\n\n# 
d = [1, 2, 3, 4, 5]\n# print(np.std(d, ddof=0))\n\n","sub_path":"facebook_graph_visualization/facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"125993316","text":"import types\nfrom controllers.base_controller import Controller\n\n\nclass SharedController(Controller):\n\n def _do_update(self, dt):\n observation = self.env.unwrapped.render_obs()\n if self.primary.enabled:\n return self.primary._do_update(observation)\n elif self.secondary.enabled:\n return self.secondary._do_update(observation)\n\n return None # consistence\n\n def __init__(self, env, primary, secondary, shared=[True, False]):\n Controller.__init__(self, env=env)\n self.primary = primary\n self.secondary = secondary\n self.shared = shared\n\n def configure(self):\n Controller.extend_capabilities(self, self.primary, {'share': self.share})\n\n self.primary.configure()\n self.secondary.configure()\n\n self.primary.enabled = self.shared[0]\n self.secondary.enabled = self.shared[1]\n\n # extended capability\n def share(self, _):\n self.primary.enabled = not self.primary.enabled\n self.secondary.enabled = not self.secondary.enabled\n print('primary: {}, secondary: {}'.format(self.primary.enabled, self.secondary.enabled))","sub_path":"controllers/shared_controller.py","file_name":"shared_controller.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"334097629","text":"class IntCode:\n def __init__(self, mem):\n self.mem = {i: e for i, e in enumerate(mem)}\n self.pc = 0\n self.running = False\n self.inp = []\n self.out = []\n self.rel = 0\n\n def start(self):\n self.running = True\n self.cont()\n \n def get_arg(self, i):\n mode = self.mem[self.pc] // (10 ** (i+1)) % 10\n out = self.mem[self.pc + i]\n if mode == 1:\n return out\n elif mode == 2:\n out += self.rel\n return self.mem.get(out, 0)\n \n def write_arg(self, i, value):\n mode = self.mem[self.pc] // (10 ** (i+1)) % 10\n pos = self.mem[self.pc + i]\n assert mode != 1\n if mode == 2:\n pos += self.rel\n self.mem[pos] = value\n\n def cont(self):\n while True:\n opcode = self.mem[self.pc] % 100\n \n if opcode == 1:\n self.write_arg(3, self.get_arg(1) + self.get_arg(2))\n self.pc += 4\n elif opcode == 2:\n self.write_arg(3, self.get_arg(1) * self.get_arg(2))\n self.pc += 4\n elif opcode == 3:\n if not self.inp:\n return\n self.write_arg(1, self.inp.pop(0))\n self.pc += 2\n elif opcode == 4:\n self.out.append(self.get_arg(1))\n self.pc += 2\n elif opcode == 5:\n if self.get_arg(1):\n self.pc = self.get_arg(2)\n else:\n self.pc += 3\n elif opcode == 6:\n if not self.get_arg(1):\n self.pc = self.get_arg(2)\n else:\n self.pc += 3\n elif opcode == 7:\n self.write_arg(3, int(self.get_arg(1) < self.get_arg(2)))\n self.pc += 4\n elif opcode == 8:\n self.write_arg(3, int(self.get_arg(1) == self.get_arg(2)))\n self.pc += 4\n elif opcode == 9:\n self.rel += self.get_arg(1)\n self.pc += 2\n elif opcode == 99:\n self.running = False\n return\n\n*mem, = map(int, open(\"input.txt\").read().split(\",\"))\nintcode = IntCode(mem)\nintcode.start()\no = {}\nx = y = 0\ndx = 0\ndy = -1\nwhile intcode.running:\n intcode.inp.append(o.get((x, y), 0))\n intcode.cont()\n o[(x, y)] = intcode.out.pop(0)\n if intcode.out.pop(0):\n dx, dy = -dy, dx\n else:\n dx, dy = dy, -dx\n x += dx\n y += 
dy\nprint(len(o))\n","sub_path":"2019/Day11/puzzle1.py","file_name":"puzzle1.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178213342","text":"#“United States”と”U.S.”のコサイン類似度を計算せよ.\n\nfrom gensim.models import KeyedVectors\n\nmodel = KeyedVectors.load_word2vec_format(\n \"GoogleNews-vectors-negative300.bin\", binary=True\n)\nprint(model.similarity('United_States','U.S.'))\n\n#0.73107743","sub_path":"Mana/chapter07/knock61.py","file_name":"knock61.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"139790325","text":"# -*- coding: utf-8 -*-\n\n# dcf\n# ---\n# A Python library for generating discounted cashflows.\n# \n# Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk]\n# Version: 0.3, copyright Wednesday, 18 September 2019\n# Website: https://github.com/sonntagsgesicht/dcf\n# License: Apache License 2.0 (see LICENSE file)\n\n\nfrom .interpolation import constant, linear\nfrom .interpolationscheme import dyn_scheme\nfrom .compounding import continuous_compounding, continuous_rate\n\n\ndef act_36525(start, end):\n if hasattr(start, 'diff_in_days'):\n # duck typing businessdate.BusinessDate.diff_in_days\n d = start.diff_in_days(end)\n else:\n d = end - start\n if hasattr(d, 'days'):\n # assume datetime.date or finance.BusinessDate (else days as float)\n d = d.days\n return float(d) / 365.25\n\n\nclass Curve(object):\n _interpolation = dyn_scheme(constant, linear, constant)\n\n def __init__(self, domain=(), data=(), interpolation=None):\n r\"\"\"\n Curve object to build function\n\n :param list(float) domain: source values\n :param list(float) data: target values\n :param function interpolation: interpolation function on x_list (optional), default is taken from class member _interpolation\n\n Curve object to build function :math:`f:R \\rightarrow R, x \\mapsto y`\n from finite point vectors :math:`x` and :math:`y`\n using piecewise various interpolation functions.\n \"\"\"\n # sort data by domain values\n if not len(domain) == len(data):\n raise ValueError('%s requires equal length input for domain and data' % self.__class__.__name__)\n\n if domain:\n domain, data = map(list,zip(*sorted(zip(*(domain, data)))))\n\n if interpolation is None:\n interpolation = self.__class__._interpolation\n\n self._scheme = interpolation\n self._func = interpolation(domain, data)\n self._domain = domain\n\n @property\n def interpolation(self):\n return self._scheme\n\n @property\n def domain(self):\n return self._domain\n\n def __call__(self, x):\n if isinstance(x, (tuple, list)):\n return [self(xx) for xx in x]\n return self._func(x)\n\n def __add__(self, other):\n x_list = sorted(set(self.domain + other.domain))\n y_list = [self(x) + other(x) for x in x_list]\n return self.__class__(x_list, y_list, self.interpolation)\n\n def __sub__(self, other):\n x_list = sorted(set(self.domain + other.domain))\n y_list = [self(x) - other(x) for x in x_list]\n return self.__class__(x_list, y_list, self.interpolation)\n\n def __mul__(self, other):\n x_list = sorted(set(self.domain + other.domain))\n y_list = [self(x) * other(x) for x in x_list]\n return self.__class__(x_list, y_list, self.interpolation)\n\n def __truediv__(self, other):\n return self.__div__(other)\n\n def __div__(self, other):\n x_list = sorted(set(self.domain + other.domain))\n if any(not other(x) for x in x_list):\n raise 
ZeroDivisionError(\"Division with %s requires on zero values.\" % other.__class__.__name__)\n y_list = [self(x) / other(x) for x in x_list]\n return self.__class__(x_list, y_list, self.interpolation)\n\n def __str__(self):\n return str([z for z in zip(self.domain, self(self.domain))])\n\n def __repr__(self):\n return self.__class__.__name__ + '(' + self.__str__() + ')'\n\n def shifted(self, delta=0.0):\n if delta:\n x_list = [x + delta for x in self.domain]\n else:\n x_list = self.domain\n y_list = self(self.domain)\n return self.__class__(x_list, y_list, self.interpolation)\n\n\nclass DateCurve(Curve):\n\n @staticmethod\n def _default_day_count(start, end):\n if hasattr(start, 'diff_in_days'):\n # duck typing businessdate.BusinessDate.diff_in_days\n d = start.diff_in_days(end)\n else:\n d = end - start\n if hasattr(d, 'days'):\n # assume datetime.date or finance.BusinessDate (else days as float)\n d = d.days\n return float(d) / 365.25\n\n _time_shift = '1d'\n\n def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None):\n self._origin = domain[0] if origin is None and domain else origin\n self._day_count = self._default_day_count if day_count is None else day_count\n flt_domain = [self._day_count(self._origin, x) for x in domain]\n super(DateCurve, self).__init__(flt_domain, data, interpolation)\n self._domain = domain\n\n @property\n def domain(self):\n \"\"\" domain of curve as list of dates where curve values are given \"\"\"\n return self._domain\n\n @property\n def origin(self):\n \"\"\" date of origin (date zero) \"\"\"\n return self._origin\n\n def __call__(self, x):\n if isinstance(x, (list, tuple)):\n return [self(xx) for xx in x]\n return super(DateCurve, self).__call__(self.day_count(self.origin, x))\n\n def __add__(self, other):\n new = super(DateCurve, self).__add__(other.shifted(self.origin - other.origin))\n self.__class__(new.domain, new(new.domain), new.interpolation, self.origin, self._day_count)\n return new\n\n def __sub__(self, other):\n new = super(DateCurve, self).__sub__(other.shifted(self.origin - other.origin))\n self.__class__(new.domain, new(new.domain), new.interpolation, self.origin, self._day_count)\n return new\n\n def __mul__(self, other):\n new = super(DateCurve, self).__mul__(other.shifted(self.origin - other.origin))\n self.__class__(new.domain, new(new.domain), new.interpolation, self.origin, self._day_count)\n return new\n\n def __div__(self, other):\n new = super(DateCurve, self).__div__(other.shifted(self.origin - other.origin))\n new.origin = self.origin\n return new\n\n def day_count(self, start, end):\n return self._day_count(start, end)\n\n def to_curve(self, origin=None):\n origin = self.origin if origin is None else origin\n x_list = [self.day_count(origin, x) for x in self.domain]\n y_list = self(self.domain)\n return Curve(x_list, y_list, self.interpolation)\n\n def integrate(self, start, stop):\n \"\"\" integrates curve and returns results as annualized rates \"\"\"\n # try use result, error = scipy.integrate(self, start, stop)\n try:\n from scipy.integrate import quad\n #raise ImportError()\n s = self.day_count(self.origin, start)\n e = self.day_count(self.origin, stop)\n f = super(DateCurve, self).__call__\n value, error = quad(f, s, e)\n except ImportError:\n value = 0.0\n step = self.__class__._time_shift\n current = start\n while current + step < stop:\n value += self(current) * self.day_count(current, current + step)\n current += step\n value += self(current) * self.day_count(current, stop)\n result = 
value / self.day_count(start, stop)\n return result\n\n def derivative(self, start):\n # todo use scipy.misc.derivative(self, start, self.__class__._time_shift)\n try:\n from scipy.misc import derivative\n s = self.day_count(self.origin, start)\n dx = self.day_count(start, start + self.__class__._time_shift)\n f = super(DateCurve, self).__call__\n result = derivative(f, s, dx)\n except ImportError:\n stop = start + self.__class__._time_shift\n value = self(stop) - self(start)\n result = value / self.day_count(start, stop)\n return result\n\n\nclass RateCurve(DateCurve):\n _time_shift = '1D'\n _forward_tenor = '3M'\n\n @staticmethod\n def get_storage_type(curve, x):\n raise NotImplementedError\n\n def cast(self, cast_type, **kwargs):\n new = cast_type(kwargs.get('domain', self.domain),\n [cast_type.get_storage_type(self, x) for x in kwargs.get('domain', self.domain)],\n kwargs.get('interpolation', None),\n kwargs.get('origin', self.origin),\n kwargs.get('day_count', self.day_count),\n kwargs.get('forward_tenor', self.forward_tenor))\n return new\n\n def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None):\n super(RateCurve, self).__init__(domain, data, interpolation, origin, day_count)\n self.forward_tenor = self.__class__._forward_tenor if forward_tenor is None else forward_tenor\n\n def __add__(self, other):\n casted = other.cast(self.__class__)\n new = super(RateCurve, self).__add__(casted)\n new.forward_tenor = self.forward_tenor\n return new\n\n def __sub__(self, other):\n casted = other.cast(self.__class__)\n new = super(RateCurve, self).__sub__(casted)\n new.forward_tenor = self.forward_tenor\n return new\n\n def __mul__(self, other):\n casted = other.cast(self.__class__)\n new = super(RateCurve, self).__mul__(casted)\n new.forward_tenor = self.forward_tenor\n return new\n\n def __div__(self, other):\n casted = other.cast(self.__class__)\n new = super(RateCurve, self).__div__(casted)\n new.forward_tenor = self.forward_tenor\n return new\n\n def _get_compounding_factor(self, start, stop):\n if start == stop:\n return 1.\n ir = self._get_compounding_rate(start, stop)\n t = self.day_count(start, stop)\n return continuous_compounding(ir, t)\n\n def _get_compounding_rate(self, start, stop):\n if start == stop:\n return self._get_compounding_rate(start, start + self.__class__._time_shift)\n df = self._get_compounding_factor(start, stop)\n t = self.day_count(start, stop)\n return continuous_rate(df, t)\n","sub_path":"dcf/curve.py","file_name":"curve.py","file_ext":"py","file_size_in_byte":10079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613451493","text":"#Script for performing topic modelling on patchnotes\n#Author: Tim Jonathan Rupp\n#DISC applied project\n\n#import needed modules\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\n\nimport re\n\nimport pandas as pd\n\nfrom pprint import pprint\n\nimport spacy\n\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\n\nimport pyLDAvis.gensim_models\n\nimport matplotlib.pyplot as plt\n\nimport sqlite3 as sql\n\n#define stop words\nstop_words = stopwords.words('english')\n\n#connect to SQLite database\nconn = sql.connect('app_reviews.sqlite')\n\n#read reviews from database\nreviews = pd.read_sql('SELECT content FROM reviews;', conn)\n\n#convert to list\nreviews = reviews.values.tolist()\n\n#convert contents to string\nreviews 
= [str(i) for i in reviews]\n\n#save as all texts\nall_texts = reviews\n\n#define function for removing emojis\ndef remove_emojis(data):\n emoj = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\"\n u\"\\U0001F300-\\U0001F5FF\" \n u\"\\U0001F680-\\U0001F6FF\" \n u\"\\U0001F1E0-\\U0001F1FF\" \n u\"\\U00002500-\\U00002BEF\" \n u\"\\U00002702-\\U000027B0\"\n u\"\\U00002702-\\U000027B0\"\n u\"\\U000024C2-\\U0001F251\"\n u\"\\U0001f926-\\U0001f937\"\n u\"\\U00010000-\\U0010ffff\"\n u\"\\u2640-\\u2642\"\n u\"\\u2600-\\u2B55\"\n u\"\\u200d\"\n u\"\\u23cf\"\n u\"\\u23e9\"\n u\"\\u231a\"\n u\"\\ufe0f\" \n u\"\\u3030\"\n \"]+\", re.UNICODE)\n return re.sub(emoj, '', data)\n\n#remove emojis and save patch notes in text_list\ntext_list = []\nfor text in all_texts:\n text_list.append(remove_emojis(text))\n\n#remove line breaks\ntext_list = [re.sub('\\\\s+', ' ', sent) for sent in text_list]\n\n#define function for tokenisation, normalisation and removal of punctuation\ndef sent_to_words(sentences):\n for sentence in sentences:\n yield gensim.utils.simple_preprocess(str(sentence), deacc=True)\n\n#use on text_list\ndata_words = list(sent_to_words(text_list))\n\nprint(data_words[:4])\n\n#define bigrams (have to occur min 5 times)\nbigram = gensim.models.Phrases(data_words, min_count=5, threshold=100)\nbigram_mod = gensim.models.phrases.Phraser(bigram)\n\n#define function to remove stopwords\ndef remove_stopwords(texts):\n return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]\n\n#function to get bigrams\ndef make_bigrams(texts):\n return [bigram_mod[doc] for doc in texts]\n\n#function for lemmatisation and POS-tagging\n#keep only nouns, adjectives, verbs and adverbs\ndef lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):\n texts_out = []\n for sent in texts:\n doc = nlp(\" \".join(sent))\n texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])\n return texts_out\n\n#remove stopwords\ndata_words_nostops = remove_stopwords(data_words)\n\n#calculate bigrams\ndata_words_bigrams = make_bigrams(data_words_nostops)\n\n#define lemmatiser and POS-tagger\nnlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n\n#lemmatise and tag words\ndata_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])\n\nprint(data_lemmatized[:1])\n\n#create dictionary\nid2word = corpora.Dictionary(data_lemmatized)\n\n#create corpus\ntexts = data_lemmatized\n\n#TF-IDF (term frequency and inverse document frequency\ncorpus = [id2word.doc2bow(text) for text in texts]\n\nprint(corpus[:1])\n\n#build model\nlda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,\n id2word=id2word,\n num_topics=20,\n random_state=100,\n update_every=1,\n chunksize=100,\n passes=10,\n alpha='auto',\n per_word_topics=True)\n#print topics\npprint(lda_model.print_topics())\n\n#save model\ndoc_lda = lda_model[corpus]\n\n#Print perplexity\nprint('\\nPerplexity: ', lda_model.log_perplexity(corpus))\n\n#Print coherence\ncoherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='u_mass')\ncoherence_lda = coherence_model_lda.get_coherence()\nprint('\\nCoherence: ', coherence_lda)\n\nvis = pyLDAvis.gensim_models.prepare(lda_model, corpus, id2word)\npyLDAvis.save_html(vis, 'lda_review.html')\n\n\n#function to compute multiple LDAs with varying topic numbers\n#coherence type is u_mass\n#returns a list of models and corresponding coherence values\ndef compute_coherence_values(dictionary, 
corpus, texts, limit, start=2, step=3):\n    coherence_values = []\n    model_list = []\n    for num_topics in range(start, limit, step):\n        model = gensim.models.ldamodel.LdaModel(corpus=corpus, num_topics=num_topics, id2word=id2word)\n        model_list.append(model)\n        coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='u_mass')\n        coherence_values.append(coherencemodel.get_coherence())\n\n    return model_list, coherence_values\n\n#call function\nmodel_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized,\n                                                        start=2, limit=40, step=4)\n\n#plot distribution of coherence values for different numbers of topics\nlimit = 40\nstart = 2\nstep = 4\nx = range(start, limit, step)\nplt.plot(x, coherence_values)\nplt.xlabel(\"Num Topics\")\nplt.ylabel(\"Coherence score\")\nplt.legend([\"coherence_values\"], loc='best')\nplt.show()\n\nfor m, um in zip(x, coherence_values):\n    print(\"Num Topics =\", m, \" has Coherence Value of\", round(um, 4))\n\n#choose optimal model depending on coherence\noptimal_model = model_list[1]\n\n#show topics of optimal model\nmodel_topics = optimal_model.show_topics(formatted=False)\npprint(optimal_model.print_topics(num_words=10))\n\n#visualize and save as html\nvis = pyLDAvis.gensim_models.prepare(optimal_model, corpus, id2word)\npyLDAvis.save_html(vis, 'lda_reviews3.html')\n\n\n#save topwords of topics to sql\ntopics = pd.DataFrame(optimal_model.print_topics(num_words = 10))\n\ntopics.to_sql(\"LDA_reviews_topwords3\", conn)\n\n#get values for each review corresponding to each topic\nall_topics = optimal_model.get_document_topics(corpus, minimum_probability=0.0)\nall_topics_csr = gensim.matutils.corpus2csc(all_topics)\nall_topics_numpy = all_topics_csr.T.toarray()\nall_topics_df = pd.DataFrame(all_topics_numpy)\n\n#save values in sql\nall_topics_df.to_sql(\"LDA_reviews3\", conn)\n","sub_path":"14_LDA_reviews.py","file_name":"14_LDA_reviews.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413346900","text":"\"\"\"\nTwo words are anagrams if you can rearrange the letters from one to spell the other.\nWrite a function called is_anagram that takes two strings and returns True if they are anagrams.\n\"\"\"\ndef is_anagram(s1,s2):\n\tif len(s1)!=len(s2):\n\t\treturn False\n\tt1=list(s1)\n\tt2=list(s2)\n\tt1.sort()\n\tt2.sort()\n\tif(t1==t2):\n\t\treturn True\n\telse:\n\t\treturn False\nprint(\"eastern\",\"nearest\",is_anagram(\"eastern\",\"nearest\"))\n","sub_path":"c10/e6.py","file_name":"e6.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"149116192","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport tensorflow as tf\nimport math\nimport csv\nimport torch\nimport torch.nn as nn\nfrom sklearn import metrics\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\n\n# Any results you write to the current directory are saved as output.\ndef write_result(name, predictions):\n \"\"\"\n \"\"\"\n if predictions is None:\n raise Exception('need predictions')\n\n predictions = predictions.flatten()\n\n if not os.path.exists('./results'):\n os.makedirs('./results')\n\n path = os.path.join('./results', name)\n\n with open(path, 'wt', encoding='utf-8', newline='') as csv_target_file:\n target_writer = csv.writer(csv_target_file, lineterminator='\\n')\n\n header = [\n 'user_id',\n 'time_slot_0', 'time_slot_1', 'time_slot_2', 'time_slot_3',\n 'time_slot_4', 'time_slot_5', 'time_slot_6', 'time_slot_7',\n 'time_slot_8', 'time_slot_9', 'time_slot_10', 'time_slot_11',\n 'time_slot_12', 'time_slot_13', 'time_slot_14', 'time_slot_15',\n 'time_slot_16', 'time_slot_17', 'time_slot_18', 'time_slot_19',\n 'time_slot_20', 'time_slot_21', 'time_slot_22', 'time_slot_23',\n 'time_slot_24', 'time_slot_25', 'time_slot_26', 'time_slot_27',\n ]\n\n target_writer.writerow(header)\n\n for i in range(0, len(predictions), 28):\n # NOTE: 57159 is the offset of user ids\n userid = [57159 + i // 28]\n labels = predictions[i:i+28].tolist()\n\n target_writer.writerow(userid + labels)\n\n# NOTE: load the data from the npz\ndataset = np.load('./datasets/v0_eigens.npz')\n\n# NOTE: read features of test set\ntest_eigens = dataset['issue_eigens'][:, :-28].reshape(-1, 32, 28).astype(float)\n\n# Hyperparameters\nsequence_length = 32\ninput_size = 28\nhidden_size1 = 128\nfc_hidden_size = [28, 128, 128, 128, 28]\nnum_layers = 2\nnum_classes = 28\nbatch_size = 64\nnum_epochs = 60\nlearning_rate = 0.0001\n\nclass RNN(nn.Module):\n def __init__(self, input_size, hidden_size1, num_layers, num_classes):\n super(RNN, self).__init__()\n self.hidden_size1 = hidden_size1\n self.num_layers = num_layers\n self.fc1 = nn.Linear(input_size, fc_hidden_size[0])\n self.fc2 = nn.Linear(fc_hidden_size[0], fc_hidden_size[1])\n self.fc3 = nn.Linear(fc_hidden_size[1], fc_hidden_size[2])\n self.lstm1 = nn.LSTM(fc_hidden_size[2], hidden_size1, num_layers, batch_first = True, dropout = 0.5)\n self.fc4 = nn.Linear(hidden_size1, fc_hidden_size[3])\n self.fc5 = nn.Linear(fc_hidden_size[3], fc_hidden_size[4])\n self.fc6 = nn.Linear(fc_hidden_size[4], num_classes)\n self.drop = nn.Dropout(p=0.3)\n self.sigmoid = nn.Sigmoid()\n \n def forward(self, x):\n \n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n \n # Set initial hidden and cell states \n h0 = torch.zeros(self.num_layers, out.size(0), self.hidden_size1).float()\n c0 = torch.zeros(self.num_layers, out.size(0), self.hidden_size1).float()\n \n # Forward propagate LSTM\n out, _ = self.lstm1(out, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)\n\n # Decode the hidden state of the last time step\n out = self.fc4(out[:, -1, :])\n out = self.fc5(out)\n out = self.fc6(out)\n \n out = self.sigmoid(out)\n \n return out\n \nmodel = RNN(input_size, hidden_size1, num_layers, num_classes)\nmodel.load_state_dict(torch.load('model_lstm_81_0.8799_0.2104.ckpt'))\n\nwith torch.no_grad():\n test_eigens = torch.tensor(test_eigens).float()\n outputs = model.forward(test_eigens)\n one = torch.ones(len(test_eigens), 28)\n zero = 
torch.zeros(len(test_eigens), 28)\n\n    result2 = torch.where(outputs > 0.1, one, zero)\n    result3 = torch.where(outputs > 0.09, one, zero)\n    write_result('best_submission.csv', outputs.detach().numpy())","sub_path":"lstm_predict.py","file_name":"lstm_predict.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238494","text":"#coding=gbk\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport jieba\nfrom wordcloud import WordCloud,ImageColorGenerator\nfrom PIL import Image\n#打开歌词并进行分词\nwith open(r\"C:\\Users\\john\\Desktop\\jay.txt\",\"r\",encoding=\"utf-8\") as f:\n    text=f.read()\ncut_text=jieba.cut(text)\nresult=\" \".join(cut_text)\n#选用背景图片\nimage=np.array(Image.open(r\"C:\\Users\\john\\Desktop\\jay.jpg\"))\n#设置参数\nwc=WordCloud(font_path=r\"C:\\Windows\\Fonts\\STZHONGS.TTF\",\n             background_color=\"white\",\n             width=500,\n             height=350,\n             max_font_size=50,\n             min_font_size=10,\n             mask=image\n             )\nwc.generate(result)\n#设置背景颜色随图片颜色改变\nimage_colors=ImageColorGenerator(image)\nwc.recolor(color_func=image_colors)\n#展示图片\nplt.imshow(wc)\nplt.axis(\"off\")\nplt.show()\n#保存图片\nwc.to_file(r\"C:\\Users\\john\\Desktop\\jay1.png\")\n\n\n","sub_path":"worldColoud.py","file_name":"worldColoud.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85612502","text":"import pytest\nfrom src.auth import auth_register_v2\nfrom src.error import AccessError, InputError\nfrom src.dm import dm_details_v1, dm_create_v1\nimport jwt\nfrom src.other import clear_v1\n\n@pytest.fixture\ndef num_members():\n    return 5\n\n@pytest.fixture\ndef users(num_members):\n\n    u_ids = []\n    tokens = []\n    for i in range(num_members):\n        email = f\"test{i}email@gmail.com\"\n        password = f\"TestTest{i}\"\n        firstname = f\"firstname{i}\"\n        lastname = f\"lastname{i}\"\n        user = auth_register_v2(email,password,firstname, lastname)\n        u_ids.append(user['auth_user_id'])\n        tokens.append(user['token'])\n    return {'tokens' : tokens, 'u_ids': u_ids}\n\n    \n@pytest.fixture\ndef clear():\n    clear_v1()\n\ndef test_invalid_token(clear):\n    with pytest.raises(AccessError):\n        dm_details_v1(jwt.encode({'test' : 'token'}, 'testSecret', algorithm='HS256'), 5)\n\ndef test_user_not_in_dm(clear, users):\n    dm = dm_create_v1(users['tokens'][1], users['u_ids'][2:])\n    with pytest.raises(AccessError):\n        dm_details_v1(users['tokens'][0], dm['dm_id'])\n\ndef test_invalid_dm_id(clear, users):\n    with pytest.raises(InputError):\n        dm_details_v1(users['tokens'][0], 'test_dm_id')\n\ndef test_user_in_dm(clear, users, num_members):\n    dm = dm_create_v1(users['tokens'][0], users['u_ids'][1:])\n    details = dm_details_v1(users['tokens'][1], dm['dm_id'])\n    assert len(details) == 2\n    assert len(details['members']) == num_members\n\ndef test_valid_dict_keys(clear, users):\n    dm = dm_create_v1(users['tokens'][0], users['u_ids'])\n    details = dm_details_v1(users['tokens'][1], dm['dm_id'])\n    assert 'names' in details and 'members' in details \n    assert 'user_id' in details['members'][0] \n    assert 'email' in details['members'][0] \n    assert 'name_first' in details['members'][0] \n    assert 'name_last' in details['members'][0]\n    assert 'handle_str' in details['members'][0] \n\n","sub_path":"tests/dm_details_v1_test.py","file_name":"dm_details_v1_test.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"364285465","text":"# matrix\ndef is_p(s):\n n = len(s)\n if n == 1:\n return True\n m = n / 2\n for i in xrange(m):\n if s[i] != s[n-1-i]:\n return False\n return True\n\nclass Solution(object):\n # it work\n def partition_r(self, s):\n \"\"\"\n recursive \n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n def p(s):\n n = len(s)\n if n == 0:\n return [[]]\n if n == 1:\n return [[s]]\n cur_l = []\n for i in xrange(0, n):\n if is_p(s[i:n]):\n pre_list = p(s[0:i])\n cur_l += [l+[s[i:n]] for l in pre_list]\n return cur_l\n return p(s)\n\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n n = len(s)\n if n == 0:\n return [[]]\n if n == 1:\n return [[s]]\n dp = [[] for i in xrange(n)]\n # dp[0] = [[s[0]]]\n # dp[0].p()\n for i in xrange(0,n):\n for j in xrange(0,i):\n if is_p(s[i-j:i+1]):\n dp[i] += [l+[s[i-j:i+1]] for l in dp[i-1-j]]\n\n if is_p(s[0:i+1]):\n dp[i] += [s[0:i+1]],\n # dp[i].p()\n return dp[n-1]\n\n\n\n # not work \n def partition_1(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n pre_list = []\n results = []\n n = len(s) \n for i in xrange(1,n):\n pre_s = s[0:i]\n # pre_s.p()\n if len(pre_list) > 0:\n pre_list = [l+[s[i-1]] for l in pre_list]\n # pre_list.p()\n if is_p(pre_s):\n pre_list += [pre_s],\n # pre_list.p()\n left_s = s[i:n]\n # left_s.p()\n if is_p(left_s):\n results += [l+[left_s] for l in pre_list]\n # results.p()\n if is_p(s):\n results += [[s]]\n return results\n\nif __name__ == '__main__':\n from minitest import *\n\n with test(Solution):\n Solution().partition(\"aa\").must_equal([[\"a\",\"a\"],[\"aa\"]])\n Solution().partition(\"abbab\").must_equal(\n [[\"a\",\"b\",\"b\",\"a\",\"b\"],[\"a\",\"b\",\"bab\"],[\"a\",\"bb\",\"a\",\"b\"],[\"abba\",\"b\"]])\n Solution().partition(\"aaba\").must_equal(\n [['a', 'aba'], ['a', 'a', 'b', 'a'], ['aa', 'b', 'a']])\n\n \n ","sub_path":"python/leetcode/dynamic_programming/131_Palindrome_Partitioning.py","file_name":"131_Palindrome_Partitioning.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"492884638","text":"# Copyright (c) 2016, Daniele Venzano\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Discovery API endpoint.\"\"\"\n\nfrom tornado.web import RequestHandler\n\nfrom zoe_api.api_endpoint import APIEndpoint # pylint: disable=unused-import\nfrom zoe_api.rest_api.utils import catch_exceptions, manage_cors_headers\n\n\nclass DiscoveryAPI(RequestHandler):\n \"\"\"The Discovery API endpoint.\"\"\"\n\n def initialize(self, **kwargs):\n \"\"\"Initializes the request handler.\"\"\"\n self.api_endpoint = kwargs['api_endpoint'] # type: APIEndpoint\n\n def set_default_headers(self):\n \"\"\"Set up the headers for enabling CORS.\"\"\"\n manage_cors_headers(self)\n\n def options(self):\n \"\"\"Needed for CORS.\"\"\"\n self.set_status(204)\n self.finish()\n\n @catch_exceptions\n def get(self, execution_id: int, service_group: str):\n \"\"\"HTTP GET 
method.\"\"\"\n self.api_endpoint.execution_by_id(0, 'admin', execution_id)\n if service_group != 'all':\n services = self.api_endpoint.service_list(0, 'admin', service_group=service_group, execution_id=execution_id)\n else:\n services = self.api_endpoint.service_list(0, 'admin', execution_id=execution_id)\n ret = {\n 'service_type': service_group,\n 'execution_id': execution_id,\n 'dns_names': [s.dns_name for s in services]\n }\n\n self.write(ret)\n\n def data_received(self, chunk):\n \"\"\"Not implemented as we do not use stream uploads\"\"\"\n pass\n","sub_path":"zoe_api/rest_api/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"650207609","text":"#!/usr/bin/python3\n\"\"\" puzzle queen challenge\n\"\"\"\nfrom sys import argv, exit\n\n\ndef place(N, row, col, result):\n \"\"\" place queens recursively \"\"\"\n while col < N:\n if isvalid(row, col, result):\n result.append([row, col])\n if row == N-1:\n print(result)\n result.pop()\n else:\n place(N, row+1, 0, result)\n col += 1\n if len(result) > 0:\n result.pop()\n return\n\n\ndef isvalid(row, col, result):\n \"\"\" check if the position is valid \"\"\"\n diag1 = [l[0]+l[1] for l in result]\n diag2 = [l[1]-l[0] for l in result]\n cols = [l[1] for l in result]\n rows = [l[0] for l in result]\n if row in rows or col in cols or row+col in diag1 or col-row in diag2:\n return False\n return True\n\nif __name__ == \"__main__\":\n length = len(argv)\n if length != 2:\n print(\"Usage: nqueens N\")\n exit(1)\n if argv[1].isdigit() is False:\n print(\"N must be a number\")\n exit(1)\n N = int(argv[1])\n if N < 4:\n print(\"N must be at least 4\")\n exit(1)\n result = []\n place(N, 0, 0, result)\n","sub_path":"0x08-python-more_classes/101-nqueens.py","file_name":"101-nqueens.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209645770","text":"# coding = utf-8\n\n'''\n@author = super_fazai\n@File : fib迭代器.py\n@Time : 2017/8/4 09:02\n@connect : superonesfazai@gmail.com\n'''\n\nclass FibIterator(object):\n \"\"\"(Fibonacci)斐波那契数列迭代器\"\"\"\n def __init__(self, n):\n self.n = n # 指明生成数列的前n个数\n self.current = 0 # current⽤来保存当前⽣成到数列中的第⼏个数了\n self.num1 = 0 # num1⽤来保存前前⼀个数, 初始值为数列中的第⼀个数0\n self.num2 = 1 # num2⽤来保存前⼀个数, 初始值为数列中的第⼆个数1\n\n def __next__(self): # 即num1, num2两个一起往后移\n '''被next()函数调⽤来获取下⼀个数'''\n if self.current < self.n:\n num = self.num1\n # 计算下一次迭代要返回的数据\n self.num1, self.num2 = self.num2, self.num1+self.num2\n self.current += 1\n return num\n else:\n raise StopIteration\n\n def __iter__(self):\n '''迭代器的__iter__返回⾃身即可'''\n return self\n\nif __name__ == '__main__':\n fib = FibIterator(20)\n for num in fib:\n print(num, end=' ')\n\n print('')\n # 除了for循环能接收可迭代对象, list, tuple等也能接收\n li = list(FibIterator(20))\n print(li)\n tp = tuple(FibIterator(20))\n print(tp)","sub_path":"python/my_py_notes_万物皆对象/算法/fib迭代器.py","file_name":"fib迭代器.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"336636836","text":"#!/usr/bin/python\n\nimport argparse, csv\n\nfrom classes import modify_otu\n\ndef main():\n\tparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('-i','--input-table-fp',help='file that is tabtab', 
required=True)\n\tparser.add_argument('-o','--output-fp',help='output base file path',required=True)\n\tparser.add_argument('-c',help='class to split on',required=True)\n\targs = parser.parse_args()\n\tholder = modify_otu.OtuTable(args.input_table_fp, args.c, args.output_fp)\n\tholder.write_all()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"split_on_state.py","file_name":"split_on_state.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598656211","text":"import random\nfrom re import escape\nfrom ..userinterface import UserInterface\nfrom ..commandmode import publics\nfrom .. import commands\n\n# All keys that can be entered by the user simulator\n# Esc is included more often, to keep the insertions relatively small\nkey_space = list(\n \"\"\"\n 1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM\n `-=[]\\\\;',./~_+{}|:\"<>?\n !@#$%&*()\n \\n\\t\\b\n \"\"\"\n) + 30 * ['Cancel'] + ['Esc', 'Up', 'Down', 'Left', 'Right']\n\ncommand_dict = publics(commands)\nforbidden_command_names = ['open_document', 'quit_document', 'force_quit', 'quit_all',\n 'formattext']\nforbidden_commands = [command_dict[name] for name in forbidden_command_names]\nfor name in forbidden_command_names:\n command_dict.pop(name)\ncommand_names = list(command_dict.keys())\ncommand_values = list(command_dict.values())\n# Sorting is needed to be able to reproduce a seeded random test case\ncommand_names.sort()\n\ncompound_input_space = command_values + key_space\n\n\nclass RandomizedUserSimulator(UserInterface):\n\n \"\"\"UserInterface which simulates some random behaviour for testing purposes.\"\"\"\n\n def __init__(self, document):\n UserInterface.__init__(self, document)\n self.nextkey = None\n self.offset = (0, 0)\n\n @property\n def viewport_size(self):\n \"\"\"Get viewport size.\"\"\"\n return (500, 500)\n\n @property\n def viewport_offset(self):\n \"\"\"Get and set viewport offset.\"\"\"\n return self.offset\n\n @viewport_offset.setter\n def viewport_offset(self, value):\n \"\"\"Get and set viewport offset.\"\"\"\n self.offset = value\n\n def quit(self, document):\n assert document is self.document\n\n def _getuserinput(self):\n if self.nextkey:\n nextkey = self.nextkey\n self.nextkey = None\n else:\n nextkey = self.newinput()\n return nextkey\n\n def peekinput(self):\n if not self.nextkey:\n self.nextkey = self.newinput()\n return self.nextkey\n\n def newinput(self):\n if not self.document.mode:\n command_name = random.choice(command_names)\n return command_dict[command_name]\n\n # If we are in a certain mode we try to construct a meaningful input space\n mode = self.document.mode\n input_space = ['Cancel']\n input_space.extend(\n [c for c in mode.allowedcommands if not c in forbidden_commands])\n\n if mode.keymap:\n input_space.extend(mode.keymap.values())\n else:\n input_space.extend(key_space)\n\n if not input_space:\n input_space = compound_input_space\n\n #print('Inputspace = ' + str(input_space))\n return random.choice(input_space)\n\n def prompt(self, prompt_string='>'):\n length = random.randint(1, 10)\n # Generate random string\n randomstring = ''.join(self.getkey() for _ in range(length))\n # Escape string to ensure a valid regex\n return escape(randomstring)\n\n def notify(self, message):\n pass\n\n def activate(self):\n pass\n\n def touch(self):\n 
pass\n","sub_path":"fate/test/randomized_userinterface.py","file_name":"randomized_userinterface.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627825286","text":"\"\"\"\nGraphQL definitions for the Authentication App\n\"\"\"\nimport datetime\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\nfrom graphene import AbstractType, Argument, Field, Float, Int, List, Mutation, \\\n NonNull, ObjectType, String, relay\nfrom graphene_django import DjangoObjectType\nfrom graphene_django.filter import DjangoFilterConnectionField\nfrom trading.models import TradingAccount\nfrom trading.graphql import GTradingAccount\nfrom stocks.graphql import GInvestmentBucket, GStock\nfrom stocks.models import InvestmentBucket, Stock\nfrom .models import Profile, UserBank\n\n\n# pylint: disable=too-few-public-methods\nclass GUser(DjangoObjectType):\n \"\"\"\n GraphQL representation of a User\n \"\"\"\n class Meta(object):\n \"\"\"\n Meta Model for User. We must make sure to not expose\n the whole usere object\n \"\"\"\n model = User\n only_fields = ('id', 'profile', 'username', 'userbank')\n interfaces = (relay.Node, )\n\n\nclass GProfile(DjangoObjectType):\n \"\"\"\n GraphQL representation of a Profile\n \"\"\"\n stock_find = List(\n GStock, args={'text': Argument(NonNull(String)), 'first': Argument(Int)})\n invest_suggestions = DjangoFilterConnectionField(\n GInvestmentBucket,\n )\n\n class Meta(object):\n \"\"\"\n Meta Model for Profile\n \"\"\"\n model = Profile\n only_fields = ('id', 'trading_accounts', 'stock_find')\n interfaces = (relay.Node, )\n\n @staticmethod\n def resolve_stock_find(_self, args, _context, _info):\n \"\"\"\n Finds a stock given a case insensitive name\n \"\"\"\n query = Stock.objects.filter(name__icontains=args['text'])\n if 'first' in args:\n query = query[:args['first']]\n return query\n\n @staticmethod\n def resolve_invest_suggestions(_data, _args, context, _info):\n \"\"\"\n Finds all the investment suggestions available to the user\n \"\"\"\n return InvestmentBucket.objects.filter(Q(owner=context.user.profile) | Q(public=True))\n\n\nclass DataPoint(object):\n \"\"\"\n Dummy class to represent a date / value DataPoint\n \"\"\"\n def __init__(self, date, value):\n self.date = date\n self.value = value\n\n\nclass GDataPoint(ObjectType):\n \"\"\"\n GraphQL definition of the DataPoint above\n \"\"\"\n date = String()\n value = Float()\n\n\nclass GUserBank(DjangoObjectType):\n \"\"\"\n GraphQL representation of a UserBank\n \"\"\"\n balance = Float()\n income = Float()\n name = String()\n outcome = Float()\n history = List(GDataPoint, args={'start': Argument(NonNull(String))})\n\n class Meta(object):\n \"\"\"\n Meta Model for UserBank\n \"\"\"\n model = UserBank\n only_fields = ('id', 'balance', 'income', 'outcome')\n interfaces = (relay.Node, )\n\n @staticmethod\n def resolve_history(data, args, context, _info):\n \"\"\"\n Builds the financial history for the user\n \"\"\"\n start = args['start']\n end = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n response = context.plaid.Transactions.get(\n data.access_token,\n start_date=start,\n end_date=end\n )\n transactions = response['transactions']\n value = GUserBank.resolve_balance(data, {}, context, None)\n value_list = [DataPoint(end, value)]\n for transaction in transactions:\n value = value - transaction['amount']\n if not value_list[-1].date == transaction['date']:\n 
value_list.append(DataPoint(transaction['date'], value))\n return value_list\n\n @staticmethod\n def resolve_balance(data, _args, context, _info):\n \"\"\"\n Finds the current balance of the user\n \"\"\"\n balances = context.plaid.Accounts.balance.get(data.access_token)['accounts']\n extracted_balances = [((b['balances']['available']\n if b['balances']['available'] is not None else\n b['balances']['current']) *\n (1\n if b['subtype'] == 'credit card' else -1))\n for b in balances]\n balance = sum(extracted_balances)\n return float(balance)\n\n @staticmethod\n def resolve_name(data, _args, context, _info):\n \"\"\"\n Returns the name of the bank account\n \"\"\"\n name = context.plaid.Accounts.get(data.access_token)['accounts'][0]['name']\n return name\n\n @staticmethod\n def resolve_income(data, _args, context, _info):\n \"\"\"\n Calculates the income a user has per month\n \"\"\"\n start = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime(\"%Y-%m-%d\")\n end = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n response = context.plaid.Transactions.get(\n data.access_token,\n start_date=start,\n end_date=end,\n )\n transactions = response['transactions']\n plus = sum(filter(lambda x: x > 0, [tx['amount'] for tx in transactions]))\n return float(plus)\n\n @staticmethod\n def resolve_outcome(data, _args, context, _info):\n \"\"\"\n Calculates the expenses a user has\n \"\"\"\n start = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime(\"%Y-%m-%d\")\n end = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n response = context.plaid.Transactions.get(\n data.access_token,\n start_date=start,\n end_date=end,\n )\n transactions = response['transactions']\n plus = sum(filter(lambda x: x < 0, [tx['amount'] for tx in transactions]))\n return float(plus)\n\n\n# pylint: disable=no-init\nclass Query(AbstractType):\n \"\"\"\n Query represents the entry method for a GraphQL request\n \"\"\"\n viewer = Field(GUser, )\n\n @staticmethod\n def resolve_viewer(_self, _args, context, _info):\n \"\"\"\n The viewer represents the current logged in user\n \"\"\"\n if not context.user.is_authenticated():\n return None\n return context.user\n# pylint: enable=no-init\n\n\nclass AddTradingAccount(Mutation):\n \"\"\"\n AddTradingAccount creates a new TradingAccount for the user\n \"\"\"\n class Input(object):\n \"\"\"\n Input to create a trading account. 
Right now it's only a name.\n \"\"\"\n name = String()\n account = Field(lambda: GTradingAccount)\n\n @staticmethod\n def mutate(_self, args, context, _info):\n \"\"\"\n Creates a TradingAccount and saves it to the DB\n \"\"\"\n account = TradingAccount(\n profile=context.user.profile,\n account_name=args['name']\n )\n account.save()\n return AddTradingAccount(account=account)\n# pylint: enable=too-few-public-methods\n","sub_path":"authentication/graphql.py","file_name":"graphql.py","file_ext":"py","file_size_in_byte":6857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341562206","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 6 12:25:37 2019\n\n@author: yohei\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\np = np.zeros((401, 201))\nC = []\nwith open('data3.csv', 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if row != []:\n c = row[0].split(' ')[2]\n C.append(float(c))\n for i in range(1, 401):\n for j in range(1, 201):\n p[i-1][j-1] += C[201*(i-1)+j-2]\n\nX = np.linspace(-10.0, 30.0, 401)\nY = np.linspace(-10.0, 10.0, 201)\nprint(p)\n#print(p)\n#print(len(X)*len(Y))\nk = 0\nfor i in range(len(p)):\n for j in range(len(p[i])):\n if p[i][j] < -0:\n k+=1\nprint(k)\n \nplt.pcolormesh(X, Y, p.T, cmap='jet')\nplt.colorbar()\nplt.xlabel('x')\nplt.ylabel('y')\ncont = plt.contour(X, Y, p.T)\nplt.title('Tstep=10000, time=200[s]')\nplt.savefig('object_3.png')","sub_path":"src/visualization3.py","file_name":"visualization3.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635793993","text":"import numpy as np\n\n\ndef softmax(x):\n \"\"\"batch ok\"\"\"\n exps = np.exp(x)\n return exps / np.sum(exps, axis=1, keepdims=True)\n\n\ndef cross_en(p, y):\n d = y.shape[0]\n return -np.log(p[range(d), y]).mean()\n\n\ndef delta_cross_entropy(x, y):\n \"\"\"batch ok\"\"\"\n m = y.shape[0]\n grad = softmax(x)\n grad[range(m), y] -= 1\n grad = grad / m\n return grad\n\n\ndef one_layer(x, y, w, b, lr):\n loss = 1\n while loss > 1e-1:\n fc_out = x.dot(w) + b\n # print(\"fc out\", fc_out)\n sm_out = softmax(fc_out)\n # print(\"sm out\", sm_out)\n loss = cross_en(sm_out, y)\n print(\"loss: \", loss)\n\n # grad_wrt_fc_out.shape ( n_sample, n_classes )\n grad_wrt_fc_out = delta_cross_entropy(fc_out, y)\n # dw.shape ( x_dim, fc_out_dim )\n dw = x.T.dot(grad_wrt_fc_out)\n db = grad_wrt_fc_out.sum(axis=0)\n w -= lr * dw\n b -= lr * db\n\n\nif __name__ == \"__main__\":\n x_dim = 50\n fc_out_dim = 100\n n_sample = 3\n n_classes = 3\n learning_rate = 0.1\n\n x_train = np.random.random((n_sample, x_dim))\n y_train = np.random.randint(n_classes, size=n_sample)\n w_init = np.random.randn(x_dim, fc_out_dim) * np.sqrt(2.0 / fc_out_dim)\n b_init = np.zeros(fc_out_dim)\n one_layer(x_train, y_train, w_init, b_init, learning_rate)\n","sub_path":"002_one_layer_backprops.py","file_name":"002_one_layer_backprops.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599181545","text":"# USAGE\n# python long_exposure.py --video videos/river_02.mov --output river_02.png --time 5\n#new --> python3 longExposure.py --fileName file --time 5\n\n# import the necessary packages\nimport argparse\nimport imutils\nimport time\nimport cv2\n\n# construct the argument parse and parse the arguments\nap = 
argparse.ArgumentParser()\n#ap.add_argument(\"-v\", \"--video\", required=True,\n#\thelp=\"path to input video file\")\n#ap.add_argument(\"-o\", \"--output\", required=True,\n#\thelp=\"path to output 'long exposure'\")\nap.add_argument(\"-f\", \"--fileName\", required=True,\n\thelp=\"filename\")\nap.add_argument(\"-t\", \"--time\",required = True,\n\thelp=\"time to record video'\")\nargs = vars(ap.parse_args())\n\n\n# Create a VideoCapture object\ncap = cv2.VideoCapture(0)\n\n# Check if camera opened successfully\nif (cap.isOpened() == False): \n print(\"Unable to read camera feed\")\n\n# Default resolutions of the frame are obtained.The default resolutions are system dependent.\n# We convert the resolutions from float to integer.\nframe_width = int(cap.get(3))\nframe_height = int(cap.get(4))\n\n# Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.\nout = cv2.VideoWriter(\"/home/pi/Videos/\"+args[\"fileName\"]+\".mp4\",cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\nstartTime = time.time()\nwhile(int(time.time())-startTime < int(args[\"time\"])):\n ret, frame = cap.read()\n\n if ret == True: \n \n # Write the frame into the file 'output.avi'\n #frame = cv2.flip(frame, flipCode = -1)\n out.write(frame)\n\n # Display the resulting frame \n cv2.imshow('frame',frame)\n\n # Press Q on keyboard to stop recording\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Break the loop\n else:\n break \n\n# When everything done, release the video capture and video write objects\ncap.release()\nout.release()\n\n# Closes all the frames\ncv2.destroyAllWindows() \ntime.sleep(5)\n\n# initialize the Red, Green, and Blue channel averages, along with\n# the total number of frames read from the file\n(rAvg, gAvg, bAvg) = (None, None, None)\ntotal = 0\n\n# open a pointer to the video file\nprint(\"[INFO] opening video file pointer...\")\nstream = cv2.VideoCapture(\"/home/pi/Videos/\"+args[\"fileName\"]+\".mp4\")\nprint(\"[INFO] computing frame averages (this will take awhile)...\")\n\n# loop over frames from the video file stream\nwhile True:\n\t# grab the frame from the file stream\n\t(grabbed, frame) = stream.read()\n\n\t# if the frame was not grabbed, then we have reached the end of\n\t# the sfile\n\tif not grabbed:\n\t\tbreak\n\n\t# otherwise, split the frmae into its respective channels\n\t(B, G, R) = cv2.split(frame.astype(\"float\"))\n\n\t# if the frame averages are None, initialize them\n\tif rAvg is None:\n\t\trAvg = R\n\t\tbAvg = B\n\t\tgAvg = G\n\n\t# otherwise, compute the weighted average between the history of\n\t# frames and the current frames\n\telse:\n\t\trAvg = ((total * rAvg) + (1 * R)) / (total + 1.0)\n\t\tgAvg = ((total * gAvg) + (1 * G)) / (total + 1.0)\n\t\tbAvg = ((total * bAvg) + (1 * B)) / (total + 1.0)\n\n\t# increment the total number of frames read thus far\n\ttotal += 1\n\n# merge the RGB averages together and write the output image to disk\navg = cv2.merge([bAvg, gAvg, rAvg]).astype(\"uint8\")\ncv2.imwrite(\"/home/pi/Pictures/\"+args[\"fileName\"]+\".png\", avg)\n\n# do a bit of cleanup on the file pointer\nstream.release()\n","sub_path":"long_exposure/rec_longExposure.py","file_name":"rec_longExposure.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"333082005","text":"#\n# Decription: This program will analize the upper and lower case content of\n# a given string.\n#\n\n\n\nsampleString = (\"We the People of the United 
States, in Order to form a more perfect Union,\"\n\" establish Justice, insure domestic Tranquility, provide for the common defence, promote the general Welfare,\"\n\" and secure the Blessings of Liberty to ourselves and our Posterity, do ordain and establish this Constitution\"\n\" for the United States of America.\")\n\n\n# 1) Start by counting the number of upper, lower, and other characters in the provided string. Print your results.\n\n# 2) Next, invert the case of all of the text in the sample string. Print the resulting string.\n\n# 3) Place all vowels in one list. \n\n# 4) Place all consonants in another list. \n\n# 5) Find the decimal value of each character and place all characters that are multiples of 3 in another list.\n\nlowers = 0\nuppers = 0\nnot_letters = 0\n\nvowel_list = []\nconsonant_list = []\n\nnew_string = \"\"\n\nfor char in sampleString:\n    if char.islower():\n        new_string += char.upper()\n        lowers += 1\n    elif char.isupper():\n        new_string += char.lower()\n        uppers += 1\n    else:\n        new_string += char\n        not_letters += 1\n\n    if char.lower() in \"aeiouy\":\n        vowel_list.append(char)\n    elif char.isalpha():\n        consonant_list.append(char)\n    \n    \n\nprint(lowers, uppers, not_letters)\nprint()\nprint(new_string)\nprint()\nprint(vowel_list)\nprint()\nprint(consonant_list)\nprint()\nprint([ord(c) for c in sampleString if ord(c)%3==0])\n\n\n\n\n\n\n    \n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"484096644","text":"from Tkinter import *\n\ndef load_digits(filename):\n    \"\"\"\n    \"\"\"\n\n    f = open(filename, 'r')\n\n    # We'll return a list of digit vectors and class vectors\n    digits = []\n    classes = []\n\n    # Loop through \n    \n    while f.readline():   # Just the instance name - unused\n        # The next 14 lines indicate a new digit\n        # Flatten this to a 196 element vector\n        digit = [] \n        for i in range(14):\n            line = f.readline()\n            digit = digit + [float(data) for data in line.split()]\n        digits.append(digit)\n\n        # The last line is the classes\n        line = f.readline()\n        digit_class = [int(data) for data in line.split()]\n        classes.append(digit_class)\n\n    # All done!\n    f.close()\n\n    return digits, classes\n\n    \ndef show_digit(digit):\n    \"\"\"\n    Create a window and show the digit\n    \"\"\"\n\n    # Create a window for the digit.  The digit is 14x14, so create a window \n    # which is 150x150. 
We'll leave a border of 5 pixels, and each digit\n    # \"pixel\" will be 10x10\n\n    master = Tk()\n\n    canvas = Canvas(master, width=150, height=150)\n    canvas.pack()\n\n    # Draw a rectangle for each pixel in the digit\n    for i in range(14):\n        y = 10*i + 5\n        for j in range(14):\n            x = 10*j + 5\n            \n\n            # Determine the hex value of this pixel color\n            pixel_value = digit[14*i + j]\n            pixel_hex = hex(int(pixel_value*255)).replace('0x','')\n            pixel_hex = '#' + pixel_hex + pixel_hex + pixel_hex\n            \n            # Draw the rectangle\n            canvas.create_rectangle(x, y, x+10, y+10, fill=pixel_hex)\n\n    # Done!\n    return canvas\n\n\ndef digit_gray_to_binary(digit, threshold = 0.5):\n    \"\"\"\n    \"\"\"\n\n    # Set the digit to 1 if greater than the threshold, 0 otherwise\n    return [1.0 if pixel >= threshold else 0.0 for pixel in digit]\n","sub_path":"CRBM/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"343351930","text":"from flask import Flask, jsonify, abort, make_response, request\nfrom flask.ext.httpauth import HTTPBasicAuth\napp = Flask(__name__)\nauth = HTTPBasicAuth()\n\nusers = [\n\t{\n\t\t\"nome\": \"Lucas\",\n\t\t\"id\": 1,\n\t\t\"idade\": 15,\n\t\t\"email\": \"example@example.com\"\n\t},\n\t{\n\t\t\"nome\": \"Dim\",\n\t\t\"id\": 2,\n\t\t\"idade\": 16,\n\t\t\"email\": \"example2@example.com\"\n\t}\n]\n@app.route('/user/<int:id_usuario>', methods=['GET'])\n@auth.login_required\ndef get_user(id_usuario):\n\tuser = [user for user in users if user[\"id\"] == id_usuario]\n\tif len(user) == 0:\n\t\tabort(404)\n\treturn jsonify({\"user\":user[0]})\n@app.errorhandler(404)\ndef not_found(error):\n\treturn make_response(jsonify({'Error': 'Not found'}), 404)\n\n@app.route('/user', methods=['POST'])\n@auth.login_required\ndef create_user():\n\tuser = {\n\t\t\"nome\": request.json.get(\"nome\", \"\"),\n\t\t\"id\": users[-1]['id'] + 1,\n\t\t\"idade\": request.json.get(\"idade\"),\n\t\t\"email\": request.json.get(\"email\", \"\")\n\t}\n\t\n\tusers.append(user)\n\treturn jsonify({\"user\": user}), 201\n\t\n@app.route('/remover/user/<int:id_usuario>', methods=['DELETE'])\n@auth.login_required\ndef delete_user(id_usuario):\n\tuser = [user for user in users if user[\"id\"] == id_usuario]\n\tif len(user) == 0:\n\t\tabort(404)\n\tusers.remove(user[0])\n\treturn jsonify({\"result\": True})\n\n@app.route('/atualizar/user/<int:id_usuario>', methods=['PUT'])\n@auth.login_required\ndef update_user(id_usuario):\n\n\tuser = [user for user in users if user[\"id\"] == id_usuario]\n \n\tif (len(user) == 0):\n\t\tabort(404)\n\t\n\tif (not request.json):\n\t\tabort(400)\n \n\t\n\t\n\tuser[0]['nome'] = request.json.get('nome', user[0]['nome'])\n\tuser[0]['email'] = request.json.get('email', user[0]['email'])\n\tuser[0]['idade'] = request.json.get('idade', user[0]['idade'])\n\t\n\treturn jsonify({'user': user[0]})\n\t\n@auth.get_password\ndef get_password(username):\n    if username == 'Example':\n        return '12345'\n    return None\n\n@auth.error_handler\ndef unauthorized():\n    return make_response(jsonify({'error': 'Unauthorized access'}), 401)\n\t\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","sub_path":"cadastro2.py","file_name":"cadastro2.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"199767257","text":"# Copyright (c) Facebook, Inc. and its affiliates. 
(http://www.facebook.com)\n# -*- coding: utf-8 -*-\n\n\"\"\"\nmapillary.models.geojson\n~~~~~~~~~~~~~~~~~~~~~~~~\n\nThis module contains the class implementation for the geojson\n\nFor more information about the API, please check out\nhttps://www.mapillary.com/developer/api-documentation/.\n\n:copyright: (c) 2021 Facebook\n:license: MIT LICENSE\n\"\"\"\n\n# Package\nimport json\n\n# Local\n\n# # Exceptions\nfrom models.exceptions import InvalidOptionError\n\n\nclass Properties:\n \"\"\"Representation for the properties in a GeoJSON\n\n :param properties: The properties as the input\n :type properties: dict\n\n '''\n :raise InvalidOptionError: Raised when the geojson passed is the invalid type - not a dict\n '''\n\n :return: A class representation of the model\n :rtype: \n \"\"\"\n\n def __init__(self, *properties, **kwargs) -> None:\n \"\"\"Initializing Properties constructor\"\"\"\n\n # Validate that the geojson passed is indeed a dictionary\n if not isinstance(properties, dict):\n\n # Raise InvalidOptionError\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"Properties.__init__.properties\",\n # The invalid value passed\n value=properties,\n # The keys that should be passed instead\n options=[\"dict\"],\n )\n\n for item in properties:\n for key in item:\n setattr(self, key, item[key])\n for key in kwargs:\n setattr(self, key, kwargs[key])\n\n def to_dict(self):\n \"\"\"Return the dictionary representation of the Properties\"\"\"\n\n attr_representation = [\n key for key in dir(self) if not key.startswith(\"__\") and key != \"to_dict\"\n ]\n\n return {key: getattr(self, key) for key in attr_representation}\n\n def __str__(self):\n \"\"\"Return the informal string representation of the Properties\"\"\"\n\n attr_representation = [\n key for key in dir(self) if not key.startswith(\"__\") and key != \"to_dict\"\n ]\n\n attr_key_value_pair = {key: getattr(self, key) for key in attr_representation}\n\n return f\"{attr_key_value_pair}\"\n\n def __repr__(self):\n \"\"\"Return the formal string representation of the Properties\"\"\"\n\n attr_representation = [\n key for key in dir(self) if not key.startswith(\"__\") and key != \"to_dict\"\n ]\n\n attr_key_value_pair = {key: getattr(self, key) for key in attr_representation}\n\n return f\"{attr_key_value_pair}\"\n\n\nclass Geometry:\n \"\"\"Representation for the geometry in a GeoJSON\n\n :param geometry: The geometry as the input\n :type geometry: dict\n\n '''\n :raise InvalidOptionError: Raised when the geometry passed is the invalid type - not a dict\n '''\n\n :return: A class representation of the model\n :rtype: \n \"\"\"\n\n def __init__(self, geometry) -> None:\n \"\"\"Initializing Geometry constructor\"\"\"\n\n # Validate that the geojson passed is indeed a dictionary\n if not isinstance(geometry, dict):\n\n # Raise InvalidOptionError\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"Geometry.__init__.geometry\",\n # The invalid value passed\n value=geometry,\n # The keys that should be passed instead\n options=[\"dict\"],\n )\n\n # Setting the type of the selected geometry\n self.type: str = geometry[\"type\"]\n\n # Setting the coordinates of the geometry\n self.coordinates: list = geometry[\"coordinates\"]\n\n def to_dict(self):\n \"\"\"Return dictionary representation of the geometry\"\"\"\n\n return {\"type\": self.type, \"coordinates\": self.coordinates}\n\n def __str__(self):\n \"\"\"Return the informal string representation of the Geometry\"\"\"\n\n return f\"{{'type': {self.type}, 
'coordinates': {self.coordinates}}}\"\n\n def __repr__(self):\n \"\"\"Return the formal string representation of the Geometry\"\"\"\n\n return f\"{{'type': {self.type}, 'coordinates': {self.coordinates}}}\"\n\n\nclass Feature:\n \"\"\"Representation for a feature in a feature list\n\n :param geojson: The GeoJSON as the input\n :type geojson: dict\n\n '''\n :raise InvalidOptionError: Raised when the geojson passed is the invalid type - not a dict\n '''\n\n :return: A class representation of the model\n :rtype: \n \"\"\"\n\n def __init__(self, feature: dict) -> None:\n \"\"\"Initializing Feature constructor\"\"\"\n\n # Validate that the geojson passed is indeed a dictionary\n if not isinstance(feature, dict):\n\n # If not, raise `InvalidOptionError`\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"Feature.__init__.feature\",\n # The invalid value passed\n value=feature,\n # The type of value that should be passed instead\n options=[\"dict\"],\n )\n\n # Setting the type of the selected FeatureList\n self.type = \"Feature\"\n\n # Setting the `geometry` property\n self.geometry = Geometry(feature[\"geometry\"])\n\n # Setting the `properties` property\n self.properties = Properties(feature[\"properties\"])\n\n def to_dict(self) -> dict:\n \"\"\"Return the dictionary representation of the Feature\"\"\"\n\n return {\n \"type\": self.type,\n \"geometry\": self.geometry.to_dict(),\n \"properties\": self.properties.to_dict(),\n }\n\n def __str__(self) -> str:\n \"\"\"Return the informal string representation of the Feature\"\"\"\n\n return (\n f\"{{\"\n f\"'type': '{self.type}', \"\n f\"'geometry': {self.geometry}, \"\n f\"'properties': {self.properties}\"\n f\"}}\"\n )\n\n def __repr__(self) -> str:\n \"\"\"Return the formal string representation of the Feature\"\"\"\n\n return (\n f\"{{\"\n f\"'type': {self.type}, \"\n f\"'geometry': {self.geometry}, \"\n f\"'properties': {self.properties}\"\n f\"}}\"\n )\n\n\nclass GeoJSON:\n \"\"\"Representation for a geojson\n\n :param geojson: The GeoJSON as the input\n :type geojson: dict\n\n '''\n :raise InvalidOptionError: Raised when the geojson passed is the invalid type - not a dict\n '''\n\n :return: A class representation of the model\n :rtype: \n\n Usage::\n >>> import mapillary as mly\n >>> from models.geojson import GeoJSON\n >>> mly.set_access_token('MLY|XXX')\n >>> data = mly.get_image_close_to(longitude=31, latitude=31)\n >>> geojson = GeoJSON(geojson=data)\n >>> type(geojson)\n ... \n >>> type(geojson.type)\n ... \n >>> type(geojson.features)\n ... \n >>> type(geojson.features[0])\n ... \n >>> type(geojson.features[0].type)\n ... \n >>> type(geojson.features[0].geometry)\n ... \n >>> type(geojson.features[0].geometry.type)\n ... \n >>> type(geojson.features[0].geometry.coordinates)\n ... \n >>> type(geojson.features[0].properties)\n ... \n >>> type(geojson.features[0].properties.captured_at)\n ... \n >>> type(geojson.features[0].properties.is_pano)\n ... 
\n \"\"\"\n\n def __init__(self, geojson: dict) -> None:\n \"\"\"Initializing GeoJSON constructor\"\"\"\n\n # Validate that the geojson passed is indeed a dictionary\n if isinstance(geojson, dict):\n\n # The GeoJSON should only contain the keys of `type`, `features`, if not empty,\n # raise exception\n if [key for key in geojson.keys() if key not in [\"type\", \"features\"]] != []:\n\n # Raise InvalidOptionError\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"GeoJSON.__init__.geojson\",\n # The invalid value passed\n value=geojson,\n # The keys that should be passed instead\n options=[\"type\", \"features\"],\n )\n\n # If the GeoJSON is not of type dictionary\n else:\n\n # Raise InvalidOptionError\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"GeoJSON.__init__.geojson\",\n # The invalid value passed\n value=geojson,\n # The keys that should be passed instead\n options=[\"type\", \"features\"],\n )\n\n # Validate that the geojson passed is indeed a dictionary\n if not isinstance(geojson[\"features\"], list):\n\n # If not, raise InvalidOptionError\n InvalidOptionError(\n # The parameter that caused the exception\n param=\"FeatureList.__init__.geojson['features']\",\n # The invalid value passed\n value=geojson[\"features\"],\n # The type of the value that should be passed\n options=[\"list\"],\n )\n\n # Setting the type parameter\n self.type: str = geojson[\"type\"]\n\n # Setting the list of features\n self.features: list = (\n [Feature(feature=feature) for feature in geojson[\"features\"]]\n if (geojson[\"features\"] != []) or (geojson[\"features\"] is not None)\n else []\n )\n\n def append_features(self, features: list) -> None:\n\n for feature in features:\n self.append_feature(feature)\n\n def append_feature(self, feature_inputs: dict) -> None:\n\n feature = Feature(feature=feature_inputs)\n\n if feature not in self.features:\n self.features.append(feature)\n\n def encode(self):\n\n return json.dumps(self.__dict__)\n\n def to_dict(self):\n \"\"\"Return the dict format representation of the GeoJSON\"\"\"\n\n return {\n \"type\": self.type,\n \"features\": [feature.to_dict() for feature in self.features]\n if self.features != []\n else [],\n }\n\n def __str__(self):\n \"\"\"Return the informal string representation of the GeoJSON\"\"\"\n\n return f\"{{'type': '{self.type}', 'features': {self.features}}}\"\n\n def __repr__(self):\n \"\"\"Return the formal string representation of the GeoJSON\"\"\"\n\n return f\"{{'type': '{self.type}', 'features': {self.features}}}\"\n","sub_path":"mapillary/models/geojson.py","file_name":"geojson.py","file_ext":"py","file_size_in_byte":10987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"543335143","text":"from information_extraction.pre_process_data import pre_process_file\nfrom information_extraction.tagging_automater import AutoTagProcessor\nfrom services.document_service import DocumentService\nfrom services.settings_service import SettingService\n\n'''\nEnvironments :\n1 - Local\n2 - Cloud \n\nSSL Verify is Boolean to handle the SSL authentication or bypass it.\n'''\n\n\ndef download_settings():\n '''\n Download settings of entity JSON for the annotation files to be righly formed.\n :return: N/A\n '''\n # download the project settings for comparision\n setting_service = SettingService(True, 2)\n setting_service.get_project_setting()\n\n\ndef upload_files():\n '''\n Upload generated Files which would the text file and then annotations files 
to tagtog for further review.\n :return: N/A\n '''\n # Using REST call to upload the data into Server.\n doc_service = DocumentService(True, 2)\n doc_service.push_annotated_verbatim_text(text_file_path, ann_file_path)\n\n\nif __name__ == '__main__':\n '''\n Pre-process files and tag files based on the rules. \n '''\n download_settings()\n pre_processed_file = pre_process_file('configs/input_data.txt')\n # Tag the files using the rules set.\n auto_tag_processor = AutoTagProcessor(pre_processed_file, '3ggpp')\n text_file_path, ann_file_path = auto_tag_processor.tag_words()\n upload_files()\n","sub_path":"apps/3gg_processor_app.py","file_name":"3gg_processor_app.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"643418878","text":"# Copyright 2016 the original author or authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__author__ = \"Angelo Torelli, Markus Gumbel, Julian Debatin\"\n__copyright__ = \"The authors\"\n__license__ = \"Apache 2\"\n__email__ = \"m.gumbel@hs-mannheim.de\"\n__status__ = \"Production\"\n\nfrom Steppable.ModuroSteppable import ModuroSteppable\n\nclass TransformationSteppable(ModuroSteppable):\n def __init__(self, _simulator, model, _frequency=1):\n\n ModuroSteppable.__init__(self, _simulator, model, _frequency)\n\n\n # def moduroStep(self, mcs):\n # for cell in self.cellList:\n # if cell.type == self.BASAL and not self.hasCertainNeighbor(cell, self.BASALMEMBRANE) and not self.onlyIntermediate:\n # self.transformInto(cell, self.INTERMEDIATE, mcs)\n # elif cell.type == self.INTERMEDIATE and self.hasCertainNeighbor(cell, self.MEDIUM):\n # self.transformInto(cell, self.UMBRELLA, mcs)\n # elif (cell.type == self.STEM or cell.type == self.UMBRELLA) and self.hasCertainNeighbor(cell, self.MEDIUM):\n # self.setInhibitionFlag(cell, False)\n\n\n def hasCertainNeighbor(self, cell, neighborType):\n totalMediumArea = 0\n hasCertainNeighbor = False\n for neighbor, commonSurfaceArea in self.getCellNeighborDataList(cell):\n if neighbor and neighborType != 0 and neighbor.type == neighborType and commonSurfaceArea > 0:\n hasCertainNeighbor = True\n elif not neighbor and commonSurfaceArea > 0:\n totalMediumArea += commonSurfaceArea\n if neighborType == 0:\n hasCertainNeighbor = True\n if hasCertainNeighbor:\n break\n if totalMediumArea == 0:\n self.setInhibitionFlag(cell, True)\n else:\n self.setInhibitionFlag(cell, False)\n return hasCertainNeighbor\n\n\n def transformInto(self, cell, cellType, mcs):\n cellDict = self.getDictionaryAttribute(cell)\n self.model.cellLifeCycleLogger.cellLifeCycleDeath(mcs, cell, cellDict)\n cell.type = cellType\n self.model.setCellAttributes(cellDict, cell, 0)\n self.model.cellLifeCycleLogger.cellLifeCycleBirth(mcs, cell, cellDict)\n\n\n def setInhibitionFlag(self, cell, flag):\n cellDict = self.getDictionaryAttribute(cell)\n cellDict['inhibited'] = 
flag","sub_path":"Simulation/Steppable/TransformationSteppable.py","file_name":"TransformationSteppable.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246785396","text":"# import requests\n# r = requests.post('https://httpbin.org/post?page=\"2\" &count=\"25\"')\n# # print(help(r))\n#\n# # with open('comic.png' , 'wb') as f:\n# # f.write(r.content)\n#\n# #print(r.status_code)\n#\n# print(r.text)\n\nposts = [\n {\n 'author': 'Corey Schafer',\n 'title': 'Blog Post 1',\n 'content': 'First post content',\n 'date_posted': 'April 20,2018'\n },\n {\n 'author': 'Muhammad Waqas',\n 'title': 'Blog Post 2',\n 'content': 'Second post content',\n 'date_posted': 'April 21,2018'\n },\n {\n 'author': 'Hamid Khan',\n 'title': 'Blog Post 3',\n 'content': 'Third post content',\n 'date_posted': 'April 29,2018'\n }\n]\nfrom flask import Flask, jsonify,request\n\napp = Flask(__name__)\n\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n if(request.method == 'POST'):\n some_json = request.get_json()\n return jsonify({'you sent': some_json})\n else:\n return jsonify(posts)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"probation period/python_Requests/rdemo.py","file_name":"rdemo.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318259004","text":"import sqlite3\nimport sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom table_display import *\nimport time\nfrom cascade_style_sheet import *\n\nclass AssignCustomer(QDialog):\n \"\"\"this class will be used to either assign a customer that has\n made a booking to a table or assign a customer that has not made a booking\n to a table\"\"\"\n\n def __init__(self,TableNumber):\n super().__init__()\n self.setWindowTitle(\"Assign customer to table {0}\".format(TableNumber))\n self.setMinimumSize(600,600)\n self.tableNumber = TableNumber\n self.setStyleSheet(css)\n\n self.titleFont = QFont()\n self.titleFont.setPointSize(15)\n\n\n self.todays_bookings_label = QLabel(\"Todays bookings for table {0}\".format(TableNumber))\n self.todays_bookings_label.setFont(self.titleFont)\n self.todays_bookings_label.setAlignment(Qt.AlignLeft)\n self.todays_bookings_label.setFixedWidth(400)\n\n\n self.main_assign_layout = QVBoxLayout()\n self.choose_customer = QHBoxLayout()\n self.create_combo_box(TableNumber)\n self.add_customer_layout = QGridLayout()\n self.create_complete_layout = QHBoxLayout()\n \n\n self.choose_customer.addWidget(self.customer_combo_box)\n self.select_customer = QPushButton(\"Select\")\n self.choose_customer.addWidget(self.select_customer)\n self.select_customer.clicked.connect(self.select_connect) \n \n #create buttons\n self.create_complete = QPushButton(\"Create\")\n self.create_complete.clicked.connect(self.create_booking)\n \n #labels\n self.table_number_label = QLabel(\"Table Number : \")\n self.number_of_people_label = QLabel(\"Number Of People : \")\n self.time_arrived_label = QLabel(\"Time Of Arrival : \")\n self.date_arrived_label = QLabel(\"Date Of Arrival : \")\n\n self.systemtime = time.strftime(\"%H:%M\")\n self.system_time_label = QLineEdit(self.systemtime)\n self.system_time_label.setReadOnly(True)\n sizehint = self.system_time_label.sizeHint()\n self.system_time_label.setMaximumSize(sizehint)\n\n self.systemdate = time.strftime(\"%d/%m/%Y\")\n self.system_date_label = QLineEdit(self.systemdate)\n 
self.system_date_label.setReadOnly(True)\n self.system_date_label.setMaximumSize(sizehint)\n\n self.display_table_number = QLineEdit(\"{0}\".format(TableNumber))\n self.display_table_number.setReadOnly(True)\n self.display_table_number.setMaximumSize(sizehint)\n\n regexp = QRegExp(\"^\\\\d\\\\d?$\")\n validator = QRegExpValidator(regexp)\n self.input_number_of_people = QLineEdit()\n self.input_number_of_people.setValidator(validator)\n self.input_number_of_people.setMaximumSize(sizehint)\n\n\n displayQuery = \"\"\"SELECT\n Customers.FirstName,\n Customers.LastName,\n Bookings.NumberOfPeople,\n Bookings.Time\n FROM Customers\n INNER JOIN Bookings\n ON Customers.CustomerID = Bookings.CustomerID\n WHERE Bookings.Date = '{0}'\n AND Bookings.TableNumber = {1}\n \"\"\".format(self.systemdate,TableNumber)\n\n self.display_customers = DisplayTable()\n self.display_customers.show_results(displayQuery)\n\n\n self.add_customer_layout.addWidget(self.table_number_label,0,0)\n self.add_customer_layout.addWidget(self.display_table_number,0,1)\n self.add_customer_layout.addWidget(self.time_arrived_label,1,0)\n self.add_customer_layout.addWidget(self.date_arrived_label,2,0)\n self.add_customer_layout.addWidget(self.system_time_label,1,1)\n self.add_customer_layout.addWidget(self.system_date_label,2,1)\n self.add_customer_layout.addWidget(self.number_of_people_label,3,0)\n self.add_customer_layout.addWidget(self.input_number_of_people,3,1)\n self.add_customer_layout.addWidget(self.create_complete,4,0,2,2) \n\n self.assign_street_box = QGroupBox(\"Customer that has not booked in advance\")\n self.assign_street_box.setLayout(self.add_customer_layout)\n\n self.assign_booked_box = QGroupBox(\"Customer that has booked in advance\")\n self.assign_booked_box.setLayout(self.choose_customer)\n\n self.main_assign_layout.addWidget(self.todays_bookings_label)\n self.main_assign_layout.addWidget(self.display_customers)\n self.main_assign_layout.addWidget(self.assign_booked_box)\n self.main_assign_layout.addWidget(self.assign_street_box) \n self.setLayout(self.main_assign_layout)\n \n self.exec_()\n\n def create_booking(self):\n #create bookingID for customer that has walked in\n TableNumber = self.display_table_number.text()\n CustomerID = 1\n NumberOfPeople = self.input_number_of_people.text()\n Date = self.systemdate\n Time = self.systemtime\n \n Booking = (CustomerID,TableNumber,NumberOfPeople,Date,Time)\n\n\n\n if len(NumberOfPeople) > 0 and (int(NumberOfPeople)>0):\n\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n sql = \"insert into Bookings(CustomerID, TableNumber, NumberOfPeople, Date, Time) values (?,?,?,?,?)\"\n cursor.execute(\"PRAGMA foreign_keys = ON\")\n cursor.execute(sql,Booking)\n db.commit()\n \n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"select * from Bookings where CustomerID = {0} and TableNumber = {1} and NumberOfPeople = {2} and Date = '{3}' and Time = '{4}' \".format(CustomerID, TableNumber, NumberOfPeople, Date, Time))\n self.bookingDetails = cursor.fetchone()\n\n self.close()\n return self.bookingDetails\n else:\n print(\"Please enter a valid number.\")\n\n \n\n def select_connect(self):\n TodaysDate = time.strftime(\"%d/%m/%Y\")\n customerCurrentIndex = self.customer_combo_box.currentIndex()\n print(\"Customer : {0}\".format(customerCurrentIndex))\n CustomerID = self.CustomerList[customerCurrentIndex]\n print(\"Customer ID: {0}\".format(CustomerID))\n \n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = 
db.cursor()\n cursor.execute(\"select * from Bookings where CustomerID = {0} and TableNumber = {1} and Date = '{2}'\".format(CustomerID, self.tableNumber, TodaysDate))\n self.bookingDetails = cursor.fetchone() \n print(self.bookingDetails)\n\n self.close()\n \n return self.bookingDetails\n\n def create_combo_box(self,TableNumber):\n self.CustomerList = []\n CustomerLastName = []\n TodaysDate = time.strftime(\"%d/%m/%Y\")\n\n ## get all customer IDs that are on table _\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"select CustomerID from Bookings where TableNumber = {0} and Date = '{1}'\".format(TableNumber,TodaysDate))\n customers = cursor.fetchall()\n for each in customers:\n self.CustomerList.append(each[0]) \n\n ## get all last names from previous fetchall \n for customer in self.CustomerList:\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"select LastName from Customers where CustomerID = {0}\".format(customer))\n customer = cursor.fetchone()\n CustomerLastName.append(customer[0]) \n \n #create combo, insert all last names from fetchall\n self.customer_combo_box = QComboBox(self)\n for each in CustomerLastName:\n self.customer_combo_box.addItem(each)\n\nif __name__ == \"__main__\":\n TableNumber = 1\n application = QApplication(sys.argv)\n window = AssignCustomer(TableNumber)\n window.show()\n window.raise_()\n application.exec_() # PyQt4 exposes exec_(), not exec()\n","sub_path":"Implementation/GUI/assign_table_customer.py","file_name":"assign_table_customer.py","file_ext":"py","file_size_in_byte":8206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544744774","text":"\"\"\"\nGitHub extensions to the new-commit view.\n\"\"\"\n\nimport re\n\nimport sublime\nfrom sublime_plugin import TextCommand\n\nfrom ...core.git_command import GitCommand\nfrom .. import github\nfrom .. 
import git_mixins\n\n\nclass GsShowGithubIssuesCommand(TextCommand, GitCommand, git_mixins.GithubRemotesMixin):\n\n \"\"\"\n Display a panel of GitHub issues to either:\n\n 1) the remote repo, if default_repo is True, or\n 2) another repo on the same remote, if default_repo\n is False.\n\n After the user makes their selection, insert the issue\n number at the current cursor position.\n \"\"\"\n\n def run(self, edit, default_repo=True):\n if not default_repo:\n first_cursor = self.view.sel()[0].begin()\n text_before_cursor = self.view.substr(sublime.Region(0, first_cursor))\n nondefault_repo = re.search(r\"([a-zA-Z\\-_0-9\\.]+)/([a-zA-Z\\-_0-9\\.]+)#$\", text_before_cursor).groups()\n else:\n nondefault_repo = None\n\n sublime.set_timeout_async(lambda: self.run_async(nondefault_repo))\n\n def run_async(self, nondefault_repo):\n remote = github.parse_remote(self.get_integrated_remote_url())\n\n if nondefault_repo:\n owner, repo_name = nondefault_repo\n remote = github.GitHubRepo(\n url=\"\",\n fqdn=remote.fqdn,\n owner=owner,\n repo=repo_name,\n token=remote.token\n )\n\n issues = github.get_issues(remote)\n\n if not issues:\n return\n\n self.menu_items = [\"{} - {}\".format(issue[\"number\"], issue[\"title\"]) for issue in issues]\n self.view.show_popup_menu(self.menu_items, self.on_done)\n\n def on_done(self, selection_id):\n if selection_id != -1:\n selection = self.menu_items[selection_id]\n number = selection.split(\" \")[0]\n self.view.run_command(\"gs_insert_text_at_cursor\", {\"text\": number})\n\n\nclass GsShowGithubContributorsCommand(TextCommand, GitCommand):\n\n \"\"\"\n Query github for a list of people that have contributed to the GitHub project\n setup as a remote for the current Git project, and display that list the the\n user. When a selection is made, insert that selection at the current cursor\n position.\n \"\"\"\n\n def run(self, edit):\n sublime.set_timeout_async(lambda: self.run_async())\n\n def run_async(self):\n default_remote_name, default_remote = self.get_remotes().popitem(last=False)\n remote = github.parse_remote(default_remote)\n\n contributors = github.get_contributors(remote)\n\n if not contributors:\n return\n\n self.menu_items = [contributor[\"login\"] for contributor in contributors]\n self.view.show_popup_menu(self.menu_items, self.on_done)\n\n def on_done(self, selection_id):\n if selection_id != -1:\n selection = self.menu_items[selection_id]\n self.view.run_command(\"gs_insert_text_at_cursor\", {\"text\": selection})\n","sub_path":"github/commands/commit.py","file_name":"commit.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"24396499","text":"#!/usr/bin/env python\n\n# tile-generator\n#\n# Copyright (c) 2015-Present Pivotal Software, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\nfrom setuptools import setup\nimport os\nimport sys\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\ndef read_readme():\n\ttry:\n\t\timport pypandoc\n\t\treturn pypandoc.convert('README.md', 'rst')\n\texcept ImportError:\n\t\twith open(os.path.join(here, 'README.md')) as f:\n\t\t\treturn f.read()\n\ndef get_version():\n\tversion_file = os.path.join(here, 'version.txt')\n\ttry:\n\t\twith open(version_file) as f:\n\t\t\treturn f.read()\n\texcept:\n\t\treturn '0.0.0'\n\nsetup(\n\tname = \"tile-generator\",\n\tversion = get_version(),\n\tdescription = 'Tools supporting development of Pivotal Cloud Foundry services and add-ons.',\n\tlong_description = read_readme(),\n\turl = 'https://github.com/cf-platform-eng/tile-generator',\n\tauthor = 'Pivotal Cloud Foundry Platform Engineering',\n\tlicense = 'Apache 2.0',\n\tclassifiers = [\n\t\t'Development Status :: 4 - Beta',\n\t\t'Environment :: Console',\n\t\t'Intended Audience :: Developers',\n\t\t'License :: OSI Approved :: Apache Software License',\n\t\t'Programming Language :: Python :: 2 :: Only',\n\t\t'Topic :: Software Development',\n\t\t'Topic :: Software Development :: Code Generators',\n\t],\n\tkeywords = [\n\t\t'pivotal cloud foundry',\n\t\t'tile',\n\t\t'generator'\n\t],\n\tpackages = [ 'tile_generator' ],\n\tinstall_requires = [\n\t\t'Cerberus>=1.1',\n\t\t'click>=6.2',\n\t\t'Jinja2>=2.8',\n\t\t'PyYAML>=3.1',\n\t\t'docker-py>=1.6.0',\n\t\t'requests>=2.9.1,<2.11',\n\t\t'requests-toolbelt',\n\t\t'mock>=2.0.0',\n\t\t'pexpect>=4.2.1'\n\t],\n\tinclude_package_data = True,\n\tentry_points = {\n\t\t'console_scripts': [\n\t\t\t'tile = tile_generator.tile:cli',\n\t\t\t'pcf = tile_generator.pcf:main',\n\t\t]\n\t}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"20800833","text":"from FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('analysis')\noptions.parseArguments()\n\nimport os,fnmatch\nimport FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"Rootuple\")\n\nprocess.load(\"TrackingTools.TransientTrack.TransientTrackBuilder_cfi\")\nprocess.load('Configuration.StandardSequences.GeometryRecoDB_cff')\nprocess.load(\"Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff\")\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')\nfrom Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data')\n\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 1000\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1))\n\nprocess.source = cms.Source(\"PoolSource\",\n\t\t fileNames = cms.untracked.vstring(options.inputFiles)\n\t\t\t )\n\nprocess.TFileService = 
cms.Service(\"TFileService\",fileName = cms.string(options.outputFile))\n\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False))\n\nprocess.triggerSelection = cms.EDFilter(\"TriggerResultsFilter\",\n triggerConditions = cms.vstring('HLT_Dimuon0_Phi_Barrel_v*','HLT_Mu16_TkMu0_dEta18_Phi_v*'),\n hltResults = cms.InputTag( \"TriggerResults\", \"\", \"HLT\" ),\n l1tResults = cms.InputTag( \"\" ),\n throw = cms.bool(False)\n )\n\n\n#this is need, because AOD contains only reco::Tracks, so this lines create patTracks on fly.\n\nprocess.load(\"SimGeneral.HepPDTESSource.pythiapdt_cfi\")\nprocess.CandidateSelectedTracks = cms.EDProducer(\"ConcreteChargedCandidateProducer\",\n src=cms.InputTag(\"oniaSelectedTracks\"),\n particleType=cms.string('K+')\n )\n\nfrom PhysicsTools.PatAlgos.producersLayer1.genericParticleProducer_cfi import patGenericParticles\nprocess.patSelectedTracks = patGenericParticles.clone(src=cms.InputTag(\"CandidateSelectedTracks\"))\n\nprocess.load('CompactSkim.Examples.DiMuonFilter_cfi')\nprocess.Onia2MuMuFiltered.singlemuonSelection = cms.string(\"\")\nprocess.Onia2MuMuFiltered.dimuonSelection = cms.string(\"0.85 < mass && mass < 1.2 && charge==0 && userFloat('vProb') > 0.01\")\n\nprocess.DimuonCunter = cms.EDFilter('CandViewCountFilter',\n src = cms.InputTag(\"Onia2MuMuFiltered\"),\n minNumber = cms.uint32(1),\n filter = cms.bool(True)\n)\n\nprocess.PhiPhiKVF = cms.EDProducer('PhiPhiKVF',\n DiMuonTag = cms.InputTag(\"Onia2MuMuFiltered\"),\n DiKaonTag = cms.InputTag(\"Phi2KKPAT\"),\n DiMuonMass = cms.double(1.019461) \n)\n\nprocess.PhiPhiNPKVF = cms.EDProducer('PhiPhiNPKVF',\n DiMuonTag = cms.InputTag(\"Onia2MuMuFiltered\"),\n DiKaonTag = cms.InputTag(\"Phi2KKPAT\"),\n TracksTag = cms.InputTag(\"TracksFiltered\"), \n DiMuonMass = cms.double(1.019461)\n)\n\nprocess.load('CompactSkim.Examples.TrackFilter_cfi')\nprocess.TracksFiltered.TrackTag = cms.InputTag(\"patSelectedTracks\")\nprocess.TracksFiltered.PrimaryVertexTag = cms.InputTag(\"offlinePrimaryVertices\")\nprocess.TracksFiltered.OniaTag = cms.InputTag(\"Onia2MuMuFiltered\")\nprocess.TracksFiltered.TrackSelection = cms.string(\"\") #pt > 0.7 && abs(eta) <= 2.4\")\n\n\n#Phi2KK\nprocess.load('CompactSkim.Examples.Phi2KKPAT_cfi')\nprocess.Phi2KKPAT.kaons = cms.InputTag(\"TracksFiltered\")\nprocess.Phi2KKPAT.OniaTag = cms.InputTag(\"Onia2MuMuFiltered\")\nprocess.Phi2KKPAT.higherPuritySelection = cms.string(\"\")\nprocess.Phi2KKPAT.lowerPuritySelection = cms.string(\"\")\nprocess.Phi2KKPAT.dikaonSelection = cms.string(\"0.95 < mass && mass < 1.1 && charge==0 && userFloat('deltar') < 0.5\")\n\n#a basic rootupler\nprocess.load('CompactSkim.Examples.PhiPhiNPKVFRootupler_cfi')\nprocess.rootuple.isMC = cms.bool(False)\nprocess.rootuple.dimTag = cms.InputTag(\"Onia2MuMuFiltered\")\nprocess.rootuple.dikTag = cms.InputTag(\"Phi2KKPAT\")\nprocess.rootuple.ffkTag = cms.InputTag(\"PhiPhiNPKVF\")\nprocess.rootuple.OnlyBest = cms.bool(False)\n\n#run everything\nprocess.p = cms.Path(\n process.triggerSelection *\n process.Onia2MuMuFiltered *\n process.DimuonCunter *\n process.CandidateSelectedTracks*process.patSelectedTracks *\n process.TracksFiltered *\n process.Phi2KKPAT *\n process.PhiPhiNPKVF *\n process.rootuple)\n","sub_path":"test/runPhiPhiBSKIM_byset.py","file_name":"runPhiPhiBSKIM_byset.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"92713422","text":"import numpy as np\nimport chainer\nimport 
chainer.variable as variable\nfrom chainer.functions.activation import lstm\nfrom chainer import cuda, Function, gradient_check, report, training, utils, Variable\nfrom chainer import datasets, iterators, optimizers, serializers\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nfrom collections import OrderedDict\nimport logging\nimport time\nfrom utils import to_device\n\nclass EntropyRegularizationLoss(Chain):\n\n def __init__(self, test=False):\n super(EntropyRegularizationLoss, self).__init__()\n self.loss = None\n \n def __call__(self, y, ):\n bs = y.data.shape[0]\n d = np.prod(y.data.shape[1:])\n\n y_normalized = F.softmax(y)\n y_log_softmax = F.log_softmax(y)\n self.loss = - F.sum(y_normalized * y_log_softmax) / bs / d\n\n return self.loss\n\nclass ReconstructionLoss(Chain):\n\n def __init__(self,\n ):\n super(ReconstructionLoss, self).__init__()\n self.loss = None\n \n def __call__(self, x_recon, x):\n bs = x.shape[0]\n d = np.prod(x.shape[1:])\n self.loss = F.mean_squared_error(x_recon, x) / d\n\n return self.loss\n\nclass InvariantReconstructionLoss(Chain):\n\n def __init__(self,\n ):\n super(InvariantReconstructionLoss, self).__init__()\n self.loss = None\n \n def __call__(self, x_recon, x):\n bs = x.shape[0]\n d = np.prod(x.shape[1:])\n\n if x.shape[1:] == 3:\n h_recon = F.average_pooling_2d(x_recon, (2, 2))\n h = F.average_pooling_2d(x, (2, 2))\n self.loss = F.mean_squared_error(x_recon, x) / d\n else:\n self.loss = F.mean_squared_error(x_recon, x) / d\n\n return self.loss\n\nclass ReconstructionLoss1(Chain):\n\n def __init__(self,\n ):\n super(ReconstructionLoss1, self).__init__()\n self.loss = None\n \n def __call__(self, x_recon, x):\n bs = x.shape[0]\n d = np.prod(x.shape[1:])\n self.loss = F.mean_absolute_error(x_recon, x) / d\n\n return self.loss\n\nclass GANLoss(Chain):\n\n def __init__(self, ):\n super(GANLoss, self).__init__(\n )\n \n def __call__(self, d_x_gen, d_x_real=None):\n bs_d_x_gen = d_x_gen.shape[0]\n if d_x_real is not None:\n bs_d_x_real = d_x_real.shape[0]\n loss = F.sum(F.log(d_x_real)) / bs_d_x_real \\\n + F.sum(F.log(1 - d_x_gen)) / bs_d_x_gen\n return - loss # to minimize\n \n else:\n loss = F.sum(F.log(d_x_gen)) / bs_d_x_gen\n return - loss # to minimize (reverse trick)\n\nclass WGANLoss(Chain):\n \"\"\"Wasserstein GAN loss\n \"\"\"\n def __init__(self, ):\n super(WGANLoss, self).__init__(\n )\n \n def __call__(self, d_x_gen, d_x_real=None):\n bs_d_x_gen = d_x_gen.shape[0]\n if d_x_real is not None:\n bs_d_x_real = d_x_real.shape[0]\n loss = F.sum(d_x_real) / bs_d_x_real - F.sum(d_x_gen) / bs_d_x_gen\n return - loss # to minimize\n \n else:\n loss = F.sum(d_x_gen) / bs_d_x_gen\n return - loss # to minimize (reverse trick)\n \nclass LSGANLoss(Chain):\n \"\"\"Least Square GAN Loss\n \"\"\"\n def __init__(self, ):\n super(LSGANLoss, self).__init__(\n )\n \n def __call__(self, d_x_gen, d_x_real=None):\n bs_d_x_gen = d_x_gen.shape[0]\n if d_x_real is not None:\n bs_d_x_real = d_x_real.shape[0]\n loss = F.sum(F.square(d_x_real - 1)) / bs_d_x_real /2 \\\n + F.sum(F.square(d_x_gen)) / bs_d_x_gen / 2\n return loss\n \n else:\n loss = F.sum(F.square(d_x_gen - 1)) / bs_d_x_gen / 2\n return loss\n\nclass MeanDistanceLoss(Chain):\n def __init__(self, ):\n super(MeanDistanceLoss, self).__init__(\n )\n \n def __call__(self, h):\n shape = h.shape\n m = F.sum(h, axis=0) / shape[0]\n M = F.broadcast_to(m, shape)\n D = -F.sum(h - M) / np.prod(shape)\n return D\n \nclass DistanceLoss(Chain):\n def __init__(self, ):\n 
super(DistanceLoss, self).__init__(\n )\n \n def __call__(self, h):\n shape = h.shape\n h = F.reshape(h, (shape[0], np.prod(shape[1:])))\n h_ns = F.batch_l2_norm_squared(h)\n bs = shape[0]\n h0 = F.broadcast_to(F.expand_dims(h_ns, 0), (bs, bs))\n h1 = F.broadcast_to(F.expand_dims(h_ns, 1), (bs, bs))\n hh = F.linear(h, h)\n D = h0 + h1 - 2 * hh\n D = F.sum(D) / np.prod(h.shape)\n \n return D\n \n \n\n","sub_path":"meta_recon/meta_recon/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"337832304","text":"import os\nfrom pathlib import Path\nimport uuid\nimport json\n\nimport aiofiles\nimport aiohttp_jinja2\nimport aioredis\nimport jinja2\nfrom aiohttp import web\nfrom aiohttp_session import setup, get_session\nfrom celery_workers import worker\nfrom aiohttp_session import session_middleware\nfrom aiohttp_session.redis_storage import RedisStorage\n\nimport settings\n\nroutes = web.RouteTableDef()\n\n\n@routes.get('/analyse/start')\n@aiohttp_jinja2.template('analyse_start.html')\nasync def upload_form(request):\n return {'title': 'Welcome Page'}\n\n\n@routes.post('/analyse/start')\n@aiohttp_jinja2.template('analyse_succeed_created.html')\nasync def upload_process(request):\n async for obj in (await request.multipart()):\n if obj.filename is not None:\n\n file_path = os.path.join(settings.MEDIA_ROOT, obj.filename)\n f = await aiofiles.open(file_path, 'wb')\n await f.write(await obj.read())\n await f.close()\n await worker.create_task(file_path)\n session = await get_session(request)\n file = [\n {\n 'file_name': obj.filename,\n 'task_id': str(uuid.uuid4())\n }\n ]\n if 'files' in session:\n session['files'] = session['files'] + file\n else:\n session['files'] = file\n\n\n@routes.get(r'/analyse/list')\n@aiohttp_jinja2.template('analyse_list.html')\nasync def get_analyse_list(request):\n session = await get_session(request)\n return {'analysed_list': session.get('files')}\n\n\n@routes.get(r'/analyse/result/{task_id}')\n@aiohttp_jinja2.template('analyse_result.html')\nasync def get_result_view(request):\n task_id = request.match_info['task_id']\n df = await worker.get_result(task_id)\n return {\n 'describe': df['describe'].to_html(),\n 'info': df['info'],\n }\n\n\nasync def init():\n redis = await aioredis.create_pool(('localhost', 6379))\n app = web.Application(middlewares=[session_middleware(RedisStorage(redis))])\n app.add_routes(routes)\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(settings.TEMPLATE_ROOT))\n Path(settings.MEDIA_ROOT).mkdir(parents=True, exist_ok=True)\n return app\n\nweb.run_app(init())\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419360555","text":"#module for handling operations with matrices\n\nfrom __future__ import division\nimport sys\n\nclass BlosumMatrix:\n\t\"\"\"class for working with BLOSUM matrix\"\"\"\n\tdef __init__(self,matrix):\n\t\tself.load_matrix(matrix)\n\t\n\t#load BLOSUM matrix from file and create dictionary representing matrix\t\n\tdef load_matrix(self,matrix):\n\t\twith open(matrix,'r') as blosum_file:\n\t\t\tblosum_matrix = blosum_file.read()\n\n\t\tlines = blosum_matrix.strip().split('\\n')\n\t\theader = lines.pop(0)\n\t\tcolumns = header.split()\n\t\tblosum_matrix = {}\n\n\t\tfor row in lines:\n\t\t\tentries = row.split()\n\t\t\trow_name = 
entries.pop(0)\n\t\t\tblosum_matrix[row_name] = {}\n\n\t\t\tfor column_name in columns:\n\t\t\t\tblosum_matrix[row_name][column_name] = entries.pop(0)\n\n\t\tself._blosum_matrix = blosum_matrix\n\t\t#print(self._blosum_matrix)\n\n\t#find score of 2 amino acids according to BLOSUM matrix\n\tdef find_score(self,acid1,acid2):\n\t\tacid1 = acid1.upper()\n\t\tacid2 = acid2.upper()\n\n\t\tif acid1 not in self._blosum_matrix or acid2 not in self._blosum_matrix[acid1]:\n\t\t\traise KeyError('unknown amino acid pair: ' + acid1 + '/' + acid2)\n\t\treturn self._blosum_matrix[acid1][acid2]\n\n\nclass ProbabilityMatrix:\n\t\"\"\"creates probability matrix from multiple alignments\"\"\"\n\tdef __init__(self,filename,matrix):\n\t\talignements = self.store_alignement(filename)\n\t\tself.create_probability_matrix(matrix,alignements)\n\t\n\t#stores alignments from file\t\n\tdef store_alignement(self,filename):\n\t\talignements = list()\n\t\twith open(filename,'r') as t:\n\t\t\tfor line in t.readlines():\n\t\t\t\tif not line.startswith('>'):\n\t\t\t\t\tseq1 = line.strip('\\n')\n\t\t\t\t\talignements.append(seq1)\n\t\treturn alignements\n\t#creates probability matrix as a dictionary, keyed by the alignment itself\n\tdef create_probability_matrix(self,matrix,alignements):\n\t\tscore = 0\n\t\tmax_score = 0\n\t\tprobability_matrix = {}\n\t\tfor i in alignements:\n\t\t\tfor c,c1 in zip(i,i):\n\t\t\t\tmax_score += int(matrix.find_score(c,c1))\n\t\t\tprint('Max score is:' + str(max_score))\n\t\t\tprobability_matrix[i] = {}\n\t\t\tfor j in alignements:\n\t\t\t\tfor c,c1 in zip(i,j):\n\t\t\t\t\tscore += int(matrix.find_score(c,c1))\n\t\t\t\tprobability_matrix[i][j] = score / max_score\n\t\t\t\tm = score / max_score\n\t\t\t\tprint('Score is:' + str(m))\n\t\t\t\tscore = 0\n\t\t\tmax_score = 0\n\n\t\tself._probability_matrix = probability_matrix\n\t\t#print(self._probability_matrix)\n\n\n\tdef find_pair(self,seq1,seq2):\n\t\treturn self._probability_matrix[seq1][seq2]\n\n","sub_path":"Dokumenty/blosum.py","file_name":"blosum.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575195650","text":"#! 
/usr/bin/env python \n \n \n##########################################################################################\n# PostColorTrack_MCHE470_Fall2013_cv2.py\n#\n# Script to process Mini-Project 3b videos\n#\n# Requires OpenCV\n# \n# Created: 11/2/13 \n# - Joshua Vaughan \n# - joshua.vaughan@louisiana.edu\n# - http://www.ucs.louisiana.edu/~jev9637\n#\n# Modified:\n# * 11/4/13 - Joshua Vaughan - joshua.vaughan@louisiana.edu\n# - hard coded video names due to Tkinter file dialog bug\n#\n########################################################################################## \n \nimport cv2 as cv2\nimport numpy as np\nfrom time import localtime, strftime, sleep\n#from Tkinter import Tk\nimport sys\n#from matplotlib.pyplot import * # Grab MATLAB plotting functions\n\ncolor_tracker_window = \"Color Tracker\"\n\ntrial_number = 1\n\n#if not os.path.exists(folder):\n # os.makedirs(folder)\n\n# filename = strftime(\"%m_%d_%Y_%H%M%S\") #names the output file as the date and time that the program is run\n# filepath = filename + \".csv\" #gives the path of the file to be opened\n\nfilepath = 'CSV files/pantographic_arm_vibration_{}.csv'.format(trial_number)\n# vid_name = sys.argv[1]\n \nf = open(filepath, \"a+\") #opens the output file in append mode\nf.write('Time (s), X Position (pixels), Y Position (pixels)' + '\\n') \n\nshow_images = 1\nprint_images = 0\n\nclass ColorTracker:\n def __init__(self): \n# cv2.NamedWindow( color_tracker_window, 1 ) \n\n# tk = Tk()\n# tk.withdraw() # we don't want a full GUI, so keep the root window from appearing\n# video_filename = askopenfilename(parent=tk) # show an \"Open\" dialog box and return the path to the selected file\n# \n# tk.destroy()\n \n video_filename = '/Users/Matt/Desktop/pantographic_arm_vibration_recordings/pantographic_arm_vibration_{}.mov'.format(trial_number)\n # video_filename = '/Users/Matt/Desktop/pantog/pantographic_arm_vibration_{}.mov'.format(trial_number)\n\n \n self.capture = cv2.VideoCapture(video_filename)\n \n \n def run(self): \n initialTime = 0. 
#sets the initial time\n# num_Frames = int( cv2.GetCaptureProperty( self.capture, cv2.CV_CAP_PROP_FRAME_COUNT ) )\n num_Frames = int(self.capture.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n # fps = self.capture.get(cv2.cv.CV_CAP_PROP_FPS)\n fps = 30.0\n# fps = cv2.GetCaptureProperty( self.capture, cv2.CV_CAP_PROP_FPS )\n \n for ii in range(num_Frames-9):\n \n print('Frame: ' + str(ii) + ' of ' + str(num_Frames))\n # read the ii-th frame\n# img = cv2.QueryFrame( self.capture ) \n img = self.capture.read()[1]\n \n if show_images:\n cv2.imshow('Raw Frame',img)\n # raw_input(\"Press Enter to continue...\")\n \n if print_images:\n savefig('Raw_Frame.png')\n \n \n # Blur the source image to reduce color noise \n # cv2.Smooth(img, img, cv2.CV_BLUR, 10) \n img = cv2.blur(img,(10,10))\n \n if show_images:\n cv2.imshow('Blurred',img)\n# raw_input(\"Press Enter to continue...\")\n \n if print_images:\n savefig('10x10_blur.png')\n\n \n # Convert the image to hsv(Hue, Saturation, Value) so its \n # It's easier to determine the color to track(hue) \n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) \n\n # Define min and max HSV values to threshold\n Track_MIN = np.array([0, 0, 245],np.uint8)\n Track_MAX = np.array([180, 10, 255],np.uint8)\n \n # threshold the image\n thresholded_img = cv2.inRange(hsv_img, Track_MIN, Track_MAX)\n \n if show_images:\n cv2.imshow('Thresholded Image',thresholded_img)\n# raw_input(\"Press Enter to continue...\")\n \n if print_images:\n savefig('Thresholded_Frame.png')\n \n # fill the top with black\n thresholded_img[0:75,0:720] = 0\n thresholded_img[0:480,650:720] = 0\n \n if show_images:\n# if ii > 100:\n cv2.imshow('Thresholded Image',thresholded_img)\n# raw_input(\"Press Enter to continue...\")\n \n \n #determine the objects moments and check that the area is large \n #enough to be our object \n# thresholded_img2 = cv2.GetMat(thresholded_img)\n moments = cv2.moments(thresholded_img,0) \n area = moments['m00'] \n \n \n # there can be noise in the video so ignore objects with small areas \n if(area > 1500): \n #determine the x and y coordinates of the center of the object \n #we are tracking by dividing the 1, 0 and 0, 1 moments by the area \n x = moments['m10'] / area\n y = moments['m01'] / area\n\n elapsedTime = ii/fps\n \n f.write(str(elapsedTime) + ',' + '%013.9f' % x + ',' + '%013.9f' % y + \"\\n\") #prints output to the specified output file for later use\n \n x = int(x)\n y = int(y)\n \n# #create an overlay to mark the center of the tracked object \n# overlay = cv2.CreateImage(cv2.GetSize(img), 8, 3) \n# \n# cv2.Circle(overlay, (x, y), 2, (255, 255, 255), 20) \n# cv2.Add(img, overlay, img) \n# #add the thresholded image back to the img so we can see what was \n# #left after it was applied \n# cv2.Merge(thresholded_img, None, None, None, img) \n# \n# #display the image \n# cv2.ShowImage(color_tracker_window, img) \n \n # close the data file\n f.close()\n\n \nif __name__==\"__main__\": \n color_tracker = ColorTracker() \n color_tracker.run() \n","sub_path":"Code/Tinkering/Machine_Vision/MachineVision_VideoProcessing.py","file_name":"MachineVision_VideoProcessing.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"456878053","text":"from __future__ import print_function\nimport torch\nimport os\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision\nimport torchvision.transforms as transforms\nimport 
matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nclass CifarAlexNet(nn.Module):\n def __init__(self):\n super(CifarAlexNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 32, 3, padding = 1)\n self.pool = nn.MaxPool2d(2, 2)\n self.conv2 = nn.Conv2d(32, 64, 3, padding = 1)\n self.conv3 = nn.Conv2d(64, 96, 3, padding = 1)\n self.conv4 = nn.Conv2d(96, 96, 3, padding = 1)\n self.conv5 = nn.Conv2d(96, 128, 3, padding= 1)\n self.fc1 = nn.Linear(128 * 4 * 4, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 10)\n self.dropout = nn.Dropout(p = 0.5)\n\n\n def forward(self, x):\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n # y = x # that is reconstruct_v2\n x = self.pool(F.relu(self.conv5(x)))\n y = x # that is reconstruct\n x = self.dropout(x.view(-1, 128 * 4 * 4))\n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.fc3(x)\n return x, y\n\n# Train the alexnet(simplified for cifar)\nif __name__ == \"__main__\":\n keepOn = False\n transform = transforms.Compose(\n\t [transforms.ToTensor(),\n\t transforms.Normalize((0.5, 0.5, 0.5),(0.5, 0.5, 0.5))])\n\t\n trainset = torchvision.datasets.CIFAR10(root = './data', train = True, transform = transform)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size = 128, shuffle = True, num_workers = 0)\n\n testset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform)\n testloader = torch.utils.data.DataLoader(testset, batch_size = 128, shuffle=False, num_workers=0)\n\n classes = ('plane', 'car', 'bird', 'cat',\n 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n device = torch.device(\"cuda:0\")\n net = CifarAlexNet()\n net.to(device)\n crit = nn.CrossEntropyLoss()\n learningRate = [0.005 for i in range(40)]\n learningRate.extend(0.0005 for i in range(20))\n learningRate.extend(0.0001 for i in range(10))\n # Record the performance\n train_loss = []\n train_accu = []\n test_loss = []\n test_accu = []\n x_axis = []\n start = 0\n if keepOn:\n res = os.listdir(\"./data/exp\")\n start = len(res)\n net = torch.load(\"./data/exp/alex\"+str(start)+\".pkl\")\n for epoch in range(start,70):\n x_axis.append(epoch + 1)\n optimizer = optim.SGD(net.parameters(), lr = learningRate[epoch], momentum = 0.9)\n correct = 0\n total = 0\n accu_loss = 0\n batchNum = 0\n\n # Train\n for i, data in enumerate(trainloader, 0):\n batchNum += 1\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n optimizer.zero_grad()\n\n outputs, features = net(inputs)\n # Update parameters\n loss = crit(outputs, labels)\n loss.backward()\n optimizer.step()\n # Calculate the performance\n outputs, predicted = torch.max(outputs.data, 1)\n correct += (predicted == labels).sum().item()\n accu_loss += loss.item()\n total += labels.size(0)\n accuracy = correct / total\n print('[train] epoch: %2d, batch: %3d, loss: %.3f, accuracy: %.3f'\\\n % (epoch + 1, i + 1, accu_loss / (i+1), accuracy))\n train_loss.append(accu_loss / batchNum)\n train_accu.append(correct / total)\n\n # Test\n correct = 0\n total = 0\n accu_loss = 0\n batchNum = 0\n with torch.no_grad():\n for i, data in enumerate(testloader, 0):\n batchNum += 1\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n outputs, _ = net(inputs) # net returns (logits, features); unpack so crit gets the logits\n loss = crit(outputs, labels)\n # Calculate the performance\n outputs, predicted = torch.max(outputs.data, 1)\n correct += (predicted == 
labels).sum().item()\n accu_loss += loss.item()\n total += labels.size(0)\n accuracy = correct / total\n print('[test] epoch: %2d, batch: %3d, loss: %.3f, accuracy: %.3f'\\\n % (epoch + 1, i + 1, accu_loss / (i+1), accuracy))\n test_loss.append(accu_loss / batchNum)\n test_accu.append(correct / total)\n\n #draw the figures\n pdf = PdfPages(\"alex_figure.pdf\")\n plt.figure(1)\n plt.subplot(121)\n plt.plot(x_axis, train_accu, x_axis, test_accu)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n\n plt.subplot(122)\n plt.plot(x_axis, train_loss, x_axis, test_loss)\n plt.xlabel(\"epoch\")\n plt.ylabel(\"loss\")\n pdf.savefig()\n plt.close()\n pdf.close()\n\n # Save the net\n net_name = \"./data/exp/alex\" + str(epoch+1) + \".pkl\"\n torch.save(net, net_name)\n\n print('over')","sub_path":"cifar_alex.py","file_name":"cifar_alex.py","file_ext":"py","file_size_in_byte":5309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572845721","text":"import openpyxl\nimport numpy\nimport statistics\nimport math\n\n#************************** Function for Getting BIN Number for a given height ***************\ndef get_bin_num (i, Min, Max, bin_cnt=32):\n return (round ((bin_cnt-1) * ((i-Min)/(Max-Min))))\n\ndef get_pd (x, mu, sig):\n tmp1 = (1 / (math.sqrt(2 * math.pi) *sig))\n tmp2 = (-0.5* ( ((x - mu)/sig) * ((x - mu)/sig) ))\n tmp3 = math.exp (tmp2)\n prob_den = tmp1 * tmp3\n return(prob_den)\n\ndef bayesian (Nm, pm, Nf, pf):\n temp1 = (Nm*pm) /(Nm+Nf)\n temp2 = (Nf*pf)/(Nm+Nf)\n Pf = temp2 / (temp1+temp2)\n return (Pf)\n\n \n \n#************* End of Functions Definitions ***************\n\nwb = openpyxl.load_workbook('Assignment_1_Data_and_Template.xlsx')\n#print (type(wb))\n#wb.get_sheet_names()\nsheet = wb.get_sheet_by_name('Data')\nlast_row_number = sheet.max_row\nprint (\"Max row Number in XL sheet=\", last_row_number)\nprint (\"Total Number of Rows in the Training set =\", (last_row_number-1))\n\n###1. Initialize Lists and Dictionaries. \nMale =[]\nFemale =[]\nMale_And_Female = []\nBin_count = 32\nH={}\nNm = 0\nNf = 0\npm = 0\npf = 0\n\n###2.\noutput = \"Initialise Histogram Dictionary with a size of \" + str(Bin_count) + \". 
Index <0 thru\" + str(Bin_count-1) + \">: \"\nprint (output.center(100, \"*\"))\n \nfor i in range(0,Bin_count):\n H[i]= {'male':0, 'female':0}\n#print(H)\n\n###3.\noutput = \"Read all the Rows from XL and save the hights into 3 lists: Male, Femal, Male_And_Female :\"\nprint (output.center(100, \"*\"))\n \n#for i in (range(2, 52)):\nfor i in (range(2, (last_row_number+1) )):\n fv_h_ft = sheet[\"A\"+str(i)].value\n fv_h_in = sheet[\"B\"+str(i)].value\n fv_gender = sheet[\"C\"+str(i)].value\n \n Height = (fv_h_ft * 12 + fv_h_in)\n Male_And_Female.append(Height)\n\n if (fv_gender == \"Male\"):\n Male.append(Height)\n elif (fv_gender == \"Female\"):\n Female.append(Height)\n\n#print (\"Male List:\",Male)\n#print (\"Female List:\",Female)\n#print (\"Male and Female List:\", Male_And_Female) \n\nMale_And_Female.sort()\nMax=Male_And_Female[-1]\nMin=Male_And_Female[0]\nNm = len(Male)\nNf = len(Female)\noutput = \"Max and Minimum Height from Male_And_Female List: \"\nprint (\"Max:\", Male_And_Female[-1])\nprint (\"Min:\", Male_And_Female[0])\nprint (\"Size of Male List:\", Nm)\nprint (\"Size of FeMale List:\", Nf)\nprint (\"Sum of both the list sizes:\", (Nm+Nf))\n\nif(((len(Male)+len(Female)) == (last_row_number -1))):\n print (\"Combined length of Male and Female Lists = Number of Rows in Training Set - Check PASS\")\nelse:\n print (\"Combined length of Male and Female Lists != Number of Rows in Training Set - Check FAIL\")\n \n#print (\"Male and Female List, after sort:\", Male_And_Female)\n\noutput = \"Create Male Histogram: \"\nprint (output.center(100, \"*\"))\n\nfor i in (Male):\n bin_num = get_bin_num(i, Min, Max)\n H[bin_num]['male']=H[bin_num]['male']+1\n\noutput = \"Create Female Histogram: \"\nprint (output.center(100, \"*\"))\n \nfor i in (Female):\n bin_num = get_bin_num(i, Min, Max)\n H[bin_num]['female']=H[bin_num]['female']+1\n\noutput = \"Mean and Standrd Deviation for Male and Female Histograms: \"\nprint (output.center(100, \"*\"))\n \nMu_M = statistics.mean(Male)\nMu_F = statistics.mean(Female)\n\nsig_M = statistics.stdev(Male)\nsig_F = statistics.stdev(Female)\n\nprint (\"Mean Male:\",Mu_M)\nprint (\"Mean Female:\",Mu_F)\nprint (\"stdev Male:\",sig_M)\nprint (\"stdev Female:\",sig_F)\n\noutput2 = \"\"\nprint (output2.center(100, \"~\"))\nprint (\"Counts in each Bin:\", H);\nprint (output2.center(100, \"~\"))\n\noutput = \"Check whether the Sum of all Buckets is Equal to Number of Rows in Training Set : \"\nprint (\"Max:\", Male_And_Female[-1])\nprint (\"Min:\", Male_And_Female[0])\ncount_h = 0\nfor i in (H):\n count_h = count_h + H[i]['male']+H[i]['female']\n\n\nif ( count_h == (last_row_number -1)):\n print (\"The Sum of all Buckets is Equal to Number of Rows in Training Set - Check PASS\")\nelse:\n print (\"The Sum of all Buckets is Equal to Number of Rows in Training Set - Check FAIL\")\n\n#****************************************************************************************\n#************************************ Start of Testing **********************************\n#****************************************************************************************\n \noutput = \"Start of Testing \"\noutput2 = \"\"\nprint (output2.center(100, \"*\"))\nprint (output.center(100, \"*\"))\nprint (output2.center(100, \"*\"))\ntcs = [55,60,65,70,75,80]\n\n#***************** Using Histograms ******************************\noutput = \"Results using Histogram Method\"\nprint (output.center(100, \"~\"))\n\nfor ht_in in (tcs):\n bin_num = get_bin_num(ht_in, Min, Max)\n male_cnt = 
H[bin_num]['male']\n female_cnt = H[bin_num]['female']\n p_f = (female_cnt/(female_cnt+male_cnt))\n print (\"Height:\", ht_in, \"bin number:\", bin_num, \"Male Count:\", male_cnt, \"female count:\", female_cnt, \"Probability of being female: \", p_f, sep =\"\\t\")\n\n#***************** Using Gaussian Model ******************************\noutput = \"Results using Gaussian Method\"\nprint (output.center(100, \"~\"))\n\nfor ht_in in (tcs):\n pd_m = get_pd (ht_in, Mu_M, sig_M)\n pd_f = get_pd (ht_in, Mu_F, sig_F)\n Pf = bayesian(Nm, pd_m, Nf, pd_f)\n print (\"Height:\", ht_in, \"PD_Male:\", pd_m, \"PD_f:\", pd_f, \"Pf:\", Pf, sep =\"\\t\")\n","sub_path":"Assignment_1/assignment1_v3.py","file_name":"assignment1_v3.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305440534","text":"# from django.conf.urls.defaults import patterns, include, url\n# from minesite.views import hello, current_datetime, hours_ahead\n# from minesite import books\n# from minesite.contact.views import contact\nfrom django.conf.urls.defaults import *\n# from minesite import views\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^hello/$', 'minesite.views.hello'),\n (r'^time/$', 'minesite.views.current_datetime'),\n (r'^time/plus/(\\d{1,2})/$', 'minesite.views.hours_ahead'),\n# (r'^search-form/$', views.search_form),\n (r'^search/$', 'minesite.books.views.search'),\n (r'^contact/$', 'minesite.contact.contact'),\n # Examples:\n # url(r'^$', 'mysite.views.home', name='home'),\n # url(r'^minesite/', include('mysite.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"120432693","text":"n=input()\nx=n.split()\na=int(x[0])\nb=int(x[1])\nc=int(x[2])\nif(a>c):\n if(a>b):\n print(a)\nelif(c 8:\n nparr = np.frombuffer(data, np.uint16)\n division = pow(2, pixel_format_info\n .each_component_valid_bit_count - 8)\n nparr = (nparr / division).astype('uint8')\n else:\n nparr = np.frombuffer(data, np.uint8)\n\n # Process image for display.\n nparr = nparr.reshape(st_image.height, st_image.width, 1)\n\n # Perform color conversion for Bayer.\n if pixel_format_info.is_bayer:\n bayer_type = pixel_format_info.get_pixel_color_filter()\n if bayer_type == st.EStPixelColorFilter.BayerRG:\n nparr = cv2.cvtColor(nparr, cv2.COLOR_BAYER_RG2RGB)\n elif bayer_type == st.EStPixelColorFilter.BayerGR:\n nparr = cv2.cvtColor(nparr, cv2.COLOR_BAYER_GR2RGB)\n elif bayer_type == st.EStPixelColorFilter.BayerGB:\n nparr = cv2.cvtColor(nparr, cv2.COLOR_BAYER_GB2RGB)\n elif bayer_type == st.EStPixelColorFilter.BayerBG:\n nparr = cv2.cvtColor(nparr, cv2.COLOR_BAYER_BG2RGB)\n\n # Resize image and store to self._image.\n nparr = cv2.resize(nparr, None,\n fx=DISPLAY_RESIZE_FACTOR,\n fy=DISPLAY_RESIZE_FACTOR)\n self._lock.acquire()\n self._image = nparr\n self._lock.release()\n\n\nst.initialize()\nst_system = st.create_system()\nst_device = st_system.create_first_device()\nst_datastream = 
st_device.create_datastream()\nst_datastream.start_acquisition()\nst_device.acquisition_start()\n\nmy_callback = CMyCallback()\ncb_func = my_callback.datastream_callback\n\n# Register callback for datastream\ncallback = st_datastream.register_callback(cb_func)\n\ncap = cv2.VideoCapture(1)\nmean = None\nfirst_frame = None\nresponses = {}\n\nstatus = True\nwhile status:\n output_image = my_callback.image\n print('output_image',output_image)\n if output_image is not None:\n cv2.imshow('image', output_image)\n key_input = cv2.waitKey(1)\n\n #motion detection. this happens, if camera detection motion of palm fruit\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21,21), 0)\n if first_frame is None:\n time.sleep(10)\n first_frame = gray\n continue\n delta_frame = cv2.absdiff(first_frame, gray)\n\n if mean is None:\n mean = np.mean(delta_frame)\n continue\n print(np.mean(delta_frame))\n print(np.mean(delta_frame))\n if np.mean(delta_frame) > mean+10 or np.mean(delta_frame) < mean-10:\n responses['moved'] = '1'\n print('object moved')\n else:\n responses['moved'] = '0'\n print('no object moved')\n \n thresh_delta = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]\n thresh_delta = cv2.dilate(thresh_delta, None, iterations=0)\n cnts, __ = cv2.findContours(thresh_delta.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in cnts:\n if cv2.contourArea(contour)<10000:\n continue\n (x,y,w,h) = cv2.boundingRect(contour)\n cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 3)\n\n cv2.imshow('frame', frame)\n cv2.imshow('capturing', gray)\n cv2.imshow('delta', delta_frame)\n cv2.imshow('thresh', thresh_delta)\n\n capturing = None\n\n print(responses)\n if responses['moved'] == '1':\n time.sleep(1)\n # conveyor_stoped = toArduino2.write(str.encode(responses['moved']))\n\n conveyor_stoped = toArduino2.write(str.encode('0'))\n print('conveyor stop moving')\n\n status = False\n rescale_frame = cv2.resize(output_image, (1024,1088))\n h,w,l = rescale_frame.shape\n result_array = np.zeros((h,231,1))\n start_time = datetime.now()\n\n # capture frame per second\n for _ in range(50):\n ret, frame = cap.read()\n rescale_frame = cv2.resize(frame, (1024, 1088))\n crop_frame = rescale_frame[:, 370:601]\n result_array = np.append(result_array, crop_frame, axis=2)\n\n result_array = result_array[:,:,1:101]\n\n # Modify matrix of white reference and dark reference\n file_wr = 'wr.mat'\n file_blk = 'blk.mat'\n wr = sc.loadmat('wr.mat')['wr'].astype(int)\n blk = sc.loadmat('blk.mat')['blk'].astype(int)\n y = np.subtract(wr, blk)\n\n m, n = y.shape\n for s in range(m):\n for t in range(n):\n if y[s][t] < 0:\n y[s][t] = 0\n if y[s][t] == 0:\n y[s][t] = 1\n\n h1,w1,l1 = result_array.shape\n\n for i in range(l1):\n temp = np.subtract(result_array[:,:,i],blk)\n m, n = temp.shape\n for s in range(m):\n for t in range(n):\n if temp[s][t] < 0:\n temp[s][t] = 0\n result_array[:,:,i] = np.divide(temp, y)\n\n result_arrayv2 = np.zeros((100,231,1088))\n Ax, Ay, r = result_array.shape\n for i in range(Ax):\n for z in range(r):\n result_arrayv2[z,:,i] = result_array[i,:,z]\n\n print('dimension of array is {}'.format(result_arrayv2.shape))\n mean = []\n\n for i in range(1088):\n n = 1087-i\n res = result_arrayv2[:,:,n][40:55, 100:125]\n mean.append(np.mean(res))\n\n plt.plot(mean)\n end_time = datetime.now()\n time_needed = end_time - start_time\n print('the time needed is {} seconds'.format(time_needed.seconds))\n\n filename = \"parameterValue\"\n 
#Prediction\n prediction = NewPrediction(filename, np.mean(mean))\n result = prediction.predict()\n print(\"Result Prediction is {}\".format(result))\n\n # Sending result response to arduino to turn on conveyor\n # conveyor_moved = toArduino2.write(str.encode(mv_dtc['status_off']))\n print('conveyor moved')\n time.sleep(3)\n\n #move arm\n time.sleep(1)\n # arm_moved = toArduino1.write( str.encode(result['index']))\n print('arm moved')\n \n print (\"Program done\")\n\n time.sleep(3)\n status = True\n plt.show()\n if key_input == ord('q'):\n break\n\nst_device.acquisition_stop()\nst_datastream.stop_acquisition()","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432661016","text":"from wx import wx\r\nimport wx.richtext\r\nimport sqlite3\r\n\r\nfrom StringIO import StringIO\r\n\r\n# Create Content Class\r\nclass CreateContentTab(wx.Panel):\r\n \r\n def __init__(self, parent):\r\n wx.Panel.__init__(self, parent)\r\n\r\n self.toolbar_ADDCONTENT = wx.NewId()\r\n self.toolbar_UPDATECONTENT = wx.NewId()\r\n # toolbar_STRIKETHROUGH = wx.NewId()\r\n # toolbar_SUBSCRIPT = wx.NewId()\r\n # toolbar_SUPERSCRIPT = wx.NewId()\r\n # toolbar_ORDEREDLIST = wx.NewId()\r\n # toolbar_UNORDEREDLIST = wx.NewId()\r\n richToolbar = self.MakeToolBar()\r\n self.box = wx.BoxSizer(wx.VERTICAL)\r\n self.createContentRichTextCtrl = wx.richtext.RichTextCtrl(self, -1, style=wx.VSCROLL | wx.BORDER_SUNKEN | wx.ALWAYS_SHOW_SB | wx.WANTS_CHARS)\r\n self.listBox = wx.ListBox(self, -1, style= wx.BORDER_SUNKEN | wx.LB_SINGLE | wx.LB_ALWAYS_SB | wx.LB_HSCROLL)\r\n self.box.Add(richToolbar, 0, wx.ALL | wx.ALIGN_LEFT | wx.EXPAND)\r\n self.box.Add(self.createContentRichTextCtrl, 1, wx.EXPAND)\r\n self.box.Add(self.listBox, 1, wx.EXPAND)\r\n self.SetSizer(self.box)\r\n wx.EVT_LISTBOX(self, self.listBox.GetId(), self.OnSelectedContentItem)\r\n \r\n # Create Event Function\r\n def OnSelectedContentItem(self, event):\r\n if(self.listBox.GetSelection() != -1):\r\n tmpString = self.listBox.GetStringSelection()\r\n tmpString2 = tmpString.split(\".\", 1)\r\n self.content = self.TopLevelParent.currentContentDict[int(tmpString2[0])]\r\n self.content = self.content[0].encode(\"UTF-8\")\r\n out = StringIO()\r\n handler = wx.richtext.RichTextXMLHandler()\r\n txtBuffer = self.createContentRichTextCtrl.GetBuffer()\r\n txtBuffer.AddHandler(handler)\r\n out.write(self.content)\r\n out.seek(0)\r\n handler.LoadStream(txtBuffer, out)\r\n out.close()\r\n self.createContentRichTextCtrl.Refresh()\r\n\r\n # Create RichText Toolbar\r\n def MakeToolBar(self):\r\n tb = wx.ToolBar(self, -1, style=wx.TB_FLAT | wx.NO_BORDER)\r\n self.ToolBar = tb\r\n tb.AddTool(wx.ID_CUT, wx.Bitmap(\"images/edit-cut.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Cut\")\r\n tb.AddTool(wx.ID_COPY, wx.Bitmap(\"images/edit-copy.png\", wx.BITMAP_TYPE_PNG), isToggle=False,shortHelpString=\"Copy\")\r\n tb.AddTool(wx.ID_PASTE, wx.Bitmap(\"images/edit-paste.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Paste\")\r\n tb.AddTool(wx.ID_UNDO, wx.Bitmap(\"images/edit-undo.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Undo\")\r\n tb.AddTool(wx.ID_REDO, wx.Bitmap(\"images/edit-redo.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Redo\")\r\n tb.AddSeparator()\r\n tb.AddTool(wx.ID_BOLD, wx.Bitmap(\"images/format-text-bold.png\", wx.BITMAP_TYPE_PNG), 
wx.Bitmap(\"images/format-text-bold-off.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Bold\")\r\n tb.AddTool(wx.ID_ITALIC, wx.Bitmap(\"images/format-text-italic.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Italic\")\r\n tb.AddTool(wx.ID_UNDERLINE, wx.Bitmap(\"images/format-text-underline.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Underline\")\r\n # tb.AddTool(toolbar_STRIKETHROUGH, wx.Bitmap(\"images/format-text-strikethrough.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Strikethrough\")\r\n # tb.AddTool(toolbar_SUBSCRIPT, wx.Bitmap(\"images/Subscript.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Subscript\")\r\n # tb.AddTool(toolbar_SUPERSCRIPT, wx.Bitmap(\"images/Superscript.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Superscript\")\r\n tb.AddSeparator()\r\n tb.AddTool(wx.ID_JUSTIFY_LEFT, wx.Bitmap(\"images/format-justify-left.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Left Align\")\r\n tb.AddTool(wx.ID_JUSTIFY_CENTER, wx.Bitmap(\"images/format-justify-center.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Align Center\")\r\n tb.AddTool(wx.ID_JUSTIFY_RIGHT, wx.Bitmap(\"images/format-justify-right.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Right Align\")\r\n tb.AddTool(wx.ID_JUSTIFY_FILL, wx.Bitmap(\"images/format-justify-fill.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Justify\")\r\n tb.AddSeparator()\r\n # tb.AddTool(toolbar_ORDEREDLIST, wx.Bitmap(\"images/NumbersList.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Ordered List\")\r\n # tb.AddTool(toolbar_UNORDEREDLIST, wx.Bitmap(\"images/BulletList.png\", wx.BITMAP_TYPE_PNG), isToggle=True, shortHelpString=\"Unordered List\")\r\n # tb.AddSeparator()\r\n # tb.AddTool(wx.ID_INDENT, wx.Bitmap(\"images/format-indent-more.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Indent\")\r\n # tb.AddTool(wx.ID_UNINDENT, wx.Bitmap(\"images/format-indent-less.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Outdent\")\r\n # tb.AddSeparator()\r\n tb.AddTool(self.toolbar_ADDCONTENT, wx.Bitmap(\"images/list-add.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Add New Content\")\r\n tb.AddTool(self.toolbar_UPDATECONTENT, wx.Bitmap(\"images/document-save.png\", wx.BITMAP_TYPE_PNG), isToggle=False, shortHelpString=\"Update Existing Content\")\r\n wx.EVT_TOOL(self, wx.ID_CUT, self.ForwardEvent)\r\n wx.EVT_TOOL(self, wx.ID_COPY, self.ForwardEvent)\r\n wx.EVT_TOOL(self, wx.ID_PASTE, self.ForwardEvent)\r\n wx.EVT_TOOL(self, wx.ID_UNDO, self.ForwardEvent)\r\n wx.EVT_TOOL(self, wx.ID_REDO, self.ForwardEvent)\r\n wx.EVT_TOOL(self, wx.ID_BOLD, self.OnBold)\r\n wx.EVT_UPDATE_UI(self, wx.ID_BOLD, self.OnUpdateBold)\r\n wx.EVT_TOOL(self, wx.ID_ITALIC, self.OnItalic)\r\n wx.EVT_UPDATE_UI(self, wx.ID_ITALIC, self.OnUpdateItalic)\r\n wx.EVT_TOOL(self, wx.ID_UNDERLINE, self.OnUnderline)\r\n wx.EVT_UPDATE_UI(self, wx.ID_UNDERLINE, self.OnUpdateUnderline)\r\n wx.EVT_TOOL(self, wx.ID_JUSTIFY_LEFT, self.OnAlignLeft)\r\n wx.EVT_UPDATE_UI(self, wx.ID_JUSTIFY_LEFT, self.OnUpdateAlignLeft)\r\n wx.EVT_TOOL(self, wx.ID_JUSTIFY_CENTER, self.OnAlignCenter)\r\n wx.EVT_UPDATE_UI(self, wx.ID_JUSTIFY_CENTER, self.OnUpdateAlignCenter)\r\n wx.EVT_TOOL(self, wx.ID_JUSTIFY_RIGHT, self.OnAlignRight)\r\n wx.EVT_UPDATE_UI(self, wx.ID_JUSTIFY_RIGHT, self.OnUpdateAlignRight)\r\n wx.EVT_TOOL(self, wx.ID_JUSTIFY_FILL, self.OnAlignJustify)\r\n wx.EVT_UPDATE_UI(self, wx.ID_JUSTIFY_FILL, 
self.OnUpdateAlignJustify)\r\n #wx.EVT_TOOL(self, toolbar_ORDEREDLIST, self.OnNumList)\r\n #wx.EVT_TOOL(self, toolbar_UNORDEREDLIST, self.OnBulletList)\r\n wx.EVT_TOOL(self, self.toolbar_ADDCONTENT, self.AddContent)\r\n wx.EVT_TOOL(self, self.toolbar_UPDATECONTENT, self.UpdateContent)\r\n tb.Realize()\r\n \r\n return tb\r\n\r\n def ForwardEvent(self, evt):\r\n # The RichTextCtrl can handle menu and update events for undo,\r\n # redo, cut, copy, paste, delete, and select all, so just\r\n # forward the event to it.\r\n self.createContentRichTextCtrl.ProcessEvent(evt)\r\n\r\n def OnStrikethrough(self, evt):\r\n self.TopLevelParent.DisplayDebug(\"\")\r\n #self.createContentRichTextCtrl\r\n \r\n def OnUpdateBold(self, evt):\r\n evt.Check(self.createContentRichTextCtrl.IsSelectionBold())\r\n\r\n def OnBold(self, evt):\r\n self.createContentRichTextCtrl.ApplyBoldToSelection()\r\n\r\n def OnItalic(self, evt): \r\n self.createContentRichTextCtrl.ApplyItalicToSelection()\r\n \r\n def OnUnderline(self, evt):\r\n self.createContentRichTextCtrl.ApplyUnderlineToSelection()\r\n \r\n def OnAlignLeft(self, evt):\r\n self.createContentRichTextCtrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_LEFT)\r\n \r\n def OnAlignRight(self, evt):\r\n self.createContentRichTextCtrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_RIGHT)\r\n \r\n def OnAlignCenter(self, evt):\r\n self.createContentRichTextCtrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_CENTRE)\r\n \r\n def OnAlignJustify(self, evt):\r\n self.createContentRichTextCtrl.ApplyAlignmentToSelection(wx.richtext.TEXT_ALIGNMENT_JUSTIFIED)\r\n \r\n def OnUpdateItalic(self, evt): \r\n evt.Check(self.createContentRichTextCtrl.IsSelectionItalics())\r\n \r\n def OnUpdateUnderline(self, evt): \r\n evt.Check(self.createContentRichTextCtrl.IsSelectionUnderlined())\r\n \r\n def OnUpdateAlignLeft(self, evt):\r\n evt.Check(self.createContentRichTextCtrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_LEFT))\r\n \r\n def OnUpdateAlignCenter(self, evt):\r\n evt.Check(self.createContentRichTextCtrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_CENTRE))\r\n \r\n def OnUpdateAlignRight(self, evt):\r\n evt.Check(self.createContentRichTextCtrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_RIGHT))\r\n \r\n def OnUpdateAlignJustify(self, evt):\r\n evt.Check(self.createContentRichTextCtrl.IsSelectionAligned(wx.richtext.TEXT_ALIGNMENT_JUSTIFIED))\r\n\r\n # Create Event Function\r\n def AddContent(self, evt):\r\n # should be disabled until someone enters content...\r\n out = StringIO()\r\n handler = wx.richtext.RichTextXMLHandler()\r\n txtBuffer = self.createContentRichTextCtrl.GetBuffer()\r\n handler.SaveStream(txtBuffer, out)\r\n out.seek(0)\r\n self.content = out.read()\r\n out.close()\r\n con = sqlite3.connect(self.TopLevelParent.database)\r\n cur = con.cursor()\r\n cur.execute(\"INSERT INTO ResourceTable(resourceData, courseID, resourceTypeID) VALUES(?, ?, 1)\", (self.content, self.TopLevelParent.currentCourseID))\r\n con.commit()\r\n con.close()\r\n self.TopLevelParent.currentContentDict = self.TopLevelParent.RefreshListBox(self.listBox, self.TopLevelParent.currentCourseID, 1, None)\r\n # put code in here to refresh available resources list box...\r\n # or we could just put code to refresh available resources list box when you select the layout tab button...\r\n # if that loads too quickly, then do it here\r\n \r\n def UpdateContent(self, evt):\r\n tmpString = self.listBox.GetStringSelection()\r\n tmpString2 = tmpString.split(\".\", 1)\r\n out = StringIO()\r\n 
handler = wx.richtext.RichTextXMLHandler()\r\n txtBuffer = self.createContentRichTextCtrl.GetBuffer()\r\n handler.SaveStream(txtBuffer, out)\r\n out.seek(0)\r\n self.content = out.read()\r\n out.close()\r\n con = sqlite3.connect(self.TopLevelParent.database)\r\n cur = con.cursor()\r\n cur.execute(\"UPDATE ResourceTable set resourceData = (?) where resourceID = (?)\", (self.content, int(tmpString2[0]),))\r\n con.commit()\r\n con.close()\r\n self.TopLevelParent.currentContentDict = self.TopLevelParent.RefreshListBox(self.listBox, self.TopLevelParent.currentCourseID, 1, None)\r\n # put code in here to refresh available resources list box...\r\n # or we could just put code to refresh available resources list box when you select the layout tab button...\r\n # if that loads too quickly, then do it here\r\n","sub_path":"src/ContentTab.py","file_name":"ContentTab.py","file_ext":"py","file_size_in_byte":11304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229598873","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport mnist_inference\nimport mnist_train\n\n# Reload the latest model every 10 seconds and evaluate its accuracy on the test data\nEVAL_INTERVAL_SECS = 10\n\n\ndef evaluate(mnist):\n with tf.Graph().as_default() as g:\n x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')\n y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')\n\n y = mnist_inference.inference(x, None)\n\n correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))\n accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # Load the model via variable renaming (restore moving-average shadow variables)\n variable_averages = tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n validation_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n global_step = ckpt.model_checkpoint_path.split(\"/\")[-1].split(\"-\")[-1]\n accuracy_score = sess.run(accuracy_op, feed_dict=validation_feed)\n\n print(\"After training %s step(s), validation accuracy is %g.\" % (global_step, accuracy_score))\n else:\n print(\"No checkpoint file found\")\n return\n\n\ndef main(argv=None):\n mnist = input_data.read_data_sets(\"./data/MNIST_data\", one_hot=True)\n evaluate(mnist)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n","sub_path":"mnist_tensorflow_dnn/mnist_eval.py","file_name":"mnist_eval.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627635890","text":"import pytesseract\nimport numpy as np\nimport cv2\n\nimg = cv2.imread('../../Util/Imagens/saida.jpg')\nrgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n#tesseract --help-extra (run the command in cmd)\nconfig_tesseract = \"--psm 8\"\ntexto = pytesseract.image_to_string(img, lang=\"por\", config=config_tesseract)\n\nprint(texto)\ncv2.imshow(\"Image\",rgb)\ncv2.waitKey(0)\n\n","sub_path":"pythonCodes/step1/OCR5.py","file_name":"OCR5.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"403856786","text":"#!/usr/bin/env python\n# coding:utf-8\n# Copyright (C) dirlt\nimport copy\n\n\ndef 
make_array(base, *dims):\n def _make(base, *dims):\n if len(dims) == 0:\n return base\n v = dims[0]\n res = []\n for _ in range(v):\n res.append(_make(base, *dims[1:]))\n return res\n\n return _make(base, *dims)\n\n\nclass Solution:\n def combinationSum3(self, k, n):\n \"\"\"\n :type k: int\n :type n: int\n :rtype: List[List[int]]\n \"\"\"\n\n cache = {}\n\n # cache = make_array(None, k + 1, 10, n + 1)\n\n def solve(k, idx, t):\n if t == 0:\n if k == 0:\n return [[]]\n return []\n\n if k == 0:\n return []\n if idx == 0:\n return []\n\n cache_key = '{}.{}.{}'.format(k, idx, t)\n if cache_key in cache:\n return cache[cache_key]\n # if cache[k][idx][t] is not None:\n # return cache[k][idx][t]\n\n res = []\n if idx <= t:\n xs = solve(k - 1, idx - 1, t - idx)\n xs = copy.deepcopy(xs)\n for x in xs:\n x.append(idx)\n res.extend(xs)\n xs = solve(k, idx - 1, t)\n res.extend(xs)\n cache[cache_key] = res\n # cache[k][idx][t] = res\n return res\n\n res = solve(k, 9, n)\n return res\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.combinationSum3(3, 10))\n print(s.combinationSum3(3, 7))\n print(s.combinationSum3(3, 9))\n","sub_path":"codes/contest/leetcode/combination-sum-iii.py","file_name":"combination-sum-iii.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"624490490","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom main import data\n\ndf = pd.read_excel('FuelCG.xlsx', header=None, sheet_name='Sheet1')\ntime_flight = data['time']\nff_le = data['lh_engine_FMF']\nff_re = data['rh_engine_FMF']\ndef TrapArea(j):\n dt = time_flight[j+1] -time_flight[j]\n f1 = ff_le[j]/3600\n f2 = ff_le[j+1]/3600\n f3 = ff_re[j]/3600\n f4 = ff_re[j+1]/3600\n I1 = (f2-f1)* (dt)/2. +f1*(dt)\n I2 = (f4-f3)* (dt)/2. +f3*(dt)\n return I1+I2\n\ndef totalfuelused(time):\n j = 0 \n \n totalfuelused = 0\n while time_flight[j]< time :\n \n integral_dt = TrapArea(j)\n totalfuelused = totalfuelused + integral_dt\n j = j + 1\n integral_dt = 0\n \n return totalfuelused\n\n\nfuelUsed = {}\nintegral = 0\nfor t in time_flight[0:48320]:\n integral += TrapArea(time_flight[time_flight == t].index[0])\n fuelUsed[t] = integral\n\n\ndef interpolatefuel(fuel):\n \n for i in range(len(df)):\n if fuel<= df.iat[i,0]:\n momentcg = df.iat[i,1] - ((df.iat[i,1] - df.iat[i-1,1])/100 )* (df.iat[i,0] - fuel)\n break\n \n \n return momentcg\n\n\n","sub_path":"CGcalc.py","file_name":"CGcalc.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501355172","text":"# encoding=utf8\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom django.core.cache import cache\n\nfrom billing_proxy.api import ceilometer\nfrom billing_proxy.api import exceptions\nfrom billing_proxy.api import keystone\nfrom billing_proxy.api import utils\nfrom billing_proxy.api.views import APIView\nfrom billing_proxy.client import openstack_client\nfrom billing_proxy import models\nfrom billing_proxy.models import BillingResOrder\nfrom billing_proxy.models import resource_order_select_for_update_by_id\nfrom billing_proxy.worker import cache_manager\n\n\nLOG = logging.getLogger(__name__)\n\nCACHE_KEY_CONTRACT = \"contracts\"\n\nFIP_METER = \"network.floating.ip.outgoing.bytes\"\nSYSTEMSNAPSHOT_METER = \"instance.snapshot.size\"\nPROTECTED_GROUP_VOLUME_METER = \"protectiongroup.backup.volum.size\"\nPROTECTED_GROUP_NETWORK_METER = \"protectiongroup.volume.network.bytes\"\n\n\nclass Contract(APIView):\n\n    @utils.argument_check([\"account\", \"contractCode\", \"isDisplayPrice\",\n                           \"contractType\", \"contractId\"])\n    def post(self, request, *args, **kwargs):\n        try:\n            # add new product_alias into cache\n            contract_id = kwargs[\"contractId\"]\n            data = cache_manager.contract_data_adapter(kwargs)\n            cache_manager.cache_contract_by_key(data,\n                                                contract_id=contract_id)\n        except Exception as e:\n            LOG.exception(\"Create contract fail : {0}\".format(e))\n\n    @utils.argument_check([\"account\", \"contractCode\", \"isDisplayPrice\",\n                           \"contractType\"])\n    def put(self, request, *args, **kwargs):\n        contract_id = kwargs[\"contract_id\"]\n        account = kwargs[\"account\"]\n        utils.check_user_is_existed(account)\n        # check if contract_id exists\n        old_contracts = cache.get(CACHE_KEY_CONTRACT, {})\n        if contract_id not in old_contracts[\"index\"]:\n            raise exceptions.ContractNotExisted\n        data = cache_manager.contract_data_adapter(kwargs)\n        cache_manager.cache_contract_by_key(data, contract_id)\n\n    def delete(self, request, *args, **kwargs):\n        contract_id = kwargs[\"contract_id\"]\n        old_contracts = cache.get(CACHE_KEY_CONTRACT, {})\n        if contract_id not in old_contracts[\"index\"]:\n            raise exceptions.ContractNotExisted\n        contract_res_relation = BillingResOrder.objects.filter(\n            contract_id=contract_id)\n        if len(contract_res_relation):\n            raise exceptions.ContractResourceRelationExisted\n        cache_manager.delete_cached_contract(contract_id)\n\n\nclass ContractResourceRelation(APIView):\n\n    @utils.argument_check([\"ResContract\"])\n    def put(self, request, *args, **kwargs):\n        \"\"\"update relation between resource and contract\"\"\"\n\n        res_contracts = kwargs[\"ResContract\"]\n        # check that every resource id exists, otherwise raise an exception\n        for res_con in res_contracts:\n            account = res_con[\"account\"]\n            utils.check_user_is_existed(account)\n            resource_id = res_con.get(\"resourceId\")\n            try:\n                BillingResOrder.objects.get(resource_id=resource_id)\n            except Exception as e:\n                LOG.exception(e)\n                raise exceptions.ResourceNotFound\n\n        # do update\n        for res_con in res_contracts:\n            resource_id = res_con.get(\"resourceId\")\n            contract_id = res_con.get(\"contractId\")\n            resource_order_select_for_update_by_id(resource_id=resource_id,\n                                                   contract_id=contract_id)\n\n\nclass InstanceFipView(APIView):\n\n    @utils.argument_check([\"instance_ids\"])\n    def post(self, request, *args, **kwargs):\n        \"\"\"Get all floating IPs bound to the given instances\n\n        ret = {\n            \"InstanceFip\":\n                [\n                    {\n                        \"id\": \"project_id\",\n                        \"floatingips\": [\"1.2.3.4\"]\n                    }\n                ]\n        }\n        \"\"\"\n        instance_ids = 
kwargs[\"instance_ids\"]\n        nova = openstack_client.get_novaclient()\n        ret = {\n            \"InstanceFip\": []\n        }\n        for instance_id in instance_ids:\n            try:\n                instance = nova.servers.get(instance_id)\n            except Exception as e:\n                LOG.error(\"Instance Not Found : {0}\".format(e))\n                continue\n            addresses = instance.addresses\n            project_id = instance.tenant_id\n            floating_ips = []\n            for net_name in addresses:\n                spec_network_list = addresses[net_name]\n                for network in spec_network_list:\n                    if network['OS-EXT-IPS:type'] == 'floating':\n                        floating_ips.append(network['addr'])\n\n            ret['InstanceFip'].append({'id': project_id,\n                                       'floatingips': floating_ips})\n        return ret\n\n\ndef query_data(date_from,\n               date_to,\n               meter,\n               stats_attr,\n               date_options=\"other\",\n               period=None,\n               instance=None):\n    # Normalize the start and end times; this does two things:\n    # 1. formats them as yyyy-mm-dd, 2. pushes the given end time forward by 23h59m59s,\n    # so that passing the same date for both returns the usage for that whole day\n    date_from, date_to = ceilometer._calc_date_args(date_from,\n                                                    date_to,\n                                                    date_options)\n    # Compute the number of seconds from start to end, i.e. the granularity\n    if not period:\n        period = ceilometer._calc_period(date_from, date_to)\n    additional_query = []\n    # Assemble the query conditions\n    if date_from:\n        additional_query += [{'field': 'timestamp',\n                              'op': 'ge',\n                              'value': date_from}]\n    if date_to:\n        additional_query += [{'field': 'timestamp',\n                              'op': 'le',\n                              'value': date_to}]\n\n    query = []\n    # resource ID\n    if instance:\n        query += [{'field': 'resource_id', 'op': 'eq', 'value': instance}]\n\n    if additional_query:\n        if not ceilometer.is_iterable(additional_query):\n            raise ValueError(\"Additional query must be list of\"\n                             \" conditions. See the docs for format.\")\n        query = query + additional_query\n    aggregates = []\n    # Sum over all data points\n    aggregate_args = \"sum\"\n    try:\n        LOG.info(\"meter: {0}, query: {1}, period: {2}\".format(meter, query,\n                                                              period))\n        aggregates.append(dict(zip(('func', 'param'),\n                                   aggregate_args.split(\"<-\"))))\n        # Query through the ceilometer API\n        statistics = ceilometer.statistic_list(meter, query=query,\n                                               period=period,\n                                               aggregates=aggregates)\n        # On success this returns a list of Statistic objects holding at most one result\n        if statistics:\n            return getattr(statistics[0], stats_attr, None)\n\n        LOG.info(\"statistic data: {0}\".format(statistics))\n    except Exception as e:\n        LOG.exception(\"get meter data fail : {0}\".format(e))\n        statistics = None\n    return statistics\n\n\ndef get_floatingip_usage(date_from, date_to, resource_id, meter, stats_attr):\n    \"\"\"get floating ip sum usage from start_time to end_time\n\n    :param date_from:\n    :param date_to:\n    :param resource_id:\n    :param meter:\n    :return:\n    \"\"\"\n    statistics = query_data(date_from,\n                            date_to,\n                            meter,\n                            stats_attr,\n                            instance=resource_id)\n    return statistics\n\n\ndef get_statistic_data(resource_type, resource_id,\n                       start_time, end_time, stats_attr):\n    if resource_type == \"bandwidth\":\n        # floating (public) IP usage\n        LOG.info(\"get floating ip statistic data, {0}\".format(\n            resource_id))\n        value = get_floatingip_usage(start_time, end_time, resource_id,\n                                     FIP_METER, stats_attr)\n        if not value:\n            value = 0\n        return {\"resourceId\": resource_id,\n                \"resourceType\": \"floatingBandwidth\",\n                \"startTime\": start_time,\n                \"endTime\": end_time,\n                \"statistics\": [{\"name\": \"fixedBandwidth\",\n                                \"value\": value,\n                                \"unit\": \"Mbps\"}]}\n    elif resource_type == \"systemSnapshot\":\n        # system disk snapshot usage\n        LOG.info(\"get systemSnapshot statistic data, {0}\".format(\n            resource_id))\n        value = query_data(start_time,\n                           end_time,\n                           SYSTEMSNAPSHOT_METER,\n                           stats_attr,\n                           instance=resource_id)\n\n        if not value:\n            value = 0\n        return {\"resourceId\": resource_id,\n                \"resourceType\": \"systemSnapshot\",\n                \"startTime\": 
start_time,\n                \"endTime\": end_time,\n                \"statistics\": [{\"name\": \"SnapshotCapability\",\n                                \"value\": value,\n                                \"unit\": \"GBHour\"}]}\n\n    elif resource_type == \"Anti-DDoS\":\n        # Defined here inside the function rather than at module level, so a failed\n        # initialization of the protection component does not take down the whole bp\n        cloud_guard_client = openstack_client.get_antiddos_client()\n        # cloud security component usage\n        LOG.info(\"get Anti-DDoS statistic data, {0}\".format(\n            resource_id))\n        # get the order id that belongs to this resource\n        order_id = models.get_order_by_resource_id(resource_id)\n        query_body = {}\n        query_body.update({\"startdate\": start_time, \"enddate\": end_time,\n                           \"orderId\": order_id})\n        # fetch the statistics of the protection plan\n        statistics_data = cloud_guard_client.get_cloud_guard_statistic(\n            params=query_body)\n        return {\"resourceId\": resource_id,\n                \"statistics\": statistics_data}\n    elif resource_type == \"volumeBackup\":\n        # protection group usage\n        LOG.info(\"get protectgroup network statistic data, {0}\".format(\n            resource_id))\n        statistics = []\n        # get the protection group resource usage, stats_attr is sum\n        value = query_data(start_time,\n                           end_time,\n                           PROTECTED_GROUP_NETWORK_METER,\n                           stats_attr,\n                           instance=resource_id)\n        statistics.append({\"name\": \"BackupCapability\",\n                           \"value\": value,\n                           \"unit\": \"GB\"})\n        LOG.info(\"get protectgroup volume statistic data, {0}\".format(\n            resource_id))\n        value = query_data(start_time,\n                           end_time,\n                           PROTECTED_GROUP_VOLUME_METER,\n                           stats_attr,\n                           instance=resource_id)\n        statistics.append({\"name\": \"BackupFlow\",\n                           \"value\": value,\n                           \"unit\": \"GB\"})\n        return {\"resourceId\": resource_id,\n                \"statistics\": statistics}\n    else:\n        raise exceptions.StatisticDataNotReady\n\n\nclass ResourceUsageView(APIView):\n    @utils.argument_check([\"usage\", \"startTime\", \"endTime\"])\n    def post(self, request, *args, **kwargs):\n        usage = kwargs[\"usage\"]\n        startTime = kwargs[\"startTime\"]\n        endTime = kwargs[\"endTime\"]\n        stats_attr = \"sum\"\n        ret = {\"usage\": []}\n        for req_res in usage:\n            resource_type = req_res[\"resourceType\"]\n            resource_id = req_res[\"resourceId\"]\n            LOG.info(\"getting statistic data\"\n                     \"resource_id: {0},\"\n                     \" resource_type: {1}\".format(resource_id,\n                                                  resource_type))\n            statistics_data = get_statistic_data(resource_type,\n                                                 resource_id,\n                                                 startTime, endTime,\n                                                 stats_attr)\n            if statistics_data:\n                ret[\"usage\"].append(statistics_data)\n        return ret\n\n\nclass UserUsage(APIView):\n\n    def is_resource_a_flow_type(self, resource_id):\n        product_type = models.get_product_type_by_resource_id(resource_id)\n        if product_type == \"floatingBandwidth\":\n            return True\n        product_name = models.get_product_name_by_resource_id(resource_id)\n        if product_name == \"systemSnapshot\":\n            return True\n        elif product_name == \"Anti-DDoS\":\n            return True\n        else:\n            return False\n\n    def get_flow_resource(self, user_name):\n        resource_ids_types = models.get_existed_resource_attr_by_user_name(\n            user_name, \"resource_id\", \"resource_type\")\n        return [(resource_id, resource_type) for resource_id, resource_type in\n                resource_ids_types\n                if self.is_resource_a_flow_type(resource_id)]\n\n    @utils.argument_check([\"account\", \"startTime\", \"endTime\"])\n    def post(self, request, *args, **kwargs):\n        ret = {\"usage\": []}\n        startTime = kwargs[\"startTime\"]\n        endTime = kwargs[\"endTime\"]\n        stats_attr = \"sum\"\n        users = keystone.list_user(username=kwargs['account'])\n        if not users:\n            raise exceptions.UserNotExists\n        resource_ids_types = self.get_flow_resource(kwargs['account'])\n        for resource_id, resource_type in resource_ids_types:\n            product_type = models.get_product_type_by_resource_id(resource_id)\n            if product_type == \"floatingBandwidth\":\n                resource_type = 
\"bandwidth\"\n LOG.info(\"get {0},resource_id:{1} statistic data\".format(\n resource_type, resource_id))\n try:\n statistics_data = get_statistic_data(resource_type,\n resource_id,\n startTime, endTime,\n stats_attr)\n if statistics_data:\n ret[\"usage\"].append(statistics_data)\n except Exception:\n LOG.error(\"getting statistic data error,\"\n \"{0},resource_id:{1}\".format(resource_type,\n resource_id))\n return ret\n","sub_path":"billing_proxy/api/contract/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573686937","text":"# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.\n# This product includes software developed at Datadog (https://www.datadoghq.com/).\n# Copyright 2019-Present Datadog, Inc.\nfrom __future__ import annotations\n\nfrom typing import Union\n\nfrom datadog_api_client.model_utils import (\n ModelNormal,\n cached_property,\n none_type,\n unset,\n UnsetType,\n)\n\n\nclass SearchSLOResponseLinks(ModelNormal):\n @cached_property\n def openapi_types(_):\n return {\n \"first\": (str,),\n \"last\": (str, none_type),\n \"next\": (str,),\n \"prev\": (str, none_type),\n \"self\": (str,),\n }\n\n attribute_map = {\n \"first\": \"first\",\n \"last\": \"last\",\n \"next\": \"next\",\n \"prev\": \"prev\",\n \"self\": \"self\",\n }\n\n def __init__(\n self_,\n first: Union[str, UnsetType] = unset,\n last: Union[str, none_type, UnsetType] = unset,\n next: Union[str, UnsetType] = unset,\n prev: Union[str, none_type, UnsetType] = unset,\n self: Union[str, UnsetType] = unset,\n **kwargs,\n ):\n \"\"\"\n Pagination links.\n\n :param first: Link to last page.\n :type first: str, optional\n\n :param last: Link to first page.\n :type last: str, none_type, optional\n\n :param next: Link to the next page.\n :type next: str, optional\n\n :param prev: Link to previous page.\n :type prev: str, none_type, optional\n\n :param self: Link to current page.\n :type self: str, optional\n \"\"\"\n if first is not unset:\n kwargs[\"first\"] = first\n if last is not unset:\n kwargs[\"last\"] = last\n if next is not unset:\n kwargs[\"next\"] = next\n if prev is not unset:\n kwargs[\"prev\"] = prev\n if self is not unset:\n kwargs[\"self\"] = self\n super().__init__(kwargs)\n","sub_path":"src/datadog_api_client/v1/model/search_slo_response_links.py","file_name":"search_slo_response_links.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241825662","text":"import math\nimport shelve\n\nimport numpy as np\n\nfrom dateutil import relativedelta\nfrom datetime import datetime\nfrom typing import Dict, List, Tuple, Union, Any\n\n\n_dir = '/Users/matthewjbelcher/PycharmProjects/Reserver'\n\n\ndef compare_dicts(dict1: Dict, dict2: Dict) -> bool:\n \"\"\" Compares two dictionaries to see if they are equal.\n\n :param dict1: first dictionary\n :param dict2: second dictionary\n :return: True if two dictionaries are equal; False otherwise\n \"\"\"\n dirty = False\n\n if dict1 is None or dict2 is None:\n dirty = True\n elif dict1.keys() != dict2.keys():\n dirty = True\n else:\n for key in dict1:\n try:\n if dict1[key] != dict2[key]:\n dirty = True\n except ValueError:\n if not ((dict1[key] == dict2[key]) | (np.isnan(dict1[key]) & np.isnan(dict2[key]))).all():\n dirty = True\n except KeyError:\n dirty = True\n\n return dirty\n\n\ndef 
get_app_settings(keyword) -> Any:\n\n with shelve.open(f'{_dir}/settings/app', 'r') as s:\n assert keyword in s, 'keyword: %r not found in app settings' % keyword\n return s[keyword]\n\n\ndef get_dataset_label_path(name_path: str) -> str:\n project_path = get_session_settings('project path')\n\n _dict = {key.rstrip(): value for value, key in [line.split(sep='=') for line in\n open('{0}/dataset_map.txt'.format(project_path))]}\n\n return _dict[name_path]\n\n\ndef get_dataset_name_path(label_path: str) -> str:\n project_path = get_session_settings('project path')\n\n _dict = {key: value.rstrip() for key, value in [line.split(sep='=') for line in\n open('{0}/dataset_map.txt'.format(project_path))]}\n\n return _dict[label_path]\n\n\ndef get_project_settings(keywords: Union[str, Tuple]):\n \"\"\" Gets the settings for the current project.\n\n :param keywords: tuple of key(s) providing direction to the desired property(s)\n :return: if several keywords is/are specified, settings are returned as a tuple; otherwise single property returned\n as most appropriate type\n \"\"\"\n project_path = get_session_settings('project path')\n\n dict_ = {key: value.rstrip() for key, value in [line.split(sep='=') for line in\n open('{0}/settings.txt'.format(project_path))]}\n\n for key in dict_:\n\n if dict_[key].count('/') == 2 and len(dict_[key]) == 10: # probably a date; try to convert\n try:\n dict_[key] = datetime.strptime(dict_[key], '%d/%m/%Y').date()\n except ValueError:\n pass\n else:\n try:\n dict_[key] = float(dict_[key])\n except ValueError:\n pass\n\n if type(keywords) == str:\n return dict_[keywords]\n else:\n return tuple(dict_[keyword] for keyword in keywords)\n\n\ndef get_session_settings(key: str=None) -> Union[Dict, str]:\n \"\"\" Gets the settings for the current session.\n\n :param key: optional key providing direction to the desired property\n :return: if no keyword is specified, all settings are returned as dictionary; otherwise, single property returned\n \"\"\"\n with shelve.open('{0}/settings/session'.format(_dir), flag='r') as s:\n if str:\n return s[key] # type: str\n else:\n return {key: s[key] for key in s} # type: Dict\n\n\ndef set_session_settings(key: str, value: str) -> None:\n \"\"\" Updates the session settings file.\n\n :param key: key providing direction to the property to be updated\n :param value: new value for the setting\n :return: None\n \"\"\"\n with shelve.open('{0}/settings/session'.format(_dir), flag='c') as s:\n s[key] = value\n\n\ndef get_dHeaders(dLength: int, dCount: int, _type: str, basis: str='development') -> Tuple[str, ...]:\n \"\"\" Returns a tuple of development headers for a triangle or method.\n\n :param dLength: development length of the triangle/method for which headers are to be returned\n :param dCount: number of development periods in the triangle/method\n :param _type: whether the headers being return are for a triangle or CLM (no other types currently supported)\n :param basis: whether the headers need to be on a development or calendar period basis. 
Only relevant for triangles\n :return: tuple containing the development headers\n \"\"\"\n assert _type in ('triangle', 'CLM'), 'type must be one of: triangle, CLM; entered %r' % type\n if _type == 'CLM':\n assert basis == 'development', 'basis must be set to development if prefix == CLM; entered %r' % basis\n\n p_start_date, p_end_date, p_dLength = get_project_settings(('start date', 'end date', 'dLength'))\n p_monthCount = (p_end_date.year - p_start_date.year) * 12 + (p_end_date.month - p_start_date.month) + 1\n p_dCount = int((p_monthCount - 1) // p_dLength + 1)\n\n if _type == 'triangle' and basis == 'development':\n labels = list(range(p_dCount, 0, -dLength))\n labels.reverse()\n\n elif _type == 'triangle' and basis == 'calendar':\n labels = [(p_end_date - relativedelta.relativedelta(months=d)).strftime('%b%y')\n for d in range(0, dCount, dLength)]\n labels.reverse()\n\n elif _type == 'CLM':\n _ = list(range(p_dCount, 0, -dLength))\n _.reverse()\n labels = ('({0})-({1})'.format(_[d], _[d + 1]) for d in range(len(_) - 1))\n\n else:\n labels = ()\n\n return tuple(labels)\n\n\ndef get_dCount(dLength: int) -> int:\n \"\"\" Get the number of development periods for a specified development length.\n\n :param dLength: development period length\n :return: number of development periods\n \"\"\"\n start, stop = get_project_settings(('start date', 'end date')) # type: datetime\n dCount_in_months = (stop.year - start.year) * 12 + (stop.month - start.month) + 1\n dCount = int(math.ceil(dCount_in_months / dLength))\n\n return dCount\n\n\ndef get_oCount(oLength: int) -> int:\n \"\"\" Get the number of origin periods for a specified origin length.\n\n :param oLength: origin period length\n :return: number of origin periods\n \"\"\"\n start, stop = get_project_settings(('start date', 'end date')) # type: datetime\n oCount_in_months = (stop.year - start.year) * 12 + (12 - start.month) + 1\n oCount = int(math.ceil(oCount_in_months / oLength))\n\n return oCount\n\n\ndef get_oHeaders(oLength: int, oCount: int) -> Tuple[str, ...]:\n \"\"\" Returns a tuple of origin headers for a triangle or method.\n\n :param oLength: origin length of the triangle/method for which headers are to be returned\n :param oCount: number of origin periods in the triangle/method\n :return: tuple containing the origin headers\n \"\"\"\n start_date = get_project_settings('start date')\n oPeriods = [start_date + relativedelta.relativedelta(months=o * oLength) for o in range(oCount)] # type: List\n\n if oLength == 1:\n labels = tuple(oPeriod.strftime('%b%y') for oPeriod in oPeriods)\n elif oLength == 3:\n labels = tuple('{0} Q{1}'. 
format(oPeriod.year, (oPeriod.month - 1) // 3 + 1) for oPeriod in oPeriods)\n    elif oLength == 6:\n        labels = tuple('{0} H{1}'.format(oPeriod.year, (oPeriod.month - 1) // 6 + 1) for oPeriod in oPeriods)\n    elif oLength == 12:\n        labels = tuple(oPeriod.year for oPeriod in oPeriods)\n    else:\n        oPeriods.append(start_date + relativedelta.relativedelta(months=oCount * oLength))  # add the end date\n        labels = tuple('{0}-{1}'.format(oPeriods[o].strftime('%b%y'),\n                                        (oPeriods[o + 1] - relativedelta.relativedelta(days=1)).strftime('%b%y'))\n                       for o in range(oCount))\n\n    return labels\n\n\ndef get_root(directory: str) -> str:\n    \"\"\" Gets the root directory from a specified sub-directory.\n\n    :param directory: sub-directory from which to extract the root\n    :return: path of the root directory\n    \"\"\"\n\n    tail = directory[directory.find('db/') + 3:].split('/', maxsplit=2)[2]\n    root = directory.replace(tail, '')\n    print(root)\n    return root\n\n\ndef lcm(a: int, b: int) -> int:\n    \"\"\" Lowest common multiple of two integers. \"\"\"\n    return (a * b) // math.gcd(a, b)\n\n\nif __name__ == '__main__':\n    print(get_project_settings('oLength'))\n","sub_path":"misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":8366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601693325","text":"from lr2irscraper import get_bms_table\n\nfrom lr2iraggregator.logic.bms_tables import *\nfrom lr2iraggregator.logic.lr2_hash import fill_ids, fill_hashes\nfrom lr2iraggregator.util.luigiutil import *\nfrom lr2iraggregator.luigitask.lr2_hash import RegisterNewHashesToHashTableTask\n\n\nclass LoadBMSTableTask(WorkDirDFCSVTask):\n    \"\"\"\n    Load the data of a single BMS difficulty table. Takes a URL or a path to a local csv file as input.\n\n    Args:\n        bms_table_path: string holding the URL or path of the difficulty table\n\n    Returns:\n        (type, lr2_id, lr2_hash, level, title)\n    \"\"\"\n\n    name = \"bms_table\"\n    dtypes = {\"lr2_id\": str}\n\n    bms_table_path = luigi.Parameter()\n\n    def run(self):\n\n        if is_url(self.bms_table_path):\n            df = get_bms_table(self.bms_table_path)\n            df = convert_lr2irscraper_bms_table_to_bmsirt_format(df)\n        else:\n            df = (\n                pd.read_csv(self.bms_table_path, header=0, dtype=str)\n                .astype({\"type\": CategoricalDtype(categories=[\"bms\", \"course\"])})\n            )\n\n        self.save(df, index=False)\n\n\nclass LoadBMSTableListTask(WorkDirDFCSVTask):\n    \"\"\"\n    Load the list of difficulty tables.\n    All relative paths given in the path column of the list are converted to absolute paths.\n\n    Args:\n        bms_table_list_csv: difficulty table list\n\n    Returns:\n        (name, path, is_dan)\n    \"\"\"\n    name = \"bms_table_list\"\n    dtypes = {\"is_dan\": bool}\n\n    bms_table_list_csv = luigi.Parameter()\n\n    def run(self):\n        self.save(load_bms_table_list_csv(self.bms_table_list_csv), index=False)\n\n\nclass LoadBMSTablesWithIDUnFilledTask(WorkDirDFCSVTask):\n    \"\"\"\n    Load every difficulty table in the list and merge their contents into one table.\n    At this stage some LR2IDs may (or may not) be missing; they are filled in later using the hash table.\n\n    Args:\n        bms_table_list_csv: difficulty table list (name, path)\n\n    Returns:\n        (type, lr2_id, bms_table_name, lr2_hash, level, title)\n    \"\"\"\n\n    name = \"merged_bms_tables_without_ids\"\n    dtypes = {\"lr2_id\": str}\n\n    bms_table_list_csv = luigi.Parameter()\n\n    def requires(self):\n        return LoadBMSTableListTask(\n            bms_table_list_csv=self.bms_table_list_csv,\n            work_dir=self.work_dir\n        )\n\n    def run(self):\n        bms_table_list = self.load_requires()\n        bms_table_tasks = {\n            row[\"name\"]: LoadBMSTableTask(\n                bms_table_path=row[\"path\"],\n                work_dir=self.work_dir\n            )\n            for _, row in bms_table_list.iterrows()\n        }\n        yield bms_table_tasks.values()\n\n        df = merge_bms_tables({key: task.load() for key, task in 
bms_table_tasks.items()})\n        self.save(df, index=False)\n\n\nclass UpdateHashTableTask(WorkDirJSONTask):\n    \"\"\"\n    From the entries in the difficulty tables of bms_table_list, extract those whose ID is unknown,\n    look those IDs up by querying LR2IR, and register them in the hash table.\n    (More precisely, this generates a new hash table consisting of the known hash table plus the newly added hashes.)\n    \"\"\"\n    name = \"hash_table_updated\"\n\n    bms_table_list_csv = luigi.Parameter()\n    hash_table_json = luigi.Parameter()\n\n    def requires(self):\n        return LoadBMSTablesWithIDUnFilledTask(\n            bms_table_list_csv=self.bms_table_list_csv,\n            work_dir=self.work_dir\n        )\n\n    def run(self):\n        hash_table_task = RegisterNewHashesToHashTableTask(\n            list_csv=self.requires().output().path,\n            hash_table_json=self.hash_table_json,\n            work_dir=self.work_dir\n        )\n        yield hash_table_task\n        self.save(hash_table_task.load())\n\n\nclass LoadBMSTablesTask(WorkDirDFCSVTask):\n    \"\"\"\n    Fill in the missing IDs of the LoadBMSTablesTaskWithIDUnfilled output using the hash table, and return it.\n\n    Args:\n        bms_table_list_csv: difficulty table list (name, path)\n        hash_table_json: hash table {lr2_hash: (type, lr2_id)}\n\n    Returns:\n        (type, lr2_id, bms_table_name, lr2_hash, level, title)\n\n    \"\"\"\n    name = \"bms_tables\"\n\n    bms_table_list_csv = luigi.Parameter()\n    hash_table_json = luigi.Parameter()\n\n    def requires(self):\n        return [\n            LoadBMSTablesWithIDUnFilledTask(\n                bms_table_list_csv=self.bms_table_list_csv,\n                work_dir=self.work_dir\n            ),\n            UpdateHashTableTask(\n                bms_table_list_csv=self.bms_table_list_csv,\n                hash_table_json=self.hash_table_json,\n                work_dir=self.work_dir\n            )\n        ]\n\n    def run(self):\n        bms_tables_csv_without_ids, hash_table = self.load_requires()\n        bms_tables_csv = fill_ids(bms_tables_csv_without_ids, hash_table)\n        bms_tables_csv = fill_hashes(bms_tables_csv, hash_table)\n        self.save(bms_tables_csv, index=False)\n\n\nclass MakeItemCSVTask(WorkDirDFCSVTask):\n    \"\"\"\n    Create item.csv from the output of LoadBMSTablesTask.\n\n    Args:\n        bms_table_list_csv: difficulty table list (name, path)\n        hash_table_json: hash table {lr2_hash: (type, lr2_id)}\n\n    Returns:\n        (type, lr2_id, lr2_hash, title)\n    \"\"\"\n\n    name = \"item\"\n    dtypes = {\"lr2_id\": str}\n\n    bms_table_list_csv = luigi.Parameter()\n    hash_table_json = luigi.Parameter()\n\n    def requires(self):\n        return LoadBMSTablesTask(\n            bms_table_list_csv=self.bms_table_list_csv,\n            hash_table_json = self.hash_table_json,\n            work_dir=self.work_dir\n        )\n\n    def run(self):\n        self.save(make_item_df(self.load_requires()), index=False)\n\n\nclass MakeLevelCSVTask(WorkDirDFCSVTask):\n    \"\"\"\n    Create level.csv from the output of LoadBMSTablesTask.\n\n    Args:\n        bms_table_list_csv: difficulty table list (name, path)\n        hash_table_json: hash table {lr2_hash: (type, lr2_id)}\n\n\n    Returns:\n        (type, lr2_id, bms_table_name, level)\n    \"\"\"\n\n    name = \"level\"\n    dtypes = {\"lr2_id\": str}\n\n    bms_table_list_csv = luigi.Parameter()\n    hash_table_json = luigi.Parameter()\n\n    def requires(self):\n        return LoadBMSTablesTask(\n            bms_table_list_csv=self.bms_table_list_csv,\n            hash_table_json=self.hash_table_json,\n            work_dir=self.work_dir\n        )\n\n    def run(self):\n        self.save(make_level_df(self.load_requires()), index=False)\n","sub_path":"lr2iraggregator/luigitask/bms_tables.py","file_name":"bms_tables.py","file_ext":"py","file_size_in_byte":6617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490184879","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport time\nimport copy\nimport json\nimport zmqutils\nimport logging as log\nimport threading\nfrom centroidtracker import CentroidTracker\nfrom iothub_client import IoTHubClient, IoTHubTransportProvider\nfrom iothub_client import IoTHubMessage, 
IoTHubError\n\n\nlog.basicConfig(stream=sys.stdout, level=log.DEBUG)\n\n# Get value from env variable\nCONNECTION_STRING = os.getenv(\"AZ_CONNECTION_STRING\", None)\n# Using the MQTT protocol.\nPROTOCOL = IoTHubTransportProvider.MQTT\n# ENV\nvideo_config_path = os.getenv(\"VIDEO_CONFIG_PATH\", \"\")\ncam_id_path = os.getenv(\"MAC_ADDRESS_PATH\", \"\")\nzmq_address = os.getenv(\"ZMQ_ADDRESS\", \"tcp://localhost:5960\")\nzmq_video_address = os.getenv(\"ZMQ_VIDEO_ADDRESS_SUB\", \"tcp://*:5561\")\nsending_interval = int(os.getenv(\"IOTHUB_SENDING_INTERVAL\", \"5\"))\nmax_disappeared = os.getenv(\"MAX_DISAPPEARED\", \"50\")\nthread_sending_interval_video_address = int(\n os.getenv(\"THREAD_SENDING_INTERVAL_VIDEO_ADDRESS\", \"20\")\n)\n\n# Define global variable\ntimestamps = None\ntimestamps_is_checked = False\n\n\ndef send_confirmation_callback(message, result, user_context):\n print(\"IoT Hub responded to message with status: %s\" % (result))\n\n\ndef iothub_client_init():\n # Create an IoT Hub client\n client = IoTHubClient(CONNECTION_STRING, PROTOCOL)\n return client\n\n\ndef iothub_client_run(json_object):\n\n try:\n # Build the message with detection result values.\n message = IoTHubMessage(json_object)\n\n # Send the message.\n client.send_event_async(message, send_confirmation_callback, None)\n\n except IoTHubError as iothub_error:\n log.error(\"Unexpected error %s from IoTHub\" % iothub_error)\n return\n except KeyboardInterrupt:\n log.error(\"IoTHubClient sample stopped\")\n\n\n# ZMQ Publisher\npub, pub_ctx = zmqutils.pub(zmq_video_address)\n# ZMQ Subscriber\nsub, ctx = zmqutils.sub(zmq_address)\n\n# Init a IoT Hub client\nclient = iothub_client_init()\n\n\ndef get_video_source_from_config_file():\n data = {\"video_address\": \"\"}\n try:\n video_address_from_config_file = (\n open(video_config_path).readline().split(\"=\")[1].strip()\n )\n\n except BaseException:\n # Get defaul value\n video_address_from_config_file = \"tcp://video-manager:5562\"\n log.info(\n \"Error config file!!! Using this default value is {}\".format(\n video_address_from_config_file\n )\n )\n\n data[\"video_address\"] = video_address_from_config_file\n pub.send_json(data, flags=0)\n log.info(\n \"Just sent the video source address : {}\".format(\n video_address_from_config_file\n )\n )\n\n\ndef get_cam_id_from_config_file():\n try:\n cam_id = open(cam_id_path).readline().split(\"=\")[1].strip()\n\n except BaseException:\n # Get defaul value\n cam_id = \"00:00:00:00:00:00\"\n log.info(\n \"Error config file!!! 
Using this default value is {}\".format(\n cam_id\n )\n )\n return cam_id\n\n\n# Get video source at first\nget_video_source_from_config_file()\n\n# Get mac address from config file\nmac_address = get_cam_id_from_config_file()\n\n# Tracker\nct = CentroidTracker(float(max_disappeared))\n\n\ndef check_point_in_polygon(face_geometry, person_geometry):\n center_face = (\n face_geometry[0] + (face_geometry[2] / 2),\n face_geometry[1] + (face_geometry[3] / 2),\n )\n if (\n center_face[0] > person_geometry[0] and\n center_face[0] < person_geometry[2] and\n center_face[1] > person_geometry[1] and\n center_face[1] < person_geometry[3]\n ):\n return True\n else:\n return False\n\n\ndef thread_change_video_address():\n while True:\n get_video_source_from_config_file()\n time.sleep(thread_sending_interval_video_address)\n\n\ndef thread_send_message_to_hub():\n global timestamps\n global timestamps_is_checked\n global ct\n while True:\n # Define send data format\n log.info(\"Value of persons are {}\".format(ct.persons))\n data = {\"timestamp\": timestamps, \"frames\": [], \"camID\": mac_address}\n\n trackID_tmp = []\n\n for person in ct.persons.values():\n if person[\"is_sent\"] is True:\n continue\n else:\n element_box = {\n \"trackID\": None,\n \"timestamp\": None,\n \"recognition\": {},\n }\n element_box[\"trackID\"] = person[\"trackID\"]\n element_box[\"timestamp\"] = person[\"timestamp\"]\n element_box[\"recognition\"] = person[\"recognition\"]\n data[\"frames\"].append(element_box)\n\n person[\"is_sent\"] = True\n\n ct.update_persons(person[\"trackID\"], person)\n if person[\"is_sent\"] and person[\"is_exceed_threshold\"]:\n trackID_tmp.append(person[\"trackID\"])\n # Remove the trackID info that was sent to IoT Hub \\\n # and exceed threshold\n for trackID in trackID_tmp:\n ct.deregister_persons(trackID)\n\n if data[\"frames\"] is not None and len(data[\"frames\"]) > 0:\n data[\"frames\"] = sorted(data[\"frames\"], key=lambda x: x[\"trackID\"])\n timestamps_is_checked = False\n log.info(\"Just sent to Hub : {}\".format(json.dumps(data)))\n iothub_client_run(json.dumps(data))\n else:\n log.info(\"No data for sending to IoT Hub\")\n\n time.sleep(sending_interval)\n\n\n# Running a thread for sending video address\nthread_for_pub = threading.Thread(target=thread_change_video_address)\nthread_for_pub.start()\n\n# Running a thread for sending msg to hub\nthread_for_send_msg = threading.Thread(target=thread_send_message_to_hub)\nthread_for_send_msg.start()\n\nbase_box = {\n \"trackID\": 0,\n \"timestamp\": 1562241186.4627721,\n \"recognition\": {},\n \"is_sent\": False,\n \"is_fulled\": False,\n \"is_exceed_threshold\": False,\n}\n\nwhile True:\n log.info(\"Got a data from ncs2-manager!!!\")\n json_data = sub.recv_json()\n person_rect = []\n # Extract frame data\n frame = json_data[\"frame\"][0]\n timestamp = frame[\"timestamp\"]\n\n if not len(frame[\"obj_boxes\"]):\n log.info(\"Data with no person. 
Skip to next frame\")\n # Update track ID\n objects = ct.update(person_rect)\n continue\n\n people = [\n x for x in frame[\"obj_boxes\"] if x[\"detection\"][\"label\"] == \"person\"\n ]\n faces = [\n x for x in frame[\"obj_boxes\"] if x[\"detection\"][\"label\"] == \"face\"\n ]\n\n for person in people:\n ymin, xmin, ymax, xmax = person[\"detection\"][\"box_geometry\"]\n person_rect.append((xmin, ymin, xmax, ymax))\n\n # Update track ID\n objects = ct.update(person_rect)\n\n for person in people:\n ymin, xmin, ymax, xmax = person[\"detection\"][\"box_geometry\"]\n cX = int((xmin + xmax) / 2.0)\n cY = int((ymin + ymax) / 2.0)\n\n # Create result object\n result_object = copy.deepcopy(base_box)\n result_object[\"timestamp\"] = timestamp\n\n for face in faces:\n ymin_face, xmin_face, ymax_face, xmax_face = face[\"detection\"][\n \"box_geometry\"\n ]\n if (\n check_point_in_polygon(\n (ymin_face, xmin_face, ymax_face, xmax_face),\n (ymin, xmin, ymax, xmax),\n ) and face[\"recognition\"]\n ):\n result_object[\"recognition\"] = {\n \"age_gender\": face[\"recognition\"][\"age_gender\"]\n }\n break\n else:\n log.info(\"Do not mapping face & person !!!\")\n\n for (objectID, centroid) in objects.items():\n if (cX, cY) == (centroid[0], centroid[1]):\n result_object[\"trackID\"] = objectID\n\n if objectID not in ct.persons:\n if not timestamps_is_checked:\n timestamps = timestamp\n\n if result_object[\"recognition\"]:\n result_object[\"is_fulled\"] = True\n ct.update_persons(objectID, result_object)\n\n else:\n current_tracker = ct.persons[objectID]\n\n if (\n current_tracker[\"is_fulled\"] is not True and\n result_object[\"recognition\"]\n ):\n result_object[\"is_fulled\"] = True\n result_object[\"is_sent\"] = False\n log.info(\n \"Just update the track ID \\\n information: {}\".format(\n result_object\n )\n )\n timestamps = timestamp\n timestamps_is_checked = True\n ct.update_persons(objectID, result_object)\n","sub_path":"src/inference-app/docker-compose/inference-engine/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":8920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240259636","text":"#\n# @lc app=leetcode id=128 lang=python3\n#\n# [128] Longest Consecutive Sequence\n#\n# https://leetcode.com/problems/longest-consecutive-sequence/description/\n#\n# algorithms\n# Hard (41.42%)\n# Likes: 1829\n# Dislikes: 86\n# Total Accepted: 207.7K\n# Total Submissions: 498.1K\n# Testcase Example: '[100,4,200,1,3,2]'\n#\n# Given an unsorted array of integers, find the length of the longest\n# consecutive elements sequence.\n# \n# Your algorithm should run in O(n) complexity.\n# \n# Example:\n# \n# \n# Input: [100, 4, 200, 1, 3, 2]\n# Output: 4\n# Explanation: The longest consecutive elements sequence is [1, 2, 3, 4].\n# Therefore its length is 4.\n# \n# \n#\nclass Solution:\n # O(nlogn + n)\n def longestConsecutive(self, nums: List[int]) -> int:\n if not nums: \n return 0\n nums.sort()\n length = 1\n maxLength = 1\n for i in range(1, len(nums)):\n if nums[i] != nums[i - 1]:\n if nums[i] - nums[i - 1] == 1:\n length += 1\n maxLength = max(maxLength, length)\n else: \n length = 1\n return maxLength\n \n # O(n)\n def longestConsecutive(self, nums: List[int]) -> int:\n longest_streak = 0\n num_set = set(nums)\n\n for num in num_set:\n if num - 1 not in num_set: # O(1) look up\n current_num = num\n current_streak = 1\n\n while current_num + 1 in num_set: # O(1) look up -> max n times\n current_num += 1\n current_streak += 1\n\n longest_streak 
= max(longest_streak, current_streak)\n\n        return longest_streak\n\n","sub_path":"128.longest-consecutive-sequence.py","file_name":"128.longest-consecutive-sequence.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145503595","text":"class date_test:\n    def __init__(self, year, month, day):\n        self.year = year\n        self.month = month\n        self.day = day\n\n    def show_date(self):\n        print(f'today is {self.year}-{self.month}-{self.day}')\n\n\n    @classmethod\n    def get_date(cls, date_str):\n        y,m,d = map(int, date_str.split('-'))\n        dateo = cls(y, m, d)\n        return dateo\n\n    # def get_date(cls, date_str):\n    #     y,m,d = map(int, date_str.split('-'))\n    #     cls.year, cls.month, cls.day = y, m, d\n    #     return cls\n\n\nif __name__ == '__main__':\n    d = date_test(2022, 1, 2)\n    d.show_date()\n\n    a = date_test.get_date('2021-11-11')\n    d.show_date()\n\n    a.show_date()","sub_path":"python_concepts/python_classmethod.py","file_name":"python_classmethod.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605399365","text":"#!/usr/bin/python\n\nimport os\nimport glob\n\nheaders = []\nfor root, dirnames, filenames in os.walk(\".\"):\n\tdirectory_headers = glob.glob(root + \"/*.h\")\n\tfor h in directory_headers:\n\t\theaders.append(h[2:])\n\nf = open(\"pcf_viewer.h\", \"w\")\nf.writelines([\n\t\"// File automatically generated by generate_pcf_viewer_h.py\\n\",\n\t\"\\n\",\n\t\"#ifndef PCF_PCF_VIEWER_H_\\n\",\n\t\"#define PCF_PCF_VIEWER_H_\\n\",\n\t\"\\n\"\n])\n\nfor h in headers:\n\tif (h != \"pcf_viewer.h\") and (h[:8] != \"shaders/\"):\n\t\tf.write(\"#include \\\"\" + h + \"\\\"\\n\")\n\nf.writelines([\n\t\"\\n\",\n\t\"#endif\\n\"\n])\n","sub_path":"program/pcf/pcf_viewer/generate_pcf_viewer_h.py","file_name":"generate_pcf_viewer_h.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"131818787","text":"\"\"\"\r\nMain entry point\r\n\"\"\"\r\n\r\nfrom multiprocessing import Process\r\nfrom ProxyValidSchedule import run as ValidRun\r\nfrom ProxyRefreshSchedule import run as RefreshRun\r\n\r\ndef run():\r\n    p_list = list()\r\n    p3 = Process(target=RefreshRun, name=\"RefreshRun\")\r\n    p_list.append(p3)\r\n    p2 = Process(target=ValidRun, name=\"ValidRun\")\r\n    p_list.append(p2)\r\n\r\n\r\n    for p in p_list:\r\n        p.start()\r\n    for p in p_list:\r\n        p.join()\r\n\r\nif __name__ == '__main__':\r\n    run()","sub_path":"proxyIpSpider/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"228355387","text":"import scrapy\nfrom SpiderNews.config import NEWS_TYPE,get_header\nfrom SpiderNews.items import NewsSpiderItem\nfrom scrapy import log\nfrom selenium import webdriver\n\n\nclass NetEaseSpider(scrapy.Spider):\n    start_urls = ['http://news.baidu.com']\n    name = 'baiduindex'\n    allowed_domains = ['news.baidu.com']\n    base_url = 'http://news.baidu.com/'\n\n    def parse(self, response):\n        yield scrapy.Request(self.base_url,self.parseNewsPage,headers=get_header())\n\n    def parseList(self, response):\n        urls = response.xpath(\"//a/@href\").extract()\n        for url in urls:\n            yield scrapy.Request(url, self.parseNews)\n\n    def parseNewsPage(self, response):\n        log.msg(type(response), level=log.WARNING)\n        item = NewsSpiderItem()\n        # homepage hot news section\n        news_url = 
response.xpath(\"//li/a/@href\").extract()\n        news_text = response.xpath(\"//li/a/text()\").extract()\n\n        print(news_url)\n        print(news_text)\n\n        pane_news_url = response.xpath(\"//div[@id='pane-news']//li/a/@href\").extract()\n        pane_news_text = response.xpath(\"//div[@id='pane-news']//li/a/text()\").extract()\n        local_news_url = response.xpath(\"//div[@id='local_news']//li/a/@href\").extract()\n        print(local_news_url)\n\n        focusUrl = response.xpath(\"//div[@id='col_focus']//li/a/@href\").extract()\n        focusText = response.xpath(\"//div[@id='col_focus']//li/a/text()\").extract()\n        self.parse_instat_news(response)\n        for i in range(1, len(focusUrl)):\n            item['url'] = focusUrl[i]\n            item['title'] = focusText[i]\n            item['category'] = ''\n            item['secCategory'] = 'focus'\n            yield item\n\n        #print(focusText)\n        #print (focusUrl)\n    def parse_instat_news(self,response):\n        attimeUrl = response.xpath(\"//div[@id='instant-news']//li/a/@href\").extract()\n        attimeText = response.xpath(\"//div[@id='instant-news']//li/a/text()\").extract()\n        item = NewsSpiderItem()\n        for i in range(1, len(attimeUrl)):\n            item['url'] = attimeUrl[i]\n            item['title'] = attimeText[i]\n            item['category'] = ''\n            item['secCategory'] = 'attime'\n            yield item\n    '''\n    titles = response.xpath(\"//a/text()\").extract()\n    url = response.xpath(\"//a/@href\").extract()\n    for i in range(1,len(titles)):\n        item['title'] = titles[i]\n        item['url'] = url[i]\n        item['category'] = 'ent'\n        yield item'''\n    #timee = data.xpath(\"//div[@class='post_time_source']/text()\").extract()\n    #title = data.xpath(\"//h1/text()\").extract()\n    #content = data.xpath(\"//div[@class='post_text']/p/text()\").extract()\n","sub_path":"SpiderNews/spiders/BaiduNewsIndex.py","file_name":"BaiduNewsIndex.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478790357","text":"import numpy as np\nfrom Cython.Build import cythonize\nfrom setuptools import setup, Extension\n\nstan_include_dirs = ['src/stanflow/cmdstan/stan/src/',\n                     'src/stanflow/cmdstan/stan/lib/stan_math',\n                     'src/stanflow/cmdstan/stan/lib/stan_math/lib/eigen_3.3.3',\n                     'src/stanflow/cmdstan/stan/lib/stan_math/lib/boost_1.69.0',\n                     'src/stanflow/cmdstan/stan/lib/stan_math/lib/sundials_3.1.0/include']\n\nstan_macros = [\n    ('BOOST_DISABLE_ASSERTS', None),\n    ('BOOST_NO_DECLTYPE', None),\n    ('BOOST_RESULT_OF_USE_TR1', None),\n]\n\nextra_compile_args = [\n    '-Os',\n    '-ftemplate-depth-256',\n    '-Wno-unused-function',\n    '-Wno-uninitialized',\n    '-std=c++14'\n]\n\nextensions = [\n    Extension('stanflow.compute_effective_sample_size',\n              ['src/stanflow/compute_effective_sample_size.pyx'],\n              language='c++',\n              define_macros=stan_macros,\n              include_dirs=stan_include_dirs + [np.get_include()],\n              extra_compile_args=extra_compile_args)\n]\n\nsetup(name='stanflow',\n      version='0.1',\n      description='Python tools for a Stan workflow using CmdStan.',\n      url='http://github.com/roualdes/stanflow',\n      author='Edward A. 
Roualdes',\n author_email='eroualdes@csuchico.edu',\n license='BSD (3-clause)',\n install_requires=[\n 'cython>=0.22,!=0.25.1',\n 'numpy>=1.7,<2.0',\n 'scipy>=0.19.1',\n ],\n ext_modules=cythonize(extensions),\n packages=['stanflow'],\n package_dir={'': 'src'},\n include_package_data=True,\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54461712","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n#-----------------------------------------------------------\n#\n# Custom renderer functions for maps\n#\n# Copyright (C) 2013 Quentin Rossy\n#\n#-----------------------------------------------------------\n\n\nimport os, math\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom qgis.core import *\n\n#Convenient function to debug\nlog = lambda m: QgsMessageLog.logMessage(m, \"Visualist\")\n\nPOINT = 'point'\nLINE = 'line'\n\nclass MapRender(object):\n\n def __init__(self, layer):\n self.l = layer\n self.prov = self.l.dataProvider()\n\n def cat(self, cntField, groups, colors, name, labelField=None, type=POINT, color=Qt.black, trans=0.7):\n labelField = cntField if labelField is None else labelField\n\n l = []\n for group in groups:\n color = colors[group]\n symbol = QgsSymbol.defaultSymbol(QgsWkbTypes.Line) if type == LINE else QgsSymbol.defaultSymbol(QgsWkbTypes.Point)\n symbol.setColor(color)\n symbol.setOpacity(trans)\n self.setDataDefinedSize(symbol, cntField)\n if type == POINT:\n sl = symbol.symbolLayer(0)\n sl.setBorderColor(Qt.white)\n l.append(QgsRendererCategory(group, symbol, group))\n myRenderer = QgsCategorizedSymbolRenderer(name, l)\n\n self.l.setRenderer(myRenderer)\n self.setLabels(labelField)\n return myRenderer\n\n def setDataDefinedSize(self, symbol, field, type):\n min = None\n max = 0\n for feature in self.l.getFeatures():\n val = feature[field]\n if min is None:\n min = val\n max = val\n elif val < min:\n min = val\n elif val > max:\n max = val\n if type == LINE:\n strExp = \"coalesce(0.5*scale_linear(\"+field+\", \"+str(min)+\", \"+str(max)+\", 1, 10), 0)\"\n prop = QgsProperty().fromExpression(strExp)\n symbol.setDataDefinedWidth(prop)\n else:\n if min == 0:\n minScale = 0\n maxScale = 20\n else:\n maxScale = 20.0\n minScale = (maxScale*math.sqrt(min))/math.sqrt(max)\n if minScale < 1:\n minScale = 1\n maxScale = math.sqrt(max)/math.sqrt(min)\n # strExp = \"coalesce(scale_linear(sqrt(\"+field+\"), \"+str(math.sqrt(min))+\", \"+str(math.sqrt(max))+\", \"+str(minScale)+\", \"+str(maxScale)+\"), 0)\"\n strExp = \"coalesce(scale_exp(\"+field+\", \"+str(min)+\", \"+str(max)+\", \"+str(minScale)+\", \"+str(maxScale)+\", 0.5), 0)\"\n prop = QgsProperty().fromExpression(strExp)\n symbol.setDataDefinedSize(prop)\n # prop.setField(field)\n\n\n def prop(self, cntField, labelField=None, type=POINT, color=Qt.black, trans=0.7):\n labelField = cntField if labelField is None else labelField\n s = QgsSymbol.defaultSymbol(1) if type == LINE else QgsSymbol.defaultSymbol(0)\n myRenderer = QgsSingleSymbolRenderer(s)\n self.setDataDefinedSize(s, cntField, type)\n myRenderer.symbol().setColor(color)\n myRenderer.symbol().setOpacity(trans)\n # if type == POINT:\n # l = s.symbolLayer(0)\n # l.setColor(Qt.white)\n # myRenderer.setScaleMethod(QgsSymbol.ScaleArea) #QgsSymbol.ScaleArea\n self.l.setRenderer(myRenderer)\n self.setLabels(labelField)\n return myRenderer\n\n def choropleth(self, cntField, classes=7, 
labelField=None, mode=QgsGraduatedSymbolRenderer.Jenks):\n        labelField = cntField if labelField is None else labelField\n\n        props = {}\n        props[\"outline_color\"] =\"255,255,255,255\"\n        props[\"outline_style\"] =\"solid\"\n        props[\"outline_width\"] =\"0.26\"\n        props[\"outline_width_unit\"] =\"MM\"\n\n        renderer = QgsGraduatedSymbolRenderer()\n        renderer.setSourceSymbol(QgsFillSymbol.createSimple(props))\n        renderer.setClassAttribute(cntField)\n        renderer.setClassificationMethod(QgsClassificationJenks())\n        renderer.setSourceColorRamp(QgsGradientColorRamp(QColor(230,230,230), QColor(50,50,50)))\n        renderer.setGraduatedMethod(QgsGraduatedSymbolRenderer.GraduatedColor)\n        renderer.updateClasses(self.l, classes)\n\n        self.setLabels(labelField, filter=0)\n        self.l.setRenderer(renderer)\n\n        return renderer\n\n    def nnclusters(self, cntField, classes=7, labelField=None, mode=QgsGraduatedSymbolRenderer.Jenks):\n        labelField = cntField if labelField is None else labelField\n\n        props = {}\n        props[\"outline_color\"] =\"0,0,0,0\"\n        props[\"outline_style\"] =\"solid\"\n        props[\"outline_width\"] =\"2\"\n        props[\"outline_width_unit\"] =\"MM\"\n\n        renderer = QgsGraduatedSymbolRenderer()\n        renderer.setSourceSymbol(QgsFillSymbol.createSimple(props))\n        renderer.setClassAttribute(cntField)\n        renderer.setClassificationMethod(QgsClassificationJenks())\n        renderer.setSourceColorRamp(QgsGradientColorRamp(QColor(230,230,230), QColor(50,50,50)))\n        renderer.setGraduatedMethod(QgsGraduatedSymbolRenderer.GraduatedColor)\n        renderer.updateClasses(self.l, classes)\n\n        self.setLabels(labelField, filter=0)\n        self.l.setRenderer(renderer)\n        return renderer\n\n    def choropleth2(self, cntField, labelField=None):\n        labelField = cntField if labelField is None else labelField\n        fieldIndex = self.l.fields().indexFromName(cntField)\n        minimum = self.prov.minimumValue( fieldIndex )\n        maximum = self.prov.maximumValue( fieldIndex )\n        if str(minimum) == \"NULL\" or str(maximum) == \"NULL\":\n            (minimum, maximum) = (0, 0)\n#        log(str(minimum)+\" \"+str(maximum))\n        numberOfClasses=7\n        myRangeList = []\n        delta = ( maximum - minimum ) / numberOfClasses\n        for i in range(0,numberOfClasses):\n            myMin = minimum + delta * i\n            myMax = minimum + delta * ( i + 1 )\n            # if i != 0: myMin += 1\n            if i == numberOfClasses-1:\n                myMax = maximum\n            elif delta > 10: myMax -= 1\n            myLabel = \"%.0f - %.0f\" % (myMin,myMax)\n            s = 200\n            myColour = QColor(s-s*i/numberOfClasses, s-s*i/numberOfClasses, s-s*i/numberOfClasses)\n            mySymbol = QgsSymbol.defaultSymbol(QgsWkbTypes.MultiPolygon)\n            mySymbol.setColor(myColour)\n            l = mySymbol.symbolLayer(0)\n            l.setBorderColor(Qt.white)\n            mySymbol.setOpacity(1)\n            myRange1 = QgsRendererRange(myMin,myMax,mySymbol,myLabel)\n            myRangeList.append(myRange1)\n        myRenderer = QgsGraduatedSymbolRenderer('', myRangeList)\n        myRenderer.setMode(QgsGraduatedSymbolRenderer.Jenks)\n        myRenderer.setClassAttribute(cntField)\n        self.setLabels(labelField)\n        self.l.setRenderer(myRenderer)\n        return myRenderer\n\n    def zscore(self, cntField):\n        fieldIndex = self.l.fields().indexFromName(cntField)\n        minimum = self.prov.minimumValue( fieldIndex )\n        maximum = self.prov.maximumValue( fieldIndex )\n        if not minimum < -2.576:\n            minimum = -2.576\n        if not maximum > 2.576:\n            maximum = 2.576\n        zstep = [minimum, -2.576, -1.960, -1.645, -0.674, 0, 0,\n                 0.674, 1.645, 1.960, 2.576, maximum]\n        colors=[QColor(103,0,31),\n                QColor(178,24,43),\n                QColor(214,96,77),\n                QColor(244,165,130),\n                QColor(253,219,199),\n                QColor(247,247,247),\n                QColor(209,229,240),\n                QColor(146,197,222),\n                
QColor(67,147,195),\n QColor(33,102,172),\n QColor(5,48,97)]\n myRangeList = []\n for i in range(len(colors)):\n myMin = zstep[i]\n myMax = zstep[i+1]\n myLabel = \"%.3f - %.3f\" % (myMin,myMax)\n mySymbol = QgsSymbol.defaultSymbol(QgsWkbTypes.PolygonGeometry)\n mySymbol.setColor(colors[len(colors)-i-1])\n myRange1 = QgsRendererRange(myMin,myMax,mySymbol,myLabel)\n myRangeList.append(myRange1)\n myRenderer = QgsGraduatedSymbolRenderer('', myRangeList)\n myRenderer.setClassificationMethod(QgsClassificationEqualInterval())\n myRenderer.setClassAttribute(cntField)\n self.l.setRenderer(myRenderer)\n return myRenderer\n\n def setLabels(self, field, filter=1):\n Labelqml = \"\"\\\n \"\"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \\\" reverseDirectionSymbol=\\\"0\\\"/>\"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \" \"\\\n \"\"\n\n path = os.path.join(os.path.abspath( os.path.dirname( __file__)), \"styles/labels.qml\")\n file = open(path, 'w')\n file.writelines(Labelqml)\n file.close()\n self.l.loadNamedStyle(path)\n self.l.triggerRepaint()\n\n # layer_settings = QgsPalLayerSettings()\n # text_format = QgsTextFormat()\n #\n # text_format.setFont(QFont(\"Arial\", 8))\n # text_format.setSize(8)\n #\n # buffer_settings = QgsTextBufferSettings()\n # buffer_settings.setEnabled(True)\n # buffer_settings.setSize(0.80)\n # buffer_settings.setColor(QColor(\"white\"))\n #\n # text_format.setBuffer(buffer_settings)\n # layer_settings.setFormat(text_format)\n #\n # layer_settings.fieldName = field\n # layer_settings.placement = 1\n # layer_settings.enabled = True\n #\n # layer_settings = QgsVectorLayerSimpleLabeling(layer_settings)\n #\n # self.l.setLabelsEnabled(True)\n # self.l.setLabeling(layer_settings)\n # self.l.triggerRepaint()\n","sub_path":"utils/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":13894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359262738","text":"import cv2\n\"\"\"Compute depth maps for images in the input folder.\n\"\"\"\nimport os\nimport glob\nimport torch\nimport utils\nimport cv2\nimport random\nimport time\n\nfrom torchvision.transforms import Compose\nfrom models.midas_net import MidasNet\nfrom models.transforms import Resize, NormalizeImage, PrepareForNet\n\n\ndef run(model_path):\n \"\"\"Run MonoDepthNN to compute depth maps.\n\n Args:\n model_path (str): path to saved model\n \"\"\"\n print(\"initialize\")\n\n # select device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"device: %s\" % device)\n\n # load network\n model = MidasNet(model_path, non_negative=True)\n\n transform = Compose(\n [\n Resize(\n 384,\n 384,\n resize_target=None,\n keep_aspect_ratio=True,\n ensure_multiple_of=32,\n resize_method=\"upper_bound\",\n image_interpolation_method=cv2.INTER_CUBIC,\n ),\n NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n PrepareForNet(),\n ]\n )\n\n model.to(device)\n model.eval()\n\n cap = cv2.VideoCapture(1)\n print(\"is camera open\", cap.isOpened())\n cap.set(3,320)\n cap.set(4,240)\n print(\"start processing\")\n\n i = 0\n while cap.isOpened():\n start = time.time()\n ret, frame = cap.read()\n print(\"new frame\", ret)\n p1 = time.time()\n print(f\"take a picture {p1 - start}\")\n if ret:\n img = utils.process_camera_img(frame)\n img_input = transform({\"image\": img})[\"image\"]\n p2 = time.time()\n print(f\"transoform image {p2 - 
p1}\")\n            # compute\n            with torch.no_grad():\n                sample = torch.from_numpy(img_input).to(device).unsqueeze(0)\n                p3 = time.time()\n                print(f\"from numpy to cuda {p3 - p2}\")\n                prediction = model.forward(sample)\n                p4 = time.time()\n                print(f\"prediction {p4 - p3}\")\n                prediction = (\n                    torch.nn.functional.interpolate(\n                        prediction.unsqueeze(1),\n                        size=img.shape[:2],\n                        mode=\"bicubic\",\n                        align_corners=False,\n                    )\n                    .squeeze()\n                    .cpu()\n                    .numpy()\n                )\n                p5 = time.time()\n                print(f\"prediction from cuda to cpu {p5 - p4}\")\n\n\n                # output\n\n                r = random.randint(0, 10000)\n                cv2.imwrite(f\"output/input-{i}-{r}.png\", frame)\n                utils.write_depth(f\"output/depth-{i}-{r}\", prediction, bits=2)\n                p6 = time.time()\n                print(f\"save input and write depth {p6 - p5}\")\n\n                cv2.imshow('frame', frame)\n                cv2.imshow('prediction', prediction)\n                p7 = time.time()\n                print(f\"show images {p7 - p6}\")\n                i += 1\n\n                if cv2.waitKey(1) & 0xFF == ord('q'):\n                    break\n        else:\n            print(\"Camera is not recording\")\n        print(f\"image took {time.time() - start} s\")\n        print(\"\\n-----------------------\\n\")\n\n    # When everything done, release the capture\n    cap.release()\n    cv2.destroyAllWindows()\n\n    print(\"finished\")\n\n\nif __name__ == \"__main__\":\n    MODEL_PATH = \"model.pt\"\n\n    # set torch options\n    torch.backends.cudnn.enabled = True\n    torch.backends.cudnn.benchmark = True\n\n    # compute depth maps\n    run(MODEL_PATH)\n","sub_path":"run_cv.py","file_name":"run_cv.py","file_ext":"py","file_size_in_byte":3573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411755968","text":"# return the sum of the digits\r\ndef sum_digit(num):\r\n    str_num = str(num)\r\n    result = 0\r\n    for i in str_num: \r\n        result += int(i) \r\n    return result\r\n    \r\n# compute the sum of sum_digit(1) through sum_digit(1000)\r\n# enter your code here.\r\n    \r\nsum_result = 0\r\n\r\nfor i in range(1, 1001):\r\n    sum_result += sum_digit(i)\r\n    \r\nprint(sum_result)\r\n","sub_path":"code_it/sum_digit.py","file_name":"sum_digit.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"12377349","text":"import os\nfrom twilio.rest import Client\n\nos.environ['TWILIO_ACCOUNT_SID'] = 'ACcd99094db783d1141bf1de6a0f4c1e5a'\nos.environ['TWILIO_AUTH_TOKEN'] = 'ad848429edfbe548faaaafc94f7f86ad'\n\nclient = Client()\n\nfrom_whatsapp_number=\"whatsapp:+14155238886\"\nto_whatsapp_number=\"whatsapp:+821090292356\"\n\notp = 'asdfas'\nrequest.user.profile.otp = otp\nuser.profile.save()\n\nclient.messages.create(body='Verify your phone number by submitting the OTP: ' + otp,\n                       from_=from_whatsapp_number,\n                       to=to_whatsapp_number)\n","sub_path":"whatsapp.py","file_name":"whatsapp.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73757998","text":"# Exercise 5. Use the file containing an excerpt of Pan Tadeusz. 
Find the longest word occurring in the given excerpt.\n\ndef get_text():\n    with open('text.txt', encoding='UTF-8') as fopen:\n        content = fopen.read()\n\n    return content\n\n\ndef clean_text(text):\n    extras = '.!,()'\n\n    for symbol in extras:\n        text = text.replace(symbol, '')\n\n    return text\n\n\ndef find_longest_word(text):\n    text = text.split()\n    longest_word = ''\n\n    for current_word in text:\n        if len(current_word) > len(longest_word):\n            longest_word = current_word\n\n    return longest_word\n\n\n# -- main code\n\ntxt = get_text()\ntxt = clean_text(txt)\nsearch_word = find_longest_word(txt)\n\nprint(search_word, ', of length - ', len(search_word))","sub_path":"07-files/05/Files - Zad5.py","file_name":"Files - Zad5.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"385780923","text":"from unittest import TestCase\nfrom hamcrest import assert_that, is_\n\nfrom media_platform.metadata.image.image_features import ImageFeatures\n\n\nclass TestImageFeatures(TestCase):\n\n    def test_serialize_deserialize_with_explicit_content(self):\n        data = {\n            'labels': [\n                {'name': 'one', 'score': 0.2323},\n                {'name': 'two', 'score': 0.9}\n            ],\n            'faces': [\n                {'x': 383, 'y': 393, 'width': 155, 'height': 180},\n                {'x': 460, 'y': 385, 'width': 145, 'height': 173}\n            ],\n            'colors': [\n                {'r': 138, 'g': 218, 'b': 244, 'pixelFraction': 0.38548386, 'score': 0.688166},\n            ],\n            'explicitContent': [\n                {\n                    'name': 'adult',\n                    'likelihood': 'VERY_UNLIKELY'\n                }\n            ]\n        }\n\n        image_features = ImageFeatures.deserialize(data)\n\n        assert_that(len(image_features.labels), is_(2))\n        assert_that(len(image_features.faces), is_(2))\n        assert_that(len(image_features.colors), is_(1))\n        assert_that(len(image_features.explicit_content), is_(1))\n","sub_path":"tests/metadata/image/test_image_features.py","file_name":"test_image_features.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539615610","text":"from flask import Blueprint\nfrom api.utils import get_zone_facts_select_columns\nfrom flask import jsonify\n\ndef construct_project_extended_blueprint(name, engine):\n    '''\n    Provides an endpoint that serves an extended version of the project table that has been joined to \n    other tables. In particular, it joins to the zone_facts table to provide de-normalized statistics \n    about nearby developments. All endpoints still return one record per project. 
\n '''\n\n blueprint = Blueprint(name, __name__, url_prefix='/api/project')\n\n @blueprint.route('/')\n @blueprint.route('/<nlihc_id>')\n def project_with_zone_facts(nlihc_id=None):\n\n ward_selects, cluster_selects, tract_selects = get_zone_facts_select_columns(engine)\n \n q = \"\"\"\n select\n p.*\n \"\"\"\n q += ward_selects\n q += cluster_selects\n q += tract_selects\n\n q +=\"\"\"\n from project as p\n left join zone_facts as z1 on z1.zone = p.ward\n left join zone_facts as z2 on z2.zone = p.neighborhood_cluster\n left join zone_facts as z3 on z3.zone = p.census_tract\n \"\"\"\n if nlihc_id != None:\n q+= \"WHERE nlihc_id = '{}'\".format(nlihc_id)\n\n conn = engine.connect()\n proxy = conn.execute(q)\n results = [dict(x) for x in proxy.fetchall()]\n conn.close()\n output = {'objects': results}\n return jsonify(output)\n\n return blueprint","sub_path":"python/api/project_extended_constructor.py","file_name":"project_extended_constructor.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"359904485","text":"#!/usr/bin/env python\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This example creates a keyword plan.\n\nKeyword plans can be reused for retrieving forecast metrics and historic\nmetrics.\n\"\"\"\n\n\nimport argparse\nimport sys\nimport uuid\n\nfrom google.ads.googleads.client import GoogleAdsClient\nfrom google.ads.googleads.errors import GoogleAdsException\n\n\n# [START add_keyword_plan]\ndef main(client, customer_id):\n \"\"\"Adds a keyword plan, campaign, ad group, etc. to the customer account.\n\n Also handles errors from the API and prints them.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n \"\"\"\n _add_keyword_plan(client, customer_id)\n\n\ndef _add_keyword_plan(client, customer_id):\n \"\"\"Adds a keyword plan, campaign, ad group, etc. 
to the customer account.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n keyword_plan = _create_keyword_plan(client, customer_id)\n keyword_plan_campaign = _create_keyword_plan_campaign(\n client, customer_id, keyword_plan\n )\n keyword_plan_ad_group = _create_keyword_plan_ad_group(\n client, customer_id, keyword_plan_campaign\n )\n _create_keyword_plan_ad_group_keywords(\n client, customer_id, keyword_plan_ad_group\n )\n _create_keyword_plan_negative_campaign_keywords(\n client, customer_id, keyword_plan_campaign\n )\n\n\ndef _create_keyword_plan(client, customer_id):\n \"\"\"Adds a keyword plan to the given customer account.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n\n Returns:\n A str of the resource_name for the newly created keyword plan.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n keyword_plan_service = client.get_service(\"KeywordPlanService\")\n operation = client.get_type(\"KeywordPlanOperation\")\n keyword_plan = operation.create\n\n keyword_plan.name = f\"Keyword plan for traffic estimate {uuid.uuid4()}\"\n\n forecast_interval = (\n client.enums.KeywordPlanForecastIntervalEnum.NEXT_QUARTER\n )\n keyword_plan.forecast_period.date_interval = forecast_interval\n\n response = keyword_plan_service.mutate_keyword_plans(\n customer_id=customer_id, operations=[operation]\n )\n resource_name = response.results[0].resource_name\n\n print(f\"Created keyword plan with resource name: {resource_name}\")\n\n return resource_name\n\n\ndef _create_keyword_plan_campaign(client, customer_id, keyword_plan):\n \"\"\"Adds a keyword plan campaign to the given keyword plan.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n keyword_plan: A str of the keyword plan resource_name this keyword plan\n campaign should be attributed to.create_keyword_plan.\n\n Returns:\n A str of the resource_name for the newly created keyword plan campaign.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n keyword_plan_campaign_service = client.get_service(\n \"KeywordPlanCampaignService\"\n )\n operation = client.get_type(\"KeywordPlanCampaignOperation\")\n keyword_plan_campaign = operation.create\n\n keyword_plan_campaign.name = f\"Keyword plan campaign {uuid.uuid4()}\"\n keyword_plan_campaign.cpc_bid_micros = 1000000\n keyword_plan_campaign.keyword_plan = keyword_plan\n\n network = client.enums.KeywordPlanNetworkEnum.GOOGLE_SEARCH\n keyword_plan_campaign.keyword_plan_network = network\n\n geo_target = client.get_type(\"KeywordPlanGeoTarget\")\n # Constant for U.S. 
Other geo target constants can be referenced here:\n # https://developers.google.com/google-ads/api/reference/data/geotargets\n geo_target.geo_target_constant = \"geoTargetConstants/2840\"\n keyword_plan_campaign.geo_targets.append(geo_target)\n\n # Constant for English\n language = \"languageConstants/1000\"\n keyword_plan_campaign.language_constants.append(language)\n\n response = keyword_plan_campaign_service.mutate_keyword_plan_campaigns(\n customer_id=customer_id, operations=[operation]\n )\n\n resource_name = response.results[0].resource_name\n\n print(f\"Created keyword plan campaign with resource name: {resource_name}\")\n\n return resource_name\n\n\ndef _create_keyword_plan_ad_group(client, customer_id, keyword_plan_campaign):\n \"\"\"Adds a keyword plan ad group to the given keyword plan campaign.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n keyword_plan_campaign: A str of the keyword plan campaign resource_name\n this keyword plan ad group should be attributed to.\n\n Returns:\n A str of the resource_name for the newly created keyword plan ad group.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n operation = client.get_type(\"KeywordPlanAdGroupOperation\")\n keyword_plan_ad_group = operation.create\n\n keyword_plan_ad_group.name = f\"Keyword plan ad group {uuid.uuid4()}\"\n keyword_plan_ad_group.cpc_bid_micros = 2500000\n keyword_plan_ad_group.keyword_plan_campaign = keyword_plan_campaign\n\n keyword_plan_ad_group_service = client.get_service(\n \"KeywordPlanAdGroupService\"\n )\n response = keyword_plan_ad_group_service.mutate_keyword_plan_ad_groups(\n customer_id=customer_id, operations=[operation]\n )\n\n resource_name = response.results[0].resource_name\n\n print(f\"Created keyword plan ad group with resource name: {resource_name}\")\n\n return resource_name\n\n\ndef _create_keyword_plan_ad_group_keywords(client, customer_id, plan_ad_group):\n \"\"\"Adds keyword plan ad group keywords to the given keyword plan ad group.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n plan_ad_group: A str of the keyword plan ad group resource_name\n these keyword plan keywords should be attributed to.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n keyword_plan_ad_group_keyword_service = client.get_service(\n \"KeywordPlanAdGroupKeywordService\"\n )\n operation = client.get_type(\"KeywordPlanAdGroupKeywordOperation\")\n operations = []\n\n operation = client.get_type(\"KeywordPlanAdGroupKeywordOperation\")\n keyword_plan_ad_group_keyword1 = operation.create\n keyword_plan_ad_group_keyword1.text = \"mars cruise\"\n keyword_plan_ad_group_keyword1.cpc_bid_micros = 2000000\n keyword_plan_ad_group_keyword1.match_type = (\n client.enums.KeywordMatchTypeEnum.BROAD\n )\n keyword_plan_ad_group_keyword1.keyword_plan_ad_group = plan_ad_group\n operations.append(operation)\n\n operation = client.get_type(\"KeywordPlanAdGroupKeywordOperation\")\n keyword_plan_ad_group_keyword2 = operation.create\n keyword_plan_ad_group_keyword2.text = \"cheap cruise\"\n keyword_plan_ad_group_keyword2.cpc_bid_micros = 1500000\n keyword_plan_ad_group_keyword2.match_type = (\n client.enums.KeywordMatchTypeEnum.PHRASE\n )\n keyword_plan_ad_group_keyword2.keyword_plan_ad_group = plan_ad_group\n operations.append(operation)\n\n operation = client.get_type(\"KeywordPlanAdGroupKeywordOperation\")\n 
keyword_plan_ad_group_keyword3 = operation.create\n keyword_plan_ad_group_keyword3.text = \"jupiter cruise\"\n keyword_plan_ad_group_keyword3.cpc_bid_micros = 1990000\n keyword_plan_ad_group_keyword3.match_type = (\n client.enums.KeywordMatchTypeEnum.EXACT\n )\n keyword_plan_ad_group_keyword3.keyword_plan_ad_group = plan_ad_group\n operations.append(operation)\n\n response = keyword_plan_ad_group_keyword_service.mutate_keyword_plan_ad_group_keywords(\n customer_id=customer_id, operations=operations\n )\n\n for result in response.results:\n print(\n \"Created keyword plan ad group keyword with resource name: \"\n f\"{result.resource_name}\"\n )\n\n\ndef _create_keyword_plan_negative_campaign_keywords(\n client, customer_id, plan_campaign\n):\n \"\"\"Adds a keyword plan negative campaign keyword to the given campaign.\n\n Args:\n client: An initialized instance of GoogleAdsClient\n customer_id: A str of the customer_id to use in requests.\n plan_campaign: A str of the keyword plan campaign resource_name\n this keyword plan negative keyword should be attributed to.\n\n Raises:\n GoogleAdsException: If an error is returned from the API.\n \"\"\"\n keyword_plan_negative_keyword_service = client.get_service(\n \"KeywordPlanCampaignKeywordService\"\n )\n operation = client.get_type(\"KeywordPlanCampaignKeywordOperation\")\n\n keyword_plan_campaign_keyword = operation.create\n keyword_plan_campaign_keyword.text = \"moon walk\"\n keyword_plan_campaign_keyword.match_type = (\n client.enums.KeywordMatchTypeEnum.BROAD\n )\n keyword_plan_campaign_keyword.keyword_plan_campaign = plan_campaign\n keyword_plan_campaign_keyword.negative = True\n\n response = keyword_plan_negative_keyword_service.mutate_keyword_plan_campaign_keywords(\n customer_id=customer_id, operations=[operation]\n )\n\n print(\n \"Created keyword plan campaign keyword with resource name: \"\n f\"{response.results[0].resource_name}\"\n )\n # [END add_keyword_plan]\n\n\nif __name__ == \"__main__\":\n # GoogleAdsClient will read the google-ads.yaml configuration file in the\n # home directory if none is specified.\n googleads_client = GoogleAdsClient.load_from_storage(version=\"v10\")\n\n parser = argparse.ArgumentParser(\n description=\"Creates a keyword plan for specified customer.\"\n )\n # The following argument(s) should be provided to run the example.\n parser.add_argument(\n \"-c\",\n \"--customer_id\",\n type=str,\n required=True,\n help=\"The Google Ads customer ID.\",\n )\n args = parser.parse_args()\n\n try:\n main(googleads_client, args.customer_id)\n except GoogleAdsException as ex:\n print(\n f'Request with ID \"{ex.request_id}\" failed with status '\n f'\"{ex.error.code().name}\" and includes the following errors:'\n )\n for error in ex.failure.errors:\n print(f'\\tError with message \"{error.message}\".')\n if error.location:\n for field_path_element in error.location.field_path_elements:\n print(f\"\\t\\tOn field: {field_path_element.field_name}\")\n sys.exit(1)\n","sub_path":"examples/planning/add_keyword_plan.py","file_name":"add_keyword_plan.py","file_ext":"py","file_size_in_byte":11329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35283248","text":"import logging\nimport os\nimport requests\nimport tempfile\nfrom contextlib import contextmanager\nfrom google.cloud import storage\nimport hail as hl\nfrom tqdm import tqdm\n\nfrom sv_pipeline.utils.common import parse_gs_path_to_bucket\n\nlogger = logging.getLogger(__name__)\n\n@contextmanager\ndef 
file_writer(file_path, get_existing_size=False):\n bucket = None\n size = None\n if is_gs_path(file_path):\n local_file_path = os.path.join(tempfile.gettempdir(), os.path.basename(file_path))\n bucket, file_name = parse_gs_path_to_bucket(file_path)\n if get_existing_size:\n blob = bucket.get_blob(file_name)\n size = blob and blob.size\n else:\n local_file_path = file_path\n if get_existing_size:\n size = os.path.isfile(local_file_path) and os.path.getsize(local_file_path)\n\n local_file = open(local_file_path, 'wb')\n\n yield local_file, size\n\n local_file.close()\n\n if bucket:\n blob = bucket.blob(file_name)\n blob.upload_from_filename(local_file_path)\n\n\ndef is_gs_path(path):\n return path.startswith('gs://')\n\n\ndef path_exists(path):\n is_gs = is_gs_path(path)\n return (is_gs and hl.hadoop_exists(path)) or (not is_gs and os.path.exists(path))\n\n\ndef download_file(url, to_dir=tempfile.gettempdir(), verbose=True):\n \"\"\"Download the given file and returns its local path.\n Args:\n url (string): HTTP or FTP url\n Returns:\n string: local file path\n \"\"\"\n\n if not (url and url.startswith((\"http://\", \"https://\"))):\n raise ValueError(\"Invalid url: {}\".format(url))\n remote_file_size = _get_remote_file_size(url)\n\n filename = os.path.basename(url) # NOTE: 'filename' was undefined in the original; derive it from the URL\n file_path = os.path.join(to_dir, filename)\n with file_writer(file_path, get_existing_size=True) as fw:\n f, file_size = fw\n if file_size and file_size == remote_file_size:\n logger.info(\"Re-using {} previously downloaded from {}\".format(file_path, url))\n return file_path\n\n is_gz = url.endswith(\".gz\")\n response = requests.get(url, stream=is_gz)\n input_iter = response if is_gz else response.iter_content()\n if verbose:\n logger.info(\"Downloading {} to {}\".format(url, file_path))\n input_iter = tqdm(input_iter, unit=\" data\" if is_gz else \" lines\")\n\n f.writelines(input_iter)\n input_iter.close()\n\n return file_path\n\n\ndef _get_remote_file_size(url):\n return int(requests.head(url).headers.get('Content-Length', '0'))\n","sub_path":"sv_pipeline/genome/utils/download_utils.py","file_name":"download_utils.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"583273480","text":"from __future__ import print_function, division\n\nimport numpy as np\n\nfrom ..util.functions import FreezableClass\nfrom ..util.convenience import OptThinRadius\nfrom ..dust import SphericalDust\nfrom ..util.validator import validate_scalar\nfrom ..grid import SphericalPolarGrid\n\n\nclass AmbientMedium(FreezableClass):\n r'''\n This class implements the density structure for an ambient density\n medium defined by a constant density, and an inner and outer radius.\n\n Once the :class:`~hyperion.densities.AmbientMedium` class has been\n instantiated, the parameters for the density structure can be set via\n attributes::\n\n >>> from hyperion.util.constants import au, pc\n >>> ambient = AmbientMedium()\n >>> ambient.rho = 1.e-20 # cgs\n >>> ambient.rmin = 0.1 * au # cm\n >>> ambient.rmax = pc # cm\n\n :class:`~hyperion.densities.AmbientMedium` instances can only be used with\n spherical polar grids at this time.\n '''\n def __init__(self, rho=None, rmin=None, rmax=None):\n\n # Basic ambient medium parameters\n self.rho = rho\n self.rmin = rmin\n self.rmax = rmax\n\n # Dust\n self.dust = None\n\n self._freeze()\n\n @property\n def rho(self):\n '''Density of the ambient medium (g/cm^3)'''\n return self._rho\n\n @rho.setter\n def rho(self, value):\n if value is not 
None:\n validate_scalar('rho', value, domain='positive')\n self._rho = value\n\n @property\n def rmin(self):\n '''inner radius (cm)'''\n if isinstance(self._rmin, OptThinRadius):\n return self._rmin.evaluate(self.star, self.dust)\n else:\n return self._rmin\n\n @rmin.setter\n def rmin(self, value):\n if not isinstance(value, OptThinRadius) and value is not None:\n validate_scalar('rmin', value, domain='positive', extra=' or an OptThinRadius instance')\n self._rmin = value\n\n @property\n def rmax(self):\n '''outer radius (cm)'''\n if isinstance(self._rmax, OptThinRadius):\n return self._rmax.evaluate(self.star, self.dust)\n else:\n return self._rmax\n\n @rmax.setter\n def rmax(self, value):\n if not isinstance(value, OptThinRadius) and value is not None:\n validate_scalar('rmax', value, domain='positive', extra=' or an OptThinRadius instance')\n self._rmax = value\n\n @property\n def dust(self):\n '''dust properties (filename or dust object)'''\n return self._dust\n\n @dust.setter\n def dust(self, value):\n if isinstance(value, str): # str, not the Python 2-only 'basestring'\n self._dust = SphericalDust(value)\n else:\n self._dust = value\n\n def _check_all_set(self):\n\n if self.rho is None:\n raise Exception(\"rho is not set\")\n if self.rmin is None:\n raise Exception(\"rmin is not set\")\n if self.rmax is None:\n raise Exception(\"rmax is not set\")\n\n if isinstance(self.rmin, OptThinRadius):\n raise Exception(\"Inner ambient medium radius needs to be computed first\")\n if isinstance(self.rmax, OptThinRadius):\n raise Exception(\"Outer ambient medium radius needs to be computed first\")\n\n def density(self, grid):\n '''\n Return the density grid\n\n Parameters\n ----------\n grid : :class:`~hyperion.grid.SphericalPolarGrid` instance.\n The spherical polar grid object containing information about the\n position of the grid cells.\n\n Returns\n -------\n rho : np.ndarray\n A 3-dimensional array containing the density of the envelope\n inside each cell. 
The shape of this array is the same as\n ``grid.shape``.\n '''\n\n if not isinstance(grid, SphericalPolarGrid):\n raise TypeError(\"grid should be a SphericalPolarGrid instance\")\n\n self._check_all_set()\n\n rho = np.ones(grid.gr.shape) * self.rho\n\n rho[grid.gr < self.rmin] = 0.\n rho[grid.gr > self.rmax] = 0.\n\n return rho\n","sub_path":"hyperion/densities/ambient_medium.py","file_name":"ambient_medium.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186242555","text":"import datetime\nimport requests\nimport nltk\nfrom pycorenlp import StanfordCoreNLP\nfrom pyspark import SparkContext, SparkConf\nconf = SparkConf().setAppName(\"app\")\nsc = SparkContext(conf=conf)\n\ndef telegram_bot_sendtext(service_name, time, type, bot_message):\n bot_token = '1297258570:AAGTzLSNjMrE9gLhpJuQ2EOyL45Bb5yGwZc'\n bot_chatID = '-467351323'\n mess = type + '\\t' + service_name + '\\n' + time + '\\n' + bot_message\n mess = mess.replace('_', '-')\n send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + mess\n response = requests.get(send_text)\n return response.json()\n\ndef format_relation(str):\n arr = [pos for pos, char in enumerate(str) if char == \" \"]\n result = \"\"\n for index, item in enumerate(str):\n if (index - 1) in arr:\n result += item.upper()\n else:\n result += item\n result = result.replace(\" \", \"\")\n return result\n\ndef extracter(sent):\n output= nlp.annotate(sent, properties={'annotators': 'tokenize, ssplit, pos, depparse, parse, openie','outputFormat': 'json'})\n triple = []\n try:\n for item in output['sentences'][0]['openie']:\n tmp = item['subject'].replace(\" \", \"_\") + \"\\t\" + format_relation(item['relation']) + \"\\t\" + item[\"object\"].replace(\" \", \"_\")\n triple.append(tmp)\n return triple\n except Exception as e:\n return repr(e)\n \n\ndef filter_trump(triple):\n return triple.lower().__contains__('trump')\n\nsite = 'news_fox'\nnow = datetime.datetime.now()\ndistTime = now - datetime.timedelta(1)\nfolder_name = distTime.__format__(\"%Y-%m-%d\")\n#folder_name = \"2020-05-27\"\nfolder_input = \"/user/hduser/processed_data/\"+site+\"/\"+folder_name+\"/*\"\nfoler_save = \"hdfs:///user/hduser/triples/\"+site+\"/\"+folder_name+\".txt\"\ntime_now = now.__format__('%Y-%m-%d %H:%M:%S')\ntry:\n telegram_bot_sendtext(\"extraction.py\", time_now, \"INFO\", \"Start extracting triples from \" + site + \", date: \" + folder_name)\n data = sc.textFile(folder_input)\n data2 = data.map(lambda x: nltk.sent_tokenize(x)[0])\n nlp = StanfordCoreNLP('http://localhost:9000')\n out = data2.flatMap(lambda x: extracter(x))\n res = out.filter(lambda x: filter_trump(x))\n res.saveAsTextFile(foler_save)\n\n time_now = datetime.datetime.now().__format__('%Y-%m-%d %H:%M:%S')\n telegram_bot_sendtext(\"extraction.py\", time_now, \"INFO\", \"Successfully extract triples from \" + site + \", date: \" + folder_name)\nexcept Exception as e:\n mess = \"ERROR when extract tripple news from \" +site+\", date: \" + folder_name + \"\\n\" + repr(e)\n time_now = datetime.datetime.now().__format__('%Y-%m-%d %H:%M:%S')\n telegram_bot_sendtext(\"extraction.py\", time_now, \"ERROR\", mess)\n","sub_path":"server/extraction/news_fox/extraction_newsfox.py","file_name":"extraction_newsfox.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"272600607","text":"from datetime import datetime\nfrom bs4 import BeautifulSoup\n\nfrom aLib import logger\nimport requests\n\nclass DataGet:\n\tcurrent_date = datetime.now()\n\ttoday = (current_date.strftime('%m/%d/%Y')).replace(\"/\",\"-\")\n\tblad = \"eb\"\n\tblade = [\"eb\",\"p2\",\"p3\",\"p4\"]\n\tresponse = \"\"\n\tmyLogger = logger.Logger()\n\tdata = {}\n\n\n\tdef __init__(self, blad=None):\n\t\tif(blad != None):\n\t\t\tself.blad = blad\n\t\tself.myLogger.log(\" --- init --- on blad: \" + str(self.blad))\n\n\tdef getData(self):\n\t\treturnData = {}\n\t\tprint(\"[-------------- SCAN --------------]\")\n\t\tif self.blad == \"eb\":\n\t\t########################## EKSTRABLADET ###########################\n\t\t\tr = requests.get(\"https://ekstrabladet.dk/\")\n\t\t\tdata = r.text\n\t\t\tsoup = BeautifulSoup(data, \"html.parser\")\n\t\t\tupper = soup.findAll(\"div\", {\"class\": \"df-article-content\"})\n\t\t\tfor a in upper:\n\t\t\t\tprint(a)\n\t\t\t\ttitle = \"\"\n\t\t\t\ttry:\n\t\t\t\t\tspans = a.findAll(\"h3\")\n\t\t\t\t\tlink = a.find(\"a\")['href']\n\t\t\t\texcept TypeError as e:\n\t\t\t\t\tbreak #no link so skip\n\t\t\t\tfor s in spans:\n\t\t\t\t\ttry:\n\t\t\t\t\t\ttitle_part = (s.find(\"span\").text)\n\t\t\t\t\t\ttitle +=\" \"+ title_part\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tpass\n\t\t\t\ttitle = title.strip()\n\t\t\t\tinner = requests.get(link)\n\t\t\t\tinnerData = inner.text\n\t\t\t\tinnerSoup = BeautifulSoup(innerData, \"html.parser\")\n\t\t\t\tfullContent = \"\"\n\t\t\t\tkommentareAntal = None\n\t\t\t\ttry:\n\t\t\t\t\trealTitle = (innerSoup.find(\"h1\", {\"class\": \"art-title\"}).text).strip()\n\t\t\t\t\ttimeOfArticle = (innerSoup.find((\"time\"),{\"class\": [\"eb-row-item\",\"eb-row-item--grow\",\"article-timestamp--top\"]}).text).strip()\n\t\t\t\t\tkommentareAntal = (innerSoup.find(\"span\", id=\"fnTalkCommentText\").text).split(\" \")[0]\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass\n\n\t\t\t\t#print(timeOfArticle)\n\t\t\t\tfor part in (innerSoup.findAll(\"p\", class_=False)):\n\t\t\t\t\tpart = part.text\n\t\t\t\t\tif(\"function(apntag)\" not in part and \"eller prøv igen senere.\" not in part):\n\t\t\t\t\t\tif(\"Foto:\" in part):\n\t\t\t\t\t\t\tpart = \"[FOTO]\" + part.split(\"Foto:\")[0] + \"[FOTO]\"\n\t\t\t\t\t\tfullContent = fullContent + part\n\n\t\t\t\t#print(title + \" --- \" + realTitle)\n\t\t\t\tif \"ekstrabladet.dk\" in link:\n\t\t\t\t\tif (not innerSoup.find(\"body\", {\"class\": \"body--plus\"})) and (not innerSoup.find(\"body\", {\"class\": \"body--plus\"})):\n\t\t\t\t\t\treturnData[link] = [title,realTitle,fullContent,timeOfArticle, kommentareAntal]\n\t\t\t\telse:\n\t\t\t\t\treturnData[link] = [title,\"N/A\",fullContent,\"N/A\", 0]\n\n\t\treturn returnData\n\t\t########################## EKSTRABLADET ###########################\n\n\n\tdef parseData(self,response):\n\t\ttry:\n\t\t\treturn response\n\t\texcept:\n\t\t\tpass","sub_path":"src/popData.py","file_name":"popData.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335449946","text":"import os, sys, json, re\nfrom subprocess import check_output\n\ndef main():\n lookup = {}\n for name in os.listdir(\"lec\"):\n lookup[name.split(\"-\")[0]] = f\"lec/{name}\"\n\n txt = str(check_output(\"pbpaste\"), \"utf-8\")\n matches = re.findall(r\"Lecture (\\d+): (.+)\", txt)\n for num, title in matches:\n if len(num) == 1:\n num = \"0\"+num\n\n short = title.split()[0].lower()\n\n if num in 
lookup:\n dirname = lookup[num]\n metaname = f\"{dirname}/meta.txt\"\n with open(metaname) as f:\n lines = list(f) + [\"\"]\n lines[0] = title\n with open(metaname, \"w\") as f:\n f.write(\"\\n\".join(lines) + \"\\n\")\n else:\n dirname = f\"lec/{num}-{short}\"\n metaname = f\"{dirname}/meta.txt\"\n os.mkdir(dirname)\n with open(metaname, \"w\") as f:\n f.write(title + \"\\n\")\n print(matches)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tyler/cs544/s23/paste-lec.py","file_name":"paste-lec.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"431286369","text":"from sys import exit\nINF = 10**21\nN, L= [int(n) for n in input().split()]\napples = [L +i - 1 for i in range(1,N+1)]\nans = INF\nval = INF\n# print(apples)\nfor i in range(N):\n eat = apples[i]\n tmp = abs(eat)\n if tmp < val:\n ans = sum(apples) - eat\n val = tmp\nprint(ans)\n","sub_path":"problems/Beginner/B/131.py","file_name":"131.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433681909","text":"from typing import Callable, Generator, Optional\nfrom testutils.trees import TreeNode, build_tree\n\n\ndef inorder_traversal(root: TreeNode) -> Generator[int, None, None]:\n stack: list[TreeNode] = []\n current: Optional[TreeNode] = root\n while True:\n if current is not None:\n stack.append(current)\n current = current.left\n\n elif stack:\n node = stack.pop()\n yield node.val\n current = node.right\n\n else:\n break\n\n\nclass Solution:\n def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n if root is None:\n return -1\n\n counter = 1\n for value in inorder_traversal(root):\n if counter == k:\n return value\n\n counter += 1\n\n return -1\n\n\ntests = [\n (\n ([3, 1, 4, None, 2], 1,),\n 1,\n ),\n (\n ([5, 3, 6, 2, 4, None, None, 1], 3,),\n 3,\n ),\n]\n\n\ndef validator(\n kthSmallest: Callable[[Optional[TreeNode], int], int],\n inputs: tuple[list[Optional[int]], int],\n expected: int,\n) -> None:\n values, k = inputs\n tree = build_tree(values)\n output = kthSmallest(tree, k)\n assert output == expected, (output, expected)\n","sub_path":"kth_smallest_element_in_a_bst.py","file_name":"kth_smallest_element_in_a_bst.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566132848","text":"'''\r\n2.\r\n년, 월, 일을 입력하면 그 날이 무슨 요일인지 출력하는 함수를 만드세요.\r\n테스트코드\r\n<입력>\r\nprint(\"%d년 %d월 %d일은 %s 입니다.\" % (myYear, myMonth, myDay, printDayOfTheWeek(myYear, myMonth, myDay)))\r\n<출력>\r\n연도를 입력하시오 : 2020\r\n월을 입력하시오 : 3\r\n일을 입력하시오 : 13\r\n2020년 3월 13일은 금요일 입니다.\r\n'''\r\n\r\ndef printDayOfTheWeek():\r\n year = int(input('연도를 입력하시오 : '))\r\n month = int(input('월을 입력하시오 : '))\r\n day = int(input('일을 입력하시오 :'))\r\n basic_year = 365*(year-1)\r\n yun_year = (year-1)//4 - (year-1)//100 + (year-1)//400\r\n total = basic_year + yun_year\r\n \r\n if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:\r\n days = [31,29,31,30,31,30,31,31,30,31,30,31]\r\n for i in range(month-1):\r\n total += days[i]\r\n else:\r\n days = [31,28,31,30,31,30,31,31,30,31,30,31]\r\n for i in range(month-1):\r\n total += days[i]\r\n \r\n total += day\r\n if total % 7 == 0:\r\n answer = '일'\r\n if total % 7 == 1:\r\n answer = '월'\r\n if total % 7 == 2:\r\n answer = '화'\r\n if total % 7 == 3:\r\n answer = '수'\r\n if total % 7 == 4:\r\n answer = '목'\r\n if total % 7 == 
5:\r\n answer = '금'\r\n if total % 7 == 6:\r\n answer = '토'\r\n print('{}년 {}월 {}일은 {}요일 입니다.'.format(year,month,day,answer))\r\n \r\nprintDayOfTheWeek()","sub_path":"quiz/quiz2_2.py","file_name":"quiz2_2.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"123276980","text":"from app import db\nfrom datetime import datetime\n\n\nclass Customer(db.Model):\n\n sort_fields = [\"name\", \"registered_at\", \"postal_code\"]\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n name = db.Column(db.String(200))\n postal_code = db.Column(db.String(50))\n registered_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())\n phone = db.Column(db.String(50))\n videos = db.relationship(\"Video\", back_populates=\"customers\", secondary=\"rentals\")\n __tablename__ = \"customers\"\n\n def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"phone\": self.phone,\n \"postal_code\": self.postal_code,\n \"registered_at\": self.registered_at\n }\n\n @classmethod\n def from_json(cls, request_body):\n\n return cls(\n name = str(request_body[\"name\"]),\n postal_code = str(request_body[\"postal_code\"]),\n phone = str(request_body[\"phone\"])\n )\n\n @classmethod\n def validate_id(cls, id):\n try:\n int(id)\n except ValueError:\n return \"\", 400\n\n obj = cls.query.get(id)\n\n if not obj:\n return {\n \"message\": f\"Customer {id} was not found\"\n }, 404\n\n @classmethod\n def check_input_fields(cls, request_body):\n\n required_fields = [\"name\", \"postal_code\", \"phone\"]\n\n for field in required_fields:\n if field not in request_body:\n return { \"details\" : f\"Request body must include {field}.\"}, 400\n\n if len(request_body[\"postal_code\"]) > 50 or len(request_body[\"phone\"]) > 50 or len(request_body[\"name\"]) > 200:\n return {\n \"details\" : \"Input length exceeds database capacity.\"\n }, 400\n\n","sub_path":"app/models/customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"43170030","text":"import argparse\nfrom subnetwork import Discriminator\nimport torch\nimport numpy as np\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--gpu_number', type=int, default=0)\nparser.add_argument('--image_size', type=int, default=128)\nparser.add_argument('--z_dim', type=int, default=128)\n\nargs = parser.parse_args()\n\n\ndevice = 'cuda:{}'.format(args.gpu_number) if torch.cuda.is_available() else 'cpu'\n\n# build discriminator class\ndiscriminator = Discriminator(args=args, in_channels=3)\n\n\nreal_image = np.random.randn(10, 3, args.image_size, args.image_size)\nreal_image_tensor = torch.from_numpy(real_image).to(device).float()\nprint('input image tensor shape:', real_image_tensor.shape)\ndiscriminator(real_image_tensor)\n\n\n","sub_path":"test_discriminator.py","file_name":"test_discriminator.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621881949","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 15 11:00:10 2019\n\n@author: abhijithneilabraham\n\"\"\"\n\n# Imports\n#-----------\n# rasa nlu\nfrom rasa_nlu.training_data import load_data\nfrom rasa_nlu.config import RasaNLUModelConfig\nfrom rasa_nlu.model import Trainer\nfrom rasa_nlu import config\nfrom rasa_nlu.model import 
Metadata, Interpreter\n\n# Functions\n#------------\ndef train (data, config_file, model_dir):\n training_data = load_data(data)\n trainer = Trainer(config.load(config_file))\n trainer.train(training_data)\n model_directory = trainer.persist(model_dir, fixed_model_name = 'chat')\n\n# Training\n#------------\ntrain('nlu_train.md', 'nlu_config.yml', 'models/nlu')\ninterpreter = Interpreter.load('./models/nlu/default/chat')\n\n# define function to ask question\ndef ask_question(text):\n print(interpreter.parse(text))\n\n# asking question\n\ntext=\"How many days in January\"\ntest=interpreter.parse(text)\nask_question(text)\nprint(test[\"intent\"][\"name\"])\n","sub_path":"RasaTEST/Rasa_stuff/chat1.py","file_name":"chat1.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"516017164","text":"\"\"\"\n Functions for various anti-aliasing functions and wrappers.\n\"\"\"\nfrom vsutil import get_w, get_y, split\nfrom typing import Optional\n\nimport vapoursynth as vs\n\nfrom . import util\n\ncore = vs.core\n\n\ndef nneedi3_clamp(clip: vs.VideoNode, strength: int = 1,\n mask: Optional[vs.VideoNode] = None, ret_mask: bool = False,\n show_mask: bool = False,\n opencl: bool = False) -> vs.VideoNode:\n \"\"\"\n Function written by Zastin to clamp eedi3 to nnedi3 for the purpose of reducing artifacts.\n This should fix every issue created by eedi3. For example: https://i.imgur.com/hYVhetS.jpg\n\n Dependencies:\n\n * kagefunc (optional: retinex edgemask)\n * vapoursynth-retinex (optional: retinex edgemask)\n * vapoursynth-tcanny (optional: retinex edgemask)\n * vapoursynth-eedi3\n * vapoursynth-nnedi3 or znedi3\n * vapoursynth-nnedi3cl (optional: opencl)\n * vsTAAmbk\n\n :param clip: Input clip\n :param strength: Set threshold strength (Default: 1)\n :param mask: Clip to use for custom mask (Default: None)\n :param ret_mask: Replace default mask with a retinex edgemask (Default: False)\n :param show_mask: Return mask instead of clip (Default: False)\n :param opencl: OpenCL acceleration (Default: False)\n\n :return: Antialiased clip\n \"\"\"\n try:\n from vsTAAmbk import TAAmbk\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"nneedi3_clamp: missing dependency 'vsTAAmbk'\")\n\n bits = clip.format.bits_per_sample - 8\n thr = strength * (1 << bits) # scale the 8-bit threshold to the clip's bit depth; the original '>>' zeroed it for >8-bit clips\n strong = TAAmbk(clip, aatype='Eedi3', alpha=0.25, beta=0.5, gamma=40, nrad=2, mdis=20, mtype=0,\n opencl=opencl)\n weak = TAAmbk(clip, aatype='Nnedi3', nsize=3, nns=3, qual=1, mtype=0, opencl=opencl)\n expr = 'x z - y z - * 0 < y x y {0} + min y {0} - max ?'.format(thr)\n\n if clip.format.num_planes > 1:\n expr = [expr, '']\n aa = core.std.Expr([strong, weak, clip], expr)\n\n if mask:\n merged = clip.std.MaskedMerge(aa, mask, planes=0)\n elif ret_mask:\n try:\n import kagefunc as kgf\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"nneedi3_clamp: missing dependency 'kagefunc'\")\n mask = kgf.retinex_edgemask(clip, 1).std.Binarize()\n merged = clip.std.MaskedMerge(aa, mask, planes=0)\n else:\n mask = clip.std.Prewitt(planes=0).std.Binarize(planes=0).std.Maximum(planes=0).std.Convolution([1] * 9, planes=0)\n mask = get_y(mask)\n merged = clip.std.MaskedMerge(aa, mask, planes=0)\n\n if show_mask:\n return mask\n return merged if clip.format.color_family == vs.GRAY else core.std.ShufflePlanes([merged, clip], [0, 1, 2], vs.YUV)\n\n\ndef transpose_aa(clip: vs.VideoNode,\n eedi3: bool = False) -> vs.VideoNode:\n \"\"\"\n Function written by Zastin and 
modified by LightArrowsEXE to perform anti-aliasing\n over a clip by using Nnedi3, transposing, using Nnedi3 again, and transposing a final time.\n This results in overall stronger anti-aliasing.\n Useful for shows like Yuru Camp with bad lineart problems.\n\n Dependencies: vapoursynth-eedi3, vapoursynth-nnedi3, znedi3\n\n :param clip: Input clip\n :param eedi3: Use eedi3 for the interpolation (Default: False)\n\n :return: Antialiased clip\n \"\"\"\n clip_y = get_y(clip)\n\n if eedi3:\n def _aa(clip_y):\n clip_y = clip_y.std.Transpose()\n clip_y = clip_y.eedi3m.EEDI3(0, 1, 0, 0.5, 0.2)\n clip_y = clip_y.znedi3.nnedi3(1, 0, 0, 3, 4, 2)\n clip_y = clip_y.resize.Spline36(clip.height, clip.width, src_top=.5)\n clip_y = clip_y.std.Transpose()\n clip_y = clip_y.eedi3m.EEDI3(0, 1, 0, 0.5, 0.2)\n clip_y = clip_y.znedi3.nnedi3(1, 0, 0, 3, 4, 2)\n return clip_y.resize.Spline36(clip.width, clip.height, src_top=.5)\n else:\n def _aa(clip_y):\n clip_y = clip_y.std.Transpose()\n clip_y = clip_y.nnedi3.nnedi3(0, 1, 0, 3, 3, 2)\n clip_y = clip_y.nnedi3.nnedi3(1, 0, 0, 3, 3, 2)\n clip_y = clip_y.resize.Spline36(clip.height, clip.width, src_top=.5)\n clip_y = clip_y.std.Transpose()\n clip_y = clip_y.nnedi3.nnedi3(0, 1, 0, 3, 3, 2)\n clip_y = clip_y.nnedi3.nnedi3(1, 0, 0, 3, 3, 2)\n return clip_y.resize.Spline36(clip.width, clip.height, src_top=.5)\n\n def _csharp(flt, clip):\n blur = core.std.Convolution(flt, [1] * 9)\n return core.std.Expr([flt, clip, blur], 'x y < x x + z - x max y min x x + z - x min y max ?')\n\n aaclip = _aa(clip_y)\n aaclip = _csharp(aaclip, clip_y).rgvs.Repair(clip_y, 13)\n\n return aaclip if clip.format.color_family is vs.GRAY else core.std.ShufflePlanes([aaclip, clip], [0, 1, 2], vs.YUV)\n\n\ndef upscaled_sraa(clip: vs.VideoNode,\n rfactor: float = 1.5,\n rep: Optional[int] = None,\n h: Optional[int] = None, ar: Optional[int] = None,\n sharp_downscale: bool = False) -> vs.VideoNode:\n \"\"\"\n Another AA written by Zastin and modified by LightArrowsEXE to perform\n an upscaled single-rate AA to deal with heavy aliasing.\n Useful for Web rips, where the source quality is not good enough to descale,\n but you still want to deal with some bad aliasing and lineart.\n\n Dependencies: fmtconv, rgsf (optional: 32bit clip), vapoursynth-eedi3, vapoursynth-nnedi3\n\n :param clip: Input clip\n :param rfactor: Image enlargement factor. 1.3..2 makes it comparable in strength to vsTAAmbk.\n It is not recommended to go below 1.3 (Default: 1.5)\n :param rep: Repair mode (Default: None)\n :param h: Set custom height. Width and aspect ratio are auto-calculated (Default: None)\n :param ar: Force custom aspect ratio. 
Width is auto-calculated (Default: None)\n :param sharp_downscale: Use a sharper downscaling kernel (inverse gauss) (Default: False)\n\n :return: Antialiased clip\n \"\"\"\n planes = split(clip)\n\n nnargs = dict(nsize=0, nns=4, qual=2)\n eeargs = dict(alpha=0.2, beta=0.6, gamma=40, nrad=2, mdis=20) # TAAmbk defaults are 0.5, 0.2, 20, 3, 30\n\n ssw = round( clip.width * rfactor )\n ssh = round( clip.height * rfactor )\n\n while ssw % 2:\n ssw += 1\n while ssh % 2:\n ssh += 1\n\n if h:\n if not ar:\n ar = clip.width / clip.height\n w = get_w(h, aspect_ratio=ar)\n else:\n w, h = clip.width, clip.height\n\n # Nnedi3 upscale from source height to source height * rounding (Default 1.5)\n up_y = core.nnedi3.nnedi3(planes[0], 0, 1, 0, **nnargs)\n up_y = core.resize.Spline36(up_y, height=ssh, src_top=.5)\n up_y = core.std.Transpose(up_y)\n up_y = core.nnedi3.nnedi3(up_y, 0, 1, 0, **nnargs)\n up_y = core.resize.Spline36(up_y, height=ssw, src_top=.5)\n\n # Single-rate AA\n aa_y = core.eedi3m.EEDI3(up_y, 0, 0, 0, **eeargs, sclip=core.nnedi3.nnedi3(up_y, 0, 0, 0, **nnargs))\n aa_y = core.std.Transpose(aa_y)\n aa_y = core.eedi3m.EEDI3(aa_y, 0, 0, 0, **eeargs, sclip=core.nnedi3.nnedi3(aa_y, 0, 0, 0, **nnargs))\n\n # Back to source clip height or given height\n scaled = core.fmtc.resample(aa_y, w, h, kernel='gauss', invks=True, invkstaps=2, taps=1, a1=32) if sharp_downscale else core.resize.Spline36(aa_y, w, h)\n\n if rep:\n scaled = util.pick_repair(scaled)(scaled, planes[0].resize.Spline36(w, h), rep)\n return scaled if clip.format.color_family is vs.GRAY else core.std.ShufflePlanes([scaled, clip], [0, 1, 2], vs.YUV)\n","sub_path":"lvsfunc/aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411470717","text":"import sqlite3\nimport csv\nimport json\n\n# proj3_choc.py\n# You can change anything in this file you want as long as you pass the tests\n# and meet the project requirements! 
You will need to implement several new\n# functions.\n\n# Part 1: Read data from CSV and JSON into a new database called choc.db\nDBNAME = 'choc.db'\nBARSCSV = 'flavors_of_cacao_cleaned.csv'\nCOUNTRIESJSON = 'countries.json'\n\ndef create_bars():\n try:\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n\n try:\n statement = '''\n DROP TABLE IF EXISTS 'Bars';\n '''\n cur.execute(statement)\n statement = '''CREATE TABLE 'Bars' \n ('Id' INTEGER PRIMARY KEY AUTOINCREMENT, 'Company' Text, 'SpecificBeanBarName' TEXT, 'REF' TEXT,\n 'ReviewDate' TEXT, 'CocoaPercent' REAL, 'CompanyLocationId' INT, 'Rating' REAL, 'BeanType' TEXT,\n 'BroadBeanOriginId' INT);'''\n cur.execute(statement)\n\n conn.commit()\n conn.close()\n except:\n print(\"Could not create table Bars.\")\n\ndef populate_bars():\n try:\n conn = sqlite3.connect(DBNAME)\n #From https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te\n conn.text_factory = str\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n\n try:\n with open(BARSCSV) as f:\n csvReader = csv.reader(f)\n statement = '''INSERT INTO 'Bars' (Company, SpecificBeanBarName, REF, ReviewDate, CocoaPercent,\n CompanyLocationId, Rating, BeanType, BroadBeanOriginId) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)'''\n for row in csvReader:\n if row[0] == \"Company\":\n continue\n cocoa = ((float(row[4][:-1])) / 100)\n cur.execute(statement, (row[0], row[1], row[2], row[3], cocoa, row[5], row[6], row[7], row[8]))\n\n conn.commit()\n conn.close()\n except:\n print(\"Could not populate table Bars.\")\n\ndef create_countries():\n try:\n conn = sqlite3.connect(DBNAME)\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n try:\n statement = '''\n DROP TABLE IF EXISTS 'Countries';\n '''\n cur.execute(statement)\n statement = '''CREATE TABLE 'Countries' \n ('Id' INTEGER PRIMARY KEY AUTOINCREMENT, 'Alpha2' Text, 'Alpha3' TEXT, 'EnglishName' TEXT,\n 'Region' TEXT, 'Subregion' TEXT, 'Population' INT, 'Area' REAL);'''\n cur.execute(statement)\n\n conn.commit()\n conn.close()\n except:\n print(\"Could not create table Countries.\") \n\ndef populate_countries():\n try:\n conn = sqlite3.connect(DBNAME)\n #From https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te\n conn.text_factory = str\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n\n try:\n f = json.load(open('countries.json'))\n statement = '''INSERT INTO 'Countries' (Alpha2, Alpha3, EnglishName, Region, Subregion,\n Population, Area) VALUES (?, ?, ?, ?, ?, ?, ?)'''\n \n for country in f:\n cur.execute(statement, (country['alpha2Code'], country['alpha3Code'], country['name'], country['region'], country['subregion'], country['population'], country['area']))\n\n cur.execute(statement, ('UN', 'UNK', 'Unknown', 'Unknown', '', 0, 0)) \n \n conn.commit()\n conn.close()\n except:\n print(\"Could not populate table Countries.\")\n\ndef update_tables_with_foreign_keys():\n try:\n conn = sqlite3.connect(DBNAME)\n #From https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te\n conn.text_factory = str\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n\n\n f = json.load(open(COUNTRIESJSON))\n\n for country in f:\n country_id = cur.execute(\"SELECT id FROM 'Countries' WHERE EnglishName=?\", 
(country['name'],)).fetchone()[0]\n #statement = '''UPDATE Bars SET CompanyLocationId = ? WHERE CompanyLocationId LIKE ? '''\n #cur.execute(statement,(country_id,'%'+country['name']+'%'))\n #statement = '''UPDATE Bars SET BroadBeanOriginId = ? WHERE BroadBeanOriginId LIKE ? '''\n #cur.execute(statement,(country_id,'%'+country['name']+'%'))\n statement = '''UPDATE Bars SET CompanyLocationId = ? WHERE CompanyLocationId = ?'''\n cur.execute(statement,(country_id, country['name']))\n statement = '''UPDATE Bars SET BroadBeanOriginId = ? WHERE BroadBeanOriginId = ? '''\n cur.execute(statement,(country_id, country['name']))\n statement = '''UPDATE Bars SET BroadBeanOriginId = ? WHERE BroadBeanOriginId = ? '''\n cur.execute(statement,(251, \"Unknown\"))\n\n\n conn.commit()\n conn.close()\n\ncreate_bars()\npopulate_bars()\n\ncreate_countries()\npopulate_countries()\n\nupdate_tables_with_foreign_keys()\n# Part 2: Implement logic to process user commands\ndef process_command(command):\n try:\n conn = sqlite3.connect(DBNAME)\n #From https://stackoverflow.com/questions/3425320/sqlite3-programmingerror-you-must-not-use-8-bit-bytestrings-unless-you-use-a-te\n conn.text_factory = str\n cur = conn.cursor()\n except:\n print(\"Could not connect to database.\")\n lst = []\n command_split = command.split()\n if command_split[0] == \"bars\":\n sortby = 'rating'\n number = 10\n sortby_query = \" ORDER BY rating DESC LIMIT ?\"\n country_query = \"None\"\n country = \"\"\n bottom = False\n for word in command_split:\n if \"sellcountry\" in word:\n country = word[-2:]\n country_query = \" WHERE c.Alpha2= ? AND c.EnglishName != \\\"Unknown\\\"\"\n elif \"sourcecountry\" in word:\n country = word[-2:]\n country_query = \" WHERE z.Alpha2 = ? AND z.EnglishName != \\\"Unknown\\\"\"\n elif \"sellregion\" in word:\n split_word = word.split('=')\n country = split_word[1]\n country_query = \" WHERE c.Region = ? \"\n elif \"sourceregion\" in word:\n split_word = word.split('=')\n country = split_word[1]\n country_query = \" WHERE z.Region = ? 
\"\n elif word == \"cocoa\":\n sortby = \"CocoaPercent\"\n elif \"bottom\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" LIMIT ?\"\n bottom = True\n elif \"top\" in word:\n split_word = word.split('=')\n number = int(split_word[1]) \n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\"\n elif word == \"ratings\":\n continue \n elif word == \"bars\":\n continue\n else:\n print(\"Command not recognized: \" + command)\n return\n\n if not bottom:\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\"\n\n base_statement = '''SELECT Bars.SpecificBeanBarName, Bars.Company, c.EnglishName, Bars.Rating, Bars.CocoaPercent, z.EnglishName\n FROM Bars\n JOIN Countries as c ON Bars.CompanyLocationId = c.id\n JOIN Countries as z ON Bars.BroadBeanOriginId = z.id'''\n if country_query != \"None\":\n statement = base_statement + country_query + sortby_query\n cur.execute(statement,(country,number))\n else:\n statement = base_statement + sortby_query\n cur.execute(statement,(number,))\n\n lst = cur.fetchall()\n\n elif command_split[0] == \"companies\":\n select_statement = '''SELECT Bars.Company, Countries.EnglishName, AVG(Bars.Rating) '''\n join_statement = '''FROM Bars JOIN Countries ON Countries.Id = Bars.CompanyLocationId'''\n number = 10\n sortby_query = ''' ORDER BY AVG(Bars.Rating) DESC LIMIT ?'''\n sortby = \"AVG(Bars.Rating)\"\n groupby_query = ''' GROUP BY Bars.Company HAVING COUNT(*) > 4'''\n country = \"\"\n country_query = \"None\"\n bottom = False\n for word in command_split:\n if \"country\" in word:\n country = word[-2:]\n country_query = \" WHERE Countries.Alpha2= ? AND Countries.EnglishName != \\\"Unknown\\\"\"\n elif \"region\" in word:\n split_word = word.split('=')\n country = split_word[1]\n country_query = \" WHERE Countries.Region = ? 
AND Countries.EnglishName != \\\"Unknown\\\"\"\n elif word == \"cocoa\":\n sortby = \"AVG(Bars.CocoaPercent)\"\n select_statement = '''SELECT Bars.Company, Countries.EnglishName, AVG(Bars.CocoaPercent) ''' \n elif word == \"bars_sold\":\n sortby = \"COUNT(*)\"\n select_statement = '''SELECT Bars.Company, Countries.EnglishName, COUNT(*) '''\n elif \"bottom\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" LIMIT ?\"\n bottom = True\n elif \"top\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\" \n elif word == \"ratings\":\n continue\n elif word == \"companies\":\n continue\n else:\n print(\"Command not recognized: \" + command)\n return\n\n if not bottom:\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\"\n\n if country_query != \"None\":\n statement = select_statement + join_statement + country_query + groupby_query + sortby_query\n cur.execute(statement,(country,number))\n else:\n statement = select_statement + join_statement + groupby_query + sortby_query\n cur.execute(statement,(number,)) \n lst = cur.fetchall() \n\n elif command_split[0] == \"countries\":\n select_statement = '''SELECT Countries.EnglishName, Countries.Region, AVG(Bars.Rating)'''\n join_statement = ''' FROM Bars JOIN Countries ON Bars.CompanyLocationId = Countries.Id WHERE Countries.EnglishName != \"Unknown\"'''\n groupby_query = ''' GROUP BY Countries.EnglishName HAVING COUNT(*) > 4'''\n sortby_query = ''' ORDER BY AVG(Bars.Rating) DESC LIMIT ?'''\n country = \"\"\n country_query = \"None\"\n sortby = \"AVG(Bars.Rating)\"\n number = 10\n bottom = False\n for word in command_split:\n if \"region\" in word:\n split_word = word.split('=')\n country = split_word[1]\n country_query = \" AND Countries.Region = ?\"\n elif word == \"sources\":\n join_statement = ''' FROM Bars JOIN Countries ON Bars.BroadBeanOriginId = Countries.Id WHERE Countries.EnglishName != \"Unknown\"'''\n elif word == \"cocoa\":\n select_statement = '''SELECT Countries.EnglishName, Countries.Region, AVG(Bars.CocoaPercent)'''\n sortby = \"AVG(Bars.CocoaPercent)\"\n elif word == \"bars_sold\":\n select_statement = '''SELECT Countries.EnglishName, Countries.Region, COUNT(SpecificBeanBarName) '''\n sortby = \"COUNT(*)\"\n elif \"bottom\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" LIMIT ?\"\n bottom = True\n elif \"top\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\" \n elif word == \"ratings\":\n continue\n elif word == \"sellers\":\n continue\n elif word == \"countries\":\n continue\n else:\n print(\"Command not recognized: \" + command)\n return\n\n if not bottom:\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\"\n\n if country_query != \"None\":\n statement = select_statement + join_statement + country_query + groupby_query + sortby_query\n cur.execute(statement,(country,number))\n else:\n statement = select_statement + join_statement + groupby_query + sortby_query\n cur.execute(statement,(number,)) \n lst = cur.fetchall() \n\n elif command_split[0] == \"regions\":\n select_statement = '''SELECT Countries.Region, AVG(Bars.Rating)'''\n join_statement = ''' FROM Bars JOIN Countries ON Bars.CompanyLocationId = Countries.Id WHERE Countries.EnglishName != \\\"Unknown\\\"'''\n groupby_query = ''' GROUP BY Countries.Region HAVING COUNT(*) > 
4'''\n sortby_query = ''' ORDER BY AVG(Bars.Rating) DESC LIMIT ?'''\n country = \"\"\n country_query = \"None\"\n sortby = \"AVG(Bars.Rating)\"\n number = 10\n bottom = False\n for word in command_split:\n if word == \"sources\":\n join_statement = ''' FROM Bars JOIN Countries ON Bars.BroadBeanOriginId = Countries.Id WHERE Countries.EnglishName != \\\"Unknown\\\"'''\n elif word == \"cocoa\":\n select_statement = '''SELECT Countries.Region, AVG(Bars.CocoaPercent)'''\n sortby = \"AVG(Bars.CocoaPercent)\"\n elif word == \"bars_sold\":\n select_statement = '''SELECT Countries.Region, COUNT(*) '''\n sortby = \"COUNT(*)\"\n elif \"bottom\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" LIMIT ?\"\n bottom = True\n elif \"top\" in word:\n split_word = word.split('=')\n number = int(split_word[1])\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\" \n elif word == \"ratings\":\n continue\n elif word == \"sellers\":\n continue\n elif word == \"regions\":\n continue\n else:\n print(\"Command not recognized: \" + command)\n return\n\n if not bottom:\n sortby_query = \" ORDER BY \" + sortby + \" DESC LIMIT ?\"\n\n statement = select_statement + join_statement + groupby_query + sortby_query\n cur.execute(statement,(number,)) \n \n lst = cur.fetchall() \n \n else:\n print(\"Command not recognized: \" + command)\n return\n \n conn.close()\n\n return lst\n\n\ndef load_help_text():\n with open('help.txt') as f:\n return f.read()\n\n# Part 3: Implement interactive prompt. We've started for you!\ndef interactive_prompt():\n help_text = load_help_text()\n response = ''\n end = False\n while response != 'exit':\n response = input('Enter a command: ')\n if response != 'exit':\n try:\n result = process_command(response)\n except:\n print(\"Unable to process command.\")\n continue\n if type(result) == list:\n tup_len = len(result[0])\n for tup in result:\n x = 1\n for word in tup: \n if x == tup_len:\n end = True\n else:\n end = False\n if type(word) == float:\n word = round(word, 1)\n if word <= 1 and x != 4:\n word = word * 100\n word = str(word)\n word = word.split('.')\n word = word[0] + \"% \"\n if(end):\n print(word)\n else:\n print(word, end = \" \")\n else:\n word = str(word) + \" \"\n if(end):\n print(word)\n else:\n print(word, end = \" \")\n elif type(word) == str:\n #From https://stackoverflow.com/questions/2872512/python-truncate-a-long-string\n word = (word[:12] + '...') if len(word) > 12 else word\n if(end):\n print('{0: <15}'.format(word))\n else: \n print('{0: <15}'.format(word), end = \" \")\n elif(end):\n print(word)\n else:\n print(word, end = \" \")\n x += 1\n if response == 'help':\n print(help_text)\n continue\n return\n\n# Make sure nothing runs or prints out when this file is run as a module\nif __name__==\"__main__\":\n interactive_prompt()\n","sub_path":"proj3_choc.py","file_name":"proj3_choc.py","file_ext":"py","file_size_in_byte":17492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332305190","text":"from __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport tensorflow as tf\n\n\nimport tflearn\nfrom tflearn.data_utils import shuffle, to_categorical\nfrom tflearn.layers.core import input_data, dropout, fully_connected\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\nfrom tflearn.layers.estimator import regression\nfrom tflearn.data_preprocessing import ImagePreprocessing\nfrom tflearn.data_augmentation import 
ImageAugmentation\n\n\n\n# Real-time data preprocessing\nimg_prep = ImagePreprocessing()\nimg_prep.add_featurewise_zero_center()\nimg_prep.add_featurewise_stdnorm()\n\n# Real-time data augmentation\nimg_aug = ImageAugmentation()\nimg_aug.add_random_flip_leftright()\nimg_aug.add_random_rotation(max_angle=25.)\n\n# Convolutional network building\n\nclass Cifar10Classifier():\n def __init__(self):\n network = input_data(shape=[None, 32, 32, 3],\n data_preprocessing=img_prep,\n data_augmentation=img_aug,\n name='Classifier.Input')\n network = conv_2d(network, 32, 3, activation='relu', name='Classifier.Conv1')\n network = max_pool_2d(network, 2, name='Classifier.Pool1')\n network = conv_2d(network, 64, 3, activation='relu', name='Classifier.Conv2')\n network = conv_2d(network, 64, 3, activation='relu', name='Classifier.Conv3')\n network = max_pool_2d(network, 2, name='Classifier.Pool3')\n network = fully_connected(network, 512, activation='relu', name='Classifier.FC4')\n network = dropout(network, 0.5, name='Classifier.Drop')\n network = fully_connected(network, 10, activation='softmax', name='Classifier.FC5')\n network = regression(network, optimizer='adam',\n loss='categorical_crossentropy',\n learning_rate=0.001,\n name='Classifier.Reg')\n\n # Train using classifier\n self.model = tflearn.DNN(network, tensorboard_verbose=0)\n\n\nif __name__ == \"__main__\":\n\n # Data loading and preprocessing\n # from tflearn.datasets import cifar10\n # (X, Y), (X_test, Y_test) = cifar10.load_data()\n # X, Y = shuffle(X, Y)\n # Y = to_categorical(Y, 10)\n # Y_test = to_categorical(Y_test, 10)\n\n # Data loading and preprocessing\n import tflib\n from tflib.cifar10 import load_data\n\n (X, Y), (X_test, Y_test) = load_data('/home/zhengliz/Data/cifar10')\n X = X.reshape((-1, 32, 32, 3)).astype('float32')\n X_test = X_test.reshape((-1, 32, 32, 3)).astype('float32')\n\n X, Y = shuffle(X, Y)\n Y = to_categorical(Y, 10)\n Y_test = to_categorical(Y_test, 10)\n\n\n\n classifier = Cifar10Classifier()\n\n classifier.model.fit(X, Y, n_epoch=50, shuffle=True,\n validation_set=(X_test, Y_test), show_metric=True,\n batch_size=96, run_id='cifar10_cnn')\n\n classifier.model.save(\"classifier_cifar10_cnn.tfl\")\n\n","sub_path":"cifar10_classifier.py","file_name":"cifar10_classifier.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315317358","text":"# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom pants.backend.python.subsystems.python_tool_base import PythonToolBase\nfrom pants.backend.python.util_rules.pex_requirements import PexRequirements, Resolve\nfrom pants.testutil.option_util import create_subsystem\nfrom pants.util.ordered_set import FrozenOrderedSet\n\n\nclass _DummyTool(PythonToolBase):\n options_scope = \"dummy\"\n default_lockfile_resource = (\"dummy\", \"dummy\")\n\n\ndef test_install_from_resolve_default() -> None:\n tool = create_subsystem(\n _DummyTool,\n lockfile=\"dummy.lock\",\n install_from_resolve=\"dummy_resolve\",\n requirements=[\"foo\", \"bar\", \"baz\"],\n version=\"\",\n extra_requirements=[],\n )\n pex_reqs = tool.pex_requirements()\n assert isinstance(pex_reqs, PexRequirements)\n assert pex_reqs.from_superset == Resolve(\"dummy_resolve\", False)\n assert pex_reqs.req_strings_or_addrs == FrozenOrderedSet([\"bar\", \"baz\", 
\"foo\"])\n","sub_path":"src/python/pants/backend/python/subsystems/python_tool_base_test.py","file_name":"python_tool_base_test.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176357436","text":"import json\n\nin_file = open(\"US_fires_9_1.json\", 'r')\n\nout_file = open('readable_eq_data.json','w')\n\neq_data = json.load(in_file)\n\njson.dump(eq_data,out_file, indent = 4)\n\n\nlist_of_eqs = eq_data\n\n\nmags,lons,lats = [],[],[]\n\nfor eq in list_of_eqs:\n if(eq[\"brightness\"] > 450):\n mags.append(eq[\"brightness\"])\n lons.append(eq[\"longitude\"])\n lats.append(eq[\"latitude\"])\n\nfrom plotly.graph_objs import Scattergeo, Layout\nfrom plotly import offline\n\ndata = [{\n 'type': 'scattergeo',\n 'lon': lons,\n 'lat': lats,\n 'marker': {\n 'size': [.03*mag for mag in mags],\n 'color': mags,\n \"colorscale\": 'Viridis',\n 'reversescale': True,\n 'colorbar': {'title': 'Magnitude'}\n },\n}]\n\nmy_layout = Layout(title = \"US Fires - 9/1/2020 through 9/13/2020\")\n\nfig = {'data':data, 'layout':my_layout}\n\noffline.plot(fig, filename = '09_1_2020_fires.html')\n","sub_path":"1-13_data_file.py","file_name":"1-13_data_file.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"299313921","text":"\"\"\"Greets user by name if available or\n get username from user and save it to disk\"\"\"\n\nimport json\n\ndef get_stored_username():\n \"\"\"Get stored username if available.\"\"\"\n filename = 'Username.json'\n try:\n with open(filename) as file_obj:\n username = json.load(file_obj)\n except FileNotFoundError:\n return None\n else:\n return username\n\ndef get_new_username():\n \"\"\"Get new username and save it to disk\"\"\"\n username = input(\"Input your username: \")\n filename = 'Username.json'\n with open(filename, 'w') as file_obj:\n json.dump(username, file_obj)\n return username\n\ndef verify_user(username):\n verify_user = input(\"Are you \" + username + \"? 
(y/n)\")\n    if verify_user.lower() == 'y':\n        return True\n    else:\n        return False\n\ndef greet_user():\n    \"\"\"Greet the user by name.\"\"\"\n    username = get_stored_username()\n    if not username:\n        username = get_new_username()\n        print(\"Username, \" + username + \", saved!\")\n    elif not verify_user(username):\n        username = get_new_username()\n        print(\"Username, \" + username + \", saved!\")\n    else:\n        print(\"Welcome back, \" + username + \"!\")\n\n\ngreet_user()\n","sub_path":"ch10/verify_user.py","file_name":"verify_user.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210209193","text":"from datetime import datetime\n\nfrom flask import Flask, abort, flash, redirect, render_template, request, url_for\n\napp = Flask(__name__)\napp.config['DEBUG'] = True\napp.config['SECRET_KEY'] = 'some_really_long_random_string_here'\n\n# local embedding_dims = std.extVar('embedding_dims');\n# local dataset = std.extVar('dataset');\n# local lang = std.extVar('lang');\n# local idf_weights = std.extVar('idf_weights');\n# local dan = std.extVar('dan');\n# local doc_projection = std.extVar('doc_projection');\n# local averaged = std.extVar('averaged');\n# local num_filters = std.extVar('num_filters');\n# local query_averaged = std.extVar('query_averaged');\n# local l2 = std.extVar('l2');\n# local lr = std.extVar('lr');\n\nvariables = {\n    'hyperparameters': [],\n    'architecture': [\n        {'name': 'embedding_dims', 'title': 'Embedding Dimensions', 'type': 'text'},\n        {'name': 'idf_weights', 'title': 'Use IDF Weights', 'type': 'bool'},\n        {'name': 'dan', 'title': 'Use Averaging Composer', 'type': 'bool'}\n    ],\n    'dataset': []\n}\n\ndef render_jsonnet():\n    pass\n\n@app.route('/')\ndef configure():\n    return render_template('configure.html')\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"neuclir/config/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"542998385","text":"import json\n\ndef get_rememebered_name():\n    filename='usernamev2.json'\n    try:\n        with open(filename) as f:\n            usernamev2= json.load(f)\n    except FileNotFoundError:\n        return None\n    else:\n        return usernamev2\n\ndef greet_user():\n    username = get_rememebered_name()\n    if username:\n        print(f\"Welcome back {username}\")\n    else:\n        username=input(\"What is your name: \")\n        filename = 'usernamev2.json'\n        with open(filename, 'w') as f:\n            json.dump(username, f)\n        print(f\"We'll remember you when you come back {username}\")\n\ngreet_user()","sub_path":"Chapter_10/get_remembered_name.py","file_name":"get_remembered_name.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"305330663","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport tensorflow.examples.tutorials.mnist.input_data as input_data\n\n\n# 初始化权重 w\ndef init_weights(shape):\n    return tf.Variable(tf.random_normal(shape, stddev=0.01))\n\n\n# 定义网络模型,只是基本的mlp模型,堆叠两层的逻辑回归\ndef model(X, w_h, w_o):\n    h = tf.nn.sigmoid(tf.matmul(X, w_h))\n    return tf.matmul(h, w_o) # 这里没有用softmax\n\n\n# 加载数据\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\ntrX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels\n\n# 定义占位符\nX = tf.placeholder(\"float\", [None, 784])\nY = tf.placeholder(\"float\", 
[None, 10])\n\n# 初始化模型参数\nw_h = init_weights([784, 625])\nw_o = init_weights([625, 10])\n\n# 定义模型\npy_x = model(X, w_h, w_o)\n\n# 定义损失函数\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))\n# 定义训练操作\ntrain_op = tf.train.GradientDescentOptimizer(0.05).minimize(cost) # construct an optimizer\n# 定义测试操作\npredict_op = tf.argmax(py_x, 1)\n\n# 定义并初始化会话\nsess = tf.Session()\ninit = tf.initialize_all_variables()\nsess.run(init)\n\n# 训练测试\nfor i in range(100):\n for start, end in zip(range(0, len(trX), 128), range(128, len(trX), 128)):\n sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end]})\n print(i, np.mean(np.argmax(teY, axis=1) ==\n sess.run(predict_op, feed_dict={X: teX, Y: teY})))\n","sub_path":"base_MLP.py","file_name":"base_MLP.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"345747714","text":"class UnionFind:\n def __init__(self, n):\n self.fathers = {}\n\n for i in range(1, n + 1):\n self.fathers[i] = i\n\n def find(self, node):\n path = []\n while self.fathers[node] != node:\n node = self.fathers[node]\n path.append(node)\n\n for n in path:\n self.fathers[n] = node\n\n return node\n\n def query(self, a, b):\n return self.find(a) == self.find(b)\n\n def connect(self, a, b):\n self.fathers[self.find(a)] = self.find(b)\n\nclass Solution:\n \"\"\"\n @param edges: List[List[int]]\n @return: List[int]\n \"\"\"\n def findRedundantConnection(self, edges):\n if not edges:\n return None\n uf = UnionFind(len(edges))\n\n for first, second in edges:\n if uf.query(first, second):\n return first, second\n else:\n uf.connect(first, second)\n\n return None\n","sub_path":"Amazon 9月OA真题/2/1088. Redundant Connection.py","file_name":"1088. 
Redundant Connection.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602192617","text":"import numpy as np\nfrom math import ceil\nimport logging\nfrom typing import List, Tuple\n\nfrom ..core.position import Position\nfrom .route import Route\nfrom .base_router import BaseRouter\n\n\nclass LinearRouter(BaseRouter):\n \"\"\" Calculates routes as straight lines and haversine distances,\n kind of \"bee line\" distance\n\n Usage sample::\n\n >>> from simobility.routers.linear_router import LinearRouter\n >>> my_router = LinearRouter(clock=clock, speed=speed_kmph)\n \"\"\"\n\n def __init__(self, clock, speed: int = 20):\n self.clock = clock\n self.speed = speed\n\n def map_match(self, position: Position) -> Position:\n return Position(*position.coords)\n\n def calculate_route(self, origin: Position, destination: Position) -> Route:\n \"\"\"\n Calculate route between 2 points\n\n Params\n ------\n\n origin : Position\n destination : Position\n\n Returns\n -------\n\n route : Route\n \"\"\"\n\n trip_duration = self.estimate_duration(origin, destination)\n\n y = np.linspace(origin.lat, destination.lat, trip_duration + 1)\n x = np.linspace(origin.lon, destination.lon, trip_duration + 1)\n\n path = np.array([x, y]).T.tolist()\n waypoints = [Position(x_, y_) for x_, y_ in path]\n\n distance_km = origin.distance(destination)\n\n return Route(\n self.clock.now, waypoints, trip_duration, distance_km, origin, destination\n )\n\n def estimate_duration(self, origin: Position, destination: Position) -> int:\n \"\"\" Duration in clock units\n\n Params\n ------\n\n origin : Position\n destination : Position\n\n Returns\n -------\n\n duration : int\n Trip duration in clock units\n \"\"\"\n\n distance_km = origin.distance(destination)\n # convert to minutes\n travel_time = distance_km / self.speed * 60\n\n return ceil(self.clock.time_to_clock_time(travel_time, \"m\"))\n\n def calculate_distance_matrix(\n self,\n sources: List[Position],\n destinations: List[Position],\n travel_time: bool = True,\n ) -> np.array:\n \"\"\" Calculate all-to-all travel time - all source to all destinations.\n Here distance means \"distance in time\"\n\n Params\n ------\n\n sources : list\n List of Positions\n\n destinations : list\n List of Positions\n\n Returns\n -------\n\n distance_matrix : np.array\n All-to-all trip durations (distance in time) in clock units\n \"\"\"\n\n n_sources = len(sources)\n n_dest = len(destinations)\n\n matrix = np.zeros([n_sources, n_dest])\n\n for ind1, src in enumerate(sources):\n for ind2, dest in enumerate(destinations):\n\n if travel_time:\n matrix[ind1, ind2] = self.estimate_duration(src, dest)\n else:\n matrix[ind1, ind2] = src.distance(dest)\n\n return matrix\n","sub_path":"simobility/routers/linear_router.py","file_name":"linear_router.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"423281884","text":"# -*- coding: utf-8 -*-\r\n\r\n# 3과목의 성적을 입력받아 총점과 평균\r\n# 그리고 등급을 출력하세요\r\n# 90점 이상 A, 80점 이상 B, 70점 이상 C,\r\n# 60점 이상 D, 그 외에는 F\r\n\r\n# 1. 입력\r\nnumbers = input(\"3개의 성적 입력(공백을 넣으세요) : \")\r\nnumbers_split = numbers.split()\r\nnum1 = int(numbers_split[0])\r\nnum2 = int(numbers_split[1])\r\nnum3 = int(numbers_split[2])\r\n\r\n# 2. 
처리\r\ntotal = num1 + num2 + num3\r\navg = total / 3\r\ngrade = \"?\"\r\n\r\nif avg > 100 or avg < 0 :\r\n grade = \"?\"\r\nelif avg >= 90:\r\n grade = \"A\"\r\nelif avg >= 80:\r\n grade = \"B\" \r\nelif avg >= 70:\r\n grade = \"C\"\r\nelif avg >= 60:\r\n grade = \"D\"\r\nelse:\r\n grade = \"F\"\r\n \r\nprint(f\"총점 : {total}점, 평균 : {avg:.2f}점\")\r\n\r\nif grade != \"?\" :\r\n print(f\"평가 : '{grade}' 등급\")\r\nelse:\r\n print(\"성적 점수를 확인하세요\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"day_03/if_05_example_1.py","file_name":"if_05_example_1.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419904023","text":"from collections import OrderedDict as odict\nimport importlib\n\nfrom omnium.processes import Process\n\nclass LastFiveDayMean(Process):\n name = 'last_five_day_mean'\n out_ext = 'txt'\n\n def load_modules(self):\n self.iris = importlib.import_module('iris')\n\n def load_upstream(self):\n super(LastFiveDayMean, self).load_upstream()\n filenames = [n.filename(self.config) for n in self.node.from_nodes]\n all_timeseries = self.iris.load(filenames)\n self.data = all_timeseries\n return all_timeseries\n\n def run(self):\n super(LastFiveDayMean, self).run()\n all_timeseries = self.data\n self.processed_data = []\n for timeseries in all_timeseries:\n if self.node.name == 'surf_ts_means_large_dom':\n five_days = -144*5*3 # 20s ts. output every 10ts\n else:\n five_days = -144*5 # output every 10 min.\n\n time_in_hours = timeseries.coord('time').points[-1]\\\n - timeseries.coord('time').points[five_days]\n value = timeseries[five_days:]\\\n .collapsed('time', self.iris.analysis.MEAN)\n # print(timeseries.name(), value.data)\n self.processed_data.append('{},{},{},{}'.format(timeseries.name(),\n time_in_hours,\n value.data,\n timeseries.units))\n\n def save(self):\n super(LastFiveDayMean, self).save()\n with open(self.node.filename(self.config), 'w') as f:\n f.write('\\n'.join(self.processed_data))\n","sub_path":"processes/last_five_day_mean.py","file_name":"last_five_day_mean.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"354812026","text":"# Copyright (C) 2018 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"Test Access Control Roleable mixin\"\"\"\nimport ddt\nfrom ggrc import db\nfrom ggrc.models import all_models\nfrom integration.ggrc import TestCase\nfrom integration.ggrc import api_helper\nfrom integration.ggrc.models import factories\n\n\n@ddt.ddt\nclass TestAccessControlRoleable(TestCase):\n \"\"\"TestAccessControlList\"\"\"\n\n def setUp(self):\n super(TestAccessControlRoleable, self).setUp()\n with factories.single_commit():\n self.role = factories.AccessControlRoleFactory()\n self.person = factories.PersonFactory()\n\n @ddt.data(lambda self: [{\n \"ac_role_id\": self.role.id,\n \"person\": {\n \"id\": self.person.id\n }\n }], lambda self: [{\n \"person\": self.person,\n \"ac_role\": self.role\n }])\n def test_with_dict(self, acl_list):\n \"\"\"Test access_control_list setter with a basic dict object\n This is the format the frontend uses\"\"\"\n obj = all_models.Control(\n title=\"New Control\",\n access_control_list=acl_list(self))\n self.assertIsNotNone(obj.access_control_list)\n acl = obj.access_control_list[0]\n self.assertIsNotNone(acl)\n self.assertIsInstance(acl, all_models.AccessControlList)\n self.assertEqual(acl.person.id, 
self.person.id)\n self.assertEqual(acl.ac_role.id, self.role.id)\n self.assertEqual(acl.object, obj)\n\n def test_with_dict_objs_multiple(self):\n \"\"\"Test access_control_list setter without ids\"\"\"\n\n def acl_query():\n return db.session.query(\n all_models.AccessControlList.person_id,\n all_models.AccessControlList.ac_role_id\n ).filter(\n all_models.AccessControlList.object_id == obj.id,\n all_models.AccessControlList.object_type == \"Control\"\n ).all()\n person_1 = all_models.Person(name=\"Frodo\", email=\"frodo@baggins.com\")\n person_2 = all_models.Person(name=\"Bilbo\", email=\"bilbo@baggins.com\")\n person_3 = factories.PersonFactory(name=\"Merry\", email=\"merry@buck.com\")\n role = all_models.AccessControlRole(name=\"Hobbit\")\n obj = all_models.Control(title=\"Test Control\", access_control_list=[{\n \"person\": person_1,\n \"ac_role\": self.role,\n }, {\n \"person\": person_2,\n \"ac_role\": role,\n }])\n db.session.commit()\n self.assertIsNotNone(obj.access_control_list)\n self.assertEqual(len(obj.access_control_list), 2)\n self.assertEqual(obj.access_control_list[0].person, person_1)\n self.assertEqual(obj.access_control_list[1].person, person_2)\n\n acls = acl_query()\n self.assertItemsEqual([\n (person_1.id, self.role.id),\n (person_2.id, role.id)\n ], acls)\n\n obj.access_control_list = [{\n \"person\": {\n \"id\": person_2.id,\n },\n \"ac_role_id\": role.id,\n }, {\n \"person\": {\n \"id\": person_3.id,\n },\n \"ac_role_id\": role.id,\n }]\n db.session.commit()\n\n acls = acl_query()\n self.assertItemsEqual([\n (person_2.id, role.id),\n (person_3.id, role.id)\n ], acls)\n\n def test_full_access_control_list(self):\n \"\"\"Test if access_control_list property filters out propagated roles\n\n Before sending the access_control_list to the frontend, propagated roles\n need to be filtered out to help prevent performance issues\"\"\"\n with factories.single_commit():\n # Create an object with one external and one propagated role\n obj = factories.ControlFactory()\n acl = factories.AccessControlList(\n object=obj,\n ac_role=self.role,\n person=self.person\n )\n factories.AccessControlList(\n object=obj,\n ac_role=self.role,\n person=self.person,\n parent=acl\n )\n # full_access_control_list should have all rows:\n self.assertEqual(len(obj.full_access_control_list), 2,\n \"full_access_control_list doesn't include all roles\")\n # access_control_list should only have non propagated ones\n self.assertEqual(len(obj.access_control_list), 1,\n \"access_control_list doesn't include all the roles\")\n obj_id, acl_id = obj.id, acl.id\n api = api_helper.Api()\n response = api.get(all_models.Control, obj_id)\n acl = response.json[\"control\"][\"access_control_list\"]\n # Check if the response filtered out the propagated access_control_role\n self.assertEqual(len(acl), 1,\n \"acl didn't filter out propagated roles correctly\")\n self.assertEqual(acl[0][\"id\"], acl_id,\n \"acl didn't filter out propagated roles correctly\")\n","sub_path":"test/integration/ggrc/access_control/test_roleable.py","file_name":"test_roleable.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"568535711","text":"import telebot\nfrom telebot import types\n\nbot = telebot.TeleBot(\"250806434:AAFEmXPORxni3FlMJuTPWtwYIxc04A_3j3U\")\n\n\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n print(\"+\")\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Не шути больше', 
'21')\n user_markup.row('Переверни моё сообщение пож')\n bot.send_message(message.from_user.id, 'Хай', reply_markup=user_markup)\n\n\n@bot.message_handler(commands=['help'])\ndef handle_start(message):\n print(\"+\")\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Не шути больше', '21')\n user_markup.row('Переверни моё сообщение пож')\n bot.send_message(message.from_user.id, 'Используй клаву и интуицию', reply_markup=user_markup)\n\n\n@bot.message_handler(content_types=[\"text\"])\ndef handle_command(message):\n\n if message.text == \"Не шути больше\":\n bot.send_message(message.chat.id, \"Ацтань\")\n elif message.text == \"21\":\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Тоттенхэм', 'Осип')\n user_markup.row('Бавария')\n bot.send_message(message.from_user.id, 'За кого ты болеешь?', reply_markup=user_markup)\n elif message.text == \"Переверни моё сообщение пож\":\n bot.send_message(message.chat.id, \"Что перевернуть?\")\n elif message.text == \"Ясен\":\n bot.send_message(message.chat.id, \"Красен\")\n\n elif message.text == \"Бавария\":\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Не шути больше', '21')\n user_markup.row('Переверни моё сообщение пож')\n bot.send_message(message.from_user.id, 'Вань, когда тебя уже отчислят?', reply_markup=user_markup)\n elif message.text == \"Тоттенхэм\":\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Не шути больше', '21')\n user_markup.row('Переверни моё сообщение пож')\n bot.send_message(message.from_user.id, 'Люблю их', reply_markup=user_markup)\n elif message.text == \"Осип\":\n user_markup = telebot.types.ReplyKeyboardMarkup(True, True)\n user_markup.row('Не шути больше', '21')\n user_markup.row('Переверни моё сообщение пож')\n bot.send_message(message.from_user.id, 'Мой пидор', reply_markup=user_markup)\n else:\n newString = \"\"\n ranged = len(message.text) - 1\n while ranged != -1:\n newString += message.text[ranged]\n ranged -= 1\n bot.send_message(message.from_user.id, newString)\n\nbot.polling(none_stop=True, interval=0)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462333106","text":"import itertools\nimport curses\nimport curses.panel as panel\nfrom src.util import *\nfrom src.game import *\nimport src.game as Game\nimport textwrap\n\ndef color(color_name):\n \"Declaring color pairs for ncurses\"\n colors = Game.state.color_pairs\n return curses.color_pair(colors.get(color_name) or 1)\n\n\ndef UpdateUI():\n \"Called every frame to update the ui (ncurses)\"\n windows = Game.state.windows.values()\n # Render windows\n for window in windows:\n def window_render():\n window.win.move(1, 1) # move cursor to 1, 1\n window.win.clrtobot() # clear to bot\n window.render(window) # call the window's render method\n window.win.border() # redraw the border\n window.win.noutrefresh() # refresh with calling doupdate repeatedly\n \n window_render()\n\n # if there are still problems can probably fix with an occasional call to clearok\n # window.clearok(True) on a resize event and it should refresh appropriately\n curses.doupdate()\n curses.update_lines_cols()\n curses.panel.update_panels()\n\n\ndef NcursesReset():\n \"Exit the game cleanly\"\n Game.state.windows.screen.win.keypad(0)\n curses.nocbreak()\n curses.echo()\n curses.endwin()\n \n \ndef NcursesSetup():\n \"Setup ncurses 
tui library\"\n # curses.raw()\n curses.noecho() # don't print type back\n curses.curs_set(0) # hide cursor\n curses.cbreak() # react to keypresses instantly\n curses.start_color() # allow for colors\n curses.setupterm(\"Advenjur\") # set title\n curses.mousemask(curses.ALL_MOUSE_EVENTS | curses.REPORT_MOUSE_POSITION) # enable mouse\n curses.mouseinterval(25) # how long to wait for a mouse press\n\n\ndef ColorSetup():\n \"Setup colors to be used with ncurses\"\n # pair_number, foreground, background\n Game.state.color_pairs = adict()\n def add_color(name, colora, colorb):\n colors = Game.state.color_pairs\n pair = len(colors)+1\n a = curses.__dict__[f'color_{colora}'.upper()] # get color_blue for example from curses module\n b = curses.__dict__[f'color_{colorb}'.upper()]\n colors[name] = pair\n curses.init_pair(pair, a, b) # curses color system work in pairs\n \n add_color('black_on_blue', 'black', 'blue')\n add_color('grass', 'yellow', 'green')\n add_color('debug', 'red', 'black')\n add_color('game_log', 'magenta', 'black')\n add_color('main_text', 'cyan', 'black')\n add_color('cmd_text', 'blue', 'black')\n add_color('inventory', 'yellow', 'black')\n\n \ndef AddWindow1(name, win, render):\n height, width = win.getmaxyx()\n y, x = win.getbegyx()\n return AddWindow2(name, y, x, height, width, render, win=win)\n \n \ndef AddWindow2(name, y, x, height, width, render, win=None):\n if win is None:\n win = curses.newwin(height, width, y, x)\n \n win.idlok(True) # necessary for scrolling\n win.scrollok(1)\n \n Game.state.windows[name] = adict(name=name, \n win=win,\n panel=panel.new_panel(win),\n width=width,\n height=height,\n x=x, \n y=y,\n render=render)\n return Game.state.windows[name]\n\n\ndef UISetup():\n scr = AddWindow1('screen', curses.initscr(), MainWindowRender)\n scr.win.keypad(1) # will return a special value instead of a multibyte thing\n scr.win.timeout(1) # time to wait in milliseconds before rerendering\n scr.win.nodelay(True)\n scr.win.notimeout(True)\n NcursesSetup()\n ColorSetup()\n\n # Windows\n # Main Window on left side\n # Typing Window at bottom left\n # Interactive Window is on top right\n # Debug Window is on bottom right\n \n # Main Window\n width = 30\n cmd_window_height = 4\n main_window = AddWindow2('main_window', 1, 1,\n scr.height-cmd_window_height-2,\n scr.width-width,\n MainWindowRender)\n \n # CMD Window\n y = main_window.height + main_window.y\n cmd_window = AddWindow2('cmd_window', y, 1, \n cmd_window_height, \n scr.width-width,\n CmdWindowRender)\n\n\n height = 20\n width = width - 2\n \n # Side Display Window\n y = 1\n x = main_window.width+main_window.x\n height = 25\n sidedisplay_window = AddWindow2('sidedisplay_window', y, x, height, width,\n SideDisplayWindowRender)\n\n # Interactive Window\n y = sidedisplay_window.y + sidedisplay_window.height\n x = main_window.width+main_window.x\n interactive_window = AddWindow2('interactive_window', y, x, height, width,\n InteractiveWindowRender)\n\n # Debug Window, on bottom right.\n height = scr.height - (interactive_window.y + interactive_window.height) - 1\n y = interactive_window.y + interactive_window.height\n x = interactive_window.x\n\n debug_window = AddWindow2('debug_window', y, x, height, width, DebugWindowRender)\n\n\n # Log the size of terminal\n Game.Log(\"debug_log\", f\"Lines: {curses.LINES}\")\n Game.Log(\"debug_log\", f\"Cols: {curses.COLS}\")\n\n\n# count how many spaces starting at at\ndef spacelen(s, tabsize=2, at=0):\n spaces = 0 \n for character in s:\n if character == ' ': spaces += 1\n elif 
character == '\\t': spaces += tabsize\n else: break\n return spaces\n\n \ndef addstr(window, s, y, x, _color='debug'):\n \"Add string to window with text wrapping. Also whitespace at line start won't be highlighted.\"\n height, width = window.win.getmaxyx()\n tabsize = 2\n tw = textwrap.TextWrapper(expand_tabs=True, tabsize=tabsize, replace_whitespace=False,\n drop_whitespace=False, width=width-3)\n line_count = 0\n _color = color(_color)\n final_lines = []\n for split_line in s.split('\\n'):\n if split_line == '':\n final_lines.append('\\n')\n else:\n lines = tw.wrap(split_line)\n for line in lines:\n final_lines.append(line)\n \n # only show the final lines, this effectively scrolls the window\n # +2 is the border/margin size\n final_lines = final_lines[-window.height+2:]\n \n for line_count, line in enumerate(final_lines):\n spaces = spacelen(line, tabsize)\n window.win.addstr(y+line_count, x+spaces, line[spaces:], _color)\n \n # Return the final y position\n return y + len(final_lines)\n\n\n# For inventory and so forth\ndef InteractiveWindowRender(window):\n player = Game.state.player\n items = player._items\n items_str = '\\n\\n'.join(map(str, items))\n s = (\n f\"\"\"Inventory:\\n\n{items_str}\n\"\"\")\n y = addstr(window, s, 1, 1, 'inventory')\n \n# Rendering Functions for windows\ndef SideDisplayWindowRender(window):\n input_mode = Game.state.input_mode\n player = Game.state.player\n width = window.width\n \n y = addstr(window, f\"\"\"{player.name}\"\"\", 1, 1)\n window.win.hline(y+1, 0, curses.ACS_HLINE, width) \n y = addstr(window, f\"\"\"Mode: {input_mode}\n\nDirections:\\n\\tn(orth)\\n\\ts(outh)\\n\\te(ast)\\n\\tw(est)\n\nActions:\\n\\tg(et)\\n\\td(rop)\n\n😛\nああああ\n█▀█ █▄█ ▀█▀ \n\"\"\", y+2, 1)\n\n y = window.height - 3\n addstr(window, \"q(uit)\\n`(return to normal mode)\", y, 1)\n\n\ndef DebugWindowRender(window):\n def debug_display():\n return \"Debug:\\n\\t\" + \"\\n\\t\".join(Game.state.debug_log[-8:])\n addstr(window, debug_display(), 1, 1, 'debug')\n\n\n# Idea: draw the map using boxes.\n# Idea: make a window with a draggable interior. 
It does this by\n# using its position to offset how it renders things.\ndef MainWindowRender(window):\n player = Game.state.player\n room = player.room\n items = room._items \n # have to list out items\n # annoyingly have to map even if I make a __str__ and __repr__ method\n items_str = '\\n\\n'.join(map(str, items))\n \n# window.win.bkgdset('@')\n# TODO give every item in the room a temporary number that can be used with a g(et) command\n s = f\"\"\"\nRoom {room.name}\n{items_str}\n\"\"\"\n addstr(window, s, 1, 1, 'main_text')\n def game_log_display():\n return \"\\n\".join(Game.state.game_log[-8:])\n \n y = window.height - 10\n addstr(window, game_log_display(), y, 1, 'game_log')\n \ndef CmdWindowRender(window):\n s = Game.state.text_buffer \n # not working\n window.win.attron(curses.A_DIM | curses.A_ALTCHARSET | curses.A_UNDERLINE)\n addstr(window, s, 1, 1, 'cmd_text')\n window.win.addch(curses.ACS_PI)\n window.win.attroff(curses.A_DIM | curses.A_ALTCHARSET | curses.A_UNDERLINE)\n \n","sub_path":"src/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":8758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"397497580","text":"import pandas as pd\r\nimport numpy as np\r\nimport torch\r\n\r\n\r\ndef crossEntropyLossValue(tensor1,tensor2):\r\n '''\r\n you must rewrite your own crossEntropyLoss since\r\n the pytorch version of crossEntropyLoss is\r\n (p(x)*log(q(x))).sum()\r\n but the crossEntropyLoss applied in this paper is\r\n (p(x)*log(q(x))+(1-p(x))*log(1-q(x))).sum()\r\n '''\r\n # loss = (-tensor1*torch.log(tensor2)-(1-tensor1)*torch.log(1-tensor2)).sum()/tensor1.shape[0]\r\n loss = ((tensor1-tensor2)*(tensor1-tensor2)).sum()\r\n return loss\r\n\r\ndef read_csv():\r\n loss = 0\r\n data = pd.read_csv('fake_fingerprint.csv',header=None)\r\n mean = pd.read_csv('mean_targetData.csv',header=None)\r\n data_array = np.array(data)\r\n mean_array = np.array(mean)\r\n for i in range(7400):\r\n print(i)\r\n tensor1 = torch.from_numpy(data_array[i])\r\n tensor2 = torch.from_numpy(mean_array[0])\r\n loss = loss+crossEntropyLossValue(tensor1,tensor2)\r\n print(loss/7400)\r\n\r\n\r\nif __name__ == '__main__':\r\n read_csv()","sub_path":"pandasReadCSV.py","file_name":"pandasReadCSV.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388460440","text":"import os,sys\nimport ntpath\nimport pybatdata.tkbat as tkbat\nimport pybatdata.constants as cte\nfrom pybatdata.iobat import fileclass\nimport pybatdata.iobat as iobat\nfrom pybatdata.iobiologic import prep_biologic\nimport preparenovonix.novonix_prep as prep\nfrom preparenovonix.novonix_io import after_file_name\n\n \ndef checkequal(lst):\n return lst[1:] == lst[:-1]\n\n\ndef find_testers():\n # Initialize the testers list\n fileclass.tester = ['None'] * len(fileclass.name)\n\n # Loop over each input file\n for ii,infile in enumerate(fileclass.name):\n if (infile == 'None'):\n continue\n\n with open(infile, 'r', encoding='utf-8',\n errors='replace') as ff:\n # Read the header\n for line in ff:\n tester_found = False ; tester = 'None'\n if line.strip():\n char1 = line.strip()[0]\n if char1 in cte.numberstr:\n for hs in cte.headstr_tester:\n # Find the name of the input file\n fname = ntpath.basename(infile)\n if hs.lower() in fname.lower():\n # Find if the tester is in the name\n ih = cte.headstr_tester.index(hs)\n tester = cte.testers[ih]\n tester_found = True\n print(tester_found,tester)\n 
break\n if (tester_found): \n fileclass.tester[ii] = tester\n print(\n \"WARNING! Truncated header in file \\n\"\n + \" \"\n + str(infile)\n + \" \\n\"\n ) \n break\n else:\n print(\n \"WARNING! Unknown tester for file \\n\"\n + \" \"\n + str(infile)\n + \", \\n\"\n + \"(Raise an issue in the GitHub repository).\"\n ) \n break\n else:\n for hs in cte.headstr_tester:\n if hs.lower() in line.lower():\n ih = cte.headstr_tester.index(hs)\n tester = cte.testers[ih]\n tester_found = True\n break\n if (tester_found): \n fileclass.tester[ii] = tester\n break\n return\n\n\ndef type_experiment():\n # Initialize the type of experiment list\n fileclass.experiment = [cte.experiments[0]] * len(fileclass.name)\n\n # Loop over each input file\n for ii,tester in enumerate(fileclass.tester):\n exp = cte.experiments[0]\n\n s = cte.separators[cte.testers.index(tester)]\n\n col_names = iobat.read_col_names(fileclass.name[ii],\n fileclass.header_nl[ii],\n splitter=s)\n col = cte.freq_col(tester)\n if (col in col_names):\n exp = cte.experiments[1]\n\n fileclass.experiment[ii] = exp\n\n return\n\n\ndef prep_files():\n # Loop over each input file\n for ii,infile in enumerate(fileclass.name):\n print('\\n * Preparing: {}'.format(infile))\n problem = False\n if (infile == 'None' or fileclass.header_nl[ii] < 2):\n continue\n \n # Tester\n tester = fileclass.tester[ii]\n \n # Extra file preparation if needed\n if(tester == cte.testers[1]):\n try:\n prep_biologic(infile,fileclass.header_nl[ii],\n fileclass.experiment[ii],zcycle=True,\n overwrite=False,verbose=True)\n #fileclass.name[ii] = after_file_name(infile) ## TO EXPAND\n problem = False\n except:\n problem = True\n elif(tester == cte.testers[2]):\n infile = fileclass.name[ii]\n try:\n prep.prepare_novonix(infile, addstate=True, lprotocol=True,\n overwrite=False, verbose=True)\n fileclass.name[ii] = after_file_name(infile)\n problem = False\n except:\n problem = True\n\n fileclass.problem[ii] = problem\n return\n\n\ndef check_files():\n # Loop over each input file\n for ii,infile in enumerate(fileclass.name):\n problem = False\n print('\\n * Checking: {}'.format(infile))\n if (infile == 'None' or fileclass.header_nl[ii] < 2):\n continue\n\n # Tester\n tester = fileclass.tester[ii]\n s = cte.separators[cte.testers.index(tester)]\n\n # Read the column names\n col_names = iobat.read_col_names(fileclass.name[ii],\n fileclass.header_nl[ii],\n splitter=s)\n\n # Read the first row with data\n data1 = iobat.read_row_data1(fileclass.name[ii],\n fileclass.header_nl[ii],\n splitter=s)\n\n # The columns in the header should match the data\n if (len(col_names) != len(data1)):\n print('WARNING from loadbat \\n',\n 'Columns in header={}, Data columns= {} in file:\\n {}'.format(\n len(col_names),len(data1),infile))\n return True\n\n # The column header should contain some fundamental columns\n if (fileclass.experiment[0] == cte.experiments[0]):\n cols = [cte.time_col(tester), cte.v_col(tester),\n cte.i_col(tester), cte.loop_col(tester),\n cte.state_col(tester)]\n elif (fileclass.experiment[0] == cte.experiments[1]):\n cols = [cte.time_col(tester), cte.freq_col(tester),\n cte.Re_col(tester),cte.Im_col(tester)]\n\n for col in cols:\n if (col not in col_names):\n print('WARNING from loadbat, file: \\n',\n infile,'\\n',\n 'does not contain column ',col)\n return True\n\n fileclass.problem[ii] = problem\n return\n\n \ndef load_files(GUI=False):\n if GUI:\n tkbat.select_files()\n\n # Check that all the files exists\n iobat.file_exists()\n # Remove files that do not exist\n nind = 
fileclass.name.count('None')\n for ic in range(nind):\n ii = fileclass.name.index('None')\n fileclass.name.pop(ii)\n\n # Find the testers corresponding to each file\n find_testers()\n # Remove files without a recognised tester\n nind = fileclass.tester.count('None')\n for ic in range(nind):\n ii = fileclass.tester.index('None')\n fileclass.name.pop(ii)\n fileclass.tester.pop(ii)\n\n # Count header lines\n iobat.count_header_lines()\n\n # Type of experiment (Cycling,EIS)\n type_experiment()\n\n # Initialize the list of problems\n fileclass.problem = [False] * len(fileclass.name)\n\n # Prepare files if needed\n prep_files()\n \n # Check files \n check_files()\n # Remove files with problems\n nind = fileclass.problem.count(True)\n for ic in range(nind):\n ii = fileclass.problem.index(True) \n fileclass.name.pop(ii)\n fileclass.tester.pop(ii)\n fileclass.header_nl.pop(ii)\n fileclass.problem.pop(ii)\n\n # Check that all the considered files correspond to\n # the same type of experiment\n answer = checkequal(fileclass.experiment)\n if answer:\n experiment = fileclass.experiment[0]\n fileclass.experiment = experiment\n else:\n print('\\n STOP! input files \\n',\n fileclass.name,\n '\\n correspond to different type of experiments \\n')\n sys.exit()\n\n return \n","sub_path":"pybatdata/loadbat.py","file_name":"loadbat.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209832106","text":"from setuptools import setup\n\nwith open('README') as f:\n long_description = f.read()\n\nwith open('VERSION') as f:\n version = f.readline().strip()\n\nsetup(\n name='ase-gaming',\n description='playing w/ setup.py',\n long_description=long_description,\n license='MIT',\n author='@wolfhesse',\n author_email='wolfgang.schuessel@gmail.com',\n url='asecms.base.wolfspool.at/py-ase-gaming-pg',\n version=version,\n packages=[\n 'ase_gaming',\n ],\n # scripts=[\n # 'scripts/eins.py',\n # ],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT license',\n ],\n zip_safe=False,\n install_requires=['pytest', 'ase_game_py_mod'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223128351","text":"# Find the elements that appeared in all sorted lists.\n# O(k) space and O(n) time.\n# Question: https://goo.gl/cWrfwM\n\n# Companies: Uber\n\ndef commonElement(lists):\n if len(lists) == 0:\n return []\n res, indices = [], [0] * len(lists)\n for p, n in enumerate(lists[0]):\n allFound = True\n for lid in range(1, len(lists)): # iterate over remaining lists\n while indices[lid] < len(lists[lid]) and lists[lid][indices[lid]] < n:\n indices[lid] += 1\n if indices[lid] == len(lists[lid]) or lists[lid][indices[lid]] > n:\n allFound = False\n break # one list does not contain n\n if allFound:\n res.append(n) # all lists contain n\n return res\n\n\nlists1 = [\n [23, 34, 67, 89, 123, 566, 1000],\n [11, 22, 23, 24,33, 37, 185, 566, 987, 1223, 1234],\n [23, 43, 67, 98, 566, 678],\n [1, 4, 5, 23, 34, 76, 87, 132, 566, 665],\n [1, 2, 3, 23, 24, 344, 566],\n]\n\nprint(commonElement(lists1))\n","sub_path":"src/main/python/company/common_elements_k_sorted_lists.py","file_name":"common_elements_k_sorted_lists.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"598739309","text":"#!/usr/bin/env python3.6\nfrom math import *\nfrom decimal import *\nimport sys\nimport argparse\n\nex = 5\neps = round(pow(1/10, ex), ex)\npartsize = 2\n\n\ndef main():\n stat = \"sin(x)\"\n\n if len(sys.argv) > 1:\n parser = argparse.ArgumentParser(description='Proof of Bolzano-Cauchy Theorem calculator',\n usage=\"__main__.py STATEMENT [-h] [-a EX] [-p PARTSIZE]\"\n \"[-r SEG [SEG ...]] [-s STAT]\")\n parser.add_argument('-a', action='store', default=3, type=int, dest='ex',\n help='Accuracy of numbers (<= 4).')\n parser.add_argument('-p', action='store', default=2, type=float, dest='partsize',\n help='Size of parts segment to be divided.')\n parser.add_argument('-r', action='store', default=(-32, 32), nargs='+', type=float, dest='seg',\n help='Segment to be considered.')\n parser.add_argument('-s', action='store', default=\"sin(x)\", type=str, dest='stat',\n help='The statement.')\n\n args = parser.parse_args()\n global ex\n ex = args.ex\n if ex > 4:\n print(\"\\033[91mUsing accuracy > 4 is highly not recommended!\\033[0m\")\n global partsize\n partsize = args.partsize\n\n seg = args.seg\n stat = args.stat\n\n global parts\n parts = round((seg[1] - seg[0]) / partsize)\n\n print(\"\\033[92mLooking for roots f(x) = \" + stat)\n print(\"on [\" + str(seg[0]) + \"; \" + str(seg[1]) + \"] segment\" + \"\\033[0m\")\n\n roots = find_roots(stat, seg)\n print(\"\\t\" + str(len(roots)) + \" roots found.\")\n\n i = 0\n for i in range(0, len(roots)):\n if roots[i] == 0:\n print(str(i + 1) + \". \" + str(0))\n else:\n print(str(i + 1) + \". \" + str(roots[i]))\n\n\ndef find_roots(stat, intseg, roots=[]):\n isfound = False\n segs = divide(intseg, parts)\n for seg in segs:\n left = calculate(stat, seg[0])\n right = calculate(stat, seg[1])\n if (left * right) <= 0:\n if calculate(stat, seg[1]) == 0:\n root = seg[1]\n isfound = True\n else:\n if seg[1] - seg[0] >= eps:\n root = find_roots(stat, seg, roots)\n isfound = True\n else:\n return round((seg[0] + seg[1]) / 2, ex)\n if isfound:\n if type(root) is not list and not find(roots, root):\n roots.append(root)\n isfound = False\n return roots\n\n\ndef find(list, obj):\n for el in list:\n if el == obj:\n return True\n return False\n\n\ndef calculate(stat, arg):\n stat = stat.replace('x', '(' + str(arg) + ')')\n try:\n ans = eval(stat)\n return ans\n except Exception as msg:\n print(\"\\033[91mCan't calculate \\\"f(x) = \" + stat + \"\\\"\\033[0m\")\n sys.exit()\n\n\ndef divide(seg, times):\n segs = []\n i = 0\n diff = seg[1] - seg[0]\n for i in range(0, times):\n if i == 0:\n left = seg[0]\n else:\n left = right\n right = diff * (i + 1) / times + seg[0]\n segs.append([left, right])\n return segs\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"12283534","text":"def recent_deaths(family):\n recent = []\n\n for x in family:\n dday = family[x]['dday']\n dyear = dday[7:11]\n dmonth = dday[3:6]\n ddate = dday[0:2]\n if dyear == '2020' and turnMonthToNum(dmonth) >= turnMonthToNum('NOV'):\n recent.append(family[x]['name'])\n return str(recent)\n\n\nmyfamily = {\n 1: {\n \"name\": \"Dom Ortiz\",\n \"dday\": '24 JAN 1998',\n },\n 2: {\n \"name\": \"Jadon Ortiz\",\n 'dday': '15 NOV 2002',\n },\n 3: {\n \"name\": \"Vi Ortiz\",\n \"dday\": '01 NOV 2020',\n }\n}\n\ndef turnMonthToNum(month):\n return{\n 'JAN' : 1,\n 'FEB' : 2,\n 'MAR' : 3,\n 'APR' : 4,\n 
'MAY' : 5,\n        'JUN' : 6,\n        'JUL' : 7,\n        'AUG' : 8,\n        'SEP' : 9, \n        'OCT' : 10,\n        'NOV' : 11,\n        'DEC' : 12\n    }[month]\n\n","sub_path":"US36.py","file_name":"US36.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110253886","text":"from flask import redirect, url_for, request, abort, flash, session, render_template\nfrom web import app, authorize\nfrom core.ops import leagues, events, event_invites\nfrom core.errors import ValidationError\nimport datetime\n\n\n\n@authorize\n@app.route('/leagues/<league_slug>/new/', methods=['GET'])\ndef events_new(league_slug):\n    start = datetime.datetime.utcnow()\n    end = start + datetime.timedelta(days=2)\n    league = leagues.get(league_slug)\n\n    if league and league.creator_username == session['username']:\n        return render_template('events/new.html', league=leagues.get(league_slug), start=start, end=end)\n\n    abort(401)\n\n\n\n@authorize\n@app.route('/leagues/<league_slug>/', methods=['POST'])\ndef events_create(league_slug):\n    name = request.form.get('name')\n    start, end = __build_dates(request.form)\n    max_number_of_teams = int(request.form.get('max_number_of_teams'))\n    max_team_size = int(request.form.get('max_team_size'))\n    bug_label = request.form.get('bug_label')\n    feature_label = request.form.get('feature_label')\n\n    league = leagues.get(league_slug)\n    if league and league.creator_username == session['username']:\n        try:\n            event = events.create(name, league_slug, start, end, max_number_of_teams, max_team_size, bug_label, feature_label)\n            flash(\"Event created\", 'success')\n            return redirect(url_for('events_show', league_slug=league.slug, slug=event.slug))\n        except ValidationError as e:\n            flash(e.message, 'error')\n            return redirect(url_for('events_new', league_slug=league.slug))\n\n\n\n@app.route('/leagues/<league_slug>/events/<slug>/', methods=['GET', 'DELETE'])\ndef events_show(league_slug, slug):\n    if request.method == 'DELETE':\n        return redirect(url_for('events_delete', league_slug=league_slug, slug=slug))\n\n    event = events.get(slug, league_slug)\n\n    if event:\n        return render_template('events/show.html', event=event)\n\n    return abort(404)\n\n\n@authorize\n@app.route('/leagues/<league_slug>/events/<slug>/edit/', methods=['GET'])\ndef events_edit(league_slug, slug):\n    event = events.get(slug, league_slug)\n    league = leagues.get(league_slug)\n\n    if event and event.league.creator_username == session.get('username'):\n        return render_template('events/edit.html', event=event, league=league)\n\n    return abort(401)\n\n\n\n@authorize\n@app.route('/leagues/<league_slug>/events/<slug>/', methods=['POST', 'PUT'])\ndef events_update(league_slug, slug):\n    event = events.get(slug, league_slug)\n\n    if event and event.league.creator_username == session.get('username'):\n        start, end = __build_dates(request.form)\n        max_number_of_teams = int(request.form.get('max_number_of_teams'))\n        max_team_size = int(request.form.get('max_team_size'))\n        bug_label = request.form.get('bug_label')\n        feature_label = request.form.get('feature_label')\n\n        try:\n            events.update(event.slug, league_slug, start, end, max_number_of_teams, max_team_size, bug_label, feature_label)\n            flash(\"Event updated\", 'success')\n            return redirect(url_for('events_show', league_slug=league_slug, slug=slug))\n        except ValidationError as e:\n            flash(e.message, 'error')\n            return redirect(url_for('events_edit', league_slug=league_slug, slug=slug))\n\n    return abort(401)\n\n\n\n@authorize\n@app.route('/leagues/<league_slug>/events/<slug>/delete/', methods=['GET'])\ndef events_delete(league_slug, slug):\n    event = events.get(slug, league_slug)\n\n    if event and event.league.creator_username == session.get('username'):\n        events.delete(event)\n        flash(\"Event Deleted\", 'success')\n        return redirect(url_for('events_show', league_slug=league_slug, slug=slug))\n\n    return abort(401)\n\n\n@authorize\n@app.route('/leagues/<league_slug>/events/<slug>/invites/', methods=['POST'])\ndef event_invites_create(league_slug, slug):\n    event = events.get(slug, league_slug)\n\n    if event.league.creator_username == session.get('username'):\n        try:\n            event_invites.create(event.id, request.form.get('team_slug'))\n            flash(\"Invite Sent\")\n        except ValidationError as e:\n            flash(e.message, \"error\")\n\n        return redirect(url_for('events_show', league_slug=league_slug, slug=slug))\n\n    return abort(401)\n\n\n@authorize\n@app.route('/leagues/<league_slug>/events/<slug>/invites/<event_invite_id>/', methods=['POST'])\ndef event_invites_respond(league_slug, slug, event_invite_id):\n    invite = event_invites.get_by_id(event_invite_id)\n\n    if invite and invite.team.creator_username == session.get('username'):\n        if request.form.get('accept'):\n            try:\n                event_invites.accept(invite.event_id, invite.team_slug)\n                flash(\"Invite Accepted\", 'success')\n            except ValidationError as e:\n                flash(e.message, 'error')\n        else:\n            event_invites.decline(invite.event_id, invite.team_slug)\n            flash(\"Invite Declined\", 'success')\n\n        return redirect(url_for('teams_show', slug=invite.team_slug))\n\n    return abort(401)\n\n\ndef __build_dates(form):\n    start = datetime.datetime.strptime(form.get('start'),\"%Y-%m-%d %H:%M\")\n    end = datetime.datetime.strptime(form.get('end'),\"%Y-%m-%d %H:%M\")\n    return start, end","sub_path":"web/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":5245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"116813244","text":"# https://www.careercup.com/question?id=5719799180034048\n# integers are in the range 0 <= x <= size of array\n\ndef solution(lst):\n    for i in range(len(lst)):\n        elem = lst[i]\n        # None is visited, num is not, false if one occurrence, true is a duplicate\n        if elem is None or type(elem) == bool:\n            continue\n        else:\n            lst[i] = None\n        while elem is not None:\n            if type(lst[elem]) == bool:\n                if not lst[elem]:\n                    lst[elem], elem = True, None\n                else:\n                    elem = None\n            else:\n                lst[elem], elem = False, lst[elem]\n\n    res = []\n    for i in range(len(lst)):\n        if lst[i]:\n            res.append(i)\n\n    return res\n","sub_path":"find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"198578972","text":"# Model copied from https://github.com/abhijeet3922/Object-recognition-CIFAR-10\n# https://github.com/keras-team/keras/blob/master/examples/cifar10_cnn.py\n\nfrom keras.datasets import cifar10\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import BatchNormalization, Conv2D, MaxPooling2D\nfrom keras.optimizers import RMSprop\nfrom keras.regularizers import l2\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler\nfrom keras.utils import to_categorical\nimport os\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"1\"\n\nbatch_size = 64\nnum_classes = 10\nepochs = 200\ndata_augmentation = True\n\n# The data, split between train and test sets:\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test 
samples')\n\n\n# Normalize and OHC\n(x_train, y_train), (x_test, y_test) = cifar10.load_data()\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n \nx_train = x_train / 255.0\nx_test = x_test / 255.0\n \ny_train = to_categorical(y_train, num_classes)\ny_test = to_categorical(y_test, num_classes)\n\n\nweight_decay = 1e-4\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=l2(weight_decay), input_shape=x_train.shape[1:]))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=l2(weight_decay)))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=l2(weight_decay)))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=l2(weight_decay)))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.3))\n\nmodel.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=l2(weight_decay)))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=l2(weight_decay)))\nmodel.add(Activation('elu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(0.4))\n\nmodel.add(Flatten())\nmodel.add(Dense(num_classes))\nmodel.add(Activation('softmax'))\n\n\n# initiate RMSprop optimizer\nopt = RMSprop(lr=0.001, decay=1e-6)\n\n# Let's train the model using RMSprop\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\nprint(model.summary())\n\nmodel_type = 'vgg_like'\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'cifar10_%s_{epoch:03d}.h5' % model_type\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\nfilepath = os.path.join(save_dir, model_name)\n\n\ncheckpoint = ModelCheckpoint(filepath=filepath,\n monitor='val_acc',\n verbose=1,\n save_best_only=True)\ncsvlog = CSVLogger('cifar10_vgglike_log.csv')\n\ndef lr_schedule(epoch):\n lrate = 0.001\n if epoch > 75:\n lrate = 0.0005\n elif epoch > 100:\n lrate = 0.0003 \n return lrate\n\nlr_sched = LearningRateScheduler(lr_schedule)\n\ncallbacks = [checkpoint, csvlog, lr_sched]\n\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(x_test, y_test),\n shuffle=True)\nelse:\n print('Using real-time data augmentation.')\n # This will do preprocessing and realtime data augmentation:\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n zca_epsilon=1e-06, # epsilon for ZCA whitening\n rotation_range=15, # randomly rotate images in the range (degrees, 0 to 180)\n # randomly shift images horizontally (fraction of total width)\n width_shift_range=0.1,\n # randomly shift images vertically (fraction of total height)\n height_shift_range=0.1,\n shear_range=0., # set range for random shear\n zoom_range=0., # set range for random zoom\n channel_shift_range=0., # set range for random channel shifts\n # set mode for 
filling points outside the input boundaries\n fill_mode='nearest',\n cval=0., # value used for fill_mode = \"constant\"\n horizontal_flip=True, # randomly flip images\n vertical_flip=False, # randomly flip images\n # set rescaling factor (applied before any other transformation)\n rescale=None,\n # set function that will be applied on each input\n preprocessing_function=None,\n # image data format, either \"channels_first\" or \"channels_last\"\n data_format=None,\n # fraction of images reserved for validation (strictly between 0 and 1)\n validation_split=0.0)\n\n # Compute quantities required for feature-wise normalization\n # (std, mean, and principal components if ZCA whitening is applied).\n datagen.fit(x_train)\n\n # Fit the model on the batches generated by datagen.flow().\n model.fit_generator(datagen.flow(x_train, y_train,\n batch_size=batch_size),\n epochs=epochs, callbacks=callbacks,\n validation_data=(x_test, y_test),\n workers=8)\n\n# Score trained model.\nscores = model.evaluate(x_test, y_test, verbose=1)\nprint('Test loss:', scores[0])\nprint('Test accuracy:', scores[1])\n","sub_path":"Scripts/cifar10_scripts/cifar10_cnn_good.py","file_name":"cifar10_cnn_good.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"58175506","text":"import twitter\n\napi = twitter.Api(consumer_key='EpswX5oXXgWwRv2VcmROHYzUX',\n consumer_secret='tThHxDg78USbY2eDLqm2IR6AxU1w8ahSPFaHucSNZBsdskh7ar',\n access_token_key='758635295084384256-5JYlLxdl8gEbTVeUlpaLQGI0KiJHmPJ',\n access_token_secret='kQsA9TTjDIYPpxIUkooYY8kVHYPKWY9yfdOrh3JOdyaQI')\n\nprint(api.VerifyCredentials())\n\nTRACK = ['#เลื่อนแม่มึงสิ']\n\nLANGUAGES = ['th']\n\nfor line in api.GetStreamFilter(track=TRACK, languages=LANGUAGES):\n print(line)\n","sub_path":"twiter_module.py","file_name":"twiter_module.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"110409477","text":"__author__ = 'rbtying'\ntry:\n from cStringIO import StringIO, InputType, OutputType\n from StringIO import StringIO as pyStringIO\n\n def _checkIsStringIO(obj):\n return isinstance(obj, (InputType, OutputType, pyStringIO))\nexcept ImportError:\n from StringIO import StringIO\n\n def _checkIsStringIO(obj):\n return isinstance(obj, StringIO)\n\nimport pygame\n\n# ROS specific imports\nimport sensor_msgs.msg\n\n\nclass ImageConverter(object):\n \"\"\"\n Convert images/compressedimages to and from ROS\n \"\"\"\n\n _ENCODINGMAP_PY_TO_ROS = {'L': 'mono8', 'RGB': 'rgb8',\n 'RGBA': 'rgba8', 'YCbCr': 'yuv422'}\n _ENCODINGMAP_ROS_TO_PY = {'mono8': 'L', 'rgb8': 'RGB',\n 'rgba8': 'RGBA', 'yuv422': 'YCbCr'}\n _PIL_MODE_CHANNELS = {'L': 1, 'RGB': 3, 'RGBA': 4, 'YCbCr': 3}\n\n @staticmethod\n def to_ros(img):\n \"\"\"\n Convert a PIL/pygame image to a ROS compatible message (sensor_msgs.Image).\n \"\"\"\n\n # Everything ok, convert PIL.Image to ROS and return it\n if img.mode == 'P':\n img = img.convert('RGB')\n\n rosimage = sensor_msgs.msg.Image()\n rosimage.encoding = ImageConverter._ENCODINGMAP_PY_TO_ROS[img.mode]\n (rosimage.width, rosimage.height) = img.size\n rosimage.step = (ImageConverter._PIL_MODE_CHANNELS[img.mode]\n * rosimage.width)\n rosimage.data = img.tostring()\n return rosimage\n\n @classmethod\n def from_ros(cls, rosMsg):\n \"\"\"\n Converts a ROS sensor_msgs.Image or sensor_msgs.CompressedImage to a pygame Surface\n :param rosMsg: The message to convert\n :return: an 
alpha-converted pygame Surface\n \"\"\"\n pyimg = None\n if isinstance(rosMsg, sensor_msgs.msg.Image):\n pyimg = pygame.image.fromstring(rosMsg.data, (rosMsg.width, rosMsg.height),\n cls._ENCODINGMAP_ROS_TO_PY[rosMsg.encoding])\n elif isinstance(rosMsg, sensor_msgs.msg.CompressedImage):\n pyimg = pygame.image.load(StringIO(rosMsg.data))\n\n if not pyimg:\n raise TypeError('rosMsg is not an Image or CompressedImage!')\n\n return pyimg.convert_alpha()\n","sub_path":"image_coverter.py","file_name":"image_coverter.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180951037","text":"import pandas\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport datetime\nimport jieba\n\n# Path to the chat log file (export it from QQ Message Manager in TXT format)\nchat_log_path = './chat_log.txt'\n\n# Current line number in the TXT file\nline_num = 0\n# Total number of chat messages\nitem_sum = 0\n# Concatenated chat text\ntotal_text = ''\n\n# Adjust jieba word segmentation weights\njieba.suggest_freq('亲爱的', True)\njieba.suggest_freq('大可爱', True)\njieba.suggest_freq('小可爱', True)\njieba.suggest_freq('吧唧', True)\njieba.suggest_freq('亲亲', True)\njieba.suggest_freq('哼唧', True)\njieba.suggest_freq('emmm', True)\njieba.del_word('一下')\njieba.del_word('然后')\njieba.del_word('这样')\njieba.del_word('这个')\njieba.del_word('还是')\njieba.del_word('就是')\njieba.del_word('一个')\njieba.del_word('晚上')\njieba.del_word('什么')\njieba.del_word('那个')\njieba.del_word('觉得')\njieba.del_word('不是')\njieba.del_word('感觉')\njieba.del_word('可能')\njieba.del_word('没有')\njieba.del_word('有点')\njieba.del_word('怎么')\njieba.del_word('还有')\n\n# Single characters that should still be counted\nsingle_word = {'嗷', '嗯', '嬲'}\n\nhours = {}\n\n\n# Tally a message under the hour parsed from its timestamp\ndef countTime(str_date):\n # print(str_date)\n # date = datetime.datetime.strptime(str_date, \"%Y-%m-%d %H:%M:%S\")\n # print(date)\n try:\n hour = int(str_date[10:13])\n hours[hour] = hours.get(hour, 0) + 1\n except:\n hour = int(str_date[10:12])\n hours[hour] = hours.get(hour, 0) + 1\n return\n\n\n# Read the text file\nfor line in open(chat_log_path, 'r', encoding='utf-8'):\n line_num = line_num + 1\n if line_num < 9:\n continue\n if line == '':\n continue\n else:\n # Timestamp lines mark the start of a message\n if line.startswith('2017-') or line.startswith('2018-') or line.startswith('2019-'):\n item_sum = item_sum + 1\n countTime(line[0:19])\n continue\n else:\n # Skip chat images and emoticons\n if not (line.startswith('[图片]') or line.startswith('[表情]')):\n total_text = total_text + line\n\nprint(\"Total messages: \" + str(item_sum))\n\nwords = jieba.cut(total_text)\ncounts = {}\n\nfor word in words:\n if len(word) == 1:\n if word in single_word:\n counts[word] = counts.get(word, 0) + 1\n else:\n counts[word] = counts.get(word, 0) + 1\n\n# Convert the key-value pairs into a list\nitems = list(counts.items())\n# Sort by word frequency, descending\nitems.sort(key=lambda x: x[1], reverse=True)\n\nfor i in range(30):\n word, count = items[i]\n print(word + ' \\t' + str(count))\n\n# Plot chat frequency by hour of day as a line chart\nhours = [(k, hours[k]) for k in sorted(hours.keys())]\nplt.plot(hours)\nplt.title('Chat messages by hour of day')\nplt.ylabel('Number of messages')\nplt.xlabel('Time (hour)')\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530015659","text":"import apiman\nimport maxman\n\ndef getdata(ip):\n maxcity_cont = maxman.maxcityload(ip)\n maxasn_cont = maxman.maxasnload(ip)\n ipapi_cont = apiman.ipapiload(ip)\n freegeoip_cont = apiman.freegeoipload(ip)\n\n ipaddr = ipapi_cont[\"query\"]\n host = ipapi_cont[\"reverse\"]\n city = maxcity_cont[\"city\"]\n region = ipapi_cont[\"regionName\"] + \" (\" + 
maxcity_cont[\"region_code\"] + \")\"\n country = maxcity_cont[\"country_name\"] + \" (\" + maxcity_cont[\"country_code\"] + \")\"\n continent = maxcity_cont[\"continent\"]\n geoloc = str(maxcity_cont[\"latitude\"]) + \", \" + str(maxcity_cont[\"longitude\"])\n zip_code = str(maxcity_cont[\"postal_code\"])\n area_code = str(maxcity_cont[\"area_code\"])\n dma_code = str(maxcity_cont[\"dma_code\"])\n time_zone = maxcity_cont[\"time_zone\"]\n isp = ipapi_cont[\"isp\"]\n org = ipapi_cont[\"org\"]\n asn = maxasn_cont\n proxy = str(ipapi_cont[\"proxy\"])\n mobile = str(ipapi_cont[\"mobile\"])\n\n data = {\n \"ip\":ipaddr,\n \"host\":host,\n \"city\":city,\n \"region\":region,\n \"country\":country,\n \"continent\":continent,\n \"geoloc\":geoloc,\n \"zip_code\":zip_code,\n \"area_code\":area_code,\n \"dma_code\":dma_code,\n \"time_zone\":time_zone,\n \"isp\":isp,\n \"org\":org,\n \"asn\":asn,\n \"proxy\":proxy,\n \"mobile\":mobile}\n return data\n","sub_path":"dataman.py","file_name":"dataman.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183745951","text":"import numpy as np\r\nfrom collections import defaultdict\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\n\r\ndef get_glove(fname):\r\n\r\n with open(fname, \"rb\") as lines:\r\n wvec = {line.split()[0].decode(\"utf-8\"): np.array(line.split()[1:], dtype=np.float32)\r\n for line in lines}\r\n return wvec\r\n\r\n\r\n# sklearn's classifiers\r\nclass SumEmbeddingVectorizer(object):\r\n def __init__(self, word2vec):\r\n self.word2vec = word2vec\r\n if len(word2vec) > 0:\r\n self.dim = len(word2vec[next(iter(wvec))])\r\n else:\r\n self.dim = 0\r\n\r\n def fit(self, X, y):\r\n return self\r\n\r\n def transform(self, X):\r\n return np.array([\r\n np.sum([self.word2vec[w] for w in words if w in self.word2vec]\r\n or [np.zeros(self.dim)], axis=0)\r\n for words in X\r\n ])\r\n\r\n\r\nclass TfidfEmbeddingVectorizer(object):\r\n def __init__(self, word2vec):\r\n self.word2vec = word2vec\r\n self.word2weight = None\r\n if len(word2vec)>0:\r\n self.dim=len(word2vec[next(iter(wvec))])\r\n else:\r\n self.dim=0\r\n\r\n def fit(self, X, y):\r\n tfidf = TfidfVectorizer(analyzer=lambda x: x)\r\n tfidf.fit(X)\r\n # if a word was never seen - it must be at least as infrequent\r\n # as any of the known words - so the default idf is the max of\r\n # known idf's\r\n max_idf = max(tfidf.idf_)\r\n self.word2weight = defaultdict(\r\n lambda: max_idf,\r\n [(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()])\r\n\r\n return self\r\n\r\n def transform(self, X):\r\n return np.array([\r\n np.sum([self.word2vec[w] * self.word2weight[w]\r\n for w in words if w in self.word2vec] or\r\n [np.zeros(self.dim)], axis=0)\r\n for words in X\r\n ])\r\n","sub_path":"embedding_utils.py","file_name":"embedding_utils.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308341642","text":"from .boundary_loss import BDLoss,SoftDiceLoss,DC_and_BD_loss,DC_and_HDBinary_loss,DistBinaryDiceLoss,HDDTBinaryLoss\r\nfrom .dice_loss import DC_and_CE_loss,DC_and_topk_loss,GDiceLoss,GDiceLossV2,SSLoss,IoULoss,TopKLoss,TverskyLoss,FocalTversky_loss\r\nfrom .dice_loss import AsymLoss,PenaltyGDiceLoss,ExpLog_loss\r\nfrom .focal_loss import FocalLoss\r\nfrom .lovasz_loss import LovaszSoftmax\r\nfrom .ND_Crossentropy import 
CrossentropyND,WeightedCrossEntropyLoss,WeightedCrossEntropyLossV2,DisPenalizedCE\r\nfrom torch import nn\r\nlosses_seg = {}\r\nlosses_seg[\"BDLoss\"] = BDLoss\r\nlosses_seg[\"SoftDiceLoss\"] = SoftDiceLoss\r\nlosses_seg[\"DC_and_BD_loss\"] = DC_and_BD_loss\r\nlosses_seg[\"DC_and_HDBinary_loss\"] = DC_and_HDBinary_loss\r\nlosses_seg[\"DistBinaryDiceLoss\"] = DistBinaryDiceLoss\r\nlosses_seg[\"HDDTBinaryLoss\"] = HDDTBinaryLoss\r\nlosses_seg[\"DC_and_CE_loss\"] = DC_and_CE_loss\r\nlosses_seg[\"DC_and_topk_loss\"] = DC_and_topk_loss\r\nlosses_seg[\"GDiceLoss\"] = GDiceLoss\r\nlosses_seg[\"GDiceLossV2\"] = GDiceLossV2\r\nlosses_seg[\"SSLoss\"] = SSLoss\r\nlosses_seg[\"IoULoss\"] = IoULoss\r\nlosses_seg[\"TopKLoss\"] = TopKLoss\r\nlosses_seg[\"TverskyLoss\"] = TverskyLoss\r\nlosses_seg[\"FocalTversky_loss\"] = FocalTversky_loss\r\nlosses_seg[\"AsymLoss\"] = AsymLoss\r\nlosses_seg[\"PenaltyGDiceLoss\"] = PenaltyGDiceLoss\r\nlosses_seg[\"ExpLog_loss\"] = ExpLog_loss\r\nlosses_seg[\"FocalLoss\"] = FocalLoss\r\nlosses_seg[\"LovaszSoftmax\"] = LovaszSoftmax\r\nlosses_seg[\"CrossentropyND\"] = CrossentropyND\r\nlosses_seg[\"CrossEntropyLoss\"] = nn.CrossEntropyLoss\r\nlosses_seg[\"BCEWithLogitsLoss\"] = nn.BCEWithLogitsLoss\r\nlosses_seg[\"WeightedCrossEntropyLoss\"] = WeightedCrossEntropyLoss\r\nlosses_seg[\"WeightedCrossEntropyLossV2\"] = WeightedCrossEntropyLossV2\r\nlosses_seg[\"DisPenalizedCE\"] = DisPenalizedCE\r\n\r\ndef get_loss_func(name):\r\n return losses_seg[name]\r\n","sub_path":"utils/losses/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"253005252","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"Bswap Admin Tools\",\n \"author\": \"Pablo Vazquez, Matthew Muldoon\",\n \"version\": (0, 1),\n \"blender\": (2, 71),\n \"location\": \"Everywhere!\",\n \"description\": \"A collection of tools and settings to improve productivity\",\n \"warning\": \"\",\n \"wiki_url\": \"http://pablovazquez.org/amaranth\",\n \"tracker_url\": \"\",\n \"category\": \"Scene\"}\n\n\nimport bpy\nimport bmesh\nfrom bpy.types import Operator, AddonPreferences, Panel, Menu\nfrom bpy.props import (BoolProperty, EnumProperty,\n FloatProperty, FloatVectorProperty,\n IntProperty, StringProperty)\nfrom mathutils import Vector\nfrom bpy.app.handlers import persistent\nfrom bl_operators.presets import AddPresetBase\n\n# Addon wide, we need to know if cycles is available\ncycles_exists = False\n\n\ndef check_cycles_exists():\n global cycles_exists\n cycles_exists = ('cycles' in dir(bpy.types.Scene)) \n return cycles_exists\n\n\ncheck_cycles_exists()\n\n\n# Preferences\nclass AmaranthToolsetPreferences(AddonPreferences):\n bl_idname = __name__\n use_scene_stats = BoolProperty(\n \tname=\"Extra Scene Statistics\",\n description=\"Display extra scene statistics in Info editor's header\",\n default=True,\n )\n\n# Scene Debug\n # Cycles Node Types\n if check_cycles_exists():\n cycles_shader_node_types = [\n (\"BSDF_DIFFUSE\", \"Diffuse BSDF\", \"\", 0),\n (\"BSDF_GLOSSY\", \"Glossy BSDF\", \"\", 1),\n (\"BSDF_TRANSPARENT\", \"Transparent BSDF\", \"\", 2),\n (\"BSDF_REFRACTION\", \"Refraction BSDF\", \"\", 3),\n (\"BSDF_GLASS\", \"Glass BSDF\", \"\", 4),\n (\"BSDF_TRANSLUCENT\", \"Translucent BSDF\", \"\", 5),\n (\"BSDF_ANISOTROPIC\", \"Anisotropic BSDF\", \"\", 6),\n (\"BSDF_VELVET\", \"Velvet BSDF\", \"\", 7),\n (\"BSDF_TOON\", \"Toon BSDF\", \"\", 8),\n (\"SUBSURFACE_SCATTERING\", \"Subsurface Scattering\", \"\", 9),\n (\"EMISSION\", \"Emission\", \"\", 10),\n (\"BSDF_HAIR\", \"Hair BSDF\", \"\", 11),\n (\"BACKGROUND\", \"Background\", \"\", 12),\n (\"AMBIENT_OCCLUSION\", \"Ambient Occlusion\", \"\", 13),\n (\"HOLDOUT\", \"Holdout\", \"\", 14),\n (\"VOLUME_ABSORPTION\", \"Volume Absorption\", \"\", 15),\n (\"VOLUME_SCATTER\", \"Volume Scatter\", \"\", 16)\n ]\n\n scene.amaranth_cycles_node_types = EnumProperty(\n items=cycles_shader_node_types, name = \"Shader\")\n\n scene.amaranth_cycles_list_sampling = BoolProperty(\n default=False,\n name=\"Samples Per:\")\n\n bpy.types.CyclesRenderSettings.use_samples_final = BoolProperty(\n name=\"Use Final Render Samples\",\n description=\"Use current shader samples as final render samples\",\n default=False)\n\n scene.amaranth_lighterscorner_list_meshlights = BoolProperty(\n default=False,\n name=\"List Meshlights\",\n description=\"Include light emitting meshes on the list\")\n\n scene.amaranth_debug_scene_list_missing_images = BoolProperty(\n default=False,\n name=\"List Missing Images\",\n description=\"Display a list of all the missing images\")\n\n bpy.types.ShaderNodeNormal.normal_vector = prop_normal_vector\n bpy.types.CompositorNodeNormal.normal_vector = prop_normal_vector\n\n bpy.types.Object.is_keyframe = is_keyframe\n\n scene.amth_wire_toggle_scene_all = BoolProperty(\n default=False,\n name=\"All Scenes\",\n description=\"Toggle wire on 
objects in all scenes\")\n scene.amth_wire_toggle_is_selected = BoolProperty(\n default=False,\n name=\"Only Selected\",\n description=\"Only toggle wire on selected objects\")\n scene.amth_wire_toggle_edges_all = BoolProperty(\n default=True,\n name=\"All Edges\",\n description=\"Draw all edges\")\n scene.amth_wire_toggle_optimal = BoolProperty(\n default=False,\n name=\"Optimal Display\",\n description=\"Skip drawing/rendering of interior subdivided edges \"\n \"on meshes with Subdivision Surface modifier\")\n\ndef clear_properties():\n props = (\n \"use_unsimplify_render\",\n \"simplify_status\",\n \"use_matching_indices\",\n \"use_simplify_nodes_vector\",\n \"status\",\n \"types\",\n \"toggle_mute\",\n \"amaranth_cycles_node_types\",\n \"amaranth_lighterscorner_list_meshlights\",\n \"amaranth_debug_scene_list_missing_images\",\n \"amarath_cycles_list_sampling\",\n \"normal_vector\",\n \"use_samples_final\",\n 'amth_wire_toggle_is_selected',\n 'amth_wire_toggle_scene_all',\n \"amth_wire_toggle_edges_all\",\n \"amth_wire_toggle_optimal\"\n )\n \n wm = bpy.context.window_manager\n for p in props:\n if p in wm:\n del wm[p]\n# FEATURE: Scene Debug\nclass AMTH_SCENE_OT_cycles_shader_list_nodes(Operator):\n \"\"\"List Cycles materials containing a specific shader\"\"\"\n bl_idname = \"scene.cycles_list_nodes\"\n bl_label = \"List Materials\"\n materials = []\n\n @classmethod\n def poll(cls, context):\n return cycles_exists and context.scene.render.engine == 'CYCLES'\n\n def execute(self, context):\n node_type = context.scene.amaranth_cycles_node_types\n roughness = False\n self.__class__.materials = []\n shaders_roughness = ['BSDF_GLOSSY','BSDF_DIFFUSE','BSDF_GLASS']\n\n print(\"\\n=== Cycles Shader Type: %s === \\n\" % node_type)\n\n for ma in bpy.data.materials:\n if ma.node_tree:\n nodes = ma.node_tree.nodes\n \n print_unconnected = ('Note: \\nOutput from \"%s\" node' % node_type,\n 'in material \"%s\"' % ma.name, 'not connected\\n')\n\n for no in nodes:\n if no.type == node_type:\n for ou in no.outputs:\n if ou.links:\n connected = True\n if no.type in shaders_roughness:\n roughness = 'R: %.4f' % no.inputs['Roughness'].default_value\n else:\n roughness = False\n else:\n connected = False\n print(print_unconnected)\n\n if ma.name not in self.__class__.materials:\n self.__class__.materials.append('%s%s [%s] %s%s%s' % (\n '[L] ' if ma.library else '',\n ma.name, ma.users,\n '[F]' if ma.use_fake_user else '',\n ' - [%s]' % roughness if roughness else '',\n ' * Output not connected' if not connected else ''))\n\n elif no.type == 'GROUP':\n if no.node_tree:\n for nog in no.node_tree.nodes:\n if nog.type == node_type:\n for ou in nog.outputs:\n if ou.links:\n connected = True\n if nog.type in shaders_roughness:\n roughness = 'R: %.4f' % nog.inputs['Roughness'].default_value\n else:\n roughness = False\n else:\n connected = False\n print(print_unconnected)\n\n if ma.name not in self.__class__.materials:\n self.__class__.materials.append('%s%s%s [%s] %s%s%s' % (\n '[L] ' if ma.library else '',\n 'Node Group: %s%s -> ' % (\n '[L] ' if no.node_tree.library else '',\n no.node_tree.name),\n ma.name, ma.users,\n '[F]' if ma.use_fake_user else '',\n ' - [%s]' % roughness if roughness else '',\n ' * Output not connected' if not connected else ''))\n\n self.__class__.materials = sorted(list(set(self.__class__.materials)))\n\n if len(self.__class__.materials) == 0:\n self.report({\"INFO\"}, \"No materials with nodes type %s found\" % node_type)\n else:\n print(\"* A total of %d %s using %s was found \\n\" 
% (\n len(self.__class__.materials),\n \"material\" if len(self.__class__.materials) == 1 else \"materials\",\n node_type))\n\n count = 0\n\n for mat in self.__class__.materials:\n print('%02d. %s' % (count+1, self.__class__.materials[count]))\n count += 1\n print(\"\\n\")\n\n self.__class__.materials = sorted(list(set(self.__class__.materials)))\n\n return {'FINISHED'}\n\nclass AMTH_SCENE_OT_cycles_shader_list_nodes_clear(Operator):\n \"\"\"Clear the list below\"\"\"\n bl_idname = \"scene.cycles_list_nodes_clear\"\n bl_label = \"Clear Materials List\"\n\n @classmethod\n def poll(cls, context):\n return cycles_exists\n\n def execute(self, context):\n AMTH_SCENE_OT_cycles_shader_list_nodes.materials[:] = []\n print(\"* Cleared Cycles Materials List\")\n return {'FINISHED'}\n\nclass AMTH_SCENE_OT_amaranth_object_select(Operator):\n '''Select object'''\n bl_idname = \"scene.amaranth_object_select\"\n bl_label = \"Select Object\"\n object = bpy.props.StringProperty()\n \n def execute(self, context):\n if self.object:\n object = bpy.data.objects[self.object]\n\n bpy.ops.object.select_all(action='DESELECT')\n object.select = True\n context.scene.objects.active = object\n\n return{'FINISHED'}\n\nclass AMTH_SCENE_OT_list_missing_node_links(Operator):\n '''Print a list of missing node links'''\n bl_idname = \"scene.list_missing_node_links\"\n bl_label = \"List Missing Node Links\"\n\n count_groups = 0\n count_images = 0\n count_image_node_unlinked = 0\n\n def execute(self, context):\n missing_groups = []\n missing_images = []\n image_nodes_unlinked = []\n libraries = []\n self.__class__.count_groups = 0\n self.__class__.count_images = 0\n self.__class__.count_image_node_unlinked = 0\n\n for ma in bpy.data.materials:\n if ma.node_tree:\n for no in ma.node_tree.nodes:\n if no.type == 'GROUP':\n if not no.node_tree:\n self.__class__.count_groups += 1\n\n users_ngroup = []\n\n for ob in bpy.data.objects:\n if ob.material_slots and ma.name in ob.material_slots:\n users_ngroup.append(\"%s%s%s\" % (\n \"[L] \" if ob.library else \"\",\n \"[F] \" if ob.use_fake_user else \"\",\n ob.name))\n\n missing_groups.append(\"MA: %s%s%s [%s]%s%s%s\\n\" % (\n \"[L] \" if ma.library else \"\",\n \"[F] \" if ma.use_fake_user else \"\",\n ma.name, ma.users,\n \" *** No users *** \" if ma.users == 0 else \"\",\n \"\\nLI: %s\" % \n ma.library.filepath if ma.library else \"\",\n \"\\nOB: %s\" % ', '.join(users_ngroup) if users_ngroup else \"\"))\n\n if ma.library:\n libraries.append(ma.library.filepath)\n if no.type == 'TEX_IMAGE':\n\n outputs_empty = not no.outputs['Color'].is_linked and not no.outputs['Alpha'].is_linked\n\n if no.image:\n import os.path\n image_path_exists = os.path.exists(\n bpy.path.abspath(\n no.image.filepath, library=no.image.library))\n\n if outputs_empty or not \\\n no.image or not \\\n image_path_exists:\n\n users_images = []\n\n for ob in bpy.data.objects:\n if ob.material_slots and ma.name in ob.material_slots:\n users_images.append(\"%s%s%s\" % (\n \"[L] \" if ob.library else \"\",\n \"[F] \" if ob.use_fake_user else \"\",\n ob.name))\n\n if outputs_empty:\n self.__class__.count_image_node_unlinked += 1\n\n image_nodes_unlinked.append(\"%s%s%s%s%s [%s]%s%s%s%s%s\\n\" % (\n \"NO: %s\" % no.name,\n \"\\nMA: \",\n \"[L] \" if ma.library else \"\",\n \"[F] \" if ma.use_fake_user else \"\",\n ma.name, ma.users,\n \" *** No users *** \" if ma.users == 0 else \"\",\n \"\\nLI: %s\" % \n ma.library.filepath if ma.library else \"\",\n \"\\nIM: %s\" % no.image.name if no.image else \"\",\n \"\\nLI: %s\" 
% no.image.filepath if no.image and no.image.filepath else \"\",\n \"\\nOB: %s\" % ', '.join(users_images) if users_images else \"\"))\n \n\n if not no.image or not image_path_exists:\n self.__class__.count_images += 1\n\n missing_images.append(\"MA: %s%s%s [%s]%s%s%s%s%s\\n\" % (\n \"[L] \" if ma.library else \"\",\n \"[F] \" if ma.use_fake_user else \"\",\n ma.name, ma.users,\n \" *** No users *** \" if ma.users == 0 else \"\",\n \"\\nLI: %s\" % \n ma.library.filepath if ma.library else \"\",\n \"\\nIM: %s\" % no.image.name if no.image else \"\",\n \"\\nLI: %s\" % no.image.filepath if no.image and no.image.filepath else \"\",\n \"\\nOB: %s\" % ', '.join(users_images) if users_images else \"\"))\n\n if ma.library:\n libraries.append(ma.library.filepath)\n\n # Remove duplicates and sort\n missing_groups = sorted(list(set(missing_groups)))\n missing_images = sorted(list(set(missing_images)))\n image_nodes_unlinked = sorted(list(set(image_nodes_unlinked)))\n libraries = sorted(list(set(libraries)))\n\n print(\"\\n\\n== %s missing image %s, %s missing node %s and %s image %s unlinked ==\" %\n (\"No\" if self.__class__.count_images == 0 else str(self.__class__.count_images),\n \"node\" if self.__class__.count_images == 1 else \"nodes\",\n \"no\" if self.__class__.count_groups == 0 else str(self.__class__.count_groups),\n \"group\" if self.__class__.count_groups == 1 else \"groups\",\n \"no\" if self.__class__.count_image_node_unlinked == 0 else str(self.__class__.count_image_node_unlinked),\n \"node\" if self.__class__.count_groups == 1 else \"nodes\"))\n\n # List Missing Node Groups\n if missing_groups:\n print(\"\\n* Missing Node Group Links\\n\")\n for mig in missing_groups:\n print(mig)\n\n # List Missing Image Nodes\n if missing_images:\n print(\"\\n* Missing Image Nodes Link\\n\")\n\n for mii in missing_images:\n print(mii)\n\n # List Image Nodes with its outputs unlinked\n if image_nodes_unlinked:\n print(\"\\n* Image Nodes Unlinked\\n\")\n\n for nou in image_nodes_unlinked:\n print(nou)\n\n if missing_groups or \\\n missing_images or \\\n image_nodes_unlinked:\n if libraries:\n print(\"\\nThat's bad, run check on %s:\" % (\n \"this library\" if len(libraries) == 1 else \"these libraries\"))\n for li in libraries:\n print(li)\n else:\n self.report({\"INFO\"}, \"Yay! 
No missing node links\") \n\n print(\"\\n\")\n\n if missing_groups and missing_images:\n self.report({\"WARNING\"}, \"%d missing image %s and %d missing node %s found\" %\n (self.__class__.count_images, \"node\" if self.__class__.count_images == 1 else \"nodes\",\n self.__class__.count_groups, \"group\" if self.__class__.count_groups == 1 else \"groups\"))\n\n return{'FINISHED'}\n\nclass AMTH_SCENE_OT_list_missing_material_slots(Operator):\n '''List objects with empty material slots'''\n bl_idname = \"scene.list_missing_material_slots\"\n bl_label = \"List Empty Material Slots\"\n\n objects = []\n libraries = []\n\n def execute(self, context):\n self.__class__.objects = []\n self.__class__.libraries = []\n\n for ob in bpy.data.objects:\n for ma in ob.material_slots:\n if not ma.material:\n self.__class__.objects.append('%s%s' % (\n '[L] ' if ob.library else '',\n ob.name))\n if ob.library:\n self.__class__.libraries.append(ob.library.filepath)\n\n self.__class__.objects = sorted(list(set(self.__class__.objects)))\n self.__class__.libraries = sorted(list(set(self.__class__.libraries)))\n\n if len(self.__class__.objects) == 0:\n self.report({\"INFO\"}, \"No objects with empty material slots found\")\n else:\n print(\"\\n* A total of %d %s with empty material slots was found \\n\" % (\n len(self.__class__.objects),\n \"object\" if len(self.__class__.objects) == 1 else \"objects\"))\n\n count = 0\n count_lib = 0\n\n for obs in self.__class__.objects:\n print('%02d. %s' % (\n count+1, self.__class__.objects[count]))\n count += 1\n\n if self.__class__.libraries:\n print(\"\\n\\n* Check %s:\\n\" % \n (\"this library\" if len(self.__class__.libraries) == 1\n else \"these libraries\"))\n\n for libs in self.__class__.libraries:\n print('%02d. %s' % (\n count_lib+1, self.__class__.libraries[count_lib]))\n count_lib += 1\n print(\"\\n\")\n\n return{'FINISHED'}\n\nclass AMTH_SCENE_OT_list_missing_material_slots_clear(Operator):\n \"\"\"Clear the list below\"\"\"\n bl_idname = \"scene.list_missing_material_slots_clear\"\n bl_label = \"Clear Empty Material Slots List\"\n \n def execute(self, context):\n AMTH_SCENE_OT_list_missing_material_slots.objects[:] = []\n print(\"* Cleared Empty Material Slots List\")\n return {'FINISHED'}\n\nclass AMTH_SCENE_OT_blender_instance_open(Operator):\n '''Open in a new Blender instance'''\n bl_idname = \"scene.blender_instance_open\"\n bl_label = \"Open Blender Instance\"\n filepath = bpy.props.StringProperty()\n\n def execute(self, context):\n if self.filepath:\n import os.path\n filepath = os.path.normpath(bpy.path.abspath(self.filepath))\n\n import subprocess\n try:\n subprocess.Popen([bpy.app.binary_path, filepath])\n except:\n print(\"Error on the new Blender instance\")\n import traceback\n traceback.print_exc()\n\n return{'FINISHED'}\n\nclass AMTH_SCENE_PT_scene_debug(Panel):\n '''Scene Debug'''\n bl_label = 'Scene Debug'\n bl_space_type = \"PROPERTIES\"\n bl_region_type = \"WINDOW\"\n bl_context = \"scene\"\n\n def draw_header(self, context):\n layout = self.layout\n layout.label(text=\"\", icon=\"RADIO\")\n\n def draw(self, context):\n layout = self.layout\n scene = context.scene\n objects = bpy.data.objects\n ob_act = context.active_object\n images = bpy.data.images\n lamps = bpy.data.lamps\n images_missing = []\n list_missing_images = scene.amaranth_debug_scene_list_missing_images\n materials = AMTH_SCENE_OT_cycles_shader_list_nodes.materials\n materials_count = len(AMTH_SCENE_OT_cycles_shader_list_nodes.materials)\n missing_material_slots_obs = 
AMTH_SCENE_OT_list_missing_material_slots.objects\n missing_material_slots_count = len(AMTH_SCENE_OT_list_missing_material_slots.objects)\n missing_material_slots_lib = AMTH_SCENE_OT_list_missing_material_slots.libraries\n engine = scene.render.engine\n\n # List Missing Images\n box = layout.box()\n row = box.row(align=True)\n split = row.split()\n col = split.column()\n\n if images:\n import os.path\n\n for im in images:\n if im.type not in ['UV_TEST', 'RENDER_RESULT', 'COMPOSITING']: \n if not os.path.exists(bpy.path.abspath(im.filepath, library=im.library)):\n images_missing.append([\"%s%s [%s]%s\" % (\n '[L] ' if im.library else '',\n im.name, im.users,\n ' [F]' if im.use_fake_user else ''),\n im.filepath if im.filepath else 'No Filepath',\n im.library.filepath if im.library else ''])\n\n if images_missing:\n row = col.row(align=True)\n row.alignment = 'LEFT'\n row.prop(scene, 'amaranth_debug_scene_list_missing_images',\n icon=\"%s\" % 'TRIA_DOWN' if list_missing_images else 'TRIA_RIGHT',\n emboss=False)\n\n split = split.split()\n col = split.column()\n\n col.label(text=\"%s missing %s\" % (\n str(len(images_missing)),\n 'image' if len(images_missing) == 1 else 'images'),\n icon=\"ERROR\")\n\n if list_missing_images:\n col = box.column(align=True)\n for mis in images_missing:\n col.label(text=mis[0],\n icon=\"IMAGE_DATA\")\n col.label(text=mis[1], icon=\"LIBRARY_DATA_DIRECT\")\n if mis[2]:\n row = col.row(align=True)\n row.alignment = \"LEFT\"\n row.operator(AMTH_SCENE_OT_blender_instance_open.bl_idname,\n text=mis[2],\n icon=\"LINK_BLEND\",\n emboss=False).filepath=mis[2]\n col.separator()\n else:\n row = col.row(align=True)\n row.alignment = 'LEFT'\n row.label(text=\"Great! No missing images\", icon=\"RIGHTARROW_THIN\")\n\n split = split.split()\n col = split.column()\n\n col.label(text=\"%s %s loading correctly\" % (\n str(len(images)),\n 'image' if len(images) == 1 else 'images'),\n icon=\"IMAGE_DATA\")\n else:\n row = col.row(align=True)\n row.alignment = 'LEFT'\n row.label(text=\"No images loaded yet\", icon=\"RIGHTARROW_THIN\")\n\n # List Cycles Materials by Shader\n if cycles_exists and engine == 'CYCLES':\n box = layout.box()\n split = box.split()\n col = split.column(align=True)\n col.prop(scene, 'amaranth_cycles_node_types',\n icon=\"MATERIAL\")\n\n row = split.row(align=True)\n row.operator(AMTH_SCENE_OT_cycles_shader_list_nodes.bl_idname,\n icon=\"SORTSIZE\",\n text=\"List Materials Using Shader\")\n if materials_count != 0: \n row.operator(AMTH_SCENE_OT_cycles_shader_list_nodes_clear.bl_idname,\n icon=\"X\", text=\"\")\n col.separator()\n\n try:\n materials\n except NameError:\n pass\n else:\n if materials_count != 0: \n col = box.column(align=True)\n count = 0\n col.label(text=\"%s %s found\" % (materials_count,\n 'material' if materials_count == 1 else 'materials'), icon=\"INFO\")\n for mat in materials:\n count += 1\n col.label(text='%s' % (materials[count-1]), icon=\"MATERIAL\")\n\n # List Missing Node Trees\n box = layout.box()\n row = box.row(align=True)\n split = row.split()\n col = split.column(align=True)\n\n split = col.split()\n split.label(text=\"Node Links\")\n split.operator(AMTH_SCENE_OT_list_missing_node_links.bl_idname,\n icon=\"NODETREE\")\n\n if AMTH_SCENE_OT_list_missing_node_links.count_groups != 0 or \\\n AMTH_SCENE_OT_list_missing_node_links.count_images != 0 or \\\n AMTH_SCENE_OT_list_missing_node_links.count_image_node_unlinked != 0:\n col.label(text=\"Warning! 
Check Console\", icon=\"ERROR\")\n\n if AMTH_SCENE_OT_list_missing_node_links.count_groups != 0:\n col.label(text=\"%s\" % (\"%s node %s missing link\" % (\n str(AMTH_SCENE_OT_list_missing_node_links.count_groups),\n \"group\" if AMTH_SCENE_OT_list_missing_node_links.count_groups == 1 else \"groups\")),\n icon=\"NODETREE\")\n if AMTH_SCENE_OT_list_missing_node_links.count_images != 0:\n col.label(text=\"%s\" % (\"%s image %s missing link\" % (\n str(AMTH_SCENE_OT_list_missing_node_links.count_images),\n \"node\" if AMTH_SCENE_OT_list_missing_node_links.count_images == 1 else \"nodes\")),\n icon=\"IMAGE_DATA\")\n\n if AMTH_SCENE_OT_list_missing_node_links.count_image_node_unlinked != 0:\n col.label(text=\"%s\" % (\"%s image %s with no output conected\" % (\n str(AMTH_SCENE_OT_list_missing_node_links.count_image_node_unlinked),\n \"node\" if AMTH_SCENE_OT_list_missing_node_links.count_image_node_unlinked == 1 else \"nodes\")),\n icon=\"NODE\")\n\n # List Empty Materials Slots\n box = layout.box()\n split = box.split()\n col = split.column(align=True)\n col.label(text=\"Material Slots\")\n\n row = split.row(align=True)\n row.operator(AMTH_SCENE_OT_list_missing_material_slots.bl_idname,\n icon=\"MATERIAL\",\n text=\"List Empty Materials Slots\")\n if missing_material_slots_count != 0: \n row.operator(AMTH_SCENE_OT_list_missing_material_slots_clear.bl_idname,\n icon=\"X\", text=\"\")\n col.separator()\n\n try:\n missing_material_slots_obs\n except NameError:\n pass\n else:\n if missing_material_slots_count != 0: \n col = box.column(align=True)\n count = 0\n count_lib = 0\n col.label(text=\"%s %s with empty material slots found\" % (\n missing_material_slots_count,\n 'object' if missing_material_slots_count == 1 else 'objects'),\n icon=\"INFO\")\n\n for obs in missing_material_slots_obs:\n count += 1\n\n row = col.row()\n row.alignment = 'LEFT'\n row.label(text='%s' % missing_material_slots_obs[count-1],\n icon=\"OBJECT_DATA\")\n\n if missing_material_slots_lib:\n col.separator()\n col.label(\"Check %s:\" % (\n \"this library\" if\n len(missing_material_slots_lib) == 1\n else \"these libraries\"))\n \n for libs in missing_material_slots_lib:\n count_lib += 1\n row = col.row(align=True)\n row.alignment = \"LEFT\"\n row.operator(AMTH_SCENE_OT_blender_instance_open.bl_idname,\n text=missing_material_slots_lib[count_lib-1],\n icon=\"LINK_BLEND\",\n emboss=False).filepath=missing_material_slots_lib[count_lib-1]\n\n# // FEATURE: Scene Debug","sub_path":"blendswap_admin_tools.py","file_name":"blendswap_admin_tools.py","file_ext":"py","file_size_in_byte":30576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526033629","text":"import httpx\nfrom httpx import Timeout\n\n\nasync def async_get(url: str, return_json: bool = True):\n async with httpx.AsyncClient(timeout=Timeout(timeout=10.0)) as client:\n raw_response = await client.get(url)\n\n if return_json:\n return raw_response.json()\n else:\n return raw_response\n","sub_path":"dnd_discord_bot/requests_handler/async_requests.py","file_name":"async_requests.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407347702","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport sys\nimport urllib.parse\nimport requests\nimport psycopg2\nfrom argparse import ArgumentParser\n\nfrom flask import Flask, request, abort\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import (\n MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\n\n# get channel_secret and channel_access_token from your environment variable\nchannel_secret = os.getenv('LINE_CHANNEL_SECRET', None)\nchannel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)\ndb_url = os.getenv('DATABASE_URL', None)\n\nif channel_secret is None: \n print('Specify LINE_CHANNEL_SECRET as environment variable.')\n sys.exit(1)\nif channel_access_token is None:\n print('Specify LINE_CHANNEL_ACCESS_TOKEN as environment variable.')\n sys.exit(1)\n\nline_bot_api = LineBotApi(channel_access_token)\nhandler = WebhookHandler(channel_secret)\n\ndef calculate(expr):\n expr=urllib.parse.quote(expr)\n link = \"http://api.mathjs.org/v4/?expr=\" + expr\n response = requests.get(link)\n return response\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n sys.stdout.flush()\n app.logger.info(\"Request body: \" + body)\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n abort(400)\n\n return 'OK'\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef message_text(event):\n if (event.message.text==\"/history\"):\n uid = str(event.source.user_id)\n conn = psycopg2.connect(db_url, sslmode='require') \n cur = conn.cursor() \n # use query parameters so psycopg2 quotes the values safely\n cur.execute(\"select expression, result from calc_history where uid = %s;\", (uid,))\n results = cur.fetchall()\n content =\"\"\n if (len(results)>0):\n for i in range (0,len(results)):\n content += results[i][0] + \" = \" + results[i][1] + \"\\n\"\n else : \n content = \"No calculation before\"\n conn.commit() \n conn.close()\n else:\n content = calculate(event.message.text).text\n uid = str(event.source.user_id)\n conn = psycopg2.connect(db_url, sslmode='require') \n cur = conn.cursor() \n cur.execute(\"insert into calc_history (uid,expression,result) values (%s,%s,%s);\", (uid, event.message.text, content))\n conn.commit() \n conn.close()\n line_bot_api.reply_message(\n event.reply_token,\n TextSendMessage(text=content)\n )\n\n\nif __name__ == \"__main__\":\n arg_parser = ArgumentParser(\n usage='Usage: python ' + __file__ + ' [--port ] [--help]'\n )\n arg_parser.add_argument('-p', '--port', default=8000, help='port')\n arg_parser.add_argument('-d', '--debug', default=False, help='debug')\n options = arg_parser.parse_args()\n\n app.run(debug=options.debug, port=options.port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604739812","text":"#!/usr/bin/python\n# coding=utf-8\nfrom lxml import etree\nfrom os.path import 
join,dirname\n\nDATA_DIR='xml-data'\n\nchina_regions = etree.parse(join(dirname(__file__), DATA_DIR, 'china_regions.xml'))\nprovince_list = [p.attrib for p in china_regions.xpath('/*/*/*')]\n\ndef node_of(region_code):\n regions = china_regions.xpath('//*[@region-code='+region_code+']')\n if len(regions):\n return regions[0]\n else:\n return None\n\ndef attrs_of(region_code):\n elem = node_of(region_code)\n if elem:\n return elem.attrib\n else:\n return None\n\ndef children_of(region_code):\n ret = []\n elem = node_of(region_code)\n if elem:\n for child in elem.getchildren():\n ret.append(child.attrib)\n return ret\n \ndef city_by_name(name):\n elem = china_regions.xpath('//*[@name=\"'+name+'\"]')[0]\n return elem.attrib\n\n# for p in children_of('12'):\n # print p['name'],p['id'],p['layer']\n","sub_path":"query_json/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597122698","text":"# -*- coding: utf-8 -*-\n\n###用户玩玩家分享的活动信息\n\n####\n###userId, activeId, shareCode, questionIds, playQuestionId, result\n\nclass UserPlayShareGameInfo:\n\tdef __init__(self, id, userId, activeId, shareCode, questionIds, playQuestionId, result):\n\t\tself.id = id\n\t\tself.userId = userId\n\t\tself.activeId = activeId\n\t\tself.shareCode =shareCode\n\t\tself.questionIds = questionIds\n\t\tself.playQuestionId = playQuestionId\n\t\tself.result = result\n\n\tdef __str__(self):\n\t\treturn \"UserPlayShareInfo: userId=\" + self.userId \\\n\t\t\t+\", activeId=\" + self.activeId \\\n\t\t\t+\", questionIds=\" + self.questionIds \\\n\t\t\t+\", playQuestionId=\" + self.playQuestionId \\\n\t\t\t+\", result=\" + self.result","sub_path":"h5game_backend/models/UserPlayShareGameInfo.py","file_name":"UserPlayShareGameInfo.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651698135","text":"from flask import Flask, render_template\nfrom flask_socketio import SocketIO\nfrom flask_socketio import send, emit\nimport time\n\nfrom multiprocessing import Process\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\n\ndef some_function(data):\n socketio.emit('test', {'data': data})\n\n@socketio.on('test')\ndef handle_my_custom_event(json):\n emit('test', str(json)+\"中文我收到了\")\n for i in range(5):\n p = Process(target=some_function, args=(i,))\n p.start()\n #some_function(i)\n time.sleep(1)\n #socketio.sleep(0.1)\n\nif __name__ == '__main__':\n socketio.run(app,debug=True)\n","sub_path":"others/tyone.py","file_name":"tyone.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465251680","text":"import RPi.GPIO as GPIO\nimport time\n\n\n# LED pins\nled0 = 7\nled1 = 11\n\n\nleds = [led0, led1]\n\n# PIR sensor input\npir0 = 13\n\n\ndef setup():\n\n # set GPIO numbering mode\n GPIO.setmode(GPIO.BOARD)\n\n # For each LED in our list of LEDs\n for ledX in leds:\n\n GPIO.setup(ledX, GPIO.OUT)\n\n # led0 is clear led, will be on when no motion is detected.\n GPIO.output(led0, GPIO.HIGH)\n\n # led1 is motion led, will be on when motion is detected.\n GPIO.output(led1, GPIO.LOW)\n\n # setup the input\n GPIO.setup(pir0, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n\ndef loop():\n # Loop forever\n while True:\n # Set on to false\n on = False\n\n # Wait for PIR to change state\n # (Detecting motion or not 
detecting motion)\n GPIO.wait_for_edge(pir0, GPIO.BOTH, timeout=500)\n\n # Read the current pin level: HIGH while the PIR reports motion,\n # LOW once motion has stopped (or after a timeout with no motion)\n if GPIO.input(pir0):\n # Turn on motion light, turn off clear light\n GPIO.output(led1, GPIO.HIGH)\n GPIO.output(led0, GPIO.LOW)\n # set on to True\n on = True\n else:\n # Turn off motion light, turn on clear\n GPIO.output(led0, GPIO.HIGH)\n GPIO.output(led1, GPIO.LOW)\n # set on to False\n on = False\n\n print(\"Lights are on\") if on else print(\"Lights are off\")\n\n\ndef destroy():\n\n for ledX in leds:\n # Turn off all leds\n GPIO.output(ledX, GPIO.LOW)\n\n GPIO.cleanup()\n\n\nif __name__ == '__main__':\n setup()\n try:\n loop()\n # When ctrl+c is pressed, destroy() is called to cleanup\n except KeyboardInterrupt:\n destroy()\n","sub_path":"5_moreInput.py","file_name":"5_moreInput.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"280581790","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom angular_fingerprintFeature_m import Angular_Fingerprint\nfrom gaussComparator import gaussComparator\nfrom krr_class_new import krr_class\n\nfrom ase import Atoms\nfrom ase.io import read, write\nfrom ase.visualize import view\nimport time\natoms = read('fromThomas/data_SnO.traj', index=':')\nNdata = len(atoms)\n\nE = np.array([a.get_potential_energy() for a in atoms])\n\nRc1 = 5\nRc2 = 5\nbinwidth1 = 0.1\nNbins2 = 30\nsigma1 = 0.4\ngamma = 3\n\n\ndef FVU_train(fingerprints, E, krr_model, Npoints, Npermutations):\n # Perform training with cross-validation\n np.random.seed(101)\n N_array = np.logspace(1, np.log10(Ndata), Npoints).astype(int)\n FVU = np.zeros((Npermutations, Npoints))\n GSkwargs = {'reg': [1e-5], 'sigma': np.logspace(0,2,10)}\n\n for k in range(Npermutations):\n print('training: {}/{}'.format(k, Npermutations))\n permutation = np.random.permutation(Ndata)\n E = E[permutation]\n fingerprints = fingerprints[permutation]\n\n for i, N in enumerate(N_array):\n Esub = E[:N]\n fingerprints_sub = fingerprints[:N]\n \n FVU_temp, params = krr_model.train(Esub, featureMat=fingerprints_sub, add_new_data=False, k=10, **GSkwargs)\n FVU[k, i] += FVU_temp\n FVU_mean = FVU.mean(axis=0)\n return FVU_mean[-1]\n\nNeta = 15\neta_array = np.linspace(1, 30, Neta).astype(int)\nresults = []\n\nplt.figure(1)\nfor name in ['', '_r_fcut', '_fcut']:\n for sigma2 in [0.05, 0.1, 0.2]:\n filename = 'SnO_features/SnO_radialAngFeatures_gauss{7:s}_Rc1_2_{0:d}_{1:d}_binwidth1_{2:.1f}_Nbins2_{3:d}_sigma1_2_{4:.1f}_{5:.2f}_gamma_{6:d}.txt'.format(Rc1, Rc2, binwidth1, Nbins2, sigma1, sigma2, gamma, name)\n fingerprints = np.loadtxt(filename, delimiter='\\t')\n\n print(sigma2)\n # Set up KRR-model\n featureCalculator = Angular_Fingerprint(atoms[0], Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=gamma, use_angular=True)\n comparator = gaussComparator()\n krr = krr_class(comparator=comparator, featureCalculator=featureCalculator)\n \n MAEcurve = np.zeros(Neta)\n for i, eta in enumerate(eta_array):\n Nradial = int(Rc1/binwidth1)\n fingerprints_eta = fingerprints.copy()\n fingerprints_eta[:, 3*Nradial:] *= eta\n MAEcurve[i] = FVU_train(fingerprints_eta, E, krr, Npoints=10, Npermutations=5)\n print(MAEcurve)\n plt.plot(eta_array, MAEcurve, label='{0:s} sigmaAng={1:.2f}'.format(name, sigma2))\n results.append(MAEcurve)\n\nresults = 
np.array(results)\nnp.savetxt('resultsAngFing_paramCurves2.txt', results, delimiter='\\t')\n\nplt.legend()\nplt.show()\n","sub_path":"krrThomas/AngFing_ParameterCurves.py","file_name":"AngFing_ParameterCurves.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236048167","text":"#! python3\n\nimport pyinputplus as pyip\nfrom prices import calculateTotalPrice\n\n\nselectedItems = []\n\nprint(\"Welcome! Please tell me what sandwich you would like\")\n\navailableBreadTypes = (\"wheat\", \"white\", \"sourdough\")\nselectedBreadType = pyip.inputMenu(\n availableBreadTypes, prompt=\"What type of bread would you like?: \\n\"\n)\nselectedItems.append(selectedBreadType)\n\navailableProteinTypes = (\"chicken\", \"turkey\", \"ham\", \"tofu\")\nselectedProteinType = pyip.inputMenu(\n availableProteinTypes, prompt=\"What type of protein would you like?: \\n\"\n)\nselectedItems.append(selectedProteinType)\n\nwithCheese = pyip.inputYesNo(prompt=\"Would you like cheese with that? \\n\") == \"yes\"\n\navailableCheeses = (\"cheddar\", \"Swiss\", \"mozzarella\")\nselectedCheese = None\nif withCheese:\n selectedCheese = pyip.inputMenu(\n availableCheeses, prompt=\"What type of cheese would you like?: \\n\"\n )\n selectedItems.append(selectedCheese)\n\navailableToppings = (\"mayo\", \"mustard\", \"lettuce\", \"tomato\")\nselectedToppings = []\nfor topping in availableToppings:\n withTopping = (\n pyip.inputYesNo(prompt=f\"Would you like {topping} with that?: \\n\") == \"yes\"\n )\n if withTopping:\n selectedToppings.append(topping)\n selectedItems.append(topping)\n\namountOfSandwiches = pyip.inputNum(\n prompt=\"How many sandwiches would you like? \\n\", min=1\n)\n\ntotalPrice = calculateTotalPrice(selectedItems, amountOfSandwiches)\nformattedItems = \", \".join(selectedItems)\nresult = f\"\"\"\nGreat! 
{amountOfSandwiches} sandwich(es) with {formattedItems} coming right up.\nThat will cost you ${totalPrice}.\n\"\"\"\nprint(result)\n","sub_path":"sandwich_maker/sandwich_maker.py","file_name":"sandwich_maker.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"49620761","text":"# -*- coding: utf-8 -*-\n# file\n\nimport os\nfr = open('test.py')\nline = fr.readline()\nprint(line)\n# keyword\nimport keyword\nprint(keyword.kwlist)\n\n# condition\nif True:\n print(\"True\")\nelse:\n print(\"False\")\n\n# input\n#input(\"\\n\\nPress Enter to exit\")\n\n# loop\nimport sys\n#for i in (1, 2, 3, 4):\n#for i in [1, 2, 3, 4]:\nfor i in {1, 2, 3, 4}:\n print(i)\n\n\n# type isinstance\na, b, c, d = 20, 5.5, True, 4+3j\nprint(type(a), type(b), type(c), type(d))\nprint(isinstance(a, int))\nprint(isinstance(d, complex))\n\n# list\nlist = ['abcd', 686, 2.23, 'runoob', 70.2]\nprint(list)\nprint(list[0])\nprint(list[0:3])\nprint(list*2)\n\n# tuple\ntuple = ('abcd', 686, 2.23, 'runoob', 70.2)\nprint(tuple)\nprint(tuple*2)\n\n# set\nset = {'abcd', 686, 2.23, 'runoob', 70.2}\nprint(set)\nprint(set)\n\n# dict\ndict = {}\ndict['one'] = '1'\ndict[2] = 100\nprint(dict['one'])\n\n# id: get an object's identity (its address)\na = 2.23\nprint(id(a))\nb = 2.23\nif (a is b):\n print(\"a and b have the same identity\")\nif (id(a) == id(b)):\n print(\"a and b have the same identity\")\n\na, b = 0, 1\nwhile b < 10:\n print(b)\n a, b = b, a+b\n\n\nclass Employee:\n \"\"\"docstring for Employee\"\"\"\n empCount = 0\n def __init__(self, name, salary):\n super(Employee, self).__init__()\n self.name = name\n self.salary = salary\n Employee.empCount += 1\n\n def displayCount(self):\n print(\"Total Employee %d\" % Employee.empCount)\n\n def displayEmployee(self):\n print(\"Name: \", self.name, \", Salary: \", self.salary)\n\nclass Test:\n def prt(self):\n print(self)\n print(self.__class__)\n \nt = Test()\nt.prt()\n\n\"Create the first object of the Employee class\"\nemp1 = Employee(\"Zara\", 2000)\n\"Create the second object of the Employee class\"\nemp2 = Employee(\"Manni\", 5000)\nemp1.displayEmployee()\nemp2.displayEmployee()\nprint (\"Total Employee %d\" % Employee.empCount)\n\nif (hasattr(emp1, 'age')): # returns True if the 'age' attribute exists\n print(getattr(emp1, 'age')) # return the value of the 'age' attribute\nelse:\n setattr(emp1, 'age', 8) # add attribute 'age' with value 8\n print(getattr(emp1, 'age'))\n delattr(emp1, 'age') # delete the 'age' attribute\nprint (\"Employee.__doc__:\", Employee.__doc__)\nprint (\"Employee.__name__:\", Employee.__name__)\nprint (\"Employee.__module__:\", Employee.__module__)\nprint (\"Employee.__bases__:\", Employee.__bases__)\nprint (\"Employee.__dict__:\", Employee.__dict__)\n\n\na = 40 # create object <40>\nb = a # increase the reference count of <40>\nc = [b] # increase the reference count of <40>\n\ndel a # decrease the reference count of <40>\nb = 100 # decrease the reference count of <40>\nc[0] = -1 # decrease the reference count of <40>\n\nclass Point:\n def __init__(self, x = 0, y = 0):\n self.x = x\n self.y = y\n \n def __del__(self):\n class_name = self.__class__.__name__\n print(class_name, \"del\")\n\npt1 = Point()\npt2 = pt1\npt3 = pt1\nprint(id(pt1), id(pt2), id(pt3))\ndel pt1\ndel pt2\ndel pt3\n\nclass Parent: # define the parent class\n parentAttr = 100\n def __init__(self):\n print (\"Calling parent constructor\")\n \n def parentMethod(self):\n print ('Calling parent method')\n \n def setAttr(self, attr):\n Parent.parentAttr = attr\n \n def getAttr(self):\n print (\"Parent attribute :\", Parent.parentAttr)\n \nclass Child(Parent): # define the child class\n def __init__(self):\n super(Child, self).__init__()\n print (\"Calling child constructor\")\n \n def childMethod(self):\n print ('Calling child method')\n \nc = Child() # instantiate the child class\nc.childMethod() # call the child's method\nc.parentMethod() # call the parent's method\nc.setAttr(200) # call the parent's method again\nc.getAttr() # call the parent's method again\n\nclass Vector:\n def __init__(self, a, b):\n self.a = a\n self.b = b\n \n def __str__(self):\n return 'Vector (%d, %d)' % (self.a, self.b)\n \n def __add__(self,other):\n return Vector(self.a + other.a, self.b + other.b)\n \nv1 = Vector(2,10)\nv2 = Vector(5,-2)\nprint (v1 + v2)\n\nclass JustCounter:\n __secretCount = 0 # private variable\n publicCount = 0 # public variable\n \n def count(self):\n self.__secretCount += 1\n self.publicCount += 1\n print (self.__secretCount)\n \ncounter = JustCounter()\ncounter.count()\ncounter.count()\nprint (counter.publicCount)\n#print (counter.__secretCount) # raises an error: instances cannot access private variables\nprint (counter._JustCounter__secretCount)\n","sub_path":"Python/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275595464","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\"\"\"\nfrom __future__ import print_function, unicode_literals, absolute_import\n\nimport json\nimport logging\nimport os\nimport sys\nimport time\nfrom functools import wraps\n\nfrom .constants import SIMPLE_BUILD_TYPE, PROD_WITHOUT_KOJI_BUILD_TYPE, PROD_WITH_SECRET_BUILD_TYPE\nfrom osbs.build.build_request import BuildManager\nfrom osbs.build.build_response import BuildResponse\nfrom osbs.build.pod_response import PodResponse\nfrom osbs.constants import DEFAULT_NAMESPACE, PROD_BUILD_TYPE\nfrom osbs.core import Openshift\nfrom osbs.exceptions import OsbsException, OsbsValidationException\n# import utils in this way, so that we can mock standalone functions with flexmock\nfrom osbs import utils\n\n\n# Decorator for API methods.\ndef osbsapi(func):\n @wraps(func)\n def catch_exceptions(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OsbsException:\n # Re-raise OsbsExceptions\n raise\n except Exception as ex:\n # Convert anything else to OsbsException\n\n # Python 3 has implicit exception chaining and enhanced\n # reporting, so you get the original traceback as well as\n # the one originating here.\n # For Python 2, let's do that explicitly.\n raise OsbsException(cause=ex, traceback=sys.exc_info()[2])\n\n return catch_exceptions\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OSBS(object):\n \"\"\"\n Note: all API methods return osbs.http.Response object. 
This is, due to historical\n reasons, untrue for list_builds and get_user, which return list of BuildResponse objects\n and dict respectively.\n \"\"\"\n @osbsapi\n def __init__(self, openshift_configuration, build_configuration):\n \"\"\" \"\"\"\n self.os_conf = openshift_configuration\n self.build_conf = build_configuration\n self.os = Openshift(openshift_api_url=self.os_conf.get_openshift_api_uri(),\n openshift_api_version=self.os_conf.get_openshift_api_version(),\n openshift_oauth_url=self.os_conf.get_openshift_oauth_api_uri(),\n k8s_api_url=self.os_conf.get_k8s_api_uri(),\n verbose=self.os_conf.get_verbosity(),\n username=self.os_conf.get_username(),\n password=self.os_conf.get_password(),\n use_kerberos=self.os_conf.get_use_kerberos(),\n client_cert=self.os_conf.get_client_cert(),\n client_key=self.os_conf.get_client_key(),\n kerberos_keytab=self.os_conf.get_kerberos_keytab(),\n kerberos_principal=self.os_conf.get_kerberos_principal(),\n kerberos_ccache=self.os_conf.get_kerberos_ccache(),\n use_auth=self.os_conf.get_use_auth(),\n verify_ssl=self.os_conf.get_verify_ssl())\n self._bm = None\n\n # some calls might not need build manager so let's make it lazy\n @property\n def bm(self):\n if self._bm is None:\n self._bm = BuildManager(build_json_store=self.os_conf.get_build_json_store())\n return self._bm\n\n @osbsapi\n def list_builds(self, namespace=DEFAULT_NAMESPACE):\n response = self.os.list_builds(namespace=namespace)\n serialized_response = response.json()\n build_list = []\n for build in serialized_response[\"items\"]:\n build_list.append(BuildResponse(None, build))\n return build_list\n\n @osbsapi\n def get_build(self, build_id, namespace=DEFAULT_NAMESPACE):\n response = self.os.get_build(build_id, namespace=namespace)\n build_response = BuildResponse(response)\n return build_response\n\n @osbsapi\n def cancel_build(self, build_id, namespace=DEFAULT_NAMESPACE):\n response = self.os.cancel_build(build_id, namespace=namespace)\n build_response = BuildResponse(response)\n return build_response\n\n @osbsapi\n def get_pod_for_build(self, build_id, namespace=DEFAULT_NAMESPACE):\n \"\"\"\n :return: PodResponse object for pod relating to the build\n \"\"\"\n pods = self.os.list_pods(label='openshift.io/build.name=%s' % build_id,\n namespace=namespace)\n serialized_response = pods.json()\n pod_list = [PodResponse(pod) for pod in serialized_response[\"items\"]]\n if not pod_list:\n raise OsbsException(\"No pod for build\")\n elif len(pod_list) != 1:\n raise OsbsException(\"Only one pod expected but %d returned\",\n len(pod_list))\n return pod_list[0]\n\n @osbsapi\n def get_build_request(self, build_type=None):\n \"\"\"\n return instance of BuildRequest according to specified build type\n\n :param build_type: str, name of build type\n :return: instance of BuildRequest\n \"\"\"\n build_type = build_type or self.build_conf.get_build_type()\n build_request = self.bm.get_build_request_by_type(build_type=build_type)\n\n # Apply configured resource limits.\n cpu_limit = self.build_conf.get_cpu_limit()\n memory_limit = self.build_conf.get_memory_limit()\n storage_limit = self.build_conf.get_storage_limit()\n if (cpu_limit is not None or\n memory_limit is not None or\n storage_limit is not None):\n build_request.set_resource_limits(cpu=cpu_limit,\n memory=memory_limit,\n storage=storage_limit)\n\n return build_request\n\n @osbsapi\n def create_build_from_buildrequest(self, build_request, namespace=DEFAULT_NAMESPACE):\n \"\"\"\n render provided build_request and submit build from it\n\n :param 
build_request: instance of build.build_request.BuildRequest\n :param namespace: str, place/context where the build should be executed\n :return: instance of build.build_response.BuildResponse\n \"\"\"\n build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())\n build = build_request.render()\n response = self.os.create_build(json.dumps(build), namespace=namespace)\n build_response = BuildResponse(response)\n return build_response\n\n def _get_running_builds_for_build_config(self, build_config_id, namespace=DEFAULT_NAMESPACE):\n all_builds_for_bc = self.os.list_builds(\n build_config_id=build_config_id,\n namespace=namespace).json()['items']\n running = []\n for b in all_builds_for_bc:\n br = BuildResponse(request=None, build_json=b)\n if br.is_pending() or br.is_running():\n running.append(br)\n return running\n\n def _poll_for_builds_from_buildconfig(self, build_config_id, namespace=DEFAULT_NAMESPACE):\n # try polling for 60 seconds and then fail if build doesn't appear\n deadline = int(time.time()) + 60\n while int(time.time()) < deadline:\n logger.debug('polling for build from BuildConfig \"%s\"' % build_config_id)\n builds = self._get_running_builds_for_build_config(build_config_id, namespace)\n if len(builds) > 0:\n return builds\n # wait for 5 seconds before trying again\n time.sleep(5)\n\n raise OsbsException('Waited for new build from \"%s\", but none was automatically created' %\n build_config_id)\n\n def _panic_msg_for_more_running_builds(self, build_config_name, builds):\n # this should never happen, but if it does, we want to know all the builds\n # that were running at the time\n builds = ', '.join(['%s: %s' % (b.get_build_name(), b.status) for b in builds])\n msg = 'Multiple builds for %s running, can\\'t proceed: %s' % \\\n (build_config_name, builds)\n return msg\n\n def _create_build_config_and_build(self, build_request, namespace):\n # TODO: test this method more thoroughly\n build_json = build_request.render()\n apiVersion = build_json['apiVersion']\n if apiVersion != self.os_conf.get_openshift_api_version():\n raise OsbsValidationException(\"BuildConfig template has incorrect apiVersion (%s)\" %\n apiVersion)\n\n build_config_name = build_json['metadata']['name']\n\n # check if a build already exists for this config; if so then raise\n running_builds = self._get_running_builds_for_build_config(build_config_name, namespace)\n rb_len = len(running_builds)\n if rb_len > 0:\n if rb_len == 1:\n rb = running_builds[0]\n msg = 'Build %s for %s in state %s, can\\'t proceed.' 
% \\\n (rb.get_build_name(), build_config_name, rb.status)\n else:\n msg = self._panic_msg_for_more_running_builds(build_config_name, running_builds)\n raise OsbsException(msg)\n\n existing_bc = None\n try:\n # see if there's already a build config\n existing_bc = self.os.get_build_config(build_config_name)\n except OsbsException:\n pass # doesn't exist => do nothing\n\n build = None\n if existing_bc is not None:\n utils.deep_update(existing_bc, build_json)\n logger.debug('build config for %s already exists, updating...', build_config_name)\n self.os.update_build_config(build_config_name, json.dumps(existing_bc), namespace)\n else:\n # if it doesn't exist, then create it\n logger.debug('build config for %s doesn\\'t exist, creating...', build_config_name)\n self.os.create_build_config(json.dumps(build_json), namespace=namespace)\n # if there's an \"ImageChangeTrigger\" on the BuildConfig and \"From\" is of type\n # \"ImageStreamTag\", the build will be scheduled automatically\n # see https://github.com/projectatomic/osbs-client/issues/205\n if build_request.is_auto_instantiated():\n builds = self._poll_for_builds_from_buildconfig(build_config_name, namespace)\n if len(builds) > 0:\n if len(builds) > 1:\n raise OsbsException(\n self._panic_msg_for_more_running_builds(build_config_name, builds))\n else:\n build = builds[0].request\n if build is None:\n build = self.os.start_build(build_config_name, namespace=namespace)\n return build\n\n @osbsapi\n def create_prod_build(self, git_uri, git_ref, git_branch, user, component, target,\n architecture, yum_repourls=None, git_push_url=None,\n namespace=DEFAULT_NAMESPACE, **kwargs):\n df_parser = utils.get_df_parser(git_uri, git_ref, git_branch)\n build_request = self.get_build_request(PROD_BUILD_TYPE)\n build_request.set_params(\n git_uri=git_uri,\n git_ref=git_ref,\n git_branch=git_branch,\n user=user,\n component=component,\n base_image=df_parser.baseimage,\n name_label=df_parser.labels['Name'],\n registry_uri=self.build_conf.get_registry_uri(),\n openshift_uri=self.os_conf.get_openshift_base_uri(),\n kojiroot=self.build_conf.get_kojiroot(),\n kojihub=self.build_conf.get_kojihub(),\n sources_command=self.build_conf.get_sources_command(),\n koji_target=target,\n architecture=architecture,\n vendor=self.build_conf.get_vendor(),\n build_host=self.build_conf.get_build_host(),\n authoritative_registry=self.build_conf.get_authoritative_registry(),\n yum_repourls=yum_repourls,\n pulp_secret=self.build_conf.get_pulp_secret(),\n use_auth=self.build_conf.get_builder_use_auth(),\n pulp_registry=self.os_conf.get_pulp_registry(),\n nfs_server_path=self.os_conf.get_nfs_server_path(),\n nfs_dest_dir=self.build_conf.get_nfs_destination_dir(),\n git_push_url=self.build_conf.get_git_push_url(),\n git_push_username=self.build_conf.get_git_push_username(),\n )\n build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())\n response = self._create_build_config_and_build(build_request, namespace)\n build_response = BuildResponse(response)\n logger.debug(build_response.json)\n return build_response\n\n @osbsapi\n def create_prod_with_secret_build(self, git_uri, git_ref, git_branch, user, component,\n target, architecture, yum_repourls=None,\n namespace=DEFAULT_NAMESPACE, **kwargs):\n return self.create_prod_build(git_uri, git_ref, git_branch, user, component, target,\n architecture, yum_repourls=yum_repourls,\n namespace=namespace, **kwargs)\n\n @osbsapi\n def create_prod_without_koji_build(self, git_uri, git_ref, git_branch, user, 
component,\n architecture, yum_repourls=None,\n namespace=DEFAULT_NAMESPACE, **kwargs):\n return self.create_prod_build(git_uri, git_ref, git_branch, user, component, None,\n architecture, yum_repourls=yum_repourls,\n namespace=namespace, **kwargs)\n\n @osbsapi\n def create_simple_build(self, git_uri, git_ref, user, component, yum_repourls=None,\n namespace=DEFAULT_NAMESPACE, **kwargs):\n build_request = self.get_build_request(SIMPLE_BUILD_TYPE)\n build_request.set_params(\n git_uri=git_uri,\n git_ref=git_ref,\n user=user,\n component=component,\n registry_uri=self.build_conf.get_registry_uri(),\n openshift_uri=self.os_conf.get_openshift_base_uri(),\n yum_repourls=yum_repourls,\n use_auth=self.build_conf.get_builder_use_auth(),\n )\n build_request.set_openshift_required_version(self.os_conf.get_openshift_required_version())\n response = self._create_build_config_and_build(build_request, namespace)\n build_response = BuildResponse(response)\n logger.debug(build_response.json)\n return build_response\n\n @osbsapi\n def create_build(self, namespace=DEFAULT_NAMESPACE, **kwargs):\n \"\"\"\n take input args, create build request from provided build type and submit the build\n\n :param namespace: str, place/context where the build should be executed\n :param kwargs: keyword args for build\n :return: instance of BuildRequest\n \"\"\"\n build_type = self.build_conf.get_build_type()\n if build_type in (PROD_BUILD_TYPE,\n PROD_WITHOUT_KOJI_BUILD_TYPE,\n PROD_WITH_SECRET_BUILD_TYPE):\n return self.create_prod_build(namespace=namespace, **kwargs)\n elif build_type == SIMPLE_BUILD_TYPE:\n return self.create_simple_build(namespace=namespace, **kwargs)\n elif build_type == PROD_WITH_SECRET_BUILD_TYPE:\n return self.create_prod_with_secret_build(namespace=namespace, **kwargs)\n else:\n raise OsbsException(\"Unknown build type: '%s'\" % build_type)\n\n @osbsapi\n def get_build_logs(self, build_id, follow=False, build_json=None, wait_if_missing=False,\n namespace=DEFAULT_NAMESPACE):\n \"\"\"\n provide logs from build\n\n :param build_id: str\n :param follow: bool, fetch logs as they come?\n :param build_json: dict, to save one get-build query\n :param wait_if_missing: bool, if build doesn't exist, wait\n :param namespace: str\n :return: None, str or iterator\n \"\"\"\n return self.os.logs(build_id, follow=follow, build_json=build_json,\n wait_if_missing=wait_if_missing, namespace=namespace)\n\n @osbsapi\n def get_docker_build_logs(self, build_id, decode_logs=True, build_json=None,\n namespace=DEFAULT_NAMESPACE):\n \"\"\"\n get logs provided by \"docker build\"\n\n :param build_id: str\n :param decode_logs: bool, docker by default output logs in simple json structure:\n { \"stream\": \"line\" }\n if this arg is set to True, it decodes logs to human readable form\n :param build_json: dict, to save one get-build query\n :param namespace: str\n :return: str\n \"\"\"\n if not build_json:\n build = self.os.get_build(build_id, namespace=namespace)\n build_response = BuildResponse(build)\n else:\n build_response = BuildResponse(None, build_json)\n\n if build_response.is_finished():\n logs = build_response.get_logs(decode_logs=decode_logs)\n return logs\n logger.warning(\"build haven't finished yet\")\n\n @osbsapi\n def wait_for_build_to_finish(self, build_id, namespace=DEFAULT_NAMESPACE):\n response = self.os.wait_for_build_to_finish(build_id, namespace=namespace)\n build_response = BuildResponse(None, response)\n return build_response\n\n @osbsapi\n def wait_for_build_to_get_scheduled(self, build_id, 
namespace=DEFAULT_NAMESPACE):\n response = self.os.wait_for_build_to_get_scheduled(build_id, namespace=namespace)\n build_response = BuildResponse(None, response)\n return build_response\n\n @osbsapi\n def update_labels_on_build(self, build_id, labels,\n namespace=DEFAULT_NAMESPACE):\n response = self.os.update_labels_on_build(build_id, labels,\n namespace=namespace)\n return response\n\n @osbsapi\n def set_labels_on_build(self, build_id, labels, namespace=DEFAULT_NAMESPACE):\n response = self.os.set_labels_on_build(build_id, labels, namespace=namespace)\n return response\n\n @osbsapi\n def update_labels_on_build_config(self, build_config_id, labels,\n namespace=DEFAULT_NAMESPACE):\n response = self.os.update_labels_on_build_config(build_config_id,\n labels,\n namespace=namespace)\n return response\n\n @osbsapi\n def set_labels_on_build_config(self, build_config_id, labels,\n namespace=DEFAULT_NAMESPACE):\n response = self.os.set_labels_on_build_config(build_config_id,\n labels,\n namespace=namespace)\n return response\n\n @osbsapi\n def update_annotations_on_build(self, build_id, annotations,\n namespace=DEFAULT_NAMESPACE):\n return self.os.update_annotations_on_build(build_id, annotations,\n namespace=namespace)\n\n @osbsapi\n def set_annotations_on_build(self, build_id, annotations, namespace=DEFAULT_NAMESPACE):\n return self.os.set_annotations_on_build(build_id, annotations, namespace=namespace)\n\n @osbsapi\n def import_image(self, name, namespace=DEFAULT_NAMESPACE):\n return self.os.import_image(name, namespace=namespace)\n\n @osbsapi\n def get_token(self):\n return self.os.get_oauth_token()\n\n @osbsapi\n def get_user(self, username=\"~\"):\n return self.os.get_user(username).json()\n\n @osbsapi\n def get_image_stream(self, stream_id, namespace=DEFAULT_NAMESPACE):\n return self.os.get_image_stream(stream_id, namespace)\n\n @osbsapi\n def create_image_stream(self, name, docker_image_repository, namespace=DEFAULT_NAMESPACE):\n img_stream_file = os.path.join(self.os_conf.get_build_json_store(), 'image_stream.json')\n stream = json.load(open(img_stream_file))\n stream['metadata']['name'] = name\n stream['spec']['dockerImageRepository'] = docker_image_repository\n return self.os.create_image_stream(json.dumps(stream), namespace=DEFAULT_NAMESPACE)\n","sub_path":"osbs/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":20607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"505866082","text":"#Elizabeth Doss\n#SoftDev1 pd1\n#K8 -- Lemme Flask You Sump’n\n#2019-09-18\n\n#prepares flask\nfrom flask import Flask\napp = Flask(__name__) #create instance of class Flask\n\n#normal route\n@app.route(\"/\") #assign following fxn to run when root route requested\ndef queso():\n print(__name__ + \"norm\") #prints in terminal\n return \"No hablo queso!\" #prints on webpage\n\n#route 1\n@app.route(\"/escribo\") #if added to url, opens new page\ndef food1():\n print(__name__ + \"test1\")\n return \"No escribo queso!\"\n\n#route 2\n@app.route(\"/escucho\") #if added to url, opens new page\ndef food2():\n print(__name__ + \"test2\")\n return \"No escucho queso!\"\n\n#route 3\n@app.route(\"/soy\") #if added to url, opens new page\ndef food3():\n print(__name__ + \"test3\")\n return \"No soy queso!\"\n\n#main\nif __name__ == \"__main__\":\n app.debug = True\n
app.run()\n","sub_path":"fall/08_app0/NoHabloQueso.py","file_name":"NoHabloQueso.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"411980399","text":"cats_file = 'cats.txt'\ndogs_file = 'dogs.txt'\n\nfilenames = [cats_file, dogs_file]\nfor files in filenames:\n\ttry:\n\t\twith open(files,'r') as file_object:\n\t\t\tcontents = file_object.read()\n\texcept FileNotFoundError:\n\t\tprint(\"File \" + files + \" is missing\")\n\telse:\n\t\tprint(contents)\n\t\n","sub_path":"Part_1/chapter_10/10-8_cats_and_dogs.py","file_name":"10-8_cats_and_dogs.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"335881862","text":"import settings\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.autograd import Variable\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport copy\nimport os, glob\nimport cv2\nimport random\nimport argparse\nimport bcolz\nimport pandas as pd\nimport random\nfrom PIL import Image\n#from inception import inception_v3\nfrom vgg import vgg19_bn, vgg16_bn\n#from inceptionresv2 import inceptionresnetv2\n\nMODEL_DIR = settings.MODEL_DIR\nC = settings.NUM_CLASSES\n\nw_files_training = []\n\ndef get_acc_from_w_filename(filename):\n try:\n stracc = filename.split('_')[-2]\n return float(stracc)\n except:\n return 0.\n\ndef load_best_weights(model):\n w_files = glob.glob(os.path.join(MODEL_DIR, model.name) + '_*.pth')\n max_acc = 0\n best_file = None\n saved_epoch = -1\n for w_file in w_files:\n try:\n stracc = w_file.split('_')[-2]\n epoch = w_file.split('_')[-3]\n acc = float(stracc)\n if acc > max_acc:\n best_file = w_file\n max_acc = acc\n saved_epoch = int(epoch)\n w_files_training.append((acc, w_file))\n except:\n continue\n if max_acc > 0:\n print('loading weight: {}'.format(best_file))\n model.load_state_dict(torch.load(best_file))\n return saved_epoch\n\ndef save_weights(acc, model, epoch, max_num=2):\n f_name = '{}_{}_{:.5f}_.pth'.format(model.name, epoch, acc)\n w_file_path = os.path.join(MODEL_DIR, f_name)\n if len(w_files_training) < max_num:\n w_files_training.append((acc, w_file_path))\n torch.save(model.state_dict(), w_file_path)\n return\n min = 10.0\n index_min = -1\n for i, item in enumerate(w_files_training):\n val_acc, fp = item\n if min > val_acc:\n index_min = i\n min = val_acc\n #print(min)\n if acc > min:\n torch.save(model.state_dict(), w_file_path)\n try:\n os.remove(w_files_training[index_min][1])\n except:\n print('Failed to delete file: {}'.format(w_files_training[index_min][1]))\n w_files_training[index_min] = (acc, w_file_path)\n\ndef save_array(fname, arr):\n c=bcolz.carray(arr, rootdir=fname, mode='w')\n c.flush()\n\ndef load_array(fname):\n return bcolz.open(fname)[:]\n\ndef load_weights_file(model, w_file):\n model.load_state_dict(torch.load(w_file))\n\ndef create_res18(load_weights=False, freeze=False):\n model_ft = models.resnet18(pretrained=True)\n if freeze:\n for param in model_ft.parameters():\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())\n model_ft = model_ft.cuda()\n\n model_ft.name = 'res18'\n model_ft.batch_size = 256\n return model_ft\n\ndef create_res34(load_weights=False, freeze=False):\n model_ft = models.resnet34(pretrained=True)\n if
freeze:\n for param in model_ft.parameters():\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())\n model_ft = model_ft.cuda()\n\n model_ft.name = 'res34'\n model_ft.batch_size = 128\n return model_ft\n\ndef create_res50(load_weights=False, freeze=False):\n model_ft = models.resnet50(pretrained=True)\n if freeze:\n for param in model_ft.parameters():\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C)) #, nn.Softmax())\n model_ft = model_ft.cuda()\n\n model_ft.name = 'res50'\n model_ft.batch_size = 32\n return model_ft\n\ndef create_res101(load_weights=False, freeze=False):\n model_ft = models.resnet101(pretrained=True)\n if freeze:\n for param in model_ft.parameters():\n param.requires_grad = False\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C))\n model_ft = model_ft.cuda()\n\n model_ft.name = 'res101'\n model_ft.batch_size = 32\n return model_ft\n\ndef create_res152(load_weights=False, freeze=False):\n res152 = models.resnet152(pretrained=True)\n if freeze:\n for param in res152.parameters():\n param.requires_grad = False\n num_ftrs = res152.fc.in_features\n res152.fc = nn.Sequential(nn.Linear(num_ftrs, C))\n res152 = res152.cuda()\n\n res152.name = 'res152'\n return res152\n\ndef create_dense161(load_weights=False, freeze=False):\n desnet_ft = models.densenet161(pretrained=True)\n if freeze:\n for param in desnet_ft.parameters():\n param.requires_grad = False\n num_ftrs = desnet_ft.classifier.in_features\n desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))\n desnet_ft = desnet_ft.cuda()\n\n desnet_ft.name = 'dense161'\n #desnet_ft.batch_size = 32\n return desnet_ft\n\ndef create_dense169(load_weights=False, freeze=False):\n desnet_ft = models.densenet169(pretrained=True)\n if freeze:\n for param in desnet_ft.parameters():\n param.requires_grad = False\n num_ftrs = desnet_ft.classifier.in_features\n desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))\n desnet_ft = desnet_ft.cuda()\n\n desnet_ft.name = 'dense169'\n #desnet_ft.batch_size = 32\n return desnet_ft\n\ndef create_dense121(load_weights=False, freeze=False):\n desnet_ft = models.densenet121(pretrained=True)\n if freeze:\n for param in desnet_ft.parameters():\n param.requires_grad = False\n num_ftrs = desnet_ft.classifier.in_features\n desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))\n desnet_ft = desnet_ft.cuda()\n\n desnet_ft.name = 'dense121'\n desnet_ft.batch_size = 32\n return desnet_ft\n\ndef create_dense201(load_weights=False, freeze=False):\n desnet_ft = models.densenet201(pretrained=True)\n if freeze:\n for param in desnet_ft.parameters():\n param.requires_grad = False\n num_ftrs = desnet_ft.classifier.in_features\n desnet_ft.classifier = nn.Sequential(nn.Linear(num_ftrs, C))\n desnet_ft = desnet_ft.cuda()\n \n desnet_ft.name = 'dense201'\n #desnet_ft.batch_size = 32\n return desnet_ft\n\ndef create_vgg19bn(load_weights=False, freeze=False):\n vgg19_bn_ft = vgg19_bn(pretrained=True)\n if freeze:\n for param in vgg19_bn_ft.parameters():\n param.requires_grad = False\n #vgg19_bn_ft.classifier = nn.Linear(25088, 3)\n vgg19_bn_ft.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, C))\n\n vgg19_bn_ft = vgg19_bn_ft.cuda()\n\n vgg19_bn_ft.name = 'vgg19bn'\n vgg19_bn_ft.max_num = 1\n 
#vgg19_bn_ft.batch_size = 32\n return vgg19_bn_ft\n\ndef create_vgg16bn(load_weights=False, freeze=False):\n vgg16_bn_ft = vgg16_bn(pretrained=True)\n if freeze:\n for param in vgg16_bn_ft.parameters():\n param.requires_grad = False\n #vgg16_bn_ft.classifier = nn.Linear(25088, 3)\n vgg16_bn_ft.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, C))\n\n vgg16_bn_ft = vgg16_bn_ft.cuda()\n\n vgg16_bn_ft.name = 'vgg16bn'\n vgg16_bn_ft.max_num = 1\n #vgg16_bn_ft.batch_size = 32\n return vgg16_bn_ft\n\ndef create_inceptionv3(load_weights=False, freeze=False):\n incept_ft = models.inception_v3(pretrained=True)\n if freeze:\n for param in incept_ft.parameters():\n param.requires_grad = False\n num_ftrs = incept_ft.fc.in_features\n incept_ft.fc = nn.Sequential(nn.Linear(num_ftrs, C))\n incept_ft.aux_logits=False\n incept_ft = incept_ft.cuda()\n\n incept_ft.name = 'inceptionv3'\n incept_ft.batch_size = 32\n return incept_ft\n\ndef create_inceptionresv2(load_weights=False, freeze=False):\n model_ft = inceptionresnetv2(pretrained=True)\n num_ftrs = model_ft.classif.in_features\n model_ft.classif = nn.Sequential(nn.Linear(num_ftrs, C))\n model_ft = model_ft.cuda()\n\n model_ft.name = 'inceptionresv2'\n model_ft.batch_size = 8\n return model_ft\n\ndef create_model(model_name, freeze=False):\n create_func = 'create_' + model_name\n\n model = eval(create_func)(freeze=freeze)\n if not hasattr(model, 'batch_size'):\n model.batch_size = 16\n return model\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"412215528","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport tensorflow_hub as hub\r\nfrom PIL import Image\r\nimport io\r\nimport base64\r\nimport time\r\n\r\nst.set_option('deprecation.showfileUploaderEncoding', False)\r\n\r\n# All the function needed \r\n#--------------------------------------------------------------------------------\r\n# Define image size\r\nIMG_SIZE = 224\r\n\r\n# Define a batch size , 32 is a good start \r\nBATCH_SIZE = 32\r\n# Import labels and create an array of 120 dog breeds\r\nlabels_csv = pd.read_csv(\"/home/gagan/Desktop/Ml-Sample/labels.csv\")\r\nlabels = labels_csv[\"breed\"].to_numpy()\r\nunique_breeds = np.unique(labels)\r\n\r\n# Prediction label function\r\ndef get_pred_label(prediction_probabilities):\r\n \"\"\"\r\n Turn an array of prediction probabilities into a label.\r\n \"\"\"\r\n return unique_breeds[np.argmax(prediction_probabilities)]\r\n\r\n#---------------------------------------------------------------------------------\r\n\r\nst.title(\"Welcome to Dog 🐕 Vision 👁️ AI\")\r\nst.write(\"\")\r\nst.write(\"Upload your dog's image\")\r\n\r\nfile = st.file_uploader(\"\", type=[\"jpg\", \"png\"])\r\n\r\nif file is None:\r\n st.text(\"Please upload an image file\")\r\nelse:\r\n custom_image = Image.open(file)\r\n st.text(\"Are you excited?😀...🐶...\")\r\n\r\nif file:\r\n\t# Data preprocessing\r\n\timage = tf.io.decode_image(file.getvalue(), channels=3, dtype=tf.float32)\r\n\timage= tf.image.resize(image, size=[IMG_SIZE, IMG_SIZE])\r\n\tdata = tf.data.Dataset.from_tensor_slices([image])\r\n\tdata_batch = data.batch(BATCH_SIZE)\r\n\r\n\t# Load pretrained model and make predictions\r\n\tloaded_full_model = 
tf.keras.models.load_model('/home/gagan/Desktop/Ml-Sample/20200727-18521595875929-full-image-set-mobilenetv2-Adam.h5',custom_objects={'KerasLayer':hub.KerasLayer})\r\n\tcustom_preds = loaded_full_model.predict(data_batch)\r\n\t# Get predicted label\r\n\tcustom_pred_labels = [get_pred_label(custom_preds[i]) for i in range(len(custom_preds))]\r\n\t\r\n\t# Starting a long computation...'\r\n\tlatest_iteration = st.empty()\r\n\tbar = st.progress(0)\r\n\r\n\tfor i in range(100):\r\n\t # Update the progress bar with each iteration.\r\n\t latest_iteration.text(f'Hold tight....{i+1}')\r\n\t bar.progress(i + 1)\r\n\t time.sleep(0.1)\r\n\t# '...and now we\\'re done!'\r\n\r\n\tst.title(f'Your dog is a {custom_pred_labels[0]}')\r\n\t# st.write(custom_pred_labels[0])\r\n\tst.image(custom_image, use_column_width=True)\r\n\r\n\r\n\r\n\r\n","sub_path":"DogVisionAI.py","file_name":"DogVisionAI.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"654179365","text":"import turtle\nfrom math import *\nturtle.setup(640,480)\nturtle.colormode(255)\nt = turtle.Pen()\nturtle.bgcolor(\"white\")\nt.ht()\nt.speed(0)\nturtle.tracer(0,0)\n\ncolor1 = [1, 11, 74]\ncolor2 = [255, 255, 255]\ncolorMap = [[0 for i in range(3)] for j in range(100)]\nfor i in range(100):\n for j in range(3):\n colorMap[i][j] = color1[j] + (color2[j] - color1[j]) * i / 100\n\nt.penup()\nt.goto(-320,240)\nt.setheading(0)\n\nfor h in range(-240, 240):\n for w in range(-320, 320):\n x = (w - 100) / 180\n y = h / 180\n c = x + y * 1j\n z = 0\n for i in range(1, 100):\n z = z * z + c\n if abs(z) > 10.0:\n break\n if i >= 99:\n t.pencolor(0, 0, 0)\n else:\n t.pencolor(floor(colorMap[i][0]), floor(colorMap[i][1]), floor(colorMap[i][2]))\n t.forward(1)\n t.penup()\n t.backward(640)\n t.right(90)\n t.forward(1)\n t.left(90)\n t.pendown()\n\nturtle.update()\nturtle.done()\n","sub_path":"mdbtest.py","file_name":"mdbtest.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"26465569","text":"from PIL import Image\n\nfrom django.forms import ModelChoiceField, ModelForm, ValidationError\nfrom django.contrib import admin\nfrom django.utils.safestring import mark_safe\n\nfrom .models import *\n\n\n# class ShoeAdminForm(ModelForm):\n#\n# def __init__(self, *args, **kwargs):\n# super().__init__(*args, **kwargs)\n# self.fields['image'].help_text = mark_safe(\n# \"\"\"При загрузке изоброжение с разрешением больше {}x{} оно будет обрезать\n# \"\"\".format(\n# *Product.MAX_RESOLUTION\n# )\n# )\n\n # def clean_image(self):\n # image = self.cleaned_data['image']\n # img = Image.open(image)\n # min_height, min_width = Product.MIN_RESOLUTION\n # max_height, max_width = Product.MAX_RESOLUTION\n # if image.size > Product.MAX_IMAGE_SIZE:\n # raise ValidationError('Размер изображение не должен превышать 3MB')\n # if img.height < min_height or img.width < min_width:\n # raise ValidationError('Разрешение изображение меньше минимального')\n # if img.height > max_height or img.width > max_width:\n # raise ValidationError('Разрешение изображение больше максимального')\n # return image\n\n\nclass ShoeAdminForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance')\n if not instance.color:\n self.fields['color_volume_max'].vidget.attrs.update({\n 'readonly': True, 'style': 'background: lightgray'\n })\n\n def 
clean(self):\n if not self.cleaned_data['color']:\n self.cleaned_data['color_volume_max'] = None\n return self.cleaned_data\n\n\nclass ShirtAdminForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance')\n if not instance.color:\n self.fields['color_volume_max'].widget.attrs.update({\n 'readonly': True, 'style': 'background: lightgray'\n })\n\n def clean(self):\n if not self.cleaned_data['color']:\n self.cleaned_data['color_volume_max'] = None\n return self.cleaned_data\n\n\nclass ShortAdminForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance')\n if not instance.color:\n self.fields['color_volume_max'].widget.attrs.update({\n 'readonly': True, 'style': 'background: lightgray'\n })\n\n def clean(self):\n if not self.cleaned_data['color']:\n self.cleaned_data['color_volume_max'] = None\n return self.cleaned_data\n\n\nclass MikeyAdminForm(ModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n instance = kwargs.get('instance')\n if not instance.color:\n self.fields['color_volume_max'].widget.attrs.update({\n 'readonly': True, 'style': 'background: lightgray'\n })\n\n def clean(self):\n if not self.cleaned_data['color']:\n self.cleaned_data['color_volume_max'] = None\n return self.cleaned_data\n\n\nclass ShoeAdmin(admin.ModelAdmin):\n\n # form = ShoeAdminForm\n\n change_form_template = 'admin.html'\n form = ShoeAdminForm\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'category':\n return ModelChoiceField(Category.objects.filter(slug='shoes'))\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass ShortAdmin(admin.ModelAdmin):\n\n change_form_template = 'admin.html'\n form = ShortAdminForm\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'category':\n return ModelChoiceField(Category.objects.filter(slug='shorts'))\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass ShirtAdmin(admin.ModelAdmin):\n\n change_form_template = 'admin.html'\n form = ShirtAdminForm\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'category':\n return ModelChoiceField(Category.objects.filter(slug='shirts'))\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass MikeyAdmin(admin.ModelAdmin):\n\n change_form_template = 'admin.html'\n form = MikeyAdminForm\n\n def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'category':\n return ModelChoiceField(Category.objects.filter(slug='mikeys'))\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nadmin.site.register(Category)\nadmin.site.register(Shoe, ShoeAdmin)\nadmin.site.register(Short, ShortAdmin)\nadmin.site.register(Shirt, ShirtAdmin)\nadmin.site.register(Mikey, MikeyAdmin)\nadmin.site.register(CartProduct)\nadmin.site.register(Cart)\nadmin.site.register(Customer)\nadmin.site.register(Order)","sub_path":"main/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"374020713","text":"import os\nimport cv2\nimport csv\nimport numpy as np\nimport easygui\nimport pytesseract\nimport difflib\n\nfrom models import Contours\nfrom models.Contours import sort_contours\nfrom models.Extract import extract, readHV,
create\n\ndef convert():\n file = easygui.fileopenbox()\n # files = filedialog.askopenfilenames()\n directory = os.path.dirname(__file__)\n # directory = r'C:\\Users\\USUARIO\\Documents\\UNIVERSIDAD\\DABM\\Proyecto\\data'\n texto = box_extraction(file,directory)\n # disp = Equipo(name,code,rs,brand,model,tipo,series,numAct)\n # disp.create() \n return texto\n\ndef box_extraction(img_for_box_extraction_path, cropped_dir_path): \n img = cv2.imread(img_for_box_extraction_path, 0) # Read the image\n scale_percent = 80 # percent of original size\n width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n dim = (width, height) \n # resize image\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n # resized = cv2.resize(img, (960,540))\n\n (thresh, img_bin) = cv2.threshold(resized, 150, 255, \n cv2.THRESH_BINARY | cv2.THRESH_OTSU) # Thresholding the image\n img_bin = 255-img_bin # Invert the image\n cv2.imwrite(\"Image_bin.jpg\",img_bin)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1,2)) # Operador morfol+ogico de apertura\n img_bin = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel,iterations=1)\n\n # Defining a kernel length\n kernel_length = np.array(resized).shape[1]//120\n # A verticle kernel of (1 X kernel_length), which will detect all the verticle lines from the image.\n verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, round(kernel_length*0.89)))\n # A horizontal kernel of (kernel_length X 1), which will help to detect all the horizontal line \n # from the image.\n hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))\n # A kernel of (3 X 3) ones.\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))\n # Morphological operation to detect verticle lines from an image\n img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)\n verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)\n cv2.imwrite(\"verticle_lines.jpg\",verticle_lines_img)\n # Morphological operation to detect horizontal lines from an image\n img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)\n horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)\n cv2.imwrite(\"horizontal_lines.jpg\",horizontal_lines_img)\n # Weighting parameters, this will decide the quantity of an image to be added to make a new image.\n alpha = 0.5\n beta = 1.0 - alpha\n # This function helps to add two image with specific weight parameter to get a third image as summation of two image.\n img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)\n img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)\n (thresh, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\n # For Debugging\n # Enable this line to see verticle and horizontal lines in the image which is used to find boxes\n cv2.imwrite(\"img_final_bin.jpg\",img_final_bin)\n # Find contours for image, which will detect all the boxes\n contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n # cv2.drawContours(img_final_bin,contours,-1,(0,255,0),3)\n # cv2.imshow('image',img_final_bin)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n # print(contours)\n # Sort all the contours by top to bottom.\n (contours, boundingBoxes) = sort_contours(contours, method=\"top-to-bottom\")\n idx = 0\n\n pytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files\\Tesseract-OCR\\tesseract.exe'\n text = []\n kernel = 
cv2.getStructuringElement(cv2.MORPH_RECT, (1,2))\n for c in contours:\n # Returns the location and width,height for every contour\n x, y, w, h = cv2.boundingRect(c)\n # If the box height is greater then 20, widht is >80, then only save it as a box in \"cropped/\" folder.\n if (w > 20 and h > 10) and w > 4*h:\n idx += 1\n\n new_img = resized[y-3:y+h+3, x-2:x+w]\n # cv2.imshow('image',new_img)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n # # gray = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)\n \n blur = cv2.GaussianBlur(new_img,(3,3),0)\n # cv2.imshow('image',blur)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n \n tresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n # cv2.imshow('image',tresh)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n # kernel = np.ones((1,1),np.uint8)\n # dilation = cv2.dilate(tresh,kernel,iterations = 1) \n \n kernel = np.ones((1,2),np.uint8)\n erosion = cv2.erode(tresh,kernel,iterations = 1)\n # cv2.imshow('image',erosion)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n \n invert = 255 - erosion\n # cv2.imshow('image',invert)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows() \n\n custom_config = r'--oem 3 --psm 6'\n txt = pytesseract.image_to_string(invert,config= custom_config)\n # print(txt)\n\n text.append(txt)\n cv2.imwrite(cropped_dir_path+str(idx) + '.png', invert)\n matx = []\n for e in text:\n mod1 = e.replace('\\n','')\n # print(mod1)\n mod2 = mod1.replace('\\x0c','')\n # print(mod2)\n matx.append(mod2)\n \n return matx\n \n\n # # box_extraction(\"41.jpg\", \"./Cropped/\")\n\ndef get_matches(matrix,refTitle):\n match = difflib.get_close_matches(refTitle,matrix)\n match = match[0]\n return match\n\ndef getData(refMatrix):\n hdv = readHV('HV_BENEHEART_D6.csv')\n headers,values = extract(hdv,refMatrix)\n create(headers,values)\n\n \n \n\n #Extraer fecha de operación\n #Extraer vida util\n #Extraer fecha de vencimiento de garantía\n #Extraer periodicidad de mantenimiento\n #Extraer ultimo mantenimiento\n #Extraer los que tienen x\n #Extraer riesgo","sub_path":"models/Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577579412","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\n# This module contains utilities for parsing commit messages.\n\nimport cgi\nimport re\n\n# These regular expressions are not very robust. 
Specifically, they fail to\n# handle lists well.\n\nBUG_RE = re.compile(\n r'''# bug followed by any sequence of numbers, or\n # a standalone sequence of numbers\n (\n (?:\n bug |\n b= |\n # a sequence of 5+ numbers preceded by whitespace\n (?=\\b\\#?\\d{5,}) |\n # numbers at the very beginning\n ^(?=\\d)\n )\n (?:\\s*\\#?)(\\d+)(?=\\b)\n )''', re.I | re.X)\n\n# Like BUG_RE except it doesn't flag sequences of numbers, only positive\n# \"bug\" syntax like \"bug X\" or \"b=\".\nBUG_CONSERVATIVE_RE = re.compile(\n r'''((?:bug|b=)(?:\\s*)(\\d+)(?=\\b))''', re.I | re.X)\n\nSPECIFIER = r'(?:r|a|sr|rs|ui-r)[=?]'\nR_SPECIFIER = r'\\br[=?]'\nR_SPECIFIER_RE = re.compile(R_SPECIFIER)\nREQUAL_SPECIFIER_RE = re.compile(r'r=')\nRQUESTION_SPECIFIER_RE = re.compile(r'r\\?')\n\nLIST = r'[;,\\/\\\\]\\s*'\nLIST_RE = re.compile(LIST)\n\n# Note that we only allows a subset of legal IRC-nick characters.\n# Specifically we not allow [ \\ ] ^ ` { | }\nIRC_NICK = r'[a-zA-Z0-9\\-\\_]+' # this needs to match irc nicks\nBMO_IRC_NICK_RE = re.compile(r':(' + IRC_NICK + r')')\n\nREVIEWERS_RE = re.compile(\n r'([\\s\\(\\.\\[;,])' + # before 'r' delimiter\n r'(' + SPECIFIER + r')' + # flag\n r'(' + # capture all reviewers\n IRC_NICK + # reviewer\n r'(?:' + # additional reviewers\n LIST + # delimiter\n r'(?![a-z0-9\\.\\-]+[=?])' + # don't extend match into next flag\n IRC_NICK + # reviewer\n r')*' +\n r')?') # noqa\n\nBACKOUT_KEYWORD = r'^(?:backed out|backout|back out)\\b'\nBACKOUT_KEYWORD_RE = re.compile(BACKOUT_KEYWORD, re.I)\nCHANGESET_KEYWORD = r'(?:\\b(?:changeset|revision|change|cset|of)\\b)'\nCHANGESETS_KEYWORD = r'(?:\\b(?:changesets|revisions|changes|csets|of)\\b)'\nSHORT_NODE = r'([0-9a-f]{12}\\b)'\nSHORT_NODE_RE = re.compile(SHORT_NODE, re.I)\n\nBACKOUT_SINGLE_RE = re.compile(\n BACKOUT_KEYWORD + r'\\s+' +\n CHANGESET_KEYWORD + r'?\\s*' +\n r'(?P' + SHORT_NODE + r')',\n re.I\n)\n\nBACKOUT_MULTI_SPLIT_RE = re.compile(\n BACKOUT_KEYWORD + r'\\s+' +\n r'(?P\\d+)\\s+' +\n CHANGESETS_KEYWORD,\n re.I\n)\n\nBACKOUT_MULTI_ONELINE_RE = re.compile(\n BACKOUT_KEYWORD + r'\\s+' +\n CHANGESETS_KEYWORD + r'?\\s*' +\n r'(?P(?:(?:\\s+|and|,)+' + SHORT_NODE + r')+)',\n re.I\n)\n\nSHORT_RE = re.compile('^[0-9a-f]{12}$', re.I)\n\nDIGIT_RE = re.compile('#?\\d+')\n\n# Strip out a white-list of metadata prefixes.\n# Currently just MozReview-Commit-ID\nMETADATA_RE = re.compile('^MozReview-Commit-ID: ')\n\n\ndef parse_bugs(s):\n bugs_with_duplicates = [int(m[1]) for m in BUG_RE.findall(s)]\n bugs_seen = set()\n bugs_seen_add = bugs_seen.add\n bugs = [x for x in bugs_with_duplicates if not (x in bugs_seen or bugs_seen_add(x))]\n return [bug for bug in bugs if bug < 100000000]\n\n\ndef filter_reviewers(s):\n \"\"\"Given a string, extract meaningful reviewer names.\"\"\"\n for word in s.strip().split():\n if not word:\n continue\n\n word = word.strip('\"[]<>.:')\n\n if '=' in word:\n continue\n\n if word.startswith('(') or word.endswith(')'):\n continue\n\n if word == 'DONTBUILD':\n continue\n\n if DIGIT_RE.match(word):\n continue\n\n yield word\n\n\ndef parse_reviewers(commit_description, flag_re=None):\n commit_summary = commit_description.splitlines().pop(0)\n for match in re.finditer(REVIEWERS_RE, commit_summary):\n if not match.group(3):\n continue\n\n for reviewer in re.split(LIST_RE, match.group(3)):\n if flag_re is None:\n yield reviewer\n elif flag_re.match(match.group(2)):\n yield reviewer\n\n\ndef parse_requal_reviewers(commit_description):\n for reviewer in parse_reviewers(commit_description,\n flag_re=REQUAL_SPECIFIER_RE):\n 
yield reviewer\n\n\ndef parse_rquestion_reviewers(commit_description):\n for reviewer in parse_reviewers(commit_description,\n flag_re=RQUESTION_SPECIFIER_RE):\n yield reviewer\n\n\ndef replace_reviewers(commit_description, reviewers):\n if not reviewers:\n reviewers_str = ''\n else:\n reviewers_str = 'r=' + ','.join(reviewers)\n\n commit_description = commit_description.splitlines()\n commit_summary = commit_description.pop(0)\n commit_description = '\\n'.join(commit_description)\n\n if not R_SPECIFIER_RE.search(commit_summary):\n commit_summary += ' ' + reviewers_str\n else:\n # replace the first r? with the reviewer list, and all subsequent\n # occurences with a marker to mark the blocks we need to remove\n # later\n d = {'first': True}\n\n def replace_first_reviewer(matchobj):\n if R_SPECIFIER_RE.match(matchobj.group(2)):\n if d['first']:\n d['first'] = False\n return matchobj.group(1) + reviewers_str\n else:\n return '\\0'\n else:\n return matchobj.group(0)\n\n commit_summary = re.sub(REVIEWERS_RE, replace_first_reviewer,\n commit_summary)\n\n # remove marker values as well as leading separators. this allows us\n # to remove runs of multiple reviewers and retain the trailing\n # separator.\n commit_summary = re.sub(LIST + '\\0', '', commit_summary)\n commit_summary = re.sub('\\0', '', commit_summary)\n\n if commit_description == \"\":\n return commit_summary.strip()\n else:\n return commit_summary.strip() + \"\\n\" + commit_description\n\n\ndef is_backout(commit_desc):\n \"\"\"Returns True if the first line of the commit description appears to\n contain a backout.\n\n Backout commits should always result in is_backout() returning True,\n and parse_backouts() not returning None. Malformed backouts may return\n True here and None from parse_backouts().\"\"\"\n return BACKOUT_KEYWORD_RE.match(commit_desc) is not None\n\n\ndef parse_backouts(commit_desc, strict=False):\n \"\"\"Look for backout annotations in a string.\n\n Returns a 2-tuple of (nodes, bugs) where each entry is an iterable of\n changeset identifiers and bug numbers that were backed out, respectively.\n Or return None if no backout info is available.\n\n Setting `strict` to True will enable stricter validation of the commit\n description (eg. ensuring N commits are provided when given N commits are\n being backed out).\n \"\"\"\n if not is_backout(commit_desc):\n return None\n\n lines = commit_desc.splitlines()\n first_line = lines[0]\n\n # Single backout.\n m = BACKOUT_SINGLE_RE.match(first_line)\n if m:\n return [m.group('node')], parse_bugs(first_line)\n\n # Multiple backouts, with nodes listed in commit description.\n m = BACKOUT_MULTI_SPLIT_RE.match(first_line)\n if m:\n expected = int(m.group('count'))\n nodes = []\n for line in lines[1:]:\n single_m = BACKOUT_SINGLE_RE.match(line)\n if single_m:\n nodes.append(single_m.group('node'))\n if strict:\n # The correct number of nodes must be specified.\n if expected != len(nodes):\n return None\n return nodes, parse_bugs(commit_desc)\n\n # Multiple backouts, with nodes listed on the first line\n m = BACKOUT_MULTI_ONELINE_RE.match(first_line)\n if m:\n return SHORT_NODE_RE.findall(m.group('nodes')), parse_bugs(first_line)\n\n return None\n\n\ndef strip_commit_metadata(s):\n \"\"\"Strips metadata related to commit tracking.\n\n Will strip lines like \"MozReview-Commit-ID: foo\" from the commit\n message.\n \"\"\"\n # TODO this parsing is overly simplied. 
There is room to handle\n # empty lines before the metadata.\n lines = [l for l in s.splitlines() if not METADATA_RE.match(l)]\n\n while lines and not lines[-1].strip():\n lines.pop(-1)\n\n if type(s) == str:\n joiner = b'\\n'\n elif type(s) == unicode:\n joiner = u'\\n'\n else:\n raise TypeError('do not know type of commit message: %s' % type(s))\n\n return joiner.join(lines)\n\n\ndef parse_commit_id(s):\n \"\"\"Parse a MozReview-Commit-ID value out of a string.\n\n Returns None if the commit ID is not found.\n \"\"\"\n m = re.search('^MozReview-Commit-ID: ([a-zA-Z0-9]+)$', s, re.MULTILINE)\n if not m:\n return None\n\n return m.group(1)\n\n\nRE_SOURCE_REPO = re.compile('^Source-Repo: (https?:\\/\\/.*)$',\n re.MULTILINE)\nRE_SOURCE_REVISION = re.compile('^Source-Revision: (.*)$', re.MULTILINE)\n\nRE_XCHANNEL_REVISION = re.compile(\n '^X-Channel-Repo: (?P[a-zA-Z0-9/\\-._]+?)\\n'\n 'X-Channel-Converted-Revision: (?P[a-fA-F0-9]{12,40}?)$',\n re.MULTILINE)\n\n\ndef xchannel_link(m):\n s = m.group()[:(m.start('revision') - m.start())]\n l = '{revision}'\n s += l.format(\n repo=m.group('repo'),\n revision=m.group('revision'),\n )\n s += m.group()[(m.end('revision') - m.start()):]\n return s\n\n\ndef add_hyperlinks(s,\n bugzilla_url='https://bugzilla.mozilla.org/show_bug.cgi?id='):\n \"\"\"Add hyperlinks to a commit message.\n\n This is useful to be used as a Mercurial template filter for converting\n plain text into rich HTML.\n \"\"\"\n # Look for annotations saying this commit originally came from elsewhere.\n # If these are present, we are less aggressive about e.g. linking numbers\n # to Bugzilla bugs.\n source_repo = None\n github_repo = None\n\n m = RE_SOURCE_REPO.search(s)\n if m:\n source_repo = m.group(1)\n\n if source_repo.startswith('https://github.com/'):\n github_repo = source_repo[len('https://github.com/'):]\n\n start, end = m.span(1)\n\n s = '%s%s%s' % (\n s[0:start],\n cgi.escape(source_repo),\n cgi.escape(source_repo),\n s[end:])\n\n m = RE_SOURCE_REVISION.search(s)\n if m:\n source_revision = m.group(1)\n\n start, end = m.span(1)\n\n # Hyperlink to GitHub commits.\n if github_repo:\n s = '%s%s%s' % (\n s[0:start],\n cgi.escape(github_repo),\n cgi.escape(source_revision),\n cgi.escape(source_revision),\n s[end:])\n\n # We replace #\\d+ with links to the GitHub issue.\n if github_repo:\n repl = r'#\\1' % github_repo\n s = re.sub(r'#(\\d+)', repl, s)\n\n # Bugzilla linking.\n bugzilla_re = BUG_CONSERVATIVE_RE if github_repo else BUG_RE\n bugzilla_link = r'\\1' % bugzilla_url\n s = bugzilla_re.sub(bugzilla_link, s)\n\n # l10n cross channel linking\n s = RE_XCHANNEL_REVISION.sub(xchannel_link, s)\n\n return s\n","sub_path":"pylib/mozautomation/mozautomation/commitparser.py","file_name":"commitparser.py","file_ext":"py","file_size_in_byte":11418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580913797","text":"# Create your views here.\n\nfrom django.shortcuts import render\nfrom django.template import loader\nfrom django.http import Http404, HttpResponse, HttpResponseRedirect\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom .models import Rubro\nfrom .forms import RubroForm\nfrom VeterinariaPatagonica import tools\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\ndef rubros(request):\n\n context = {}#Defino el contexto.\n template = loader.get_template('GestionDeRubros/GestionDeRubros.html')#Cargo el 
template desde la carpeta templates/GestionDeRubros.\n return HttpResponse(template.render(context, request))#Devuelvo la url con el template armado.\n\n\n@login_required(redirect_field_name='proxima')\n@permission_required('GestionDeRubros.add_Rubro', raise_exception=True)\ndef modificar(request, id = None):\n\n rubro = Rubro.objects.get(id=id) if id is not None else None\n if (id==None):\n context = {\"titulo\": 1, 'usuario': request.user}\n else:\n context = {\"titulo\": 2, 'usuario': request.user}\n if request.method == 'POST':\n formulario = RubroForm(request.POST, instance=rubro)\n print(formulario)\n if formulario.is_valid():\n rubro = formulario.save()\n return HttpResponseRedirect(\"/GestionDeRubros/ver/{}\".format(rubro.id))\n else:\n context['formulario'] = formulario\n else:\n context['formulario'] = RubroForm(instance=rubro)\n template = loader.get_template('GestionDeRubros/formulario.html')\n return HttpResponse(template.render(context, request))\n\n\n@login_required(redirect_field_name='proxima')\n@permission_required('GestionDeRubros.delete_Rubro', raise_exception=True)\ndef habilitar(request, id):\n try:\n rubro = Rubro.objects.get(id=id)\n except ObjectDoesNotExist:\n raise Http404()\n\n rubro.baja = False\n rubro.save()\n\n return HttpResponseRedirect( \"/GestionDeRubros/verHabilitados/\" )\n\n@login_required(redirect_field_name='proxima')\n@permission_required('GestionDeRubros.delete_Rubro', raise_exception=True)\ndef deshabilitar(request, id):\n\n try:\n rubro = Rubro.objects.get(id=id)\n except ObjectDoesNotExist:\n raise Http404()\n\n rubro.baja = True\n rubro.save()\n\n return HttpResponseRedirect( \"/GestionDeRubros/verDeshabilitados/\" )\n\n@login_required(redirect_field_name='proxima')\n@permission_required('GestionDeRubros.delete_Rubro', raise_exception=True)\ndef eliminar(request, id):\n try:\n rubro = Rubro.objects.get(id=id)\n except ObjectDoesNotExist:\n raise Http404()\n if request.method == 'POST':\n rubro.delete()\n return HttpResponseRedirect( \"/GestionDeRubros/verDeshabilitados/\" )\n else:\n template = loader.get_template('GestionDeRubros/eliminar.html')\n context = {\n 'usuario' : request.user,\n 'id' : id\n }\n return HttpResponse( template.render( context, request) )\n\ndef ver(request, id):\n\n try:\n rubro = Rubro.objects.get(id=id)\n except ObjectDoesNotExist:\n raise Http404(\"No encontrado\", \"El rubro con id={} no existe.\".format(id))\n\n template = loader.get_template('GestionDeRubros/ver.html')\n contexto = {\n 'rubro': rubro,\n 'usuario': request.user\n }\n\n return HttpResponse(template.render(contexto, request))\n\ndef verHabilitados(request):\n rubrosQuery = Rubro.objects.habilitados()\n rubrosQuery = rubrosQuery.filter(tools.paramsToFilter(request.GET, Rubro))\n template = loader.get_template('GestionDeRubros/verHabilitados.html')\n\n paginator = Paginator(rubrosQuery, 3)\n page = request.GET.get('page')\n\n try:\n rubros = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n rubros = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n rubros = paginator.page(paginator.num_pages)\n\n contexto = {\n 'rubrosQuery' : rubrosQuery,\n 'usuario' : request.user,\n 'rubros': rubros,\n }\n\n return HttpResponse(template.render(contexto,request))\n\n\ndef verDeshabilitados(request):\n rubrosQuery = Rubro.objects.deshabilitados()\n rubrosQuery = rubrosQuery.filter(tools.paramsToFilter(request.GET, Rubro))\n template = loader.get_template('GestionDeRubros/verDeshabilitados.html')\n\n paginator = Paginator(rubrosQuery, 3)\n page = request.GET.get('page')\n\n try:\n rubros = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n rubros = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n rubros = paginator.page(paginator.num_pages)\n\n contexto = {\n 'rubrosQuery': rubrosQuery,\n 'usuario': request.user,\n 'rubros': rubros,\n }\n\n return HttpResponse(template.render(contexto,request))\n","sub_path":"VeterinariaPatagonica/Apps/GestionDeRubros/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445819979","text":"# pip3 install attrs\nimport attr\nimport pprint\nimport pygame\nimport time\n\n# Classes (frozen means immutable objects)\n# See attrs docs: https://www.attrs.org\n@attr.s(frozen=True)\nclass Node(object):\n\tx = attr.ib()\n\ty = attr.ib()\n\n@attr.s(frozen=True)\nclass Segment(object):\n\tnodes = attr.ib()\n\ttrans = attr.ib()\n\n@attr.s(frozen=True)\nclass Shape(object):\n\tsegments = attr.ib()\n\n@attr.s(frozen=True)\nclass MoveableNode(object):\n\tsegment_id = attr.ib()\n\tnode_id = attr.ib()\n\tnode = attr.ib()\n\n# Functions\ndef shape_combined_segments(shape: Shape):\n\tsegments = shape.segments\n\n\ttransposed_segments = []\n\tfor segment in segments:\n\t\ttransposed_segments.append(transpose_segment(segment))\n\n\treturn segments + transposed_segments\n\ndef transpose_segment(segment: Segment):\n\tif segment.trans == 'x':\n\t\ttransposed_nodes = [Node(200 + node.x, node.y) for node in segment.nodes] # mirror x coordinate\n\t\ttransposed_nodes.reverse() # reverse node order\n\t\treturn Segment(transposed_nodes, 'x')\n\n\tif segment.trans == 'y':\n\t\ttransposed_nodes = [Node(node.x, node.y - 200) for node in segment.nodes] # mirror y coordinate\n\t\ttransposed_nodes.reverse() # reverse node order\n\t\treturn Segment(transposed_nodes, 'y')\n\ndef print_combined_segments(shape: Shape):\n\tpprint.pp(shape_combined_segments(shape))\n\ndef add_node_to_shape(shape: Shape, segment_id, node_id, node: Node):\n\tsegments = shape.segments\n\tsegment = segments[segment_id]\n\tnodes = segment.nodes\n\tnodes.insert(node_id, node)\n\tsegments[segment_id] = Segment(nodes, segment.trans)\n\treturn Shape(segments)\n\ndef replace_node_in_shape(shape: Shape, segment_id, node_id, node: Node):\n\tsegments = shape.segments\n\tsegment = segments[segment_id]\n\tnodes = segment.nodes\n\tnodes[node_id] = node\n\tsegments[segment_id] = Segment(nodes, segment.trans)\n\treturn Shape(segments)\n\ndef shape_coordinates(shape:Shape):\n\tnodes = []\n\tfor segment in shape_combined_segments(shape):\n\t\tnodes += segment.nodes[:-1] # exclude every last node in segment to prevent overlap\n\tnodes.append(nodes[0]) # Duplicate the start node to the end to close the shape\n\tcoordinates = [(node.x, node.y) for node in nodes]\n\treturn coordinates\n\ndef print_coordinates(shape: 
Shape):\n\tpprint.pp(shape_coordinates(shape))\n\ndef shape_movable_nodes(shape:Shape):\n\tmoveable_nodes = []\n\tfor segment_id, segment in enumerate(shape.segments):\n\t\tfor node_id, node in enumerate(segment.nodes[1:-1]):\n\t\t\tmoveable_node = MoveableNode(segment_id, node_id + 1, node)\n\t\t\tmoveable_nodes.append(moveable_node)\n\treturn moveable_nodes\n\n# Create start square\ndef create_square_shape():\n\t# Create square\n\tnode1 = Node(-100, -100) # left-bottom\n\tnode2 = Node(-100, 100) # left-top\n\tnode3 = Node( 100, 100) # right-top\n\n\tsegment1 = Segment(\n\t\tnodes = [node1, node2], \n\t\ttrans = 'x'\n\t)\n\tsegment2 = Segment(\n\t\tnodes = [node2, node3], \n\t\ttrans = 'y'\n\t)\n\treturn Shape([segment1, segment2])\n\n\n# Pygame\nfrom pygame.locals import *\npygame.init()\nscreen = pygame.display.set_mode([750, 750])\nclock = pygame.time.Clock()\n\n# Set colors\nblack = (0,0,0)\n#green = (0,255,0)\n#blue = (0,0,255)\ngreybrown = (139,146,154)\n\nwhite = (255,255,255)\nred = (255,25,55)\nlightgreenblue = (182,220,233)\ndarkgreenblue = (48,124,145)\ngreywhite = (229,227,228)\nbrown = (123,92,82)\n\ncolor1 = lightgreenblue\ncolor2 = darkgreenblue\n\nX,Y,Z = 0,1,2\n\n# Set origin (0, 0) in the center of the screen instead of top-left and flip direction of y-axis\ncoord_to_screen = lambda c, center: (c[0] + center[0] + screen.get_width() // 2, - c[1] + center[1] + screen.get_height() // 2)\ncoords_to_screen = lambda l, center: [coord_to_screen(coordinates, center) for coordinates in l]\nscreen_to_coord = lambda s, center: (s[0] - center[0] - screen.get_width() // 2, - s[1] + center[1] + screen.get_height() // 2) \n\n# Set the start shape\nshape = create_square_shape()\nshape = add_node_to_shape(shape, segment_id=0, node_id=1, node=Node(-100, -30))\nshape = add_node_to_shape(shape, segment_id=0, node_id=2, node=Node(-70, 0))\nshape = add_node_to_shape(shape, segment_id=0, node_id=3, node=Node(-70, 30))\nshape = add_node_to_shape(shape, segment_id=0, node_id=4, node=Node(-100, 30))\nshape = add_node_to_shape(shape, segment_id=1, node_id=1, node=Node(-20, 100))\nshape = add_node_to_shape(shape, segment_id=1, node_id=2, node=Node(0, 75))\nshape = add_node_to_shape(shape, segment_id=1, node_id=3, node=Node(20, 100))\nprint(\"Start shape\")\nprint_combined_segments(shape)\nprint_coordinates(shape)\n\n# Select the start node for movement\nselected = None\n\n# Set the texts\nfont = pygame.font.Font(pygame.font.get_default_font(), 14)\ndraw_text = lambda text, pos: screen.blit(font.render(text, True, brown, greywhite), pos)\n\n# Start loop\nrunning = True\nwhile running:\n\tmovable_nodes = shape_movable_nodes(shape)\n\n\t# Single key-press\n\tfor event in pygame.event.get():\n\t\tif event.type == KEYDOWN:\n\t\t\tif event.key == K_ESCAPE:\n\t\t\t\trunning = False\n\n\t\telif event.type == MOUSEBUTTONDOWN and event.button == 1:\n\t\t\tmouse_x, mouse_y = screen_to_coord(pygame.mouse.get_pos(), (0,0))\n\t\t\tprint(pygame.mouse.get_pos(), mouse_x, mouse_y)\n\t\t\tfor m in movable_nodes:\n\t\t\t\tif abs(m.node.x - mouse_x) < 10 and abs(m.node.y - mouse_y) < 10 :\n\t\t\t\t\tselected = m\n\n\t\telif event.type == MOUSEBUTTONUP and event.button == 1:\n\t\t\tselected = None\n\n\t\telif event.type == QUIT:\n\t\t\trunning = False\n\n\tscreen.fill(white)\n\n\tcolor = color1\n\tfor x_center in range(-400,600,200):\n\t\tfor y_center in range(-400,600,200):\n\t\t\tpygame.draw.polygon(screen, color, coords_to_screen(shape_coordinates(shape), (x_center,y_center)))\n\t\t\tcolor = color2 if color == 
color1 else color1\n\n\tmovable_nodes = shape_movable_nodes(shape)\n\tfor moveable_node in movable_nodes:\n\t\tcoord = (moveable_node.node.x, moveable_node.node.y)\n\t\tpygame.draw.circle(screen, black, coord_to_screen(coord, (0,0)), 1)\n\n\tif selected is not None:\n\t\tmouse_pos = pygame.mouse.get_pos();\n\t\tmouse_x, mouse_y = screen_to_coord(mouse_pos, (0,0))\n\t\tshape = replace_node_in_shape(shape, selected.segment_id, selected.node_id, Node(mouse_x, mouse_y))\n\t\tpygame.draw.circle(screen, red, (mouse_pos[0], mouse_pos[1]), 5)\n\n\tdraw_text(\"ESCHER MAKER\", (10, 10))\n\t# draw_text(\"Select with tab. Move with arrows. Add with a\", (10, 30))\n\t# draw_text(f\"Segment: {selected_segment_id}\", (10, 50))\n\t# draw_text(f\"Node: {selected_node_id}\", (110, 50))\n\t# draw_text(f\"Position: ({selected_node.x}, {selected_node.y})\", (185, 50))\n\n\tpygame.display.update()\n\n\tclock.tick(60)\n\npygame.quit()\n","sub_path":"backup/escher-pygame.py","file_name":"escher-pygame.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"241480361","text":"import logging\nimport re\n\nfrom streamlink.compat import urlparse\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HLSStream, HTTPStream\ntry:\n from urlparse import urljoin # Python 2\nexcept ImportError:\n from urllib.parse import urljoin # Python 3\n\nlog = logging.getLogger(__name__)\n\nclass NetondemandMt(Plugin):\n '''\n Support for live TV channel and videos on netondemand.mt\n '''\n url_re = re.compile(r'https?://(www\\.)?netondemand\\.mt')\n\n stream_re = re.compile(r'\"sourceURL\"\\s*:\\s*\"((?:http(s)?:)?//[^\"]*?)\"')\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n\n stream_url = None\n\n if '/play/' in self.url:\n # Playing video\n log.info('Playing video')\n\n # Find video source URL\n for source in itertags(res.text, 'source'):\n if source.attributes.get('src'):\n stream_url = source.attributes.get('src')\n break\n \n stream_url = urljoin(self.url, stream_url)\n\n else:\n # Playing live TV channel\n log.info('Playing live TV channel')\n\n # Find stream URL\n stream_url_m = self.stream_re.search(res.text)\n stream_url = stream_url_m and stream_url_m.group(1)\n\n if not stream_url:\n log.error('Could not find stream URL')\n return\n\n log.debug('Found stream URL: {}', stream_url)\n\n if '.m3u8' in stream_url:\n streams = HLSStream.parse_variant_playlist(self.session, stream_url, verify=False)\n if not streams:\n log.debug('Play whole m3u8 file')\n yield 'live', HLSStream(self.session, stream_url, verify=False)\n else:\n log.debug('Play single stream')\n for s in streams.items():\n yield s\n\n else:\n yield 'video', HTTPStream(self.session, stream_url, verify=False)\n\n\n__plugin__ = NetondemandMt\n","sub_path":"netondemand_mt.py","file_name":"netondemand_mt.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359560894","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\n#import pickle\nimport re\nimport math\nfrom PIL import Image, ImageDraw, ImageFont\n\nfileExt = raw_input('Which book? 
')\n\nim = Image.open('data/HP' + fileExt + '_Cover.jpg')\nim = im.convert(mode='L')\nim.thumbnail((1000, 1500), Image.ANTIALIAS)\nwidth, height = im.size\nprint (width, height)\n\nnotLetters = ('\\'', '\"', ',', '.', '!', ' ', '\\n', '-', '(', ')')\n#count = 0\n\n\ndef getVal(j, i):\n tot = 0\n for a in range(3):\n for b in range(2):\n tot += im.getpixel((j*2+b, i*3+a))\n return int((tot/24.0)/255.0*100*5)\n\n\ntext = []\nwith open('data/HP' + fileExt + '_1.txt') as f:\n while True:\n c = f.read(1) # format this now\n if not c:\n break\n if re.match('^[a-zA-Z0-9]', c):\n text.append(c)\n#img = Image.new('RGB', (120 * 308, 200*231), color = (255,255,255))\nprint (len(text))\nrowNum = int(math.floor(height / 3))\ncolNum = int(math.floor(width / 2))\nprint (rowNum, colNum)\n#img = Image.new('RGB', (78 * 500, 117*497), color = (255,255,255))\nimg = Image.new('RGB', (78 * (colNum + 1), 117 *\n (rowNum + 1)), color=(255, 255, 255))\npixTotal = []\n# Col limit is width / b range from getVal\n# Row limit is Height / a range from getVal\nfor i in range(0, rowNum):\n # images.append([])\n for j in range(0, colNum):\n #print c.upper()\n #count += 1\n pix = getVal(j, i)\n #print \"\\t\", j\n pixTotal.append(pix)\n\n if text:\n msg = text.pop(0).upper()\n if(msg == 'C' and text[0].upper() == 'H' and text[1].upper() == 'A' and text[2].upper() == 'P' and text[3].upper() == 'T' and text[4].upper() == 'E' and text[5].upper() == 'R'):\n for n in range(0, 15):\n print(text[n], sep='', end='')\n print(' ')\n\n else:\n\n break\n #img = Image.new('RGB', (120,200), color = (255,255,255))\n W, H = (90, 150)\n\n d = ImageDraw.Draw(img)\n if pix > 100:\n pix = 100\n myFont = ImageFont.truetype(\n \"/usr/share/fonts/liberation/LiberationMono-Regular.ttf\", (180 - pix))\n w, h = d.textsize(msg, font=myFont)\n #d.text((j*90 + (W-w)/2, i*150 + (H-h)/2), msg, fill=(0,0,0), font=myFont)\n d.text((j*78 + (W-w)/2, i*117 + (H-h)/2),\n msg, fill=(0, 0, 0), font=myFont)\n\n # images[i].append(img)\n\n print (i)\n if not text:\n\n break\n #print(\"AVG: \", sum(pixTotal) / len(pixTotal))\n #print(\"MAX: \", max(pixTotal))\nprint (len(text))\n# img.save('HP1TextNewLimit100from180.png')\nimg.save('output/HP' + fileExt + '.png')\nimg.thumbnail((14400, 14400), Image.ANTIALIAS)\n# img.save('HP1TextNewLimit100from180_small.png')\nimg.save('output/HP' + fileExt + '_small.png')\n\n'''\nimages = []\nfor i in range(0,231):\n\timages.append([])\n\tfor j in range (0,308):\n\t\t#print c.upper()\n\t\t#count += 1\n\t\tpix = getVal(i,j)\n\n\t\tif text:\n\t\t\tmsg = text.pop(0)\n\t\telse:\n\t\t\tbreak\n\t\timg = Image.new('RGB', (120,200), color = (255,255,255))\n\t\tW, H = (120, 200)\n\n\t\td = ImageDraw.Draw(img)\n\t\tmyFont = ImageFont.truetype(\"/usr/share/fonts/liberation/LiberationMono-Regular.ttf\",(pix + 10))\n\t\tw, h = d.textsize(msg, font=myFont)\n\t\td.text(((W-w)/2, (H-h)/2), msg, fill=(0,0,0), font=myFont)\n\n\t\timages[i].append(img)\n\n\tprint i\n\tif not text:\n\t\tbreak\n\ncombinedImg = []\nfor row in images:\n\t#images = map(Image.open, ['text6.png', 'text6.png', 'text6.png'])\n\twidths, heights = zip(*(i.size for i in row))\n\n\ttotal_width = sum(widths)\n\tmax_height = max(heights)\n\n\tnew_im = Image.new('RGB', (total_width, max_height))\n\n\tx_offset = 0\n\tfor im in row:\n\t new_im.paste(im, (x_offset,0))\n\t x_offset += im.size[0]\n\n\tcombinedImg.append(new_im)\n\n#images = map(Image.open, ['text6.png', 'text6.png', 'text6.png'])\nwidths, heights = zip(*(i.size for i in combinedImg))\n\ntotal_width = 
sum(widths)\nmax_height = max(heights)\n\nnew_im = Image.new('RGB', (total_width, max_height))\n\ny_offset = 0\nfor im in images:\n new_im.paste(im, (0,y_offset))\n y_offset += im.size[1]\n\nnew_im.save('HP1Text.png')\n#print count '''\n","sub_path":"makePosterGenLinted.py","file_name":"makePosterGenLinted.py","file_ext":"py","file_size_in_byte":4097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338863","text":"# Solution for sWAP cASE\n\ndef swap_case(s):\n # Easy way\n # return s.swapcase()\n\n # Solution via ascii table\n chars = []\n\n for c in s:\n ord_c = ord(c)\n if 65 <= ord_c <= 90:\n ord_c = ord_c + 32\n elif 97 <= ord_c <= 122:\n ord_c = ord_c - 32\n\n chars.append(chr(ord_c))\n\n return \"\".join(chars)\n\n\nif __name__ == '__main__':\n s = input()\n result = swap_case(s)\n print(result)\n","sub_path":"Python/Python/Strings/sWAP cASE.py","file_name":"sWAP cASE.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155018305","text":"#Troy Prince\n#cherrpy primer\n\nimport cherrypy\nimport re, json\n\n\nclass ResetController(object):\n\n def __init__(self, mdb=None):\n self.mdb = mdb\n print(\"Reset Init\")\n \n def PUT(self):\n output = { 'result' : 'success'}\n\n try:\n mdb.load_movies()\n except Exception as ex:\n output['result'] = 'error'\n output['message'] = str(ex)\n\n return json.dumps(output)\n\n def PUT_K(self, key):\n #else:\n output = { 'result' : 'success'}\n \n try:\n mdb.load_one_movie()\n except Exception as ex:\n output['result'] = 'error'\n output['message'] = str(ex)\n \n return json.dumps(output)\n","sub_path":"ResetController.py","file_name":"ResetController.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"200313210","text":"# --- coding:utf-8 ---\nimport torch.nn as nn\nfrom transformers import BertForMaskedLM, BertModel\n\n\nclass IntentModel(nn.Module):\n def __init__(self,args):\n super(IntentModel,self).__init__()\n\n self.model = BertModel.from_pretrained(args.pretrained_model_name)\n self.dropout = nn.Dropout(0.1)\n self.mlp = nn.Linear(768, 2)\n\n def forward(self, input_ids, attention_mask=None, token_type_ids=None):\n outputs = self.model(input_ids=input_ids,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids)\n\n cls_out = outputs['pooler_output']\n\n output = self.dropout(cls_out)\n output = self.mlp(output)\n return output","sub_path":"基于规则的DST对话系统/chatbot/intent/model/bert_model.py","file_name":"bert_model.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156953734","text":"from tkinter import *\r\nfrom PIL import ImageTk, Image\r\nfrom random import randint\r\nimport random\r\nfrom tkinter import messagebox\r\n\r\n\r\nroot=Tk()\r\nroot.title(\"Flash App\")\r\nroot.geometry(\"810x550\")\r\nload = Image.open(\"C:/gui/india.png\")\r\nrender = ImageTk.PhotoImage(load)\r\nimg = Label(root, image=render)\r\nimg.image = render\r\nimg.place(x=2, y=2)\r\n# alabel=Label(root,text=\"Created by\"+ \" Yash Mehta 1811024 \" +\"Nidhi Nair 1811028\").grid(row=1,column=1,columnspan=3)\r\n\r\n\r\ndef math_random():\r\n # generate a random number\r\n global num_1\r\n global num_2\r\n num_1=randint(0,10)\r\n num_2=randint(0,10)\r\n\r\n global add_image1\r\n global add_image2\r\n 
card1=\"C:/gui/Flashcards/\" + str(num_1) +\".png\"\r\n card2=\"C:/gui/Flashcards/\" + str(num_2) +\".png\"\r\n add_image1=ImageTk.PhotoImage(Image.open(card1))\r\n add_image2=ImageTk.PhotoImage(Image.open(card2))\r\n \r\n # put flashcard on screen\r\n add_1.config(image=add_image1)\r\n add_2.config(image=add_image2)\r\n\r\n\r\n\r\n\r\n# create addition answer function\r\ndef answer_sub():\r\n answer=num_1-num_2\r\n if sub_answer.get()==\"\":\r\n messagebox.showerror(\"Error\",\"Write a value\")\r\n elif int(sub_answer.get())==answer:\r\n response=\"Correct! \"+str(num_1) +\" - \"+str(num_2)+\" = \"+str(answer)\r\n answer_message.config(text=response)\r\n else:\r\n response=\"Wrong! \"+str(num_1) +\" - \"+str(num_2)+\" = \"+str(answer) +\" Not \"+sub_answer.get()\r\n answer_message.config(text=response)\r\n \r\n sub_answer.delete(0,END)\r\n math_random()\r\n\r\n\r\n\r\n\r\n\r\n\r\n# create addition math flashcard function\r\ndef sub():\r\n hide_all_frames()\r\n sub_frame.pack(fill=\"both\",expand=1)\r\n\r\n add_label=Label(sub_frame,text=\"Subtraction Flashcards\",font=(\"Helvetica\",18)).pack(pady=15)\r\n pic_frame=Frame(sub_frame,width=400,height=300)\r\n pic_frame.pack()\r\n\r\n # generate a random number\r\n global num_1\r\n global num_2\r\n num_1=randint(0,10)\r\n num_2=randint(0,10)\r\n\r\n # create 3 labels inside our pic frame\r\n global add_1\r\n global add_2\r\n add_1=Label(pic_frame)\r\n add_2=Label(pic_frame)\r\n math_sign=Label(pic_frame,text=\"-\",font=(\"Helvetica\",28))\r\n # grid labels\r\n add_1.grid(row=0,column=0)\r\n math_sign.grid(row=0,column=1)\r\n add_2.grid(row=0,column=2)\r\n\r\n global add_image1\r\n global add_image2\r\n card1=\"C:/gui/Flashcards/\" + str(num_1) +\".png\"\r\n card2=\"C:/gui/Flashcards/\" + str(num_2) +\".png\"\r\n add_image1=ImageTk.PhotoImage(Image.open(card1))\r\n add_image2=ImageTk.PhotoImage(Image.open(card2))\r\n \r\n # put flashcard on screen\r\n add_1.config(image=add_image1)\r\n add_2.config(image=add_image2)\r\n\r\n\r\n # create answer box and button\r\n global sub_answer\r\n sub_answer=Entry(sub_frame,font=(\"Helvetica\",18))\r\n sub_answer.pack(pady=50)\r\n\r\n sub_answer_button=Button(sub_frame,text=\"Answer\",command=answer_sub)\r\n sub_answer_button.pack()\r\n\r\n global answer_message\r\n answer_message =Label(sub_frame,text=\"\",font=(\"Helvetica\",18))\r\n answer_message.pack(pady=40)\r\n\r\n\r\n\r\n\r\n# create addition answer function\r\ndef answer_add():\r\n answer=num_1+num_2\r\n if add_answer.get()==\"\":\r\n messagebox.showerror(\"Error\",\"Write a value\")\r\n elif int(add_answer.get())==answer:\r\n response=\"Correct! \"+str(num_1) +\" + \"+str(num_2)+\" = \"+str(answer)\r\n answer_message.config(text=response)\r\n else:\r\n response=\"Wrong! 
\"+str(num_1) +\" + \"+str(num_2)+\" = \"+str(answer) +\" Not \"+add_answer.get()\r\n answer_message.config(text=response)\r\n \r\n add_answer.delete(0,END)\r\n math_random()\r\n\r\n\r\n\r\n\r\n\r\n# create addition math flashcard function\r\ndef add():\r\n hide_all_frames()\r\n add_frame.pack(fill=\"both\",expand=1)\r\n\r\n add_label=Label(add_frame,text=\"Addition Flashcards\",font=(\"Helvetica\",18)).pack(pady=15)\r\n pic_frame=Frame(add_frame,width=400,height=300)\r\n pic_frame.pack()\r\n\r\n # generate a random number\r\n global num_1\r\n global num_2\r\n num_1=randint(0,10)\r\n num_2=randint(0,10)\r\n\r\n # create 3 labels inside our pic frame\r\n global add_1\r\n global add_2\r\n add_1=Label(pic_frame)\r\n add_2=Label(pic_frame)\r\n math_sign=Label(pic_frame,text=\"+\",font=(\"Helvetica\",28))\r\n # grid labels\r\n add_1.grid(row=0,column=0)\r\n math_sign.grid(row=0,column=1)\r\n add_2.grid(row=0,column=2)\r\n\r\n global add_image1\r\n global add_image2\r\n card1=\"C:/gui/Flashcards/\" + str(num_1) +\".png\"\r\n card2=\"C:/gui/Flashcards/\" + str(num_2) +\".png\"\r\n add_image1=ImageTk.PhotoImage(Image.open(card1))\r\n add_image2=ImageTk.PhotoImage(Image.open(card2))\r\n \r\n # put flashcard on screen\r\n add_1.config(image=add_image1)\r\n add_2.config(image=add_image2)\r\n\r\n\r\n # create answer box and button\r\n global add_answer\r\n add_answer=Entry(add_frame,font=(\"Helvetica\",18))\r\n add_answer.pack(pady=50)\r\n\r\n add_answer_button=Button(add_frame,text=\"Answer\",command=answer_add)\r\n add_answer_button.pack()\r\n\r\n global answer_message\r\n answer_message =Label(add_frame,text=\"\",font=(\"Helvetica\",18))\r\n answer_message.pack(pady=40)\r\n\r\n\r\n\r\n\r\n\r\n# create randomizing state function \r\ndef random_state():\r\n # create a list of our state names\r\n global our_states\r\n our_states=['andhrapradesh','arunachalpradesh','assam','bihar',\r\n 'chattisgarh','goa','gujarat','haryana',\r\n 'himachalpradesh','jharkhand','karnataka','kerala',\r\n 'madhyapradesh','maharashtra','manipur','meghalaya',\r\n 'mizoram','nagaland','odisha','punjab',\r\n 'rajasthan','sikkim','tamilnadu','telangana',\r\n 'tripura','uttarakhand','uttarpradesh','westbengal']\r\n\r\n # generate random number\r\n global rando\r\n rando=randint(0,len(our_states)-1)\r\n state1=\"C:/gui/states/\" + our_states[rando] +\".png\"\r\n\r\n # create state images\r\n global state_img\r\n state_img=ImageTk.PhotoImage(Image.open(state1))\r\n show_state.config(image=state_img,bg=\"white\")\r\n\r\n\r\n\r\n\r\n\r\n# create state capital answers\r\ndef state_capital_answer():\r\n if capital_radio.get() == our_state_capitals[answer]:\r\n response = \"Correct! \"+our_state_capitals[answer].title()+\" is the capital of \"+answer.title()\r\n else:\r\n response=\"Incorrect! \"+our_state_capitals[answer].title()+\" is the capital of \"+answer.title()\r\n\r\n answer_label_capitals.config(text=response)\r\n\r\n\r\n\r\n\r\n# create answer function\r\ndef state_answer():\r\n answer=answer_input.get()\r\n answer=answer.replace(\"\",\"\")\r\n\r\n # determine if our answer is right or wrong\r\n if answer.lower()==\"\":\r\n messagebox.showerror(\"Error\",\"Atleast give a miss!!\")\r\n elif answer.lower()==our_states[rando]:\r\n response=\"correct \" +our_states[rando].title()\r\n answer_label.config(text=response)\r\n else:\r\n response=\"Incorrect! 
\"+our_states[rando].title()\r\n answer_label.config(text=response) \r\n\r\n\r\n # answer_label.config(text=response)\r\n\r\n # clear the entry box\r\n answer_input.delete(0,END)\r\n\r\n\r\n random_state()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# create state flashcard function\r\ndef states():\r\n # hide previous frames\r\n hide_all_frames()\r\n state_frame.pack(fill=\"both\",expand=1)\r\n\r\n global show_state\r\n show_state=Label(state_frame)\r\n show_state.pack(pady=15)\r\n random_state()\r\n\r\n # create answer input box\r\n global answer_input\r\n answer_input=Entry(state_frame,font=(\"Helvetica\",18),bd=2)\r\n answer_input.pack(pady=15)\r\n\r\n # create button to randomize images\r\n rando_btn=Button(state_frame,text=\"Next State\",command=states)\r\n rando_btn.pack(pady=10)\r\n\r\n # create a button to ans the q \r\n answer_button=Button(state_frame,text=\"Answer\",command=state_answer)\r\n answer_button.pack(pady=5)\r\n\r\n # create a label to tell us if we got the right answer or not\r\n global answer_label\r\n answer_label=Label(state_frame,text=\"\",font=(\"Helvetica\",18),bg=\"white\")\r\n answer_label.pack(pady=15)\r\n\r\n\r\n\r\n\r\n\r\n# create state capital flashcard function\r\ndef state_capitals():\r\n hide_all_frames()\r\n state_capital_frame.pack(fill=\"both\",expand=1)\r\n # my_label=Label(state_capital_frame,text=\"Capitals\").pack()\r\n \r\n global show_state\r\n show_state=Label(state_capital_frame)\r\n show_state.pack(pady=15)\r\n\r\n global our_states\r\n our_states=['andhrapradesh','arunachalpradesh','assam','bihar',\r\n 'chattisgarh','goa','gujarat','haryana',\r\n 'himachalpradesh','jharkhand','karnataka','kerala',\r\n 'madhyapradesh','maharashtra','manipur','meghalaya',\r\n 'mizoram','nagaland','odisha','punjab',\r\n 'rajasthan','sikkim','tamilnadu','telangana',\r\n 'tripura','uttarakhand','uttarpradesh','westbengal']\r\n\r\n global our_state_capitals\r\n our_state_capitals={\r\n 'andhrapradesh':\"hyderabad\",\r\n 'arunachalpradesh':\"itanagar\",\r\n 'assam':\"dispur\",\r\n 'bihar':\"patna\",\r\n 'chattisgarh':\"raipur\",\r\n 'goa':\"panaji\",\r\n 'gujarat':\"gandhinagar\",\r\n 'haryana':\"chandigarh\",\r\n 'himachalpradesh':\"shimla\",\r\n 'jharkhand':\"ranchi\",\r\n 'karnataka':\"bangalore\",\r\n 'kerala':\"trivandrum\",\r\n 'madhyapradesh':\"bhopal\",\r\n 'maharashtra':\"mumbai\",\r\n 'manipur':\"imphal\",\r\n 'meghalaya':\"shillong\",\r\n 'mizoram':\"aizawl\",\r\n 'nagaland':\"kohima\",\r\n 'odisha':\"bhubaneshwar\",\r\n 'punjab':\"chandigarh\",\r\n 'rajasthan':\"jaipur\",\r\n 'sikkim':\"gangtok\",\r\n 'tamilnadu':\"chennai\",\r\n 'telangana':\"hyderabad\",\r\n 'tripura':\"agartala\",\r\n 'uttarakhand':\"dehradun\",\r\n 'uttarpradesh':\"lucknow\",\r\n 'westbengal':\"kolkata\"\r\n }\r\n\r\n # create empty answer list and counter\r\n answer_list=[]\r\n count = 1\r\n global answer\r\n # generate 3 random capitals\r\n while count <4:\r\n rando=randint(0,len(our_states)-1)\r\n # if first selection,make it our answer\r\n if count==1:\r\n answer=our_states[rando]\r\n global state_img\r\n state=\"C:/gui/states/\"+our_states[rando]+\".png\"\r\n state_img=ImageTk.PhotoImage(Image.open(state))\r\n show_state.config(image=state_img)\r\n\r\n # add our first selection to a new list\r\n answer_list.append(our_states[rando])\r\n\r\n # remove from old list\r\n our_states.remove(our_states[rando])\r\n\r\n # shuffle original list\r\n random.shuffle(our_states)\r\n\r\n count=count+1\r\n\r\n random.shuffle(answer_list)\r\n\r\n global capital_radio\r\n capital_radio=StringVar()\r\n 
capital_radio.set(our_state_capitals[answer_list[0]])\r\n\r\n capital_radio_button1=Radiobutton(state_capital_frame,text=our_state_capitals[answer_list[0]].title(),variable=capital_radio,value=our_state_capitals[answer_list[0]]).pack()\r\n capital_radio_button2=Radiobutton(state_capital_frame,text=our_state_capitals[answer_list[1]].title(),variable=capital_radio,value=our_state_capitals[answer_list[1]]).pack()\r\n capital_radio_button3=Radiobutton(state_capital_frame,text=our_state_capitals[answer_list[2]].title(),variable=capital_radio,value=our_state_capitals[answer_list[2]]).pack()\r\n\r\n # add a pass button\r\n pass_button=Button(state_capital_frame,text=\"Next\",command=state_capitals)\r\n pass_button.pack(pady=15)\r\n\r\n # create a button to answer\r\n capital_answer_button=Button(state_capital_frame,text=\"Answer\",command=state_capital_answer)\r\n capital_answer_button.pack(pady=15)\r\n\r\n # create an answer label\r\n global answer_label_capitals\r\n answer_label_capitals=Label(state_capital_frame,text=\"\",font=(\"Helvetica\",12)) \r\n answer_label_capitals.pack(pady=15)\r\n\r\n\r\n\r\n\r\n\r\n# hide all previous frames\r\ndef hide_all_frames():\r\n # loop through and destroy all children in previous frames\r\n for widget in state_frame.winfo_children():\r\n widget.destroy()\r\n\r\n for widget in state_capital_frame.winfo_children():\r\n widget.destroy()\r\n\r\n for widget in add_frame.winfo_children():\r\n widget.destroy() \r\n\r\n for widget in sub_frame.winfo_children():\r\n widget.destroy() \r\n\r\n sub_frame.pack_forget()\r\n add_frame.pack_forget()\r\n state_frame.pack_forget()\r\n state_capital_frame.pack_forget()\r\n\r\n\r\n\r\n# create menu\r\nmy_menu=Menu(root)\r\nroot.config(menu=my_menu)\r\n\r\n# geography menu items\r\nstates_menu=Menu(my_menu)\r\nmy_menu.add_cascade(label=\"Geography\",menu=states_menu)\r\nstates_menu.add_command(label=\"states\",command=states)\r\nstates_menu.add_command(label=\"states capitals\",command=state_capitals)\r\nstates_menu.add_separator()\r\nstates_menu.add_command(label=\"Exit\",command=root.quit)\r\n\r\n# Math flashcard menu\r\nmath_menu=Menu(my_menu)\r\nmy_menu.add_cascade(label=\"Math\",menu=math_menu)\r\nmath_menu.add_command(label=\"Addition\",command=add)\r\nmath_menu.add_command(label=\"Subtraction\",command=sub)\r\nmath_menu.add_separator()\r\nmath_menu.add_command(label=\"Exit\",command=root.quit)\r\n\r\n\r\n# create our frames\r\nstate_frame=Frame(root,width=500,height=500,bg=\"white\")\r\nstate_capital_frame=Frame(root,width=500,height=500)\r\n# addition and subtraction frames\r\nadd_frame=Frame(root,width=500,height=500)\r\nsub_frame=Frame(root,width=500,height=500)\r\n\r\nroot.mainloop()","sub_path":"p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":13222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"335296851","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#Mirrorcast Server for Raspberry Pi.\n#Please use python3 and not 2.7, 2.7 will cause problems\n\nimport socket,subprocess,time,logging, threading\nfrom omx import Omx\n\nlogging.basicConfig(filename='/var/log/mirrorcast_server.log',level=logging.DEBUG,format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')\nlogging.info(\"Started Server\")\n\ntimestamp = time.localtime()\nconnected = \"\"\nready = False\nplaying = False\ntube = None\n\ndef connection():\n retries = 10\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n host = \"\"\n sock.bind((host,8092))\n \n 
sock.listen(5)\n        \n        global connected\n        global timestamp\n        global ready\n        global playing\n        \n        global tube\n        tube = Omx()\n        \n        while True:\n            client, address = sock.accept()\n            status = client.recv(8024)\n            command = status.decode('ascii')\n            command = command.split(\",\")\n            #Someone else is already connected\n            if connected != command[1] and connected != \"\":\n                client.send(\"busy\".encode('ascii'))\n                logging.info(str(command[1]) + \" tried to connect but \" + str(connected) + \" is already connected\")\n            #User started casting/mirroring or reconnected\n            if command[0] == \"play\":\n                if connected == \"\":\n                    connected = command[1]\n                    logging.info(connected + \" has connected\")\n                if connected == command[1]:\n                    ready = False\n                    timestamp = time.localtime()\n                    if tube.player != None:\n                        kill(tube.player)\n                        tube.player = None\n                        subprocess.call(\"tvservice -p &\",shell=True)\n                    tube.mirror()\n                    time.sleep(1)\n                    #Inform client that it is now ok to start ffmpeg\n                    client.send(\"ready\".encode('ascii'))\n            \n            #Client initiated stop mirroring\n            elif command[0] == \"stop\" and connected == command[1]:\n                ready = False\n                logging.info(connected + \" has disconnected\")\n                connected = \"\"\n                kill(tube.player)\n                subprocess.call(\"tvservice -p &\",shell=True)\n            \n            #Client wants to freeze the screen\n            elif command[0] == \"freeze\" and connected == command[1]:\n                ready = False\n                logging.info(connected + \" has frozen their screen\")\n                connected = \"\"\n                if tube.player != None:\n                    time.sleep(1)\n                    tube.player.pause()\n                client.send(\"paused\".encode('ascii'))\n            \n            #WIP, for playing youtube videos\n            elif \"tube\" in command[0] and connected == \"\":\n                if command[0] == \"tube-load\":\n                    if tube.player != None:\n                        kill(tube.player)\n                    tube.url = command[2]\n                    if tube.youtube() == False:\n                        client.send(\"error\".encode('ascii'))\n                    else:\n                        while True:\n                            if tube.player.is_playing():\n                                client.send(\"ready\".encode('ascii'))\n                                playing = True\n                                break\n                elif command[0] == \"tube-stop\" and tube.player != None:\n                    kill(tube.player)\n                    tube.player = None\n                elif command[0] == \"tube-forward\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.seek(30)\n                elif command[0] == \"tube-back\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.seek(-30)\n                elif command[0] == \"tube-pause\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.play_pause()\n                elif command[0] == \"tube-up\" and tube.player != None:\n                    if tube.player.can_control():\n                        if tube.player.volume() < 700.0:\n                            tube.player.set_volume(tube.player.volume() + 100.0)\n                elif command[0] == \"tube-down\" and tube.player != None:\n                    if tube.player.can_control():\n                        if tube.player.volume() > -1550.0:\n                            tube.player.set_volume(tube.player.volume() - 100.0)\n                elif command[0] == \"tube-track-down\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.action(6)\n                elif command[0] == \"tube-track-up\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.action(7)\n                elif command[0] == \"tube-vol\" and tube.player != None:\n                    if tube.player.can_control():\n                        tube.player.set_volume(float(command[2]))\n            \n            #This condition is met if the user wants to play a DVD or Media file.\n            elif command[0] == \"media\" and connected == \"\":\n                logging.info(connected + \" is trying to stream a Media file or DVD\")\n                subprocess.call(\"tvservice -p &\",shell=True)\n                if tube.player != None:\n                    kill(tube.player)\n                    tube.player = None\n                #Inform client that it is now ok to start ffmpeg\n                
client.send(\"ready\".encode('ascii'))\n \n elif command[0] == \"media-start\" and connected == \"\":\n tube.start_media(address[0])\n \n elif command[0] == \"tu-media\" and connected == \"\":\n logging.info(connected + \" is trying to stream a youtube video\")\n subprocess.call(\"tvservice -p &\",shell=True)\n if tube.player != None:\n kill(tube.player)\n tube.player = None\n time.sleep(1)\n #Inform client that it is now ok to start ffmpeg\n client.send(\"ready\".encode('ascii'))\n \n #Check if client is still online\n elif command[0] == \"active\":\n timestamp = time.localtime()\n ready = True\n client.send(\"ok\".encode('ascii'))\n \n client.close()\n retries = 10\n except:\n retries = retries - 1\n #To prevent logs from getting spammed if there is a problem\n if retries > 0:\n logging.warn(\"There was a issue with sockets, will retry in 20 seconds\")\n time.sleep(20)\n return\n\ndef timeout():\n global connected\n global timestamp\n global ready\n while True:\n #Can no longer contact client, kill omxplayer\n now = time.mktime(time.localtime())\n stamp = time.mktime(timestamp)\n if (now - stamp) > 20 and connected != \"\" and ready == True:\n timestamp = time.localtime()\n logging.warn(connected + \" timed out. \" + str(now) + \" :: \" + str(stamp))\n ready = False\n if tube.player != None:\n kill(tube.player)\n tube.player = None\n time.sleep(1)\n connected = \"\"\n return\n \ndef kill(player):\n try:\n player.quit()\n except:\n pass\n \nloop = threading.Thread(target=timeout)\nloop.start()\nwhile True:\n connection()\n","sub_path":"server/mirrorcast_server_pi.py","file_name":"mirrorcast_server_pi.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"470071732","text":"import Adafruit_DHT\nimport time \nsensor = Adafruit_DHT.DHT11\npin = 4\n\ndef temperature(): \n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n if humidity is not None and temperature is not None:\n\t return 'La temperatura es de {0:0.1f} grados centigrados, con una humedad de: {1:0.1f} porciento'.format(temperature, humidity)\n else:\n\t return 'No pude obtener la temperatura de mi sensor, vuelvelo a intentar por favor!'\n\n\n\n\n","sub_path":"sensors/Adafruit_DHT/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"240346515","text":"# Utilities to deal with the classif2 agrovision dataset\nimport numpy as np\nimport numpy.ma as ma\nimport os\nimport gdal\n\nBASEDIR = os.path.join(os.environ['AGRODATA'], '3_datasets', 'classif2')\n\ndef load_labels(num=1):\n \"\"\"\n Loads the classif2 labels\n Args:\n num: The number of the labels set to load (different sets have different\n folds structure).\n\n Returns:\n labels\n id2label\n folds\n parcel_ids\n \"\"\"\n fname = os.path.join(BASEDIR, 'npy', 'labels_%s.npz' % str(num))\n d = np.load(fname)\n labels = d['labels']\n labels = ma.masked_where(labels == -1, labels)\n\n folds = d['folds']\n folds = ma.masked_where(folds == -1, folds)\n\n return labels, d['id2label'], folds, d['parcel_ids']\n\n\ndef _load_dsm_correction(datestr):\n \"\"\"\n The compute_dsm_correction scripts compute a per-date correction that\n should be subtracted from the DSM to align it with the others\n \"\"\"\n fname = os.path.join(BASEDIR, 'npy', 'per_date_dsm_correction.npz')\n d = np.load(fname)\n dates = d['dates']\n correction = d['correction']\n\n return 
correction[dates.tolist().index(datestr)]\n\n\ndef load_image(datestr, imgtype, autocorrect_dsm=True):\n \"\"\"\n Loads an image for the given date and type\n Args:\n datestr: Something like 2013_08_21\n imgtype: Either 'rgb' or 'dsm'\n autocorrect_dsm: If true, will apply DSM correction\n Returns:\n array: This is a masked array that contains the data\n - NxMx3 uint8 array with RGB values for 'rgb'\n - NxM float32 array with elevation values for 'dsm'\n \"\"\"\n assert imgtype in ['rgb', 'dsm']\n fname = os.path.join(BASEDIR, 'rasters', '%s_%s.tif' % (datestr, imgtype))\n assert os.path.exists(fname), 'File does not exist : %s' % fname\n\n ds = gdal.Open(fname)\n if imgtype == 'rgb':\n # This should be a RGBA image\n arr = ds.ReadAsArray()\n assert len(arr.shape) == 3\n assert arr.shape[0] == 4\n assert arr.dtype == np.uint8\n arr = np.rollaxis(arr, 0, start=3)\n mask = arr[:,:,3] == 0\n mask = np.dstack([mask, mask, mask])\n return ma.masked_array(arr[:,:,:3], mask=mask)\n else: # dsm\n assert ds.RasterCount == 1\n band = ds.GetRasterBand(1)\n nodata = band.GetNoDataValue()\n arr = band.ReadAsArray()\n arr = ma.masked_where(arr == nodata, arr)\n assert len(arr.shape) == 2\n assert arr.dtype == np.float32\n\n if autocorrect_dsm:\n return arr - _load_dsm_correction(datestr)\n\n return arr\n\n\n","sub_path":"paper_code/agronn/classif2.py","file_name":"classif2.py","file_ext":"py","file_size_in_byte":2607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"88547266","text":"#!/usr/bin/env python\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport sys\nimport errno\nimport logging\nimport argparse\n\nARG_DEFAULTS = {'single':False, 'log':sys.stderr, 'volume':logging.ERROR}\nDESCRIPTION = \"\"\"Calculate the probability that an error that occurs somewhere during the a PCR\nprocess with k cycles ends up in x reads out of n in a duplex family. Assumes a simple PCR model\nwhere every fragment is doubled every cycle. It prints four tab-delimited columns: k, n, x, and the\nprobability of that x.\"\"\"\n\n\ndef make_argparser():\n\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.set_defaults(**ARG_DEFAULTS)\n\n parser.add_argument('-x', type=int,\n help='Number of reads with the error. If omitted, it will output the probability of every '\n 'x from 1 to n/2 (or n-1 if --one-sided).')\n parser.add_argument('-n', type=int, required=True,\n help='Total number of reads.')\n parser.add_argument('-k', type=int, required=True,\n help='Number of PCR cycles.')\n parser.add_argument('-1', '--1-sided', dest='single', action='store_true',\n help='Only calculate the literal probability of x errors in n reads. Contrast with '\n '--double-sided.')\n parser.add_argument('-2', '--2-sided', dest='single', action='store_false',\n help='Output P(x/n) + P((n-x)/n) (default). This is how real errors will appear in families, '\n 'since errors over 50%% will be considered the \"correct\", consensus base.')\n parser.add_argument('-l', '--log', type=argparse.FileType('w'),\n help='Print log messages to this file instead of to stderr. 
Warning: Will overwrite the file.')\n parser.add_argument('-q', '--quiet', dest='volume', action='store_const', const=logging.CRITICAL)\n parser.add_argument('-v', '--verbose', dest='volume', action='store_const', const=logging.INFO)\n parser.add_argument('-D', '--debug', dest='volume', action='store_const', const=logging.DEBUG)\n\n return parser\n\n\ndef main(argv):\n\n parser = make_argparser()\n args = parser.parse_args(argv[1:])\n\n logging.basicConfig(stream=args.log, level=args.volume, format='%(message)s')\n tone_down_logger()\n\n if args.x is not None and (args.x < 1 or args.x >= args.n):\n fail('-x must be between 0 and -n (you gave {})'.format(args.x))\n\n if args.x:\n x_values = [args.x]\n else:\n if args.single:\n x_values = range(1, args.n)\n else:\n x_values = range(1, args.n//2+1)\n\n for x in x_values:\n if args.single or x/args.n == 0.5:\n p = get_maf_prob(args.k, args.n, x)\n else:\n p1 = get_maf_prob(args.k, args.n, x)\n p2 = get_maf_prob(args.k, args.n, args.n-x)\n p = p1 + p2\n print(args.k, args.n, x, p, sep='\\t')\n\n\ndef get_maf_prob(k, n, x):\n \"\"\"Calculate the equation:\n $\\frac{\\sum_{i=1}^k 2^i {n \\choose x} \\frac{1}{2^i}^x (1 - \\frac{1}{2^i})^{n - x} }\n {\\sum_{y=1}^{n-1} \\sum_{i=1}^k 2^i {n \\choose y} \\frac{1}{2^i}^y (1 - \\frac{1}{2^i})^{n-y}}$\n Where n is the total number of reads in the family, x is the number of reads containing a given\n error, and k is the number of PCR cycles used. x/n should then be the frequency of the error in\n the family.\n \"\"\"\n numerator = summation(equation1, 1, k, n, x)\n denominator = 0\n for y in range(1, n):\n denominator += summation(equation1, 1, k, n, y)\n return numerator/denominator\n\n\ndef summation(function, start, end, *args):\n sum = 0\n for i in range(start, end+1):\n sum += function(i, *args)\n return sum\n\n\ndef equation1(i, n, x):\n two_i = 2**i\n mult1 = two_i\n mult2 = n_choose_k(n, x)\n mult3 = (1/two_i)**x\n mult4 = (1-(1/two_i))**(n-x)\n return mult1 * mult2 * mult3 * mult4\n\n\ndef n_choose_k(n, k):\n return factorial(n)/(factorial(k)*factorial(n-k))\n\n\ndef factorial(n):\n \"\"\"A non-recursive factorial function. Because why not.\"\"\"\n product = 1\n for i in range(n, 1, -1):\n product *= i\n return product\n\n\ndef tone_down_logger():\n \"\"\"Change the logging level names from all-caps to capitalized lowercase.\n E.g. 
\"WARNING\" -> \"Warning\" (turn down the volume a bit in your log files)\"\"\"\n for level in (logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG):\n level_name = logging.getLevelName(level)\n logging.addLevelName(level, level_name.capitalize())\n\n\ndef fail(message):\n logging.critical(message)\n if __name__ == '__main__':\n sys.exit(1)\n else:\n raise Exception('Unrecoverable error')\n\n\nif __name__ == '__main__':\n try:\n sys.exit(main(sys.argv))\n except IOError as ioe:\n if ioe.errno != errno.EPIPE:\n raise\n","sub_path":"utils/pcr.py","file_name":"pcr.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"315778808","text":"# ******************************************************************************\n\nprint(\"WELCOME\")\n\nclass UserManagement:\n\n\tdef __init__(self):\n\t\tpass\n\n# Asks user for employee details and displays onto screen\n\n\tdef employee_details(self):\n\t\tprint(\"Employee Details\")\n\t\tanswer = True\n\t\twhile answer:\n\t\t\tself.employee_name = str(input(\"Please enter employee's name: \"))\n\t\t\tself.employee_id = str(input(\"Please enter employee's ID: \"))\n\t\t\temployee_number = str(input(\"Please enter employee's number: \"))\n\t\t\tself.qualification_level = str(input(\"Please enter AP for Apprentice or FQ for fully-qualified: \"))\n\t\t\t\n\t\t\tif self.qualification_level == \"AP\":\n\t\t\t\tprint(\"Apprentice\")\n\t\t\telse:\n\t\t\t\tprint(\"Fully Qualified\")\n\n\t\t\tself.confirm = str(input(\"Please confirm all information is correctly entered? \"))\n\t\t\tif not self.confirm[0].lower() == \"n\":\n\t\t\t\tanswer = False\n\t\t\t\n\t\t\telse:\n\t\t\t\tprint(\"Please enter the details again.\")\n\t\t\t\tself.employee_details()\n\t\t\treturn self.qualification_level\n\t\n\t\n# The three functions generates an estimate for the user\n\n\tdef information(self):\n\t\tself.customer_number = int(input(\"Enter customer's number: \"))\n\t\tself.date_of_estimate = str(input(\"Enter date of estimate: \"))\n\t\tself.number_of_rooms = int(input(\"Please enter the number of rooms: \"))\n\t\treturn self.number_of_rooms\n\n\t\n\tdef rooms(self):\n\t\tself.information()\n\t\t\n\t\tfor x in range(1,self.number_of_rooms+1):\n\t\t\tself.name = str(input(\"Please enter the name of the room: \"))\n\t\t\tself.height = int(input(\"Please enter height of the room:\" ))\n\t\t\tself.width = int(input(\"Please enter width of the room: \"))\n\t\t\tself.wall_paper = str(input(\"Does the wallpaper need to be removed?\"))\n\t\t\tif self.wall_paper[0].lower() == \"y\":\n\t\t\t\tself.wall_paper_rooms = int(input(\"How many rooms will the wall-paper need to be removed for: \"))\n\t\t\telse:\n\t\t\t\tself.wall_paper_rooms = 0\n\t\t\n\t\treturn self.height\n\t\treturn self.width\n\t\treturn self.wall_paper_rooms\n\n\n\tdef figures(self):\n\t\t\tself.rooms()\n\n\t\t\tself.surface_area = self.height * self.width\n\t\t\tprint(\"The total surface area for\", self.name, \"is: \",self.surface_area)\n\n\t\t\tself.price = (self.surface_area * 15) + (self.wall_paper_rooms*70)\n\t\t\t\n\t\t\treturn self.price\t\t\t\n\n# Based on all the information received, a final total will be displayed\n\n\tdef job_role(self):\n\t\tself.employee_details()\n\t\tself.figures()\n\n\t\tif self.qualification_level == \"Apprentice\":\n\t\t\tself.price = self.price + 100\n\t\telse:\n\t\t\tself.price = self.price + 250\n\n\t\tprint(\"The total payment so far is 
£\",self.price)\n\t\tself.price = self.price * 1.20\n\t\tprint(\"With VAT, the total price now becomes £\",self.price)\n\n\t\tself.query = str(input(\"Would you like to generate another estimate? \"))\n\t\tif self.query[0].lower() == \"y\":\n\t\t\tself.employee_details()\n\t\telse:\n\t\t\treturn self.query\n\ncopy = UserManagement()\n# ---\ncopy.job_role()\n","sub_path":"edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"21240295","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom Dialogs.Graphs.bloodReportGraph import *\nfrom Dialogs.messageBox import *\nfrom Dialogs.superadmin.BloodBanks.viewBloodBanks import *\nfrom Dialogs.superadmin.BloodBanks.new_bloodBankProfile import *\n\nclass bloodcenterstats(object):\n    def setup(self, bloodcenterstats):\n        bloodcenterstats.setObjectName(\"bloodcenterstats\")\n        bloodcenterstats.resize(393, 409)\n        self.titleLabel = QtWidgets.QLabel(bloodcenterstats)\n        self.titleLabel.setGeometry(QtCore.QRect(20, 10, 361, 41))\n        self.titleLabel.setStyleSheet(\"font-size:14pt;\\n\"\n\"font-weight: bold;\")\n        self.titleLabel.setObjectName(\"titleLabel\")\n        self.close = QtWidgets.QPushButton(bloodcenterstats)\n        self.close.setGeometry(QtCore.QRect(140, 360, 89, 25))\n        self.close.setObjectName(\"close\")\n        self.searchtcButton = QtWidgets.QPushButton(bloodcenterstats)\n        self.searchtcButton.setGeometry(QtCore.QRect(20, 190, 161, 25))\n        self.searchtcButton.setObjectName(\"searchtcButton\")\n        self.allBillsButton = QtWidgets.QPushButton(bloodcenterstats)\n        self.allBillsButton.setGeometry(QtCore.QRect(190, 190, 161, 25))\n        self.allBillsButton.setObjectName(\"allBillsButton\")\n        self.titleLabel_2 = QtWidgets.QLabel(bloodcenterstats)\n        self.titleLabel_2.setGeometry(QtCore.QRect(10, 80, 241, 41))\n        self.titleLabel_2.setStyleSheet(\"font-size:14pt;\\n\"\n\"font-weight: bold;\")\n        self.titleLabel_2.setObjectName(\"titleLabel_2\")\n        self.titleLabel_3 = QtWidgets.QLabel(bloodcenterstats)\n        self.titleLabel_3.setGeometry(QtCore.QRect(-30, 130, 241, 41))\n        self.titleLabel_3.setStyleSheet(\"font-size:14pt;\\n\"\n\"font-weight: bold;\")\n        self.titleLabel_3.setObjectName(\"titleLabel_3\")\n        self.frame_2 = QtWidgets.QFrame(bloodcenterstats)\n        self.frame_2.setGeometry(QtCore.QRect(20, 240, 341, 101))\n        self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)\n        
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frame_2.setObjectName(\"frame_2\")\n self.tcid = QtWidgets.QLineEdit(self.frame_2)\n self.tcid.setGeometry(QtCore.QRect(80, 10, 181, 25))\n self.tcid.setObjectName(\"tcid\")\n self.goToProfile = QtWidgets.QPushButton(self.frame_2)\n self.goToProfile.setGeometry(QtCore.QRect(170, 60, 151, 25))\n self.goToProfile.setObjectName(\"goToProfile\")\n self.bloodstatsgraph = QtWidgets.QPushButton(self.frame_2)\n self.bloodstatsgraph.setGeometry(QtCore.QRect(10, 60, 161, 25))\n self.bloodstatsgraph.setObjectName(\"bloodstatsgraph\")\n self.bbcRegistered = QtWidgets.QLabel(bloodcenterstats)\n self.bbcRegistered.setGeometry(QtCore.QRect(270, 90, 67, 17))\n self.bbcRegistered.setObjectName(\"bbcRegistered\")\n self.totalbills = QtWidgets.QLabel(bloodcenterstats)\n self.totalbills.setGeometry(QtCore.QRect(270, 140, 67, 17))\n self.totalbills.setObjectName(\"totalbills\")\n\n self.retranslateUi(bloodcenterstats)\n QtCore.QMetaObject.connectSlotsByName(bloodcenterstats)\n\n def retranslateUi(self, bloodcenterstats):\n _translate = QtCore.QCoreApplication.translate\n bloodcenterstats.setWindowTitle(_translate(\"bloodcenterstats\", \"Stats\"))\n self.titleLabel.setText(_translate(\"bloodcenterstats\", \"
Blood Center Stats
\"))\n self.close.setText(_translate(\"bloodcenterstats\", \"close\"))\n self.searchtcButton.setText(_translate(\"bloodcenterstats\", \"Search Blood Centers\"))\n self.allBillsButton.setText(_translate(\"bloodcenterstats\", \"All Blood Bills\"))\n self.titleLabel_2.setText(_translate(\"bloodcenterstats\", \"
Blood Center Registered :
\"))\n self.titleLabel_3.setText(_translate(\"bloodcenterstats\", \"
Total Blood Bills:
\"))\n self.tcid.setPlaceholderText(_translate(\"bloodcenterstats\", \"Enter Blood Center ID\"))\n self.goToProfile.setText(_translate(\"bloodcenterstats\", \"Go To Profile\"))\n self.bloodstatsgraph.setText(_translate(\"bloodcenterstats\", \"See Blood Stats\"))\n self.bbcRegistered.setText(_translate(\"bloodcenterstats\", \"4\"))\n self.totalbills.setText(_translate(\"bloodcenterstats\", \"10\"))\n self.events(bloodcenterstats)\n\n def events(self,parent):\n self.bloodstatsgraph.clicked.connect(lambda : self.clickOnBloodGraph())\n self.goToProfile.clicked.connect(lambda : self.clickOnGotoProfile())\n self.searchtcButton.clicked.connect(lambda : self.clickOnSearchButton())\n\n def clickOnSearchButton(self):\n self.window = QDialog()\n self.dialog = viewBloodBankCenter()\n self.dialog.setup(self.window)\n self.window.setModal(True)\n self.window.show()\n\n\n\n def clickOnBloodGraph(self):\n if not(self.tcid.text().isdigit()):\n self.window = messageBox()\n self.window.infoBox(\"Invalid ID\")\n return\n\n self.window = QDialog()\n self.dialog = bloodbankGraph()\n self.dialog.setup(self.window,int(self.tcid.text()))\n self.window.setModal(True)\n self.window.show()\n\n def clickOnGotoProfile(self):\n if not(self.tcid.text().isdigit()):\n self.window = messageBox()\n self.window.infoBox(\"Invalid ID\")\n return\n import requests\n URL = \"https://mdtouch.herokuapp.com/MDTouch/api/bloodbankcenter/\" + str(self.tcid.text())\n r = requests.get(url=URL)\n l = r.json()\n if l == {\"detail\": \"Not found.\"}:\n self.window = messageBox()\n self.window.infoBox(\"Id Does Not Exits\")\n return\n\n self.window = QDialog()\n self.dialog = new_bloodBankProfile()\n self.dialog.setup(self.window,l)\n self.window.setModal(True)\n self.window.show()\n\n","sub_path":"Dialogs/superadmin/bloodcenterStats.py","file_name":"bloodcenterStats.py","file_ext":"py","file_size_in_byte":6083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"140742878","text":"import os\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"0,1\"\nimport numpy as np\nimport tensorflow as tf\nimport skimage.io\n\nimport argparse\nimport glob\nfrom multiprocessing import Pool, current_process\n\ndef proc_frames(frames_item):\n (in_path, proc_id) = frames_item\n class_name = in_path.split('/')[-2]\n vid_name = in_path.split('/')[-1]\n out_dir = os.path.join(OUT_PATH, class_name, vid_name)\n try:\n os.makedirs(out_dir)\n except:\n pass\n\n # Get frame data\n print(in_path)\n frame_names = glob.glob(in_path+'/*')\n data = [None]*len(frame_names)\n for i, name in enumerate(frame_names):\n data[i] = skimage.io.imread(name)\n data = np.array(data)\n assert len(data.shape) == 4\n\n # Get sobeled data\n print('getting sobel')\n # with tf.Session() as sess:\n feed_dict = {input_plh:data}\n im_data = np.array(sess.run(out_data, feed_dict=feed_dict))\n im_data = np.squeeze(im_data, axis=-1)\n for i, f_data in enumerate(im_data):\n full_name = os.path.join(out_dir, frame_names[i].split('/')[-1])\n skimage.io.imsave(full_name, f_data)\n print('{} {} done'.format(proc_id, vid_name))\n return True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Do Sobel Operation\")\n parser.add_argument(\"--in_path\", type=str, default='./UCF101',\n help='path to the video frame data')\n parser.add_argument(\"--out_path\", type=str, default='./UCF101_sobeled/',\n help='path to the output sobel dir')\n parser.add_argument(\"--num_worker\", type=int, default=2)\n args = parser.parse_args()\n\n IN_PATH 
= args.in_path\n OUT_PATH = args.out_path\n num_worker = args.num_worker\n\n sess = tf.InteractiveSession()\n\n input_plh = tf.placeholder(dtype=tf.float32, \n shape=(None, None, None, 3))\n # kernel_h = [[1,0,-1], [2,0,-2], [1,0,-1]]\n # kernel_h_tf = tf.constant(kernel_h, shape=[1,3,3,1], dtype=tf.float32)\n # kernel_v = [[1,2,1], [0,0,0], [-1,-2,-1]]\n # kernel_v_tf = tf.constant(kernel_v, shape=[1,3,3,1], dtype=tf.float32)\n kernel_v_tf = tf.tile(tf.constant([[1,2,1],[0,0,0],[-1,-2,-1]],\n shape=[3,3,1,1],dtype=tf.float32),[1,1,3,1])\n kernel_h_tf = tf.transpose(kernel_v_tf,[1,0,2,3])\n grad_x = tf.nn.conv2d(input_plh, kernel_h_tf, \n [1,1,1,1], padding='SAME')\n grad_y = tf.nn.conv2d(input_plh, kernel_v_tf, \n [1,1,1,1], padding='SAME')\n\n grad = tf.sqrt(tf.add(tf.pow(grad_x, 2), tf.pow(grad_y, 2)))\n # out_data = grad\n grad = tf.clip_by_value(grad, 0., tf.reduce_max(grad))\n out_data = tf.truediv(grad, tf.reduce_max(grad))\n\n vid_list = glob.glob(IN_PATH+'/*/*')\n # pool = Pool(num_worker)\n # pool.map(proc_frames, zip(vid_list, range(len(vid_list))))\n for i in vid_list:\n proc_frames((i, 0))\n\n","sub_path":"sobel_operation.py","file_name":"sobel_operation.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"411255783","text":"#cesar marroquin\n#assignment 1\nguess = 1\nepsilon = .01\nmarginOfError = True\niteration = 0\nnotNum = True\nwhile notNum:\n try:\n #val = int(user_input)\n user_input = int(input(\"Please Enter a positive number: \"))\n notNum = False\n except ValueError:\n print(\"That's not a number!\")\n\nif user_input == 0:\n print(\"The square root of 0 is: 0\")\nelif user_input < 0:\n user_input = user_input * -1\n realpart = user_input**(1/2)\n imaginary = complex(realpart, 1)\n print(imaginary)\nelse:\n while marginOfError:\n best_guess = abs((guess * guess) - user_input)\n iteration += 1\n if best_guess < 0.01:\n marginOfError = False\n print(\"The square root of\", user_input , \"is\", round(guess, 2))\n else:\n guess = abs((guess + user_input / guess) / 2)\n print(\"guess number\", iteration ,\"is: \",guess)\n","sub_path":"square_root.py","file_name":"square_root.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"39152842","text":"#! /Library/Frameworks/Python.framework/Versions/3.9/bin/python3.9\n#Script to take multi-column MTZ from k_optimiser and plot\n#sections of Patterson maps \n#cctbx must be installed / module loaded for Patterson map generation\n\nimport subprocess\nimport numpy as np\nimport gemmi\nimport matplotlib\nmatplotlib.rcParams['text.usetex'] = True\nfrom matplotlib import pyplot as plt\n\n#Input MTZ file name \nmtz_name = 'k_intensity_corrections.mtz'\n#High resolution limit\nhigh_res = 2.0\n#'y' if you want to generate maps, 'n' if they have already been generated\ngenerate_maps = 'n' \n#Base pattern intensities and sigIs. 
\nI_base = 'I_'\nsig_base = 'sigI_'\n\nk = 0\nmax_dataset = 50\nwhile k <=50:\n\tkk = \"{0:0=3d}\".format(k)\n\t#Run cctbx.patterson_map to generate Patterson map in CCP4 format \n\tresolution_key = 'high_resolution=' + str(high_res)\n\tfile_name_key = 'map_file_name = k_patt_plot_' + str(kk) + '.ccp4'\n\tlabel_key = 'labels =' +'\\'' + I_base + str(kk) + ',' + sig_base + str(kk) + '\\''\n\tif generate_maps == 'y':\n\t\tsubprocess.call(['cctbx.patterson_map', mtz_name, file_name_key, label_key, resolution_key])\n\telse:\n\t\tbreak\n\tk = k + 1\n\n#Read in CCP4 format Patterson maps as 3D Numpy arrays and \n\nccp4 = gemmi.read_ccp4_map('k_patt_plot_000.ccp4')\nccp4.setup()\narr = np.array(ccp4.grid, copy=False)\nx = np.linspace(0, ccp4.grid.unit_cell.a, num=arr.shape[0], endpoint=False)\ny = np.linspace(0, ccp4.grid.unit_cell.b, num=arr.shape[1], endpoint=False)\nplt.plot(y, arr[0,:,0], label='k = 0')\n\nccp4 = gemmi.read_ccp4_map('k_patt_plot_015.ccp4')\nccp4.setup()\narr = np.array(ccp4.grid, copy=False)\nplt.plot(y, arr[0,:,0], label='k = 15')\n\nccp4 = gemmi.read_ccp4_map('k_patt_plot_018.ccp4')\nccp4.setup()\narr = np.array(ccp4.grid, copy=False)\nplt.plot(y, arr[0,:,0], label='k = 18')\n\nccp4 = gemmi.read_ccp4_map('k_patt_plot_021.ccp4')\nccp4.setup()\narr = np.array(ccp4.grid, copy=False)\nplt.plot(y, arr[0,:,0], label='k = 21')\n\nccp4 = gemmi.read_ccp4_map('k_patt_plot_030.ccp4')\nccp4.setup()\narr = np.array(ccp4.grid, copy=False)\nplt.plot(y, arr[0,:,0], label='k = 30')\n#Code to make contour plot of Patterson Map, uncomment and de-indent to use \n\t\t#X, Y = np.meshgrid(x, y, indexing='ij')\n\t\t#plt.contour(X, Y, arr[:,:,100])\n\t\t#plt.gca().set_aspect('equal', adjustable='box')\n\t\t#plt.show()\n\t\t#print(x,y)\n\t\t#print(arr.shape)\n\n\nplt.grid()\nplt.legend()\nplt.xlabel(r'\\textbf{Interatomic a=0 c =0 b-vectors (Angstroms)}')\nplt.ylabel(r'\\textbf{Patterson Peak Height}')\nplt.show()\n","sub_path":"translational_disorder/k_patt_plot.py","file_name":"k_patt_plot.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77490910","text":"# Another question I did for a job a while back. \n# Had to take a list of numbers between 1 and 1 million, and count all the missing ones. 
\n\n\ndef question_two(list1):\n ints = list(range(1, 1000000)) # list in the appropriate range\n for i in list1: # iterate through input list\n if i in ints: # if statement to handle any duplicates in input list\n ints.remove(i) # simply remove the element from the list\n return ints # return the ints variable containing all missing numbers in the appropriate range\n\n\ndef validate(input_list, output_list): # function to validate the above function\n check = [] # create a list\n val = bool # initiate output variable\n for i in input_list: # iterate through the shorter list\n if i in output_list: # iterate through longer list to check no same values exist in both\n check.append(i) # if same value in both lists, append value to check list\n if len(check) == 0: # if the check list is empty, the function has worked\n val = True # set val to true meaning the function has worked\n elif len(check) > 0: # if check list is not empty function has not worked\n val = False\n return val\n\n\ndef create_list():\n # creating a random list of integers in appropriate range\n from random import randint\n lst = []\n n = 2\n while n < 1001: # list of 1 thousand integers so there's a higher probability of duplicates than 100\n lst.append(randint(1, 1000000))\n n += 1\n return lst\n\n\ninp_list = create_list()\n# using a second function to double check the lists don't have any of the same values\nif validate(inp_list, question_two(inp_list)) is True:\n print('Program works.')\nelse:\n print('Program failed.')\n","sub_path":"question_two.py","file_name":"question_two.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97981025","text":"from dnevnik import School\n\n\nclass ClassUnit:\n \"\"\"Объект класса\"\"\"\n __client = None\n id: int = None\n display_name: str = None\n letter: str = None\n name: str = None\n student_count: int = None\n ae_percentage: float = None\n home_based: bool = None\n\n __school_id: id = None\n\n def __init__(self, client, class_unit_id: int):\n self.__client = client\n data = client.make_request(f\"/core/api/class_units/{class_unit_id}\")\n self.id = data[\"id\"]\n self.display_name = data[\"display_name\"]\n self.letter = data[\"letter\"]\n self.name = data[\"name\"]\n self.ae_percentage = data[\"ae_percentage\"]\n self.student_count = data[\"student_count\"]\n self.home_based = data[\"home_based\"]\n\n @property\n def school(self):\n return School(self.__client, self.__school_id)\n","sub_path":"dnevnik/class_unit.py","file_name":"class_unit.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"97180166","text":"'''\npython3 script to construct a tensor network file for T3NS\n'''\n\n\nfrom collections import deque\nimport sys\nimport random\nimport os\nimport os.path as path\nimport shutil\n\n# check the arguments\nif len(sys.argv) != 3: # incl. 
python script\n error_message = 'Syntaxis: python plant.py FCIDUMP ' + \\\n 'SEED'\n raise IOError(error_message)\n# try interpreting the arguments as files\nfcidump = sys.argv[1]\nseed = sys.argv[2]\n# test wether these files can be opened\ntest = open(sys.argv[1])\ntest.close()\ntest = open(sys.argv[2])\ntest.close()\n\n\n# make temporary files to store the several output pieces\ntmp_dir = 'tmpplant' + str(random.randint(0,999999))\nos.mkdir(tmp_dir)\n# print('tmp_dir was: %s' % tmp_dir)\nheader = path.join(tmp_dir, 'header')\ntree = path.join(tmp_dir, 'tree')\n\n\n# dictionaries to keep track of the labels\ngroups = {}\nbranchtensors = {}\norbsym = []\n# first try to read the groups from the seed\nwith open(seed) as f:\n for line in f:\n words = line.split()\n if len(words) > 0 and 'GROUP' in words[0]:\n orbsym = [words[i] for i in range(1,len(words))]\n# otherwise read in the orbital numbers from the fcidump file\nif len(orbsym) == 0:\n with open(fcidump) as f:\n # parse the orbsym array\n line = next(f).lstrip()\n while not line.startswith('ORBSYM'):\n line = next(f).lstrip()\n key, value = line.split('=')\n orbsym = list(value.strip(',\\n').split(','))\n\n# create a group dictionary from them\nfor i in list(set(orbsym)):\n groups[i] = deque([])\nfor i in range(len(orbsym)):\n groups[orbsym[i]].append(i)\n\n# the groups dictionary can be visually verified\n# print(groups)\n\n\n# help function:\n# get the right orbital number for a certain label\nbranchtensornr = len(orbsym)\ndef nr(label):\n global groups, branchtensors, branchtensornr\n if label in groups:\n if not groups[label]:\n raise ValueError(\"Orbitals of the seed \" \\\n + \"does not match the fcidump!\")\n result = groups[label].popleft()\n elif label in branchtensors:\n result = branchtensors[label]\n else:\n result = branchtensornr\n branchtensors[label] = branchtensornr\n branchtensornr += 1\n return result\n\n# create a temporary file for the actual tree\ntmp_file = 'tmp' + str(random.randint(0,999999))\n# print('tmp_file was: %s' % tmp_dir)\n\n# loop over the seed and construct the tree\nnr_bonds = 0\nwith open(tree, \"w\") as t:\n with open(seed) as f:\n for line in f:\n words = line.split()\n if len(words) > 0 and 'GROUP' not in words[0]:\n nbrs = []\n # give all orbitals a unique label\n if words[0] in groups:\n nbrs.append(-1)\n for tensor in words:\n nbrs.append(nr(tensor))\n # write out the given branch\n for i in range(1, len(nbrs)):\n nr_bonds += 1\n t.write(str(nbrs[i-1]) + ' ' + str(nbrs[i]) + '\\n')\n last = nbrs[-1]\n nr_bonds += 1\n t.write(str(last) + ' -1')\n\nnr_phys_sites = len(orbsym)\nnr_sites = nr_phys_sites + len(branchtensors)\n\nwith open(header, 'w') as f:\n # write out the header\n f.write('NR_SITES = %d\\n' % nr_sites)\n f.write('NR_PHYS_SITES = %d\\n' % nr_phys_sites)\n f.write('NR_BONDS = %d\\n' % nr_bonds)\n f.write('/\\n')\n convertion = [i for i in range(len(orbsym))] + \\\n ['*' for i in range(nr_sites - nr_phys_sites)]\n for orb in range(len(convertion)):\n f.write(str(convertion[orb]) + ' ')\n #f.write(str(*convertion) + '\\n')\n f.write('\\n/END\\n')\n\n# bash way:\n# os.system(\"cat \" + header + \" \" + tree)\n# pythonic way:\nprint(''.join([open(f).read() for f in [header, tree]]))\n# remove the temporary directory\nshutil.rmtree(tmp_dir)\n","sub_path":"plant.py","file_name":"plant.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319096009","text":"# 登录认证\n# 加密 解密\n# 摘要算法\n# 
two strings:\n# import hashlib  # module that provides digest algorithms; a digest cannot be reversed\n# md5 = hashlib.md5()\n# md5.update(b'alex3714')\n# print(md5.hexdigest())\n\n# however different the algorithms are, the role of a digest never changes\n# digesting the same string always yields the same value\n# whatever algorithm is used, the hashlib workflow stays the same\n# import hashlib\n# sha = hashlib.sha256()\n# sha.update(b'alex3714')\n# print(sha.hexdigest())\n\n# digest algorithms are used for:\n# storing passwords as digests instead of plain text\n# file consistency checks (verify a downloaded file matches the copy on another machine)\n#\n\n\n# user login\n# import hashlib\n# usr = input('username:')\n# pwd = input('password:')\n# with open('userinfo') as f:\n#     for line in f:\n#         user,passwd,role = line.split(\"|\")\n#         md5 = hashlib.md5()\n#         md5.update(bytes(pwd,encoding='utf-8'))\n#         md5_pwd = md5.hexdigest()\n#         if usr == user and md5_pwd == passwd:\n#             print('login successful')\nimport hashlib\nmd5 = hashlib.md5(bytes('加盐',encoding='utf-8')+b'111')  # '加盐' (\"add salt\") is the salt value\nmd5.update(b'123456')\nprint(md5.hexdigest())","sub_path":".py/hashilib模块.py","file_name":"hashilib模块.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489290426","text":"from django import template\nfrom booking.models import RoomType\nfrom booking.models import Room\nfrom booking.models import Order\n\nregister = template.Library()\n\n\n@register.filter(name='filter_me')\ndef filter_me(data):\n    # print(data)\n    orders = Order.objects.all()\n    diff_day = ''\n    user_name = ''\n    for i in orders:\n        if data == i.start_date:\n            myOrder = i\n            diff_day = str(myOrder.diff_days)\n            user_name = str(myOrder.user_name)\n    print(diff_day)\n    text = f\" {user_name} \"\n    return text\n\n\n@register.simple_tag()\ndef build_key(room, day):\n    return \"{}_{:%Y-%m-%d}\".format(room.id, day)\n\n\n@register.simple_tag()\ndef get_item(dictionary, key):\n    return dictionary.get(key)\n","sub_path":"dashboard/templatetags/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27931214","text":"import numpy as np\nimport scipy.linalg as sla\nfrom collections import deque, UserDict\n\nimport matplotlib.pyplot as plt\n\nimport ipdb\n\n\nclass System():\n    def __init__(self, args):\n        self.args = args\n        self.unc = Uncertainty(args)\n\n    def reset(self):\n        return np.zeros(2)\n\n    def step(self, t, x, u):\n        args = self.args\n\n        next_x = x + args.t_step * (\n            args.A.dot(x[:, np.newaxis])\n            + args.B.dot(self.unc.Lambda).dot(\n                (u + self.unc.delta(x))[:, np.newaxis])\n        ).ravel()\n\n        return next_x\n\n\nclass Uncertainty():\n    def __init__(self, args):\n        self.W = np.array([-18.59521, 15.162375, -62.45153,\n                           9.54708, 21.45291])[:, np.newaxis]\n        self.Lambda = np.diag([0.7])\n\n    def basis(self, x):\n        return np.hstack((x, np.abs(x)*x[1], x[0]**3))\n\n    def delta(self, x):\n        return self.W.T.dot(self.basis(x))\n\n\nclass DirectMrac():\n    def __init__(self, system):\n        self.basis = system.unc.basis\n        self.args = system.args\n\n        self.P = sla.solve_lyapunov(self.args.A.T, self.args.Q_lyap)\n\n\nclass Cmrac():\n    def __init__(self, system):\n        self.basis = system.unc.basis\n        self.args = system.args\n\n        delta_num = int(self.args.delta / self.args.t_step)\n        self.memory = deque(maxlen=delta_num)\n\n        self.P = sla.solve_lyapunov(self.args.A.T, self.args.Q_lyap)\n\n    def reset(self):\n        args = self.args\n        self.xr = np.zeros(args.ndim_state)\n        self.v1 = np.zeros(args.ndim_input)\n        self.v2 = np.zeros(args.ndim_basis)\n        self.v3 = np.zeros(args.ndim_state)\n        self.lambdahat = np.eye(args.ndim_input)\n        self.vhat = self.lambdahat.dot(\n            np.zeros((args.ndim_basis, args.ndim_input)).T\n        )\n        self.what = 
np.zeros((args.ndim_basis, args.ndim_input))\n # self.vhat = self.lambdahat.dot(self.what.T)\n\n self.memory.clear()\n\n return self.xr, self.v1, self.v2, self.v3, self.lambdahat, self.vhat\n\n def get_inputs(self, t, x):\n args = self.args\n\n # lambdahat = self.lambdahat\n # vhat = self.vhat\n\n # if args.use_cmrac:\n # what = vhat.T.dot(np.diag(1 / np.diag(lambdahat)))\n # else:\n # what = self.what\n\n what = self.what\n\n c = self.command(t)\n\n # u_n = 0*np.diag(1 / np.diag(lambdahat)).dot(args.Kr).dot(c)\n u_n = args.Kr.dot(c)\n u_a = - what.T.dot(self.basis(x))\n\n return u_n + u_a\n\n def update(self, t, x):\n args = self.args\n\n # realize variables\n xr = self.xr\n v1 = self.v1\n v2 = self.v2\n v3 = self.v3\n lambdahat = self.lambdahat\n vhat = self.vhat\n c = self.command(t)\n\n e = x - xr\n\n # if args.use_cmrac:\n # what = vhat.T.dot(np.diag(1 / np.diag(lambdahat)))\n # else:\n # what = self.what\n\n what = self.what\n\n self.memory.append((x, e, what))\n\n x_delta, e_delta, what_delta = self.memory[0]\n\n y = e - e_delta - args.A.dot(v3[:, np.newaxis]).ravel()\n yhat = args.B.dot(\n - lambdahat.dot(v1[:, np.newaxis])\n + vhat.dot(v2[:, np.newaxis])\n ).ravel()\n\n # update reference model\n next_xr = xr + args.t_step * (\n args.A.dot(xr[:, np.newaxis]) + args.Br.dot(c[:, np.newaxis])\n ).ravel()\n\n next_v1 = v1 + args.t_step * (\n what.T.dot(self.basis(x)[:, np.newaxis])\n - what_delta.T.dot(self.basis(x_delta)[:, np.newaxis])\n ).ravel()\n\n next_v2 = v2 + args.t_step * (\n self.basis(x) - self.basis(x_delta)\n )\n\n next_v3 = v3 + args.t_step * (e - e_delta)\n\n next_lambdahat = lambdahat + args.t_step * (\n args.g1 * np.diag(v1)\n * np.diag(args.B.T.dot((yhat - y)[:, np.newaxis]))\n )\n\n next_vhat = vhat + args.t_step * (\n - args.g2 * args.B.T.dot((yhat - y)[:, np.newaxis]) * v2\n )\n\n next_what = what + args.t_step * (\n args.g3 * self.basis(x)[:, np.newaxis]\n * e.T.dot(self.P).dot(args.B)\n )\n\n self.xr = next_xr\n self.v1 = next_v1\n self.v2 = next_v2\n self.v3 = next_v3\n self.lambdahat = next_lambdahat\n self.vhat = next_vhat\n self.what = next_what\n\n return dict(reference_model=xr, w_hat=what)\n\n # next_what = - args.g1 * what.dot(np.diag(self.\n\n def command(self, t):\n if t < 10:\n c = 1\n elif t < 20:\n c = -1\n elif t < 30:\n c = 1\n elif t < 40:\n c = -1\n else:\n c = 0\n\n return np.deg2rad(5*np.array([c]))\n\n\nclass Arguments(UserDict):\n def __getattr__(self, name):\n return self.data[name]\n\n def __setattr__(self, name, value):\n if name == 'data':\n super().__setattr__(name, value)\n else:\n self.data[name] = value\n\n\nclass Data(Arguments):\n def append(self, name, val):\n val = np.atleast_1d(val)[np.newaxis, :]\n if name not in self:\n self[name] = val\n else:\n self[name] = np.append(self[name], val, axis=0)\n\n def ele_plot(self, name):\n x = self.time\n y = self[name]\n\n plt.plot(x, y)\n\n\n# class Data():\n# def __init__(self, timebase=None, base=None):\n# if type(base) is not int:\n# self.content = np.zeros(timebase.shape + base.shape)\n# else:\n# self.content = np.zeros(timebase.shape + (base, ))\n\n# self.ts = timebase\n\n# def append(self, index, value):\n# self.content[index] = value\n\n# def plot(self):\n# fig, ax = plt.subplots()\n\n# x = self.ts\n# y = self.content\n\n# ax.plot(x, y)\n\n# return fig\n\n\ndef main():\n args = Arguments()\n args.A = np.array([[-2, -1], [1, -1]])\n args.B = np.array([[0, 1]]).T\n args.Br = np.array([[0, 1]]).T\n args.Kr = np.array([[1]])\n args.Q_lyap = np.eye(2)\n args.g1 = 10\n args.g2 = 10\n 
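# (added note, my reading of Cmrac.update above -- not documented in the source):\n    # g1 and g2 scale the lambdahat/vhat estimator corrections, g3 scales the\n    # direct adaptation of what, and delta is the length in seconds of the\n    # delayed-sample memory window (delta / t_step entries in the deque).\n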
args.g3 = 100\n    args.delta = 3\n    args.t_step = 0.01\n    args.t_final = 40\n    args.ndim_state = 2\n    args.ndim_input = 1\n    args.ndim_basis = 5\n    args.ts = np.arange(0, args.t_final, args.t_step)\n    args.comp_gain = 0\n\n    system = System(args)\n    control = Cmrac(system)\n\n    x = system.reset()\n    control.reset()\n\n    data = Data()\n    for i in range(args.ts.size):\n        t = args.ts[i]\n        u = control.get_inputs(t, x)\n\n        # step\n        next_x = system.step(t, x, u)\n\n        # controller update\n        current_data = control.update(t, x)\n\n        data.append('time', t)\n        data.append('state', x)\n        data.append('input', u)\n\n        [data.append(*item) for item in current_data.items()]\n\n        x = next_x\n\n    data.ele_plot('state')\n\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"subject/bare-test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170802088","text":"#!/usr/bin/python\n\nimport bz2\nimport random\n\n\nfin = \"./data/enwiki_knock81.txt.bz2\"\nfout = \"./data/pair_knock82.txt\"\n\n\nprint(\"Loading file....\", end=\" \", flush=True)\ncorpus = bz2.decompress(open(fin, \"rb\").read()).decode().split()\nprint(\"done\")\n\n\nprint('write file', end=\" \", flush=True)\nfout = open(fout, \"w\")\nfor num in range(5, len(corpus) - 5):\n    rand_int = random.randint(1, 5)\n    fout.write(\"{}\\t{}\\n\".format(corpus[num],\n                                 '\\t'.join([corpus[i] for i in range(num-rand_int, num+rand_int+1) if num != i])))\n\n\nfout.close()\nprint(\"done\")\n","sub_path":"kodaira/9set/knock82.py","file_name":"knock82.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491972097","text":"import mpmath as mp\nimport math\n\n\ndef f(x):\n    return mp.sin(x**4 - 4*x**3 + 7*x**2 - 4)\n\n\ndef deriv(x, n):\n    return mp.diff(f, x, n)\n\n\ndef m_abs_diff_n(a, b, n):\n\n    if a == b:\n        return abs(deriv(a, n))\n\n    maximums = []\n    curr_point = a\n    step = (b - a)/1000  # choosing step by dividing [a, b] into 1000 parts\n    while curr_point <= b:\n        abs_max = abs(deriv(curr_point, n))\n        maximums.append(abs_max)\n        curr_point += step\n\n    return max(maximums)\n\n\ndef deriv_est(x, h, n):\n    if n == 0:\n        return (-f(x + 2*h) + 8*f(x + h) - 8*f(x - h) + f(x - 2*h))/(12*h)\n    else:\n        return (-deriv_est(x + 2*h, h, n - 1) +\n                8*deriv_est(x + h, h, n - 1) -\n                8*deriv_est(x - h, h, n - 1) +\n                deriv_est(x - 2*h, h, n - 1))/(12*h)\n\n\ndef deriv_error(x, h, n):\n    if n == 0:\n        return 0\n    a = x - 2*n*h\n    b = x + 2*n*h\n    m_abs_diff_m = m_abs_diff_n(a, b, n + 2)\n    # truncation term plus the error propagated through the whole 5-point stencil\n    return m_abs_diff_m*h**4/30 + (deriv_error(x + 2*h, h, n - 1) +\n                                   8*deriv_error(x + h, h, n - 1) +\n                                   8*deriv_error(x - h, h, n - 1) +\n                                   deriv_error(x - 2*h, h, n - 1))/(12*h)\n\n\ndef optm_h(x, e, h, n):\n    a = x - 2*n*h\n    b = x + 2*n*h\n    m_abs_diff_m = m_abs_diff_n(a, b, n + 2)\n    optimal_h = pow(45*e/(4*m_abs_diff_m), 1/3)\n    return optimal_h\n\n\nif __name__ == \"__main__\":\n    print('Laboratory work #4 \\nSavchuk Ivan KM-73 v.18')\n    while(True):\n        x0 = (1 - pow(6, 1/2))/2\n        h = 2\n        e = 0.001\n\n        for i in range(1):\n            h = optm_h(x0, e, h, i+1)\n            d = deriv_est(x0, h, i+1)\n            error = deriv_error(x0, h, i+1)\n            print(\"\\nOptimal step for calculation of the {} derivative: {}\".format(i+1, h))\n            print(\"The {} derivative at point x = {} : {} \".format(i+1, x0, d))\n            print(\"Error of calculation of the {} derivative: {}\".format(i+1, error))\n\n        answer = input('\\nDo you want to run again?(y/n):')\n        if answer == 
'y':\n continue\n else:\n print('Bye, bye goody!')\n break\n","sub_path":"Numerical derivatives/derivative.py","file_name":"derivative.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577934024","text":"# %%\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport os\n\nfname = os.path.join('jena_climate', 'jena_climate_2009_2016.csv')\n\nf = open(fname)\ndata = f.read()\nf.close\n\nlines = data.split('\\n')\nheader = lines[0].split(',')\nlines = lines[1:]\n\nprint(header)\nprint(lines)\n# %%\n\nfloat_data = np.zeros((len(lines), len(header)-1))\nfor i, line in enumerate(lines):\n values = [float(x) for x in line.split(',')[1:]]\n float_data[i] = values\n# %%\n\ntemp = float_data[:, 1]\nplt.plot(range(len(temp)), temp)\n# %%\nplt.plot(range(1440), temp[:1440])\n\n# %%\n","sub_path":"6.3.1.py","file_name":"6.3.1.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"402439344","text":"from __future__ import print_function\n\nimport sys\nsys.path.append('/opt/pycharm/pycharm-2.7.1/pycharm-debug-py3k.egg')\n\n#import pydevd\n#pydevd.settrace('localhost', port=9989, stdoutToServer=True, stderrToServer=True)\n\nimport ipdb\n#ipdb.set_trace()\n#from ipdb import launch_ipdb_on_exception\n\nimport tornado.ioloop\nimport tornado.web\n\nclass DebuggingLoop(tornado.ioloop.IOLoop):\n def handle_callback_exception(self, callback):\n exc_type, exc_value, tb = sys.exc_info()\n ipdb.post_mortem(tb)\n\nioloop = DebuggingLoop()\nioloop.install()\n\nclass DebuggingRequest(tornado.web.RequestHandler):\n\n def _handle_request_exception(self, e):\n tornado.web.RequestHandler._handle_request_exception(self, e)\n exc_type, exc_value, tb = sys.exc_info()\n ipdb.post_mortem(tb)\n\ndef init_ipython():\n from IPython.config.loader import Config\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n\n try:\n get_ipython\n except NameError:\n nested = 0\n cfg = Config()\n else:\n print(\"Running nested copies of IPython.\")\n print(\"The prompts for the nested copy have been modified\")\n cfg = Config()\n nested = 1\n\n ipshell = InteractiveShellEmbed(config=cfg,\n banner1 = 'Stopping IO Loop and dropping to ipython')\n\n class shell_wrapper(object):\n\n def __init__(self):\n self.user_wants_out = False\n\n def __call__(self):\n ipshell('Ctrl-D, quit, exit all exit interpreter and continue program\\n'\n 'If you need to kill the program %kill', stack_depth=3)\n return self.user_wants_out\n\n _shell_wrapper = shell_wrapper()\n\n def kill_program(self, parameter_s=''):\n _shell_wrapper.user_wants_out = True\n ipshell.exit()\n\n def really_die(self, etype, value, tb, tb_offset=None):\n _shell_wrapper.user_wants_out = True\n return None\n\n ipshell.define_magic(\"kill\", kill_program)\n ipshell.confirm_exit = False\n ipshell.set_custom_exc((SystemExit,), really_die)\n\n return _shell_wrapper\n\nIPSHELL = init_ipython()\n\ndef drop_to_shell(ipshell=IPSHELL):\n if ipshell:\n exit = ipshell()\n if exit:\n sys.exit(0)\n\ndef run_loop(ioloop):\n while True:\n try:\n ioloop.start()\n except KeyboardInterrupt:\n ioloop.stop()\n drop_to_shell()\n print('Resuming I/O loop')\n\n\n## ALL OF THE ABOVE WILL DISAPPEAR INTO A MODULE\n\nclass MainHandler(DebuggingRequest):\n def get(self):\n something = ['this', 'is', 'the', 'response']\n drop_to_shell()\n self.write(' '.join(something))\n\nclass Broken(DebuggingRequest):\n 
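# (added note) deliberately failing endpoint: any exception raised in get()\n    # below is routed through _handle_request_exception into an ipdb post-mortem.\n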
def get(self):\n raise NotImplemented()\n\napp = tornado.web.Application([\n (r'/', MainHandler),\n (r'/test', Broken)\n], debug=True)\n\nif __name__ == '__main__':\n app.listen(8000)\n ioloop.add_callback(lambda: sys.stdout.write('Started on port 8000 -C to abort\\n'))\n run_loop(ioloop)","sub_path":"dockerized-gists/5202233/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310055729","text":"#!/usr/bin/env python3\nimport argparse\nimport configparser\nimport datetime\nimport logging\nimport os\nimport subprocess\nimport sys\n\nfrom flask import Flask\nfrom flask import request\nfrom flask import session\nfrom flask.ext.scrypt import generate_random_salt\n\nimport pajbot.models.apitoken\nimport pajbot.web.common\nimport pajbot.web.routes\nfrom pajbot.bot import Bot\nfrom pajbot.managers import DBManager\nfrom pajbot.managers import RedisManager\nfrom pajbot.managers import TimeManager\nfrom pajbot.models.module import ModuleManager\nfrom pajbot.models.sock import SocketClientManager\nfrom pajbot.streamhelper import StreamHelper\nfrom pajbot.tbutil import init_logging\nfrom pajbot.tbutil import load_config\nfrom pajbot.web.models import errors\nfrom pajbot.web.utils import download_logo\n\ninit_logging('pajbot')\nlog = logging.getLogger('pajbot')\n\napp = Flask(__name__)\napp._static_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')\n\nconfig = configparser.ConfigParser()\n\nparser = argparse.ArgumentParser(description='start the web app')\nparser.add_argument('--config', default='config.ini')\nparser.add_argument('--host', default='0.0.0.0')\nparser.add_argument('--port', type=int, default=2325)\nparser.add_argument('--debug', dest='debug', action='store_true')\nparser.add_argument('--no-debug', dest='debug', action='store_false')\nparser.set_defaults(debug=False)\n\nargs = parser.parse_args()\n\nconfig = load_config(args.config)\nconfig.read('webconfig.ini')\n\nif 'web' not in config:\n log.error('Missing [web] section in config.ini')\n sys.exit(1)\n\nif 'api' not in config:\n log.error('Missing [api] section in config.ini, adding it now!')\n config.add_section('api')\n\nif 'pleblist_password_salt' not in config['web']:\n salt = generate_random_salt()\n config.set('web', 'pleblist_password_salt', salt.decode('utf-8'))\n\nif 'secret_key' not in config['web']:\n salt = generate_random_salt()\n config.set('web', 'secret_key', salt.decode('utf-8'))\n\nif 'token_secret' not in config['api']:\n salt = generate_random_salt()\n config.set('api', 'token_secret', salt.decode('utf-8'))\n\nif 'logo' not in config['web']:\n res = download_logo(config['main']['streamer'])\n if res:\n config.set('web', 'logo', 'set')\n\nStreamHelper.init_web(config['main']['streamer'])\n\nredis_options = {}\nif 'redis' in config:\n redis_options = config._sections['redis']\n\nRedisManager.init(**redis_options)\n\nwith open(args.config, 'w') as configfile:\n config.write(configfile)\n\napp.bot_modules = config['web'].get('modules', '').split()\napp.bot_commands_list = []\napp.bot_config = config\napp.secret_key = config['web']['secret_key']\n\n\nif 'sock' in config and 'sock_file' in config['sock']:\n SocketClientManager.init(config['sock']['sock_file'])\n\n\nDBManager.init(config['main']['db'])\nTimeManager.init_timezone(config['main'].get('timezone', 'UTC'))\n\napp.module_manager = ModuleManager(None).load()\n\npajbot.models.apitoken.secret_key = 
config['api']['token_secret']\n\npajbot.web.routes.admin.init(app)\npajbot.web.routes.api.init(app)\npajbot.web.routes.base.init(app)\n\npajbot.web.common.filters.init(app)\npajbot.web.common.assets.init(app)\npajbot.web.common.tasks.init(app)\npajbot.web.common.menu.init(app)\n\napp.register_blueprint(pajbot.web.routes.clr.page)\napp.register_blueprint(pajbot.web.routes.api.page)\n\nerrors.init(app)\npajbot.web.routes.api.config = config\npajbot.web.routes.clr.config = config\n\nversion = Bot.version\nlast_commit = ''\ncommit_number = 0\ntry:\n current_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('utf8').strip()\n latest_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf8').strip()[:8]\n commit_number = subprocess.check_output(['git', 'rev-list', 'HEAD', '--count']).decode('utf8').strip()\n last_commit = subprocess.check_output(['git', 'log', '-1', '--format=%cd']).decode('utf8').strip()\n version = '{0} DEV ({1}, {2}, commit {3})'.format(version, current_branch, latest_commit, commit_number)\nexcept:\n pass\n\ndefault_variables = {\n 'version': version,\n 'last_commit': last_commit,\n 'commit_number': commit_number,\n 'bot': {\n 'name': config['main']['nickname'],\n },\n 'site': {\n 'domain': config['web']['domain'],\n 'deck_tab_images': config.getboolean('web', 'deck_tab_images'),\n 'websocket': {\n 'host': config['websocket'].get('host', config['web']['domain']),\n 'port': config['websocket']['port'],\n 'ssl': config.getboolean('websocket', 'ssl')\n }\n },\n 'streamer': {\n 'name': config['web']['streamer_name'],\n 'full_name': config['main']['streamer']\n },\n 'modules': app.bot_modules,\n 'request': request,\n 'session': session,\n 'google_analytics': config['web'].get('google_analytics', None),\n }\n\nif 'streamtip' in config:\n default_variables['streamtip_data'] = {\n 'client_id': config['streamtip']['client_id'],\n 'redirect_uri': config['streamtip']['redirect_uri'],\n }\nelse:\n default_variables['streamtip_data'] = {\n 'client_id': 'MISSING',\n 'redirect_uri': 'MISSING',\n }\n\nif 'twitchalerts' in config:\n default_variables['twitchalerts_data'] = {\n 'client_id': config['twitchalerts']['client_id'],\n 'redirect_uri': config['twitchalerts']['redirect_uri'],\n }\nelse:\n default_variables['twitchalerts_data'] = {\n 'client_id': 'MISSING',\n 'redirect_uri': 'MISSING',\n }\n\n\n@app.context_processor\ndef current_time():\n current_time = {}\n current_time['current_time'] = datetime.datetime.now()\n return current_time\n\n\n@app.context_processor\ndef inject_default_variables():\n return default_variables\n\nif __name__ == '__main__':\n app.run(debug=args.debug, host=args.host, port=args.port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32288179","text":"#coding:utf-8\n\nimport requests\nimport os,re,json\n\n# re_hash=re.compile('\"hash\":\"(.*?)\"',re.S|re.I)\n# url=''\nhash_url='https://searchrecommend.kugou.com/get/complex'\nparams={\n 'callback': 'jQuery1124011578388853206789_1614761739771',\n 'word': '周杰伦',\n '_': '1614761739773'\n}\nhash_html=requests.get(hash_url, params=params).text\nstart=hash_html.find('{\"data\"')\nend=hash_html.find('\"info\":\"\"}')+len('\"info\":\"\"}')\nresult=json.loads(hash_html[start:end])['data']['song']\nfor lis in result:\n song_name=lis['songname']\n songer_name=lis['singername']\n hash=lis['hash']\n albumid=lis['AlbumID']\n\n 
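# (added note) second request: resolve this track's playable URL from the\n    # hash/album id extracted above; the fixed dfid/mid values appear to be\n    # session identifiers captured from a browser and may expire.\n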
url='https://wwwapi.kugou.com/yy/index.php'\n    params={\n        'r': 'play/getdata',\n        'callback': 'jQuery19104341443083221761_1614761024893',\n        'hash': str(hash),\n        'dfid': '0PVFDZ3qVwt23cY1C93BuBR1',\n        'mid': 'c16afe0890ad8c2a00901214f815242a',\n        'platid': '4',\n        'album_id': albumid,\n        '_': '1614761024895'\n    }\n    html=requests.get(url,params=params).text\n    start = html.find('{\"status\"')\n    end = html.find('}}}')+len('}}}')\n    result = json.loads(html[start:end])['data']\n    name = result['audio_name']\n    url = result['play_url']\n    print('Downloading', name)\n    with open('E:\\个人文件\\音乐\\{}.mp3'.format(name),'wb') as f:\n        f.write(requests.get(url).content)\n\n\n\n","sub_path":"爬虫-酷狗音乐.py","file_name":"爬虫-酷狗音乐.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523058031","text":"import re\nimport sdist_upip\nfrom setuptools import setup\n\n\ndef long_desc_from_readme():\n    with open('README.rst', 'r') as fd:\n        long_description = fd.read()\n\n    # remove badges\n    long_description = re.compile(r'^\\.\\. start-badges.*^\\.\\. end-badges', re.M | re.S).sub('', long_description)\n\n    # strip links. keep link name and use literal text formatting\n    long_description = re.sub(r'`([^<`]+) <[^>]+>`_', '``\\\\1``', long_description)\n\n    return long_description\n\n\nsetup(\n    name=\"micropython-py-esp32-ulp\",\n    use_scm_version={\n        'local_scheme': 'no-local-version',\n    },\n    description=\"Assembler toolchain for the ESP32 ULP co-processor, written in MicroPython\",\n    long_description=long_desc_from_readme(),\n    long_description_content_type='text/x-rst',\n    url=\"https://github.com/ThomasWaldmann/py-esp32-ulp\",\n    license=\"MIT\",\n    author=\"py-esp32-ulp authors\",\n    author_email=\"tw@waldmann-edv.de\",\n    maintainer=\"py-esp32-ulp authors\",\n    maintainer_email=\"tw@waldmann-edv.de\",\n    classifiers=[\n        'License :: OSI Approved :: MIT License',\n        'Programming Language :: Python :: Implementation :: MicroPython',\n    ],\n    setup_requires=['setuptools_scm'],\n    platforms=[\"esp32\", \"linux\", \"darwin\"],\n    cmdclass={\"sdist\": sdist_upip.sdist},\n    packages=[\"esp32_ulp\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430644522","text":"import cv2 #computer vision\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nimg = cv2.imread('images/Costa.jpg',cv2.IMREAD_COLOR)\nimgRGB=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n#m=np.ones(imgRGB.shape,dtype='uint8')*50\n# improve the contrast\nm=np.ones(imgRGB.shape)*0.8\nm2=np.ones(imgRGB.shape)*1.2\n#img1=cv2.add(imgRGB,m)\n#img2=cv2.subtract(imgRGB,m)\n\nimg1=np.uint8(cv2.multiply(np.float64(imgRGB),m))  # use the RGB copy so matplotlib shows true colors\nimg2=np.uint8(np.clip(cv2.multiply(np.float64(imgRGB),m2),0,255))  # clip to 0-255 so the uint8 conversion stays in range\n\n\"\"\"plt.subplot(131);plt.imshow(imgRGB);plt.title('Original')\nplt.subplot(132);plt.imshow(img1);plt.title('Light')\nplt.subplot(133);plt.imshow(img2);plt.title('Dark')\"\"\"\n\nplt.subplot(131);plt.imshow(img1);plt.title('Low contrast')\nplt.subplot(132);plt.imshow(imgRGB);plt.title('Original')\nplt.subplot(133);plt.imshow(img2);plt.title('High contrast')\n\n#plt.imshow(imgRGB)\nplt.waitforbuttonpress()\n\n\n","sub_path":"6.Dia_6/5.MejoraImage.py","file_name":"5.MejoraImage.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"479853633","text":"import itertools as it\nimport string\nimport unicodedata\nfrom unicode_math_symbols import *\n\nJSON_SNIPPET_TEMPLATE = \"\"\"\\\n \"{0}\": {{\n \"prefix\": \"{0}\",\n \"body\": [\"{1}\"],\n \"description\": \"{2}\"\n }},\n\"\"\"\n\ndef json_escape(char):\n codepoint = ord(char)\n if codepoint <= 0xFFFF:\n return \"\\\\u{0:X}\".format(codepoint)\n else:\n high = (codepoint - 0x10000) // 0x400 + 0xD800\n low = (codepoint - 0x10000) % 0x400 + 0xDC00\n return \"\\\\u{0:X}\\\\u{1:X}\".format(high, low)\n\ndef snippet_generator(command, symbols, args):\n assert len(symbols) == len(args)\n for symbol, arg in zip(symbols, args):\n yield JSON_SNIPPET_TEMPLATE.format(\n \"\\\\\\\\{0}{{{1}}}\".format(command, arg),\n json_escape(symbol),\n unicodedata.name(symbol))\n\ndef naked_greek_snippet_generator():\n MATHEMATICAL_NAKED_GREEK_LETTERS = (\n MATHEMATICAL_ITALIC_GREEK_LETTERS[:25] +\n (chr(0x2207),) +\n MATHEMATICAL_ITALIC_GREEK_LETTERS[26:51] +\n (chr(0x2202),) +\n MATHEMATICAL_ITALIC_GREEK_LETTERS[52:58])\n for letter, command in zip(MATHEMATICAL_NAKED_GREEK_LETTERS,\n LATEX_GREEK_COMMANDS):\n yield JSON_SNIPPET_TEMPLATE.format(\n command,\n json_escape(letter),\n unicodedata.name(letter))\n\ndef full_snippet_generator(command, capitals, smalls, greeks, digits):\n if capitals is not None:\n yield from snippet_generator(command, capitals, string.ascii_uppercase)\n if smalls is not None:\n yield from snippet_generator(command, smalls, string.ascii_lowercase)\n if greeks is not None:\n yield from snippet_generator(command, greeks, LATEX_GREEK_COMMANDS)\n if digits is not None:\n yield from snippet_generator(command, digits, string.digits)\n\nif __name__ == \"__main__\":\n with open(\"snippets.json\", \"w+\") as f:\n f.write(\"{\\n\")\n for commands, (capitals, smalls,\n greeks, digits) in LATEX_STYLE_COMMANDS.items():\n for command in commands:\n for snippet in full_snippet_generator(command, capitals, smalls,\n greeks, digits):\n f.write(snippet)\n for snippet in naked_greek_snippet_generator():\n f.write(snippet)\n f.write(\"}\\n\")\n","sub_path":"snippet_generator.py","file_name":"snippet_generator.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217540980","text":"import numpy as np\n\nfrom .. 
import Operation as opmod \nfrom ..Operation import Operation\n\nclass NoiseArray(Operation):\n \"\"\"Creates and outputs a square array of noise\"\"\"\n\n def __init__(self):\n input_names = ['size']\n output_names = ['array']\n super(NoiseArray,self).__init__(input_names,output_names) \n self.input_doc['size'] = 'dimension of output array'\n self.output_doc['array'] = 'an array of noise'\n self.inputs['size'] = 100 \n \n def run(self):\n s = self.inputs['size']\n self.outputs['array'] = np.random.rand(s,s) \n\n","sub_path":"paws/core/operations/TESTS/NoiseArray.py","file_name":"NoiseArray.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"169901455","text":"from youtubesearchpython import VideosSearch\nimport youtube_dl, os, subprocess\n\n\ndef run():\n ydl = youtube_dl.YoutubeDL()\n search_query = input(\"(search) > \")\n while( search_query != 'q'):\n if len(search_query) == 0:\n print(\"No search query provided.\")\n print(\"Searching\", search_query)\n try:\n videosSearch = VideosSearch(search_query, limit = 10)\n results = videosSearch.result()['result']\n for i,j in enumerate(results):\n print(i, j['title'])\n selected_video_num = int(input(\"(select) > \"))\n result = results[selected_video_num]\n info_dict = ydl.extract_info(result['id'], download=False)\n url = info_dict['formats'][0]['url']\n print('Playing', result['title'])\n with open(os.devnull, \"w\") as devnull:\n subprocess.run(['vlc', url, '--play-and-exit', '--meta-title=%s' % result['title']], shell=False, stderr=devnull)\n #os.system('/usr/bin/vlc \"%s\" --meta-title=\"%s\"' % (url, result['title']))\n except Exception as ex:\n print(ex)\n search_query = input(\"(search) > \")\n\nif __name__ == '__main__':\n run()\n","sub_path":"inverminal/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"197773809","text":"import xlrd\nimport datetime\nimport math\nfrom .base import TableParser\n\n\nclass WorkbookParser(TableParser):\n workbook = None\n worksheet = None\n sheet_name = 0\n start_row = None\n column_count = None\n\n def parse(self):\n self.parse_workbook()\n if self.sheet_name is None:\n self.data = [{'name': name, 'data': self.get_sheet_by_name(name)}\n for name in self.sheet_names]\n return\n\n sheet_name = self.sheet_name\n if isinstance(self.sheet_name, int):\n sheet_name = self.sheet_names[sheet_name]\n\n self.parse_worksheet(sheet_name)\n\n if self.header_row is None:\n if self.start_row is not None:\n self.header_row = self.start_row - 1\n else:\n self.column_count = 0\n\n def checkval(cell):\n if cell.value is not None and cell.value != '':\n return True\n return False\n\n for row in range(min(len(self.worksheet) - 1, 5), -1, -1):\n count = len(filter(checkval, self.worksheet[row]))\n if count >= self.column_count:\n self.column_count = count\n self.header_row = row\n\n if self.start_row is None:\n self.start_row = self.header_row + 1\n\n if self.field_names is None:\n rows = self.worksheet[self.header_row:self.start_row]\n self.field_names = [\n unicode(c.value) or u'c%s' % i for i, c in enumerate(rows[0])\n ]\n for row in rows[1:]:\n for i, c in enumerate(row):\n self.field_names[i] += \"\\n\" + unicode(c.value)\n\n seen_fields = set()\n for i, field in enumerate(self.field_names):\n if field in seen_fields:\n field += unicode(i)\n self.field_names[i] = field\n seen_fields.add(field)\n\n 
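# (added note) this module targets Python 2: `unicode` above and the lazy\n        # `map` below (on Python 3 the map would need list() to materialize rows).\n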
self.data = map(self.parse_row, self.worksheet[self.start_row:])\n if self.header_row > 0:\n for r in range(0, self.header_row):\n for c, cell in enumerate(self.worksheet[r]):\n val = self.get_value(cell)\n if val is not None and val != '':\n self.extra_data.setdefault(r, {})\n self.extra_data[r][c] = val\n\n def parse_workbook(self):\n raise NotImplementedError\n\n @property\n def sheet_names(self):\n raise NotImplementedError\n\n def get_sheet_by_name(self, name):\n raise NotImplementedError\n\n def parse_worksheet(self, name):\n raise NotImplementedError\n\n def parse_row(self, row):\n raise NotImplementedError\n\n def get_value(self, cell):\n raise NotImplementedError\n\n\nclass ExcelParser(WorkbookParser):\n def parse_workbook(self):\n self.workbook = xlrd.open_workbook(file_contents=self.file.read())\n\n @property\n def sheet_names(self):\n return self.workbook.sheet_names()\n\n def get_sheet_by_name(self, name):\n return self.workbook.sheet_by_name(name)\n\n def parse_worksheet(self, name):\n worksheet = self.get_sheet_by_name(name)\n self.worksheet = [worksheet.row(i) for i in range(worksheet.nrows)]\n\n def parse_row(self, row):\n return {name: self.get_value(row[i])\n for i, name in enumerate(self.get_field_names())\n if i < len(row)}\n\n def get_value(self, cell):\n if cell.ctype == xlrd.XL_CELL_DATE:\n time, date = math.modf(cell.value)\n tpl = xlrd.xldate_as_tuple(cell.value, self.workbook.datemode)\n if date and time:\n return datetime.datetime(*tpl)\n elif date:\n return datetime.date(*tpl[0:3])\n else:\n return datetime.time(*tpl[3:6])\n return cell.value\n","sub_path":"parsers/xls.py","file_name":"xls.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"192247379","text":"'''\n47. 
Mining of functional verb constructions\nWe want to focus only on cases where a sahen noun (サ変接続名詞) fills the を (wo) case of a verb. Modify the program from problem 46 so that it satisfies the following specification:\n\nonly target cases where a chunk of the form 「サ変接続名詞+を(助詞)」 modifies a verb\nthe predicate is 「サ変接続名詞+を+base form of the verb」; when the chunk contains more than one verb, use the leftmost verb\nwhen more than one particle (chunk) modifies the predicate, list all particles in dictionary order, separated by spaces\nwhen more than one chunk modifies the predicate, list all arguments separated by spaces (aligned with the particle order)\nFor example, the sentence 「別段くるにも及ばんさと、主人は手紙に返事をする。」 should produce the following output:\n\n返事をする と に は 及ばんさと 手紙に 主人は\nSave the output of this program to a file and use UNIX commands to check:\n\npredicates that occur frequently in the corpus (サ変接続名詞+を+verb)\nfrequent combinations of predicates and particle patterns\n'''\nfrom operator import itemgetter\nfrom knock41 import get_chunk_list\n\nfor chunks in get_chunk_list(\"neko.txt.cabocha\"):\n    for chunk in chunks:\n        if not chunk.check_pos('動詞'):\n            continue\n\n        # fetch the source chunk made of 「サ変接続名詞+を(助詞)」\n        sahen_wo = ''\n        for src in chunk.srcs:\n            sahen_wo = chunks[src].get_sahen_wo()\n\n        if not sahen_wo:\n            continue\n\n        predicate = sahen_wo + chunk.get_surfaces('pos', '動詞')[0]  # predicate\n        case_frame = []\n        for src in chunk.srcs:\n            if len(chunks[src].get_surfaces('pos', '助詞')) == 0:\n                continue\n            case = chunks[src].get_surfaces('pos', '助詞').pop()\n            if case != \"を\":\n                case_frame.append((\n                    case,  # case pattern\n                    chunks[src].normalized_surface()  # argument (the chunk itself)\n                ))\n\n        if case_frame:\n            case_frame.sort(key=itemgetter(0))\n            print(f\"{predicate}\\t{' '.join([x[0] for x in case_frame])}\\t{' '.join([x[1] for x in case_frame])}\")","sub_path":"yoshimura/chapter05/knock47.py","file_name":"knock47.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590534084","text":"import pkgutil\nimport donica.modules as folder\n\n\nclass ModuleRouter(object):\n    def __init__(self):\n        self.modules = self.get_modules()\n\n    @classmethod\n    def get_modules(cls):\n        location = folder\n\n        modules = []\n        for finder, name, ispkg in pkgutil.walk_packages(path=location.__path__,\n                                                         prefix=location.__name__+'.'):\n            try:\n                loader = finder.find_module(name)\n                mod = loader.load_module(name)\n            except Exception as e:\n                raise e\n            else:\n                if hasattr(mod, 'TITLE'):\n                    modules.append(mod)\n                else:\n                    print('MODULE ROUTER: Skipping because could not find right format of {}'.format(name))\n        return modules\n\n    def query(self, titles, message):\n        for module in self.modules:\n            for title in titles:\n                if module.is_valid(title):\n                    try:\n                        module.handle(title, message)\n                    except Exception as e:\n                        raise e\n                    else:\n                        print('MODULE ROUTER: Handling of phrase {} by module {} completed'.format(title,\n                                                                                                   module.__name__))\n                        # return from else, not finally, so a re-raised exception is not swallowed\n                        return\n","sub_path":"donica/ModuleRouter.py","file_name":"ModuleRouter.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601356280","text":"class Team:\n\tdef __init__(self, name, players):\n\t\tself.name = name\n\t\tself.players = players\n\t\tself.result = None\n\t\tself.score = 0\n\t\tself.chances = 0\n\n\t\tdf, mf, fw = 0,0,0\n\t\tfor player in players:\n\t\t\tif player.position.upper() == \"DF\":\n\t\t\t\tdf += 1\n\t\t\telif player.position.upper() == \"MF\":\n\t\t\t\tmf += 1\n\t\t\telif player.position.upper() == \"FW\":\n\t\t\t\tfw += 1\n\n\t\t\tself.chances += player.skill.passing\n\n\t\tself.chances = (self.chances) % len(players)\n\t\tself.formation = [df, mf, fw]\n\t\t\n\t\tif sum(self.formation) != 10:\n\t\t\traise InvalidFormationError(self, self.formation, \"You should have 10 players in the team aside from the GK\")\n\n\t\tif len(players) != 11:\n\t\t\traise InvalidNumberOfPlayerError(len(players))  # invalid number of players: must be exactly 11\n\n\tdef 
__str__(self):\n\t\treturn self.name\n\n\tdef __repr__(self):\n\t\treturn self.name\t\t\n\n\tdef result(self, opponent):\n\t\t\n\t\tif opponent.score > self.score:\n\t\t\tself.result = \"loss\"\n\t\telif opponent.score < self.score:\n\t\t\tself.result = \"win\"\n\t\telse:\n\t\t\tself.result = \"draw\"\n\t\t\n\t\treturn self.result\n\n\nclass InvalidFormationError(BaseException):\n\tdef __init__(self, team, formation, message):\n\t\tself.team = team\n\t\tself.formation = formation \n\t\tself.message = message\n\n\nclass InvalidNumberOfPlayerError(BaseException):\n\tdef __init__(self, team_number):\n\t\tself.team_number = team_number\n","sub_path":"team/team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"71825215","text":"\"\"\"Helper function for high-throughput GNN trainings.\"\"\"\r\n\"\"\"Implementation based on the template of ALIGNN.\"\"\"\r\nimport matplotlib.pyplot as plt\r\n\r\n# import numpy as np\r\nimport time\r\n# from matformer.train import train_dgl\r\nimport glob\r\nimport os\r\nfrom collections import defaultdict\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom jarvis.core.atoms import pmg_to_atoms\r\nfrom jarvis.db.jsonutils import dumpjson, loadjson\r\nfrom sklearn.metrics import mean_absolute_error, roc_auc_score\r\nfrom matbench.bench import MatbenchBenchmark\r\nfrom matbench.constants import CLF_KEY\r\nfrom train_on_folder import train_for_folder\r\n\r\nparser = argparse.ArgumentParser(\r\n description=\"Trainer\"\r\n)\r\nparser.add_argument(\"--single_run\", required=False, help=\"specific part of subset.\", default=None)\r\nparser.add_argument(\"--fold\", required=False, help=\"fold.\", default=None)\r\nparser.add_argument(\"--checkpoint\", required=False, help=\"fold.\", default=None)\r\nparser.add_argument(\"--device\", required=False, help=\"device.\", default=\"gpu:0\")\r\nargs = vars(parser.parse_args())\r\nprint(\"Input of argparse:\", args)\r\n\r\nfold_to_run = int(args[\"fold\"]) if args[\"fold\"] is not None else None\r\n# fold_to_run = \"matbench_mp_is_metal\"\r\ncheckpoint_to_use= args[\"checkpoint\"]\r\ndevice_to_use = args[\"device\"]\r\n\r\nif args[\"single_run\"] is None:\r\n subset =[\r\n \"matbench_jdft2d\",\r\n \"matbench_dielectric\",\r\n \"matbench_phonons\",\r\n \"matbench_perovskites\",\r\n \"matbench_log_gvrh\",\r\n \"matbench_log_kvrh\",\r\n \"matbench_mp_e_form\",\r\n \"matbench_mp_gap\",\r\n \"matbench_mp_is_metal\",\r\n ]\r\nelse:\r\n subset = [args[\"single_run\"]]\r\n\r\nmb = MatbenchBenchmark(\r\n autoload=False,\r\n subset=subset\r\n)\r\n\r\n\r\ndef train_tasks(\r\n mb=None, config_template=\"config_example.json\", file_format=\"poscar\", device=\"gpu:0\"\r\n):\r\n \"\"\"Train MatBench clalssification and regression tasks.\"\"\"\r\n for task in mb.tasks:\r\n task.load()\r\n if task.metadata.task_type == CLF_KEY:\r\n classification = True\r\n else:\r\n classification = False\r\n # Classification tasks\r\n if classification:\r\n # rocs = []\r\n for ii, fold in enumerate(task.folds):\r\n if fold_to_run is not None:\r\n if fold_to_run != ii:\r\n continue\r\n train_df = task.get_train_and_val_data(fold, as_type=\"df\")\r\n test_df = task.get_test_data(\r\n fold, include_target=True, as_type=\"df\"\r\n )\r\n train_df[\"is_metal\"] = train_df[\"is_metal\"].astype(float)\r\n test_df[\"is_metal\"] = test_df[\"is_metal\"].astype(float)\r\n # Name of the target property\r\n target = [\r\n 
col\r\n for col in train_df.columns\r\n if col not in (\"id\", \"structure\", \"composition\")\r\n ][0]\r\n # Making sure there are spaces or parenthesis which\r\n # can cause issue while creating folder\r\n fold_name = (\r\n task.dataset_name\r\n + \"_\"\r\n + target.replace(\" \", \"_\")\r\n .replace(\"(\", \"-\")\r\n .replace(\")\", \"-\")\r\n + \"_fold_\"\r\n + str(ii)\r\n )\r\n if not os.path.exists(fold_name):\r\n os.makedirs(fold_name)\r\n os.chdir(fold_name)\r\n # ALIGNN requires the id_prop.csv file\r\n f = open(\"id_prop.csv\", \"w\")\r\n for jj, j in train_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n # There is no pre-defined validation splt, so we will use\r\n # a portion of training set as validation set, and\r\n # keep test set intact\r\n val_df = train_df[0 : len(test_df)]\r\n for jj, j in val_df.iterrows():\r\n # for jj, j in test_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n for jj, j in test_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n n_train = len(train_df)\r\n n_val = len(val_df)\r\n n_test = len(test_df)\r\n config = loadjson(config_template)\r\n config[\"n_train\"] = n_train\r\n config[\"n_val\"] = n_val\r\n config[\"n_test\"] = n_test\r\n # Just for testing\r\n # config[\"n_train\"] = 500\r\n # config[\"n_val\"] = 100\r\n # config[\"n_test\"] = 100\r\n config[\"keep_data_order\"] = True\r\n config[\"batch_size\"] = 64\r\n config[\"epochs\"] = 50\r\n config[\"classification_threshold\"] = 0.01\r\n config[\"progress\"] = False\r\n config[\"learning_rate\"] = 0.0005\r\n config[\"criterion\"] = \"BCEWithLogitsLoss\"\r\n config[\"dataset\"] = task.dataset_name\r\n config[\"target\"] = \"target\" # target.replace(\" \", \"_\")\r\n fname = \"config_fold_\" + str(ii) + \".json\"\r\n outdir_name = (\r\n task.dataset_name\r\n + \"_\"\r\n + target.replace(\" \", \"_\")\r\n .replace(\"(\", \"-\")\r\n .replace(\")\", \"-\")\r\n + \"_outdir_\"\r\n + str(ii)\r\n )\r\n config[\"output_dir\"] = outdir_name\r\n dumpjson(data=config, filename=fname)\r\n f.close()\r\n os.chdir(\"..\")\r\n cmd = (\r\n \"train_folder.py --root_dir \"\r\n + fold_name\r\n + \" --config \"\r\n + fold_name\r\n + \"/\"\r\n + fname\r\n + \" --file_format=\"\r\n + file_format\r\n + \" --keep_data_order=True\"\r\n + \" --classification_threshold=0.01\"\r\n + \" --output_dir=\"\r\n + outdir_name\r\n )\r\n print(cmd)\r\n # os.system(cmd)\r\n train_for_folder(root_dir=fold_name,\r\n config_name=fold_name + \"/\" + fname,\r\n file_format=file_format,\r\n output_dir=outdir_name,\r\n keep_data_order=True,\r\n classification_threshold=0.01,\r\n restore_checkpoint=checkpoint_to_use,\r\n device=device\r\n )\r\n test_csv = outdir_name + \"/prediction_results_test_set.csv\"\r\n df = pd.read_csv(test_csv)\r\n target_vals = df.target.values\r\n id_vals = df.id.values\r\n\r\n # Regression tasks\r\n # TODO: shorten the script by taking out repetitive lines\r\n if not classification:\r\n maes = []\r\n for ii, fold in enumerate(task.folds):\r\n if fold_to_run is not None:\r\n if fold_to_run != ii:\r\n continue\r\n train_df = 
task.get_train_and_val_data(fold, as_type=\"df\")\r\n test_df = task.get_test_data(\r\n fold, include_target=True, as_type=\"df\"\r\n )\r\n # Name of the target property\r\n target = [\r\n col\r\n for col in train_df.columns\r\n if col not in (\"id\", \"structure\", \"composition\")\r\n ][0]\r\n # Making sure there are spaces or parenthesis which\r\n # can cause issue while creating folder\r\n fold_name = (\r\n task.dataset_name\r\n + \"_\"\r\n + target.replace(\" \", \"_\")\r\n .replace(\"(\", \"-\")\r\n .replace(\")\", \"-\")\r\n + \"_fold_\"\r\n + str(ii)\r\n )\r\n if not os.path.exists(fold_name):\r\n os.makedirs(fold_name)\r\n os.chdir(fold_name)\r\n # ALIGNN requires the id_prop.csv file\r\n f = open(\"id_prop.csv\", \"w\")\r\n for jj, j in train_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n # There is no pre-defined validation splt, so we will use\r\n # a portion of training set as validation set, and\r\n # keep test set intact\r\n val_df = train_df[0 : len(test_df)]\r\n for jj, j in val_df.iterrows():\r\n # for jj, j in test_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n for jj, j in test_df.iterrows():\r\n id = j.name\r\n atoms = pmg_to_atoms(j.structure)\r\n pos_name = id\r\n atoms.write_poscar(pos_name)\r\n val = j[target]\r\n line = str(pos_name) + \",\" + str(val) + \"\\n\"\r\n f.write(line)\r\n n_train = len(train_df)\r\n n_val = len(val_df)\r\n n_test = len(test_df)\r\n config = loadjson(config_template)\r\n config[\"n_train\"] = n_train\r\n config[\"n_val\"] = n_val\r\n config[\"n_test\"] = n_test\r\n config[\"keep_data_order\"] = True\r\n config[\"batch_size\"] = 64\r\n config[\"epochs\"] = 500\r\n config[\"dataset\"] = task.dataset_name\r\n if task.dataset_name == \"matbench_mp_gap\" or task.dataset_name == \"matbench_mp_e_form\":\r\n config[\"learning_rate\"] = 0.0005\r\n config[\"target\"] = \"target\" # target.replace(\" \", \"_\")\r\n fname = \"config_fold_\" + str(ii) + \".json\"\r\n outdir_name = (\r\n task.dataset_name\r\n + \"_\"\r\n + target.replace(\" \", \"_\")\r\n .replace(\"(\", \"-\")\r\n .replace(\")\", \"-\")\r\n + \"_outdir_\"\r\n + str(ii)\r\n )\r\n config[\"output_dir\"] = outdir_name\r\n dumpjson(data=config, filename=fname)\r\n f.close()\r\n os.chdir(\"..\")\r\n cmd = (\r\n \"train_folder.py --root_dir \"\r\n + fold_name\r\n + \" --config \"\r\n + fold_name\r\n + \"/\"\r\n + fname\r\n + \" --file_format=\"\r\n + file_format\r\n + \" --keep_data_order=True\"\r\n + \" --output_dir=\"\r\n + outdir_name\r\n )\r\n print(cmd)\r\n # os.system(cmd)\r\n train_for_folder(root_dir=fold_name,\r\n config_name=fold_name + \"/\" + fname,\r\n file_format=file_format,\r\n output_dir=outdir_name,\r\n keep_data_order=True,\r\n restore_checkpoint=checkpoint_to_use,\r\n device=device\r\n )\r\n test_csv = outdir_name + \"/prediction_results_test_set.csv\"\r\n df = pd.read_csv(test_csv)\r\n target_vals = df.target.values\r\n # id_vals = df.id.values\r\n pred_vals = df.prediction.values\r\n mae = mean_absolute_error(target_vals, pred_vals)\r\n maes.append(mae)\r\n task.record(fold, pred_vals, params=config)\r\n print(\r\n \"Dataset_name, Fold, MAE=\",\r\n task.dataset_name,\r\n fold,\r\n mean_absolute_error(target_vals, pred_vals),\r\n )\r\n 
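# (added note) after the fold loop, the per-fold MAEs collected above are\r\n            # aggregated; the mean/std printed below are the per-task summary.\r\n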
maes = np.array(maes)\r\n print(maes, np.mean(maes), np.std(maes))\r\n print()\r\n print()\r\n print()\r\n\r\n\r\ndef compile_results(key=\"matbench_phonons\", regression=True):\r\n \"\"\"Compile fold based results for each task.\"\"\"\r\n # Some of the jobs such as mp_e_form takes a couple of\r\n # days to complete for each fold\r\n # so we compile the results as follows\r\n maes = []\r\n roc_aucs = []\r\n results = defaultdict()\r\n\r\n for i in glob.glob(key + \"*/prediction_results_test_set.csv\"):\r\n fold = int(os.path.split(i)[0].split(\"_\")[-1])\r\n # fold = int(i.split(\"/\")[0].split(\"_\")[-1])\r\n # print (i,fold)\r\n df = pd.read_csv(i)\r\n\r\n target_vals = df.target.values\r\n # id_vals = df.id.values\r\n pred_vals = df.prediction.values\r\n if regression:\r\n mae = mean_absolute_error(target_vals, pred_vals)\r\n maes.append(mae)\r\n print(\"MAE\", fold, mae)\r\n if not regression:\r\n roc = roc_auc_score(target_vals, pred_vals)\r\n roc_aucs.append(roc)\r\n print(\"ROC\", fold, roc)\r\n # We changed the predictions to sigmoid.\r\n # pred_vals = [True if i == 1 else False for i in pred_vals]\r\n results[fold] = pred_vals\r\n\r\n if regression:\r\n maes = np.array(maes)\r\n print(key, maes, np.mean(maes), np.std(maes))\r\n if not regression:\r\n roc_aucs = np.array(roc_aucs)\r\n print(key, roc_aucs, np.mean(roc_aucs), np.std(roc_aucs))\r\n return results\r\n\r\nrun_training = True\r\n\r\nif __name__ == \"__main__\":\r\n config_template = os.path.abspath(\r\n os.path.join(os.path.dirname(__file__), \"config_example.json\")\r\n )\r\n config = loadjson(config_template)\r\n if run_training:\r\n train_tasks(mb=mb, config_template=config_template, file_format=\"poscar\", device=device_to_use)\r\n\r\n run_dir = \"../matbench\"\r\n\r\n cwd = os.getcwd()\r\n\r\n os.chdir(run_dir)\r\n\r\n results = defaultdict()\r\n for task in mb.tasks:\r\n task.load()\r\n task_name = task.dataset_name\r\n regr = True\r\n if \"is\" in task_name:\r\n regr = False\r\n results = compile_results(task_name, regression=regr)\r\n for ii, fold in enumerate(task.folds):\r\n train_df = task.get_train_and_val_data(fold, as_type=\"df\")\r\n test_df = task.get_test_data(\r\n fold, include_target=True, as_type=\"df\"\r\n )\r\n pred_vals = results[fold]\r\n task.record(fold, pred_vals, params=config)\r\n os.chdir(cwd)\r\n mb.add_metadata({\"algorithm\": \"Matformer\"})\r\n mb.to_file(\"results.json.gz\")\r\n\r\n\r\nfor key, values in mb.scores.items():\r\n factor = 1000.0 if key in [\"matbench_mp_e_form\", \"matbench_mp_gap\", \"matbench_perovskites\"] else 1.0\r\n if key not in [\"matbench_mp_is_metal\"]:\r\n print(key, factor*values[\"mae\"][\"mean\"], factor*values[\"mae\"][\"std\"])\r\n else:\r\n print(key, values[\"rocauc\"][\"mean\"], values[\"rocauc\"][\"std\"])","sub_path":"benchmarks/matbench_v0.1_matformer/train_matbench.py","file_name":"train_matbench.py","file_ext":"py","file_size_in_byte":16483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357022726","text":"# Andrew Akshan\r\n\r\nfrom pyKarel import *\r\nfrom Athlete import Athlete\r\n\r\nname_of_world = raw_input(\"Which maze?(maze1,maze2,maze3)\")\r\n\r\nwld=World(name_of_world, width=10, height=10, delay=0.001)\r\n\r\nal = Athlete(wld, 1, 1, south, 0)\r\n\r\nwhile not al.nextToABeeper():\r\n if al.rightIsClear():\r\n al.turnRight()\r\n al.move()\r\n elif al.frontIsClear():\r\n al.move()\r\n else: \r\n al.turnLeft()\r\n \r\n 
\r\nwld.mainloop()","sub_path":"Lab10.py","file_name":"Lab10.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548359238","text":"import base64\nfrom flask import request, Flask\nfrom python_fission.lib.atomiq.classes.cloudevent import cloud_event_decorator\nfrom python_fission.lib.atomiq.classes.helper import extract_parameters\nfrom python_fission.lib.ssh.ssh import run_command\nfrom python_fission.lib.atomiq.classes import constants\nfrom python_fission.lib.atomiq.classes.response_helper import success_response,failure_response\n\n# Developers need to make sure the input json will be of below format\n# This is the input template that will be validated\n# Please don't change only keys inside the 'data' dict.\n# Please don't change any naming convention\n# '''\nINPUT_TEMPLATE = {\n \"data\": {\n \"flowName\": \"\",\n \"company\": \"\",\n \"project\": \"\",\n \"parameters\": [\n {\"source_machine\": {\"entity\": \"\"}},\n {\"sender\": \"\"},\n {\"subject\": \"\"},\n {\"recipients\": []},\n {\"body\": \"\"},\n {\"encoded\": \"\"},\n {\"attachments\": []}\n ]\n }\n}\n# main Atom method\n'''\njson_input --> is a request dict.There is no need to run \"json.loads()\" on it\nThe response has to be of type dict/list and we need to return it as \"json_result\" from the\n\"main()\" def All the business logic will be written in \"handle()\"\n'''\n\n\ndef handle(json_input):\n '''\n General description:\n Sends mail to the requested recipients.\n Args:\n param1 json_input(dict) : This is the input json received.\n Returns:\n and return the result as below :\n Success -> JSON response with status as success and message as below\n {\"status\": \"success\", \"message\": \"Mail send successfully\"}\n Failure -> JSON response with status as failure, exit-code as 1\n and Error-details contains error info.\n {\"status\": \"failure\", \"exit-code\": exitcode, \"Error-details\": stderr}\n\n Example :\n handle(json_input)\n '''\n parameters = extract_parameters(json_input)\n config_details = parameters.get('source_machine')\n subject = parameters.get('subject')\n sender = parameters.get('sender')\n encoded = parameters.get('encoded')\n body = parameters.get('body')\n\n if encoded == 'true':\n body = base64.b64decode(body)\n\n attachments = parameters.get('attachments')\n recipients = parameters.get('recipients')\n command = \"echo '\" + str(body) + \"'| mail -r \" + sender\n\n for _s in attachments:\n command = command + \" -a \" + _s\n\n command = command + \" -s \" + \"'\" + subject + \"'\"\n\n for _r in recipients:\n command = command + \" \" + _r\n status, data = run_command(config_details, command)\n\n if status == constants.get_success():\n output = {'output': 'Mail sent successfully'}\n json_result = success_response(output)\n else:\n output = {'output' : str(data)}\n json_result = failure_response(output)\n\n return json_result\n\n\n# '''\n# Fission is invoking the main() method.\n# Please do not change anything below this method declation or body\n# The input payload validation is being done in execut() decorator\n# The main job is being done by the handle definition\n# '''\n\nAPP = Flask(__name__) # USED TO TEST.. COMMENT IT WHILE UPLOADING\n\n\n# USED TO TEST.. 
COMMENT IT WHILE UPLOADING\n@APP.route('/check', methods=['POST'])\n@cloud_event_decorator(INPUT_TEMPLATE)\ndef main():\n '''\n General description:\n Args:\n\n Returns:\n Returns result entities that were collected by the atom.\n Example :\n main()\n '''\n return handle(request.get_json())\n\n\nif __name__ == '__main__': # USED TO TEST.. COMMENT IT WHILE UPLOADING\n # USED TO TEST.. COMMENT IT WHILE UPLOADING\n APP.run(port=1012, host='0.0.0.0', debug=True)\n","sub_path":"python_fission/python/atoms/atomiq-py-ssh/send_mail/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262853411","text":"\"\"\"\n# run celery task\n# celery --app=fufel.celery_app:app worker -l info\n# run celery periodic task\n# celery --app=fufel.celery_app:app beat\n\"\"\"\nfrom __future__ import absolute_import\n\nimport time\nfrom django.utils.timezone import datetime\nfrom fufel.celery_app import app\nfrom Fufel.models import Channel, Video\n\n\n@app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n\n\n@app.task\ndef populate_video(**kwargs):\n channel_inst = Channel.objects.get(pk=2)\n text = \"TEST TEST TEST\"\n for i in range(10):\n video = Video(name='Video {}'.format(i+1), description=text, youtube_id='32145',\n youtube_url='https://www.youtube.com/watch?v=Tj75Arhq5ho', channel=channel_inst,\n uploaded=datetime.now())\n video.save()\n\n\n@app.task\ndef timer(sec):\n print (datetime.now(), ' wait {} sec'.format(sec))\n time.sleep(sec)\n print ('Done')\n","sub_path":"Fufel/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637721290","text":"# coding: UTF-8\nimport sys\n\nN = 65 #セルの最大個数\nR = 8 #ルール表の大きさ\nMAXT = 50 #繰り返しの回数\n\ndef setrule(rule,ruleno):\n for i in range(0,R):\n rule[i] = ruleno % 2\n ruleno = ruleno // 2\n for i in range(R - 1,-1,-1):\n print(rule[i])\n\ndef initca(ca):\n line = input(\"caの初期値を入力して下さい:\")\n print()\n for no in range(len(line)):\n ca[no] = int(line[no])\n\ndef putca(ca):\n for no in range(N - 1,-1,-1):\n print(\"{:1d}\".format(ca[no]), end=\"\")\n print()\n\ndef nextt(ca,rule):\n nextca = [0 for i in range(N)]\n for i in range(1,N - 1):\n nextca[i] = rule[ca[i + 1] * 4 + ca[i] * 2 + ca[i-1]]\n for i in range(N):\n ca[i] = nextca[i]\n\nrule = [0 for i in range(R)]\nruleno = int(input(\"ルール番号を入力して下さい:\"))\nprint()\n\nif ruleno < 0 or ruleno > 255:\n print(\"ルール番号が正しくありません(\", ruleno,\")\")\n sys.exit()\n\nsetrule(rule,ruleno) #\n\nca = [0 for i in range(N)] #\ninitca(ca)\nputca(ca)\n\nfor t in range(MAXT):\n nextt(ca, rule)\n putca(ca)\n","sub_path":"cell.py","file_name":"cell.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31991997","text":"# -*- coding: utf-8 -*-\n###\n# (C) Copyright (2012-2019) Hewlett Packard Enterprise Development LP\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 'Software'), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following 
conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n###\n\nfrom unittest import TestCase\n\nimport mock\n\nfrom hpOneView.connection import connection\nfrom hpOneView.resources.servers.logical_enclosures import LogicalEnclosures\nfrom hpOneView.resources.resource import Resource, ResourceHelper, ResourcePatchMixin\n\n\nclass LogicalEnclosuresTest(TestCase):\n def setUp(self):\n self.host = '127.0.0.1'\n self.connection = connection(self.host)\n self._logical_enclosures = LogicalEnclosures(self.connection)\n self.uri = \"/rest/logical-enclosures/ad28cf21-8b15-4f92-bdcf-51cb2042db32\"\n self._logical_enclosures.data = {\"uri\": self.uri}\n\n @mock.patch.object(ResourceHelper, 'create')\n def test_create_called_once(self, mock_create):\n resource = dict(\n enclosureUris=[\n \"/rest/enclosures/0000000000A66101\",\n \"/rest/enclosures/0000000000A66102\",\n \"/rest/enclosures/0000000000A66103\"\n ],\n enclosureGroupUri=\"/rest/enclosure-groups/e41118e4-2233-4b6b-9318-c9982dbf01fa\",\n forceInstallFirmware=False,\n name=\"testLogicalEnclosure\"\n )\n mock_create.return_value = {}\n\n self._logical_enclosures.create(resource)\n mock_create.assert_called_once_with(resource.copy(), None, -1, None, False)\n\n @mock.patch.object(ResourceHelper, 'delete')\n def test_delete_called_once(self, mock_delete):\n self._logical_enclosures.delete(force=False)\n\n mock_delete.assert_called_once_with(self.uri, custom_headers=None,\n force=False, timeout=-1)\n\n @mock.patch.object(ResourceHelper, 'delete')\n def test_delete_called_once_with_force(self, mock_delete):\n self._logical_enclosures.delete(force=True)\n\n mock_delete.assert_called_once_with(self.uri, custom_headers=None,\n force=True, timeout=-1)\n\n @mock.patch.object(ResourceHelper, 'get_all')\n def test_get_all_called_once(self, mock_get_all):\n filter = 'name=TestName'\n sort = 'name:ascending'\n scope_uris = 'rest/scopes/cd237b60-09e2-45c4-829e-082e318a6d2a'\n\n self._logical_enclosures.get_all(2, 500, filter, sort, scope_uris)\n\n mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort, scope_uris=scope_uris)\n\n @mock.patch.object(ResourceHelper, 'get_all')\n def test_get_all_called_once_with_default_values(self, mock_get_all):\n self._logical_enclosures.get_all()\n\n mock_get_all.assert_called_once_with(0, -1, filter='', sort='', scope_uris='')\n\n @mock.patch.object(Resource, 'get_by')\n def test_get_by_name_called_once(self, mock_get_by):\n self._logical_enclosures.get_by_name('OneViewSDK-Test-Logical-Enclosure')\n mock_get_by.assert_called_once_with('name', 'OneViewSDK-Test-Logical-Enclosure')\n\n @mock.patch.object(Resource, 'ensure_resource_data')\n @mock.patch.object(ResourceHelper, 'update')\n def test_update_called_once_with_defaults(self, mock_update, mock_ensure_client):\n logical_enclosure = {\n \"name\": \"one_enclosure_le\",\n }\n logical_enclosure[\"uri\"] = self.uri\n self._logical_enclosures.update(logical_enclosure)\n 
mock_update.assert_called_once_with(logical_enclosure, self.uri, False, -1, None)\n\n @mock.patch.object(Resource, 'ensure_resource_data')\n @mock.patch.object(ResourceHelper, 'update')\n def test_update_called_once(self, mock_update, mock_ensure_client):\n logical_enclosure = {\n \"name\": \"one_enclosure_le\",\n }\n logical_enclosure[\"uri\"] = self.uri\n self._logical_enclosures.update(logical_enclosure, 70)\n mock_update.assert_called_once_with(logical_enclosure, self.uri,\n False, 70, None)\n\n @mock.patch.object(ResourcePatchMixin, 'patch_request')\n def test_patch_should_use_user_defined_values(self, mock_patch):\n mock_patch.return_value = {}\n custom_headers = {'If-Match': '*'}\n\n self._logical_enclosures.patch(\n 'replace', '/name', 'new_name', custom_headers, 1)\n mock_patch.assert_called_once_with(self.uri,\n body=[{'path': '/name',\n 'op': 'replace',\n 'value': 'new_name'}],\n custom_headers={'If-Match': '*'},\n timeout=1)\n\n @mock.patch.object(Resource, 'refresh')\n @mock.patch.object(ResourceHelper, 'update')\n def test_update_configuration(self, mock_update, mock_refresh):\n uri_rest_call = '{}/configuration'.format(self.uri)\n\n self._logical_enclosures.update_configuration()\n\n mock_update.assert_called_once_with(None, uri_rest_call, timeout=-1)\n\n @mock.patch.object(ResourceHelper, 'do_get')\n def test_get_script(self, mock_get):\n uri_rest_call = '{}/script'.format(self.uri)\n\n self._logical_enclosures.get_script()\n\n mock_get.assert_called_once_with(uri_rest_call)\n\n @mock.patch.object(ResourceHelper, 'update')\n def test_update_script(self, mock_update):\n uri_rest_call = '/rest/logical-enclosures/ad28cf21-8b15-4f92-bdcf-51cb2042db32/script'\n information = {\"#TEST COMMAND\": \"\"}\n configuration_rest_call = information.copy()\n\n self._logical_enclosures.update_script(information)\n\n mock_update.assert_called_once_with(\n configuration_rest_call, uri=uri_rest_call, timeout=-1)\n\n @mock.patch.object(ResourceHelper, 'create')\n def test_support_dump_called_once(self, mock_create):\n information = {\n \"errorCode\": \"MyDump16\",\n \"encrypt\": True,\n \"excludeApplianceDump\": False\n }\n uri_rest_call = '{}/support-dumps'.format(self.uri)\n\n mock_create.return_value = {}\n\n self._logical_enclosures.generate_support_dump(information)\n mock_create.assert_called_once_with(\n information.copy(), uri=uri_rest_call, timeout=-1)\n\n @mock.patch.object(ResourceHelper, 'update')\n def test_update_from_group(self, mock_update):\n uri_rest_call = '{}/updateFromGroup'.format(self.uri)\n\n self._logical_enclosures.update_from_group()\n\n mock_update.assert_called_once_with(None, uri_rest_call, timeout=-1)\n","sub_path":"tests/unit/resources/servers/test_logical_enclosures.py","file_name":"test_logical_enclosures.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54131022","text":"# keras import\nimport tensorflow as tf\nimport numpy as np\nimport os\nnp.random.seed(42)\nfrom keras.callbacks import Callback, LambdaCallback\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.layers import Input, Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.engine.topology import Layer\nfrom keras.utils import np_utils\nfrom keras import backend as K\nfrom keras.models import load_model\n# big boy utils\nfrom utils.configuration import *\nfrom utils.load_data import *\nfrom 
utils.dataset import *\nfrom utils.preprocessing import *\nfrom utils.model import *\nfrom utils.reporting import *\nfrom utils.visualization import *\nfrom utils.pca_tsne import *\nfrom utils.load_preprocessed import *\n# sklearn\nfrom sklearn.model_selection import train_test_split as tts\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nenc = OneHotEncoder()\nlab_enc = LabelEncoder()\n\ninput(\"Press Enter to continue.\")\n\n# Get raw data\nall_images1, all_labels1, all_bottles1 = load_preprocessed_data(corpus_dir, bottle_dir, img_dims)\n\ntrained_dir_2 = \"/mnt/data/corpi/gaussian_masked_clr_20_50/\"\ncorpus_dir_2 = os.path.join(trained_dir_2, \"corpus\")\nbottle_dir_2 = os.path.join(trained_dir_2, \"bottleneck\")\n# Get Raw data 2\nall_images2, all_labels2, all_bottles2 = load_preprocessed_data(corpus_dir_2,\n bottle_dir_2, img_dims)\n\n# Split Data into train_test\ni_train1, i_test1, b_train1, b_test1, l_train1, l_test1 = tts(all_images1, all_bottles1,\n all_labels1, test_size=0.30,\n stratify=all_labels1, random_state=42)\n\ni_train2, i_test2, b_train2, b_test2, l_train2, l_test2 = tts(all_images2, all_bottles2,\n all_labels2, test_size=0.30,\n stratify=all_labels2, random_state=42)\nimgs_train = np.vstack((i_train1, i_train2))\nbots_train = np.vstack((b_train1, b_train2))\nlab_train = np.concatenate((l_train1, l_train2), axis=0)\n#Get test images\nimgs_test = np.vstack((i_test1, i_test2))\nbots_test = np.vstack((b_test1, b_test2))\nlab_test = np.concatenate((l_test1, l_test2), axis=0)\n\n# Perform one-hot encoding on all labels\nlab_train_le = lab_enc.fit_transform(lab_train)\nlab_train_ohe = enc.fit_transform(lab_train_le.reshape(-1,1)).toarray()\nlab_test_le = lab_enc.fit_transform(lab_test)\nlab_test_ohe = enc.fit_transform(lab_test_le.reshape(-1,1)).toarray()\n\n#Split train into train and validation\nimgs_val, imgs_test, bots_val, bots_test, lab_val_ohe, lab_test_ohe = tts(imgs_test, bots_test,\n lab_test_ohe, test_size=0.50,\n stratify=lab_test_ohe, random_state=42)\n\nprint(imgs_train.shape)\nprint(imgs_val.shape)\nprint(imgs_test.shape)\nprint(lab_train_ohe.shape)\nprint(lab_val_ohe.shape)\nprint(lab_test_ohe.shape)\n\ninput(\"Press Enter to continue.\")\n\n# Make a Scheduler:\nepoch_count = K.variable(0)\nbeta = K.variable(1.)\nclass RegScheduler(Callback):\n def __init__(self, beta, epoch_count):\n self.beta = beta\n self.epoch_count = epoch_count\n def on_epoch_begin(self, epoch, logs={}):\n K.set_value(self.epoch_count, epoch)\n def on_epoch_end(self, epoch, logs={}):\n max_epoch= 70\n power = 4\n stop = 0\n K.set_value(self.beta, ((1-(epoch/max_epoch)) ** power ) * (1-stop) + stop )\n print('---current beta: %.3f' % K.get_value(beta))\n\n# checkpoint load\nmodel_dir = \"./mdl/\"\nif not os.path.isdir(model_dir):\n os.makedirs(model_dir)\n\nmodel_path = os.path.join(model_dir, 'model_noreg_150.h5')\nif os.path.exists(model_path):\n print('Loading model...')\n #for name in glob.glob('./mdl/model?.txt'):\n model = load_model(model_path)\nelse:\n print('Building Model..')\n # Make the layers:\n inputs1 = Input(shape=(150, 150, 3))\n\n x = Convolution2D(32, (3, 3), padding='same', name='c0')(inputs1)\n x = BatchNormalization(name='c0_bn')(x)\n x = Activation('relu', name='c0_act')(x)\n x = MaxPooling2D(pool_size=(2, 2), name='c0_max')(x)\n\n x = Convolution2D(32, (3, 3), padding='same', name='c1')(x)\n x = BatchNormalization(name='c1_bn')(x)\n x = Activation('relu', name='c1_act')(x)\n x = MaxPooling2D(pool_size=(2, 2), 
name='c1_max')(x)\n\n x = Convolution2D(32, (3, 3), padding='same', name='c2')(x)\n x = BatchNormalization(name='c2_bn')(x)\n x = Activation('relu', name='c2_act')(x)\n x = MaxPooling2D(pool_size=(2, 2), name='c2_max')(x)\n\n x = Flatten(name='flat_0')(x)\n\n x = Dense(2048, name='fc_0')(x)\n x = BatchNormalization(name='fc_0_bn')(x)\n x = Activation('sigmoid', name='fc_0_act')(x)\n x = Dropout(0.7, name='fc_0_drop')(x)\n\n x = Dense(2048, name='fc_1')(x)\n x = BatchNormalization(name='fc_1_bn')(x)\n x = Activation('sigmoid', name='fc_1_act')(x)\n\n x = Dense(20, name='fc_2')(x)\n prediction = Activation('softmax')(x)\n\n model = Model(inputs=[inputs1], outputs=[prediction])\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam', metrics=['accuracy'])\n\nprint('layers made!')\n#model.summary()\n\ninput(\"Press Enter to continue.\")\n# Training:\ntry:\n model.fit([imgs_train], lab_train_ohe,\n batch_size=50, epochs=80, verbose=1,\n validation_data=([imgs_val], lab_val_ohe),\n callbacks=[RegScheduler(beta=beta, epoch_count=epoch_count)])\n\n score, accuracy = model.evaluate([imgs_test], lab_test_ohe, batch_size=100, verbose=0)\n print('Test score:', score)\n print('Test accuracy:', accuracy)\n\n epc_count= int(K.get_value(epoch_count))+1\n file_name = 'model2_noreg_'+str(epc_count)+'.h5'\n model_path_save = os.path.join(model_dir, file_name)\n model.save(model_path_save)\n\n # Layers sizes\n input(\"Press Enter to continue.\")\n bot_lay_size = 2048\n n_train_imgs = imgs_train.shape[0]\n n_test_imgs = imgs_test.shape[0]\n n_classes = lab_train_ohe.shape[1]\n print('train images', n_train_imgs)\n print('test images', n_test_imgs)\n\n # backend function to accesss values from bottle layer\n bottle_tensor_func = K.function([model.layers[0].input, K.learning_phase()],\n [model.get_layer('fc_1_act').output])\n #set up np.array to store values for all images\n bottle_tensor_train = np.zeros(shape=(n_train_imgs, bot_lay_size))\n bottle_labels_train = np.zeros(shape=(n_train_imgs, n_classes))\n bottle_tensor_test = np.zeros(shape=(n_test_imgs, bot_lay_size))\n bottle_labels_test = np.zeros(shape=(n_test_imgs, n_classes))\n\n def batcher(X_train, y_train, size):\n X_batch = [X_train[indx:indx + size] for indx in range(0, len(X_train), size)]\n y_batch = [y_train[indx:indx + size] for indx in range(0, len(y_train), size)]\n return zip(X_batch, y_batch)\n\n counter = 0\n bot_batch = 20\n # get train set bottleneck activation values:\n for batch_x, batch_y in batcher(imgs_train, lab_train_ohe, bot_batch):\n bot_train = bottle_tensor_func([batch_x, 0])[0]\n bottle_tensor_train[counter:counter+bot_batch] = bot_train\n bottle_labels_train[counter:counter+bot_batch] = batch_y\n counter += bot_batch\n\n # get test set bottleneck activation values:\n counter = 0\n bot_batch = 20\n for batch_x, batch_y in batcher(imgs_test, lab_test_ohe, bot_batch):\n bot_train = bottle_tensor_func([batch_x, 0])[0]\n bottle_tensor_test[counter:counter+bot_batch] = bot_train\n bottle_labels_test[counter:counter+bot_batch] = batch_y\n counter += bot_batch\n # stack values in signle np.array\n final_bottle = np.vstack((bottle_tensor_train, bottle_tensor_test))\n final_labels = np.vstack((bottle_labels_train, bottle_labels_test))\n\n tsne_output2(final_bottle, final_labels, 30, 5000, n_train_imgs, filename='merge_tsne.png')\n K.clear_session()\n\nexcept KeyboardInterrupt:\n score, accuracy = model.evaluate([imgs_test1], lab_test_ohe, batch_size=50, verbose=0)\n print('Test score:', score)\n print('Test accuracy:', 
accuracy)\n    epc_count = int(K.get_value(epoch_count))+1\n    file_name = 'model2_noreg_'+str(epc_count)+'.h5'\n    model_path_save = os.path.join(model_dir, file_name)\n    model.save(model_path_save)\n    K.clear_session()\n","sub_path":"bb_sk_corp2_noreg.py","file_name":"bb_sk_corp2_noreg.py","file_ext":"py","file_size_in_byte":8554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339832518","text":"from modules.darkSUSY.classDarkSUSY import *\nfrom modules.general.F_search import *\n\nfrom IPython.display import clear_output\nimport numpy as np\nimport h5py\nimport os\n\nDelphes_DIR = \"/home/franky8939/PROGRAMAS/MG5_aMC/Delphes/\"  # local directory of Delphes\nROOT_DIR = \"/home/franky8939/PROGRAMAS/ROOT/\"  # local directory of ROOT\nfbash(Delphes_DIR, ROOT_DIR)  # add Delphes and ROOT to the bash path\n\n# CLASS DARKSUSY\nDarkFile = DarkSUSY()  # initialize the DarkSUSY class\n\n# Create general h5 for all data\nOUTPUT = '/home/franky8939/GITHUP/DarkSUSY-master/data/h5_muon_all/DarkSUSY_all_NMuon.h5'\nFILES_INPUT = \"/media/franky8939/10FE09E910FE09E9/datos_investigacion_grandes/\"\n\n# If the OUTPUT file already exists, open it for update #\nif os.path.exists(OUTPUT):\n    hf = h5py.File(OUTPUT, 'a')\nelse:\n    hf = h5py.File(OUTPUT, 'w')  # create the h5 file\n\nfor files_root in os.listdir(FILES_INPUT):\n    if \".root\" in files_root:  # THE FILE IS *.root\n        for i in [1]:\n\n            try:\n                # Identify the position inside the *.h5\n                var = Ob_Value(files_root)\n                name_local_group = \"MNeuL_\" + var[\"MNeuL\"] + \"/MNeuD_\" + var[\"MNeuD\"] + \"/MPhoD_\" + var[\"MPhoD\"] + \\\n                                   \"/TcPhoD_\" + var[\"TcPhoD\"] + \"/\" + var[\"Card\"]\n\n                if var[\"Card\"] == \"_HL2_\":\n                    break\n                if np.array(hf.get(name_local_group + \"/Verification\")) == \"ON\":\n                    print(\" :: INFO OF FILE \" + files_root + \" EXISTS, CONTINUE WITH THE NEXT\")\n                    # continue\n                    break\n\n                hf.require_group(name_local_group)  # require it so it is created if it does not exist\n                del hf[name_local_group]  # always delete it so it gets refreshed\n                local_group = hf.require_group(name_local_group)  # create it again\n                # local_group.require_dataset(name=\"Name_of_FileROOT\", data=files_root)  # name of the ROOT file\n                local_group.create_dataset(name=\"Name_of_FileROOT\", data=files_root)  # name of the ROOT file\n                local_group.create_dataset(name=\"Verification\", data=\"OFF\")\n\n                # Variables\n                DarkFileTemp = DarkFile  # new\n                DarkFileTemp.Add_File(FILES_INPUT + files_root)  # Add File\n                NMu = DarkFileTemp.Mu_for_Event()\n                local_group.create_dataset(name='Entries', data=DarkFileTemp.Entries,\n                                           dtype=int)  # Number of Event\n                local_group.create_dataset(name='Mu_Entries', data=NMu,\n                                           dtype=int)  # Number of Mu for Event\n                # print(\" :: Finished successfully :: \")\n                del local_group[\"Verification\"]  # always delete it so it gets refreshed\n                local_group.create_dataset(name=\"Verification\", data=\"ON\")\n                # break\n                # sys.exit()\n            except:\n                print(\" :: FILE WITH PROBLEMS :: \" + files_root)\n                # sys.exit()\n\nhf.close()\n","sub_path":"genera_h5_all.py","file_name":"genera_h5_all.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"444838226","text":"from struct import *\r\nfrom collections import namedtuple\r\nimport numpy as np\r\n\r\ndef read_snapshot(fname):\r\n    with open(fname, 'r') as fp:\r\n        data = fp.read()\r\n    header = namedtuple(\"header\", \"N Npart Mass Mass0 Mass1 Mass2 Mass3 Mass4 Mass5 a z FlagSfr FlagFeedback Nall0 Nall1 Nall2 Nall3 Nall4 Nall5 FlagCooling 
NumFiles BoxSize Omega_0 Omega_L h FlagMultphase FlagStellarAge FlagSfrHistogram\")\r\n o = 4\r\n #s = \"%lsf\" % (header.nx)\r\n #x = np.asarray(unpack(s,data[o:o+4*header.nx]))\r\n s = \"%lsi\" % (6)\r\n header.Npart = np.asarray(unpack(s,data[o:o+4*6]),dtype=int)\r\n\r\n #header.Npart0 = int(unpack(\"i\", data[o:o+4])[0])\r\n o += 4*6\r\n #header.Npart1 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Npart2 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Npart3 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Npart4 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Npart5 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n s = \"%lsd\" % (6)\r\n header.Mass = np.asarray(unpack(s,data[o:o+8*6]))\r\n o += 8*6\r\n\r\n #header.Mass0 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n #header.Mass1 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n #header.Mass2 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n #header.Mass3 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n #header.Mass4 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n #header.Mass5 = float(unpack(\"d\", data[o:o+8])[0])\r\n #o += 8\r\n\r\n header.a = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n a = header.a\r\n\r\n header.z = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n z = header.z\r\n\r\n header.FlagSfr = int(unpack(\"i\", data[o:o+4])[0])\r\n o += 4\r\n\r\n header.FlagFeedback = int(unpack(\"i\", data[o:o+4])[0])\r\n o += 4\r\n\r\n s = \"%lsi\" % (6)\r\n header.Nall = np.asarray(unpack(s,data[o:o+4*6]),dtype=int)\r\n o += 4*6\r\n\r\n #header.Nall0 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Nall1 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Nall2 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Nall3 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Nall4 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4\r\n\r\n #header.Nall5 = int(unpack(\"i\", data[o:o+4])[0])\r\n #o += 4 \r\n\r\n header.FlagCooling = int(unpack(\"i\", data[o:o+4])[0])\r\n o += 4\r\n\r\n header.NumFiles = int(unpack(\"i\", data[o:o+4])[0])\r\n o += 4\r\n\r\n header.BoxSize = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n #size = header.BoxSize\r\n\r\n header.Omega_0 = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n\r\n header.Omega_L = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n\r\n header.h = float(unpack(\"d\", data[o:o+8])[0])\r\n o += 8\r\n\r\n header.FlagMultiphase = int(unpack(\"i\", data[o:o+4])[0])\r\n header.FlagStellarAge = int(unpack(\"i\", data[o:o+4])[0])\r\n header.FlagSfrHistogram = int(unpack(\"i\", data[o:o+4])[0])\r\n\r\n header.N = np.sum(header.Npart,dtype=int)\r\n\r\n #fill\r\n o += 84\r\n o += 4\r\n dummy = int(unpack(\"i\", data[o:o+4])[0])\r\n #print(\"dummy = \",dummy)\r\n\r\n #positions\r\n #dummy\r\n o += 4\r\n s = \"%lsf\" % (3*header.N)\r\n #print(s)\r\n x = np.asarray(unpack(s,data[o:o+3*header.N*4]))\r\n #print(\"x info=\",x[0],x.min(),x.max())\r\n x = np.resize(x,(header.N,3))\r\n o += 3*header.N*4\r\n #dummy\r\n o += 4\r\n\r\n #velocity\r\n #dummy\r\n o += 4\r\n s = \"%lsf\" % (3*header.N)\r\n #print(s)\r\n v = np.asarray(unpack(s,data[o:o+3*header.N*4]))\r\n o += 3*header.N*4\r\n #dummy\r\n o += 4\r\n\r\n #ids\r\n #dummy\r\n o += 4\r\n s = \"%lsi\" % (header.N)\r\n #print(s)\r\n #print(4*header.N)\r\n ids = np.asarray(unpack(s,data[o:o+4*header.N]),dtype=int)\r\n o += header.N*4\r\n #dummy\r\n o += 4\r\n\r\n 
#dummy\r\n    #o += 4\r\n    #coordinates\r\n    #o = 48\r\n    #s = \"%lsf\" % (header.nx)\r\n    #x = np.asarray(unpack(s,data[o:o+4*header.nx]))\r\n\r\n    #print_header(header)\r\n    return header, x, v, ids  #, z, a, size\r\n\r\ndef print_header(header):\r\n    print(\"N = \",header.N)\r\n    print(\"Npart = \",header.Npart)\r\n    print(\"Mass = \",header.Mass)\r\n    #print(header.Npart0)\r\n    #print(header.Npart1)\r\n    #print(header.Npart2)\r\n    #print(header.Npart3)\r\n    #print(header.Npart4)\r\n    #print(header.Npart5)\r\n    #print(header.Mass0)\r\n    #print(header.Mass1)\r\n    #print(header.Mass2)\r\n    #print(header.Mass3)\r\n    #print(header.Mass4)\r\n    #print(header.Mass5)\r\n    print(\"a = \",header.a)\r\n    print(\"z = \",header.z)\r\n    print(\"FlagSfr = \",header.FlagSfr)\r\n    print(\"FlagFeedback = \",header.FlagFeedback)\r\n    print(\"Nall = \",header.Nall)\r\n    #print(header.Nall1)\r\n    #print(header.Nall2)\r\n    #print(header.Nall3)\r\n    #print(header.Nall4)\r\n    #print(header.Nall5)\r\n    print(\"FlagCooling = \",header.FlagCooling)\r\n    print(\"Numfiles = \",header.NumFiles)\r\n    print(\"BoxSize = \",header.BoxSize)\r\n    print(\"Omega_0 = \",header.Omega_0)\r\n    print(\"Omega_L = \",header.Omega_L)\r\n    print(\"h = \",header.h)\r\n    print(\"FlagMultiphase = \",header.FlagMultiphase)\r\n    print(\"FlagStellarAge = \",header.FlagStellarAge)\r\n    print(\"FlagSfrHistogram = \",header.FlagSfrHistogram)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"read_snapshot.py","file_name":"read_snapshot.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"507032230","text":"\nimport requests #installed?\nimport json\nimport time\nimport datetime #installed\nimport csv\nimport re\nfrom area import area\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom botocore.exceptions import ClientError  # needed for the get_item error handler below\n\n\ndef handler(event, context):\n    \n    res=\"\"\n    token = \"\"\n\n    \n\n    dynamodb = boto3.resource('dynamodb', region_name='us-east-2')  # change region\n    table = dynamodb.Table('Activetask')  # change table name\n    \n    #response = table.put_item( Item={ 'taskID' : str(today), 'Data': mass,})\n    #return res \n    \n    try:\n        response = table.get_item(\n            Key={\n                'taskID': 'miss' \n            }\n        )\n    except ClientError as e:\n        print(e.response['Error']['Message'])\n    else:\n        item = response['Item']\n    \n    \n    mass=response['Item'][\"Data\"]\n\n\n    \n    headers = {\n    'content-type': \"application/json\",\n    \n    'authorization': token #Alex\n    }\n    massRes=[]\n    for k in mass[80:90]:\n        results_url = \"https://api.astrodigital.com/v2.0/results?task_id=\"+k\n        \n\n        results_response = requests.request(\"GET\", results_url, headers=headers, )\n\n        results_json_data = json.loads(results_response.text)\n        if ('detail' in results_json_data):\n            break\n\n        for i in range(0,len(results_json_data[\"results\"])):\n\n            res+= str(json.dumps(results_json_data[\"results\"][i][\"task\"]))+\"\\n\"\n            if 'properties' in results_json_data[\"results\"][i][\"value\"].keys():\n                for j in range(0,len(results_json_data[\"results\"][i][\"value\"][\"properties\"][\"ndvi_values\"])):\n                    \n                    massRes.append(results_json_data[\"results\"][i][\"value\"][\"properties\"][\"ndvi_values\"][j][\"date\"])\n    massRes.sort()\n    for k in range(0,len(massRes)-1):\n        stime1=datetime.datetime.strptime(massRes[k+1] , '%Y-%m-%d')\n        stime2=datetime.datetime.strptime(massRes[k] , '%Y-%m-%d')\n        if (stime1-stime2>datetime.timedelta(days=10)):\n            res+=str(stime1-stime2)[0:7]+\" \"+str(stime1)[0:10]+\" \"+str(stime2)[0:10]+\"\\n\"\n    \n\n    \n    #res = response\n\n\n\n\n\n    #token = \"\"\n    \n    dictSlack = {\n    
\"strChannel\" : \"#random\",\n \"strName\" : \"StatBot\",\n \"strIconUrl\" : \"https://astrodigital.com/images/meta/apple-touch-icon-152x152.png\",\n \"strTitle\" : \"report\",\n \"strHookUrl\" : \"https://hooks.slack.com/services/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\"\n }\n\n def writeToSlack(dictSlack,jsonAttachments):\n jsonPayload = {\n \"channel\": dictSlack[\"strChannel\"],\n \"username\": dictSlack[\"strName\"],\n \"icon_url\": dictSlack[\"strIconUrl\"],\n \"text\": dictSlack[\"strTitle\"],\n \"attachments\": jsonAttachments,\n }\n \n payload = \"-----011000010111000001101001\\r\\nContent-Disposition: form-data; name=\\\"payload\\\"\\r\\n\\r\\n\"+json.dumps(jsonPayload)+\"\\r\\n-----011000010111000001101001--\"\n \n headers = {\n 'content-type': \"multipart/form-data; boundary=---011000010111000001101001\"\n }\n response = requests.request(\"POST\", dictSlack[\"strHookUrl\"], data=payload, headers=headers)\n\n\n jsonAttachments = [{\n \"fallback\": \"Required plain-text summary of the attachment.\",\n \"color\": \"#015752\",\n \"fields\": [{\n \"title\": \"Missed days:\",\n \"value\": str(res),\n \"Date\": \"\"\n \n }]\n }]\n writeToSlack(dictSlack,jsonAttachments)\n\n##\n## jsonAttachments2 = [{\n## \"fallback\": \"Required plain-text summary of the attachment.\",\n## \"color\": \"#015752\",\n## \"fields\": [{\n## \"title\": \"Missed days:\",\n## \"value\": str(event),\n## \"Date\": \"\"\n## \n## }]\n## }]\n## writeToSlack(dictSlack,jsonAttachments2)\n\n res=str(event)+str(res) \n\n\n client = boto3.client('lambda')\n response = client.invoke(\n InvocationType='Event',\n FunctionName='missDayCount11',\n Payload=json.dumps({\"test\": str(res)})\n ) \n return res\n \n\n\n\n","sub_path":"missDayCountDB10/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614333149","text":"import csv\nimport random\nfrom db_connection import DBconnection\nfrom hm4_sqlalchemy_tables import Advisor, Student, Course\n\nif __name__ == '__main__':\n\n\t# First, we create two lists -- advisors and students\n\tadvisor_list = []\n\tstudent_list = []\n\tcourse_list = []\n\tenrollment = []\n\tdb_conn = DBconnection('public')\n\tsession = db_conn.get_session()\n\n# you don't need to do the session.add() after modifying the list each time, but you can.\n\n\twith open('advisors.csv', 'r') as advisors_file:\n\t\tcsv_advisors = csv.reader(advisors_file, delimiter=',')\n\t\tfor row in csv_advisors:\n\n\t\t\tadvisor = Advisor(first_name=row[0], last_name=row[1],\n\t\t\t\tfaculty_name=row[2])\n\t\t\tadvisor_list.append(advisor)\n\t\t\tsession.add(advisor)\n\n\twith open('courses.csv', 'r') as courses_file:\n\t\tcsv_courses = csv.reader(courses_file, delimiter=',')\n\t\tfor row in csv_courses:\n\n\t\t\tcourse = Course(name=row[0], credits=row[1])\n\t\t\tcourse_list.append(course)\n\t\t\tsession.add(course)\n\n# would have been nice to do multiple additions of courses, we will look at my solution together\n\n\twith open('students.csv', 'r') as students_file:\n\t\tcsv_students = csv.reader(students_file, delimiter=',')\n\t\tfor row in csv_students:\n\n\t\t\tstudent = Student(first_name=row[0], last_name=row[1],\n\t\t\t\tbirth_day=row[2])\n\t\t\tstudent.advisor_id = 
random.choice(advisor_list).id\n\t\tstudent.course.append(random.choice(course_list))\n\t\tsession.add(student)\n\n\tsession.commit()\n","sub_path":"hm4_insert_data.py","file_name":"hm4_insert_data.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652229858","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass StyleEncoder(nn.Module):\n    def __init__(self, weights, method, input_size, hidden_size, output_size):\n        super(StyleEncoder, self).__init__()\n        self.output_size = output_size\n        self.embedding = weights[0] if weights is not None else nn.Embedding(input_size,128)\n        self.embLinear = weights[1] if weights is not None else nn.Linear(128, hidden_size)\n        if method == \"RNN\":\n            self.forward = self.forwardRNN\n            self.RNN = nn.RNN(hidden_size, hidden_size, num_layers=2, dropout=0.2, batch_first=True, bidirectional=True)\n        if method == \"Transformer\":\n            self.forward = self.forwardTransformers\n            d_model = 512\n            nhead = 8\n            num_encoder_layers = 6\n            dim_feedforward=2048\n            dropout=0.1\n            activation=\"relu\"\n            encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation)\n            encoder_norm = nn.LayerNorm(d_model)\n            self.Transformer = nn.TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)\n\n    def forwardRNN(self, seqs):\n        embedding = self.embedding(seqs)\n        emblinear = self.embLinear(embedding)\n        output, _ = self.RNN(emblinear)\n        return output[:,-1,:]\n\n    def forwardTransformers(self, seqs):\n        embedding = self.embedding(seqs)\n        emblinear = self.embLinear(embedding)\n        output = self.Transformer(emblinear)\n        return output.mean(dim=1)\n\n\nclass StyleDisperser(nn.Module):\n    def __init__(self, weights, method, input_size, hidden_size, output_size, normalize=100, margin=1):\n        super(StyleDisperser, self).__init__()\n        self.encoder = StyleEncoder(weights, method, input_size, hidden_size, output_size)\n        self.normalize = normalize\n        self.margin = margin\n\n    def forward(self, batch, same=32):\n        ret_z = self.encoder(batch)\n        normloss = self.normalize*torch.pow((torch.norm(ret_z, dim=1)-1),2).mean()\n\n        true_z, random_z = ret_z[:same], ret_z[same:]\n        true_mean = torch.mean(true_z, dim=0)\n        true_std = torch.std(true_z, dim=0).sum()\n        random_true_std = (torch.mv(random_z,true_mean.T)/torch.norm(random_z,dim=1)/torch.norm(true_mean)).mean()\n        stdloss = true_std - random_true_std + self.margin\n        return normloss, stdloss\n\n    def inference(self, x):\n        return self.encoder(x)\n","sub_path":"networks/main/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332398585","text":"# gl.py\n\n#Import struct to pack values into C-style structures with a defined memory layout\nimport struct\n#We import our object class to gl.py\nfrom obj import Obj\n\n#char 1 byte\ndef char(var):\n    return struct.pack('=c',var.encode('ascii'))\n\n#word 2 bytes\ndef word(var):\n    return struct.pack('=h',var)\n\n#double word 4 bytes\ndef dword(var):\n    return struct.pack('=l',var)\n\n#double double word 8 bytes\ndef ddword(var):\n    return struct.pack('=q',var)\n\n#color function to return rgb in bytes\ndef color(r,g,b):\n    return bytes([b,g,r])\n\n#color function to return scaled (0-1) rgb in bytes\ndef colorScale(r,g,b):\n    return bytes([round(b*255),round(g*255),round(r*255)])\n\n#class Render for library of gl\nclass Render(object):\n    #Initialize function glInit\n    #Takes width 
and height to initialize, also the color\n def __init__(self,width,height,color=None):\n self.glInit(width,height,color)\n\n def glInit(self,width,height,color=None):\n self.glCreateWindow(width,height)\n self.currentColor = colorScale(1,1,1) if color==None else color\n self.glClear()\n self.glViewPort(0,0,width-1,height-1)\n \n #Size of image result\n def glCreateWindow(self,width,height):\n self.width = width\n self.height = height\n \n #Change Viewport position\n def glViewPort(self,x, y, width, height):\n if(x>=self.width or y>=self.height):\n return False\n if(x+width>=self.width or y+height>=self.height):\n return False\n #We save the data necessary for the viewPort\n self.viewPortWidth= width\n self.viewPortHeight = height\n self.viewPortX = x\n self.viewPortY = y\n return True\n\n #Clear to set bitmap of one color default black\n def glClear(self):\n #Set to black\n self.glClearColorScaleRGB(0,0,0)\n\n #Set bitmap to specif color\n def glClearColorScaleRGB(self,r,g,b):\n self.backgroundColor = colorScale(r,g,b)\n #Basically painting background\n # for x in range(self.width):\n # for y in range(self.height):\n # self.pixels[x][y]=colorPixels\n # Easier to use nested list comprenhension\n #https://www.geeksforgeeks.org/nested-list-comprehensions-in-python/\n self.pixels= [[self.backgroundColor for x in range(self.width)] for y in range(self.height)]\n\n #Functions to create points as absolute position\n def glVertexRGBAbsolute(self,x,y,r,g,b):\n\n self.pixels[y][x]=colorScale(r,g,b)\n\n def glVertexColorAbsolute(self,x,y,color=None):\n try:\n self.pixels[y][x]=self.currentColor if color == None else color\n \n except:\n #If tries to draw outside scren\n pass\n #Functions to create points as relative position of ViewPort\n def glVertexRGBRelative(self,x,y,r,g,b):\n xAbs =round(((x+1)*(self.viewPortWidth/2))+ self.viewPortX)\n yAbs =round(((y+1)*(self.viewPortHeight/2))+ self.viewPortY)\n self.pixels[yAbs][xAbs]=colorScale(r,g,b)\n\n def glVertexColorRelative(self,x,y,color=None):\n try:\n xAbs =round(((x+1)*(self.viewPortWidth/2))+ self.viewPortX)\n yAbs =round(((y+1)*(self.viewPortHeight/2))+ self.viewPortY)\n self.pixels[yAbs][xAbs]=self.currentColor if color == None else color\n except:\n #If tries to draw outside scren\n pass\n #Change current vertex color\n def glColor(self,color):\n self.currentColor=color;\n\n def glColorRGB(self,r,g,b):\n self.currentColor=colorScale(r,g,b)\n \n #Function to write image in file\n def glFinish(self,filename):\n file = open(filename,'wb')\n #https://itnext.io/bits-to-bitmaps-a-simple-walkthrough-of-bmp-image-format-765dc6857393\n #Reference to construct BMP\n\n #File Type Data BMP Header 14 Bytes\n file.write(char('B'))\n file.write(char('M'))\n file.write(dword(14+40+self.width*self.height*3))\n file.write(dword(0))\n file.write(dword(14+40))\n\n #File Image Header 40 Bytes\n file.write(dword(40))\n file.write(dword(self.width))\n file.write(dword(self.height))\n file.write(word(1))\n file.write(word(24))\n file.write(dword(0))\n file.write(dword(self.width*self.height*3))\n file.write(dword(0))\n file.write(dword(0))\n file.write(dword(0))\n file.write(dword(0))\n\n #Pixels 3 Bytes each\n for x in range(self.height):\n for y in range(self.width):\n file.write(self.pixels[x][y])\n file.close()\n\n\n #Function for a line\n def glLine(self,x0,y0,x1,y1,color=None):\n #Convert to absolute coordinates\n x0Abs =round(((x0+1)*(self.viewPortWidth/2))+ self.viewPortX)\n y0Abs =round(((y0+1)*(self.viewPortHeight/2))+ self.viewPortY)\n x1Abs 
=round(((x1+1)*(self.viewPortWidth/2))+ self.viewPortX)\n y1Abs =round(((y1+1)*(self.viewPortHeight/2))+ self.viewPortY)\n dy=y1Abs-y0Abs\n dx=x1Abs-x0Abs\n #Graphic a point if is the same\n if(x0Abs==x1Abs and y0Abs==y1Abs):\n self.glVertexColorAbsolute(round(x0Abs),round(y0Abs))\n \n #If vertical line\n if(dx==0):\n #Vertical Line\n step= +1 if (y1Abs>y0Abs) else -1;\n \n for y in range(y0Abs,y1Abs,step):\n x=x0Abs\n self.glVertexColorAbsolute(round(x),round(y))\n #Any other line\n else:\n #Use mx+b=y if m<=1 else my+b=x m>1\n #This is better for points by set rather tan using just mx+b=y\n m=dy/dx\n if(abs(m)<=1 or dy==0):\n b=y0Abs-(m*x0Abs)\n step = 1 if (dx>0) else -1\n if(m>0 and dy<=0 and dx<=0):\n step=-1\n elif(m>0 and dy>=0 and dx>=0):\n step=+1\n \n for x in range(x0Abs,x1Abs,step):\n y=m*x+b\n self.glVertexColorAbsolute(round(x),round(y))\n else:\n m=dx/dy\n b=x0Abs-(m*y0Abs)\n step = 1 if (dy>0) else -1\n if(m>0 and dy<=0 and dx<=0):\n step=-1\n elif(m>0 and dy>=0 and dx>=0):\n step=+1\n \n for y in range(y0Abs,y1Abs,step):\n x=m*y+b\n self.glVertexColorAbsolute(round(x),round(y))\n \n \n #Function for a line Coordenadas absolutas\n def glLineAbsolute(self,x0Abs,y0Abs,x1Abs,y1Abs,color=None):\n if(x0Abs>self.width or y0Abs>self.height or x1Abs>self.width or y1Abs>self.height):\n return False\n dy=y1Abs-y0Abs\n dx=x1Abs-x0Abs\n #Graphic a point if is the same\n if(x0Abs==x1Abs and y0Abs==y1Abs):\n self.glVertexColorAbsolute(round(x0Abs),round(y0Abs))\n \n #If vertical line\n if(dx==0):\n #Vertical Line\n step= +1 if (y1Abs>y0Abs) else -1;\n \n for y in range(y0Abs,y1Abs,step):\n x=x0Abs\n self.glVertexColorAbsolute(round(x),round(y))\n #Any other line\n else:\n #Use mx+b=y if m<=1 else my+b=x m>1\n #This is better for points by set rather tan using just mx+b=y\n m=dy/dx\n if(abs(m)<=1 or dy==0):\n b=y0Abs-(m*x0Abs)\n step = 1 if (dx>0) else -1\n if(m>0 and dy<=0 and dx<=0):\n step=-1\n elif(m>0 and dy>=0 and dx>=0):\n step=+1\n \n for x in range(x0Abs,x1Abs,step):\n y=m*x+b\n self.glVertexColorAbsolute(round(x),round(y))\n else:\n m=dx/dy\n b=x0Abs-(m*y0Abs)\n step = 1 if (dy>0) else -1\n if(m>0 and dy<=0 and dx<=0):\n step=-1\n elif(m>0 and dy>=0 and dx>=0):\n step=+1\n \n for y in range(y0Abs,y1Abs,step):\n x=m*y+b\n self.glVertexColorAbsolute(round(x),round(y))\n \n \n #Function to load any obj model\n def loadObjModel(self,filename,translateX=None,translateY=None,scaleX=None,scaleY=None):\n #Load our objModel so we can draw it in our gl\n objModel = Obj(filename)\n \n #Our tranlations and scales to draw\n translateX= translateX if translateX!=None else round(self.width/2)\n translateY= translateY if translateY!=None else round(self.height/2)\n scaleX= scaleX if scaleX!=None else round(self.width/4)\n scaleY= scaleY if scaleY!=None else round(self.height/4)\n #For each face that has reference to v,vn,vt\n for face in objModel.faces:\n #For each reference to [v,vn and vt] as a list\n #Vertex[0] will make reference to each v referencing a vertexIndex to an actual vertex\n for i in range(len(face)):\n vertex=face[i]\n vertex1=face[(i+1)%len(face)]\n #We only focus on the first value of each f/// that is for just v\n #Vertex[0] has reference to the position of v starting counting in 1 for the actual coordinates of vertex\n try:\n v0=objModel.vertexIndexes[vertex[0]-1]\n x0=round(v0[0]*scaleX + translateX)\n y0=round(v0[1]*scaleY + translateY)\n v1=objModel.vertexIndexes[vertex1[0]-1]\n x1=round(v1[0]*scaleX + translateX)\n y1=round(v1[1]*scaleY + translateY)\n # 
self.glVertexColorAbsolute(x0,y0)\n self.glLineAbsolute(x0,y0,x1,y1)\n except:\n #There must be an error on the files point\n pass\n \n #Function to draw any polygon\n def glDrawPolygon(self,vertexList,color=None):\n color=self.currentColor if color == None else color\n #We save and max and min in y to paint them\n xMin=None\n xMax=None\n yMin=None\n yMax=None\n for i in range(len(vertexList)):\n \n vertex=vertexList[i]\n vertex1=vertexList[(i+1)%len(vertexList)]\n #Now we can draw lines from vertex to vertex\n try:\n \n x0=round(vertex[0])\n y0=round(vertex[1])\n \n x1=round(vertex1[0])\n y1=round(vertex1[1])\n self.glLineAbsolute(x0,y0,x1,y1,color)\n except:\n #There must be an error on the vertexList\n pass\n\n #Function to draw and paint any polygon\n def glDrawAndPaintPolygon(self,vertexList,color=None):\n color=self.currentColor if color == None else color\n #We save and max and min in y to paint them\n xMin=vertexList[0][0]\n xMax=vertexList[0][0]\n yMin=vertexList[0][1]\n yMax=vertexList[0][1]\n for i in range(len(vertexList)):\n \n vertex=vertexList[i]\n vertex1=vertexList[(i+1)%len(vertexList)]\n xMin = xMin if(xMin<=vertex[0]) else vertex[0]\n xMax = xMax if(xMax>=vertex[0]) else vertex[0]\n yMin = yMin if(yMin<=vertex[1]) else vertex[1]\n yMax = yMax if(yMax>=vertex[1]) else vertex[1]\n #Now we can draw lines from vertex to vertex\n try:\n \n x0=round(vertex[0])\n y0=round(vertex[1])\n \n x1=round(vertex1[0])\n y1=round(vertex1[1])\n self.glLineAbsolute(x0,y0,x1,y1,color)\n except:\n #There must be an error on the vertexList\n pass\n for y in range(yMin,yMax):\n count=0; \n for x in range(xMin,xMax):\n try:\n if(self.pixels[y][x]==color):\n count=count+1 \n if(count%2==1):\n vertexOnly=True\n for x2 in range(x,xMax+1):\n if(self.pixels[y][x2]==color):\n vertexOnly=False\n if(not vertexOnly): \n self.glVertexColorAbsolute(x,y,color)\n except:\n #Error coordinates\n pass\n #Points in y that were not collored\n for x in range(xMin,xMax): \n for y in range(yMin,yMax):\n if(self.pixels[y-1][x]==color and self.pixels[y+1][x]==color):\n self.glVertexColorAbsolute(x,y,color)\n # elif(self.pixels[y-1][x]==self.currentColor and self.pixels[y][x+1]==self.currentColor):\n # self.glVertexColorAbsolute(x,y)\n # elif(self.pixels[y+1][x]==self.currentColor and self.pixels[y][x-1]==self.currentColor):\n # self.glVertexColorAbsolute(x,y)\n # elif(self.pixels[y][x-1]==self.currentColor and self.pixels[y][x+1]==self.currentColor):\n # self.glVertexColorAbsolute(x,y)\n\n\n #Function to draw and paint any polygon \n def glDrawAndPaintPolygonOddEven(self,vertexList,color=None):\n color=self.currentColor if color == None else color\n #We save and max and min in y to paint them\n xMin=vertexList[0][0]\n xMax=vertexList[0][0]\n yMin=vertexList[0][1]\n yMax=vertexList[0][1]\n for i in range(len(vertexList)):\n \n vertex=vertexList[i]\n vertex1=vertexList[(i+1)%len(vertexList)]\n xMin = xMin if(xMin<=vertex[0]) else vertex[0]\n xMax = xMax if(xMax>=vertex[0]) else vertex[0]\n yMin = yMin if(yMin<=vertex[1]) else vertex[1]\n yMax = yMax if(yMax>=vertex[1]) else vertex[1]\n #Now we can draw lines from vertex to vertex\n try:\n \n x0=round(vertex[0])\n y0=round(vertex[1])\n \n x1=round(vertex1[0])\n y1=round(vertex1[1])\n self.glLineAbsolute(x0,y0,x1,y1,color)\n except:\n #There must be an error on the vertexList\n pass\n for y in range(yMin,yMax): \n for x in range(xMin,xMax):\n if(self.isPointInPolygon(x,y,vertexList)):\n self.glVertexColorAbsolute(x,y,color)\n\n #Function to check oddEven\n #Determine if 
point is in path\n #https://handwiki.org/wiki/Even%E2%80%93odd_rule\n #This code was extracted from the link before and it works perfectly\n def isPointInPolygon(self,x, y, vertexList):\n vertexCount = len(vertexList)\n i = 0\n j = vertexCount - 1\n inPolygon = False\n for i in range(vertexCount):\n if ((vertexList[i][1] > y) != (vertexList[j][1] > y)) and \\\n (x < vertexList[i][0] + (vertexList[j][0] - vertexList[i][0]) * (y - vertexList[i][1]) /\n (vertexList[j][1] - vertexList[i][1])):\n inPolygon = not inPolygon\n j = i\n return inPolygon \n \n #Function to draw and paint any polygon from triangles\n def glDrawAndPaintPolygonFromTriangles(self,vertexList):\n #We count the vertex to know how to unite them\n vertexCount=(len(vertexList))\n self.glDrawAndPaintPolygon(vertexList)\n for i in range(vertexCount):\n self.glDrawAndPaintPolygon([vertexList[0],vertexList[1],vertexList[i]])\n self.glDrawAndPaintPolygon([vertexList[0],vertexList[vertexCount-1],vertexList[i]]) \n \n ","sub_path":"Lab1/gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":15954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"432861263","text":"import os\nimport lzma\nimport tarfile\nimport shutil\nimport zipfile\n\nfile_to_dataset = {\n \"Insects_FortyImagesPerCategory_Forty_Images_Per_Category.zip\": \"insects\",\n \"Plankton_FortyImagesPerCategory_Forty_Images_Per_Category.zip\": \"plankton\",\n \"plantvillage_plantvillage-formatted-image (1).zip\": \"plants\",\n \"MedicinalLeaf_medleaf-formatted-image.zip\": \"medleaf\",\n \"Texture_1_FortyImagesPerCategory_Forty_Images_Per_Category.zip\": \"texture1\",\n \"Texture_2_FortyImagesPerCategory_Forty_Images_Per_Category.zip\": \"texture2\",\n \"rsi-cb-128-remotesensing_rsicb128-formatted-image.zip\": \"rsicb\",\n \"resisc45-remotesensing_resisc45-formatted-image (1).zip\": \"resisc\", \n \"OmniPrint_overview_OmniPrint_MetaDL_Ihsan_format_meta-mix_first_set.zip\": \"omniprint1\",\n \"OmniPrint_overview_OmniPrint_MetaDL_Ihsan_format_meta5-bis_first_set.zip\": \"omniprint2\",\n}\n\nall_data = \"publicdata.zip\"\nroot_dir = \"./data/\"\n\nassert os.path.exists(all_data), \"Could not find {} in the current directory\".format(all_data)\n\nif not os.path.isdir(root_dir):\n os.mkdir(root_dir)\n\n# unzip the alldata.zip\nwith zipfile.ZipFile(all_data, 'r') as zip_ref:\n zip_ref.extractall(\"./\")\n\nfor zfile, dirname in file_to_dataset.items():\n print(\"Processing {} files\".format(dirname))\n unzip_location = os.path.join(root_dir, dirname)\n if not os.path.isdir(unzip_location):\n os.mkdir(unzip_location)\n else:\n print(\"\\tDirectory {} already existed. 
Not touching this and moving to the next one\".format(unzip_location))\n        continue\n\n    # Check file extension (if .zip -> unzip, if .xz -> convert to tar and untar)\n    extension = zfile.split(\".\")[1]\n    if extension.lower() == \"zip\":\n        # Read zip file and extract it\n        with zipfile.ZipFile(zfile, 'r') as zip_ref:\n            zip_ref.extractall(unzip_location)\n    elif extension.lower() == \"xz\":\n        with lzma.open(zfile) as f:\n            with tarfile.open(fileobj=f) as tar:\n                tar.extractall(unzip_location)\n    else:\n        print(\"Unknown file extension .{} for {}\".format(extension, dirname))\n\n\n    os.remove(zfile)\n\n    # Make sure there now is a folder called images in the unzip location\n    image_dir = os.path.join(unzip_location, \"images\")\n    if not os.path.isdir(image_dir):\n        folder_in_zip_loc = os.path.join(unzip_location, os.listdir(unzip_location)[0])\n        files_to_move = os.listdir(folder_in_zip_loc)\n        for f in files_to_move:\n            # unpack the folder\n            shutil.move(os.path.join(folder_in_zip_loc, f), os.path.join(unzip_location, f))\n        shutil.rmtree(folder_in_zip_loc)\n        print(\"\\tSuccess.\")\nprint(\"\\n[*] Everything went well. Data sets are ready!\")\n","sub_path":"FewShotBaselines/setup_data.py","file_name":"setup_data.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90530601","text":"import sys\nimport pprint\nsys.stdin = open('내리막 길.txt','r')\nsys.setrecursionlimit(1000000)\n\nY, X = map(int,input().split())\nboard = [list(map(int,input().split())) for _ in range(Y)]\nDP = [[-1] * X for _ in range(Y)]\n\ndef DFS(y,x):\n    if DP[y][x] != -1:\n        return DP[y][x]\n    if x == X-1 and y == Y-1:\n        return 1\n    DP[y][x] = 0\n    for dy,dx in (0,1),(0,-1),(1,0),(-1,0):\n        ny = y + dy\n        nx = x + dx\n        if 0 <= ny < Y and 0 <= nx < X:\n            if board[ny][nx] < board[y][x]:\n                DP[y][x] += DFS(ny,nx)\n    return DP[y][x]\nDFS(0,0)\nprint(DP[0][0])\n","sub_path":"10월/1008/연습장.py","file_name":"연습장.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"413677814","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 19 05:49:53 2021\n\n@author: Pratsch\n\"\"\"\n\nimport pandas as pd\n\n\nSTATE_DATA = { 'alabama':'AL','alaska':'AK','arizona':'AZ','arkansas':'AR','california':'CA'\n              ,'colorado':'CO','connecticut':'CT','delaware':'DE','florida':'FL','georgia':'GA'\n              ,'hawaii':'HI','idaho':'ID','illinois':'IL','indiana':'IN','iowa':'IA','kansas':'KS'\n              ,'kentucky':'KY','louisiana':'LA','maine':'ME','maryland':'MD',\n              'massachusetts':'MA','michigan':'MI','minnesota':'MN','mississippi':'MS','missouri':'MO'\n              ,'montana':'MT','nebraska':'NE','nevada':'NV','new hampshire':'NH'\n              ,'new jersey':'NJ','new mexico':'NM','new york':'NY','north carolina':'NC'\n              , 'north dakota':'ND','ohio':'OH','oklahoma':'OK','oregon':'OR',\n              'pennsylvania':'PA','rhode island':'RI','south carolina':'SC','south dakota':'SD'\n              ,'tennessee':'TN','texas':'TX','utah':'UT','vermont':'VT',\n              'virginia':'VA','washington':'WA','west virginia':'WV','wisconsin':'WI'\n              ,'wyoming':'WY','washington dc':'DC'}\n\n#Function to take inputs from the user and validate the values\ndef get_filters():\n    #Grab all inputs of the user\n    print('Hello! 
Let\\'s find you an affordable place!')\n # Get user's State\n while True:\n try:\n state = str(input('Please enter a State you are interested in: ').lower())\n if state in STATE_DATA.keys():\n state=STATE_DATA.get(state)\n break\n print(\"Please enter a valid state\")\n except Exception as e:\n print(e)\n \n # Get user income level\n while True:\n try:\n income = int(input('Please enter your income: '))\n if income >= 0:\n break\n print(\"Please enter a valid income\")\n except Exception as e:\n print(e)\n \n # Are you planning on owning a home or renting\n while True:\n try:\n living = str(input('Are you planning on Renting or Owning a Home?\\\n \\nEnter Renting or Owning: ').lower())\n if living == 'renting' or living == 'owning':\n break\n print(\"Invalid Answer Entered\")\n except Exception as e:\n print(e)\n \n # Are you Retired or Working\n while True:\n try:\n work = str(input('Are you Retired?\\\n \\nEnter Yes or No: ').lower())\n if work == 'yes' or work == 'no':\n break\n print(\"Invalid Answer Entered\")\n except Exception as e:\n print(e)\n \n #If not retired, type of household\n if work == 'no':\n while True:\n try:\n household = str(input('Single or Dual Income?\\\n \\nEnter Single or Dual: ').lower())\n if household == 'single' or household == 'dual':\n break\n print(\"Invalid Answer Entered\")\n except Exception as e:\n print(e)\n else:\n household = 'retired'\n \n \n #Prefer Public Transit or Driving \n while True:\n try:\n transport = str(input('Do you prefer Public Transit or Driving?\\\n \\nEnter Public Transit or Driving: ').lower())\n if transport == 'public transit' or transport == 'driving':\n break\n print(\"Invalid Answer Entered\")\n except Exception as e:\n print(e)\n \n print('-'*40)\n \n return state, income, living, work, household, transport\n\n\n\nstate, income, living, work, household, transport = get_filters()\nprint(state)\nprint(income)\nprint(living)\nprint(work)\nprint(household)\nprint(transport)\n\n\nfilename = '/Users/Pratsch/CS_Project/Location_Affordability_Index_v_1.0.csv'\ndf = pd.DataFrame(pd.read_csv(filename))\n\ndfstate = df.loc[df['SF1_BlockGroups_ST_ABBREV'] == state]\nprint(dfstate['per_capita_income'].describe())\n","sub_path":"archive/Project_User_Input.py","file_name":"Project_User_Input.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652642207","text":"from __future__ import print_function\r\n\r\nimport numpy as np\r\nnp.random.seed(1337)\r\n\r\nfrom sklearn.preprocessing import Normalizer \r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Activation\r\nimport pandas as pd\r\nfrom keras.layers import Convolution2D,Flatten\r\nfrom keras.layers import LSTM, GRU, SimpleRNN\r\nfrom keras.constraints import maxnorm\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report \r\nfrom keras.models import model_from_json\r\nimport matplotlib.pyplot as plt\r\n\r\ndataset=pd.read_csv('attackpt1.csv')\r\nX = dataset.iloc[:, :-1].values\r\nY = dataset.iloc[:, 41].values\r\nlabelencoder_x_1 = LabelEncoder()\r\nlabelencoder_x_2 = LabelEncoder()\r\nlabelencoder_x_3 = LabelEncoder()\r\nlabelencoder_y = LabelEncoder()\r\n\r\nlabelencoder_x_1 = labelencoder_x_1.fit(['icmp' 'tcp' 'udp'])\r\nlabelencoder_x_2 = labelencoder_x_2.fit(['IRC' 'X11' 'Z39_50' 'aol' 'auth' 'bgp' 'courier' 'csnet_ns' 'ctf'\r\n 'daytime' 'discard' 'domain' 
'domain_u' 'echo' 'eco_i' 'ecr_i' 'efs'\r\n 'exec' 'finger' 'ftp' 'ftp_data' 'gopher' 'harvest' 'hostnames' 'http'\r\n 'http_2784' 'http_443' 'http_8001' 'imap4' 'iso_tsap' 'klogin' 'kshell'\r\n 'ldap' 'link' 'login' 'mtp' 'name' 'netbios_dgm' 'netbios_ns'\r\n 'netbios_ssn' 'netstat' 'nnsp' 'nntp' 'other' 'pm_dump' 'pop_2' 'pop_3'\r\n 'printer' 'private' 'remote_job' 'rje' 'shell' 'smtp' 'sql_net' 'ssh'\r\n 'sunrpc' 'supdup' 'systat' 'telnet' 'tim_i' 'time' 'urp_i' 'uucp'\r\n 'uucp_path' 'vmnet' 'whois'])\r\nlabelencoder_x_3 = labelencoder_x_1.fit(['OTH' 'REJ' 'RSTO' 'RSTOS0' 'RSTR' 'S0' 'S1' 'S2' 'S3' 'SF' 'SH'])\r\nlabelencoder_y = labelencoder_y.fit(['back.' 'buffer_overflow.' 'ftp_write.' 'guess_passwd.' 'imap.'\r\n 'ipsweep.' 'land.' 'loadmodule.' 'multihop.' 'neptune.' 'nmap.' 'perl.'\r\n 'phf.' 'pod.' 'portsweep.' 'rootkit.' 'satan.' 'smurf.' 'spy.'\r\n 'teardrop.' 'warezclient.' 'warezmaster.'])\r\n\r\nX[:, 1] = labelencoder_x_1.fit_transform(X[:, 1])\r\nX[:, 2] = labelencoder_x_2.fit_transform(X[:, 2])\r\nX[:, 3] = labelencoder_x_3.fit_transform(X[:, 3])\r\ntestY= labelencoder_y.fit_transform(Y)\r\n\r\nscaler = Normalizer().fit(X)\r\ntestX= scaler.transform(X)\r\nlabels=['back.' ,'buffer_overflow.', 'ftp_write.', 'guess_passwd.', 'imap.',\r\n 'ipsweep.' ,'land.', 'loadmodule.', 'multihop.', 'neptune.', 'nmap.', 'perl.',\r\n 'phf.', 'pod.', 'portsweep.', 'rootkit.' ,'satan.' ,'smurf.', 'spy.',\r\n 'teardrop.', 'warezclient.' ,'warezmaster.']\r\n\r\ndef con_mat(y_pred,y_test):\r\n cm=confusion_matrix(y_test,y_pred)\r\n print(\"done\") \r\n #print(labels[int(y_pred)])\r\n print(\"\\n\"+classification_report(y_test, y_pred))\r\n from mlxtend.plotting import plot_confusion_matrix\r\n fig,ax=plot_confusion_matrix(conf_mat=cm,figsize=(15,15))\r\n plt.show()\r\n fig,ax=plt.subplots()\r\n ax.scatter(y_test,y_pred )\r\n ax.plot([y_test.min(),y_test.max()],[y_test.min(),y_test.max()],'k--',lw=4)\r\n ax.set_xlabel('Measured')\r\n ax.set_ylabel('predicted')\r\n fig.show()\r\n \r\n \r\n \r\ndef cnnload(testX,op):\r\n cnn = Sequential()\r\n cnn.add(Convolution2D(64, 3,3, border_mode=\"same\",activation=\"relu\",input_shape=(1,41,1),W_constraint=maxnorm(3)))\r\n cnn.add(Convolution2D(64, 3,3, border_mode=\"same\", activation=\"relu\",W_constraint=maxnorm(3)))\r\n cnn.add(Convolution2D(128, 3,3, border_mode=\"same\", activation=\"relu\",W_constraint=maxnorm(3)))\r\n cnn.add(Convolution2D(128, 3,3,border_mode=\"same\", activation=\"relu\",W_constraint=maxnorm(3)))\r\n cnn.add(Flatten())\r\n cnn.add(Dense(128, activation=\"relu\"))\r\n cnn.add(Dropout(0.5))\r\n cnn.add(Dense(op, activation=\"softmax\"))\r\n cnn.load_weights(\"cnn1.hdf5\")\r\n print(\"loaded cnn\")\r\n testX=np.array(testX)\r\n testXR = np.reshape(testX, (testX.shape[0],1,testX.shape[1],1))\r\n y_pred = cnn.predict_classes(testXR)\r\n print(y_pred)\r\n #con_mat(y_pred,testY)\r\n return cnn,y_pred\r\n\r\ndef lstmload(testX,op):\r\n lstm = Sequential()\r\n lstm.add(LSTM(128,input_dim=41, return_sequences=True)) \r\n lstm.add(Dropout(0.1))\r\n lstm.add(LSTM(128,return_sequences=True))\r\n lstm.add(Dropout(0.1))\r\n lstm.add(LSTM(128, return_sequences=True)) \r\n lstm.add(Dropout(0.1))\r\n lstm.add(LSTM(128, return_sequences=False)) \r\n lstm.add(Dropout(0.1))\r\n lstm.add(Dense(op))\r\n lstm.add(Activation('softmax'))\r\n lstm.load_weights(\"lstm1.hdf5\")\r\n print(\"loaded lstm\")\r\n testXR = np.reshape(testX, (testX.shape[0],1,testX.shape[1]))\r\n y_pred = lstm.predict_classes(testXR)\r\n con_mat(y_pred,testY)\r\n return 
lstm,y_pred\r\n \r\ndef gruload(testX,op):\r\n gru = Sequential()\r\n gru.add(GRU(64,input_dim=41, return_sequences=True)) \r\n gru.add(Dropout(0.1))\r\n gru.add(GRU(64,return_sequences=True)) \r\n gru.add(Dropout(0.1))\r\n gru.add(GRU(64, return_sequences=True)) \r\n gru.add(Dropout(0.1))\r\n gru.add(GRU(64, return_sequences=False)) \r\n gru.add(Dropout(0.1))\r\n gru.add(Dense(op))\r\n gru.add(Activation('softmax'))\r\n gru.load_weights(\"gru1.hdf5\")\r\n print(\"loaded gru\")\r\n testXR = np.reshape(testX, (testX.shape[0],1,testX.shape[1]))\r\n y_pred = gru.predict_classes(testXR)\r\n con_mat(y_pred,testY)\r\n return gru,y_pred\r\n \r\n\r\ndef dnnload(testX,op):\r\n dnn = Sequential()\r\n dnn.add(Dense(1024,input_dim=41,activation='relu')) \r\n dnn.add(Dropout(0.01))\r\n dnn.add(Dense(768,activation='relu')) \r\n dnn.add(Dropout(0.01))\r\n dnn.add(Dense(512,activation='relu')) \r\n dnn.add(Dropout(0.01))\r\n dnn.add(Dense(256,activation='relu')) \r\n dnn.add(Dropout(0.01))\r\n dnn.add(Dense(128,activation='relu')) \r\n dnn.add(Dropout(0.01))\r\n dnn.add(Dense(op))\r\n dnn.add(Activation('softmax'))\r\n dnn.load_weights('dnn1.hdf5')\r\n print(\"loaded dnn\")\r\n testXR = np.reshape(testX, (testX.shape[0],testX.shape[1]))\r\n y_pred = dnn.predict_classes(testXR)\r\n con_mat(y_pred,testY)\r\n return dnn,y_pred\r\n\r\n\r\ndef rnnload(testX,op):\r\n rnn = Sequential()\r\n rnn.add(SimpleRNN(128,input_dim=41, return_sequences=True)) \r\n rnn.add(Dropout(0.1))\r\n rnn.add(SimpleRNN(128,return_sequences=True)) \r\n rnn.add(Dropout(0.1))\r\n rnn.add(SimpleRNN(128, return_sequences=True)) \r\n rnn.add(Dropout(0.1))\r\n rnn.add(SimpleRNN(128, return_sequences=False)) \r\n rnn.add(Dropout(0.1))\r\n rnn.add(Dense(op))\r\n rnn.add(Activation('softmax'))\r\n rnn.load_weights('rnn1.hdf5')\r\n print(\"loaded rnn\")\r\n testXR = np.reshape(testX, (testX.shape[0],1,testX.shape[1]))\r\n y_pred = rnn.predict_classes(testXR)\r\n con_mat(y_pred,testY)\r\n return rnn,y_pred\r\n\r\ndef signatureM(testX):\r\n cnn,yc=cnnload(testX,22)\r\n lstm,yl=lstmload(testX,22)\r\n gru,yg=gruload(testX,22)\r\n dnn,yd=dnnload(testX,22)\r\n rnn,yr=rnnload(testX,22)\r\n '''cnn.summary()\r\n lstm.summary()\r\n gru.summary()\r\n dnn.summary()\r\n rnn.summary()'''\r\n \r\ndef anamoly():\r\n dataset=pd.read_csv('kd10.csv')\r\n X = dataset.iloc[:, :-1].values\r\n labelencoder_x_1 = LabelEncoder()\r\n labelencoder_x_2 = LabelEncoder()\r\n labelencoder_x_3 = LabelEncoder()\r\n labelencoder_x_1 = labelencoder_x_1.fit(['icmp' 'tcp' 'udp'])\r\n labelencoder_x_2 = labelencoder_x_2.fit(['IRC' 'X11' 'Z39_50' 'aol' 'auth' 'bgp' 'courier' 'csnet_ns' 'ctf'\r\n 'daytime' 'discard' 'domain' 'domain_u' 'echo' 'eco_i' 'ecr_i' 'efs'\r\n 'exec' 'finger' 'ftp' 'ftp_data' 'gopher' 'harvest' 'hostnames' 'http'\r\n 'http_2784' 'http_443' 'http_8001' 'imap4' 'iso_tsap' 'klogin' 'kshell'\r\n 'ldap' 'link' 'login' 'mtp' 'name' 'netbios_dgm' 'netbios_ns'\r\n 'netbios_ssn' 'netstat' 'nnsp' 'nntp' 'other' 'pm_dump' 'pop_2' 'pop_3'\r\n 'printer' 'private' 'remote_job' 'rje' 'shell' 'smtp' 'sql_net' 'ssh'\r\n 'sunrpc' 'supdup' 'systat' 'telnet' 'tim_i' 'time' 'urp_i' 'uucp'\r\n 'uucp_path' 'vmnet' 'whois'])\r\n labelencoder_x_3 = labelencoder_x_1.fit(['OTH' 'REJ' 'RSTO' 'RSTOS0' 'RSTR' 'S0' 'S1' 'S2' 'S3' 'SF' 'SH'])\r\n dataset['normal.'] = dataset['normal.'].replace(['back.', 'buffer_overflow.', 'ftp_write.', 'guess_passwd.', 'imap.', 'ipsweep.', 'land.', 'loadmodule.', 'multihop.', 'neptune.', 'nmap.', 'perl.', 'phf.', 'pod.', 'portsweep.', 'rootkit.', 
'satan.', 'smurf.', 'spy.', 'teardrop.', 'warezclient.', 'warezmaster.'], 'attack')\r\n T = dataset.iloc[:, 41].values\r\n labelencoder_yBN = LabelEncoder()\r\n labelencoder_yBN=labelencoder_yBN.fit(['attack','normal.'])\r\n y_test=labelencoder_yBN.fit_transform(T)\r\n X[:, 1] = labelencoder_x_1.fit_transform(X[:, 1])\r\n X[:, 2] = labelencoder_x_2.fit_transform(X[:, 2])\r\n X[:, 3] = labelencoder_x_3.fit_transform(X[:, 3])\r\n scaler = Normalizer().fit(X) \r\n testX= scaler.transform(X)\r\n\r\n classifier = Sequential()\r\n classifier.add(Dense(output_dim = 200, init = 'uniform', activation = 'relu', input_dim = 41))\r\n classifier.add(Dense(output_dim = 200, init = 'uniform', activation = 'relu'))\r\n classifier.add(Dense(output_dim = 200, init = 'uniform', activation = 'relu'))\r\n classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))\r\n #classifier.summary() \r\n json_file = open('ann1.json', 'r')\r\n loaded_model_json = json_file.read()\r\n json_file.close()\r\n classifier = model_from_json(loaded_model_json)\r\n classifier.load_weights(\"ann1.h5\")\r\n print(\"Loaded model binaryAnn\")\r\n y_pred = classifier.predict(testX)\r\n print(y_pred)\r\n y_pred1= (y_pred > 0.6)\r\n print(y_pred1)\r\n con_mat(y_pred1,y_test)\r\n \r\nanamoly() \r\nsignatureM(testX)","sub_path":"HIDStesting1.py","file_name":"HIDStesting1.py","file_ext":"py","file_size_in_byte":9463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437310982","text":"import pytest\nimport sys\nimport re\nimport uuid\n\n# Python 2.x 3.x compatibility\nfrom six.moves import xrange\n\nfrom socketmsg.utils import config\n\n\nclass TestConfig(object):\n def test_parse_yaml_raises_type_error_if_provided_nonstring_for_path(self):\n with pytest.raises(TypeError):\n # send am IntType object instead of string, 99 is just an integer\n config.parse_yaml(type(99))\n\n def test_parse_yaml_raises_does_not_raise_type_error_if_provided_string_for_path(self):\n try:\n config.parse_yaml(\"./tests/configs/test.yaml\")\n except TypeError:\n pytest.fail(\"Raised a TypeError when provided string for path. If running in test the test file is in tests/configs/test.yaml\")\n\n def test_parse_yaml_returns_dict(self):\n try:\n opts = config.parse_yaml(\"./tests/configs/test.yaml\")\n except TypeError:\n pytest.fail(\"Raised a TypeError when provided string for path. 
If running in test the test file is in tests/configs/test.yaml\")\n assert type(opts) is dict # test you get a dict back when providing valid yaml\n assert opts == {'log_level': 'INFO', 'version': '0.0.0'} # test the contents are parsed properly\n\n def test_default_config_returns_dict(self):\n assert type(config.default_config()) is dict\n\n # for more info on parameterize\n # https://docs.pytest.org/en/latest/parametrize.html#parametrize\n @pytest.mark.parametrize(\"prop,prop_type\", [\n (\"log_level\", str),\n ], scope=\"class\")\n def test_default_config_has_all_the_right_properties_and_prop_types(self, prop, prop_type):\n \"\"\"overloaded test\"\"\"\n assert prop in config.default_config()\n assert type(config.default_config()[prop]) is prop_type\n","sub_path":"tests/test_utils_config.py","file_name":"test_utils_config.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"60954705","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ahtung_api', '0003_auto_20141107_0919'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='EnabledSignals',\n new_name='EnabledSignal',\n ),\n ]\n","sub_path":"ahtung_api/migrations/0004_auto_20141107_0927.py","file_name":"0004_auto_20141107_0927.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566236941","text":"# coding=utf-8\n# pystray\n# Copyright (C) 2016 Moses Palmér\n#\n# This program is free software: you can redistribute it and/or modify it under\n# the terms of the GNU Lesser General Public License as published by the Free\n# Software Foundation, either version 3 of the License, or (at your option) any\n# later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program. If not, see .\n\nimport ctypes\nimport os\nimport six\nimport sys\nimport threading\nimport tempfile\n\nfrom ctypes import windll, wintypes\nfrom six.moves import queue\n\nfrom . 
import _base\n\n\nclass Icon(_base.Icon):\n _HWND_TO_ICON = {}\n\n def __init__(self, *args, **kwargs):\n super(Icon, self).__init__(*args, **kwargs)\n\n self._icon_handle = None\n self._hwnd = None\n\n # This is a mapping from win32 event codes to handlers used by the\n # mainloop\n self._message_handlers = {\n WM_STOP: self._on_stop,\n WM_NOTIFY: self._on_notify}\n\n self._queue = queue.Queue()\n\n # Create the message loop\n msg = wintypes.MSG()\n lpmsg = ctypes.byref(msg)\n PeekMessage(lpmsg, None, 0x0400, 0x0400, PM_NOREMOVE)\n\n self._atom = self._register_class()\n self._hwnd = self._create_window(self._atom)\n self._HWND_TO_ICON[self._hwnd] = self\n\n def __del__(self):\n if self._running:\n self._stop()\n if self._thread.ident != threading.current_thread().ident:\n self._thread.join()\n\n def _show(self):\n self._assert_icon_handle()\n self._message(\n NOTIFYICONDATA.NIM_ADD,\n NOTIFYICONDATA.NIF_MESSAGE | NOTIFYICONDATA.NIF_ICON |\n NOTIFYICONDATA.NIF_TIP,\n uCallbackMessage=WM_NOTIFY,\n hIcon=self._icon_handle,\n szTip=self.title)\n\n def _hide(self):\n self._message(\n NOTIFYICONDATA.NIM_DELETE,\n 0)\n\n def _update_icon(self):\n self._icon_handle = None\n self._assert_icon_handle()\n self._message(\n NOTIFYICONDATA.NIM_MODIFY,\n NOTIFYICONDATA.NIF_ICON,\n hIcon=self._icon_handle)\n\n def _update_title(self):\n self._message(\n NOTIFYICONDATA.NIM_MODIFY,\n NOTIFYICONDATA.NIF_TIP,\n szTip=self.title)\n\n def _run(self):\n self._mark_ready()\n\n # Run the event loop\n self._thread = threading.current_thread()\n self._mainloop()\n\n def _stop(self):\n PostMessage(self._hwnd, WM_STOP, 0, 0)\n\n
def _mainloop(self):\n \"\"\"The body of the main loop thread.\n\n This method retrieves all events from *Windows* and makes sure to\n dispatch clicks.\n \"\"\"\n # Pump messages\n try:\n while True:\n msg = wintypes.MSG()\n lpmsg = ctypes.byref(msg)\n while True:\n r = GetMessage(lpmsg, None, 0, 0)\n if not r:\n break\n elif r == -1:\n break\n else:\n TranslateMessage(lpmsg)\n DispatchMessage(lpmsg)\n\n # Make sure the icon is removed\n self._hide()\n\n except:\n # TODO: Report errors\n pass\n\n finally:\n try:\n self._hide()\n del self._HWND_TO_ICON[self._hwnd]\n except:\n pass\n\n DestroyWindow(self._hwnd)\n self._unregister_class(self._atom)\n\n def _on_stop(self, wparam, lparam):\n \"\"\"Handles ``WM_STOP``.\n\n This method posts a quit message, causing the mainloop thread to\n terminate.\n \"\"\"\n PostQuitMessage(0)\n\n def _on_notify(self, wparam, lparam):\n \"\"\"Handles ``WM_NOTIFY``.\n\n This method calls the activate callback. It will only be called for\n left button clicks.\n \"\"\"\n if lparam == WM_LBUTTONDOWN:\n self.on_activate(self)\n\n def _create_window(self, atom):\n \"\"\"Creates the system tray icon window.\n\n :param atom: The window class atom.\n\n :return: a window\n \"\"\"\n hwnd = CreateWindowEx(\n 0,\n atom,\n None,\n 0,\n 0, 0, 0, 0,\n HWND_MESSAGE,\n None,\n GetModuleHandle(None),\n None)\n if not hwnd:\n raise ctypes.WinError(wintypes.get_last_error())\n else:\n return hwnd\n\n
def _message(self, code, flags, **kwargs):\n \"\"\"Sends a message to the systray icon.\n\n This method adds ``cbSize``, ``hWnd``, ``uID`` and ``uFlags`` to the\n message data.\n\n :param int message: The message to send. This should be one of the\n ``NIM_*`` constants.\n\n :param int flags: The value of ``NOTIFYICONDATA::uFlags``.\n\n :param kwargs: Data for the :class:`NOTIFYICONDATA` object.\n \"\"\"\n r = Shell_NotifyIcon(code, ctypes.byref(NOTIFYICONDATA(\n cbSize=ctypes.sizeof(NOTIFYICONDATA),\n hWnd=self._hwnd,\n uID=id(self),\n uFlags=flags,\n **kwargs)))\n if not r:\n raise ctypes.WinError(wintypes.get_last_error())\n\n def _assert_icon_handle(self):\n \"\"\"Asserts that the cached icon handle exists.\n \"\"\"\n if self._icon_handle:\n return\n\n fd, icon_path = tempfile.mkstemp('.ico')\n try:\n with os.fdopen(fd, 'wb') as f:\n self._icon.save(f, format='ICO')\n hicon = LoadImage(\n None,\n wintypes.LPCWSTR(icon_path),\n IMAGE_ICON,\n 0,\n 0,\n LR_DEFAULTSIZE | LR_LOADFROMFILE)\n if not hicon:\n raise ctypes.WinError(wintypes.get_last_error())\n else:\n self._icon_handle = hicon\n\n finally:\n try:\n os.unlink(icon_path)\n except:\n pass\n\n
def _register_class(self):\n \"\"\"Registers the systray window class.\n\n :return: the class atom\n \"\"\"\n window_class = WNDCLASSEX(\n cbSize=ctypes.sizeof(WNDCLASSEX),\n style=0,\n lpfnWndProc=_dispatcher,\n cbClsExtra=0,\n cbWndExtra=0,\n hInstance=GetModuleHandle(None),\n hIcon=None,\n hCursor=None,\n hbrBackground=COLOR_WINDOW + 1,\n lpszMenuName=None,\n lpszClassName='%s%dSystemTrayIcon' % (self.name, id(self)),\n hIconSm=None)\n atom = RegisterClassEx(ctypes.byref(window_class))\n if not atom:\n raise ctypes.WinError(wintypes.get_last_error())\n else:\n return atom\n\n def _unregister_class(self, atom):\n \"\"\"Unregisters the systray window class.\n\n :param atom: The class atom returned by :meth:`_register_class`.\n \"\"\"\n r = UnregisterClassEx(atom, GetModuleHandle(None))\n if not r:\n raise ctypes.WinError(wintypes.get_last_error())\n\n\nWM_CREATE = 0x0001\nWM_NCCREATE = 0x0081\nWM_LBUTTONDOWN = 0x0201\nWM_USER = 0x400\nWM_STOP = WM_USER + 10\nWM_NOTIFY = WM_USER + 11\n\nHWND_MESSAGE = -3\nPM_NOREMOVE = 0\n\nCOLOR_WINDOW = 5\n\nIMAGE_ICON = 1\nLR_LOADFROMFILE = 0x00000010\nLR_DEFAULTSIZE = 0x00000040\n\nNOTIFYICON_VERSION = 3\n\nShell_NotifyIcon = windll.shell32.Shell_NotifyIconW\n\nGetModuleHandle = windll.kernel32.GetModuleHandleW\n\n
RegisterClassEx = windll.user32.RegisterClassExW\nCreateWindowEx = windll.user32.CreateWindowExW\nCreateWindowEx.argtypes = [\n wintypes.DWORD,\n wintypes.LPVOID,\n wintypes.LPCWSTR,\n wintypes.DWORD,\n wintypes.INT,\n wintypes.INT,\n wintypes.INT,\n wintypes.INT,\n wintypes.HWND,\n wintypes.HMENU,\n wintypes.HINSTANCE,\n wintypes.LPVOID]\nCreateWindowEx.restype = wintypes.HWND\nDestroyWindow = windll.user32.DestroyWindow\nUnregisterClassEx = windll.user32.UnregisterClassW\n\nLoadImage = windll.user32.LoadImageW\n\nDispatchMessage = windll.user32.DispatchMessageW\nGetMessage = windll.user32.GetMessageW\nPeekMessage = windll.user32.PeekMessageW\nPostMessage = windll.user32.PostMessageW\nPostQuitMessage = windll.user32.PostQuitMessage\nTranslateMessage = windll.user32.TranslateMessage\n\n\nWNDPROC = ctypes.WINFUNCTYPE(\n ctypes.HRESULT,\n wintypes.HWND, wintypes.UINT, wintypes.WPARAM, wintypes.LPARAM)\n\n\nclass WNDCLASSEX(ctypes.Structure):\n _fields_ = [\n ('cbSize', wintypes.UINT),\n ('style', wintypes.UINT),\n ('lpfnWndProc', WNDPROC),\n ('cbClsExtra', wintypes.INT),\n ('cbWndExtra', wintypes.INT),\n ('hInstance', wintypes.HANDLE),\n ('hIcon', wintypes.HICON),\n ('hCursor', wintypes.HANDLE),\n ('hbrBackground', wintypes.HBRUSH),\n ('lpszMenuName', wintypes.LPCWSTR),\n ('lpszClassName', wintypes.LPCWSTR),\n 
('hIconSm', wintypes.HICON)]\n\n\n@WNDPROC\ndef _dispatcher(hwnd, uMsg, wParam, lParam):\n try:\n return int(Icon._HWND_TO_ICON[hwnd]._message_handlers.get(\n uMsg, lambda w, l: 0)(wParam, lParam))\n\n except KeyError:\n # Icon._HWND_TO_ICON[hwnd] is not yet set; this message is sent during\n # window creation, so we assume it is WM_CREATE or WM_NCCREATE and\n # return TRUE\n return 1\n\n except:\n # TODO: Report\n return 0\n\n\nclass NOTIFYICONDATA(ctypes.Structure):\n class VERSION_OR_TIMEOUT(ctypes.Union):\n _fields_ = [\n ('uTimeout', wintypes.UINT),\n ('uVersion', wintypes.UINT)]\n\n NIF_MESSAGE = 0x00000001\n NIF_ICON = 0x00000002\n NIF_TIP = 0x00000004\n NIF_STATE = 0x00000008\n NIF_INFO = 0x00000010\n NIF_GUID = 0x00000020\n NIF_REALTIME = 0x00000040\n NIF_SHOWTIP = 0x00000080\n\n NIM_ADD = 0x00000000\n NIM_MODIFY = 0x00000001\n NIM_DELETE = 0x00000002\n NIM_SETFOCUS = 0x00000003\n NIM_SETVERSION = 0x00000004\n\n _fields_ = [\n ('cbSize', wintypes.DWORD),\n ('hWnd', wintypes.HWND),\n ('uID', wintypes.UINT),\n ('uFlags', wintypes.UINT),\n ('uCallbackMessage', wintypes.UINT),\n ('hIcon', wintypes.HICON),\n ('szTip', wintypes.WCHAR * 64),\n ('dwState', wintypes.DWORD),\n ('dwStateMask', wintypes.DWORD),\n ('szInfo', wintypes.WCHAR * 256),\n ('version_or_timeout', VERSION_OR_TIMEOUT),\n ('szInfoTitle', wintypes.WCHAR * 64),\n ('dwInfoFlags', wintypes.DWORD),\n ('guidItem', wintypes.LPVOID),\n ('hBalloonIcon', wintypes.HICON)]\n\n _anonymous_ = [\n 'version_or_timeout']\n","sub_path":"lib/pystray/_win32.py","file_name":"_win32.py","file_ext":"py","file_size_in_byte":10975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53870873","text":"import pygame\nimport math\nimport random\nimport platform\nimport functions\n\nfrom pygame.locals import *\nfrom random import *\nfrom math import *\n\nclass cocoa_danmaku_1(pygame.sprite.Sprite):\n def __init__(self, boss_position):\n pygame.sprite.Sprite.__init__(self)\n if platform.system() == 'Windows':\n self.cocoa_danmaku_type1 = pygame.image.load(\"images\\\\boss\\\\Cocoa\\\\cocoa_danmaku_type1_00.png\").convert_alpha()\n if platform.system() == 'Linux' or platform.system()=='Darwin':\n self.cocoa_danmaku_type1 = pygame.image.load(\"images/boss/Cocoa/cocoa_danmaku_type1_00.png\").convert_alpha()\n self.image = self.cocoa_danmaku_type1\n self.rect = self.image.get_rect()\n self.center = [boss_position[0], boss_position[1]]\n self.rect.left = self.center[0] - 10\n self.rect.top = self.center[1] - 10\n self.direction = [0.0,-1.0]\n \n self.damage = 13\n self.speed = 2.5\n self.birth_life = 6\n self.special_count = 0\n self.radius = 0\n self.effects_time = 60\n \n def move(self):\n self.center[0] += self.speed * self.direction[0]\n self.center[1] += self.speed * self.direction[1]\n self.rect.left = self.center[0] - 10\n self.rect.top = self.center[1] - 10\n if self.effects_time:\n self.effects_time -= 1\n else:\n self.effects_time = 60\n\nclass cocoa_danmaku_1_effects(pygame.sprite.Sprite):\n def __init__(self, danmaku):\n pygame.sprite.Sprite.__init__(self)\n self.cocoa_danmaku_type1_effects = []\n for i in range(0,10):\n ch = \"images/boss/Cocoa/cocoa_danmaku_type2_0\" + str(i) + \".png\"\n self.cocoa_danmaku_type1_effects.append(pygame.image.load(ch).convert_alpha())\n self.image = self.cocoa_danmaku_type1_effects[0]\n self.direction = danmaku.direction\n self.center = danmaku.center\n self.rect = self.cocoa_danmaku_type1_effects[0].get_rect()\n self.rect.left = self.center[0] - 10\n 
self.rect.top = self.center[1] - 10\n self.speed = danmaku.speed\n self.lifetime = 30\n \n def move(self):\n self.center[0] += self.speed * self.direction[0]\n self.center[1] += self.speed * self.direction[1]\n self.rect.left = self.center[0] - self.lifetime/5 * self.direction[0]\n self.rect.top = self.center[1] - self.lifetime/5 * self.direction[1]\n self.image = self.cocoa_danmaku_type1_effects[9 - self.lifetime//3]\n\nclass cocoa_bomb(pygame.sprite.Sprite):\n def __init__(self):\n pass\n\nclass Cocoa(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n if platform.system() == 'Windows':\n oimage1 = pygame.image.load(\"images\\\\boss\\\\Cocoa\\\\Cocoa_00.png\").convert_alpha()\n oimage2 = pygame.image.load(\"images\\\\boss\\\\Cocoa\\\\Cocoa_01.png\").convert_alpha()\n oimage3 = pygame.image.load(\"images\\\\boss\\\\Cocoa\\\\Cocoa_02.png\").convert_alpha()\n #self.illustraction = pygame.image.load(\"image\\\\boss\\\\Cocoa\\\\Cocoa_tachie.png\").convert_alpha()\n if platform.system() == 'Linux' or platform.system()=='Darwin':\n oimage1 = pygame.image.load(\"images/boss/Cocoa/Cocoa_00.png\").convert_alpha()\n oimage2 = pygame.image.load(\"images/boss/Cocoa/Cocoa_01.png\").convert_alpha()\n oimage3 = pygame.image.load(\"images/boss/Cocoa/Cocoa_02.png\").convert_alpha()\n #self.illustraction = pygame.image.load(\"images/boss/Cocoa/Cocoa_tachie.png\").convert_alpha()\n \n self.image = pygame.transform.scale(oimage2, (60,75))\n #self.image = oimage2\n self.name = \"cocoa\"\n self.rect = self.image.get_rect()\n self.center = [255.0, 100.0]\n self.temp_position = [255, 100]\n self.direction = [0, -1]\n self.rect.left = self.center[0] - 30\n self.rect.top = self.center[1] - 37\n \n self.speed = 2\n self.radius = 20\n \n self.collide = 1\n self.hp = 1000\n self.max_hp = 1000\n self.spell = 3\n self.crash = 9\n self.energy = 90\n self.spell_time = 0\n \n self.bgm = pygame.mixer.music\n self.bgm.load(\"bgm/Rabi-Ribi Original Soundtrack - 36 Get On With It.ogg\")\n \n def move(self):\n if self.temp_position[0] < 50:\n self.temp_position[0] = 50\n elif self.temp_position[0] > 420:\n self.temp_position[0] = 420\n if self.temp_position[1] < 50:\n self.temp_position[1] = 50\n elif self.temp_position[1] > 200:\n self.temp_position[1] = 200\n \n distance = sqrt( \\\n (self.center[0] - self.temp_position[0]) ** 2 + \\\n (self.center[1] - self.temp_position[1]) ** 2 )\n if distance:\n self.direction = [\n (self.temp_position[0] - self.center[0]) / distance, \n (self.temp_position[1] - self.center[1]) / distance ]\n self.speed = log(distance + 1)/3\n else:\n self.speed = 0\n self.center[0] += self.direction[0] * self.speed\n self.center[1] += self.direction[1] * self.speed\n self.rect.left = self.center[0] - 30\n self.rect.top = self.center[1] - 37\n \n def damage(self, shouting_group):\n for each in shouting_group:\n self.hp -= each.damage\n if self.hp < 0:\n self.hp = 0\n self.spell -= 1\n \n def cocoa_spell_1(self, difficulty, me_erina, boss_group, birth_group, effects_group):\n if self.spell_time < 1800:\n temp_time = self.spell_time % 120\n if temp_time:\n if temp_time%10 == 1 and temp_time<62:\n temp_snipe = functions.snipe(self, me_erina)\n offset = randint(-10,10)\n for i in range(-8,9):\n temp_danmaku = cocoa_danmaku_1(self.center)\n temp_danmaku.center = [self.center[0], self.center[1]]\n temp_danmaku.direction = [cos(temp_snipe + i*pi/32 + pi*offset/320), sin(temp_snipe + i*pi/32 + pi*offset/320)]\n birth_group.add(temp_danmaku)\n else:\n pass\n self.temp_position[0] 
= randint(50,380)\r\n        self.temp_position[1] = randint(50,160)\r\n        self.spell_time += 1\r\n","sub_path":"rabiribi-danmaku/boss/section1/stage1a/cocoa_old.py","file_name":"cocoa_old.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64881184","text":"\nimport unittest\nimport pandas as pd\n\nfrom ..speech_act import speech_act\n\nclass SpeechActTestCase(unittest.TestCase):\n\n    def test_speechActNonEpsConcat(self):\n\n        df = pd.DataFrame({'title': ['ep1', 'ep1','ep1', 'ep2'],\n 'writer': ['Ed', 'Lory', 'Jack', 'Lory'],\n 'pony': ['Pinkie Pie', 'Pinkie Pie', 'Applejack', 'Applejack'],\n 'dialog': [\"hello\", \"there\", \"oh hey\", \"OOBLAH!\"]})\n\n        fixed_df = speech_act(df)\n        fixed_df = fixed_df.reset_index(drop=True)\n\n        real_fixed_df = pd.DataFrame({'pony': ['Pinkie Pie', 'Applejack', 'Applejack'],\n 'dialog': [\"hello there\", \"oh hey\", \"OOBLAH!\"]})\n\n        self.assertEqual([fixed_df.pony.all(), fixed_df.dialog.all()], [real_fixed_df.pony.all(), real_fixed_df.dialog.all()])\n","sub_path":"src/hw3/tests/speech_act.py","file_name":"speech_act.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35381860","text":"# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n    Author: 顾志文\r\n    Date: 2020/03/01\r\n    Project: Twitter User Gender Classification\r\n    Kaggle link: https://www.kaggle.com/crowdflower/twitter-user-gender-classification\r\n\"\"\"\r\nfrom skimage import io\r\nimport os\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import RegexpTokenizer\r\nimport pandas as pd\r\nimport math\r\nimport numpy as np\r\nfrom skimage import exposure, img_as_float\r\n\r\n\r\n# Path where profile images are saved\r\nprofile_image_path = './pro_img/'\r\n\r\n\r\n
def inspect_dataset(df_data):\r\n    \"\"\"\r\n    Inspect basic information about the loaded dataset\r\n    \"\"\"\r\n    print('Basic dataset info:')\r\n    print(df_data.info())\r\n    print('The dataset has {} rows and {} columns'.format(df_data.shape[0], df_data.shape[1]))\r\n    print('Data preview:')\r\n    print(df_data.head())\r\n\r\n\r\ndef check_profile_image(img_link):\r\n    \"\"\"\r\n    Check whether a profile image link is valid.\r\n    If it is, download the image locally and return the save path.\r\n    \"\"\"\r\n    save_image_path = ''\r\n    # Valid image extensions\r\n    valid_img_ext_lst = ['.jpeg', '.png', '.jpg']\r\n\r\n    try:\r\n        img_data = io.imread(img_link)\r\n        image_name = img_link.rsplit('/')[-1]\r\n        if any(valid_img_ext in image_name.lower() for valid_img_ext in valid_img_ext_lst):\r\n            # Make sure the image file name contains a valid extension\r\n            save_image_path = os.path.join(profile_image_path, image_name)\r\n            io.imsave(save_image_path, img_data)\r\n    except:\r\n        print('Profile image link {} is invalid'.format(img_link))\r\n\r\n    return save_image_path\r\n\r\n\r\ndef clean_text(text):\r\n    \"\"\"\r\n    Clean the text data\r\n    \"\"\"\r\n    # just in case\r\n    text = text.lower()\r\n\r\n    # Remove special characters\r\n    text = re.sub('\\s\\W', ' ', text)\r\n    text = re.sub('\\W\\s', ' ', text)\r\n    text = re.sub('\\s+', ' ', text)\r\n\r\n    return text\r\n\r\n\r\n
def split_train_test(df_data, size=0.8):\r\n    \"\"\"\r\n    Split the data into training and test sets\r\n    \"\"\"\r\n    # To keep each class's proportions the same in the training and test sets, handle each class in turn\r\n    df_train = pd.DataFrame()\r\n    df_test = pd.DataFrame()\r\n\r\n    labels = [0, 1]\r\n    for label in labels:\r\n        # Select the records with this gender label\r\n        text_df_w_label = df_data[df_data['label'] == label]\r\n        # Reset the index so each class is indexed from 0, which makes the split below easier\r\n        text_df_w_label = text_df_w_label.reset_index()\r\n\r\n        # Split 80% training / 20% test by default\r\n        # For simplicity, put the first 80% into the training set and the last 20% into the test set\r\n        # A random 80/20 split would also work (try implementing a random split on a DataFrame)\r\n\r\n        # Number of rows for this class\r\n        n_lines = text_df_w_label.shape[0]\r\n        split_line_no = math.floor(n_lines * size)\r\n        text_df_w_label_train = text_df_w_label.iloc[:split_line_no, :]\r\n        text_df_w_label_test = text_df_w_label.iloc[split_line_no:, :]\r\n\r\n        # Append to the overall training and test sets\r\n        df_train = df_train.append(text_df_w_label_train)\r\n        df_test = df_test.append(text_df_w_label_test)\r\n\r\n    df_train = df_train.reset_index()\r\n    df_test = df_test.reset_index()\r\n    return df_train, df_test\r\n\r\n\r\ndef get_word_list_from_data(text_s):\r\n    \"\"\"\r\n    Put the words from the dataset into a single list\r\n    \"\"\"\r\n    word_list = []\r\n    for _, text in text_s.iteritems():\r\n        word_list += text.split(' ')\r\n    return word_list\r\n\r\n\r\ndef proc_text(text):\r\n    \"\"\"\r\n    Tokenization + stop word removal\r\n    \"\"\"\r\n    tokenizer = RegexpTokenizer(r'\\w+')\r\n    words = tokenizer.tokenize(text)\r\n    filtered_words = [word for word in words if word not in stopwords.words('english')]\r\n    return \" \".join(filtered_words)\r\n\r\n\r\n
def extract_tf_idf(text_s, text_collection, common_words_freqs):\r\n    \"\"\"\r\n    Extract TF-IDF features\r\n    \"\"\"\r\n    # Only TF-IDF features are chosen here as an example\r\n    # Word frequency or other text features could be used as additional features\r\n\r\n    n_sample = text_s.shape[0]\r\n    n_feat = len(common_words_freqs)\r\n\r\n    common_words = [word for word, _ in common_words_freqs]\r\n\r\n    # Initialise\r\n    X = np.zeros([n_sample, n_feat])\r\n\r\n    print('Extracting TF-IDF features...')\r\n    for i, text in text_s.iteritems():\r\n        feat_vec = []\r\n        for word in common_words:\r\n            if word in text:\r\n                # If the word is among the frequent words, compute its TF-IDF value\r\n                tf_idf_val = text_collection.tf_idf(word, text)\r\n            else:\r\n                tf_idf_val = 0\r\n\r\n            feat_vec.append(tf_idf_val)\r\n\r\n        # Assign\r\n        X[i, :] = np.array(feat_vec)\r\n\r\n    return X\r\n\r\n\r\ndef hex_to_rgb(value):\r\n    \"\"\"\r\n    Convert a hex color code to RGB values\r\n    \"\"\"\r\n    rgb_list = list(int(value[i:i + 2], 16) for i in range(0, 6, 2))\r\n    return rgb_list\r\n\r\n\r\ndef extract_rgb_feat(hex_color_s):\r\n    \"\"\"\r\n    Extract RGB values from hex color codes as features\r\n    \"\"\"\r\n    n_sample = hex_color_s.shape[0]\r\n    n_feat = 3\r\n\r\n    # Initialise\r\n    X = np.zeros([n_sample, n_feat])\r\n\r\n    print('Extracting RGB features...')\r\n    for i, hex_val in hex_color_s.iteritems():\r\n        feat_vec = hex_to_rgb(hex_val)\r\n\r\n        # Assign\r\n        X[i, :] = np.array(feat_vec)\r\n\r\n    return X\r\n\r\n\r\n
def extract_rgb_hist_feat(img_path_s):\r\n    \"\"\"\r\n    Extract RGB histogram features from images\r\n    \"\"\"\r\n    n_sample = img_path_s.shape[0]\r\n    n_bins = 100  # Number of bins per channel\r\n    n_feat = n_bins * 3\r\n\r\n    # Initialise\r\n    X = np.zeros([n_sample, n_feat])\r\n\r\n    print('Extracting RGB histogram features...')\r\n    for i, img_path in img_path_s.iteritems():\r\n        # Load the image\r\n        img_data = io.imread(img_path)\r\n        img_data = img_as_float(img_data)\r\n\r\n        if img_data.ndim == 3:\r\n            # 3 channels\r\n            hist_r, _ = exposure.histogram(img_data[:, :, 0], nbins=n_bins)\r\n            hist_g, _ = exposure.histogram(img_data[:, :, 1], nbins=n_bins)\r\n            hist_b, _ = exposure.histogram(img_data[:, :, 2], nbins=n_bins)\r\n        else:\r\n            # 2-D image (single channel)\r\n            hist, _ = exposure.histogram(img_data, nbins=n_bins)\r\n            hist_r = hist.copy()\r\n            hist_g = hist.copy()\r\n            hist_b = hist.copy()\r\n\r\n        feat_vec = np.concatenate((hist_r, hist_b, hist_g))\r\n\r\n        # Assign\r\n        X[i, :] = np.array(feat_vec)\r\n\r\n    return X\r\n","sub_path":"pd_tools.py","file_name":"pd_tools.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640202360","text":"\n\"\"\"-----------------------------------------------------------------------------\nDate: 2010-06-09, 2010-07-27, 2010-09-23, 2012-01-12\nPurpose: Short End Curve Management\nDepartment and Desk: IRD Desk\nRequester: CTB\nDeveloper: Zaakirah kajee\nCR Number: 336924, 382872, 441959, 550622, 870016 \n\n\nUpdated to convert all values to 
ZAR\nCR870016 - Anil Parbhoo - Created a new ael variable for excel or csv output type\n\n\n-------------------------------------------------------------------------------\"\"\"\nimport acm\n\nfrom ShortEndProvisionReport import PSResetRiskReport\nfrom PS_Functions import get_pb_fund_shortname\nfrom at_logging import getLogger, bp_start\n\nLOGGER = getLogger()\n\n\nael_variables = [ ('InputType', 'Report Input Type: ', 'string', ['Filter', 'Portfolio'], 'Filter', 1),\n ('Portfolio', 'Portfolio: ', 'FPhysicalPortfolio', None, None, 0, 1, 'Name of Portfolio'),\n ('TrdFilter', 'Trade Filter: ', 'FTradeSelection', acm.FTradeSelection.Instances(), None, 1, 0, 'Name of Trade Filter'), \n ('OutputType', 'Output Type: ', 'string', ['Excel', 'CSV'], 'Excel'),\n ('Outpath', 'Output Path: ', 'string', None, '/services/frontnt/Task/', 1),\n ('Outfile', 'Output File: ', 'string', None, 'File_RiskResetDates', 1),\n ('Currency', 'Currency: ', 'FCurrency', acm.FCurrency.Instances(), 'ZAR', 1, 0, 'Currency'),\n ('Curve', 'Yield Curve: ', 'FYieldCurve', acm.FYieldCurve.Instances(), 'ZAR-SWAP', 1, 0, 'Yield Curve'),\n ('shortName', 'Short Name', 'string', None, None, 1, 0)\n ] \n\n\ndef _convertToParamDictionary(configuration, report_name):\n paramDict = {}\n paramDict['InputType'] = 'Filter'\n paramDict['Portfolio'] = None\n paramDict['OutputType'] = 'Excel'\n \n paramDict['Currency'] = configuration['Currency_'+ report_name]\n paramDict['TrdFilter'] = configuration['TrdFilter_'+ report_name]\n \n paramDict['Outpath'] = configuration['OutputPath']\n paramDict['Outfile'] = configuration['Filename_'+ report_name]\n \n paramDict['Curve'] = configuration['Curve_'+ report_name]\n paramDict['fileID_SoftBroker'] = configuration['fileID_SoftBroker']\n paramDict['shortName'] = get_pb_fund_shortname(acm.FParty[configuration[\"clientName\"]])\n return paramDict\n\n\n#===========================================================Main================================================================================\n\n\ndef ael_main(config):\n process_name = \"ps.reset_risk.{0}\".format(config[\"shortName\"])\n with bp_start(process_name):\n \n file_suffix = 'csv'\n csv_writer_parameters = None\n if 'fileID_SoftBroker' in config.keys():\n file_name = '_'.join([config['fileID_SoftBroker'], config['Outfile'], acm.Time.DateToday().replace('-', '')])\n else:\n file_name = '_'.join([config['filename'], acm.Time.DateToday().replace('-', '')])\n output_path = config['Outpath']\n input_type = config['InputType']\n if input_type == 'Portfolio':\n source = config['Portfolio']\n else:\n source = config['TrdFilter']\n \n \n yield_curve = config['Curve']\n currency = config['Currency']\n if 'frameworkVersion' in config.keys():\n frameworkVersion = config['frameworkVersion']\n else:\n frameworkVersion = 'N/A'\n \n \n collection = PSResetRiskReport(file_suffix, output_path,\n csv_writer_parameters, source, yield_curve,\n currency, frameworkVersion, file_name)\n collection.create_reports()\n \n return","sub_path":"Python modules/PS_ResetRisk_Report.py","file_name":"PS_ResetRisk_Report.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363234558","text":"from django.shortcuts import render, redirect\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, Http404, JsonResponse\nfrom django.contrib.auth.models import User\nfrom 
django.db.models import Q, Count\nfrom django.conf import settings\nfrom django.utils import translation\nfrom ..models import Occupation, Artist, Genre, Movie, Profile, MovieRating, Series\nfrom datetime import date\nimport random\n\n## Additional functions\n\ndef is_valid_queryparam(param):\n return param != '' and param is not None\n\ndef getMovieRating(movie_id):\n movie = Movie.objects.get(pk=movie_id)\n ratings = MovieRating.objects.filter(movie=movie)\n count = ratings.count()\n average = 0\n if count > 0:\n sum = 0\n for r in ratings:\n sum += r.rating\n average = sum / count\n return average\n\ndef calculate_age(born):\n today = date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\ndef getBirthdate(age):\n today = date.today()\n year = today.year - age\n return date(year, today.month, today.day)\n\n\n## Main views\n\ndef home(request):\n # user_language = 'mn'\n # translation.activate(user_language)\n # request.session[translation.LANGUAGE_SESSION_KEY] = user_language\n latestmovies = Movie.objects.all().order_by('-created_at')[:4]\n latestseries = Series.objects.all().order_by('-created_at')[:4]\n suggestedmovie = random.choice(latestmovies)\n suggestedseries = Series.objects.latest('created_at')\n count_movie = Movie.objects.all().count()\n count_series = Series.objects.all().count()\n count_artist = Artist.objects.all().count()\n # topratedmovies = Movie.objects.all().order_by('-imdb_rating')[:4] \n # mostlikedmovies = Movie.objects.annotate(count_liked=Count('liked_movies')).order_by('-count_liked')[:6]\n # mostwatchedmovies = Movie.objects.annotate(count_watched=Count('moviewatchedlist')).order_by('-count_watched')[:6]\n profile = None\n if request.user.is_authenticated:\n profile = Profile.objects.get(user=request.user)\n context = {\n 'latestmovies': latestmovies,\n 'latestseries': latestseries,\n 'suggestedmovie': suggestedmovie,\n 'suggestedseries': suggestedseries,\n 'count_movie': count_movie,\n 'count_series': count_series,\n 'count_artist': count_artist,\n # 'mostlikedmovies': mostlikedmovies,\n # 'mostwatchedmovies': mostwatchedmovies,\n 'profile': profile\n }\n return render(request, 'home.html', context) \n\n@login_required\ndef profile(request):\n profile = Profile.objects.get(user=request.user) \n movie_favorite = profile.movie_favorite.order_by('name') \n movie_watched = profile.movie_watched.order_by('name')\n movie_watchlist = profile.movie_watchlist.order_by('name')\n context = {\n 'profile': profile,\n 'movie_favorite': movie_favorite,\n 'movie_watched': movie_watched,\n 'movie_watchlist': movie_watchlist\n }\n return render(request, 'profile.html', context)\n","sub_path":"movies/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286297406","text":"import time\nimport numpy as np\nimport threading\nimport math\nimport os\nimport datetime\nimport pygame\nimport keyboard\nfrom ctypes import windll\nfrom pylsl import StreamInlet, resolve_stream\nfrom scipy.signal import butter, lfilter\nfrom FBCSP import FBCSP\nfrom CommonSpatialPattern import CommonSpatialPattern\nimport multiprocessing\nfrom multiprocessing import Process, Pipe\nfrom socket import *\nfrom struct import *\n\nEEGdata_size = 60\nfreq = 512\ntotal_ch = 31\nselect_ch = range(1, total_ch + 1)\n\nnum_trials = 8\nnum_tests = 0\nsession_sec = 12 # how long each session is\nwindow_sec = 3\nstride_sec = 1\nwait_time = 0 # 
how long to wait before starting to collect eeg\n\nnum_classes = 4 # rest, right, left, up\nclass_order = [3, 0, 1, 2]\nnum_iteration = len(class_order)\n\nfolder = \"Data\"\ndata_type = \"EEG_BallFour(training)\"\ndate = datetime.datetime.now().strftime(\"%m%d\")\n\nSetWindowPos = windll.user32.SetWindowPos\nNOSIZE = 1\nNOMOVE = 2\nTOPMOST = -1\nNOT_TOPMOST = -2\ncircle_radius = 20\nXSCREEN = 1920 # 1600\nYSCREEN = 1080 # 900\nscreen = None\nplus_size = XSCREEN / 24\n\nchannel_names=[]\n\n\nclass Marker:\n def __init__(self):\n self.position = 0\n self.points = 0\n self.channel = -1\n self.type = \"\"\n self.description = \"\"\n\nclass BallRecorderFour:\n def __init__(self, subject=\"test\", nChannels=31, frequency=512, streamer_type=\"OpenVibe\", channels=None):\n global screen, total_ch, freq, select_ch, channel_names\n freq = int(frequency)\n channel_names=channels\n total_ch = int(nChannels)\n select_ch = range(1, total_ch + 1)\n\n self.streamer=str(streamer_type)\n self.name = self.__class__.__name__\n self.running = True\n self.edx = 0\n self.global_time = time.clock()\n self.EEGdata = np.zeros((freq * EEGdata_size, total_ch + 1))\n\n if(self.streamer==\"OpenVibe\"):\n print(\"{}: Looking for an EEG stream...\".format(self.name))\n streams = resolve_stream('type', 'signal')\n self.inlet = StreamInlet(streams[0])\n\n i = 1\n fname = \"{}/{}_{}_{}t{}c{}s{}ch_{}\".format(folder, date, data_type, num_trials, num_classes, window_sec,\n len(select_ch), subject)\n self.filename = \"{}_{}.txt\".format(fname, i)\n while os.path.exists(self.filename):\n i += 1\n self.filename = \"{}_{}.txt\".format(fname, i)\n self.file = open(self.filename, \"w\")\n print(\"{}: Writing to {}\".format(self.name, self.filename))\n\n pygame.init()\n pygame.font.init()\n screen = pygame.display.set_mode([XSCREEN, YSCREEN], pygame.FULLSCREEN)\n always_on_top(False)\n self.output_data = []\n self.output_label = []\n self.class_count = [0] * num_classes\n\n self.model = CommonSpatialPattern(augment=False, nChannels=total_ch, chnames=channels)\n\n @staticmethod\n def draw_train(flag):\n screen.fill((0, 0, 0))\n if flag == 0:\n pygame.draw.rect(screen, (255, 255, 255), (XSCREEN / 2 - (plus_size / 2), YSCREEN / 2 - 3, plus_size, 6))\n pygame.draw.rect(screen, (255, 255, 255), (XSCREEN / 2 - 3, YSCREEN / 2 - (plus_size / 2), 6, plus_size))\n elif flag == 1:\n pygame.draw.polygon(screen, (255, 255, 255), (\n (XSCREEN / 2 - (plus_size / 2), YSCREEN / 2 - (plus_size / 2)),\n (XSCREEN / 2 + (plus_size / 2), YSCREEN / 2),\n (XSCREEN / 2 - (plus_size / 2), YSCREEN / 2 + (plus_size / 2))))\n elif flag == 2:\n pygame.draw.polygon(screen, (255, 255, 255), (\n (XSCREEN / 2 - (plus_size / 2), YSCREEN / 2),\n (XSCREEN / 2 + (plus_size / 2), YSCREEN / 2 - (plus_size / 2)),\n (XSCREEN / 2 + (plus_size / 2), YSCREEN / 2 + (plus_size / 2))))\n elif flag == 3:\n pygame.draw.polygon(screen, (255, 255, 255), (\n (XSCREEN / 2, YSCREEN / 2 - (plus_size / 2)),\n (XSCREEN / 2 + (plus_size / 2), YSCREEN / 2 + (plus_size / 2)),\n (XSCREEN / 2 - (plus_size / 2), YSCREEN / 2 + (plus_size / 2))))\n else:\n assert False\n pygame.display.update()\n\n @staticmethod\n def draw_test(flag):\n screen.fill((0, 0, 0))\n if flag == 0:\n pygame.draw.rect(screen, (255, 255, 255),\n (int(XSCREEN / 2) - (plus_size / 2), int(YSCREEN / 2) - 3, plus_size, 6))\n pygame.draw.rect(screen, (255, 255, 255),\n (int(XSCREEN / 2) - 3, int(YSCREEN / 2) - (plus_size / 2), 6, plus_size))\n elif flag == 1:\n pygame.draw.rect(screen, (0, 255, 0), (XSCREEN - circle_radius, 0, 
circle_radius, YSCREEN))\n pygame.draw.circle(screen, (0, 0, 255), (int(XSCREEN / 2), YSCREEN - circle_radius * 2), circle_radius)\n elif flag == 2:\n pygame.draw.rect(screen, (0, 255, 0), (0, 0, circle_radius, YSCREEN))\n pygame.draw.circle(screen, (0, 0, 255), (int(XSCREEN / 2), YSCREEN - circle_radius * 2), circle_radius)\n elif flag == 3:\n pygame.draw.rect(screen, (0, 255, 0), (0, 0, XSCREEN, circle_radius))\n pygame.draw.circle(screen, (0, 0, 255), (int(XSCREEN / 2), YSCREEN - circle_radius * 2), circle_radius)\n else:\n assert False\n pygame.display.update()\n\n @staticmethod\n def update_circle(x, y, v_x, v_y):\n pygame.draw.circle(screen, (0, 0, 0), (x, y), circle_radius)\n x += v_x\n y += v_y\n pygame.draw.circle(screen, (0, 0, 255), (x, y), circle_radius)\n pygame.display.update()\n return x, y\n\n def collect_data(self):\n print(\"{}: Collection starting\".format(self.name))\n for i in range(num_trials * num_iteration):\n self.edx = 0\n time.sleep(5)\n flag = class_order[i % num_iteration]\n self.draw_train(flag)\n start_time = time.clock()\n prev_time = 0\n while True:\n current_time = int(math.floor(time.clock() - start_time))\n if current_time >= session_sec:\n break\n if current_time >= wait_time + window_sec and current_time >= prev_time + stride_sec:\n prev_time = current_time\n edx=self.edx\n selected_eeg = self.EEGdata[(edx - (freq * window_sec)): edx, select_ch]\n print(selected_eeg)\n self.output_data.append(selected_eeg.T)\n self.output_label.append(flag)\n self.file.write(str(np.ndarray.tolist(selected_eeg)) + '\\n')\n self.file.write(str(flag) + '\\n')\n self.class_count[flag] += 1\n screen.fill((0,0,0))\n pygame.display.update()\n print(\"{}: Collection finished\".format(self.name))\n\n def test_model(self):\n print(\"{}: Started model testing\".format(self.name))\n for t in range(num_tests * 3):\n # collecting direction data\n size_rest, size_right, size_left, size_up = self.class_count\n circle_x = int(XSCREEN / 2)\n circle_y = YSCREEN - circle_radius * 2\n v_x = 0\n v_y = 0\n if size_right <= size_left and size_right <= size_up:\n flag = 1\n elif size_left <= size_right and size_left <= size_up:\n flag = 2\n else:\n flag = 3\n self.draw_test(flag)\n self.edx = 0\n start_time = time.clock()\n prev_time = 0\n print(\"Flag is: {}, class_count is: {}\".format(flag, self.class_count))\n while circle_radius < circle_x < XSCREEN - circle_radius and circle_y > circle_radius:\n current_time = int(math.floor(time.clock() - start_time))\n if keyboard.is_pressed('m'):\n time.sleep(2)\n break\n if keyboard.is_pressed('p'):\n self.file.close()\n time.sleep(3)\n print(\"{}: Forced close model testing\".format(self.name))\n return\n if current_time < 3:\n continue\n\n circle_x, circle_y = self.update_circle(circle_x, circle_y, v_x, v_y)\n time.sleep(0.01)\n\n if current_time >= prev_time + window_sec:\n prev_time = current_time\n edx=self.edx\n selected_eeg = get_eeg(self.EEGdata, edx - (freq * window_sec), edx)\n transformed_eeg = np.asarray(np.transpose(np.asmatrix(selected_eeg)))\n transformed_eeg = np.asarray([transformed_eeg])\n print(transformed_eeg)\n if np.shape(transformed_eeg) != (1, len(select_ch), window_sec * freq):\n print(np.shape(transformed_eeg))\n assert False\n\n predicted_label = self.model.predict(transformed_eeg)\n print(predicted_label)\n if predicted_label[0] == 0:\n v_x = 0\n v_y = 0\n elif predicted_label[0] == 1:\n v_x = 1\n v_y = 0\n elif predicted_label[0] == 2:\n v_x = -1\n v_y = 0\n elif predicted_label[0] == 3:\n v_x = 0\n v_y = -1\n 
self.output_data.append(selected_eeg.T)\n self.output_label.append(flag)\n self.file.write(str(np.ndarray.tolist(selected_eeg)) + '\\n')\n self.file.write(str(flag) + '\\n')\n self.class_count[flag] += 1\n\n # collecting rest data\n flag = 0\n self.draw_test(flag)\n self.edx = 0\n start_time = time.clock()\n prev_time = 0\n size_rest, size_right, size_left, size_up = self.class_count\n while size_rest < min(size_left, size_right, size_up):\n current_time = int(math.floor(time.clock() - start_time))\n if keyboard.is_pressed('m'):\n time.sleep(2)\n break\n if keyboard.is_pressed('p'):\n self.file.close()\n time.sleep(3)\n print(\"{}: Forced close model testing\".format(self.name))\n return\n\n if current_time >= 3 and current_time >= prev_time + window_sec:\n prev_time = current_time\n edx=self.edx\n selected_eeg = get_eeg(self.EEGdata, edx - (freq * window_sec), edx)\n self.output_data.append(selected_eeg.T)\n self.output_label.append(flag)\n self.file.write(str(np.ndarray.tolist(selected_eeg)) + '\\n')\n self.file.write(str(flag) + '\\n')\n self.class_count[flag] += 1\n size_rest, size_right, size_left, size_up = self.class_count\n\n min_data = []\n min_label = []\n min_count = min(self.class_count)\n count = [0] * num_classes\n print(\"len_output_data: {}, min_count: {}\".format(len(self.output_data), min_count))\n for j in range(len(self.output_data)):\n if count[self.output_label[j]] >= min_count:\n continue\n min_data.append(self.output_data[j])\n min_label.append(self.output_label[j])\n count[self.output_label[j]] += 1\n\n self.model.build_model(min_data, min_label)\n\n def close_recorder(self):\n self.file.close()\n self.running = False\n if(self.streamer==\"OpenVibe\"):\n self.inlet.close_stream()\n pygame.display.quit()\n\n def retrieve_eeg(self):\n if(self.streamer==\"Brainvision Recorder\"):\n parent_conn, child_conn = Pipe()\n eeg_process = multiprocessing.Process(target=retrieve_eeg_BREC, args=(child_conn,))\n eeg_process.start()\n\n while self.running:\n if(self.streamer==\"Brainvision Recorder\"):\n (sample, timestamp) = parent_conn.recv()\n elif(self.streamer==\"OpenVibe\"):\n sample, timestamp = self.inlet.pull_sample()\n\n current_time = time.clock() - self.global_time\n self.EEGdata[self.edx % (freq * EEGdata_size), 0] = current_time\n self.EEGdata[self.edx % (freq * EEGdata_size), 1:total_ch + 1] = sample\n self.edx = self.edx + 1\n if self.edx >= freq * EEGdata_size:\n self.edx = 0\n\n def start(self):\n eeg_thrd = threading.Thread(target=self.retrieve_eeg)\n eeg_thrd.daemon = True\n eeg_thrd.start()\n\n self.collect_data()\n self.model.build_model(self.output_data, self.output_label)\n self.test_model()\n self.close_recorder()\n\n return self.filename\n\n\ndef always_on_top(b):\n zorder = (NOT_TOPMOST, TOPMOST)[b] # choose a flag according to bool\n hwnd = pygame.display.get_wm_info()['window'] # handle to the window\n SetWindowPos(hwnd, zorder, 0, 0, 0, 0, NOMOVE | NOSIZE)\n\n\ndef get_eeg(data, x, y):\n if x < 0:\n if y == 0:\n return data[x:(freq * EEGdata_size), select_ch]\n else:\n return np.concatenate((data[x:(freq * EEGdata_size), select_ch], data[0: y, select_ch]), axis=0)\n return data[x:y, select_ch]\n\n\n######Brainvision Recorder section######\n\n\ndef RecvData(socket, requestedSize):\n returnStream = ''\n while len(returnStream) < requestedSize:\n databytes = socket.recv(requestedSize - len(returnStream))\n if databytes == '':\n raise RuntimeError\n returnStream += databytes\n\n return returnStream\n\n\ndef SplitString(raw):\n stringlist = []\n s = \"\"\n 
for i in range(len(raw)):\n        if raw[i] != '\\x00':\n            s = s + raw[i]\n        else:\n            stringlist.append(s)\n            s = \"\"\n\n    return stringlist\n\n\ndef GetProperties(rawdata):\n    # Extract numerical data\n    (channelCount, samplingInterval) = unpack(' ')\n\nn = [\n    [1, 3, 5, 7],\n    [10, 11, 16, 20],\n    [23, 30, 34, 50]\n]\n\ns = Solution()\nprint(s.search_matrix(n, 23))\n\n","sub_path":"easyleetcode/leetcodes/Leetcode_074_Search a 2D Matrix.py","file_name":"Leetcode_074_Search a 2D Matrix.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1429202","text":"from progress.bar import IncrementalBar\nfrom stemming import PorterStemmer\nfrom nltk.tokenize import word_tokenize\nimport os\nimport csv\nimport sys\n\n\ndef main():\n\n    stop_words = set()\n    if len(sys.argv) != 2:\n        print(\"\\nPlease enter the file name correctly\")\n    elif not os.path.exists(sys.argv[1]):\n        print(\"File does not exist, please check\")\n    else:\n        '''\n        Prepare for the stop_words set\n        '''\n        with open(\"stop_words.lst.txt\", 'r') as f:\n            lines = f.readlines()\n            for line in lines:\n                stop_words.add(line.rstrip())\n\n\n        root = './doc/'\n        if not os.path.exists(root):\n            os.mkdir(root)\n        f = sys.argv[1]\n\n        file_processing(f,root,stop_words)\n\n
def file_processing(file,root,stop_words):\n    p = PorterStemmer()\n    with open(file) as f:\n        length = len(f.readlines())-1\n    bar = IncrementalBar('In progress', max=length)\n\n    with open(file, 'r') as csvFile:\n\n        reader = csv.reader(csvFile)\n        next(reader)\n\n        for row ,i in zip(reader,range(1,length+1)):\n            if not os.path.exists(root+row[1]):\n                os.mkdir(root+row[1])\n\n            # Remove stop words first\n            example = row[0]\n            word_tokens = word_tokenize(example)\n\n            filtered_sentence = [w for w in word_tokens if not w in stop_words]\n            joined_sentence = (\" \").join(filtered_sentence)+'\\n'\n\n            # Do stemming\n\n            output = ''\n            word = ''\n            line = joined_sentence\n            if line == '':\n                break\n            for c in line:\n\n                if c.isalpha():\n                    word += c.lower()\n                else:\n                    if word:\n                        output += p.stem(word, 0, len(word) - 1)\n                        word = ''\n                    output += c.lower()\n\n\n            path = root+row[1]+'/'+row[2]+'.txt'\n            with open(path, \"w\") as cursor:\n\n                # Write file\n                cursor.write(output)\n\n            bar.next()\n\n\n        bar.finish()\n\nif __name__ == '__main__':\n    main()","sub_path":"4412-proj/doc_prepare.py","file_name":"doc_prepare.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"506966488","text":"import eel\nimport time\nimport serial\nimport time\nimport argparse\n\neel.init('web')\n\n@eel.expose\ndef getTime():\n    return time.strftime('%c')\n\n@eel.expose \ndef enviarGcode(gcode):\n\ts = serial.Serial('/dev/ttyUSB0',9600,timeout=5)\n\ttime.sleep(2) \n\t#s = serial.Serial('/dev/ttyACM0',9600)\n\tprint ( 'Abrindo porta serial' )\n\t\n\tf = open(gcode,'r') \n\tprint ( 'Abrindo gcode' ) \n\ttime.sleep(3) \t\t\t\t\t # Wait for Printrbot to initialize\n\ts.flushInput()\n\ts.flushOutput() \t\t\t\t\t # Flush startup text in serial input\n\tprint ( 'Enviando gcode' )\n\t\n\tfor line in f:\n\t\tl = line\n\t\tl = l.rstrip('\\r\\n')\t\t\t\t\t # Strip all EOL characters for streaming\n\t\tif (len(l)>0) :\n\t\t\tfor b in l: \t\t\t\t\t\t\t\t\n\t\t\t\ts.write(b.encode('ascii'))\n\t\t\t\ts.flush()\n\t\t\t\ts.flushInput()\n\t\t\t\ts.flushOutput() \n\t\t\t\t\t\n\t\ts.flush() \n\t\tgrbl_out = s.readline()\n\t\twhile(b'done' not in grbl_out): \n\t\t\tgrbl_out = 
s.readline()\n\t\t\ttime.sleep(2)\n\t\t\tprint ( ' : ' + grbl_out.decode() )\n\t\t\t\n\t\ts.flushInput()\n\t\ts.flushOutput()\n\t\n\t# Close file and serial port\n\tf.close()\n\ts.close()\t\t\n\neel.start('main.html')\n","sub_path":"EnviarGcode/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"214092472","text":"#!/usr/bin/python3\n\nimport os\nimport sys\nimport random\nfrom collections import defaultdict\n\n\ndef derivative_of_cost_function(x, y, dict_len, slope, segment):\n\tfunc = float(segment + float(slope * x))\n\tsegment_val = float(func - y)\n\tslope_val = (float(func - y) * x)\n\t\n\tprint (\"Segment Val: %f :: Slope Val: %f\" % (segment_val, slope_val))\n\treturn (segment_val, slope_val)\n\n\ndef compute_grad_variable(adict):\n\tslope = float(0)\n\tsegment = float(0)\n\tlearning = float(0.001)\n\t\n\tdict_len = len(adict)\n\tprint(adict)\n\tprint(dict_len)\n\tseg_grad = slope_grad = float(0)\n\twhile (True):\n\t\tfor key in adict:\n\t\t\tx = key\n\t\t\talist = adict[key]\n\t\t\tfor idx in range(len(alist)):\n\t\t\t\ty = alist[idx]\n\t\t\t\t(seg_val, slope_val) = derivative_of_cost_function(x, y, dict_len, slope, segment)\n\t\t\t\tseg_grad += seg_val\n\t\t\t\tslope_grad += slope_val\n\n\t\tgrad = float(1 / float(dict_len))\n\t\tprint(grad)\n\t\tseg_grad = float(seg_grad * grad)\n\t\tslope_grad = float(slope_grad * grad)\n\t\tprint (\"GRAD Segment: %f :: Slope: %f\" % (seg_grad, slope_grad))\n\t\ttemp_seg = float(segment - float(learning * seg_grad))\n\t\ttemp_slope = float(slope - float(learning * slope_grad))\n\t\tprint (\"TEMP Segment: %f :: Slope: %f\" % (temp_seg, temp_slope))\n\t\tprint (\"Segment: %f :: Slope: %f\" % (segment, slope))\n\n\t\tif (segment == temp_seg and slope == temp_slope):\n\t\t\tbreak\n\t\telse:\n\t\t\tsegment = temp_seg\n\t\t\tslope = temp_slope\n\t\tprint (\"Segment: %f :: Slope: %f\" % (segment, slope))\n\t\n\treturn(segment, slope)\n\ntheta_segment = 0\ntheta_slope = 0\n\n
def main():\n\tglobal theta_segment, theta_slope\n\tinput_dict = defaultdict(list);\n\tx_list = []\n\ty_list = []\n\tinput_data = True\n\twith open(\"grad.txt\", 'r') as fp:\n\t\tfor line in fp:\n\t\t\tif (input_data):\n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\tx_list = [i for i in line.split(' ')]\n\t\t\t\tinput_data = False\n\t\t\telse:\n\t\t\t\tline = line.rstrip('\\n')\n\t\t\t\ty_list = [i for i in line.split(' ')]\n\t\t\t\tinput_data = True\n\n\tx_list = list(map(int, x_list))\n\ty_list = list(map(int, y_list))\n\n\tif (len(x_list) != len(y_list)):\n\t\tprint(\"Syncing error in input and output data X : Y\")\n\telse:\n\t\tfor a, b in zip(x_list, y_list):\n\t\t\tinput_dict[a].append(b)\n\n\tsegment, slope = compute_grad_variable(input_dict)\n\ttheta_segment = segment\n\ttheta_slope = slope\n\t\n\tprint(\"$1: %f :: $2: %f\" % (segment, slope))\n\tinput_dict.clear()\n\n\tfp.close()\n\t\t\t\t\n\nmain()\n","sub_path":"Linear_Regression/gradient_descent/gradient_descent.py","file_name":"gradient_descent.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467205122","text":"###########################################\n#\n# API for interfacing firmware > 2.0\n#\n# (c) 2019 Qontrol Systems LLP\n#\n###########################################\n\nfrom __future__ import print_function\nimport serial, re, time\nfrom collections import deque as fifo\nfrom random import shuffle\nfrom serial.tools import 
list_ports\nimport sys\n\n\nQ8x_ERRORS = {0:'Unknown error.',\n\t1:'Over-voltage error on channel {ch}.',\n\t2:'Over-current error on channel {ch}.',\n\t3:'Power error.',\n\t4:'Calibration error.',\n\t5:'Output error.',\n\t10:'Unrecognised command.',\n\t11:'Unrecognised input parameter.',\n\t12:'Unrecognised channel, {ch}.',\n\t13:'Operation forbidden.',\n\t14:'Serial buffer overflow.',\n\t15:'Serial communication error.',\n\t16:'Command timed out.',\n\t17:'SPI error.',\n\t18:'ADC error.',\n\t19:'I2C error.',\n\t30:'Firmware error.',\n\t90:'Powered up.'}\n\n\n\t\nRESPONSE_OK = 'OK\\n'\nERROR_FORMAT = '[A-Za-z]{1,3}(\\d+):(\\d+)'\n\n\nclass Qontroller(object):\n\t\"\"\"\n\tSuper class which handles serial communication, device identification, and logging.\n\t\n\t\tdevice_id = None\t\t\t\t\tDevice ID\n\t\tserial_port = None\t\t\t\t\tSerial port object\n\t\tserial_port_name = None\t\t\t\tName of serial port, eg 'COM1' or '/dev/tty1'\n\t\terror_desc_dict = Q8x_ERRORS\t\t\tError code descriptions\n\t\tlog = fifo(maxlen = 256)\t\t\tLog FIFO of sent commands and received errors\n\t\tlog_handler = None\t\t\t\t\tFunction which catches log dictionaries\n\t\tlog_to_stdout = True\t\t\t\tCopy new log entries to stdout\n\t\tresponse_timeout = 0.050\t\t\tTimeout for response or error to commands\n\t\tinter_response_timeout = 0.020\t\tTimeout for response or error to get commands\n\t\n\tLog handler:\n\tThe log handler may be used to catch and dynamically handle certain errors, as they arise. In the following example, it is set up to raise a RuntimeError upon reception of errors E01, E02, and E03:\n\t\n\t\tq = Qontroller()\n\t\n\t\tfatal_errors = [1, 2, 3]\n\t\n\t\tdef my_log_handler(err_dict):\n\t\t\tif err_dict['type'] is 'err' and err_dict['id'] in fatal_errors:\n\t\t\t\traise RuntimeError('Caught Qontrol error \"{1}\" at {0} ms'.format(1000*err_dict['proctime'], err_dict['desc']))\n\n\t\tq.log_handler = my_log_handler\n\t\n\t\"\"\"\n\n\n\tdef __init__(self, *args, **kwargs):\n\t\t\"\"\"\n\t\tInitialiser.\n\t\t\"\"\"\n\t\t\n\t\t# Defaults\n\t\t\n\t\tself.device_id = None\t\t\t\t\t\t# Device ID (i.e. 
[device type]-[device number])\n\t\tself.serial_port = None\t\t\t\t\t\t# Serial port object\n\t\tself.serial_port_name = None\t\t\t\t\t# Name of serial port, eg 'COM1' or '/dev/tty1'\n\t\tself.baudrate = 115200\t\t\t\t\t\t# Serial port baud rate (signalling frequency, Hz)\n\t\tself.error_desc_dict = Q8x_ERRORS\t\t\t\t# Error code descriptions\n\t\tself.log = fifo(maxlen = 512)\t\t\t\t# Log FIFO of sent commands and received errors\n\t\tself.log_handler = None\t\t\t\t\t\t# Function which catches log dictionaries\n\t\n\t\tself.log_to_stdout = False\t\t\t\t\t# Copy new log entries to stdout\n\t\tself.response_timeout = 0.050\t\t\t\t# Timeout for RESPONSE_OK or error to set commands\n\t\tself.inter_response_timeout = 0.020\t\t\t# Timeout between received messages\n\t\t\n\t\t\n\t\t# Setup Rx and Tx logs\n\t\tself.total_rx_str = ''\n\t\tself.total_tx_str = ''\n\t\t\n\t\t# Set a time benchmark\n\t\tself.init_time = time.time()\n\t\t\n\t\t# Get arguments from init\n\t\t\n\t\t# Populate parameters, if provided\n\t\tfor para in ['device_id', 'serial_port_name', 'error_desc_dict', 'log_handler', 'log_to_stdout', 'response_timeout', 'inter_response_timeout', 'baudrate']:\n\t\t\ttry:\n\t\t\t\tself.__setattr__(para, kwargs[para])\n\t\t\texcept KeyError:\n\t\t\t\tcontinue\n\t\t\n\t\t# Find serial port by asking it for its device id\n\t\tif 'device_id' in kwargs:\n\t\t\t# Search for port with matching device ID\n\t\t\tob = re.match('(Q\\w+)-([0-9a-fA-F\\*]+)', self.device_id)\n\t\t\tif ob is None:\n\t\t\t\traise AttributeError('Entered device ID ({0}) must be of form \"[device type]-[device number]\" where [device number] can be hexadecimal'.format(self.device_id))\n\t\t\ttarg_dev_type,targ_dev_num = ob.groups()\n\t\t\t\n\t\t\t# Find serial port based on provided device ID (randomise their order)\n\t\t\tcandidates = []\n\t\t\tpossible_ports = list(list_ports.comports())\n\t\t\tshuffle(possible_ports)\n\t\t\ttries = 0\n\t\t\tfor port in possible_ports:\n\t\t\t\tfor i in range(60):\n\t\t\t\t\tsys.stdout.write(' ')\n\t\t\t\tsys.stdout.write('\\r')\n\t\t\t\tsys.stdout.write('Querying port {:}... 
'.format(port.device))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\t# Instantiate the serial port\n\t\t\t\t\tself.serial_port = serial.Serial(port.device, self.baudrate, timeout=0.5)\n\t\t\t\t\tself.serial_port.close()\n\t\t\t\t\tself.serial_port.open()\n\t\t\t\t\t# Clear buffer\n\t\t\t\t\tself.serial_port.reset_input_buffer()\n\t\t\t\t\tself.serial_port.reset_output_buffer()\n\t\t\t\t\t# Transmit our challenge string\n\t\t\t\t\tself.serial_port.write(\"id?\\n\".encode('ascii'))\n\t\t\t\t\t# Receive response\n\t\t\t\t\tresponse = self.serial_port.read(size=64).decode(\"ascii\") \n\t\t\t\t\t# Check if we received a response\n\t\t\t\t\tif response == '':\n\t\t\t\t\t\tsys.stdout.write('No response\\n')\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t# Match the device ID\n\t\t\t\t\tob = re.match('.*((?:'+ERROR_FORMAT+')|(?:Q\\w+-[0-9a-fA-F\\*]+)).*', response)\n\t\t\t\t\tif ob is not None:\n\t\t\t\t\t\tob = re.match('(Q\\w+)-([0-9a-fA-F\\*]+)\\n', response)\n\t\t\t\t\t\tif ob is not None:\n\t\t\t\t\t\t\tsys.stdout.write('{:}\\n'.format(response))\n\t\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\t\tdev_type,dev_num = ob.groups()\n\t\t\t\t\t\t\tcandidates.append({'dev_type':dev_type, 'dev_num':dev_num, 'port':port.device})\n\t\t\t\t\t\t\tif dev_type == targ_dev_type and dev_num == targ_dev_num:\n\t\t\t\t\t\t\t\tself.serial_port_name = port.device\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tob = re.match(ERROR_FORMAT, response)\n\t\t\t\t\t\t\tif ob is not None:\n\t\t\t\t\t\t\t\tsys.stdout.write('Error')\n\t\t\t\t\t\t\t\t# Try this port again later\n\t\t\t\t\t\t\t\tif tries < 3:\n\t\t\t\t\t\t\t\t\tsys.stdout.write('. Will try again...')\n\t\t\t\t\t\t\t\t\tpossible_ports.append(port)\n\t\t\t\t\t\t\t\t\ttries += 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tsys.stdout.write('\\n')\n\t\t\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\telse:\n\t\t\t\t\t\tsys.stdout.write('Not a valid device\\n'.format(response))\n\t\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t# Close port\n\t\t\t\t\tself.serial_port.close()\n\t\t\t\t\t\n\t\t\t\texcept serial.serialutil.SerialException:\n\t\t\t\t\tsys.stdout.write('Busy\\n')\n\t\t\t\t\tsys.stdout.flush()\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t# If the target device is not found\n\t\t\tif not self.serial_port.is_open:\n\t\t\t\t# Check whether we found another possibility\n\t\t\t\tfor candidate in candidates:\n\t\t\t\t\tif candidate['dev_type'] == targ_dev_type:\n\t\t\t\t\t\tself.device_id = candidate['dev_type']+'-'+candidate['dev_num']\n\t\t\t\t\t\tself.serial_port_name = candidate['port']\n\t\t\t\t\t\tprint ('Qontroller.__init__: Warning: Specified device ID ({0}) could not be found. Using device with matching type ({2}) on port {1}.'.format(kwargs['device_id'], self.serial_port_name, self.device_id))\n\t\t\t\t\t\tbreak\n\t\t\t\t# If no similar device exists, abort\n\t\t\t\tif all([candidate['dev_type'] != targ_dev_type for candidate in candidates]):\n\t\t\t\t\traise AttributeError('Specified device ID ({0}) could not be found.'.format(kwargs['device_id']))\n\t\t\t\n\t\t\tprint ('Using serial port {0}'.format(self.serial_port_name))\n\t\t\t# If serial_port_name was also specified, check that it matches the one we found.\n\t\t\tif ('serial_port_name' in kwargs) and (self.serial_port_name != kwargs['serial_port_name']):\n\t\t\t\tprint ('Qontroller.__init__: Warning: Specified serial port ({0}) does not match the one found based on the specified device ID ({1}, {2}). 
Using serial port {2}.'.format(kwargs['serial_port_name'], self.device_id, self.serial_port_name))\n\t\t\n\t\t# Open serial port directly, get device id\n\t\telif 'serial_port_name' in kwargs:\n\t\t\t# Open serial communication\n\t\t\t# This will throw a serial.serialutil.SerialException if busy\n\t\t\tself.serial_port = serial.Serial(self.serial_port_name, self.baudrate, timeout = self.response_timeout)\n\t\t\t\n\t\t\t# Get device ID\n\t\t\t# Transmit our challenge string\n\t\t\t# This repeated try mechanism accounts for serial ports with starting hiccups\n\t\t\ttimed_out = True\n\t\t\tfor t in range(3):\n\t\t\t\t# Clear buffer\n\t\t\t\tself.serial_port.reset_input_buffer()\n\t\t\t\tself.serial_port.reset_output_buffer()\n\t\t\t\t# Send challenge\n\t\t\t\tself.serial_port.write('id?\\n'.encode('ascii'))\n\t\t\t\t# Receive response\n\t\t\t\tstart_time = time.time()\n\t\t\t\t# Wait for first byte to arrive\n\t\t\t\twhile (self.serial_port.in_waiting == 0) and (time.time() - start_time < 0.2):\n\t\t\t\t\tpass\n\t\t\t\t# Read response, ignoring unparsable characters\n\t\t\t\ttry:\n\t\t\t\t\tresponse = self.serial_port.read(size=64).decode('ascii')\n\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\tresponse = \"\"\n\t\t\t\t# Parse it\n\t\t\t\tob = re.match('.*((?:'+ERROR_FORMAT+')|(?:Q\\w+-[0-9a-fA-F\\*]+)).*', response)\n\t\t\t\t# Check whether it's valid\n\t\t\t\tif ob is not None:\n\t\t\t\t\t# Flag that we have broken out correctly\n\t\t\t\t\ttimed_out = False\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\t# Store the parsed value\n\t\t\tif not timed_out:\n\t\t\t\tself.device_id = ob.groups()[0]\n\t\t\t\t# Check if it was an error, in which case clear the stored value but proceed\n\t\t\t\tob = re.match('((?:'+ERROR_FORMAT+')|(?:Q\\w+-\\*+))', self.device_id)\n\t\t\t\tif ob is not None:\n\t\t\t\t\t# It was an error (no ID assigned yet)\n\t\t\t\t\tself.device_id = None\n\t\t\telse:\n\t\t\t\traise RuntimeError('Qontroller.__init__: Error: Unable to communicate with device on port {0} (received response {1}, \"{2}\").'.format(self.serial_port_name, \":\".join(\"{:02x}\".format(ord(c)) for c in response), response.replace('\\n', '\\\\n')))\n\t\telse:\n\t\t\traise AttributeError('At least one of serial_port_name or device_id must be specified on Qontroller initialisation. 
Available serial ports are:\\n serial_port_name = {:}'.format('\\n serial_port_name = '.join([port.device for port in list(list_ports.comports())])))\n\t\t\n\t\t\n\t\t# Establish contents of daisy chain\n\t\ttry:\n\t\t\t# Ask for number of upstream devices, parse it\n\t\t\ttry:\n\t\t\t\tchain = self.issue_command('nupall', operator = '?', target_errors = [0,10,11,12,13,14,15,16], output_regex = '(?:([^:\\s]+)\\s*:\\s*(\\d+)\\n*)*')\n\t\t\texcept:\n\t\t\t\tchain = self.issue_command('nup', operator = '?', target_errors = [0,10,11,12,13,14,15,16], output_regex = '(?:([^:\\s]+)\\s*:\\s*(\\d+)\\n*)*')\n\t\t\t# Further parse each found device into a dictionary\n\t\t\tfor i in range(len(chain)):\n\t\t\t\tob = re.match('([^-]+)-([0-9a-fA-F\\*]+)', chain[i][0])\n\t\t\t\n\t\t\t\tdevice_id = chain[i][0]\n\t\t\t\tdevice_type = ob.groups()[0]\n\t\t\t\tdevice_serial = ob.groups()[1]\n\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tindex = int(chain[i][1])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tindex = -1\n\t\t\t\t\tprint ('Qontroller.__init__: Warning: Unable to determine daisy chain index of device with ID {:}.'.format(device_id))\n\t\t\t\n\t\t\t\t# Scan out number of channels from device type\n\t\t\t\tob = re.match('[^\\d]+(\\d*)[^\\d]*', device_type)\n\t\t\t\n\t\t\t\n\t\t\t\ttry:\n\t\t\t\t\tn_chs = int(ob.groups()[0])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tn_chs = -1\n\t\t\t\t\tprint ('Qontroller.__init__: Warning: Unable to determine number of channels of device at daisy chain index {:}.'.format(index))\n\t\t\t\n\t\t\t\tchain[i] = {\n\t\t\t\t\t'device_id':device_id,\n\t\t\t\t\t'device_type':device_type,\n\t\t\t\t\t'device_serial':device_serial,\n\t\t\t\t\t'n_chs':n_chs,\n\t\t\t\t\t'index':index}\n\t\texcept:\n\t\t\tchain = []\n\t\t\tprint ('Qontroller.__init__: Warning: Unable to determine daisy chain configuration.')\n\t\t\n\t\tself.chain = chain\n\t\n\t\n\tdef __del__(self):\n\t\t\"\"\"\n\t\tDestructor.\n\t\t\"\"\"\n\t\tself.close()\n\t\n\t\n\tdef close(self):\n\t\t\"\"\"\n\t\tRelease resources\n\t\t\"\"\"\n\t\tif self.serial_port is not None and self.serial_port.is_open:\n\t\t\t# Close serial port\n\t\t\tself.serial_port.close()\n\t\n\t\n\tdef transmit (self, command_string):\n\t\t\"\"\"\n\t\tLow-level transmit data method.\n\t\t\"\"\"\n\t\t# Ensure serial port is open\n\t\tif not self.serial_port.is_open:\n\t\t\tself.serial_port.open()\n\t\t\n\t\t# Write to port\n\t\tself.serial_port.write(command_string.encode('ascii'))\n\t\t\n\t\t# Log it\n\t\tself.total_tx_str += command_string\n\t\n\t\n\tdef receive (self):\n\t\t\"\"\"\n\t\tLow-level receive data method which also checks for errors.\n\t\t\"\"\"\n\t\t# Ensure serial port is open\n\t\tif not self.serial_port.is_open:\n\t\t\tself.serial_port.open()\n\t\t\n\t\t# Read from port\n\t\tlines = []\n\t\terrs = []\n\t\t\n\t\t# Check if there's anything in the input buffer\n\t\twhile self.serial_port.in_waiting > 0:\n\t\t\t# Get a line from the receive buffer\n\t\t\tline = str(self.serial_port.readline().decode('ascii'))\n\t\t\t\n\t\t\t# Log it\n\t\t\tself.total_rx_str += line\n\t\t\tself.total_rx_str += '\\n'\n\t\t\t\n\t\t\t# Check if it's an error by parsing it\n\t\t\terr = self.parse_error(line)\n\t\t\tif err is None:\n\t\t\t\t# No error, keep the line\n\t\t\t\tlines.append(line)\n\t\t\telse:\n\t\t\t\t# Line represents an error, add to list\n\t\t\t\terrs.append(err)\n\t\t\n\t\t# Add any errors we found to our log\n\t\tfor err in errs:\n\t\t\tself.log_append(type='err', id=err['id'], ch=err['ch'], desc=err['desc'], raw=err['raw'])\n\t\t\n\t\treturn (lines, 
errs)\n\t\n\t\n\tdef log_append (self, type='err', id='', ch=0, value=0, desc='', raw=''):\n\t\t\"\"\"\n\t\tAppend an event to the log, adding both a calendar- and a process-timestamp.\n\t\t\"\"\"\n\t\t# Append to log fifo\n\t\tself.log.append({'timestamp':time.asctime(), 'proctime':round(time.time()-self.init_time,3), 'type':type, 'id':id, 'ch':ch, 'value':value, 'desc':desc, 'raw':raw})\n\t\t# Send to handler function (if defined)\n\t\tif self.log_handler is not None:\n\t\t\tself.log_handler(self.log[-1])\n\t\t# Send to stdout (if requested)\n\t\tif self.log_to_stdout:\n\t\t\tself.print_log (n = 1)\n\t\n\t\n\tdef print_log (self, n = None):\n\t\t\"\"\"\n\t\tPrint the n last log entries. If n == None, print all log entries.\n\t\t\"\"\"\n\t\tif n is None:\n\t\t\tn = len(self.log)\n\t\t\n\t\tfor i in range(-n,0):\n\t\t\tprint('@ {0: 8.1f} ms, {1} : {2}'.format(1000*self.log[i]['proctime'], self.log[i]['type'], self.log[i]['desc']) )\n\t\n\t\n\tdef parse_error (self, error_str):\n\t\t\"\"\"\n\t\tParse an encoded error (e.g. E02:07) into its code, channel, and human-readable description.\n\t\t\"\"\"\n\t\t# Regex out the error and channel indices from the string\n\t\tob = re.match(ERROR_FORMAT, error_str)\n\t\t\n\t\t# If error_str doesn't match an error, return None\n\t\tif ob is None:\n\t\t\treturn None\n\t\t\n\t\t# Extract the two matched groups (i.e. the error and channel indices)\n\t\terrno,chno = ob.groups()\n\t\terrno = int(errno)\n\t\tchno = int(chno)\n\t\t\n\t\t# Get the error description; if none is defined, mark as unrecognised\n\t\terrdesc = self.error_desc_dict.get(errno, 'Unrecognised error code.').format(ch=chno)\n\t\t\n\t\treturn {'type':'err', 'id':errno, 'ch':chno, 'desc':errdesc, 'raw':error_str}\n\t\n\t\n\tdef wait (self, seconds=0.0):\n\t\t\"\"\"\n\t\tDo nothing while watching for errors on the serial bus.\n\t\t\"\"\"\n\t\tstart_time = time.time()\n\t\twhile time.time() < start_time + seconds:\n\t\t\tself.receive()\n\t\n\t\n\tdef issue_command (self, command_id, ch=None, operator='', value=None, n_lines_requested=2**31, target_errors=None, output_regex='(.*)', special_timeout = None):\n\t\t\"\"\"\n\t\tTransmit command ([command_id][ch][operator][value]) to device, collect response.\n\t\t\n\t\t\tcommand_id\t\t\tCommand header (e.g. 'v' in 'v7=1.0')\n\t\t\tch\t\t\t\t\tChannel index to apply command to (e.g. '7' in 'v7=1.0')\n\t\t\toperator\t\t\tType of command in {?, =} (e.g. '=' in 'v7=1.0')\n\t\t\tvalue\t\t\t\tValue of set command (e.g. 
'1.0' in 'v7=1.0')\n\t\t\tn_lines_requested\tLines of data (not error) to stop after receiving, or timeout\n\t\t\ttarget_errors\t\tError numbers which will be raised as RuntimeError\n\t\t\tspecial_timeout\t\tTimeout to use for this command only (!= self.response_timeout)\n\t\t\"\"\"\n\t\t# Check for previous errors\n\t\tlines,errs = self.receive()\n\t\t\n\t\t# Transmit command\n\t\tif ch is None:\n\t\t\tch = ''\n\t\tif value is None:\n\t\t\tvalue = ''\n\t\ttx_str = '{0}{1}{2}{3}'.format(command_id, ch, operator, value)\n\t\tself.transmit(tx_str+'\\n')\n\t\t\n\t\t# Log it\n\t\tself.log_append(type= 'set' if operator == '=' else 'get', value=value, id=command_id, ch=ch, desc='Command: \"'+tx_str+'\".')\n\t\t\n\t\t# Receive response\n\t\tlines = []\n\t\terrs = []\n\t\tif target_errors is None:\n\t\t\ttarget_errors = []\n\t\t# Use the per-command timeout if one was specified\n\t\tif special_timeout is None:\n\t\t\tspecial_timeout = self.response_timeout\n\t\tstart_time = time.time()\n\t\tlast_message_time = start_time\n\t\t\n\t\twhile (True):\n\t\t\t\t\n\t\t\t\t# Break conditions\n\t\t\t\tif (RESPONSE_OK in lines):\n\t\t\t\t\tbreak\n\t\t\t\telif (len(lines) >= n_lines_requested):\n\t\t\t\t\tbreak\n\t\t\t\telif not all([err['id'] not in target_errors for err in errs]):\n\t\t\t\t\tbreak\n\t\t\t\telif (time.time() - start_time > special_timeout):\n\t\t\t\t\tif (time.time() - last_message_time > self.inter_response_timeout):\n\t\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\t# Receive data\n\t\t\t\trec_lines,rec_errs = self.receive()\n\t\t\t\t\n\t\t\t\t# Update the last time a message was received\n\t\t\t\t# We won't proceed now until self.inter_response_timeout has elapsed\n\t\t\t\tif len(rec_lines) + len(rec_errs) > 0:\n\t\t\t\t\tlast_message_time = time.time()\n\t\t\t\t\t\n\t\t\t\t# Integrate received lines and errors\n\t\t\t\tlines.extend(rec_lines)\n\t\t\t\terrs.extend(rec_errs)\n\t\t\t\t\n\t\t\t\t# Check whether we have received a serial comms error (E15)\n\t\t\t\tif any([err['id'] == 15 for err in errs]):\n\t\t\t\t\t# If we did, we should try issuing the command again, recursively\n\t\t\t\t\treturn self.issue_command (command_id, ch, operator, value, n_lines_requested, target_errors, output_regex, special_timeout)\n\t\t\t\t\n\t\t\t\t# Check whether we have received a fatal error\n\t\t\t\tfatal_errs = [err for err in errs if err['id'] in target_errors]\n\t\t\t\tif fatal_errs:\n\t\t\t\t\terr = fatal_errs[0]\n\t\t\t\t\traise RuntimeError('Received targeted error code {0}, \"{1}\". Log is: \\n{2}.'.format(err['id'], err['desc'], self.log))\n\t\t\n\t\t# We timed out.\n\t\tif len(lines) == 0 and len(errs) == 0:\n\t\t\tif operator == '?':\n\t\t\t\t# If we are looking for a return value, raise an error\n\t\t\t\traise RuntimeError ('Response to command {0} timed out after {1} ms.'.format(tx_str, 1000*(last_message_time - start_time)))\n\t\t\telse:\n\t\t\t\t# If we are setting something, just warn the user\n\t\t\t\tprint('Qontroller.issue_command: Warning: Response to command {0} timed out after {1:.3f} ms.'.format(tx_str, 1000*(last_message_time - start_time)))\n\t\t\n\t\t# Parse the output\n\t\tvalues = []\n\t\tfor line in lines:\n\t\t\top = re.match(output_regex, line)\n\t\t\tif op is None:\n\t\t\t\tvalue = []\n\t\t\telse:\n\t\t\t\tvalue = op.groups()\n\t\t\tvalues.append(value)\n\t\t\n\t\t\n\t\tself.log_append(type= 'rcv', value=None, id=command_id, ch=ch, desc='Received: \"'+str(values)+'\".')\n\t\t\n\t\treturn values\n\t\n\t\n\tdef __getattr__(self, attr):\n\t\t\"\"\"\n\t\tAllow convenience attribute access for certain parameters\n\t\t\"\"\"\n\t\tif (attr in ['firmware', 'vfull', 'ifull', 'lifetime']):\n\t\t\treturn self.issue_command (command_id=attr, ch=None, operator='?', n_lines_requested=1)[0][0]\n\t\telse:\n\t\t\treturn object.__getattr__(self, attr)\n\n\n\nclass ChannelVector(object):\n\t\"\"\"\n\tCustom list class which has a fixed length but mutable (typed) elements, and which phones home when its elements are read or modified.\n\t\"\"\"\n\t\n\tset_handle = None\n\tget_handle = None\n\tvalid_types = (int,float)\n\t\n\tdef __init__(self, base_list):\n\t\tself.list = base_list\n\n\t\n\tdef __len__(self):\n\t\treturn len(self.list)\n\t\t\n\t\n\tdef __getitem__(self, key):\n\t\tif isinstance(key, slice):\n\t\t\t# Handle slice key\n\t\t\treturn [self[k] for k in range(len(self))[key.start:key.stop:key.step]]\n\t\telse:\n\t\t\t# Handle normal key\n\t\t\tif self.get_handle is not None:\n\t\t\t\tget_val = self.get_handle (key, self.list[key])\n\t\t\t\tif get_val is not None:\n\t\t\t\t\tself.list[key] = get_val\n\t\t\treturn self.list[key]\n\t\t\n\t\n\tdef __setitem__(self, key, value):\n\t\tif type(value) not in self.valid_types:\n\t\t\traise TypeError('Attempt to set value to type {0} is forbidden. 
Valid types are {1}.'.format(type(value), self.valid_types))\n\t\tif isinstance(key, slice):\n\t\t\t# Handle slice key\n\t\t\tfor k in range(len(self))[key.start:key.stop:key.step]:\n\t\t\t\tself[k] = value\n\t\telse:\n\t\t\t# Handle normal key\n\t\t\tif self.set_handle is not None:\n\t\t\t\tself.set_handle (key, value)\n\t\t\tself.list[key] = value\n\t\t\n\t\n\tdef __iter__(self):\n\t\treturn iter(self.list)\n\t\n\t\n\tdef __repr__(self):\n\t\treturn repr([self[i] for i in range(len(self))])\n\n\n\nclass QXOutput(Qontroller):\n\n\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(*args, **kwargs)\n\n\t\tself.n_chs = 0\n\t\tself.v_full = 0\n\t\tself.v = None\t\t\t# Channel voltages (direct access)\n\t\tself.i = None\t\t\t# Channel currents (direct access)\n\t\tself.vmax = None\t\t# Channel maximum voltages (direct access)\n\t\tself.imax = None\t\t# Channel maximum currents (direct access)\n\t\t\n\t\t# Get our full-scale voltage (VFULL)\n\t\ttry:\n\t\t\tself.v_full = float(self.issue_command('vfull', operator = '?', n_lines_requested = 1, output_regex='(?:\\+|-|)([\\d\\.]+) V')[0][0])\n\t\texcept Exception:\n\t\t\traise RuntimeError(\"Unable to obtain VFULL from qontroller on port {:}.\".format(self.serial_port_name))\n\t\t\n\t\t# Get our number of channels\n\t\ttry:\n\t\t\t# See if it's in the list of kwargs\n\t\t\tself.n_chs = kwargs['n_chs']\n\t\t\tif self.n_chs is None or self.n_chs <= 0:\n\t\t\t\traise KeyError()\n\t\texcept KeyError:\n\t\t\t# If not in kwargs, try to get it from the chain\n\t\t\ttry:\n\t\t\t\tself.n_chs = sum([device['n_chs'] for device in self.chain])\n\t\t\texcept KeyError:\n\t\t\t\t# If not, just ask the top device how many ports it's got\n\t\t\t\ttry:\n\t\t\t\t\tself.n_chs = int(self.issue_command('nchan', operator = '?', n_lines_requested = 1, target_errors = [10], output_regex = '(\\d+)\\n')[0][0])\n\t\t\t\texcept Exception:\n\t\t\t\t\t# If not, just fall back to a default value\n\t\t\t\t\tself.n_chs = 8\n\t\t\t\t\tprint (\"QXOutput.__init__: Warning: Failed to obtain number of daisy-chained channels automatically. Include this as n_chs argument on initialisation to workaround.\")\n\t\t\n\t\t\n\t\t\n\t\t# Set up output direct access\n\t\t# These initialise themselves when they are first used (i.e. 
the 0 init is OK)\n\t\t\n\t\t# Voltage\n\t\tself.v = ChannelVector([0] * self.n_chs)\n\t\tself.v.set_handle = lambda ch,val: self.set_value(ch,'v',val)\n\t\tself.v.get_handle = lambda ch,val: self.get_value(ch,'v')\n\t\t\n\t\tself.vmax = ChannelVector([0] * self.n_chs)\n\t\tself.vmax.set_handle = lambda ch,val: self.set_value(ch,'vmax',val)\n\t\tself.vmax.get_handle = lambda ch,val: self.get_value(ch,'vmax')\n\t\t\n\t\t# Current\n\t\tself.i = ChannelVector([0] * self.n_chs)\n\t\tself.i.set_handle = lambda ch,val: self.set_value(ch,'i',val)\n\t\tself.i.get_handle = lambda ch,val: self.get_value(ch,'i')\n\t\t\n\t\tself.imax = ChannelVector([0] * self.n_chs)\n\t\tself.imax.set_handle = lambda ch,val: self.set_value(ch,'imax',val)\n\t\tself.imax.get_handle = lambda ch,val: self.get_value(ch,'imax')\n\t\t\n\t\tself.initialised = True\n\t\n\t\n\tdef set_value (self, ch, para='v', new=0):\n\t\tself.issue_command(para, ch=ch, operator='=', value=new)\n\t\n\tdef get_value (self, ch, para='v'):\n\t\tresult = self.issue_command(para, ch = ch, operator = '?', n_lines_requested = 1, output_regex = '((?:\\+|-){0,1}[\\d\\.]+)')\n\t\tif len(result) > 0:\n\t\t\tif len(result[0]) > 0:\n\t\t\t\treturn float(result[0][0])\n\t\treturn None\n\t\n\tdef get_all_values (self, para='v'):\n\t\tresult = self.issue_command(para+'all', operator = '?', n_lines_requested = self.n_chs, output_regex = '(?:\\+|-|)([\\d\\.]+)', special_timeout = 2*self.response_timeout)\n\t\tif len(result) > 0:\n\t\t\tif len(result[0]) > 0:\n\t\t\t\tresult = [float(result[i][0]) for i in range(len(result))]\n\t\t\t\treturn result\n\t\treturn None\n\t\n\tdef __setattr__(self, attr, val):\n\t\t# Prevent overwrite of internal variables\n\t\ttry:\n\t\t\tif (self.initialised is True and attr in ['v', 'i', 'vmax', 'imax', 'v_full', 'n_chs']):\n\t\t\t\tprint (\"QXOutput.__setattr__: Warning: Overwriting of '{:}' is forbidden.\".format(attr) )\n\t\t\t\treturn\n\t\texcept AttributeError:\n\t\t\t# If we are still initialising, carry on setting variable\n\t\t\tpass\n\t\t\n\t\tobject.__setattr__(self, attr, val)\n","sub_path":".ipynb_checkpoints/ZeroLevel_Qontrol-checkpoint.py","file_name":"ZeroLevel_Qontrol-checkpoint.py","file_ext":"py","file_size_in_byte":22559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577697015","text":"import pygame\nimport param as p\nfrom characters import *\nfrom utils import *\n\n\ndef start(screen, clock, bird):\n # draw\n screen.fill(p.background)\n bird.show(screen)\n text = 'Flappy Circular Bird Floating in Vacuum'\n showText(screen, text, p.title_pos, p.font, 50, (0, 0, 0))\n text = 'press \"Space\" to play'\n showText(screen, text, p.subtitle_pos, p.font, 30, (0, 0, 0))\n pygame.display.update()\n\n # detect operation\n events = pygame.event.get()\n for e in events:\n if e.type == pygame.QUIT:\n return 'quit'\n elif e.type == pygame.KEYDOWN and e.key == pygame.K_SPACE:\n print('START!!!')\n return 'game'\n\n # wait\n clock.tick_busy_loop(80)\n return 'start'\n\n\ndef game(screen, clock, bird, obs_list, frame, scoreboard):\n # calculate score\n for obs in obs_list:\n if obs.score():\n scoreboard.gain_point()\n\n # draw\n screen.fill(p.background)\n bird.show(screen)\n for obs in obs_list:\n obs.show(screen)\n scoreboard.show(screen)\n pygame.display.update()\n\n # detect collision\n collision = False\n collision = collision or CollisionDetector.detect(bird, frame)\n for obs in obs_list:\n collision = collision or CollisionDetector.detect(bird, obs)\n if 
collision:\n        print('COLLIDE!!!')\n        scoreboard.save_score()\n        return 'fail'\n\n    # detect operation\n    events = pygame.event.get()\n    for e in events:\n        if e.type == pygame.QUIT:\n            return 'quit'\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_SPACE:\n            bird.flap()\n            print('FLAP!!!')\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n            print('PAUSE!!!')\n            return 'pause'\n\n    # move\n    dT = clock.get_time() * .06\n    if ObsGenerator.needNewObs(obs_list):\n        ObsGenerator.getNewObs(obs_list)\n    bird.move(dT)\n    for obs in obs_list:\n        obs.move(dT)\n\n    # wait\n    clock.tick_busy_loop(80)\n    return 'game'\n\n\ndef pause(screen, clock):\n    # draw\n    # TODO: Draw a circle with play symbol\n    pos = (int(0.5 * p.size[0]), int(0.5 * p.size[1]))\n    pygame.draw.circle(screen, p.background, pos, p.pause_rad, 0)\n    pygame.draw.circle(screen, (0, 0, 0), pos, p.pause_rad, 5)\n    pointlist = ((int(pos[0] - p.pause_lwidth), int(pos[1] - 0.5 * p.pause_hight)),\n                 (int(pos[0] - p.pause_lwidth), int(pos[1] + 0.5 * p.pause_hight)),\n                 (int(pos[0] + p.pause_rwidth), int(pos[1])))\n    pygame.draw.polygon(screen, (0, 0, 0), pointlist, 0)\n    pygame.display.update()\n\n    # detect operation\n    events = pygame.event.get()\n    for e in events:\n        if e.type == pygame.QUIT:\n            return 'quit'\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_SPACE:\n            print('CONTINUE!!!')\n            return 'game'\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n            print('CONTINUE!!!')\n            return 'game'\n\n    # wait\n    clock.tick_busy_loop(80)\n    return 'pause'\n\n\ndef fail(screen, clock, bird, obs_list, scoreboard):\n    # draw\n    text = 'You Have Failed With a Score of {}'.format(scoreboard.score)\n    showText(screen, text, p.title_pos, p.font, 50, (255, 0, 0))\n    text = 'press \"Space\" to play again'\n    showText(screen, text, p.subtitle_pos, p.font, 30, (0, 0, 0))\n    pygame.display.update()\n\n    # detect operation\n    events = pygame.event.get()\n    for e in events:\n        if e.type == pygame.QUIT:\n            return 'quit'\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_SPACE:\n            init(bird, obs_list, scoreboard)\n            print('START!!!')\n            return 'game'\n        elif e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:\n            init(bird, obs_list, scoreboard)\n            print('MENU!!!')\n            return 'start'\n\n    # wait\n    clock.tick_busy_loop(80)\n    return 'fail'\n","sub_path":"scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}\n+{"seq_id":"436188265","text":"from spacy import displacy\nimport streamlit as st\nimport requests\nimport json\nimport pandas as pd\nimport numpy as np\nimport ast\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport plotly.express as px\n\n\ndef query_df(df, speaker, speech):\n    \"\"\"Function to query a dataframe with the given parameters\"\"\"\n    return df.query(\"speaker == @speaker and text == @speech\")\n\n\ndef display_ner_data(df):\n    st.markdown(\"# Named Entity Recognition (NER)\")\n\n    st.markdown(\"\"\"\n    NER was used to extract the various entities involved in the discussion.\n    We finetuned 2 models (`xlm-roberta-base` and `xlm-roberta-base-ontonotes5`) using manually annotated hansard data to extract the following entity types `PERSON, NORP, FAC, ORG, GPE, LAW, DATE`.\n\n    Models were evaluated on Singapore Hansard NER Dataset validation set.\n\n    | Model                                     | F1 Score | Precision | Recall | Remark |\n    |-------------------------------------------|----------|-----------|--------|------------\n    | asahi417/tner-xlm-roberta-base-ontonotes5 | 0.343    | 
0.274 | 0.458 |Pretrained model without finetuning|\n | xlm-roberta-base-sh-ner | 0.786 | 0.742 | 0.837 | Pretrained xlm-roberta-base model finetuned on the manually annotated Singapore hansard dataset|\n | xlm-roberta-base-ontonotes5-sh-ner | **0.819**| **0.778** | **0.864** | Pretrained xlm-roberta-base-ontonotes5 model finetuned on the manually annotated Singapore hansard dataset|\n\n ### View our model results using the buttons below:\n \"\"\")\n col1, col2 = st.beta_columns([.35, 1])\n\n with col1:\n speakers = df['speaker'].unique()\n speaker_choice = st.selectbox('Select speaker:', speakers, index=47)\n with col2:\n speeches = df['text'].loc[df[\"speaker\"] == speaker_choice].unique()\n speech_choice = st.selectbox('Select Speech', speeches, index=2)\n\n df_filtered = query_df(df, speaker_choice, speech_choice)\n\n doc = {\"text\": df_filtered['text'].values[0],\n \"ents\": ast.literal_eval(df_filtered['entities'].values[0])}\n HTML_WRAPPER = \"\"\"
{}
\"\"\"\n html = displacy.render(doc, style=\"ent\", manual=True)\n html = html.replace(\"\\n\\n\", \"\\n\")\n st.write(HTML_WRAPPER.format(html), unsafe_allow_html=True)\n\n sequence = \"\"\"Enter some Parliamentary Hansard text here to extract various entities like names (Heng Swee Kwat, K Shanmugam),\n places(Singapore, Malaysia), dates (12th May, 2012-02-03), organisations (SAF, MOH, MHA, Ministry of Finance),\n laws (Adoption of Children Act, Work Injury Compensation Act 2019)\n \"\"\"\n\n st.markdown(\"## **Live Inference**\")\n st.write(\"Please note that loading may take upto 1 min due to deployment cost contraints.\")\n st.markdown(\"### Try out our NER Model `xlm-roberta-base-ontonotes5-sh-ner`\")\n text_box = st.text_area(\"Enter some text for NER\", sequence)\n\n HTML_WRAPPER = \"\"\"
{}
\"\"\"\n\n if text_box:\n url = 'https://hansard-nlp-api-l6lhxur2aq-uc.a.run.app/ner/'\n req_body = json.dumps({'hansard_text': text_box, 'output': {}})\n response = requests.post(url, data=req_body)\n response = json.loads(response.text)['output']\n\n html = displacy.render(response, style=\"ent\", manual=True)\n html = html.replace(\"\\n\\n\", \"\\n\")\n st.write(HTML_WRAPPER.format(html), unsafe_allow_html=True)\n","sub_path":"ner_info.py","file_name":"ner_info.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"499966014","text":"# Copyright 2015 Red Hat, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport yaml\n\nfrom tripleo_common.core import constants\nfrom tripleo_common.core import exception\n\n\ndef add_key_prefix(source):\n result = dict()\n for keyname, value in source.items():\n new_keyname = \"%s%s\" % (constants.OBJECT_META_KEY_PREFIX, keyname)\n result[new_keyname] = value\n return result\n\n\ndef remove_key_prefix(source):\n result = dict()\n for keyname, value in source.items():\n new_keyname = keyname.replace(constants.OBJECT_META_KEY_PREFIX, '')\n result[new_keyname] = value\n return result\n\n\ndef add_file_metadata(plan_files):\n cm = {k: v for (k, v) in plan_files.items()\n if v.get('meta', {}).get('file-type') == 'capabilities-map'}\n # if there is more than one capabilities-map file, throw an exception\n # if there is a capabilities-map file, then process it and set metadata\n # in files found\n if len(cm) > 1:\n raise exception.TooManyCapabilitiesMapFilesError()\n if len(cm) == 1:\n mapfile = yaml.load(list(cm.items())[0][1]['contents'])\n\n # identify the root template\n if mapfile['root_template']:\n if plan_files[mapfile['root_template']]:\n # if the file exists in the plan and has meta, update it\n # otherwise add meta dict\n if 'meta' in plan_files[mapfile['root_template']]:\n plan_files[mapfile['root_template']]['meta'].update(\n dict(constants.ROOT_TEMPLATE_META)\n )\n else:\n plan_files[mapfile['root_template']]['meta'] =\\\n dict(constants.ROOT_TEMPLATE_META)\n\n # identify all environments\n for topic in mapfile['topics']:\n for eg in topic['environment_groups']:\n for env in eg['environments']:\n if 'meta' in plan_files[env['file']]:\n plan_files[env['file']]['meta'].update(\n dict(constants.ENVIRONMENT_META)\n )\n else:\n plan_files[env['file']]['meta'] =\\\n dict(constants.ENVIRONMENT_META)\n\n # identify the root environment\n if mapfile['root_environment']:\n if plan_files[mapfile['root_environment']]:\n # if the file exists in the plan and has meta, update it\n # otherwise add meta dict\n if 'meta' in plan_files[mapfile['root_environment']]:\n plan_files[mapfile['root_environment']]['meta'].update(\n dict(constants.ROOT_ENVIRONMENT_META)\n )\n else:\n plan_files[mapfile['root_environment']]['meta'] =\\\n dict(constants.ROOT_ENVIRONMENT_META)\n return 
plan_files\n","sub_path":"tripleo_common/utils/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"304292047","text":"import unittest\n\nfrom dateutil.parser import parse\nfrom google.appengine.ext import testbed\n\nfrom ewentts.models import Event, User\nfrom ewentts.utils import validate_picture_url, return_event, return_user, validate_location\n\n\nclass RequestDecodedTokenTestCase(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\nclass RequestUIDTestCase(unittest.TestCase):\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\nclass ValidatePictureUrlTest(unittest.TestCase):\n def test_validate_string_is_picture(self):\n picture_url1 = \"https://c1.staticflickr.com/2/1520/24330829813_944c817720_b.jpg\"\n picture_url2 = \"https://www.gettyimages.ie/gi-resources/images/Homepage/Hero/UK/CMS_Creative_164657191_Kingfisher.jpg\"\n picture_url3 = \"https://cdn.pixabay.com/photo/2016/06/18/17/42/image-1465348_960_720.jpg\"\n picture_url4 = \"https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/280px-PNG_transparency_demonstration_1.png\"\n\n self.assertEqual(validate_picture_url(picture_url1), True)\n self.assertEqual(validate_picture_url(picture_url2), True)\n self.assertEqual(validate_picture_url(picture_url3), True)\n self.assertEqual(validate_picture_url(picture_url4), True)\n\n def test_non_picture_str_raises_error(self):\n picture_url1 = \"abc\"\n picture_url2 = \"www.picture.cz\"\n picture_url3 = \"picture.img\"\n picture_url4 = \".img\"\n picture_url5 = \"www.picture.cz/1234.img/123\"\n\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url1)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url2)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url3)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url4)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url5)\n\n def test_other_datatype_raise_error(self):\n picture_url1 = 1\n picture_url2 = None\n picture_url3 = [\"picture\", \"img\"]\n\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url1)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url2)\n with self.assertRaises(ValueError):\n validate_picture_url(picture_url3)\n\n\nclass ReturnEventTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n pass\n\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.setup_env(overwrite=True)\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n self.user = User(user_names=[\"User\", \"Name\"],\n id=\"ab11\",\n profile_picture_url=\"https://c1.staticflickr.com/2/1520/24330829813_944c817720_b.jpg\",\n user_email=\"email\")\n\n self.event1 = Event(event_name=\"Event Name\",\n status=\"future\",\n start_datetime=parse(\"2100-10-03T10:17:30\"),\n end_datetime=parse(\"2100-11-04T10:17:30\"),\n latitude=49.395470,\n longitude=15.590950,\n event_picture_url=\"https://i.imgur.com/nqTGipe.jpg\",\n private = True,\n organiser = self.user.key)\n\n self.event1.put()\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def test_return_event_woks(self):\n event = return_event(self.event1.key.id())\n self.assertEqual(event, self.event1)\n\n def test_return_fails_wrong_id(self):\n with self.assertRaises(Exception):\n return_event(9887)\n\n\nclass 
ReturnUserTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n pass\n\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.setup_env(overwrite=True)\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n self.user = User(user_names=[\"User\", \"Name\"],\n id=\"ab11\",\n profile_picture_url=\"https://c1.staticflickr.com/2/1520/24330829813_944c817720_b.jpg\",\n user_email=\"email\")\n\n self.user.put()\n\n def tearDown(self):\n self.testbed.deactivate()\n\n def test_return_user_woks(self):\n user = return_user(self.user.key.id())\n\n self.assertEqual(user, self.user)\n\n def test_return_user_wrong_id(self):\n with self.assertRaises(Exception):\n return_user(\"9885\")\n\n\nclass ValidateLocationTest(unittest.TestCase):\n def test_validate_tuple_is_location(self):\n location1 = [0, 0]\n location2 = [90, 180]\n location3 = [-90, -180]\n location4 = [-90, 180]\n location5 = [-40, 20]\n\n self.assertEqual(validate_location(*location1), True)\n self.assertEqual(validate_location(*location2), True)\n self.assertEqual(validate_location(*location3), True)\n self.assertEqual(validate_location(*location4), True)\n self.assertEqual(validate_location(*location5), True)\n\n def test_wrong_location_raises_error(self):\n location1 = [0, 181]\n location2 = [90, -181]\n location3 = [-91, -180]\n location4 = [91, 180]\n location5 = [1000, 20]\n location6 = [100, 200]\n\n with self.assertRaises(ValueError):\n validate_location(*location1)\n with self.assertRaises(ValueError):\n validate_location(*location2)\n with self.assertRaises(ValueError):\n validate_location(*location3)\n with self.assertRaises(ValueError):\n validate_location(*location4)\n with self.assertRaises(ValueError):\n validate_location(*location5)\n with self.assertRaises(ValueError):\n validate_location(*location6)\n\n def test_other_datatype_raise_error(self):\n location1 = 1\n location2 = None\n location3 = [\"picture\", \"img\"]\n location4 = [\"10\", \"10\"]\n\n with self.assertRaises(Exception):\n validate_location(*location1)\n with self.assertRaises(Exception):\n validate_location(*location2)\n with self.assertRaises(Exception):\n validate_location(*location3)\n with self.assertRaises(Exception):\n validate_location(*location4)","sub_path":"tests/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357998066","text":"import tkinter as tk\nfrom tkinter import *\nfrom tkinter import filedialog\n\nimport os\nimport shutil\nimport sqlite3\n\n\n\nroot = Tk()\nroot.title('Choose directory')\nroot.geometry('{}x{}'.format(670,250))\n\nsourcePath=StringVar()\ndestPath=StringVar()\n\n\nbtn1 = Button(root, text=\"Source\",width=10,height=1,font=('Times New Roman',13), command=lambda: askdirectory1())\nbtn1.grid(row=0, column=0, padx=(20,0), pady=(100,0))\n\nbtn2 = Button(root, text=\"Destination\",width=10,height=1,font=('Times New Roman',13), command=lambda: askdirectory2())\nbtn2.grid(row=1, column=0, padx=(20,0), pady=(20,0))\n\nbtn3 = Button(root, text=\"Move files\",width=15,height=1,font=('Times New Roman',13), command=lambda: getTxtFile())\nbtn3.grid(row=2, column=1, padx=(0,0), pady=(10,20), sticky=SE)\n\n\ntxt1 = Text(root, height=1, width=55)\ntxt1.grid(row=0,column=1, padx=(20,0), pady=(100,0))\n\ntxt2 = Text(root, height=1, width=55)\ntxt2.grid(row=1,column=1, padx=(20,0), pady=(20,0))\n\n\n\ndef askdirectory1():\n 
filepath=filedialog.askdirectory()\n    sourcePath.set(filepath)\n    txt1.insert(END, sourcePath.get())\n\ndef askdirectory2():\n    filepath=filedialog.askdirectory()\n    destPath.set(filepath)\n    txt2.insert(END, destPath.get())\n\n\ndef getTxtFile():\n    source = sourcePath.get()\n    if source==\"\":\n        print(\"Please choose source directory\")\n        return\n    destination = destPath.get()\n    if destination==\"\":\n        print(\"Please choose destination directory\")\n        return\n    files = os.listdir(source)\n    \n    for i in files:\n        name, ext = os.path.splitext(i)\n        if ext == \".txt\":\n            abspath = os.path.join(source, i)\n            shutil.move(abspath, destination)\n\n\n    conn = sqlite3.connect(\"test.db\")\n\n    with conn:\n        cur = conn.cursor()\n        cur.execute(\"CREATE TABLE IF NOT EXISTS tbl_info( \\\n                    ID INTEGER PRIMARY KEY AUTOINCREMENT, \\\n                    col_txt TEXT, \\\n                    col_date_time)\")\n        conn.commit()\n    conn.close()\n\n\n\n    conn = sqlite3.connect(\"test.db\")\n\n    with conn:\n        files = os.listdir(destination)\n        for file in files:\n            name, ext = os.path.splitext(file)\n            if ext == \".txt\":\n                filepath = os.path.join(destination, file)\n                time = os.path.getmtime(filepath)\n                \n                cur = conn.cursor()\n                cur.execute(\"INSERT INTO tbl_info (col_txt, col_date_time) VALUES (?,?)\", (file,time))\n                conn.commit()\n\n        cur.execute(\"SELECT col_txt, col_date_time FROM tbl_info\")\n        txtList = cur.fetchall()\n        print (txtList)\n    conn.close()\n    \n\n    \n\n\nroot.mainloop()\n\n\n\n","sub_path":"drill_page123.py","file_name":"drill_page123.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}\n+{"seq_id":"318364399","text":"import logging\nimport pathlib\nimport random\nfrom typing import Tuple, Generator\n\nfrom keras.utils import Sequence\n\nfrom keras.preprocessing.sequence import TimeseriesGenerator as KerasTimeSeriesGenerator\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold, train_test_split\n\nMAX_TRACKS = 2500\n\ndef get_data(\n    dataset_path: pathlib.Path,\n    seed: int = 137,\n    test_size: float = 0.3\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n\n    logging.info(f\"Reading data from {dataset_path}\")\n    train_df = pd.read_csv(dataset_path.as_posix())\n\n    # train_df.drop(columns=['index', 'event_id'], inplace=True)\n    train_df = train_df.astype({'signal': int})\n    train_df, test_df = train_test_split(\n        train_df,\n        test_size=test_size,\n        random_state=seed\n    )\n    logging.debug(f\"Train set: {train_df.shape} Test set: {test_df.shape}\")\n    logging.debug(f\"Train columns: {train_df.columns.values}\")\n    return train_df, test_df\n\n\ndef k_folds(data: pd.DataFrame, y_column='signal', n_splits=3, shuffle=False) \\\n        -> Generator[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray], None, None]:\n    kf = KFold(n_splits=n_splits, shuffle=shuffle)\n\n    for train_index, test_index in kf.split(data):\n        logging.info(\"TRAIN: %s TEST: %s\", train_index, test_index)\n\n        X: pd.DataFrame = data.drop([y_column], axis=1)\n        y: pd.DataFrame = data[[y_column]]\n        X_train, X_test = X.loc[train_index].values, X.loc[test_index].values\n        y_train, y_test = y.loc[train_index].values, y.loc[test_index].values\n\n        yield X_train, y_train, X_test, y_test\n\n\ndef shuffle_arrays(x, y):\n    s = np.arange(x.shape[0])\n    np.random.shuffle(s)\n    return x[s], y[s]\n\n\ndef load_events(dataset_path: pathlib.Path):\n    data_df, _ = get_data(dataset_path, test_size=0)\n    true_events = data_df[data_df['event_id'] != -999].copy()\n\n    events = true_events.drop(['index'], axis=1)\n    events = events.sort_values('event_id')\n    
events['track_id'] = events.groupby('event_id').cumcount()\n\n index = pd.MultiIndex.from_arrays([events['event_id'], events['track_id']])\n\n new_events = events.set_index(index)\n new_events = new_events.sort_index()\n new_events = new_events.drop(['event_id', 'track_id'], axis='columns')\n\n event_idx = pd.Index(events['event_id'].unique())\n track_idx = pd.RangeIndex(0, MAX_TRACKS)\n\n new_index = pd.MultiIndex.from_product([event_idx, track_idx])\n\n final_events = new_events.reindex(index=new_index)\n fake_events = data_df[data_df['event_id'] == -999][\n ['X', 'Y', 'Z', 'TX', 'TY', 'chi2', 'signal']].copy()\n\n fake_events = fake_events.sample(len(fake_events))\n\n wrong_inputs = fake_events.sample(\n len(np.isnan(final_events)), replace=False\n ).values\n\n new_full_values = np.where(\n np.isnan(final_events.values),\n wrong_inputs,\n final_events.values\n )\n\n new_values_df = pd.DataFrame(\n data=new_full_values,\n index=final_events.index,\n columns=final_events.columns\n )\n\n return new_values_df\n\n\nclass SequenceGenerator(Sequence):\n \"\"\"Utility class for generating batches of temporal data.\n Similar to keras.preprocessing.sequence.TimeseriesGenerator\n\n This class takes in a sequence of data-points gathered at\n equal intervals, along with time series parameters such as\n stride, length of history, etc., to produce batches for\n training/validation.\n\n # Arguments\n data: Indexable generator (such as list or Numpy array)\n containing consecutive data points (timesteps).\n The data should be at 2D, and axis 0 is expected\n to be the time dimension.\n targets: Targets corresponding to timesteps in `data`.\n It should have same length as `data`.\n length: Length of the output sequences (in number of timesteps).\n sampling_rate: Period between successive individual timesteps\n within sequences. For rate `r`, timesteps\n `data[i]`, `data[i-r]`, ... 
`data[i - length]`\n            are used to create a sample sequence.\n        stride: Period between successive output sequences.\n            For stride `s`, consecutive output samples would\n            be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.\n        start_index, end_index: Data points earlier than `start_index`\n            or later than `end_index` will not be used in the output sequences.\n            This is useful to reserve part of the data for test or validation.\n        shuffle: Whether to shuffle output samples,\n            or instead draw them in chronological order.\n        reverse: Boolean: if `true`, timesteps in each output sample will be\n            in reverse chronological order.\n        batch_size: Number of timeseries samples in each batch\n            (except maybe the last one).\n    \"\"\"\n\n    def __init__(self, data, targets, length,\n                 sampling_rate=1,\n                 stride=1,\n                 start_index=0,\n                 end_index=None,\n                 shuffle=False,\n                 reverse=False,\n                 batch_size=128,\n                 verbose=False\n                 ):\n\n        self.data = data\n        self.targets = targets\n        self.length = length\n        self.sampling_rate = sampling_rate\n        self.stride = stride\n        self.start_index = start_index\n        if end_index is None:\n            end_index = len(data) - 1\n        self.end_index = end_index\n        self.shuffle = shuffle\n        self.reverse = reverse\n        self.batch_size = batch_size\n        self.verbose = verbose\n\n    def __len__(self):\n        return int(np.ceil(\n            (self.end_index - self.start_index) /\n            (self.batch_size * self.stride)))\n\n    def _empty_batch(self, num_rows):\n        samples_shape = [num_rows, self.length // self.sampling_rate]\n        samples_shape.extend(self.data.shape[1:])\n        targets_shape = [num_rows, self.length // self.sampling_rate]\n        targets_shape.extend(self.targets.shape[1:])\n        return np.empty(samples_shape), np.empty(targets_shape)\n\n    def __getitem__(self, index):\n        i = self.start_index + self.batch_size * self.stride * index\n        if self.verbose:\n            logging.debug(f\"i: {i}\")\n            logging.debug(\n                f\"{i, (i + self.batch_size * self.stride, self.end_index), self.stride}\")\n        rows = np.arange(i, min(i + self.batch_size *\n                                self.stride, self.end_index), self.stride)\n        if self.verbose:\n            logging.debug(f\"rows: {rows}\")\n\n        samples, targets = self._empty_batch(len(rows))\n        for j, row in enumerate(rows):\n            indices = np.arange(rows[j], rows[j] + self.length, self.sampling_rate)\n            if self.shuffle:\n                # indices = (indices)\n                np.random.shuffle(indices)\n            if self.verbose:\n                logging.debug(f\"indices: {indices}\")\n            samples[j] = self.data[indices]\n            targets[j] = self.targets[indices]\n        if self.reverse:\n            return samples[:, ::-1, ...], targets\n        return samples, targets\n\n# todo def balance_dataset(x,y)\n\n# todo def metrics_report(model, x,y)\n\n# todo def history_plot(model)","sub_path":"ml_hep_tracking/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}\n+{"seq_id":"103528655","text":"import time\n\nclass Movie():\n    def __init__(self,movie_title,genre, movie_storyline,poster_image,trailer_youtube,releasedate,actors,directors,wikilink):\n        self.title = movie_title\n        self.genre = genre\n        self.storyline = movie_storyline\n        self.poster_image_url = poster_image\n        self.trailer_youtube_url=trailer_youtube\n        self.releasedate = releasedate\n        self.actors = actors\n        self.directors = directors\n        self.wiki_link = wikilink \n        self.release_year = releasedate[-4:]\n        print(releasedate)\n        print(releasedate[-4:])\n","sub_path":"Udacity/FSWD/001 
MovieTrailerWebsite/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}\n+{"seq_id":"625580698","text":"#Clustering methods\nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import NearestNeighbors\n\n#input data\nA = np.array([[3.1, 2.3], [2.3, 4.2], [3.9, 3.5], [3.7, 6.4], [4.8, 1.9], \n              [8.3, 3.1], [5.2, 7.5], [4.8, 4.7], [3.5, 5.1], [4.4, 2.9]])\n\n#define the number of nearest neighbors\nk = 3\n\n#test data\ntest_data = [3.3, 2.9]\n\n#visualize the input data\nplt.figure()\nplt.title(\"input data\")\nplt.scatter(A[:, 0], A[:, 1], marker = \"o\", s = 100, c = \"black\")\n\n#build the nearest neighbor model and train it \nknn_model = NearestNeighbors(n_neighbors = k, algorithm = \"auto\").fit(A)\ndistances, indices = knn_model.kneighbors([test_data])\n\n#coordinates of the K nearest neighbors\nprint(\"\\nk nearest neighbors: \")\nfor rank, index in enumerate(indices[0][:k], start = 1):\n    print(str(rank) + \" is\", A[index])\n\n#visualize the K nearest neighbors\nplt.title(\"K Nearest Neighbors\")\nplt.scatter(A[:, 0], A[:, 1], marker=\"o\", s=100, c=\"k\")\nplt.scatter(A[indices][0][:][:, 0], A[indices][0][:][:, 1], marker = \"o\", s=250, facecolors = 'none', edgecolors='purple')\nplt.scatter(test_data[0], test_data[1], marker = \"x\", c = \"purple\", s = 100)\n\nplt.show()\n","sub_path":"pr_4_3.py","file_name":"pr_4_3.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}\n+{"seq_id":"379287372","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as Data\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nBATCH_SIZE = 128\nKERNEL_SIZE = 3\nEPOCH = 100\ngradient_list = []\nloss_list = []\nNUM_WORKER = 1\n\ndata_train = datasets.MNIST(root = 'MNIST_train.npy', transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize([0.5],[0.5])]), train = True, download = True)\ndata_test = datasets.MNIST(root = 'MNIST_test.npy', transform = transforms.Compose([transforms.ToTensor(),transforms.Normalize([0.5],[0.5])]), train = False, download = True)\n\ndata_loader_train = Data.DataLoader(dataset = data_train, batch_size = BATCH_SIZE, shuffle = True, num_workers = NUM_WORKER)\ndata_loader_test = Data.DataLoader(dataset = data_test, batch_size = BATCH_SIZE, shuffle = True, num_workers = NUM_WORKER)\n\nclass Net(torch.nn.Module):\n\tdef __init__(self):\n\t\tsuper(Net,self).__init__()\n\t\tself.conv = torch.nn.Sequential(nn.Conv2d(1,64,kernel_size = KERNEL_SIZE, stride = 1, padding = 1),\n\t\t\t\t\t\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\t\t\t\t\t\tnn.Conv2d(64,128,kernel_size = KERNEL_SIZE, stride = 1, padding = 1),\n\t\t\t\t\t\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\t\t\t\t\t\tnn.MaxPool2d(stride=2, kernel_size = KERNEL_SIZE-1))\n\t\tself.dense = torch.nn.Sequential(nn.Linear(14*14*128, 1024),\n\t\t\t\t\t\t\t\t\t\tnn.ReLU(),\n\t\t\t\t\t\t\t\t\t\tnn.Dropout(p=0.5),\n\t\t\t\t\t\t\t\t\t\tnn.Linear(1024,10))\n\tdef forward(self,x):\n\t\tx = self.conv(x)\n\t\tx = x.view(-1, 14*14*128)\n\t\tx = self.dense(x)\n\t\treturn x\n\nclass CNN(nn.Module):\n\tdef __init__(self):\n\t\tsuper(CNN, self).__init__()\n\t\tself.conv1 = nn.Sequential(\n\t\t\tnn.Conv2d(\n\t\t\t\tin_channels =1,\n\t\t\t\tout_channels = 
16,\n\t\t\t\tkernel_size=5,\n\t\t\t\tstride=1,\n\t\t\t\tpadding=2),\n\t\t\tnn.ReLU(),\n\t\t\tnn.MaxPool2d(kernel_size=2)\n\t\t)\n\t\tself.conv2 = nn.Sequential(\n\t\t\tnn.Conv2d(16,32,5,1,2),\n\t\t\tnn.ReLU(),\n\t\t\tnn.MaxPool2d(2)\n\t\t)\n\t\tself.out = nn.Linear(32*7*7,10)\n\n\tdef forward(self, x):\n\t\tx = self.conv1(x)\n\t\tx = self.conv2(x)\n\t\tx = x.view(x.size(0),-1)\n\t\toutput = self.out(x)\n\t\treturn output\n\t\t\nnet = CNN()\nloss_func = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(),lr = 0.2, momentum = 0.9)\n\nprint(net)\n\ndef grad_norm(p):\n grad_sum = 0.0\n for element in net.parameters():\n grad = (element.grad.cpu().data.numpy()**p).sum()\n grad_sum += grad\n return grad_sum ** (1/p)\n\nprint (len(data_loader_train))\nfor epoch in range(EPOCH):\n\tepoch_grad = 0\n\tepoch_loss = 0\n\t'''for i, x in enumerate(data_loader_train):\n\t\tprint (x)'''\n\tfor step, (batch_x,batch_y) in enumerate(data_loader_train):\n\t\tbatch_x,batch_y = Variable(batch_x), Variable(batch_y)\n\t\toptimizer.zero_grad()\n\n\t\tprediction = net(batch_x)\n\n\t\tloss = loss_func(prediction, batch_y)\n\t\tepoch_loss += loss.data.numpy()\n\n\t\tloss.backward()\n\n\t\toptimizer.step()\n\t\tepoch_grad += grad_norm(2)\n\t\tprint ('epoch: %d, loss: %.7f, gradient: %.7f' %(epoch, loss, grad_norm(2)))\n\t\tgradient_list.append(grad_norm(2))\n\t\tloss_list.append(loss.data.numpy())\n\t'''print ('epoch: %d, loss: %.7f, gradient: %.7f' %(epoch, epoch_loss/len(data_loader_train),epoch_grad/len(data_loader_train) ))\n\tgradient_list.append(epoch_grad/len(data_loader_train))\n\tloss_list.append(epoch_loss/len(data_loader_train))'''\n\nprint(sum (p.numel() for p in net.parameters()))\nprint ('TRAINING FINISHING.')\n\ntorch.save(net,'MNIST.save')\nprint('save model.')\n\n\ngradient = np.array(gradient_list)\nx = np.linspace(0,len(data_loader_train),len(data_loader_train))\nx_ep = np.linspace(0,EPOCH,EPOCH)\nplt.subplot(211)\nplt.plot(x,gradient_list,label = 'Gradient Norm')\nplt.legend(loc = 'upper right')\n#plt.show()\nnp.save('gradientNorm_MNIST.npy', gradient)\nloss = np.array(loss_list)\nplt.subplot(212)\nplt.plot(x,loss_list, label = 'Loss', color = 'red')\nplt.legend(loc = 'upper right')\nplt.show()\nnp.save('loss_MNIST.npy',loss)\nprint('save loss.')\n\npd.DataFrame(gradient_list).to_csv('gradientNorm_MNIST.csv')\npd.DataFrame(loss_list).to_csv('loss_MNIST.csv')\n\n\n\n\n","sub_path":"hw1/hw1-2/observe_gradient_norm/gradient_mnist.py","file_name":"gradient_mnist.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404489809","text":"#read in mandel data computed in C\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nxres = 1920\nyres = 1080\nimg = np.zeros( (yres,xres)) \nrowcount = 0\nwith open('mandelbrot.txt', 'r') as f:\n\treader = csv.reader(f, delimiter='\\t')\n\tfor row in reader:\n\t\tcolcount = 0\n\t\tfor col in row[:-1]:\n\t\t\timg[rowcount][colcount] = float(col)\n\t\t\tcolcount += 1\n\t\trowcount += 1\n\nplt.imsave( 'mandel-hot.png', img, cmap='hot')\nplt.imsave( 'mandel-ncar.png', img, cmap='gist_ncar')\nplt.imsave( 'mandel-brbg.png', img, cmap='BrBG')\nplt.imsave( 'mandel-jet.png', img, cmap='jet')\nplt.imsave( 'mandel-prism.png', img, cmap='prism')\n","sub_path":"Mandelbrot/read_mandel.py","file_name":"read_mandel.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"571179212","text":"import hashlib\nfrom odoo import models, fields, api, _\n\n\nclass ProductTemplate(models.Model):\n _inherit = \"product.template\"\n\n def prepare_vals_for_create_listing(self, instance):\n self.ensure_one()\n vals = {\n 'name': self.name,\n 'mk_instance_id': instance.id,\n 'product_tmpl_id': self.id,\n 'description': self.description_sale,\n 'product_category_id': self.categ_id.id\n }\n if hasattr(self, '%s_prepare_vals_for_create_listing' % instance.marketplace):\n vals.update(getattr(self, '%s_prepare_vals_for_create_listing' % instance.marketplace)(instance))\n return vals\n\n def prepare_vals_for_update_listing(self, instance):\n self.ensure_one()\n vals = {\n 'product_category_id': self.categ_id.id,\n }\n if hasattr(self, '%s_prepare_vals_for_update_listing' % instance.marketplace):\n vals.update(getattr(self, '%s_prepare_vals_for_update_listing' % instance.marketplace)(instance))\n return vals\n\n def create_or_update_listing_image(self, listing_id):\n if not self.image_1920:\n return True\n listing_image_obj = self.env['mk.listing.image']\n image_hex = hashlib.md5(self.image_1920).hexdigest()\n listing_image_id = listing_image_obj.search([('image_hex', '=', image_hex), ('mk_listing_id', '=', listing_id.id)], limit=1)\n if listing_image_id:\n return True\n listing_image_obj.create({'name': listing_id.name, 'image': self.image_1920, 'mk_listing_id': listing_id.id, })\n return True\n\n def create_or_update_listing(self, instance):\n self.ensure_one()\n listing_obj = self.env['mk.listing']\n listing_id = listing_obj.search([('mk_instance_id', '=', instance.id), ('product_tmpl_id', '=', self.id)])\n if not listing_id:\n vals = self.prepare_vals_for_create_listing(instance)\n listing_id = listing_obj.create(vals)\n else:\n vals = self.prepare_vals_for_update_listing(instance)\n listing_id.write(vals)\n # self.create_or_update_listing_image(listing_id)\n sequence = 1\n for product_variant_id in self.product_variant_ids:\n product_variant_id.create_or_update_listing_item(instance, sequence, listing_id)\n sequence += 1\n return listing_id\n\n mk_listing_ids = fields.One2many('mk.listing', 'product_tmpl_id', string=\"Listing\")\n\n\nclass ProductProduct(models.Model):\n _inherit = \"product.product\"\n\n mk_listing_item_ids = fields.One2many('mk.listing.item', 'product_id', string=\"Listing Items\")\n\n def prepare_vals_for_create_listing_item(self, instance, sequence, listing_id):\n self.ensure_one()\n vals = {\n 'name': self.name,\n 'sequence': sequence,\n 'mk_instance_id': instance.id,\n 'product_id': self.id,\n 'default_code': self.default_code,\n 'mk_listing_id': listing_id.id\n }\n if hasattr(self, '%s_prepare_vals_for_create_listing_item' % instance.marketplace):\n vals.update(getattr(self, '%s_prepare_vals_for_create_listing_item' % instance.marketplace)(instance))\n return vals\n\n def prepare_vals_for_update_listing_item(self, instance, sequence, listing_id):\n self.ensure_one()\n vals = {\n 'sequence': sequence,\n 'default_code': self.default_code,\n }\n if hasattr(self, '%s_prepare_vals_for_update_listing_item' % instance.marketplace):\n vals.update(getattr(self, '%s_prepare_vals_for_update_listing_item' % instance.marketplace)(instance))\n return vals\n\n def create_or_update_listing_image(self, listing_item_id):\n if not self.image_1920:\n return True\n listing_image_obj = self.env['mk.listing.image']\n image_hex = hashlib.md5(self.image_1920).hexdigest()\n listing_image_id = listing_image_obj.search([('image_hex', '=', image_hex), 
('mk_listing_item_ids', 'in', listing_item_id.ids)], limit=1)\n if listing_image_id:\n return True\n listing_image_obj.create(\n {'name': listing_item_id.name, 'image': self.image_1920, 'mk_listing_id': listing_item_id.mk_listing_id.id, 'mk_listing_item_ids': [(6, 0, listing_item_id.ids)]})\n return True\n\n def create_or_update_listing_item(self, instance, sequence, listing_id):\n self.ensure_one()\n listing_item_obj = self.env['mk.listing.item']\n listing_item_id = listing_item_obj.search([('mk_instance_id', '=', instance.id), ('product_id', '=', self.id)])\n if not listing_item_id:\n vals = self.prepare_vals_for_create_listing_item(instance, sequence, listing_id)\n listing_item_id = listing_item_obj.create(vals)\n listing_item_id.create_or_update_pricelist_item(float(self.lst_price))\n else:\n vals = self.prepare_vals_for_update_listing_item(instance, sequence, listing_id)\n listing_item_id.write(vals)\n pricelist_item_id = self.env['product.pricelist.item'].search([('pricelist_id', '=', instance.pricelist_id.id), ('product_id', '=', listing_item_id.product_id.id)],\n limit=1)\n if not pricelist_item_id:\n listing_item_id.create_or_update_pricelist_item(float(self.lst_price))\n self.create_or_update_listing_image(listing_item_id)\n return listing_id\n\n def get_product_stock(self, export_qty_type, export_qty_value, warehouse_id, stock_type):\n product_id = self.with_context(warehouse=warehouse_id.id)\n stock = getattr(product_id, stock_type)\n if stock > 0:\n if export_qty_type == 'percentage':\n quantity = (stock * export_qty_value) / 100\n if quantity >= stock:\n return stock\n else:\n return quantity\n elif export_qty_type == 'fix':\n if export_qty_value >= stock:\n return stock\n else:\n return export_qty_value\n return stock\n\n\nclass ProductTemplateAttributeLine(models.Model):\n _inherit = 'product.template.attribute.line'\n\n def create_or_update_ptal(self, attribute_dict, product_tmpl_id, attribute_line_vals):\n for name, value in attribute_dict.items():\n ptal_id = self.env['product.template.attribute.line'].search([('product_tmpl_id', '=', product_tmpl_id.id), ('attribute_id.name', '=ilike', name)])\n if not ptal_id:\n # product_tmpl_id.write({'attribute_line_ids': attribute_line_vals})\n # ptal_id = self.search([('product_tmpl_id', '=', product_tmpl_id.id), ('attribute_id.name', '=ilike', name)])\n return False\n attribute_value_id = self.env['product.attribute.value'].search([('name', '=ilike', value), ('attribute_id', '=', ptal_id.attribute_id.id)], limit=1)\n if not attribute_value_id:\n attribute_value_id = self.env['product.attribute.value'].create({'name': value, 'attribute_id': ptal_id.attribute_id.id})\n if attribute_value_id not in ptal_id.value_ids:\n ptal_id.write({'value_ids': [(4, attribute_value_id.id, False)]})\n return True\n","sub_path":"base_marketplace/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"151536690","text":"\"\"\"\nGeneral extensions to the flask app.\n\nMost of these extensions will have to be registered with the app seperately.\n\"\"\"\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndb = SQLAlchemy()\napi = Api()\n\n\ndef register_extensions(app):\n \"\"\"Register extensions to the given app.\"\"\"\n db.init_app(app)\n 
api.init_app(app)\n","sub_path":"api_auth/api_auth/extensions.py","file_name":"extensions.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"610118084","text":"from collections import OrderedDict\nSHAPES = OrderedDict({\n    'line': {\n        'lines': [\n            [(-1, 0, 0), (1, 0, 0)],\n        ],\n        'quads': [],\n    },\n    'square': {\n        'lines': [\n            [(-1, 0, -1), (1, 0, -1), (1, 0, 1), (-1, 0, 1), (-1, 0, -1)]\n        ],\n        'quads': [],\n    },\n    'shaded_square': {\n        'lines': [\n            [(-1, 0, -1), (1, 0, -1), (1, 0, 1), (-1, 0, 1), (-1, 0, -1)]\n        ],\n        'quads': [\n            [(-1, 0, -1), (1, 0, -1), (1, 0, 1), (-1, 0, 1)],\n        ],\n    }\n})\n\n\n\ndef get_bounds(shape, scale=1):\n\n    lines, quads = shape['lines'], shape['quads']\n\n    max_values = []\n    min_values = []\n\n    if lines:\n        flatlines = [t for line in lines for t in line]\n        max_values.append(map(max, *flatlines))\n        min_values.append(map(min, *flatlines))\n\n    if quads:\n        flatquads = [t for quad in quads for t in quad]\n        max_values.append(map(max, *flatquads))\n        min_values.append(map(min, *flatquads))\n\n    if len(max_values) < 2:\n        min_value = min_values[0]\n        max_value = max_values[0]\n    else:\n        max_value = map(max, *max_values)\n        min_value = map(min, *min_values)\n\n    return [x*scale for x in min_value], [x*scale for x in max_value]\n\n","sub_path":"scripts/test_get_bounds.py","file_name":"test_get_bounds.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"280657756","text":"\"\"\"Keras Sequence object for running BinarySkipGramSequence on texts.\"\"\"\nfrom typing import Tuple\n\nimport numpy as np  # type: ignore\nfrom ensmallen_graph import preprocessing  # pylint: disable=no-name-in-module\n\nfrom .abstract_word2vec_sequence import AbstractWord2VecSequence\n\n\nclass WordBinarySkipGramSequence(AbstractWord2VecSequence):\n    \"\"\"Keras Sequence object for running BinarySkipGramSequence on texts.\"\"\"\n\n    def __init__(\n        self,\n        sequences: np.ndarray,\n        batch_size: int,\n        vocabulary_size: int,\n        negative_samples: float = 7,\n        window_size: int = 4,\n        shuffle: bool = True,\n        seed: int = 42,\n        elapsed_epochs: int = 0,\n    ):\n        \"\"\"Create new Sequence object.\n\n        Parameters\n        -----------------------------\n        sequences: np.ndarray,\n            List of encoded texts.\n        batch_size: int,\n            Number of nodes to include in a single batch.\n        negative_samples: float = 7,\n            Factor of negative samples to use.\n        window_size: int = 4,\n            Window size for the local context.\n            On the borders the window size is trimmed.\n        shuffle: bool = True,\n            Whether to shuffle the vectors.\n        seed: int = 42,\n            The seed to use to make extraction reproducible.\n        elapsed_epochs: int = 0,\n            Number of elapsed epochs to init state of generator.\n        \"\"\"\n        self._negative_samples = negative_samples\n        self._vocabulary_size = vocabulary_size\n\n        super().__init__(\n            sequences=sequences,\n            batch_size=batch_size,\n            window_size=window_size,\n            shuffle=shuffle,\n            seed=seed,\n            elapsed_epochs=elapsed_epochs\n        )\n\n    def __getitem__(self, idx: int) -> Tuple[Tuple[np.ndarray, np.ndarray], None]:\n        \"\"\"Return batch corresponding to given index.\n\n        The return tuple of tuples is composed of an inner tuple, containing\n        the words vector and the vector of vectors of the contexts.\n        Depending on the order of the input_layers of the models that can\n        accept these data format, one of the vectors is used as training\n        input and the other one is used as the output for the 
NCE loss layer.\n\n The words vectors and contexts vectors contain numeric IDs, that\n represent the index of the words' embedding column.\n\n The true output value is None, since no loss function is used after\n the NCE loss, that is implemented as a layer, and this vastly improves\n the speed of the training process since it does not require to allocate\n empty vectors of considerable size for the one-hot encoding process.\n\n Parameters\n ---------------\n idx: int,\n Index corresponding to batch to be rendered.\n\n Returns\n ---------------\n Tuple of tuples with input data.\n \"\"\"\n return preprocessing.binary_skipgrams(\n idx+self.elapsed_epochs,\n self._sequences[idx],\n vocabulary_size=self._vocabulary_size,\n window_size=self._window_size,\n negative_samples=self._negative_samples,\n shuffle=self._shuffle\n )\n","sub_path":"embiggen/sequences/word_binary_skipgram_sequence.py","file_name":"word_binary_skipgram_sequence.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"501106995","text":"import pprint\nimport json\nimport progressbar\nfrom progress.bar import IncrementalBar\n# coding: utf-8\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\n\n\ndef initialize_browser(url):\n # Initialize Browser Driver\n browser = webdriver.Chrome(executable_path='chromedriver.exe')\n browser.get(url)\n return browser\n\ndef get_def(): \n \n # Initialize Browser\n browser = initialize_browser('https://www.google.com')\n hebrew_data = {\n \"errors\": {\n \"concordance\": [],\n \"verse\": []\n }\n }\n\n dict_num_range = 87\n dict_bar = IncrementalBar('GATHERING HEBREW DICTIONARY', max=dict_num_range)\n for num in range(0, dict_num_range):\n try: \n url = 'http://www.htmlbible.com/sacrednamebiblecom/kjvstrongs/STRHEB'+str(num)+'.htm'\n browser.get(url)\n base_url = '/html/body/center[1]/table/tbody/tr'\n\n num_rows = len(browser.find_elements_by_xpath(base_url)) \n for row in range(2, num_rows + 1):\n column_num = len(browser.find_elements_by_xpath(base_url+'['+str(row)+']/td'))\n if column_num == 1:\n continue\n \n # Store all references into string\n hebrew_number = browser.find_element_by_xpath('/html/body/center[1]/table/tbody/tr['+str(row)+']/td[1]/p/a[2]').text\n language_text = browser.find_element_by_xpath('/html/body/center[1]/table/tbody/tr['+str(row)+']/td[2]/p').text.split('\\n')\n definition = browser.find_element_by_xpath('/html/body/center[1]/table/tbody/tr['+str(row)+']/td[3]/p').text\n\n hebrew_data[hebrew_number] = {\n 'transliteration': language_text[0],\n 'pronunciation': language_text[1],\n 'definition': definition,\n 'verses': {}\n }\n\n except Exception as e:\n print(e)\n hebrew_data['errors']['concordance'].append(hebrew_number)\n continue\n\n finally:\n dict_bar.next()\n\n dict_bar.finish()\n\n range_num = 868\n heb_verse_bar = IncrementalBar('GATHERING CONCORDANCE', max=range_num)\n for num in range(0, range_num):\n try:\n url = 'http://www.htmlbible.com/sacrednamebiblecom/kjvstrongs/CONHEB'+str(num)+'.htm'\n browser.get(url)\n base_url = '/html/body/center[1]/table/tbody/tr'\n\n num_rows = len(browser.find_elements_by_xpath(base_url))\n for row in range(2, num_rows + 1):\n column_num = len(browser.find_elements_by_xpath(base_url+'['+str(row)+']/td'))\n if column_num == 1:\n continue\n \n hebrew_number = 
browser.find_element_by_xpath('/html/body/center[1]/table/tbody/tr['+str(row)+']/td[1]/p/a').text\n print(hebrew_number)\n num_word_forms = len(browser.find_elements_by_xpath(base_url+'['+str(row)+']/td[3]/p'))\n\n for word_form in range(1, num_word_forms + 1):\n word_form_text = browser.find_element_by_xpath(base_url+'['+str(row)+']/td[3]/p['+str(word_form)+']').text\n num_verses = len(browser.find_elements_by_xpath(base_url+'['+str(row)+']/td[3]/p['+str(word_form)+']/a'))\n print('Word form: {0}, num verses: {1}'.format(word_form_text, num_verses))\n\n word_form_label = word_form_text[:word_form_text.find('\\n')]\n if word_form_label == 'The Following Have Multiple Hebrew Words Associated To A Single English Word':\n continue\n\n verses = []\n print(\"Getting associated verses\")\n for verse in range(1, num_verses + 1):\n verse_text = (browser.find_element_by_xpath(base_url+'['+str(row)+']/td[3]/p['+str(word_form)+']/a['+str(verse)+']').text)\n print(verse_text)\n verses.append(verse_text)\n \n hebrew_data[hebrew_number]['verses'][word_form_label] = verses\n\n except Exception as e:\n print(e)\n hebrew_data['errors']['verse'].append(hebrew_number)\n continue\n\n finally:\n heb_verse_bar.next()\n\n heb_verse_bar.finish()\n browser.quit()\n\n with open('heb_concordance_errors.json', 'w') as fp:\n json.dump(hebrew_data, fp, indent=4)\n\nif __name__ == '__main__':\n get_def()\n\n\n ","sub_path":"get_hebrew_dictionary.py","file_name":"get_hebrew_dictionary.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293851366","text":"from flask_restful import Resource, reqparse\nfrom src.models.user import UserModel, UserSchema\nfrom src.database import db\n\nclass UserApi(Resource):\n\n def getUsers(self):\n results = UserModel.query.all() # consider: これで取れる仕組みが理解できていない\n users = UserSchema(many=True).dump(results)\n\n return users\n\n def postUser(self, user):\n user = UserModel(\n name=user['name']\n )\n\n db.session.add(user)\n db.session.commit()\n res = UserSchema().dump(user)\n\n return res","sub_path":"src/apis/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"63944990","text":"#!/usr/bin/env python\n#\n# argdist.py Trace a function and display a distribution of its\n# parameter values as a histogram or frequency count.\n#\n# USAGE: argdist.py [-h] [-p PID] [-z STRING_SIZE] [-i INTERVAL]\n# [-n COUNT] [-C specifier [specifier ...]]\n# [-H specifier [specifier ...]]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\")\n# Copyright (C) 2016 Sasha Goldshtein.\n\nfrom bcc import BPF\nfrom time import sleep, strftime\nimport argparse\n\nclass Specifier(object):\n text = \"\"\"\nDATA_DECL\n\nint PROBENAME(struct pt_regs *ctx SIGNATURE)\n{\n PID_FILTER\n KEY_EXPR\n if (!(FILTER)) return 0;\n COLLECT\n return 0;\n}\n\"\"\"\n next_probe_index = 0\n aliases = { \"$PID\": \"bpf_get_current_pid_tgid()\" }\n\n def _substitute_aliases(self, expr):\n if expr is None:\n return expr\n for alias, subst in Specifier.aliases.items():\n expr = expr.replace(alias, subst)\n return expr\n\n def __init__(self, type, specifier, pid):\n self.raw_spec = specifier \n spec_and_label = specifier.split(';')\n self.label = spec_and_label[1] \\\n if len(spec_and_label) == 2 else None\n parts = spec_and_label[0].strip().split(':')\n if len(parts) < 3 or len(parts) > 6:\n raise 
ValueError(\"invalid specifier format\")\n self.type = type # hist or freq\n self.is_ret_probe = parts[0] == \"r\"\n if self.type != \"hist\" and self.type != \"freq\":\n raise ValueError(\"unrecognized probe type\")\n if parts[0] not in [\"r\", \"p\"]:\n raise ValueError(\"unrecognized probe type\")\n self.library = parts[1]\n self.is_user = len(self.library) > 0\n fparts = parts[2].split('(')\n if len(fparts) != 2:\n raise ValueError(\"invalid specifier format\")\n self.function = fparts[0]\n self.signature = fparts[1][:-1]\n self.is_default_expr = len(parts) < 5\n if not self.is_default_expr:\n self.expr_type = parts[3]\n self.expr = parts[4]\n else:\n if not self.is_ret_probe and self.type == \"hist\":\n raise ValueError(\"dist probes must have expr\")\n self.expr_type = \\\n \"u64\" if not self.is_ret_probe else \"int\"\n self.expr = \"1\" if not self.is_ret_probe else \"$retval\"\n self.expr = self.expr.replace(\"$retval\",\n \"(%s)ctx->ax\" % self.expr_type)\n self.filter = None if len(parts) != 6 else parts[5]\n if self.filter is not None:\n self.filter = self.filter.replace(\"$retval\",\n \"(%s)ctx->ax\" % self.expr_type)\n self.expr = self._substitute_aliases(self.expr)\n self.filter = self._substitute_aliases(self.filter)\n self.pid = pid\n self.probe_func_name = \"%s_probe%d\" % \\\n (self.function, Specifier.next_probe_index)\n self.probe_hash_name = \"%s_hash%d\" % \\\n (self.function, Specifier.next_probe_index)\n Specifier.next_probe_index += 1\n\n def _is_string_probe(self):\n return self.expr_type == \"char*\" or self.expr_type == \"char *\"\n\n def generate_text(self, string_size):\n program = self.text.replace(\"PROBENAME\", self.probe_func_name)\n signature = \"\" if len(self.signature) == 0 \\\n else \",\" + self.signature\n program = program.replace(\"SIGNATURE\", signature)\n if self.pid is not None and not self.is_user:\n # kernel probes need to explicitly filter pid\n program = program.replace(\"PID_FILTER\",\n \"u32 pid = bpf_get_current_pid_tgid();\\n\" + \\\n \"if (pid != %d) { return 0; }\" % self.pid)\n else:\n program = program.replace(\"PID_FILTER\", \"\")\n if self._is_string_probe():\n decl = \"\"\"\nstruct %s_key_t { char key[%d]; };\nBPF_HASH(%s, struct %s_key_t, u64);\n\"\"\" \\\n % (self.function, string_size,\n self.probe_hash_name, self.function)\n collect = \"%s.increment(__key);\" % self.probe_hash_name\n key_expr = \"\"\"\nstruct %s_key_t __key = {0};\nbpf_probe_read(&__key.key, sizeof(__key.key), %s);\n\"\"\" \\\n % (self.function, self.expr)\n elif self.type == \"freq\":\n decl = \"BPF_HASH(%s, %s, u64);\" % \\\n (self.probe_hash_name, self.expr_type)\n collect = \"%s.increment(__key);\" % self.probe_hash_name\n key_expr = \"%s __key = %s;\" % \\\n (self.expr_type, self.expr)\n elif self.type == \"hist\":\n decl = \"BPF_HISTOGRAM(%s, %s);\" % \\\n (self.probe_hash_name, self.expr_type)\n collect = \"%s.increment(bpf_log2l(__key));\" % \\\n self.probe_hash_name \n key_expr = \"%s __key = %s;\" % \\\n (self.expr_type, self.expr)\n program = program.replace(\"DATA_DECL\", decl)\n program = program.replace(\"KEY_EXPR\", key_expr) \n program = program.replace(\"FILTER\", self.filter or \"1\") \n program = program.replace(\"COLLECT\", collect)\n return program\n\n def attach(self, bpf):\n self.bpf = bpf\n if self.is_user:\n if self.is_ret_probe:\n bpf.attach_uretprobe(name=self.library,\n sym=self.function,\n fn_name=self.probe_func_name,\n pid=self.pid or -1)\n else:\n bpf.attach_uprobe(name=self.library,\n sym=self.function,\n 
fn_name=self.probe_func_name,\n pid=self.pid or -1)\n else:\n if self.is_ret_probe:\n bpf.attach_kretprobe(event=self.function,\n fn_name=self.probe_func_name)\n else:\n bpf.attach_kprobe(event=self.function,\n fn_name=self.probe_func_name)\n\n def display(self):\n print(self.label or self.raw_spec)\n data = self.bpf.get_table(self.probe_hash_name)\n if self.type == \"freq\":\n print(\"\\t%-10s %s\" % (\"COUNT\", \"EVENT\"))\n for key, value in sorted(data.items(),\n key=lambda kv: kv[1].value):\n key_val = key.key if self._is_string_probe() \\\n else str(key.value)\n if self.is_default_expr:\n if not self.is_ret_probe:\n key_str = \"total calls\"\n else:\n key_str = \"retval = %s\" % \\\n key_val\n else:\n key_str = \"%s = %s\" % \\\n (self.expr, key_val)\n print(\"\\t%-10s %s\" % \\\n (str(value.value), key_str))\n elif self.type == \"hist\":\n label = self.expr if not self.is_default_expr \\\n else \"retval\"\n data.print_log2_hist(val_type=label)\n\nexamples = \"\"\"\nProbe specifier syntax:\n {p,r}:[library]:function(signature)[:type:expr[:filter]][;label]\nWhere:\n p,r -- probe at function entry or at function exit\n in exit probes, only $retval is accessible\n library -- the library that contains the function\n (leave empty for kernel functions)\n function -- the function name to trace\n signature -- the function's parameters, as in the C header\n type -- the type of the expression to collect\n expr -- the expression to collect\n filter -- the filter that is applied to collected values\n label -- the label for this probe in the resulting output\n\nEXAMPLES:\n\nargdist.py -H 'p::__kmalloc(u64 size):u64:size'\n Print a histogram of allocation sizes passed to kmalloc\n\nargdist.py -p 1005 -C 'p:c:malloc(size_t size):size_t:size:size==16'\n Print a frequency count of how many times process 1005 called malloc\n with an allocation size of 16 bytes\n\nargdist.py -C 'r:c:gets():char*:$retval;snooped strings'\n Snoop on all strings returned by gets()\n\nargdist.py -p 1005 -C 'p:c:write(int fd):int:fd'\n Print frequency counts of how many times writes were issued to a\n particular file descriptor number, in process 1005\n\nargdist.py -p 1005 -H 'r:c:read()'\n Print a histogram of error codes returned by read() in process 1005\n\nargdist.py -H \\\\\n 'p:c:write(int fd, const void *buf, size_t count):size_t:count:fd==1'\n Print a histogram of buffer sizes passed to write() across all\n processes, where the file descriptor was 1 (STDOUT)\n\nargdist.py -C 'p:c:fork();fork calls'\n Count fork() calls in libc across all processes\n Can also use funccount.py, which is easier and more flexible \n\nargdist.py \\\\\n -H 'p:c:sleep(u32 seconds):u32:seconds' \\\\\n -H 'p:c:nanosleep(struct timespec { time_t tv_sec; long tv_nsec; } *req):long:req->tv_nsec'\n Print histograms of sleep() and nanosleep() parameter values\n\nargdist.py -p 2780 -z 120 \\\\\n -C 'p:c:write(int fd, char* buf, size_t len):char*:buf:fd==1'\n Spy on writes to STDOUT performed by process 2780, up to a string size\n of 120 characters \n\"\"\"\n\nparser = argparse.ArgumentParser(description=\n \"Trace a function and display a summary of its parameter values.\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog=examples)\nparser.add_argument(\"-p\", \"--pid\", type=int,\n help=\"id of the process to trace (optional)\")\nparser.add_argument(\"-z\", \"--string-size\", default=80, type=int,\n help=\"maximum string size to read from char* arguments\")\nparser.add_argument(\"-i\", \"--interval\", default=1, type=int,\n 
help=\"output interval, in seconds\")\nparser.add_argument(\"-n\", \"--number\", type=int, dest=\"count\",\n help=\"number of outputs\")\nparser.add_argument(\"-H\", \"--histogram\", nargs=\"*\", dest=\"histspecifier\",\n help=\"probe specifier to capture histogram of (see examples below)\")\nparser.add_argument(\"-C\", \"--count\", nargs=\"*\", dest=\"countspecifier\",\n help=\"probe specifier to capture count of (see examples below)\")\nparser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n help=\"print resulting BPF program code before executing\")\nargs = parser.parse_args()\n\nspecifiers = []\nfor specifier in (args.countspecifier or []):\n specifiers.append(Specifier(\"freq\", specifier, args.pid))\nfor histspecifier in (args.histspecifier or []):\n specifiers.append(Specifier(\"hist\", histspecifier, args.pid))\nif len(specifiers) == 0:\n print(\"at least one specifier is required\")\n exit(1)\n\nbpf_source = \"#include \\n\"\nfor specifier in specifiers:\n bpf_source += specifier.generate_text(args.string_size)\n\nif args.verbose:\n print(bpf_source)\n\nbpf = BPF(text=bpf_source)\n\nfor specifier in specifiers:\n specifier.attach(bpf)\n\ncount_so_far = 0\nwhile True:\n try:\n sleep(args.interval)\n except KeyboardInterrupt:\n exit()\n print(\"[%s]\" % strftime(\"%H:%M:%S\"))\n for specifier in specifiers:\n specifier.display()\n count_so_far += 1\n if args.count is not None and count_so_far >= args.count:\n exit()\n","sub_path":"tools/argdist.py","file_name":"argdist.py","file_ext":"py","file_size_in_byte":13053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70983177","text":"from django.db import models\r\nimport datetime\r\nfrom django.utils.timezone import now\r\n\r\nCHOICES = (\r\n (\"Alin\",\"Aljabar Linear\"),\r\n (\"MPPI\",\"Metodologi Penelitian dan Penulisan Ilmiah\"),\r\n (\"PBP\",\"Pemrograman Berbasis Platform\"),\r\n (\"SOSI\",\"Sistem Operasi untuk Sistem Informasi\"),\r\n (\"SDA\",\"Struktur Data & Algoritma\"),\r\n)\r\n\r\nPRIORITY = (\r\n (\"Tinggi\",\"Tinggi\"),\r\n (\"Sedang\",\"Sedang\"),\r\n (\"Rendah\",\"Rendah\"),\r\n)\r\n\r\nclass JadwalBelajarBareng(models.Model):\r\n def __str__(self): \r\n return self.Topik\r\n Prioritas = models.TextField(max_length = 15, choices=PRIORITY)\r\n Matkul = models.TextField(max_length = 150, choices=CHOICES)\r\n Waktu = models.DateTimeField()\r\n Topik = models.CharField(max_length = 150)\r\n Informasi = models.TextField()\r\n Link = models.URLField(max_length = 200)","sub_path":"jadwal_belajar_bareng/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"116572631","text":"# Collect input on paper dimensions\n# Validating input to ensure our input can be multiplied\n# defining a generic function allows for consistent input of a given type\ndef get_number(prompt):\n while True:\n try:\n number = int(input(prompt))\n except ValueError:\n print(\"Invalid entry. 
Please enter an integer value.\")\n            continue\n        else:\n            return number\n\n\n# Assign l and w by calling functions\nl = get_number(\"Please enter the length of the paper in millimeters:\")\nw = get_number(\"Please enter the width of the paper in millimeters:\")\n# P = 2(l + w).\nperimeter = 2 * (l + w)\n# Concatenate the string with the output from our perimeter calculation\n# convert int to str in order to join them in one line\nprint(\"The perimeter of the page is \" + str(perimeter) + \"mm\")\n","sub_path":"PerimeterCalc.py","file_name":"PerimeterCalc.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"273932378","text":"import os\nimport time\nimport unicodedata\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom animedb.models import *\n\n\ndef get_content(url, params=None, stream=False):\n    r = requests.get(url, params=params, stream=stream)\n    while r.status_code != 200:\n        time.sleep(3)\n        r = requests.get(url, params=params, stream=stream)\n    return r\n\ndef get_soup(url, params=None):\n    r = get_content(url, params=params, stream=False)\n    return BeautifulSoup(r.content, 'html5lib', from_encoding='utf-8')\n\ndef get_safe_str(string):\n    string = string.replace('/', '')\n    string = string.replace(':', '-')\n    string = unicodedata.normalize('NFKC', string)\n    return string.strip()\n\ndef get_img(url, dest_dir, file_name):\n    img_ext = url.split('.')[-1].lower()\n    file_name = '{}.{}'.format(file_name, img_ext)\n    img_path = os.path.join(dest_dir, file_name)\n    if os.path.exists(img_path):\n        return file_name\n\n    r = get_content(url, stream=True)\n    if r.status_code == 200:\n        with open(img_path, 'wb') as f:\n            for chunk in r.iter_content(1024):\n                f.write(chunk)\n        return file_name\n    else:\n        return 'NA'\n\ndef get_or_create_anime(data_dict):\n    if Anime.objects.filter(title=data_dict['title']).exists():\n        anime = Anime.objects.get(title=data_dict['title'])\n    else:\n        genres = data_dict['genres'].split(', ')\n        del data_dict['genres']\n        anime = Anime.objects.create(**data_dict)\n        for genre in genres:\n            obj, created = Genre.objects.get_or_create(name=genre.strip())\n            anime.genres.add(obj)\n    return anime\n","sub_path":"animedb/updaters/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"554959549","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\nimport functools\nimport unittest\n\nfrom pyramid import testing\nfrom webtest import TestApp\n\nfrom cornice.tests import CatchErrors\nfrom mozsvc.metrics import MetricsService\n\nservice3 = MetricsService(name=\"service3\", path=\"/service3\")\nservice4 = MetricsService(name=\"service4\", path=\"/service4\")\nservice5 = MetricsService(name=\"service5\", path=\"/service5\")\n\n\ndef wrap_fn(fn):\n    if not hasattr(fn, '_wrap_count'):\n        fn._wrap_count = 0\n    else:\n        fn._wrap_count += 1\n\n    @functools.wraps(fn)\n    def wrapper(*args, **kwargs):\n        result = fn(*args, **kwargs)\n        result[\"wrapped%d\" % fn._wrap_count] = \"yes\"\n        return result\n    return wrapper\n\n\n@service3.get(decorators=[wrap_fn])\ndef wrapped_get3(request):\n    return {\"test\": \"succeeded\"}\n\n\n@service4.post(decorators=[wrap_fn])\n@service4.get(decorators=[wrap_fn])\ndef wrapped_get4(request):\n    return {\"test\": \"succeeded\"}\n\n\n@service5.get(decorators=[wrap_fn])\n@service5.get(accept=\"application/json\", renderer=\"simplejson\")\n@service5.get(accept=\"application/newlines\", renderer=\"newlines\")\n@service5.post(decorators=[wrap_fn])\ndef wrapped_get5(request):\n    return {\"test\": \"succeeded\"}\n\n\nclass TestServiceDefinition(unittest.TestCase):\n\n    def setUp(self):\n        self.config = testing.setUp()\n        self.config.include(\"cornice\")\n        self.config.scan(\"mozsvc.tests.test_service_definition\")\n        self.app = TestApp(CatchErrors(self.config.make_wsgi_app()))\n\n    def tearDown(self):\n        testing.tearDown()\n\n    def test_decorated_view_fn(self):\n        # passing a decorator in to the service api call should result in a\n        # decorated view callable\n        resp = self.app.get(\"/service3\")\n        self.assertEquals(resp.json, {'test': 'succeeded', 'wrapped0': 'yes'})\n\n    def test_stacked_decorated_view(self):\n        # passing a decorator in to the service api call should result in a\n        # decorated view callable, ordering of the particular decorators\n        # shouldn't break things\n        resp = self.app.get(\"/service4\")\n        self.assertEquals(resp.json, {'test': 'succeeded', 'wrapped0': 'yes'})\n\n        resp = self.app.get(\"/service5\")\n        self.assertEquals(resp.json, {'test': 'succeeded', 'wrapped0': 'yes'})\n","sub_path":"mozsvc/tests/test_service_definition.py","file_name":"test_service_definition.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"351648884","text":"from typing import TYPE_CHECKING\n\nfrom ..types import TealType\nfrom ..ir import TealOp, Op, TealBlock\nfrom ..errors import TealInputError\nfrom .leafexpr import LeafExpr\n\nif TYPE_CHECKING:\n    from ..compiler import CompileOptions\n\n\nclass Arg(LeafExpr):\n    \"\"\"An expression to get an argument when running in signature verification mode.\"\"\"\n\n    def __init__(self, index: int) -> None:\n        \"\"\"Get an argument for this program.\n\n        Should only be used in signature verification mode. For application mode arguments, see\n        :any:`TxnObject.application_args`.\n\n        Args:\n            index: The integer index of the argument to get. Must be between 0 and 255 inclusive.\n        \"\"\"\n        super().__init__()\n\n        if type(index) is not int:\n            raise TealInputError(\"invalid arg input type {}\".format(type(index)))\n\n        if index < 0 or index > 255:\n            raise TealInputError(\"invalid arg index {}\".format(index))\n\n        self.index = index\n\n    def __teal__(self, options: \"CompileOptions\"):\n        op = TealOp(self, Op.arg, self.index)\n        return TealBlock.FromOp(options, op)\n\n    def __str__(self):\n        return \"(arg {})\".format(self.index)\n\n    def type_of(self):\n        return TealType.bytes\n\n\nArg.__module__ = \"pyteal\"\n","sub_path":"pyteal/ast/arg.py","file_name":"arg.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"421533943","text":"N=int(input(\"n=\"))\ny=N\nrev=0\nwhile N>0:\n    rem=N%10\n    rev=rev*10+rem\n    N=N//10\nif(y==rev):\n    print(\"palindrome\")\nelse:\n    print(\"not palindrome\")\n","sub_path":"number is palindrome or not.py","file_name":"number is palindrome or not.py","file_ext":"py","file_size_in_byte":156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"130620947","text":"# -*- coding: utf-8 -*-\nimport sys\nimport time\n#import r\nimport pexpect\nimport random\nimport string\nimport threading\n\n\nclass Server(threading.Thread):\n    def __init__(self, prog_path, port_number):\n        super(Server, self).__init__()\n        self.prog_path = prog_path\n        self.controler = None\n        self.port_number = port_number\n\n    def run(self):\n        if '.py' in prog_path:\n            command = \"python3 %s s %d\" % (self.prog_path, self.port_number)\n        else:\n            command = \"%s s %d\" % (self.prog_path, self.port_number)\n        self.controler = pexpect.spawn(command)\n        print(\"Server Port:\", self.port_number)\n        while True:\n            try:\n                self.controler.expect(\"\\\n\\\r\\\t\\\n\\\n\", timeout=2)\n            except pexpect.TIMEOUT:\n                continue\n            except pexpect.EOF:\n                break\n\n    def close(self):\n        self.controler.sendcontrol('c')\n\n\ndef check_binary_valid(prog_path):\n    if '.py' in prog_path:\n        command = \"python3 %s\" % prog_path\n    else:\n        command = prog_path\n    try:\n        client = pexpect.spawn(command)\n    except pexpect.ExceptionPexpect:\n        return 0\n    try:\n        client.expect(\"Usage: myprog c
or myprog s \", timeout=2)\n client.close()\n return 1\n except (pexpect.TIMEOUT, pexpect.EOF) as e:\n client.close()\n return 0\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print('Usage: python3 lab1_test_script.py myprog_path')\n else:\n prog_path = sys.argv[1]\n \n if not check_binary_valid(prog_path):\n print('Please input a valid path of your binary program')\n else:\n messages = [\n 'test case 1',\n 'test case 2: COMPSCI 356 i\\f\\\\\\'\\\"\\as an undergraduate course in computer science teaching the fundamentals of computer networks. We will cover the technologies supporting the Internet, from Ethernet and WiFi through the routing protocols that govern the flow of traffic, and the web technologies that\tgenerate most of it.Topics: The topics we will study include but are not limited to: how to achieve reliable communications over unreliable channels, how to find a good path through a network, how to share networ',\n [],\n \"EOF\"\n ]\n port_number = random.randint(1024, 65535)\n server = Server(prog_path, port_number)\n\n server.start()\n time.sleep(2)\n\n def send_message(client_handle, message):\n if message != 'EOF':\n client_handle.sendline(message)\n else:\n client_handle.sendcontrol('d')\n if message != 'EOF':\n try:\n client_handle.expect(\"Enter message:\", timeout=4)\n new_msg = client_handle.before.decode(\"utf-8\") \n response = new_msg.replace(\"\\r\\n\", \"\\n\")[:-1]\n reply = response.split(\"\\n\")[-1]\n if message == reply:\n return 1\n else:\n return 0\n except (pexpect.TIMEOUT, pexpect.EOF) as e:\n return 0\n else:\n try:\n client_handle.expect('Enter message:', timeout=4)\n return 0\n except pexpect.TIMEOUT:\n return 0\n except pexpect.EOF:\n return 1\n\n def start_client(client_number):\n if '.py' in prog_path:\n command = \"python3 %s c %d 127.0.0.1\" % (prog_path, port_number)\n else:\n command = \"%s c %d 127.0.0.1\" % (prog_path, port_number)\n client = pexpect.spawn(command)\n print(\"client connect to\", port_number)\n try:\n i = client.expect([\"Enter message:\", 'Connection refused'], timeout=4)\n if i == 1:\n print('Client%d: Connection refused.' % client_number)\n return 0\n except (pexpect.TIMEOUT, pexpect.EOF) as e:\n print(e)\n print('Client%d: cannot connect to the server.' 
% client_number)\n return 0\n results = [1, 1, 1, 1]\n for i in range(len(messages)):\n message_list = [messages[i]]\n if i == 2:\n message_list = []\n for j in range(20):\n message_list.append(''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(512)))\n for message in message_list:\n result = send_message(client, message)\n if result == 0:\n results[i] = 0\n break\n for i in range(4):\n if i == 0:\n print('Client%d: Testing short word:' % client_number,end=\" \")\n elif i == 1:\n print(' Testing long sentence:', end=\" \")\n elif i == 2:\n print(' Testing multiple sentences:', end=\" \")\n else:\n print(' Testing EOF:', end=\" \")\n if results[i] == 0:\n print('\\033[1;31;40mFAILED\\033[0m')\n else:\n print('\\033[1;32;40mPASSED\\033[0m')\n client.close()\n if sum(results) == 4:\n return 1\n else:\n return 0\n\n if start_client(1):\n start_client(2)\n\n server.close()\n\n","sub_path":"lab1_test_script.py","file_name":"lab1_test_script.py","file_ext":"py","file_size_in_byte":5976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517212946","text":"from case.CBaseCase import *\nimport traceback\n\nclass T4604_bmc_DestroyChassisResumeChecksum(CBaseCase):\n '''\n **************************************************************\n [Purpose ]: Destroy checksum:\n Writing a wrong resume checksum by I2C Access \n command, then make BMC aware of this corrupt \n checksum by flush resume cache. BMC will make FRU \n data checksum invalid by adding 1 to it for each \n FRU data block.\n Recover checksum:\n Write FRU data (not FRU data checksum) will update \n FRU data and write a valid checksum to EEPROM, \n together with the cached data.\n [Author ]: Forrest.Gu@emc.com\n [Sprint ]: Sprint 2.0.21\n [Tickets ]: ATOM-1238\n [Platform]: All\n [Type ]: Auto\n [History ]:\n - Forrest.Gu@emc.com 05/22/2014\n First edition.\n **************************************************************\n '''\n def __init__(self):\n CBaseCase.__init__(self, self.__class__.__name__)\n\n \n def config(self):\n CBaseCase.config(self)\n \n # Get orignial FFID\n self.log('INFO', 'Record FFID in case config.')\n self.list_resume = self.enclosure.action_get_resume('Chassis Resume', 'Family/FRU ID')\n \n # Inform infrastructure this failure\n if self.list_resume == []:\n self.result(FAIL, 'Fail to get FFID in case config.')\n \n \n def test(self):\n if not self.enclosure.verify_resume_checksum():\n self.result(FAIL, 'Fail to verify resume checksum.')\n \n if not self.enclosure.verify_destroy_resume_checksum():\n self.result(FAIL, 'Fail to destroy resume checksum.')\n \n if not self.enclosure.verify_invalid_resume_checksum():\n self.result(FAIL, 'Fail to check invalid resume checksum.')\n \n if not self.enclosure.verify_flush_fru_data():\n self.result(FAIL, 'Fail to flush fru data.')\n \n if not self.enclosure.verify_invalid_fru_data_checksum():\n self.result(FAIL, 'Fail to check invalid fru data checksum.')\n \n if not self.enclosure.verify_refresh_fru_data_ffid():\n self.result(FAIL, 'Fail to refresh fru data.')\n \n if not self.enclosure.verify_resume_checksum():\n self.result(FAIL, 'Fail to verify resume checksum.')\n \n if not self.enclosure.verify_fru_data_checksum():\n self.result(FAIL, 'Fail to verify fru data checksum.')\n \n def deconfig(self):\n # Check if deconfig condition is ready\n if self.list_resume == []:\n self.result(FAIL, 'FFID list is empty. 
Fail to set FFID in case deconfig.')\n return CBaseCase.deconfig(self)\n \n # Recover orignial FFID\n if not self.enclosure.action_set_resume('Chassis Resume', 'Family/FRU ID', self.list_resume):\n self.result(FAIL, 'Fail to set FFID in case deconfig.')\n \n CBaseCase.deconfig(self)\n","sub_path":"case/regression/bmc/T4604_bmc_DestroyChassisResumeChecksum.py","file_name":"T4604_bmc_DestroyChassisResumeChecksum.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"64432270","text":"import os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport codecs\r\nfrom keras.utils import to_categorical\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\r\nos.environ['KERAS_BACKEND ']='tensorflow'\r\n\r\ndef load_wsd_train_x():\r\n wsd_train_x = codecs.open('40699_train_data', mode = 'r', encoding= 'utf-8')\r\n line = wsd_train_x.readline()\r\n list1 = []\r\n while line:\r\n a = line.split()\r\n b = a[3:]\r\n list1.append(b)\r\n line = wsd_train_x.readline()\r\n return np.array(list1)\r\n wsd_train_x.close()\r\n\r\n\r\ndef load_wsd_test_x():\r\n wsd_test_x = codecs.open('40699_test_data', mode = 'r', encoding= 'utf-8')\r\n line = wsd_test_x.readline()\r\n list1 = []\r\n while line:\r\n a = line.split()\r\n b = a[3:]\r\n list1.append(b)\r\n line = wsd_test_x.readline()\r\n return np.array(list1)\r\n wsd_test_x.close()\r\n\r\n\r\ndef load_wsd_train_y():\r\n wsd_train_y = codecs.open('40699_train_target', mode = 'r', encoding = 'utf-8')\r\n line = wsd_train_y.readline()\r\n list1 = []\r\n while line:\r\n a = line.split()\r\n b = a[1:2]\r\n list1.append(b)\r\n line = wsd_train_y.readline()\r\n return (np.array(list1)).reshape(50,)\r\n wsd_train_y.close()\r\n\r\n\r\n\r\ndef load_wsd_test_y():\r\n wsd_test_y = codecs.open('40699_test_target', mode = 'r', encoding = 'utf-8')\r\n line = wsd_test_y.readline()\r\n list1 = []\r\n while line:\r\n a = line.split()\r\n b = a[1:2]\r\n list1.append(b)\r\n line = wsd_test_y.readline()\r\n return (np.array(list1)).reshape(50,)\r\n wsd_test_y.close()\r\n\r\n\r\nb = np.zeros(50)\r\n\r\nwsd_train_x = load_wsd_train_x()\r\nwsd_test_x = load_wsd_test_x()\r\n\r\nwsd_train_y = load_wsd_train_y()\r\nwsd_train_y = to_categorical(wsd_train_y)\r\n#wsd_train_y = np.c_[wsd_train_y, b]\r\n\r\nwsd_test_y = load_wsd_test_y()\r\nwsd_test_y = to_categorical(wsd_test_y)\r\n#wsd_test_y = np.c_[wsd_test_y, b]\r\n\r\nmax_epoch = 100\r\ntrain_size = wsd_train_x.shape[0]\r\nbatch_size = 10\r\nn_batch = train_size // batch_size\r\n\r\n\r\nlayer_num = 2\r\ngogi_num = 5\r\n\r\nif layer_num == 3:\r\n\r\n x = tf.placeholder(tf.float32, [None, 768])\r\n y = tf.placeholder(tf.float32, [None, gogi_num])\r\n\r\n W1 = tf.Variable(tf.zeros([768, 50]))\r\n b1 = tf.Variable(tf.zeros([50]))\r\n L1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)\r\n\r\n W2 = tf.Variable(tf.zeros([50, gogi_num]))\r\n b2 = tf.Variable(tf.zeros[gogi_num])\r\n\r\n predict = tf.nn.softmax(tf.matmul(L1, W2) + b2)\r\n\r\n\r\nif layer_num == 2:\r\n\r\n x = tf.placeholder(tf.float32, [None, 768])\r\n y = tf.placeholder(tf.float32, [None, gogi_num])\r\n\r\n W = tf.Variable(tf.zeros([768, gogi_num]))\r\n b = tf.Variable(tf.zeros([gogi_num]))\r\n\r\n predict = tf.nn.softmax(tf.matmul(x, W) + b)\r\n\r\n\r\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=predict))\r\ntrain_step = tf.train.AdamOptimizer().minimize(loss)\r\n\r\ninit = tf.global_variables_initializer()\r\n\r\ncorrect_predict = tf.equal(tf.argmax(y, 1), 
tf.argmax(predict, 1))\r\naccuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))\r\n\r\n\r\nsaver = tf.train.Saver()\r\n\r\n\r\nwith tf.Session() as sess:\r\n sess.run(init)\r\n\r\n for epoch in range(max_epoch):\r\n batch_mask = np.random.choice(train_size, batch_size)\r\n for batch in range(n_batch):\r\n\r\n x_batch = wsd_train_x[batch_mask]\r\n t_batch = wsd_train_y[batch_mask]\r\n\r\n sess.run(train_step, feed_dict={x: x_batch, y: t_batch})\r\n acc = sess.run(accuracy, feed_dict={x:wsd_test_x, y:wsd_test_y})\r\n saver.save(sess, 'model/40699_wsd_model.ckpt')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"01_40699_wsd_train.py","file_name":"01_40699_wsd_train.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"61942071","text":"#!/usr/bin/env python3\n\nimport time\n# from neopixel import *\nimport argparse\n\ndef Color(red, green, blue, white = 0):\n\t\"\"\"Convert the provided red, green, blue color to a 24-bit color value.\n\tEach color component should be a value 0-255 where 0 is the lowest intensity\n\tand 255 is the highest intensity.\n\t\"\"\"\n\treturn (white << 24) | (red << 16)| (green << 8) | blue\n\n# Morning stages configuration\nBLUE = Color(0, 0, 255)\nORANGE = Color(255, 165, 0)\nGREEN = Color(0, 255, 0)\n\nclass Morning_Prep_Stage:\n def __init__(self, color, duration):\n self.color = color\n self.duration = duration \n\n# Main program logic follows:\nif __name__ == '__main__':\n stages = dict(\n wakeUp = Morning_Prep_Stage(BLUE, 15),\n eatAndDress = Morning_Prep_Stage(ORANGE, 30),\n wipeFaceBrushTeeth = Morning_Prep_Stage(GREEN, 15)\n )\n\n for key in stages:\n print(key)\n\n for item in stages.items():\n print(item)\n","sub_path":"python/examples/school_mornings_dicts.py","file_name":"school_mornings_dicts.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338995637","text":"from ftw import bumblebee\nfrom opengever.bumblebee import is_auto_refresh_enabled\nfrom Products.CMFPlone.browser.jsvariables import JSVariables\n\n\nTEMPLATE = u\"{other_vars}\\\nvar bumblebee_notification_url = '{bumblebee_notification_url}';\"\n\n\nclass GeverJSVariables(JSVariables):\n \"\"\"Expose variables to javascript\n\n For instance we need bumblebee's notifications URL to be\n available in the javascript part. 
GEVER knows how to build\n bumblebee URLs, so we expose the generated URL to javascript.\n \"\"\"\n\n def __call__(self, *args, **kwargs):\n other_vars = super(GeverJSVariables, self).__call__(*args, **kwargs)\n notification_url = bumblebee.get_service_v3().get_notifications_url()\n\n if not is_auto_refresh_enabled():\n return other_vars\n\n return TEMPLATE.format(\n other_vars=other_vars,\n bumblebee_notification_url=notification_url)\n","sub_path":"opengever/base/browser/jsvariables.py","file_name":"jsvariables.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425101259","text":"# Copyright (c) 2011, Enthought, Ltd.\n# Author: Pietro Berkes \n# License: Modified BSD license (2-clause)\n\n\"\"\"Defines objects to create and manipulate raw annotations.\"\"\"\n\nfrom traits.has_traits import HasStrictTraits, cached_property\nfrom traits.trait_numeric import Array\nfrom traits.trait_types import Str, List, Int\nfrom traits.traits import Property\n\nimport numpy as np\nfrom pyanno.util import MISSING_VALUE, PyannoValueError\n\n\ndef _robust_isnan(x):\n res = False\n\n # workaround for the fact that np.isnan is not defined for non-numerical\n # type, e.g. strings\n try:\n res = np.isnan(x)\n except NotImplementedError:\n pass\n\n return res\n\n\ndef _is_nan_in_list(lst):\n return np.any([_robust_isnan(el) for el in lst])\n\n\nclass AnnotationsContainer(HasStrictTraits):\n \"\"\"Translate from general annotations files and arrays to pyAnno's format.\n\n This class exposes a few methods to import data from files and arrays, and\n converts them to pyAnno's format:\n\n * annotations are 2D integer arrays; rows index items, and columns\n annotators\n\n * label classes are numbered 0 to :attr:`nclasses`-1 . The attribute\n :attr:`labels` defines a mapping from label tokens to label classes\n\n * missing values are defined as :attr:`pyanno.util.MISSING_VALUE`. 
The\n attribute :attr:`missing_values` contains the missing values tokens\n found in the original, raw data\n\n The converted data can be accessed through the :attr:`annotations` property.\n\n The `AnnotationsContainer` is also used as the format to store annotations\n in :class:`~pyanno.database.PyannoDatabase` objects.\n \"\"\"\n\n DEFAULT_MISSING_VALUES_STR = ['-1', 'NA', 'None', '*']\n DEFAULT_MISSING_VALUES_NUM = [-1, np.nan, None]\n DEFAULT_MISSING_VALUES_ALL = (DEFAULT_MISSING_VALUES_STR +\n DEFAULT_MISSING_VALUES_NUM)\n\n #: raw annotations, as they are imported from file or array\n raw_annotations = List(List)\n\n #: name of file or array from which the annotations were imported\n name = Str\n\n #: list of all labels found in file/array\n labels = List\n\n #: labels corresponding to a missing value\n missing_values = List\n\n #: number of classes found in the annotations\n nclasses = Property(Int, depends_on='labels')\n def _get_nclasses(self):\n return len(self.labels)\n\n #: number of annotators\n nannotators = Property(Int, depends_on='raw_annotations')\n def _get_nannotators(self):\n return len(self.raw_annotations[0])\n\n #: number of annotations\n nitems = Property(Int, depends_on='raw_annotations')\n def _get_nitems(self):\n return len(self.raw_annotations)\n\n #: annotations in pyAnno format\n annotations = Property(Array, depends_on='raw_annotations')\n\n @cached_property\n def _get_annotations(self):\n nitems, nannotators = len(self.raw_annotations), self.nannotators\n anno = np.empty((nitems, nannotators), dtype=int)\n\n # build map from labels and missing values to annotation values\n raw2val = dict(list(zip(self.labels, list(range(self.nclasses)))))\n raw2val.update([(mv, MISSING_VALUE) for mv in self.missing_values])\n\n # translate\n nan_in_missing_values = _is_nan_in_list(self.missing_values)\n for i, row in enumerate(self.raw_annotations):\n for j, lbl in enumerate(row):\n if nan_in_missing_values and _robust_isnan(lbl):\n # workaround for the fact that np.nan cannot be used as\n # the key to a dictionary, since np.nan != np.nan\n anno[i,j] = MISSING_VALUE\n else:\n anno[i,j] = raw2val[lbl]\n\n return anno\n\n\n @staticmethod\n def _from_generator(rows_generator, missing_values, name=''):\n\n missing_set = set(missing_values)\n labels_set = set()\n\n raw_annotations = []\n nannotators = None\n for n, row in enumerate(rows_generator):\n\n # verify that number of lines is consistent in the whole file\n if nannotators is None: nannotators = len(row)\n else:\n if len(row) != nannotators:\n raise PyannoValueError(\n 'File has inconsistent number of entries '\n 'on separate lines (line {})'.format(n))\n\n raw_annotations.append(row)\n labels_set.update(row)\n\n # remove missing values from set of labels\n all_labels = sorted(list(labels_set - missing_set))\n missing_values = sorted(list(missing_set & labels_set))\n\n # workaround for np.nan != np.nan, so intersection does not work\n if _is_nan_in_list(all_labels):\n # uses fact that np.nan < x, for every x\n all_labels = all_labels[1:]\n missing_values.insert(0, np.nan)\n\n # create annotations object\n anno = AnnotationsContainer(\n raw_annotations = raw_annotations,\n labels = all_labels,\n missing_values = missing_values,\n name = name\n )\n\n return anno\n\n @staticmethod\n def _from_file_object(fobj, missing_values=None, name=''):\n \"\"\"Useful for testing, as it can be called using a StringIO object.\n \"\"\"\n\n if missing_values is None:\n missing_values = AnnotationsContainer.DEFAULT_MISSING_VALUES_STR\n\n # 
generator for rows of file-like object\n def file_row_generator():\n for line in fobj.readlines():\n # remove commas and split in individual tokens\n line = line.strip().replace(',', ' ')\n\n # ignore empty lines\n if len(line) == 0: continue\n\n labels = line.split()\n yield labels\n\n return AnnotationsContainer._from_generator(file_row_generator(),\n missing_values,\n name=name)\n\n\n @staticmethod\n def from_file(filename, missing_values=None):\n \"\"\"Load annotations from a file.\n\n The file is a text file with a columns separated by spaces and/or\n commas, and rows on different lines.\n\n Arguments\n ---------\n filename : string\n File name\n\n missing_values : list\n List of labels that are considered missing values.\n Default is :attr:`DEFAULT_MISSING_VALUES_STR`\n \"\"\"\n\n if missing_values is None:\n missing_values = AnnotationsContainer.DEFAULT_MISSING_VALUES_STR\n\n with open(filename) as fh:\n anno = AnnotationsContainer._from_file_object(fh,\n missing_values=missing_values,\n name=filename)\n\n return anno\n\n\n @staticmethod\n def from_array(x, missing_values=None, name=''):\n \"\"\"Create an annotations object from an array or list-of-lists.\n\n Arguments\n ---------\n x : ndarray or list-of-lists\n Array or list-of-lists containing numerical or string annotations\n\n missing_values : list\n List of values that are considered missing values.\n Default is :attr:`DEFAULT_MISSING_VALUES_ALL`\n\n name : string\n Name of the annotations (for user interaction and used as key in\n databases).\n \"\"\"\n\n if missing_values is None:\n missing_values = AnnotationsContainer.DEFAULT_MISSING_VALUES_ALL\n\n # generator for array objects\n def array_rows_generator():\n for row in x:\n yield list(row)\n\n return AnnotationsContainer._from_generator(array_rows_generator(),\n missing_values, name=name)\n\n\n def save_to(self, filename, set_name=False):\n \"\"\"Save raw annotations to file.\n\n Arguments\n ---------\n filename : string\n File name\n\n set_name : bool\n Set the :attr:`name` of the annotation container to the file name\n \"\"\"\n if set_name:\n self.name = filename\n with open(filename, 'w') as f:\n f.writelines(\n (' '.join(map(str, row))+'\\n'\n for row in self.raw_annotations)\n )\n\n\ndef load_annotations(filename, missing_values=None):\n \"\"\"Load annotations from file.\n\n The file is a text file with a columns separated by spaces and/or\n commas, and rows on different lines.\n\n Arguments\n ---------\n filename : string\n File name\n\n missing_values : list\n List of labels that are considered missing values.\n Default is\n :attr:`~pyanno.AnnotationsContainer.DEFAULT_MISSING_VALUES_STR`\n\n \"\"\"\n anno = AnnotationsContainer.from_file(filename,\n missing_values=missing_values)\n return anno.annotations\n","sub_path":"pyanno/annotations.py","file_name":"annotations.py","file_ext":"py","file_size_in_byte":8975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154513748","text":"from asyncio import gather\nfrom collections import namedtuple\nfrom logging import getLogger\n\nfrom discord.ext.commands import (\n Cog,\n bot_has_permissions,\n group,\n guild_only,\n has_permissions,\n)\n\nfrom ..utils import maybe_send\n\nlogger = getLogger(__name__)\n_perms = namedtuple(\"perms\", [\"send_messages\", \"add_reactions\"])\n\n\ndef is_public(channel):\n \"\"\"\n Tests if a given text channel is public.\n\n Args:\n channel (discord.TextChannel: Channel to check.\n\n Returns:\n bool: `True` when the channel is public, 
`False` otherwise.\n    \"\"\"\n    role = channel.guild.default_role\n    return channel.overwrites_for(role).read_messages is not False\n\n\nclass Stop(Cog):\n    \"\"\"\n    Utilities for locking down text channels.\n    \"\"\"\n\n    def __init__(self):\n        self._perm_cache = {}\n\n    async def _lock_channel(self, channel):\n        \"\"\"\n        Locks a text channel, denying Send Messages and Add Reactions.\n\n        Args:\n            channel (discord.TextChannel): Channel to lock.\n        \"\"\"\n        role = channel.guild.default_role\n\n        await maybe_send(\n            channel, \"Sending messages to this channel has been restricted.\"\n        )\n\n        overwrite = channel.overwrites_for(role)\n        self._perm_cache.setdefault(\n            channel.id, _perms(overwrite.send_messages, overwrite.add_reactions)\n        )\n        overwrite.send_messages = False\n        overwrite.add_reactions = False\n        await channel.set_permissions(role, overwrite=overwrite)\n\n    async def _unlock_channel(self, channel):\n        \"\"\"\n        Unlock a text channel, restoring Send Messages and\n        Add Reactions to their previous values.\n\n        Args:\n            channel (discord.TextChannel): Channel to unlock.\n        \"\"\"\n        role = channel.guild.default_role\n        overwrite = channel.overwrites_for(role)\n\n        perms = self._perm_cache.pop(channel.id, _perms(None, None))\n        overwrite.send_messages = perms.send_messages\n        overwrite.add_reactions = perms.add_reactions\n\n        overwrite = (\n            overwrite if not overwrite.is_empty() else None\n        )  # Clear overwrite if empty\n        await channel.set_permissions(role, overwrite=overwrite)\n\n        await maybe_send(\n            channel, \"Sending messages to this channel has been unrestricted.\"\n        )\n\n    @group()\n    @guild_only()\n    @has_permissions(manage_channels=True)\n    @bot_has_permissions(manage_channels=True)\n    async def stop(self, ctx):\n        \"\"\"\n        Commands to (un)restrict access to a channel.\n\n        Required context: Server\n\n        Required permissions:\n        - Manage Channels\n\n        Required bot permissions:\n        - Manage Channels\n        \"\"\"\n        if ctx.invoked_subcommand is None:\n            await maybe_send(\n                ctx, 'Invalid subcommand passed. Possible options are \"on\" and \"off\".'\n            )\n\n    @stop.command(\"on\")\n    async def _on_single(self, ctx):\n        \"\"\"\n        Restrict messaging and reactions to a channel for everyone.\n        Note that the user issuing this command should probably have some form of way to\n        still write to the channel, or they will need to release the lock manually.\n        \"\"\"\n        channel = ctx.channel\n        await self._lock_channel(channel)\n\n    @stop.command(\"off\")\n    async def _off_single(self, ctx):\n        \"\"\"\n        Re-open a channel after it was locked down.\n        This restores the default role's modified permissions,\n        i.e. Send Messages and Add Reactions, to their previous values.\n        If the channel was not previously locked down, nothing happens.\n        \"\"\"\n        channel = ctx.channel\n\n        await self._unlock_channel(channel)\n\n    @stop.group()\n    async def all(self, ctx):\n        \"\"\"\n        Commands for (un)locking all public channels at once.\n        \"\"\"\n        if ctx.invoked_subcommand is None:\n            await maybe_send(\n                ctx, 'Invalid subcommand passed. Possible options are \"on\" and \"off\".'\n            )\n\n    @all.command(\"on\")\n    async def _on_all(self, ctx):\n        \"\"\"\n        Works like regular \"stop on\", except it locks all\n        public channels instead of just the current one.\n        \"\"\"\n        # Public channel => @everyone is not denied read perms\n        coros = (\n            self._lock_channel(channel)\n            for channel in ctx.guild.text_channels\n            if is_public(channel)\n        )\n        await gather(*coros)\n\n    @all.command(\"off\")\n    async def _off_all(self, ctx):\n        \"\"\"\n        Works like regular \"stop off\", except that it unlocks all\n        locked public channels at once.\n        \"\"\"\n        public_channels = {\n            channel.id: channel\n            for channel in ctx.guild.text_channels\n            if is_public(channel)\n        }\n        # Select all locked channels in current guild using set intersection\n        locked_channel_ids = self._perm_cache.keys() & public_channels.keys()\n\n        coros = (\n            self._unlock_channel(public_channels[channel_id])\n            for channel_id in locked_channel_ids\n        )\n        await gather(*coros)\n","sub_path":"src/cardinal/cogs/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"546869480","text":"import sys\nimport os\nfrom os import path as osp\nimport pprint\nimport subprocess\nfrom collections import defaultdict\ndef addPath(path):\n    if path not in sys.path:\n        sys.path.append(path) \nimdbPath = osp.abspath(osp.join(osp.dirname(__file__),'../../imdb'))\nhome = osp.abspath(osp.join(osp.dirname(__file__),'../..'))\nlib_vos_path = osp.abspath(osp.join(osp.dirname(__file__),'../../lib_vos'))\nlib_path = osp.abspath(osp.join(osp.dirname(__file__),'../../lib'))\ntool_path = osp.abspath(osp.join(osp.dirname(__file__),'../../tools'))\naddPath(lib_vos_path)\naddPath(lib_path)\naddPath(home)\naddPath(imdbPath)\nfrom core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg\nfrom vos_modeling import vos_model_builder\nfrom vos import davis_db\nfrom utils.timer import Timer\nimport torch\nimport torch.nn as nn\nimport nn as mynn\nfrom torch.autograd import Variable\nfrom core.test import im_detect_all\nimport distutils.util\nimport utils.misc as misc_utils\nimport utils.net as net_utils\nimport utils.vis as vis_utils\n#import datasets.dummy_datasets as datasets\nfrom vos import davis_db as datasets\nimport argparse\n\ndef parse_args():\n    \"\"\"Parse in command line arguments\"\"\"\n    parser = argparse.ArgumentParser(description='Demonstrate mask-rcnn results')\n    parser.add_argument(\n        '--dataset', required=True,\n        help='training dataset')\n\n    parser.add_argument(\n        '--cfg', dest='cfg_file', required=True,\n        help='optional config file')\n    parser.add_argument(\n        '--set', dest='set_cfgs',\n        help='set config keys, will overwrite config in the cfg_file',\n        default=[], nargs='+')\n\n    parser.add_argument(\n        '--no_cuda', dest='cuda', help='whether use CUDA', action='store_false')\n\n    parser.add_argument('--load_ckpt', help='path of checkpoint to load')\n\n    parser.add_argument('--no_overwrite',help='not overwrite output', action='store_false')\n    \n    parser.add_argument(\n        '--image_dir',\n        help='directory to load images for demo')\n    parser.add_argument(\n        '--images', nargs='+',\n        help='images to infer. 
Must not use with --image_dir')\n parser.add_argument(\n '--output_dir',\n help='directory to save demo results',\n default=\"./Output/\")\n parser.add_argument(\n '--merge_pdfs', type=distutils.util.strtobool, default=True)\n\n args = parser.parse_args()\n\n return args\n\nif __name__=='__main__':\n args = parse_args()\n print('Called with args:')\n print(args)\n\n if not torch.cuda.is_available():\n sys.exit(\"Need a CUDA device to run the code.\")\n\n if args.cuda or cfg.NUM_GPUS > 0:\n cfg.CUDA = True\n else:\n raise ValueError(\"Need Cuda device to run !\")\n\n if args.dataset.startswith(\"coco\"):\n dataset = datasets.get_coco_dataset()\n cfg.MODEL.NUM_CLASSES = len(dataset.classes)\n print('cfg.MODEL.NUM_CLASSES:',cfg.MODEL.NUM_CLASSES)\n elif args.dataset.startswith(\"davis\"):\n cfg.MODEL.NUM_CLASSES = 81\n # Load train data, which has the corresponding global id.\n cfg.TRAIN.DATASETS = ('davis_train',)\n dataset = davis_db.DAVIS_imdb(db_name=\"DAVIS\", split = 'train',cls_mapper = None, load_flow=False)\n else:\n raise ValueError('Unexpected dataset name: {}'.format(args.dataset))\n\n #Add unknow class type if necessary.\n if cfg.MODEL.ADD_UNKNOWN_CLASS is True:\n cfg.MODEL.NUM_CLASSES +=1\n\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n \n if cfg.MODEL.IDENTITY_TRAINING and cfg.MODEL.IDENTITY_REPLACE_CLASS:\n cfg.MODEL.NUM_CLASSES = 145\n cfg.MODEL.IDENTITY_TRAINING = False\n cfg.MODEL.ADD_UNKNOWN_CLASS = False\n\n #Add unknow class type if necessary.\n if cfg.MODEL.IDENTITY_TRAINING:\n cfg.MODEL.TOTAL_INSTANCE_NUM = 145\n if cfg.MODEL.ADD_UNKNOWN_CLASS is True:\n cfg.MODEL.NUM_CLASSES +=1 \n\n \n assert bool(args.load_ckpt)\n assert_and_infer_cfg()\n maskRCNN = vos_model_builder.Generalized_VOS_RCNN()\n \n if args.cuda:\n maskRCNN.cuda()\n \n if args.load_ckpt:\n load_name = args.load_ckpt\n print(\"loading checkpoint %s\" % (load_name))\n checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)\n net_utils.load_ckpt_no_mapping(maskRCNN, checkpoint['model'])\n \n maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],minibatch=True, device_ids=[0])\n \n maskRCNN.eval()\n db = davis_db.DAVIS_imdb(db_name=\"DAVIS\", split = 'train', cls_mapper = None)\n \n for seq_idx in range(db.get_num_sequence()):\n db.set_to_sequence(seq_idx)\n seq_name = db.get_current_seq_name()\n cur_output_dir = osp.join(args.output_dir,seq_name)\n if args.no_overwrite is True and osp.exists(osp.join(cur_output_dir,'results.pdf')):\n continue\n if not osp.isdir(cur_output_dir):\n os.makedirs(cur_output_dir)\n assert(cur_output_dir)\n for idx in range(db.get_current_seq_length()):\n im = db.get_image_cv2(idx)\n assert im is not None\n timers = defaultdict(Timer)\n cls_boxes, cls_segms, cls_keyps = im_detect_all(maskRCNN, im, timers=timers)\n im_name = '%03d-%03d'%(seq_idx,idx)\n print(osp.join(seq_name,im_name))\n vis_utils.vis_one_image(\n im[:, :, ::-1], # BGR -> RGB for visualization\n im_name,\n cur_output_dir,\n cls_boxes,\n cls_segms,\n cls_keyps,\n dataset=dataset,\n box_alpha=0.3,\n show_class=True,\n thresh=0.7,\n kp_thresh=2\n )\n\n if args.merge_pdfs:\n merge_out_path = '{}/results.pdf'.format(cur_output_dir)\n if os.path.exists(merge_out_path):\n os.remove(merge_out_path)\n command = \"pdfunite {}/*.pdf {}\".format(cur_output_dir,\n merge_out_path)\n subprocess.call(command, shell=True)\n 
\n","sub_path":"lib_vos/tools/predict_davis.py","file_name":"predict_davis.py","file_ext":"py","file_size_in_byte":5880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567352077","text":"from enum import Enum\nfrom core.project_base import Section\n\n\nclass LIDType(Enum):\n # BC for bio-retention cell; RG for rain garden; GR for green roof; PP for porous pavement;\n # IT for infiltration trench; RB for rain barrel;RD for rooftup disconnect; VS for vegetative swale.\n BC = 1\n RG = 2\n GR = 3\n IT = 4\n PP = 5\n RB = 6\n RD = 7\n VS = 8\n\n\nclass LIDControl(Section):\n \"\"\"Defines scale-independent LID controls that can be deployed within subcatchments\"\"\"\n\n LineTypes = (\n (\"has_surface_layer\",\n \"SURFACE\",\n \"surface_layer_storage_depth\",\n \"surface_layer_vegetative_cover_fraction\",\n \"surface_layer_surface_roughness\",\n \"surface_layer_surface_slope\",\n \"surface_layer_swale_side_slope\"),\n (\"has_soil_layer\",\n \"SOIL\",\n \"soil_layer_thickness\",\n \"soil_layer_porosity\",\n \"soil_layer_field_capacity\",\n \"soil_layer_wilting_point\",\n \"soil_layer_conductivity\",\n \"soil_layer_conductivity_slope\",\n \"soil_layer_suction_head\"),\n (\"has_pavement_layer\",\n \"PAVEMENT\",\n \"pavement_layer_thickness\",\n \"pavement_layer_void_ratio\",\n \"pavement_layer_impervious_surface_fraction\",\n \"pavement_layer_permeability\",\n \"pavement_layer_clogging_factor\"),\n (\"has_storage_layer\",\n \"STORAGE\",\n \"storage_layer_height\",\n \"storage_layer_void_ratio\",\n \"storage_layer_filtration_rate\",\n \"storage_layer_clogging_factor\"),\n (\"has_underdrain_system\",\n \"DRAIN\",\n \"drain_coefficient\",\n \"drain_exponent\",\n \"drain_offset_height\",\n \"drain_delay\"),\n (\"has_drainmat_system\",\n \"DRAINMAT\",\n \"drainmat_thickness\",\n \"drainmat_void_fraction\",\n \"drainmat_roughness\"))\n\n def __init__(self):\n Section.__init__(self)\n\n ## Name used to identify the particular LID control\n self.name = \"Unnamed\"\n\n ## Generic type of LID being defined\n self.lid_type = LIDType.BC\n\n ## does lid have surface layer\n self.has_surface_layer = False\n\n ## does lid have pavement layer\n self.has_pavement_layer = False\n\n ## does lid have soil layer\n self.has_soil_layer = False\n\n ## does lid have storage layer\n self.has_storage_layer = False\n\n ## does lid have underdrain system\n self.has_underdrain_system = False\n\n ## does lid have drainmat system\n self.has_drainmat_system = False\n\n ## When confining walls or berms are present this is the maximum depth to\n ## which water can pond above the surface of the unit before overflow\n ## occurs (in inches or mm). For LIDs that experience overland flow it is\n ## the height of any surface depression storage. 
For swales, it is the height\n ## of its trapezoidal cross section.\n self.surface_layer_storage_depth = \"0.0\"\n\n ## Fraction of the storage area above the surface that is filled with vegetation\n self.surface_layer_vegetative_cover_fraction = \"0.0\"\n\n ## Manning's n for overland flow over the surface of porous pavement or a vegetative swale\n self.surface_layer_surface_roughness = \"0.0\"\n\n ## Slope of porous pavement surface or vegetative swale\n self.surface_layer_surface_slope = \"0.0\"\n\n ## Slope (run over rise) of the side walls of a vegetative swale's cross section\n self.surface_layer_swale_side_slope = \"0.0\"\n\n ## Thickness of the pavement layer\n self.pavement_layer_thickness = \"0.0\"\n\n ## Volume of void space relative to the volume of solids in the pavement\n self.pavement_layer_void_ratio = \"0.0\"\n\n ## Ratio of impervious paver material to total area for modular systems\n self.pavement_layer_impervious_surface_fraction = \"0.0\"\n\n ## Permeability of the concrete or asphalt used in continuous systems or hydraulic\n ## conductivity of the fill material (gravel or sand) used in modular systems\n self.pavement_layer_permeability = \"0.0\"\n\n ## Number of pavement layer void volumes of runoff treated it takes to completely clog the pavement\n self.pavement_layer_clogging_factor = \"0.0\"\n\n ## Thickness of the soil layer\n self.soil_layer_thickness = \"0.0\"\n\n ## Volume of pore space relative to total volume of soil\n self.soil_layer_porosity = \"0.0\"\n\n ## Volume of pore water relative to total volume after the soil has been allowed to drain fully\n self.soil_layer_field_capacity = \"0.0\"\n\n ## Volume of pore water relative to total volume for a well dried soil where only bound water remains\n self.soil_layer_wilting_point = \"0.0\"\n\n ## Hydraulic conductivity for the fully saturated soil\n self.soil_layer_conductivity = \"0.0\"\n\n ## Slope of the curve of log(conductivity) versus soil moisture content\n self.soil_layer_conductivity_slope = \"0.0\"\n\n ## Average value of soil capillary suction along the wetting front\n self.soil_layer_suction_head = \"0.0\"\n\n ## Height of a rain barrel or thickness of a gravel layer\n self.storage_layer_height = \"0.0\"\n\n ## Volume of void space relative to the volume of solids in the layer\n self.storage_layer_void_ratio = \"0.0\"\n\n ## Maximum rate at which water can flow out the bottom of the layer after it is first constructed\n self.storage_layer_filtration_rate = \"0.0\"\n\n ## Total volume of treated runoff it takes to completely clog the bottom of the layer divided by the\n ## void volume of the layer\n self.storage_layer_clogging_factor = \"0.0\"\n\n ## Coefficient that determines the rate of flow through the underdrain as a function of height of\n ## stored water above the drain height\n self.drain_coefficient = \"0.0\"\n\n ## Exponent that determines the rate of flow through the underdrain as a function of height of\n ## stored water above the drain height\n self.drain_exponent = \"0.0\"\n\n ## Height of any underdrain piping above the bottom of a storage layer or rain barrel\n self.drain_offset_height = \"0.0\"\n\n ## Number of dry weather hours that must elapse before the drain line in a rain barrel is opened\n self.drain_delay = \"0.0\"\n\n ## Thickness of the drainage mat (inches or mm)\n self.drainmat_thickness = \"0.0\"\n\n ## Ratio of void volume to total volume in the mat\n self.drainmat_void_fraction = \"0.5\"\n\n ## Manning's n constant used to compute the horizontal flow rate of drained 
water through the mat\n        self.drainmat_roughness = \"0.1\"\n\n\n","sub_path":"src/core/swmm/hydrology/lidcontrol.py","file_name":"lidcontrol.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"127202599","text":"import matplotlib.pyplot as plt\nimport netCDF4\nimport numpy as np\nimport matplotlib.gridspec as gridspec\nimport math\nfrom matplotlib import colors\n\ndeg = unichr(176)\ndelta = unichr(916)\nk_B = 1.38064852e-23\nN_A = 6.02214086e+23\nR = 8.3144598\nspecies_list = ['atomic_oxygen', 'ozone', 'atomic_hydrogen', 'carbon_dioxide', 'carbon_monoxide', 'temperature', 'density']\nsymbol_list = ['O', 'O3', 'H', 'CO2', 'CO', 'T', 'n']\nunits_list = ['ppmv', '$\\mathregular{cm^{-3}}$', 'K']\n\nfname_uni = netCDF4.Dataset('/nfs/a328/eecwk/earth_system_grid/ccsm4_monthly_ave/zonal_means/f.e20.FXSD.f19_f19.001.cam.h0.2000-01.nc', 'r', format='NETCDF4')\nlats = fname_uni.variables['lat'][:]\nlons = fname_uni.variables['lon'][:]\nfname_uni.close()\n\ndef calc_z3_zon_mer_t_av(levs): \n    if levs == 88:\n        fname = netCDF4.Dataset('/nfs/a265/earfw/SD_WACCM4/john_ca_paper_JDmif_nad4cad7.cam2.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    if levs == 145:\n        fname = netCDF4.Dataset('/nfs/a328/eecwk/earth_system_grid/ccsm4_monthly_ave/f.e20.FXSD.f19_f19.001.cam.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    z3 = np.zeros([1,levs,96,144])\n    z3[:] = fname.variables['Z3'][:]*(1.e-3)\n    z3_zon_av = np.mean(z3[:], axis=3)\n    z3_zon_mer_av = np.mean(z3_zon_av[:], axis=2)\n    z3_zon_mer_t_av = np.mean(z3_zon_mer_av[:], axis=0) \n    fname.close()\n    return z3_zon_mer_t_av\n\ndef calc_z3_zon_t_av(levs): \n    if levs == 88:\n        fname = netCDF4.Dataset('/nfs/a265/earfw/SD_WACCM4/john_ca_paper_JDmif_nad4cad7.cam2.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    if levs == 145:\n        fname = netCDF4.Dataset('/nfs/a328/eecwk/earth_system_grid/ccsm4_monthly_ave/f.e20.FXSD.f19_f19.001.cam.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    z3 = np.zeros([1,levs,96,144])\n    z3[:] = fname.variables['Z3'][:]*(1.e-3)\n    z3_zon_av = np.mean(z3[:], axis=3)\n    z3_zon_t_av = np.mean(z3_zon_av[:], axis=0) \n    fname.close()\n    return z3_zon_t_av\n\ndef calc_cos_factor(param, levs, lowlat, highlat):\n    param_weighted = np.zeros(levs) \n    for j in range (0, levs): \n        sig_cos_x = 0\n        sig_cos = 0\n        for k in range (lowlat, highlat):\n            sig_cos_x = sig_cos_x + (math.cos(math.radians(lats[k])) * param[j][k])\n            sig_cos = sig_cos + math.cos(math.radians(lats[k])) \n            if k == (highlat - 1):\n                param_weighted[j] = sig_cos_x / sig_cos\n    return param_weighted\n\ndef calc_species_zon_av(symbol, levs): \n    if levs == 88:\n        fname = netCDF4.Dataset('/nfs/a265/earfw/SD_WACCM4/john_ca_paper_JDmif_nad4cad7.cam2.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    if levs == 145:\n        fname = netCDF4.Dataset('/nfs/a328/eecwk/earth_system_grid/ccsm4_monthly_ave/f.e20.FXSD.f19_f19.001.cam.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4') \n    species_dat = np.zeros([1,levs,96,144])\n    if symbol == 'T':\n        species_dat = fname.variables[symbol][:]\n    elif symbol == 'n':\n        species_dat = fname.variables['T'][:]\n    else:\n        species_dat = fname.variables[symbol][:]*(1.e6) \n    species_tm = np.mean(species_dat[:], axis=0)\n    species_zon_av = np.mean(species_tm[:], axis=2)\n    fname.close()\n    return species_zon_av\n\ndef interp_waccmx_species(z3_1, z3_2, species_2):\n    species_2_int = np.zeros([88,96])\n    species_2_int_rev = np.zeros([88,96])\n    z3_1_rev = z3_1[::-1]\n    z3_2_rev = z3_2[::-1]\n    species_2_rev = species_2[::-1]\n    for i in range(0,88): \n        for j in range(0,96):\n            species_2_int[i,j] = np.interp(z3_1_rev[i], z3_2_rev[:], species_2_rev[:,j]) \n    species_2_int_rev = species_2_int[::-1]\n    return species_2_int_rev\n\ndef calc_diff(param1, param2): \n    diff = np.zeros([88,96])\n    for i in range(0,88):\n        for j in range(0,96): \n            diff[i,j] = ( (param2[i,j] - param1[i,j]) / param1[i,j] ) * 100\n    return diff\n\ndef calc_ratio(param1, param2, levs):\n    ratio = np.zeros([levs,96])\n    for i in range(0,levs):\n        for j in range(0,96): \n            ratio[i,j] = param1[i,j] / param2[i,j]\n    return ratio\n \ndef calc_z3_zon_t_av_weighted(levs, lowlat, highlat):\n    z3_zon_t_av = calc_z3_zon_t_av(levs)\n    z3_zon_t_av_weighted = calc_cos_factor(z3_zon_t_av, levs, lowlat, highlat)\n    return z3_zon_t_av_weighted\n\ndef get_lev(levs):\n    if levs == 88:\n        fname = netCDF4.Dataset('/nfs/a265/earfw/SD_WACCM4/john_ca_paper_JDmif_nad4cad7.cam2.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4')\n    if levs == 145:\n        fname = netCDF4.Dataset('/nfs/a328/eecwk/earth_system_grid/ccsm4_monthly_ave/f.e20.FXSD.f19_f19.001.cam.h0.%s-0%s.nc' %(year, month), 'r', format='NETCDF4') \n    lev = np.zeros([levs])\n    lev = fname.variables['lev'][:]\n    fname.close()\n    return lev\n\ndef calc_density(T, levs, lowlat, highlat):\n    lev = get_lev(levs)\n    T_weighted = calc_cos_factor(T, levs, lowlat, highlat)\n    n_weighted = np.zeros(levs)\n    for i in range(0,levs):\n        n_weighted[i] = (N_A * 100 * lev[i]) / (R * T_weighted[i]) * (1.e-6)\n    return n_weighted\n\ndef calc_profiles(param, levs, lowlat, highlat):\n    if symbol == 'n':\n        param_weighted = calc_density(param, levs, lowlat, highlat)\n    else:\n        param_weighted = calc_cos_factor(param, levs, lowlat, highlat)\n    return param_weighted\n\ndef calc_conc_profiles(param, levs, lowlat, highlat):\n    lev = get_lev(levs)\n    T_zon_t_av = calc_species_zon_av('T', levs)\n    T_zon_t_av_weighted = calc_cos_factor(T_zon_t_av, levs, lowlat, highlat) \n    param_weighted = calc_cos_factor(param, levs, lowlat, highlat)\n    param_weighted_conc = np.zeros(levs) \n    for i in range(0,levs):\n        param_weighted_conc[i] = (param_weighted[i] * 1.e-6 * N_A * 100 * lev[i]) / (R * T_zon_t_av_weighted[i]) * (1.e-6)\n    return param_weighted_conc\n\ndef plot_1d_global(name, config, units, z3, species, color, plot_no):\n    x = species[::-1]\n    y = z3[::-1]\n    plt.plot(x, y, color=color, label=config)\n    plt.xlabel('%s [%s]' %(name, units), fontsize=12)\n    plt.ylabel('Altitude [km]', fontsize=12)\n    plt.ylim(60,160)\n    if name == 'atomic_oxygen':\n        if units == 'ppmv':\n            plt.xlim(0,500000)\n        if units == '$\\mathregular{cm^{-3}}$': \n            plt.xlim(0,8.e+11)\n    if name == 'ozone':\n        plt.xscale('log')\n        if units == 'ppmv':\n            plt.xlim(1.e-8,1.e+1)\n        if units == '$\\mathregular{cm^{-3}}$': \n            plt.xlim(1.e-4,1.e+12)\n    if name == 'atomic_hydrogen':\n        if units == 'ppmv':\n            plt.xlim(0,20)\n        if units == '$\\mathregular{cm^{-3}}$': \n            plt.xlim(0,5.e+8)\n    if name == 'temperature':\n        1==1\n    if name == 'density':\n        plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n        plt.ylim(100,160)\n        plt.xlim(0,3.e+12)\n    if plot_no == 1:\n        plt.legend()\n    return\n\ndef plot_1d_multi(name, config, units, z3, species, lowlat, highlat, color, plot_no):\n    if plot_no > 5:\n        plot_no = plot_no - 6\n    plt.subplot(gs1[plot_no])\n    plt.title('%s%s to %s%s' %(lowlat_no, deg, highlat_no, deg), fontsize=14)\n    x = species[::-1]\n    y = z3[::-1]\n    plt.plot(x, y, color=color, label=config)\n    plt.ylim(60,160)\n    if plot_no == 0:\n        plt.ylabel('Altitude [km]', fontsize=12)\n        
plt.tick_params(labelbottom='off')\n if plot_no == 1:\n plt.tick_params(labelleft='off')\n plt.tick_params(labelbottom='off')\n if plot_no == 2:\n plt.tick_params(labelleft='off')\n plt.tick_params(labelbottom='off')\n if plot_no == 3:\n plt.xlabel('%s [%s]' %(name, units), fontsize=12)\n plt.ylabel('Altitude [km]', fontsize=12)\n if plot_no == 4:\n plt.xlabel('%s [%s]' %(name, units), fontsize=12)\n plt.tick_params(labelleft='off')\n if plot_no == 5:\n plt.xlabel('%s [%s]' %(name, units), fontsize=12)\n plt.tick_params(labelleft='off')\n if name == 'atomic_oxygen':\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n if units == 'ppmv':\n plt.xlim(0,500000)\n if units == '$\\mathregular{cm^{-3}}$': \n plt.xlim(0,8.e+11)\n if name == 'ozone':\n #plt.xscale('log')\n #plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n if units == 'ppmv':\n plt.xlim(1.e-8,1.e+1)\n if units == '$\\mathregular{cm^{-3}}$':\n plt.ylim(77,100)\n #plt.xlim(1.e-4,1.e+12)\n plt.xlim(0,7.e+8)\n if name == 'atomic_hydrogen':\n if units == 'ppmv':\n plt.xlim(0,20)\n if units == '$\\mathregular{cm^{-3}}$': \n plt.xlim(0,5.e+8)\n if name == 'carbon_dioxide':\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n if units == 'ppmv':\n plt.xlim(0,500)\n 1==1\n if units == '$\\mathregular{cm^{-3}}$': \n plt.xlim(0,1.e+13) \n if name == 'temperature':\n plt.xlim(0,800)\n if name == 'density':\n plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))\n plt.ylim(100,160)\n plt.xlim(0,3.e+12)\n if config == 'waccm-x' and plot_no == 2:\n plt.legend(loc=1)\n return\n\ndef plot_1d_ratio(name, config, units, z3, species, lowlat, highlat, color, plot_no):\n if plot_no > 5:\n plot_no = plot_no - 6\n plt.subplot(gs1[plot_no])\n plt.title('%s%s to %s%s' %(lowlat_no, deg, highlat_no, deg), fontsize=14)\n x = species[::-1]\n y = z3[::-1]\n plt.plot(x, y, color=color, label=config)\n if plot_no == 0:\n plt.ylabel('Altitude [km]', fontsize=12)\n plt.tick_params(labelbottom='off')\n if plot_no == 1:\n plt.tick_params(labelleft='off')\n plt.tick_params(labelbottom='off')\n if plot_no == 2:\n plt.tick_params(labelleft='off')\n plt.tick_params(labelbottom='off')\n if plot_no == 3:\n plt.xlabel('%s' %name, fontsize=12)\n plt.ylabel('Altitude [km]', fontsize=12)\n if plot_no == 4:\n plt.xlabel('%s' %name, fontsize=12)\n plt.tick_params(labelleft='off')\n if plot_no == 5:\n plt.xlabel('%s' %name, fontsize=12)\n plt.tick_params(labelleft='off')\n plt.ylim(60,160)\n plt.xlim(0,10)\n if config == 'waccm-x' and plot_no == 2:\n plt.legend(loc=1)\n return\n\ndef plot_2d(name, z3, species, plot_no):\n plt.subplot(gs1[plot_no])\n x, y = np.meshgrid(lats, z3)\n plt.xlabel('Latitude [%s]' %deg, fontsize=12)\n plt.xticks(np.arange(-90,120,30), fontsize=12) \n plt.yticks(np.arange(0,220,20), fontsize=12) \n plt.ylim(90,200)\n plt.axhline(y=waccm_z3[0], color='w', linewidth=1, linestyle=':')\n if name == 'atomic_oxygen':\n diffs = [1.e+3, 175.e+1, 25.e+2, 325.e+1, 4.e+3, 475.e+1, 55.e+2, 625.e+1, 7.e+3, 775.e+1, 85.e+2, 925.e+1, 1.e+4, 175.e+2, 25.e+3, 325.e+2, 4.e+4, 475.e+2, 55.e+3, 625.e+2, 7.e+4, 775.e+2, 85.e+3, 925.e+2, 1.e+5, 175.e+3, 25.e+4, 325.e+3, 4.e+5, 475.e+3, 55.e+4, 625.e+3, 7.e+5, 775.e+3, 85.e+4, 925.e+3, 1.e+6]\n cbar_ticks = [1.e+3, 1.e+4, 1.e+5, 1.e+6]\n plot = 'log'\n elif name == 'ozone':\n diffs = [1.e-8, 25.e-9, 4.e-8, 55.e-9, 7.e-8, 85.e-9, 1.e-7, 25.e-8, 4.e-7, 55.e-8, 7.e-7, 85.e-8, 1.e-6, 25.e-7, 4.e-6, 55.e-7, 7.e-6, 85.e-7, 1.e-5, 25.e-6, 4.e-5, 55.e-6, 7.e-5, 85.e-6, 1.e-4, 25.e-5, 4.e-4, 
55.e-5, 7.e-4, 85.e-5, 1.e-3, 25.e-4, 4.e-3, 55.e-4, 7.e-3, 85.e-4, 1.e-2, 25.e-3, 4.e-2, 55.e-3, 7.e-2, 85.e-3, 1.e-1, 25.e-2, 4.e-1, 55.e-2, 7.e-1, 85.e-2, 1.e+0, 25.e-1, 4.e+0, 55.e-1, 7.e+0, 85.e-1, 1.e+1]\n cbar_ticks = [1.e-8, 1.e-7, 1.e-6, 1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1.e+0, 1.e+1]\n plot = 'log'\n elif name == 'atomic_hydrogen':\n diffs = np.arange(1,91,1)\n cbar_ticks = np.arange(0,100,10)\n plot = 'linear'\n diffs_per = np.arange(-200,201,1)\n if plot_no == 0:\n if plot == 'linear':\n ax = plt.contourf(x[:,:], y[:,:], species[:,:], diffs)\n elif plot == 'log':\n ax = plt.contourf(x[:,:], y[:,:], species[:,:], diffs, norm=colors.LogNorm())\n plt.title('WACCM')\n plt.ylabel('Altitude [km]', fontsize=12)\n elif plot_no == 1:\n if plot == 'linear':\n ax = plt.contourf(x[:,:], y[:,:], species[:,:], diffs)\n elif plot == 'log':\n ax = plt.contourf(x[:,:], y[:,:], species[:,:], diffs, norm=colors.LogNorm())\n plt.title('WACCM-X')\n plt.tick_params(labelleft='off')\n cbar_ax = fig.add_axes([0.94, 0.15, 0.02, 0.7])\n cbar = fig.colorbar(ax, cax=cbar_ax, ticks=cbar_ticks, orientation='vertical')\n cbar.set_label('%s [ppmv]' %name, fontsize=12)\n cbar.ax.tick_params(labelsize=12)\n elif plot_no == 2:\n ax2 = plt.contourf(x[:,:], y[:,:], species[:,:], diffs_per, extend='both', cmap=plt.get_cmap('seismic'))\n plt.title('WACCM to WACCM-X Difference')\n plt.tick_params(labelleft='off')\n cbar_ax2 = fig.add_axes([1.05, 0.15, 0.02, 0.7])\n cbar2 = fig.colorbar(ax2, cax=cbar_ax2, ticks=np.arange(-200,250,50), orientation='vertical')\n cbar2.cmap.set_under('#001648')\n cbar2.set_label('[%]', fontsize=12)\n cbar2.ax.tick_params(labelsize=12)\n return\n\nyear = 2014\nmonth = 1\nname = species_list[0]\nsymbol = symbol_list[0]\nunits = units_list[0]\nchemistry = True\nglobal_only = False\nsave = False\n# For ratio:\nname2 = species_list[3]\nsymbol2 = symbol_list[3]\n\nif units == 'ppmv':\n units_print = 'ppmv'\nelif units == '$\\mathregular{cm^{-3}}$':\n units_print = 'cm-3'\nelif units == 'K':\n units_print = 'K'\n\nif global_only == True:\n if month == 1:\n plt.title('January %s Global' %year , fontsize=16)\n elif month == 7:\n plt.title('July %s Global' %year, fontsize=16)\n step = 96\n a = 0\n b = 1\nelse:\n fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(11,8))\n gs1 = gridspec.GridSpec(2, 3)\n gs1.update(wspace=0.1, hspace=0.1)\n if month == 1:\n plt.suptitle('January %s' %year, fontsize=16)\n elif month == 7:\n plt.suptitle('July %s' %year, fontsize=16)\n step = 16\n a = 0\n b = 6\n\n#waccm_z3 = calc_z3_zon_mer_t_av(88)\n#waccmx_z3 = calc_z3_zon_mer_t_av(145)\n\nwaccm_species = calc_species_zon_av(symbol, 88)\nwaccmx_species = calc_species_zon_av(symbol, 145)\n\n#waccmx_species_int = interp_waccmx_species(waccm_z3, waccmx_z3, waccmx_species)\n#diff = calc_diff(waccm_species, waccmx_species_int)\n\n# 1D Plot Code\nfor i in range(a,b): \n lowlat = i * step\n highlat = (i * step) + step\n lowlat_no = int((lowlat * 1.875) - 90)\n highlat_no = int((highlat * 1.875) - 90)\n waccm_z3_weighted = calc_z3_zon_t_av_weighted(88, lowlat, highlat)\n waccmx_z3_weighted = calc_z3_zon_t_av_weighted(145, lowlat, highlat) \n if chemistry == True:\n if units == 'ppmv':\n waccm_species_profile = calc_profiles(waccm_species, 88, lowlat, highlat)\n waccmx_species_profile = calc_profiles(waccmx_species, 145, lowlat, highlat)\n elif units == '$\\mathregular{cm^{-3}}$':\n waccm_species_profile = calc_conc_profiles(waccm_species, 88, lowlat, highlat)\n waccmx_species_profile = calc_conc_profiles(waccmx_species, 
145, lowlat, highlat)\n else:\n if symbol == 'T':\n waccm_species_profile = calc_profiles(waccm_species, 88, lowlat, highlat)\n waccmx_species_profile = calc_profiles(waccmx_species, 145, lowlat, highlat) \n elif symbol == 'n':\n waccm_species_profile = calc_profiles(waccm_species, 88, lowlat, highlat)\n waccmx_species_profile = calc_profiles(waccmx_species, 145, lowlat, highlat) \n if global_only == True:\n plot_1d_global(name, 'waccm', units, waccm_z3_weighted, waccm_species_profile, 'k', 0)\n plot_1d_global(name, 'waccm-x', units, waccmx_z3_weighted, waccmx_species_profile, 'b', 1)\n else:\n plot_1d_multi(name, 'waccm', units, waccm_z3_weighted, waccm_species_profile, lowlat, highlat, 'k', i)\n plot_1d_multi(name, 'waccm-x', units, waccmx_z3_weighted, waccmx_species_profile, lowlat, highlat, 'b', i)\nif save == True:\n if global_only == True:\n plt.savefig('/nfs/a328/eecwk/waccm-x/figures/atomic_oxygen_experiment/john_ca_paper_JDmif_nad4cad7/%s/%s_month%s_profile_global_%s.jpg' %(year, name, month, units_print), bbox_inches='tight', dpi=300)\n else:\n plt.savefig('/nfs/a328/eecwk/waccm-x/figures/atomic_oxygen_experiment/john_ca_paper_JDmif_nad4cad7/%s/%s_month%s_profile_lat_bands_%s.jpg' %(year, name, month, units_print), bbox_inches='tight', dpi=300)\n\n'''\n# 1D Ratio Plot Code: WACCM-X only comparison workaround for missing WACCM species\nwaccmx_species = calc_species_zon_av(symbol, 145)\nwaccmx_species2 = calc_species_zon_av(symbol2, 145)\nwaccmx_ratio = calc_ratio(waccmx_species, waccmx_species2, 145)\nfor i in range(a,b): \n lowlat = i * step\n highlat = (i * step) + step\n lowlat_no = int((lowlat * 1.875) - 90)\n highlat_no = int((highlat * 1.875) - 90)\n waccm_z3_weighted = calc_z3_zon_t_av_weighted(88, lowlat, highlat)\n waccmx_z3_weighted = calc_z3_zon_t_av_weighted(145, lowlat, highlat) \n waccmx_species_profile = calc_profiles(waccmx_ratio, 145, lowlat, highlat) \n plot_1d_ratio('%s / %s ratio' %(symbol, symbol2), 'waccm-x', units, waccmx_z3_weighted, waccmx_species_profile, lowlat, highlat, 'b', i)\nif save == True:\n plt.savefig('/nfs/a328/eecwk/waccm-x/figures/atomic_oxygen_experiment/john_ca_paper_JDmif_nad4cad7/%s/%s_%s_ratio_month%s_profile_lat_bands.jpg' %(year, name, name2, month), bbox_inches='tight', dpi=300)\n'''\n'''\n# 2D Plot Code\n# Currently for chemistry only\nfig, axes = plt.subplots(nrows=1, ncols=3, figsize=(11,5))\ngs1 = gridspec.GridSpec(1, 3)\ngs1.update(wspace=0.1, hspace=0.1)\n\nif month == 1:\n fig.suptitle('January %s' %year, fontsize=16)\nelif month == 7:\n fig.suptitle('July %s' %year, fontsize=16)\n\nplot_2d(name, waccm_z3, waccm_species, 0)\nplot_2d(name, waccmx_z3, waccmx_species, 1)\nplot_2d(name, waccm_z3, diff, 2)\nplt.savefig('/nfs/a328/eecwk/waccm-x/figures/atomic_oxygen_experiment/john_ca_paper_JDmif_nad4cad7/%s/%s_month%s.jpg' %(year, name, month), bbox_inches='tight', dpi=300)\n'''\nplt.show()","sub_path":"atomic_oxygen_experiment.py","file_name":"atomic_oxygen_experiment.py","file_ext":"py","file_size_in_byte":18182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"443722241","text":"# Package initialisation\nfrom pypy.interpreter.mixedmodule import MixedModule\n\nclass Module(MixedModule):\n \"\"\"\n This module implements Stackless for applications.\n \"\"\"\n\n appleveldefs = {\n 'GreenletExit' : 'app_greenlet.GreenletExit',\n 'GreenletError' : 'app_greenlet.GreenletError',\n }\n\n interpleveldefs = {\n 'tasklet' : 'interp_stackless.tasklet',\n 'coroutine' : 
'coroutine.AppCoroutine',\n 'greenlet' : 'interp_greenlet.AppGreenlet',\n 'usercostate': 'composable_coroutine.W_UserCoState',\n '_return_main' : 'coroutine.return_main',\n }\n\n def setup_after_space_initialization(self):\n # post-installing classmethods/staticmethods which\n # are not yet directly supported\n from pypy.module._stackless.coroutine import post_install as post_install_coro\n post_install_coro(self)\n from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet\n post_install_greenlet(self)\n\n if self.space.config.translation.gc in ('framework', 'stacklessgc'):\n from pypy.module._stackless.clonable import post_install as post_install_clonable\n self.extra_interpdef('clonable', 'clonable.AppClonableCoroutine')\n self.extra_interpdef('fork', 'clonable.fork')\n post_install_clonable(self)\n","sub_path":"flex-backend/pypy/module/_stackless/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"402890429","text":"# -*- coding: utf-8 -*-\n\"\"\"Episode: 3-3.貧乏なう\n\"\"\"\n## path\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append('storybuilder')\n## local libs\nfrom storybuilder.builder.world import World\nfrom storybuilder.builder.writer import Writer\n\n\n## define alias\nW = Writer\n_ = W.getWho()\n\n\n## scenes\ndef sc_playgame(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"ゲームおもしろい\",\n hero.be(\"部屋でゲームにはまっている\"),\n hero.explain(\"画面には楽しそうな絵が出ている\",\n \"結構リアルで人形みたいな人間と魔物が動いている\"),\n hero.do(\"何とか準備も整い、明日から旅立とうという夜\"),\n hero.do(\"$Sはベッドに寝転がりながら、ぼんやりと$smaphを眺めていた\"),\n hero.do(\"そこには世界中のあらゆることが投稿されている\",\n \"多くの人の$w_tweetが流れていく\"),\n hero.do(\"その中に$w_gameというものが流れてくる\"),\n hero.talk(\"何だこれ?\"),\n hero.do(\"それは$smaphを使って架空の世界で遊ぶことができる、\",\n \"不思議な遊具のことらしい\"),\n hero.do(\"試しに勇者になって冒険するという、まさに今の自分に似つかわしい$w_gameをやってみる\"),\n hero.talk(\"おお、これすごい\"),\n hero.do(\"まだ冒険に出たことのない$Sは、これで練習ができると��しみになる\"),\n hero.do(\"物語は朝起こされるところから始まった\"),\n w.comment(\"DQ3ベースで\"),\n hero.do(\"仲間を集めてから、冒険の旅に出る\"),\n hero.do(\"最初は城の周辺で戦うといいらしい\"),\n hero.do(\"初めての魔物との戦闘\"),\n hero.do(\"そこで$Sは初勝利\"),\n hero.talk(\"なんだ、簡単じゃん\", \"これなら魔王だって余裕で倒せそうだな\"),\n stage=w.on_heroroom_int,\n time=w.at_night,\n )\n\ndef sc_numa(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"ゲームの沼\",\n hero.be(),\n hero.do(\"しかしすぐに勝てなくなる\"),\n hero.do(\"魔物はどんどん強くなるのに、こちらのレベルは上がらない\"),\n hero.talk(\"なんだよ、これ\"),\n hero.do(\"そんな時、お助けアイテムなどがもらえる$w_gachaというものがあった\",\n \"それをやると一定の確率で旅を助ける道具や武器防具、強力な仲間などが手に入るらしい\"),\n hero.talk(\"よーし、頼む\"),\n hero.do(\"$w_gachaにより、一気に旅が進む\"),\n hero.do(\"しかしまたすぐに壁が現れた\"),\n hero.talk(\"よし、$w_gachaだ\"),\n hero.do(\"だが最初はいいものが引けたのに、次は全然出ない\"),\n hero.talk(\"あれ? 次どうやったら$w_gachaできるんだ?\"),\n hero.do(\"説明を見ると、課金すると$w_gachaの券が買えるらしい\"),\n hero.talk(\"まあ一回くらいなら\"),\n hero.do(\"やってみると強い武器が手に入り、あっという間に倒せてしまう\"),\n hero.talk(\"なんだ、やっぱり楽勝じゃん!\"),\n hero.do(\"しかしまた次のボスが倒せない\"),\n hero.do(\"$w_gachaを引く。そして当てる\"),\n hero.do(\"だが徐々に確率が悪くなり、簡単な武器やアイテムでは攻略できなくなってくる\"),\n hero.talk(\"なんだよこれは! 
面白くないよ!\"),\n hero.do(\"$smaphを放り出して一旦寝そべるが\"),\n hero.talk(\"まああと一回だけなら\"),\n hero.do(\"こうして$Sは明け方まで続いた\"),\n )\n\ndef sc_billcollector(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n geruon = W(w.geruon)\n mam = W(w.mam)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"支払いは\",\n hero.be(\"眠りこけている\"),\n mako.come(),\n mako.talk(\"$heroさま?\"),\n sol.come(),\n sol.talk(\"おーい\"),\n hero.do(\"$makoたちによって起こされた\"),\n mako.talk(\"何してたんですか?\"),\n mako.do(\"全く準備ができていないのを見て\"),\n hero.talk(\"いやあ、なんか$w_gameってのを見つけちゃってさ\",\n \"それで遊んでたら知らないうちに気絶してた\",\n \"今から大急ぎで準備するわ\"),\n hero.do(\"顔を洗いに部屋から出ようとするが、\"),\n hero.hear(\"玄関をどんどんするのを聞く\"),\n mam.talk(\"ごめんなさい\", \"今ちょっと手が離せないから出てくれる?\"),\n hero.talk(\"仕方ないな\"),\n hero.do(\"誰か見に行く\"),\n geruon.talk(\"あー、ちょっとお尋ねしますが、こちらは$heroさんのお宅でよろしいですかね?\"),\n hero.talk(\"ええ、$heroは$meですが\"),\n geruon.talk(\"ああ、あなたが\",\n \"こちら、請求書になります\"),\n hero.do(\"それは信じられない金額が書かれていた\"),\n hero.talk(\"あの、なんですか、これ\"),\n geruon.talk(\"だからあなたが使った$w_gachaの請求書ですよ\",\n \"口座に1$w_Gもないんだから当然全部借金な訳ですよ\",\n \"それが支払われないので、$meが来ました\"),\n geruon.look(\"真っ黒な服の上下で、鋭い目つき\",\n \"手にはかっちりした黒い鞄を持っている\"),\n hero.do(\"それは借金の取り立てだった\"),\n stage=w.on_herohome,\n )\n\ndef sc_deadend(w: World):\n hero, mako, sol, yula = W(w.hero), W(w.mako), W(w.sol), W(w.yula)\n geruon = W(w.geruon)\n inside, outside = W(w.inside), W(w.outside)\n return w.scene(\"借金取り\",\n w.comment(\"ここがラストの逃げるところで関わってくる場面だといい\",\n \"$yulaのアジトが近いとか\"),\n hero.come(\"逃げてやってくる\"),\n outside.look(\"林になっているところを抜けた先、\",\n \"$on_mtjihanを臨む\",\n \"崖が迫る\"),\n hero.do(\"借金取りに追われて、逃げ出す\"),\n hero.do(\"崖に追い詰められる\"),\n geruon.come(),\n geruon.talk(\"で、払ってもらえますかね?\"),\n hero.do(\"$Sは追い詰められて、崖から落下した\"),\n stage=w.on_jihancliff,\n )\n\n## episode\ndef ep_poor(w: World):\n return w.episode(\"3-3.貧乏なう\",\n sc_playgame(w),\n sc_numa(w),\n sc_billcollector(w),\n sc_deadend(w),\n ## NOTE\n ## - ゲームを知る勇者。そこで冒険ができることを知った\n ## - ゲームにはまり、準備資金が全部なくなる\n ## - 借金取りに追われて、逃げているうちに崖から落ちて勇者は死んだ\n )\n","sub_path":"src/c3_preparation/e3_poor.py","file_name":"e3_poor.py","file_ext":"py","file_size_in_byte":7703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"86175121","text":"#!/usr/bin/env python3\n# Author: Bradley Pratt\n# Created: 01/11/2021\n# Last Edit: 01/15/2021\n\nimport PySimpleGUI as sg\nfrom sympy import *\n\n# ########GLOBAL VARIABLES##########\nwelcomeMessage = \"Welcome to my python-based calculator!\"\noperators = [\"^\", \"√\", \"÷\", \"*\", \"+\", \"-\"]\nimplicitMultiply = [\"LOG\", \"LN\", \"SIN\", \"COS\", \"TAN\", \"(\"]\nnumbers = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \".\"]\nhistory = []\n\ncolumn1 = [\n [sg.Button(button_text=\"LOG\", size=(5, 1))],\n [sg.Button(button_text=\"LN\", size=(5, 1))],\n [sg.Button(button_text=pretty(pi), size=(5, 1))],\n [sg.Button(button_text=\"^\", size=(5, 1))],\n [sg.Button(button_text=\"DEL\", size=(5, 1))],\n [sg.Button(button_text=\"OFF\", size=(5, 1))]\n]\n\ncolumn2 = [\n [sg.Button(button_text=\"SIN\", size=(5, 1))],\n [sg.Button(button_text=\"√\", size=(5, 1))],\n [sg.Button(button_text=\"7\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"4\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"1\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"0\", size=(5, 1), button_color=('#12100E', 'white'))]\n]\n\ncolumn3 = [\n [sg.Button(button_text=\"COS\", size=(5, 1))],\n 
[sg.Button(button_text=\"(\", size=(5, 1))],\n [sg.Button(button_text=\"8\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"5\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"2\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\".\", size=(5, 1), button_color=('#12100E', 'white'))]\n]\n\ncolumn4 = [\n [sg.Button(button_text=\"TAN\", size=(5, 1))],\n [sg.Button(button_text=\")\", size=(5, 1))],\n [sg.Button(button_text=\"9\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"6\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"3\", size=(5, 1), button_color=('#12100E', 'white'))],\n [sg.Button(button_text=\"(-)\", size=(5, 1), button_color=('#12100E', 'white'))]\n]\n\ncolumn5 = [\n [sg.Button(button_text=\"CLEAR\", size=(5, 1))],\n [sg.Button(button_text=\"÷\", size=(5, 1), button_color=('white', '#3A3E5C'))],\n [sg.Button(button_text=\"*\", size=(5, 1), button_color=('white', '#3A3E5C'))],\n [sg.Button(button_text=\"+\", size=(5, 1), button_color=('white', '#3A3E5C'))],\n [sg.Button(button_text=\"-\", size=(5, 1), button_color=('white', '#3A3E5C'))],\n [sg.Button(button_text=\"=\", size=(5, 1), button_color=('white', '#3A3E5C'))]\n]\n\nlayout = [\n [sg.Output(size=(47, 5), key='-DISPLAY-', echo_stdout_stderr=True)],\n [sg.Column(column1),\n sg.VSeperator(pad=(1, 1)),\n sg.Column(column2),\n sg.VSeperator(pad=(1, 1)),\n sg.Column(column3),\n sg.VSeperator(pad=(1, 1)),\n sg.Column(column4),\n sg.VSeperator(pad=(1, 1)),\n sg.Column(column5)]\n]\n\n\n# ########MAIN FUNCTION##########\ndef main():\n global history\n\n # Create the window\n window = sg.Window('Scientific Calculator', layout, no_titlebar=True, grab_anywhere=True)\n print(welcomeMessage)\n equation = []\n\n # Create an event loop\n while True:\n event, values = window.read()\n\n if event == \"OFF\" or event == sg.WIN_CLOSED:\n break\n elif event == \"CLEAR\":\n window['-DISPLAY-'].update('')\n equation = []\n elif event == \"DEL\":\n output = window['-DISPLAY-'].Get()\n window['-DISPLAY-'].update('')\n print(output[0:-2], end=\"\")\n del equation[-1]\n elif event == \"=\":\n if parenthesesChecker(equation):\n answer = calculate(equation)\n print(f\"= {answer}\")\n history.append(answer)\n equation = []\n else:\n print(\"ERROR: missing parenthesis!\")\n else:\n if event == \"LOG\" or event == \"LN\" or event == \"SIN\" or event == \"COS\" or event == \"TAN\" or event == \"√\":\n print(event.lower() + \"(\", end=\"\")\n else:\n if event in operators and event != \"√\" and len(equation) == 0:\n if len(history) == 0:\n print(\"ERROR: no previous history. 
Cannot perform operation on empty value.\")\n else:\n equation.append(history[-1])\n print(\"ANS\", end=\"\")\n print(event, end=\"\")\n if len(equation) != 0 and equation[-1][-1] in numbers and event in numbers:\n equation[-1] += event\n else:\n equation.append(event)\n\n window.close()\n\n\n# ########ACCESSORY FUNCTION##########\ndef parenthesesChecker(equation):\n stack = []\n\n for element in equation:\n if element in implicitMultiply or element == \"√\":\n stack.append(1)\n if element == ')':\n if len(stack) == 0:\n return False\n else:\n stack.pop()\n return True\n\n\ndef calculate(equation):\n subEq = []\n current = []\n subPar = 0\n tracking = False\n\n if len(equation) == 0:\n return 0\n\n track = 0\n for el in equation:\n # print(current)\n # if el not in operators:\n # if track != len(equation) - 1 and equation[track + 1] in implicitMultiply:\n # current.append(\"*\")\n if tracking and el == \")\" and subPar == 0:\n tracking = False\n if len(subEq) != 0:\n # print(current)\n current.append(funcCalc(equation[track - 2], calculate(subEq)))\n subEq = []\n elif tracking:\n if el in implicitMultiply:\n subPar += 1\n if el == \")\":\n subPar -= 1\n subEq.append(el)\n elif el in implicitMultiply:\n tracking = True\n else:\n current.append(el)\n track += 1\n #print(current)\n # print(current)\n endNotReached = True\n while endNotReached:\n for item in range(len(current)):\n if current[item] == \"^\":\n current[item - 1] = performOperation(float(current[item - 1]), float(current[item + 1]), current[item])\n del current[item + 1]\n del current[item]\n break\n if current[item] == \"√\":\n current[item] = performOperation(float(current[item + 1]), 0, current[item])\n del current[item + 2]\n del current[item + 1]\n break\n if item == len(current) - 1:\n endNotReached = False\n break\n\n endNotReached = True\n while endNotReached:\n for item in range(len(current)):\n if current[item] == \"*\" or current[item] == \"÷\":\n current[item - 1] = performOperation(float(current[item - 1]), float(current[item + 1]), current[item])\n del current[item + 1]\n del current[item]\n break\n if item == len(current) - 1:\n endNotReached = False\n break\n\n endNotReached = True\n while endNotReached:\n for item in range(len(current)):\n if current[item] == \"+\" or current[item] == \"-\":\n current[item - 1] = performOperation(float(current[item - 1]), float(current[item + 1]), current[item])\n del current[item + 1]\n del current[item]\n break\n if item == len(current) - 1:\n endNotReached = False\n break\n\n if len(current) != 1:\n print(f\"Current answer array length: {len(current)}\")\n return \"INTERNAL ERROR: Improper calculation.\"\n else:\n return float(current[0])\n\n\ndef funcCalc(func, value):\n if func == \"(\":\n return value\n elif func == \"LOG\":\n return log(value)\n elif func == \"LN\":\n return ln(value)\n elif func == \"SIN\":\n return sin(value)\n elif func == \"COS\":\n return cos(value)\n elif func == \"TAN\":\n return tan(value)\n else:\n print(f\"ERROR: unrecognized function: {func}. 
Returning 0.\")\n return 0\n\n\ndef performOperation(num1, num2, op):\n if op == \"^\":\n return exp(num1, num2)\n elif op == \"√\":\n return exp(num1, 1 / 2)\n elif op == \"+\":\n return add(num1, num2)\n elif op == \"-\":\n return add(num1, -num2)\n elif op == \"*\":\n return multiply(num1, num2)\n else:\n return multiply(num1, 1 / num2)\n\n\ndef exp(base, power):\n return base ** power\n\n\ndef add(first, second):\n return first + second\n\n\ndef multiply(first, second):\n return first * second\n\n\n# Call the main function\nif __name__ == \"__main__\":\n main()\n","sub_path":"scientific_calculator.py","file_name":"scientific_calculator.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156629354","text":"import concurrent.futures\nimport io\nimport json\nimport re\nimport shlex\nfrom collections import deque, OrderedDict\n\nimport twitter\nfrom PIL import Image\n# from googletrans import Translator\n\nfrom api_keys import *\nfrom apis import twitch_rss\nfrom apis.olliebot_web import OllieBotAPI\nfrom apis.worldtime import *\nfrom apis.youtubeapi import YoutubeAPI\nfrom response import *\nfrom util.containers import *\n\n\n# Returns a BotContainer by name\ndef get_bot(name: str):\n global bots\n for b in bots:\n if b.name == name:\n return b\n return None\n\n\ndef proxy_message(bot, channel_id: str, content: str, embed: discord.Embed = None):\n global out_messages\n out_messages.append(ProxyMessage(bot=bot,\n channel=discord.Object(id=channel_id),\n content=content,\n embed=embed))\n\n\ndef get_quote(in_server, in_name, do_spam=False):\n for c in in_server.commands:\n if do_spam:\n if c['name'] == in_name:\n return c\n else:\n if c['name'] == in_name and int(c['timer']) < 1:\n c['timer'] = str(in_server.command_delay * 60)\n return c\n return None\n\n\ndef bc_from_bot(bot):\n global bots\n for b in bots:\n if b.id == bot.user.id:\n return b\n\n\ndef extract_url(arg: str, start: int) -> str:\n out = ''\n for c in arg[start:start + 100]:\n if c == '\"':\n break\n out += c\n return out\n\n\ndef extract_mention_id(id: str):\n out = ''\n for c in id:\n if c.isdigit():\n out += c\n return out\n\n\nurl_regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' 
# optional port\n    r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n\n\ndef validate_url(url):\n    return re.match(url_regex, url)\n\n\ndef yt_shortened_to_long(link: str):\n    if 'https://youtu.be' in link:\n        link_parts = link.rsplit('/', maxsplit=1)\n        try:\n            return 'https://www.youtube.com/watch?v={}'.format(link_parts[1])\n        except IndexError:\n            return None\n    return None\n\n\ndef yt_extract_id(url: str):\n    if 'youtube' in url and 'v=' in url:\n        v_tag = url.rsplit('v=', 1)\n        return v_tag[1].split('&')[0]\n    elif 'youtu.be' in url:\n        return url.rsplit('/', 1)[1]\n    return None\n\n\ndef is_num(text: str, base: int = 10):\n    try:\n        num = int(text, base)\n        return num\n    except (ValueError, TypeError):\n        return None\n\n\ndef safe_list_get(l, idx, default):\n    try:\n        return l[idx]\n    except IndexError:\n        return default\n\n\ndef strip_args(args: str) -> list:\n    arg_list = shlex.split(args, ' ')\n    out_list = []\n    for a in arg_list:\n        if a:\n            pieces = a.split('=')\n            out_list.append((pieces[0], pieces[1]))\n    return out_list\n\n\ndef flush_delete_queue():\n    global delete_queue\n    for d in delete_queue: # type: DeleteMessage\n        d.timer = 0\n\n\ndef replace_color(img: Image.Image, base_color: int, with_color: int, variance: int):\n    red = (base_color & 0xff0000) >> 16\n    green = (base_color & 0xff00) >> 8\n    blue = base_color & 0xff\n\n    with_red = (with_color & 0xff0000) >> 16\n    with_green = (with_color & 0xff00) >> 8\n    with_blue = with_color & 0xff\n\n    pixels = img.load()\n\n    for y in range(img.size[1]):\n        for x in range(img.size[0]):\n            at = pixels[x, y]\n\n            # at[0:4] -> [red, green, blue, alpha]\n\n            if abs(red - at[0]) <= variance and abs(green - at[1]) <= variance and abs(blue - at[2]) <= variance:\n                pixels[x, y] = (with_red, with_green, with_blue, at[3]) # keep original alpha | alpha-blind\n\n\nasync def get_json(page: str) -> dict:\n    with aiohttp.ClientSession() as session:\n        async with session.get(page) as resp:\n            if resp.status == 200:\n                d = await resp.json()\n                return d\n\n\nasync def get_image(url: str):\n    try:\n        with aiohttp.ClientSession() as session:\n            async with session.get(url) as resp:\n                if resp.status == 200:\n                    image_bytes = await resp.read()\n\n                    image = Image.open(io.BytesIO(image_bytes))\n                    image = image.convert('RGBA')\n                    return image\n    except Exception:\n        return None\n\n\ndef extract_image_url(arg, msg: discord.Message):\n    if type(arg) is str and arg.startswith('http'):\n        return arg\n    if msg.attachments:\n        return msg.attachments[0]['url']\n    if msg.embeds:\n        return msg.embeds[0]['url']\n\n\ndef extract_filename(path):\n    if '.' in path[-5:]:\n        matches = re.findall(r'\\b[a-zA-Z]+\\.[a-zA-Z]{3}\\b', path)\n        if matches:\n            return matches[-1]\n\n    matches = re.findall(r'\\b([a-zA-Z]+)', path)\n\n    if matches:\n        return matches[-1]\n\n    return path\n\n\n# I forgot what non-builtin attributes are called so it's \"new\"\ndef get_new_attr(thing, check=None):\n    if check:\n        return (x for x in thing.__dict__ if not x.startswith('__') and check(getattr(thing, x)))\n    return (x for x in thing.__dict__ if not x.startswith('__'))\n\n\n# iterator find help util\ndef iterfind(iterable, check, default=None):\n    for i in iterable:\n        if check(i):\n            return i\n    return default\n\n\ndef bool_eval(text):\n    if text.lower() in ['yes', 'y', 'ya', 'yea', 'yeah', 'yup', 'true', 't', 'yes please', 'hit me up', 'hell yeah']:\n        return True\n    elif text.lower() in ['n', 'no', 'nah', 'nah fam', 'please no', 'god no', 'no thanks', 'false', 'f']:\n        return False\n    # else return None\n\n\n# time in seconds\ndef schedule_delete(bot, msg, time: int):\n    delete_queue.append(DeleteMessage(message=msg, bot=bot, timer=time))\n\n\ndef schedule_future(coro, time: int, name: str = ''):\n    coro_queue.append(TimedFuture(coro=coro, timer=time, name=name))\n\n\ndef future_is_scheduled(name: str):\n    for tf in coro_queue:\n        if tf.name and tf.name == name:\n            return True\n\n\n# helper function purely for formatting\ndef help_form(text: str):\n    return text\n\n\n# global save protection\nsave_in_progress = False\n\n\n# save_in_progress decorator\ndef global_save(func):\n    def decorator(*args, **kwargs):\n        global save_in_progress\n        save_in_progress = True\n        func(*args, **kwargs)\n        save_in_progress = False\n\n    return decorator\n\n\n# str.split but an iter\ndef split_iter(string, include: str = ''):\n    return (x.group(0) for x in re.finditer(r\"[A-Za-z0-9{}']+\".format(include), string))\n\n\n# slice an `OrderedDict`\ndef od_slice(od, fr=0, to=0):\n    if not to:\n        to = len(od)\n    desired = list(od)[fr:to]\n    return OrderedDict((k, od[k]) for k in desired)\n\n\ndef file_exists(filepath: str):\n    try:\n        open(filepath, 'r').close()\n        return True\n    except:\n        return False\n\n\nexit_timer = 0\n\nout_messages = deque([]) # for proxy message delivery system\n\ndelete_queue = []\n\ncoro_queue = []\n\nmute_queue = []\n\nbypass_perm = []\n\nalive_timer = 0\n\n# temp storage for temp admin key\nadminKey = ''\n\n# global timer for rss feeds\nrss_timer = 60\n\n# global bad timer\nbad_timer = 0\n\n# global shoe jesus timer\nshoe_jesus_timer = 0\n\ninternal_shutdown = False\n\nsync_shutdown = False\n\n# default executor\ndef_executor = concurrent.futures.ThreadPoolExecutor(max_workers=10)\n\n# -----------\n# CONSTANTS\n# -----------\nCHAR_ZWS = chr(0x200B)\n\nTITLE_BAR = '───────────────────────'\n\nTIME_RESPONSE_EXIT = 300 # in seconds\n\nTIME_RSS_LOOP = 70 # in seconds\n\nTIME_ASYNC_EXIT = 60 # in seconds\n\nTIME_MUSIC_TIMEOUT = 120 # in seconds\n\nTIME_SHOE_JESUS = 2700\n\nOWNER_ID = '305407800778162178'\n\nMUSIC_QUEUE_LIMIT = 50\n\nDATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'\n\nwith open('resources/emoji_alphabet.json', 'r', encoding='utf8') as f:\n    emoji_alphabet = json.load(f)\n\n\n# |--------------[ API Setup ]--------------|\ntwitter_api = twitter.Api(consumer_key=TWITTER_CONSUMER_KEY,\n                          consumer_secret=TWITTER_CONSUMER_SECRET,\n                          access_token_key=TWITTER_TOKEN_KEY,\n                          access_token_secret=TWITTER_TOKEN_SECRET)\n\nyt = YoutubeAPI(key=YOUTUBE_TOKEN)\n\ntwitch = twitch_rss.TwitchRss(client_id=TWITCH_CLIENT_ID,\n                              client_secret=TWITCH_CLIENT_SECRET,\n                              oauth=TWITCH_TOKEN)\n\nworldtime = 
WorldTime(key=GEO_TIME_TOKEN)\n\nolliebot_api = OllieBotAPI(OLLIEBOT_TOKEN)\n\nrss_feeds = ['twitter', 'twitch', 'youtube']\n\nrss_colors = {'twitter': 0x00aced,\n 'twitch': 0x6441a5,\n 'youtube': 0xbb0000}\n\n# shortened music commands to be replaced\nmusic_commands = {'cq': 'queue clear',\n 'qc': 'queue clear',\n 'p': 'play',\n 'q': 'queue',\n 'd': 'disconnect',\n 'sk': 'skip',\n 'se': 'search',\n 'lq': 'queue listall',\n 'ql': 'queue listall',\n 'ps': 'pause',\n 'sh': 'shuffle',\n 'c': 'current track info'}\n\nhug_library = []\npat_library = []\n\nnum2word = {'0': 'zero',\n '1': 'one',\n '2': 'two',\n '3': 'three',\n '4': 'four',\n '5': 'five',\n '6': 'six',\n '7': 'seven',\n '8': 'eight',\n '9': 'nine'}\n","sub_path":"util/global_util.py","file_name":"global_util.py","file_ext":"py","file_size_in_byte":9612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"590464211","text":"import requests\nimport sys\nfrom bs4 import BeautifulSoup\n\n\n# def file_write(path_,content):\n# my_file = open(path_, 'a+', encoding='utf-8')\n# my_file.write(content+'\\n')\n# my_file.close()\n\n\ndef make_request(url, header):\n try:\n s = requests.session()\n page_ = s.get(url, headers=header)\n if page_.status_code == 200:\n return page_\n except requests.exceptions.ConnectionError:\n print('Something wrong with your internet connection')\n sys.exit()\n\ndef get_url(lang1, lang2, w):\n url = f'https://context.reverso.net/translation/{lang1.lower()}-{lang2.lower()}/{w}'\n return url\n\n\ndef get_translations(soup):\n word_translation = soup.find_all('a', {'class': 'translation'})\n return [t.text.strip() for t in word_translation][1:]\n\n\ndef get_example(soup):\n examples = soup.find_all('div', {'class': ['src', 'trg']})\n return [e.text.strip().strip('\\n\\n\\n') for e in examples]\n\n\ndef save_translations(tran, lan, path_):\n my_file = open(path_, 'a+', encoding='utf-8')\n\n my_file.write(f'{lan} Translations:\\n')\n\n my_file.write(tran[0] + '\\n')\n\n my_file.write('\\n')\n my_file.close()\n\n\ndef save_example(exam, lan, path_):\n my_file = open(path_, 'a+', encoding='utf-8')\n my_file.write(f'{lan} Examples:\\n')\n\n my_file.write(exam[0] + ':' + '\\n')\n\n if lan == 'Turkish':\n my_file.write(exam[1])\n else:\n my_file.write(exam[1] + '\\n')\n\n if lan != 'Turkish':\n my_file.write('\\n\\n')\n\n my_file.close()\n\n\ndef welcome_message():\n print(\"Hello, you're welcome to the translator.\")\n print('Translator supports:')\n print('1. Arabic')\n print('2. German')\n print('3. English')\n print('4. Spanish')\n print('5. French')\n print('6. Hebrew')\n print('7. Japanese')\n print('8. Dutch')\n print('9. Polish')\n print('10. Portuguese')\n print('11. Romanian')\n print('12. Russian')\n print('13. 
Turkish')\n\n\nif __name__ == \"__main__\":\n args = sys.argv\n lang_list = ['Arabic', 'German', 'English', 'Spanish', 'French', 'Hebrew', 'Japanese', 'Dutch', 'Polish',\n 'Portuguese', 'Romanian', 'Russian', 'Turkish']\n src_language = args[1]\n\n target_language = args[2]\n\n if src_language.capitalize() not in lang_list:\n print(f\"Sorry,the program doesn't support {src_language}\")\n sys.exit()\n if target_language.capitalize() not in lang_list+['All']:\n print(f\"Sorry,the program doesn't support {target_language}\")\n sys.exit()\n\n word = args[3]\n path = f'{word}.txt'\n headers = {'User-Agent': 'Chrome-Windows'}\n\n if target_language == 'all':\n for k in range(len(lang_list)):\n if lang_list[k].lower() == src_language.lower():\n continue\n url1 = get_url(src_language, lang_list[k], word)\n page = make_request(url1, headers)\n try:\n soup_ = BeautifulSoup(page.content, 'html.parser')\n except AttributeError:\n print(f'Sorry, unable to find {word}')\n sys.exit()\n soup_.prettify()\n translation_ = get_translations(soup_)\n example_ = get_example(soup_)\n\n save_translations(translation_, lang_list[k], path)\n save_example(example_, lang_list[k], path)\n\n else:\n url1 = get_url(src_language, target_language, word)\n page = make_request(url1, headers)\n try:\n soup_ = BeautifulSoup(page.content, 'html.parser')\n except AttributeError:\n print(f'Sorry, unable to find {word}')\n sys.exit()\n soup_.prettify()\n translation_ = get_translations(soup_)\n example_ = get_example(soup_)\n\n save_translations(translation_, target_language, path)\n save_example(example_, target_language, path)\n\n\n file = open(path, 'r', encoding='utf-8')\n print(file.read())\n file.close()\n","sub_path":"translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"9092846","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def swapPairs(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n if head is None or head.next is None:\n return head\n \n ret_head = head.next\n curr_1, curr_2 = head, head.next\n pre = None\n \n while curr_1 != None and curr_2 != None:\n curr_1.next = curr_2.next\n curr_2.next = curr_1\n curr_1, curr_2 = curr_2, curr_1\n if pre != None:\n pre.next = curr_1\n \n if curr_1.next == None or curr_2.next == None:\n return ret_head\n pre = curr_2\n curr_1 = curr_1.next.next\n curr_2 = curr_2.next.next\n \n return ret_head\n","sub_path":"024. 
Swap Nodes in Pairs/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"528805573","text":"#program that returns function investment to the nearest penny\ntry:\n    c = int(input(\"Enter initial amount of investment: \"))\n    r = float(input(\"Yearly interest rate: \"))\n    t = int(input(\"Number of years until maturation: \"))\n    n = int(input(\"Number of times interest is compounded per year: \"))\n    p = c*((1+r/n)**(t*n))\n    print(round(p, 2))\nexcept Exception:\n    print(\"Enter numeric values\")","sub_path":"src/chapter 4/exercise2.py","file_name":"exercise2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647805595","text":"\nimport re\n\ndef arg_max(iterator, callback):\n    max_value = -1\n    max_index = None\n    for i in iterator:\n        value = callback(i)\n        if value > max_value:\n            max_value = value\n            max_index = i\n    return (max_index, max_value)\n\ndef emission_probability(word, tag, counts):\n    if tag == \"*\":\n        denominator = counts.ngram_counts[1][(tag, tag)]\n    else:\n        denominator = counts.ngram_counts[0][(tag,)]\n    \n    return counts.emission_counts[(word, tag)] / denominator\n\ndef bigram_probability(tag1, tag2, counts):\n    if tag1 == \"*\":\n        denominator = counts.ngram_counts[1][(tag1, tag1)]\n    else:\n        denominator = counts.ngram_counts[0][(tag1,)]\n    \n    return counts.ngram_counts[1][(tag1, tag2)] / denominator\n\ndef trigram_probability(tag1, tag2, tag3, counts, vocab_size):\n    numerator = counts.ngram_counts[2][(tag1, tag2, tag3)]\n    denominator = counts.ngram_counts[1][(tag1, tag2)]\n    return (numerator + 1) / (denominator + vocab_size)\n    \ndef classify_word(word):\n    classes = [\n        (re.compile(\"^[A-Z\\.]+$\"), \"_ABREVIATION_\"),\n        #(re.compile(\"(^[A-Z\\.]+)|(^[A-Z]{3})$\"), \"_ABREVIATION_\"),\n        #(re.compile(\"^[A-Z]\\.$\"), \"_ABREVIATION_\"),\n        (re.compile(\"^[\\.\\-\\,\\d]+$\"), \"_NUMBER_\"),\n        (re.compile(\"^[A-Z][a-z]+$\"), \"_CAPITALIZED_\"),\n        (re.compile(\"^[A-Z]+$\"), \"_UPPERCASE_\"),\n        (re.compile(\"^[a-z]+$\"), \"_LOWERCASE_\")\n        #(re.compile(\"-\"), \"_HYPHEN_\"), \n    ]\n    for pattern, name in classes:\n        if pattern.search(word) != None:\n            return name\n    return \"_RARE_\"\n    \n\n    ","sub_path":"Labs/Assignment4/Code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"224169729","text":"def main():\r\n    #Alphabetically sort the inital anagram\r\n    anagram = sorted(input(\"Enter anagram: \"))\r\n    with open('dictionary.txt') as dictionary:\r\n        #Go through each word in the dictionary file\r\n        for current_word in dictionary:\r\n            #Strip newline chars and alphabetically sort the current word\r\n            current_word = current_word.rstrip()\r\n            sorted_word = sorted(current_word)\r\n            #If the current word sorted alphabetically matches the anagram\r\n            #sorted alphabetically we know that the word is a solution for the anagram\r\n            if(sorted_word == anagram):\r\n                print(current_word)\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430842770","text":"from bokeh.resources import CDN\nfrom bokeh.plotting import figure\nfrom bokeh.embed import json_item\nfrom jinja2 import Template\nfrom flask import Flask, render_template, request\nfrom bokeh.embed import components\nimport json\nfrom flask import Flask, request, jsonify, render_template, flash\nfrom forms.forms import PredictionForm, VisualizationForm\nfrom graph.graph import PlotNetwork\nfrom models.predictor import Predict\nimport os\nfrom time import localtime, strftime\nimport psycopg2\nfrom nltk.data import find\nimport gensim\n\nsecret_key = os.environ.get('SECRET_KEY')\ndb_username = os.environ.get('DB_USERNAME')\ndb_password = os.environ.get('DB_PASSWORD')\nhost = os.environ.get('HOST')\nport = os.environ.get('PORT')\ndb = os.environ.get('DB')\ntable = os.environ.get('TABLE')\n\n# for data visualization component\nword2vec_sample = str(find('models/word2vec_sample/pruned.word2vec.txt'))\nmodel = gensim.models.KeyedVectors.load_word2vec_format(\n    word2vec_sample, binary=False)\n\n\ndef insert_db(component, user_input, user_input_no, prediction):\n    ip = request.remote_addr\n    timestamp = strftime(\"%Y-%m-%d %H:%M:%S\", localtime()) \n    connection = psycopg2.connect(user = db_username,\n                                  password = db_password,\n                                  host = host,\n                                  port = port,\n                                  database = db)\n\n    cursor = connection.cursor()\n\n    postgres_insert_query = f\"\"\" INSERT INTO {table}(component, user_input, user_input_no, prediction, timestamp, ip) VALUES (%s,%s,%s,%s,%s,%s)\"\"\"\n    record_to_insert = (component, user_input, user_input_no, prediction, timestamp, ip)\n    cursor.execute(postgres_insert_query, record_to_insert)\n\n    connection.commit()\n    # count = cursor.rowcount\n\n    cursor.close()\n    connection.close()\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = secret_key\napp.debug = True\n\n\n@app.route('/')\n@app.route('/about')\ndef about():\n    return render_template('about.html', body='About')\n\n\n@app.route('/aa')\ndef academic_achievements():\n    return render_template('aa.html', body='Academic Achievements')\n\n\n@app.route('/ld', methods=['GET', 'POST'])\ndef language_detection():\n    prediction= \"\"\n    form = PredictionForm()\n    if form.user_input.data:\n        text = form.user_input.data\n        if 300<=len(text)<=3000:\n            prediction = Predict([text]).detect_language()\n            insert_db(\"ld\", text, str(0), str(prediction))\n        else:\n            prediction = \"Enter a paragraph between 300 and 3000 characters.\"\n    return render_template('ld.html', prediction=prediction, form=form)\n\n\n@app.route('/sa', methods=['GET', 'POST'])\ndef sentiment_analysis():\n    form = PredictionForm()\n    prediction = \"\"\n    if form.user_input.data:\n        text = form.user_input.data\n        if 300<=len(text)<=3000:\n            prediction = Predict(text).analyze_sentiment()\n            insert_db(\"sa\", text, str(0), str(prediction))\n        else:\n            prediction = \"Enter a paragraph between 300 and 3000 characters.\"\n    return render_template('sa.html', prediction=prediction, form=form)\n\n\n@app.route('/dv', methods=['GET', 'POST'])\ndef data_visualization():\n    prediction = \"\"\n    form = VisualizationForm()\n    if form.user_input.data and form.user_input_no:\n        current_word = form.user_input.data\n        current_no = form.user_input_no.data\n    else:\n        current_word, current_no = 'data', 50\n    try:\n        current_no = int(current_no)\n        if 0pk_lim1[0]) & (epos['m2q']pk_lim2[0]) & (epos['m2q']//', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),\n    # path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),\n    # alternative url patterns are defined in django.contrib.auth.urls (zamiast powyższych):\n    path('', include('django.contrib.auth.urls')),\n\n    path('', 
views.dashboard, name='dashboard'),\n path('register/', views.usersignup, name='register_user'),\n url(r'^activate/(?P[0-9A-Za-z_\\-]+)/(?P[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',\n views.activate_account, name='activate'),\n path('edit/', views.edit, name='edit'),\n path('profile/', views.profile_view, name='profile_view'),\n path('/profile/', views.bidder_view, name='bidder_view'),\n path('api/', include('account.api.urls')),\n\n]","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"226231456","text":"import requests\nimport os, re\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\nfrom numpy import random as rnd\nimport time\nimport pickle as pkl\nimport re\nimport json\n\n\"\"\"\n##############################################################################################################################\nINPUT:\n 1. biz_reviews_collection.json --collection of reviews\nOUTPUT:\n 1. pickled_user_data.pkl -- scraped data that's saved along the way\n 2. SanDiego_users.csv -- if successful then scraped data is saved into this csv file.\n 3. count.txt -- keeps track of the number of pages processed. \n##############################################################################################################################\nDESCRIPTION:\n Read in user IDs from the reviews, construct URLS, and gather data on each user. \n##############################################################################################################################\n\"\"\"\n\n\n#################################### 0. SETUP ##################################################\ndata_path=\"c:/users/gene/documents//duke/dropbox/gene/yelp_scrapping\"\nos.chdir(data_path)\n\n################################################################################################\n##################################### 1. FUNCTIONS ############################################# \n\n############################### I. Scraping Functions ###############################\ndef fetch_website(url):\n \"\"\"\n To hide that the scraping is being done via Python, I change the user-agent. The numerous user-agents\n included herein are simply the ones most commonly used at the time of scraping. Having multiple agents and\n having ones picked randomly did not help. Yelp still occasionally returned fake websites. 
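Note the try/except below wraps only the print/return, so a ConnectionError\n    raised by requests.get itself would propagate uncaught. A retry-with-backoff\n    sketch (illustrative only, not what this function does):\n\n        for attempt in range(3):  # hypothetical retry loop\n            r = requests.get(url, headers=give_rndm_userAgent(user_agent_list))\n            if r.status_code == 200:\n                break\n            time.sleep(2 ** attempt)  # back off 1s, 2s, 4s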
\n \"\"\"\n user_agent_list=[\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_4) AppleWebKit/600.7.12 (KHTML, like Gecko) Version/8.0.7 Safari/600.7.12',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.125 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.130 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.107 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36']\n user_agent=give_rndm_userAgent(user_agent_list)\n print(user_agent['User-agent'])\n r=requests.get(url, headers=user_agent)\n try:\n print(\"Accessed and downloaded URL data\")\n return(r.content)\n except ConnectionError:\n print(\"Incurred the infamous connection error\")\n print(\"Skipping this url\")\n return(\"Skip\")\n\ndef fetch_user_reviews(user_id, num_reviews,base_url=\"http://www.yelp.com/user_details_reviews_self?rec_pagestart=0&userid=\"):\n ##NOTE: some users will probably have hundreds or thousands of reviews. For now, we restrict to collecting at most 50 reviews\n soup=BeautifulSoup(fetch_website(base_url+user_id))\n Master_reviews_list=soup.findAll(\"div\", class_='review')\n \n if (num_reviews>50):\n print(\"Seriously prolific user with [%d] reviews\" %num_reviews)\n if (num_reviews>9) & (num_reviews<200):\n review_links=[x.attrs['href'] for x in soup.findAll('a', class_='page-option available-number')]\n rev_soup_list=[]\n for url in review_links:\n wait()\n rev_soup_list.append(BeautifulSoup(fetch_website(url)))\n list_reviewsList=[rev_soup.findAll(\"div\", class_='review') for rev_soup in rev_soup_list] \n for reviewsList in list_reviewsList:\n Master_reviews_list.extend(reviewsList) \n print(\"# of reviews: %d\\n# reviews got: %d\" %(num_reviews, len(Master_reviews_list))) \n return(Master_reviews_list) \n \n \ndef fetch_user_friends(user_id, num_friends,base_url=\"http://www.yelp.com/user_details_friends?userid=\"):\n #j=0\n friend_set=[]\n friend_list=[]\n wait()\n soup=BeautifulSoup(fetch_website(base_url+user_id))\n master_user_info_list=soup.findAll(\"ul\", class_=\"user-passport-info\")\n master_user_stats_list=soup.findAll('ul', class_='user-passport-stats')\n if len(soup.findAll('a', class_='page-option available-number'))>0:\n friends_links=[x.attrs['href'] for x in soup.findAll('a', class_='page-option available-number')]\n friends_soup_list=[BeautifulSoup(fetch_website(url)) for url in friends_links]\n list_friendsList=[friend_soup.findAll(\"ul\", class_=\"user-passport-info\") for friend_soup in friends_soup_list] \n for friendsList in list_friendsList:\n master_user_info_list.extend(friendsList)\n \n for user_info in master_user_info_list:\n id_link=user_info.find('a').attrs['href']\n friend_list.append(re.search('userid=(\\S+)', id_link).group(1))\n friend_set=list(set(friend_list)) \n print('# of friends: %d\\n# 
friends found: %d' %(num_friends, len(friend_set))) \n \n return(friend_set)\n\n###################################################################################### \n########################## II. Processing Functions ################################## \ndef extract_data(response, url):\n \"\"\"INPUT: response -- the data given by response.content() from Requests module.\n OUTPUT: 1. data_dict -- the data dictionary of desired data.\n 2. appended reviews file. See [append_reviews_txt()]\n EXTERNAL function\"\"\"\n data_dict={}\n data_dict['url']=[url]\n soup=BeautifulSoup(response)\n user_id=re.search('userid=(\\S+)',url).group(1)\n ##FUNCTION START\n #Corrupted website?\n check1=soup.find(\"li\", class_='miniOrange')\n if check1 is not None:\n print(\"\\n!!!!!Yelp gave a corrupted website!!!!!!\")\n return(\"Bad soup\")\n \n data_dict['user_id']=user_id\n #User location:\n if soup.find('h3', class_='user-location alternate') is not None:\n data_dict['Location']=soup.find('h3', class_='user-location alternate').getText()\n if soup.find('h3', class_='user-location alternate') is None:\n data_dict['Location']=np.nan\n \n #Friend and Review Count:\n for grab in ['friend-count', 'review-count']:\n if soup.find('li', class_=grab) is not None:\n data_dict[grab]=int(re.search('\\d+',str.strip(soup.find('li', class_=grab).getText())).group())\n if soup.find('li', class_=grab) is None:\n data_dict[grab]=np.nan\n \n #Elite Status:\n data_dict['elite_num']=len(soup.findAll('span', class_='elite-badge'))\n \n #Bizs Reviewed:\n if data_dict['review-count']>1:\n Master_reviews_list=fetch_user_reviews(user_id, data_dict['review-count'])\n rvwd_biz_url_list=[review_data.find('a', class_='biz-name').attrs['href'] for review_data in Master_reviews_list]\n rvwd_biz_date_list=[str.strip(review_data.find('span', class_='rating-qualifier').getText()) for review_data in Master_reviews_list]\n data_dict['bizReviewed']=rvwd_biz_url_list\n data_dict['bizRvwDate']=rvwd_biz_date_list\n if data_dict['review-count']==1:\n data_dict['bizReviews']=np.nan\n data_dict['date_rvwd']=np.nan\n #Biz Reviewed Ratings\n \"\"\"\n rvwd_biz_rating_list=[]\n for review_data in Master_reviews_list:\n rvwd_biz_rating_list.append(int(re.search('(\\d+)',review_data.find('i', class_='star-img').attrs['title']).group(1)))\n data_dict['given_stars']=rvwd_biz_rating_list\n \"\"\"\n \n #Friends\n if data_dict['friend-count']>0:\n friend_list=fetch_user_friends(user_id, num_friends=data_dict['friend-count'])\n data_dict['friendIDs']=friend_list\n if data_dict['friend-count']==0:\n data_dict['friend-count']=0\n \n ##FUNCTION END \n return(data_dict)\n \n\n######################################################################################## \n############################ III. Convenience Functions ################################ \ndef make_json(file_name='yelp_user_data.json'):\n \"\"\"file_name -- the file where scraped reviews will be saved.\n EXTERNAL function. 
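Note: json.dump('[', f) in the body below serializes the string and writes\n    \"[\" (with quotes) rather than a bare JSON array opener; a plain\n    f.write('[') is presumably what was intended.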
\"\"\"\n if os.path.isfile(file_name)==False:\n with open(file_name, 'w') as f:\n json.dump('[', f)\n \ndef make_update_df(data_dict, file_name=\"SanDiego_biz_addendum.csv\"):\n empty_data_dict={}\n for key in data_dict.keys():\n empty_data_dict[key]=[]\n pd.DataFrame(empty_data_dict).to_csv(file_name, index=False) \n \ndef Pickle(data,file_name='business_addendum.pkl'):\n with open(file_name, 'wb') as f:\n pkl.dump(data, f)\n print(\"Downloaded JSON data pickled to [%s]\" %file_name) \n \ndef eat_pickle(file_name='business_addendum.pkl'):\n with open(file_name, 'rb') as f:\n return(pkl.load(f)) \n\ndef write_count(count, file_name=\"count.txt\", start_count='0' ):\n if os.path.isfile(file_name) == False:\n print(\"Creating new count file: [%s]\" %file_name)\n with open(file_name, 'w') as f:\n f.write(start_count)\n \n if os.path.isfile(file_name):\n with open(file_name, 'w') as the_file:\n the_file.write(str(count))\n \ndef read_count(file_name='count.txt'):\n with open(file_name) as f:\n count=f.readline()\n return(int(count)) \n \ndef counter_reset():\n answer='the cake is a lie!'\n while (answer!='Y') & (answer!='N'):\n answer=input(\"Would you like to reset the counter to 0? [Y/N]: \")\n if answer == \"Y\": \n write_count(0)\n print(\"Counter reset\") \n\ndef step_display(i):\n if i%50==0:\n print(\"On number: %s\" %i)\n\ndef wait():\n wait_time=int(rnd.uniform(low=1,high=5))\n print(\"\\nPausing for: %d seconds...\" %wait_time)\n time.sleep(wait_time)\n #print('seconds: [%d%%]\\r' %seconds_elapsed)\n print(\"--\"*20) \n\ndef read_json_as_text(reviews_file='biz_reviews_collection.json'): \n with open(reviews_file) as f:\n user_data=f.read()\n \n try:\n data_dict=json.loads(user_data)\n except Exception:\n user_data2=re.sub(\"}{\",\"},{\" ,user_data)\n data_dict=json.loads(user_data2)\n return(data_dict) \n \"\"\"\n with open( 'biz_reviews_collection_fixed.json', 'w') as f:\n f.write(user_data2)\n reviews_file='biz_reviews_collection_fixed.json'\n with open(reviews_file, 'r').read() as f:\n user_data=json.loads(f) \n \"\"\"\n \ndef extract_user_ids(reviews_list):\n users_list=[]\n for i in range(0,len(reviews_list)):\n reviews_dict=reviews_list[i]\n entries_list=reviews_dict[list(reviews_dict.keys())[0]]\n if len(entries_list)>0:\n reviews_id_list=[re.sub(\"^user_id:\", \"\",review['id']) for review in entries_list]\n users_list.extend(reviews_id_list)\n print(\"Total # of entries found: [%d] \" %len(users_list))\n unique_users_list=list(set(users_list))\n print(\"\\nTotal # of unique users found: [%d] \" %len(unique_users_list))\n return(unique_users_list) \n \ndef construct_user_urls(unique_users_list, base_url='http://www.yelp.com/user_details?userid='):\n users_url_list=[base_url+user_id for user_id in unique_users_list]\n return(users_url_list)\n \ndef load_yelp_user_urls(count ,file_name='yelp_users_url.pkl'): \n if os.path.isfile(file_name)==True:\n print(\"Loading data...\")\n with open(file_name, 'rb') as f:\n users_url_list=pkl.load(f)[count:]\n\n if os.path.isfile(file_name)==False:\n #if the pickle doesn't exist yet, then make one\n print(\"\\nCouldn't find Pickled User URLs so Loading Original JSON Data\")\n reviews_list=read_json_as_text()\n unique_users_list=extract_user_ids(reviews_list)\n users_url_list=construct_user_urls(unique_users_list)\n with open(file_name, 'wb') as f:\n pkl.dump(users_url_list, f)\n print(\"\\nPickled Yelp User URLs to [%s]\" %file_name) \n print(\"Data loaded\")\n print(\"--\"*20)\n return(users_url_list) \n \ndef add_bad_soup(url, 
file_name='bad_soup_urls.txt'):\n if os.path.isfile(file_name):\n with open(file_name, 'a') as f:\n f.write(','+url)\n if os.path.isfile(file_name)==False:\n with open(file_name, 'w') as f:\n f.write(url)\n print(\"\\nBad soup's URL recorded in: [%s]\" %file_name) \n \ndef give_rndm_userAgent(user_agent_list):\n rnd_agent=np.random.choice(user_agent_list,1)[0]\n user_agent={'User-agent':rnd_agent}\n return(user_agent)\n \n####################################################################################### \n########################### IV. MAIN Function ########################################\ndef main():\n data_path=\"c:/users/gene/documents//duke/dropbox/gene/yelp_scrapping\"\n os.chdir(data_path)\n \n counter_reset()\n count=read_count()\n print(\"--\"*20)\n users_url_list=load_yelp_user_urls(count)\n print(\"--\"*20)\n make_json()\n \n for (i,url) in zip(range(count, len(users_url_list)),users_url_list):\n print(\"--\"*40)\n print(\"--\"*40)\n print(' '*15,url)\n step_display(i)\n response=fetch_website(url)\n Pickle(response, file_name='dwnld_user_profile.pkl')\n #response=eat_pickle('dwnld_user_profile.pkl')\n if response=='Skip':\n pass\n if response !='Skip':\n data_dict=extract_data(response, url)\n if data_dict!='Bad soup':\n with open('yelp_user_data.json', 'a') as f:\n json.dump(',', f, indent=0)\n json.dump({data_dict['user_id']:data_dict}, f, indent=2)\n if data_dict=='Bad soup':\n add_bad_soup(url)\n\n write_count(i)\n #if int(i)%3==0:\n wait()\n\nif \"__main__\"==__name__:\n main()","sub_path":"YelpScrapeUsers_local.py","file_name":"YelpScrapeUsers_local.py","file_ext":"py","file_size_in_byte":14416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"508484788","text":"\"\"\"\ncommswave\n=========\nTakes device communications up and down according to a timefunction.\nComms will be working whenever the timefunction returns non-zero.\n\nConfigurable parameters::\n\n {\n \"timefunction\" : A timefunction definition\n \"threshold\" : (optional) Comms will only work when the timefunction is returning >= threshold. 
If missing then any non-zero value will make comms work.\n }\n\nDevice properties created::\n\n {\n }\n\n\"\"\"\n\nfrom .device import Device\nfrom common import importer\nimport logging\n\nclass Commswave(Device):\n def __init__(self, instance_name, time, engine, update_callback, context, params):\n \"\"\"Take Comms up and down according to some time function\"\"\"\n tf = params[\"commswave\"][\"timefunction\"]\n self.comms_timefunction = importer.get_class(\"timefunction\", list(tf.keys())[0])(engine, self, tf[list(tf.keys())[0]])\n self.comms_tf_threshold = params[\"commswave\"].get(\"threshold\", None)\n self.messages_sent = 0\n self.messages_attempted = 0\n super(Commswave,self).__init__(instance_name, time, engine, update_callback, context, params)\n\n def comms_ok(self):\n self.messages_attempted += 1\n is_ok = super(Commswave, self).comms_ok()\n if self.comms_tf_threshold is not None:\n tf_ok = self.comms_timefunction.state() >= self.comms_tf_threshold\n if not tf_ok:\n pass # logging.info(\"commswave suppressing a communication due to timefunction state\")\n is_ok = is_ok and tf_ok\n else:\n is_ok = is_ok and self.comms_timefunction.state()\n if is_ok:\n self.messages_sent += 1\n return is_ok\n\n def external_event(self, event_name, arg):\n super(Commswave, self).external_event(event_name, arg)\n\n def close(self):\n super(Commswave,self).close()\n logging.info(\"Comms report for \" + str(self.properties[\"$id\"]) + \" \" +\n str(self.messages_sent) + \" sent (\"+str(100 * self.messages_sent/self.messages_attempted) + \"%) from \" +\n str(self.messages_attempted) + \" total\")\n\n\n # Private methods\n\n## (we don't actually need to tick, as we can instantaneously look up timefunction state whenever we need to)\n## def tick_commswave(self, _):\n## self.ok_commswave = self.comms_timefunction.state()\n## self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)\n","sub_path":"synth/devices/commswave.py","file_name":"commswave.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469715217","text":"\"\"\"\nThis script runs the application using a development server.\nIt contains the definition of routes and views for the application.\n\"\"\"\n\nfrom flask import Flask, flash, render_template, request, redirect\napp = Flask(__name__)\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/result', methods=['POST'])\ndef create_user():\n print(\"Got Post Info\")\n print(request.form)\n name_from_form = request.form['name']\n location_from_form = request.form['loc']\n language_from_form = request.form['language']\n comment_from_form = request.form['comment']\n return render_template(\"result.html\", name_on_template=name_from_form, location_on_template=location_from_form, language_on_template=language_from_form, comment_on_template=comment_from_form)\n\nif __name__ == '__main__':\n import os\n HOST = os.environ.get('SERVER_HOST', 'localhost')\n try:\n PORT = int(os.environ.get('SERVER_PORT', '5555'))\n except ValueError:\n PORT = 5555\n app.run(HOST, PORT)\n","sub_path":"python_stack/flask/flask_mysql/DojoSurvey/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509404953","text":"import 
sys\nfrom collections import deque\n\ninput =sys.stdin.readline\n\ndef solution(n, m, A, know):\n s = deque(set(map(int, know)))\n chk = [1]*(n+m+1)\n\n while s:\n x = s.popleft()\n chk[x] = 0\n for j in A[x]:\n if chk[j]==0:\n continue\n s.append(j)\n print(sum(chk[n+1:]))\n\nN, M = map(int, input().split())\nknow = input().split()\narr = [[] for _ in range(N+M+1)]\nif len(know)<=1:\n for _ in range(M): input()\n print(M)\nelse:\n for i in range(1, M+1):\n tmp = list(map(int, input().split()))\n for j in range(1, tmp[0]+1):\n arr[tmp[j]].append(N+i)\n arr[N+i].append(tmp[j])\n solution(N, M, arr, know[1:])","sub_path":"Graph/[1043]거짓말/[1043]거짓말.py","file_name":"[1043]거짓말.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165729962","text":"# Frequency by letter histogram\n\nimport string\nletter_histogram = { }\nrequested_file = open('data/mbox.txt')\n\nfor a_new_line in requested_file :\n a_new_line = a_new_line.translate(str.maketrans('', '', string.punctuation + string.digits))\n a_new_line = a_new_line.lower().rstrip()\n a_word_list = a_new_line.split()\n for a_word in a_word_list :\n a_letter_list = list(a_word)\n for a_letter in a_letter_list :\n letter_histogram[a_letter] = letter_histogram.get(a_letter, 0) + 1\n\nsorted_letters = sorted( [ (k,v) for k,v in letter_histogram.items() ] )\nfor k,v in sorted_letters :\n print(k,v)\n","sub_path":"src/1_mbox_letter_frequency_dicitonary.py","file_name":"1_mbox_letter_frequency_dicitonary.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"273419763","text":"from base.base_tool import BaseTool\nfrom base import utils\nfrom base.decorators import input_tableview, input_output_table, parameter, data_nodata, raster_formats\nimport arcpy\n\ntool_settings = {\"label\": \"Reclass by Table\",\n \"description\": \"Reclass by table...\",\n \"can_run_background\": \"True\",\n \"category\": \"Raster\"}\n\n\nclass ReclassByTableRasterTool(BaseTool):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n BaseTool.__init__(self, tool_settings)\n\n self.execution_list = [self.iterate]\n\n self.from_value_field = None\n self.to_value_field = None\n self.output_value_field = None\n\n return\n\n @input_tableview(data_type=\"raster\")\n @input_tableview(data_type=None, ob_name=\"in_remap_table\", ob_title=\"Remap Table\", other_fields=\"from_value_field From_Value Required from_value, to_value_field To_Value Required to_value, output_value_field Output_Value Required output_value\")\n @parameter(\"missing_values\", \"Missing value treatment\", \"GPString\", \"Optional\", False, \"Input\", data_nodata, None, None, data_nodata[0], \"Options\")\n @parameter(\"raster_format\", \"Format for output rasters\", \"GPString\", \"Required\", False, \"Input\", raster_formats, None, None, \"Esri Grid\")\n @input_output_table(affixing=True)\n def getParameterInfo(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n return BaseTool.getParameterInfo(self)\n\n def iterate(self):\n \"\"\"\n\n Returns:\n\n \"\"\"\n\n p = self.get_parameter_dict()\n\n self.from_value_field = p[\"from_value_field\"]\n self.to_value_field = p[\"to_value_field\"]\n self.output_value_field = p[\"output_value_field\"]\n\n self.iterate_function_on_tableview(self.reclass, return_to_results=True)\n\n return\n\n def reclass(self, data):\n \"\"\"\n\n Args:\n data:\n\n Returns:\n\n \"\"\"\n\n ras = 
data[\"raster\"]\n\n utils.validate_geodata(ras, raster=True)\n\n ws = self.output_file_workspace or self.output_workspace\n\n ras_out = utils.make_table_name(ras, ws, self.raster_format, self.output_filename_prefix, self. output_filename_suffix)\n\n self.info(\"Reclassifying {0} -->> {1}...\".format(ras, ras_out))\n\n arcpy.ReclassByTable_3d(ras, self.in_remap_table, self.from_value_field, self.to_value_field, self.output_value_field, ras_out, self.missing_values)\n\n return {\"raster\": ras_out, \"source_geodata\": ras}\n\n# \"http://desktop.arcgis.com/en/arcmap/latest/tools/3d-analyst-toolbox/reclass-by-table.htm\"\n","sub_path":"tools/raster/reclass_by_table.py","file_name":"reclass_by_table.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"545726383","text":"from django.urls import path\nfrom apps.sites.views import SitesView, SiteDetailView, SummaryView, SummaryAverageView\n\n\nurlpatterns = [\n path(r'sites/', SitesView.as_view(), name='sites'),\n path(r'sites//', SiteDetailView.as_view(), name='sites'),\n path(r'summary/', SummaryView.as_view(), name='summary'),\n path(r'summary-average/', SummaryAverageView.as_view(), name='summary-average'),\n]\n","sub_path":"apps/sites/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490331157","text":"from scrapy.spiders import CrawlSpider, Rule, BaseSpider, Spider\nfrom scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom scrapy.selector import Selector\nfrom scrapy.http import HtmlResponse\n\nfrom courses.items import Course\n\n\nclass EduSpider(CrawlSpider):\n name = 'bu.edu'\n allowed_domains = ['bu.edu']\n start_urls = ['http://www.bu.edu/academics/']\n\n rules = (\n Rule(LxmlLinkExtractor(\n allow=('.*/academics/[a-z][a-z][a-z]/courses/[a-z][a-z][a-z]-[a-z][a-z]-[0-9][0-9][0-9]/', ),\n ), callback='parse_item'),\n\n Rule(LxmlLinkExtractor(\n allow=('.*/academics/[a-z][a-z][a-z]/', '.*/academics/[a-z][a-z][a-z]/courses/.*'),\n )),\n )\n\n def parse_item(self, response):\n item = Course()\n item[\"institute\"] = 'Boston University'\n item['site'] = 'www.bu.edu'\n item['title'] = response.xpath('//*[@id=\"col1\"]/div/h1/text()').extract()[0]\n item['id'] = response.xpath('//*[@id=\"col1\"]/div/h2/text()').extract()[0]\n item['credits'] = response.xpath('//*[@id=\"info-box\"]/dl/dd[1]/text()').extract()[0]\n item['description'] = response.xpath('//*[@id=\"course-content\"]/p[1]/text()').extract()[0]\n yield item\n","sub_path":"courses/spiders/bu.py","file_name":"bu.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"356750760","text":"import xarray as xr\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport pandas as pd\nimport os\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage.filters import gaussian_filter1d\nfrom scipy.interpolate import CubicSpline, BSpline\nfrom hapi import *\n\n\nfrom matplotlib.font_manager import FontProperties\n\n### Plot settings\nfont = {'weight' : 'bold',\n 'size' : 12}\nlabel_fontdict = {'weight' : 'bold',\n 'size' : 12}\ntitle_fontdict = {'weight' : 'bold',\n 'size' : 12}\n\nmatplotlib.rc('font', **font)\n\n##### Define constants\n\ngo = 9.8196 #(m/s**2) \nRd = 287.04 # specific gas constant for dry air\nR_universal = 8.314472\nNa = 
6.0221415e23\n\n# Some constants and the planck function (as radiance!)\npi = np.pi\nh = 6.62607004e-34 # m^2/kg/s\nc = 299792458 # m/s\nk = 1.380649e-23 # J/K\nstefan_boltzmann_c = 5.670374419*(10**-8)\n\n## Define conversion factors \nW_M_MW_CM = 1e2*1e3\n\n###### Helper function for fundamental equations\ndef planck(wav, T):\n c1 = 2.0*h*c**2\n c2 = h*c/(wav*k*T)\n intensity = c1/ ( (wav**5)*(np.exp(c2) - 1.0) )\n # Convert to W/sr/m^2/µm here directly (it was W/sr/m^2/m)\n return intensity*1.e-6\n# return intensity*1.e-2\n\ndef planck_wavenumber(wavenum, T):\n c1 = 2.0*h*(c**2)*(wavenum**3)\n c2 = (h*c*wavenum)/(k*T)\n intensity = c1/(np.exp(c2) - 1.0)\n return intensity\n# return (c1, c2, intensity)\n\ndef stefan_boltzmann(T):\n return stefan_boltzmann_c*(T**4)\n\ndef compute_rf_from_diff_spec_ds(rad_diff, nu):\n '''Given difference of spectra in \n W*m^-2*sr^-1*cm^-1, compute radiative forcing\n W/m^2'''\n# rad_diff = ds['lw_down_total']\n# nu = ds['nu']\n \n integral_rad_diff = np.trapz(rad_diff, x = nu)\n # factor of pi comes from integrating over half sphere\n return integral_rad_diff * np.pi\n\ndef compute_rf_from_diff_spec(rad_diff,\n nu):\n '''Given difference of spectra in \n W*m^-2*sr^-1*cm^-1, compute radiative forcing\n W/m^2'''\n \n integral_rad_diff = np.trapz(rad_diff, x = nu)\n # factor of pi comes from integrating over half sphere\n return integral_rad_diff * np.pi\n\n\n\ndef _filter_k_range_to_aeri(rad_array, nu):\n '''Filter array of radiances to lie with wavenumber\n range of instrument. \n \n Args\n -----\n rad_array - np.array\n \n nu - np.array\n '''\n nu_inds = np.where((nu > 491.79016) & \n (nu < 1799.8556))\n return (rad_array[nu_inds], nu[nu_inds])\n\ndef compute_mean_rad_800_band(rad_array, nu):\n '''Compute mean radiance in 790 - 810 cm band.'''\n \n nu_inds = np.where((nu > 790.0) & (nu < 810.0))\n return np.nanmean(rad_array[nu_inds])\n\n\n###### Helper function for calculating profile properties\ndef compute_profile_properties_merra2(ds, verbose=True):\n ''' Given single profile from merra2 meteorlogical reanalysis, compute pressure levels, VMR \n for water vapor. 
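The conversion used below is vmr_h2o = 1.6068*q, i.e. specific humidity q\n    divided by the molar-mass ratio eps = M_w/M_dry ~ 0.622 (for dilute vapor,\n    vmr ~ q/eps). 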
Profile should contain variables PS, PL, QV, T, and DELP'''\n # Surface pressure at location\n ps_local = ds['PS'].values\n p_local = ds['PL'].values\n # q and T profiles at location\n q_local = ds['QV'].values\n T_local = ds['T'].values\n\n NLEV = len(T_local)\n\n dz = np.divide(ds['DELP'].values,ds['PL'].values)*(Rd*T_local*(1+0.608*q_local))/go\n rho_N = ds['PL'].values*(1-q_local*1.6068)/(R_universal*T_local)*Na/10000.0\n rho_N_h2o = ds['PL'].values*(q_local*1.6068)/(R_universal*T_local)*Na/10000.0\n vmr_h2o = q_local*1.6068\n\n if verbose:\n print('Total column density of dry air: ' +str(np.sum(dz*rho_N))+' molec/cm^2')\n print('Total column density of water vapor: ' + str(np.sum(dz*rho_N_h2o))+' molec/cm^2')\n VCD_dry = dz*rho_N\n \n return(p_local, T_local, dz, vmr_h2o, VCD_dry, rho_N_h2o, rho_N)\n\n\n\ndef create_cross_section_matrix_hapi(p_prof, T_prof, xmin, xmax, time_i=None, output_path=None):\n '''Given temperature/pressure profile, create cross-section matrix (w/ option to save)\n Args:\n output_path - str\n If not None, save cs matrices as netcdf to specified path.\n \n Returns:\n cs_matrix - xr.Dataset [number of levels, number of wavelengths]\n '''\n nu_, cs_co2 = absorptionCoefficient_Voigt(SourceTables='CO2_S', WavenumberRange=[xmin,xmax],Environment={'p':1,'T':270},IntensityThreshold=1e-27)\n \n NLEV = len(p_prof)\n \n cs_matrix_co2 = np.zeros((len(nu_),NLEV))\n cs_matrix_ch4 = np.zeros((len(nu_),NLEV))\n cs_matrix_h2o = np.zeros((len(nu_),NLEV))\n \n\n # Loop over each layer \n for i in range(NLEV):\n print(str(i)+'/'+str(NLEV), end='\\r')\n p_ = p_prof[i]/101325\n # print(p_)”\n T_ = T_prof[i]\n nu_, cs_co2 = absorptionCoefficient_Voigt(SourceTables='CO2_S', WavenumberRange=[xmin,xmax],Environment={'p':p_,'T':T_},IntensityThreshold=1e-27)\n nu_, cs_ch4 = absorptionCoefficient_Voigt(SourceTables='CH4_S', WavenumberRange=[xmin,xmax],Environment={'p':p_,'T':T_},IntensityThreshold=1e-27)\n nu_, cs_h2o = absorptionCoefficient_Voigt(SourceTables='H2O_S', WavenumberRange=[xmin,xmax],Environment={'p':p_,'T':T_},IntensityThreshold=1e-27)\n cs_matrix_co2[:,i] = cs_co2\n cs_matrix_ch4[:,i] = cs_ch4\n cs_matrix_h2o[:,i] = cs_h2o\n \n \n cs_matrix_ds = xr.Dataset()\n cs_matrix_co2_da = xr.DataArray(cs_matrix_co2, coords = [nu_, p_prof], dims = ['nu','pressure'])\n cs_matrix_ch4_da = xr.DataArray(cs_matrix_ch4, coords = [nu_, p_prof], dims = ['nu','pressure'])\n cs_matrix_h2o_da = xr.DataArray(cs_matrix_h2o, coords = [nu_, p_prof], dims = ['nu','pressure'])\n\n\n cs_matrix_ds['cs_matrix_co2'] = cs_matrix_co2_da\n cs_matrix_ds['cs_matrix_ch4'] = cs_matrix_ch4_da\n cs_matrix_ds['cs_matrix_h2o'] = cs_matrix_h2o_da\n if not (time_i is None):\n cs_matrix_ds['time'] = time_i\n cs_matrix_ds = cs_matrix_ds.assign_coords(time = cs_matrix_ds['time'])\n\n if output_path:\n cs_matrix_ds.to_netcdf(output_path)\n return cs_matrix_ds\n\n######## Functions for performing RT calculations\n\n# def compute_tau_matrix(cs_matrix_co2,\n# cs_matrix_h2o,\n# cs_matrix_ch4,\n# CO2_mr = 400.e-6, \n# CH4_mr = 1.8e-6,\n# AMF=1.0):\n# '''Given cross-section matrices and VMRs of \n# gases, compute matrix of optical depths. 
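A minimal sketch of what this stub presumably intended, mirroring the\n#     per-layer optical-depth products used in compute_downwelling_radiation\n#     below (VCD_dry_prof and vmr_h2o_prof assumed as further arguments):\n#         tau_co2 = cs_matrix_co2*VCD_dry_prof*CO2_mr*AMF\n#         tau_h2o = cs_matrix_h2o*VCD_dry_prof*vmr_h2o_prof*AMF\n#         tau_ch4 = cs_matrix_ch4*VCD_dry_prof*CH4_mr*AMF\n#         return (tau_co2, tau_h2o, tau_ch4)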
\n\n# '''\n\n\n\ndef compute_downwelling_radiation(cs_matrix_co2,\n cs_matrix_h2o,\n cs_matrix_ch4,\n T_prof,\n VCD_dry_prof, \n vmr_h2o_prof,\n nu,\n CO2_mr = 400.e-6, \n CH4_mr = 1.8e-6,\n AMF=1.0):\n '''Compute downwelling radiation from an atmosphere containing \n 3 greenhouse gasses (CO2, CH4, and water vapor).\n \n CO2 and CH4 are assumed to be well-mixed, whereas the vmr of water vapor\n can vary. \n \n '''\n NLEV = cs_matrix_co2.shape[1]\n\n # Generate matrices of optical thickness per layer now for each gas: \n tau_co2 = cs_matrix_co2*VCD_dry_prof*CO2_mr*AMF \n tau_h2o = cs_matrix_h2o*VCD_dry_prof*vmr_h2o_prof*AMF \n tau_ch4 = cs_matrix_ch4*VCD_dry_prof*CH4_mr*AMF \n \n # total transmission\n T = np.exp(-tau_co2)*np.exp(-tau_h2o)*np.exp(-tau_ch4)\n \n # component-by-component transmission \n T_CO2 = np.exp(-tau_co2)\n T_H2O = np.exp(-tau_h2o)\n T_CH4 = np.exp(-tau_ch4)\n \n # Generate Planck curve per layer + surface:\n wl_nu = 1.e7/nu*1.e-9\n wavenum_m = nu*1e2\n # Use skin temperature of 300K\n# B = np.zeros((len(nu_),NLEV))\n\n B = np.zeros(T.shape)\n for i in range(NLEV):\n B[:,i] = planck_wavenumber(wavenum_m,T_prof[i])*1e2\n \n # compute downwelling IR radiation \n Rdown = np.zeros(cs_matrix_co2.shape)\n Rdown_CO2 = np.empty_like(Rdown)\n Rdown_CH4 = np.empty_like(Rdown)\n Rdown_H2O = np.empty_like(Rdown)\n\n\n\n for i in range(NLEV):\n Rdown[:,i] = B[:,i]*(1-T[:,i])*np.prod(T[:,i+1:],axis=1)\n # component-by-component\n Rdown_CO2[:,i] = B[:,i]*(1-T_CO2[:,i])*np.prod(T_CO2[:,i+1:],axis=1)\n Rdown_CH4[:,i] = B[:,i]*(1-T_CH4[:,i])*np.prod(T_CH4[:,i+1:],axis=1)\n Rdown_H2O[:,i] = B[:,i]*(1-T_H2O[:,i])*np.prod(T_H2O[:,i+1:],axis=1)\n \n Surface_Down = np.sum(Rdown,axis=1)\n\n Surface_Down_CO2 = np.sum(Rdown_CO2,axis=1)\n Surface_Down_CH4 = np.sum(Rdown_CH4,axis=1)\n Surface_Down_H2O = np.sum(Rdown_H2O,axis=1)\n \n return (Surface_Down_CO2, Surface_Down_CH4, Surface_Down_H2O, Surface_Down)\n############\n\ndef interpolate_profile(p_prof, \n var_prof, \n p_interp_grid, \n method = 'CubicSpline',\n return_interp_obj = False,\n **kwargs):\n '''\n Interpolate profile to given pressure grid. 
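For example (hypothetical arrays), to put a temperature profile onto a\n    uniform grid:\n\n        p_new = np.linspace(p_prof.min(), p_prof.max(), 100)\n        _, T_new = interpolate_profile(p_prof, T_prof, p_new, method='Linear')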
\n \n Args\n -----\n p_prof - np.array\n pressure profile \n var_prof - np.array\n profile of variable to interpolate\n method - str {'CubicSpline','Linear'}\n interpolation method to use\n \n return_interp_obj - bool\n if True, return scipy.interpolate object along with profile\n \n \n Returns\n -------\n (p_interp_grid, var_prof_interpolated) - Profile interpolated to p_interp_grid\n \n '''\n # ensure coords are increasing \n reversed_coords = False\n if (p_prof[1] < p_prof[0]) & \\\n (var_prof[1] < var_prof[0]):\n# (p_interp_grid[1] < p_interp_grid[0]):\n# print('here')\n p_prof = p_prof[::-1]\n var_prof = var_prof[::-1]\n reversed_coords = True\n \n# return (p_prof, var_prof)\n if method == 'CubicSpline':\n interp_obj = CubicSpline(p_prof,var_prof, bc_type = 'natural')\n var_prof_interpolated = interp_obj(p_interp_grid)\n if method == 'BSpline':\n interp_obj = BSpline(p_prof,var_prof, **kwargs)\n var_prof_interpolated = interp_obj(p_interp_grid)\n \n if method == 'Linear':\n interp_obj = interp1d(p_prof,var_prof, bounds_error = False)\n var_prof_interpolated = interp_obj(p_interp_grid)\n \n# if reversed_coords: \n# var_prof_interpolated = var_prof_interpolated[::-1]\n if return_interp_obj:\n return (p_interp_grid, var_prof_interpolated, interp_obj)\n else:\n return (p_interp_grid, var_prof_interpolated)\n\ndef interpolate_T_prof(T_prof, \n dz_prof,\n num_vertical_points = 3000):\n # interpolate T vs. pressure (to find dT/dz @ tau = 1)\n # interpolate from bottom up\n z_prof = np.cumsum(dz_prof[::-1])\n p_interp_grid = np.linspace(0, z_prof.max(), num_vertical_points)\n \n # make temperature increasing\n T_prof_incr = T_prof[::-1]\n\n bottom_lapse_rate = (T_prof_incr[1] - T_prof_incr[0])/(z_prof[1] - z_prof[0])\n\n # need a level at surface (z=0) to perform integration\n z_prof_0 = np.append(np.array([0]),z_prof)\n # T_prof_0 = np.append(np.array(T_prof_incr[0]),T_prof_incr)\n\n T_prof_0 = np.append(np.array(T_prof_incr[0] - bottom_lapse_rate * (z_prof[0])),T_prof_incr)\n\n interp_prof = interpolate_profile(z_prof_0,\n T_prof_0,\n mehtod = 'CubicSpline',\n p_interp_grid = p_interp_grid,\n return_interp_obj=True)\n \n \n return (z_prof_0, interp_prof[-1])\n\n\ndef calc_emission_and_dT_dz(tau_matrix,\n T_interpolator, \n z_prof_0):\n \n '''Given tau matrix of d_taus through layers,\n compute dT/dz and emission height and emission height.\n \n \n Args\n ---------\n tau_matrix - np.array \n Matrix of taus for a gas\n \n interp_prof - scipy.interpolate._cubic.CubicSpline\n Spline object for T(z) from which dT/dz can be inferred \n \n '''\n\n\n N_ks = tau_matrix.shape[0]\n tau_matrix_cumsum = np.cumsum(tau_matrix, axis = 1)\n\n # add zero column to represent tau @ surface\n zero_col = np.zeros(tau_matrix.shape[0]).reshape((tau_matrix.shape[0],1))\n tau_matrix_cumsum = np.concatenate((zero_col, tau_matrix_cumsum), axis = 1)\n\n\n\n tau_wl = np.zeros((N_ks,))\n dT_dz = np.zeros((N_ks,))\n # we're only going up to tau = 1\n p_interp_grid_tau = np.linspace(0, 5, 1500)\n for k_i in range(N_ks):\n \n tau_cumsum_z = tau_matrix_cumsum[k_i,:]\n # interpolate cumsum(tau) vs. 
pressure (to find tau = 1)\n interp_tau_cumsum = interpolate_profile(tau_cumsum_z,\n z_prof_0, \n method = 'Linear',\n p_interp_grid = p_interp_grid_tau,\n return_interp_obj=True)\n\n\n z_at_tau_1 = interp_tau_cumsum[-1](1.0)\n # interp to pressure where tau = 1\n tau_wl[k_i] = interp_tau_cumsum[-1](1.0)\n dT_dz[k_i] = T_interpolator(tau_wl[k_i], nu = 1)\n \n return (tau_wl, dT_dz)\n\n\n####### Plotting functions\n\ndef plot_profile(v_coord, temp,\n v_coord_type = 'pressure',\n plot_kind = 'line',\n min_pres = 10, xlabel = \"Temperature [C]\", newfig_bool = True,\n xlim = None,\n ylim = None,\n linewidth = 10,\n figsize = (6,6),\n label = None, rotation = 0):\n '''Given xr.dataset of single profile, plot vertical profile w/ log(p)\n \n Args\n -------\n v_coord - array-like\n vertical coordinate\n v_coord_type - str {'pressure', 'height'}\n Use pressure or height as vertical coordinate\n plot_kind - str {'line', 'scatter'}\n Kind of plot to use.\n \n Returns\n --------\n plt.axis\n \n '''\n\n \n if newfig_bool:\n plt.figure(figsize = figsize)\n if plot_kind == 'line':\n plt.plot(temp, v_coord, linewidth = linewidth, label = label)\n elif plot_kind == 'scatter':\n plt.scatter(temp, v_coord, label = label)\n \n if v_coord_type == 'pressure':\n plt.gca().invert_yaxis()\n plt.ylim([np.nanmax(v_coord), min_pres])\n plt.gca().set_yscale('log')\n plt.ylabel(\"Pressure [Pa]\")\n elif v_coord_type == 'height':\n plt.ylim([np.nanmin(v_coord), np.nanmax(v_coord)])\n plt.ylabel(\"Height [m]\", weight = 'bold')\n \n \n plt.grid()\n plt.xlabel(xlabel, weight = 'bold')\n \n if xlim: \n plt.xlim(xlim)\n if ylim: \n plt.ylim(ylim)\n \n if rotation != 0: \n plt.xticks(rotation=rotation)\n# plt.locator_params(nbins=8)\n# plt.yticks(np.arange(min_pres, pres.max(), 100.0))\n\n return plt.gca()\n\n\ndef plot_emission_height(wl_nm, \n tau_wl, \n T_prof, \n p_prof, \n label, \n tau_wl_2 = None, \n label_2 = None,\n ylim = None , \n xlim = [5,30],\n log_scale = False,\n ave_emmission_pres = None):\n# wl_nm = wl_nu*1e6\n\n plt.figure(figsize = (15,5))\n ax0 = plt.subplot(121)\n plt.plot(wl_nm, tau_wl, label = label)\n if not (tau_wl_2 is None):\n plt.plot(wl_nm, tau_wl_2, label = label_2)\n if log_scale:\n plt.yscale('log')\n plt.grid()\n# plt.gca().set_yscale('log')\n# plt.gca().invert_yaxis()\n\n plt.xlabel(r'Wavenumber $[cm^{-1}]$')\n plt.ylabel(r'$\\tau = 1$ Height [m]')\n plt.legend()\n if ylim:\n# plt.ylim([p_full.max(), 4*10**4])\n plt.ylim(ylim)\n# plt.xlim((12,18))\n if xlim:\n plt.xlim(xlim)\n else:\n plt.xlim([wl_nm.min(), wl_nm.max()])\n plt.subplot(122) #, sharey = ax0)\n\n plt.plot(T_prof, p_prof, '.-')\n if log_scale:\n plt.yscale('log')\n# plt.gca().set_yscale('log')\n# plt.gca().invert_yaxis()\n if ave_emmission_pres:\n plt.axhline(y = ave_emmission_pres, color = 'r', linestyle = '--')\n if ylim:\n plt.ylim(ylim) \n plt.grid()\n \n \ndef plot_downwelling_rad(Down_CO2, \n Down_CH4, \n Down_H2O, \n nu,\n xlims = (500, 1800),\n figsize = (12,7)):\n fig = plt.figure(figsize = figsize)\n ax1 = fig.add_subplot(111) \n ax2 = ax1.twiny()\n \n \n ax1.plot(nu, 1e3*Down_CO2,label='R $CO_2$', alpha=0.7 ,linewidth = 0.5)\n ax1.plot(nu, 1e3*Down_CH4,label='R $CH_4$', alpha=0.7 ,linewidth = 0.5)\n ax1.plot(nu, 1e3*Down_H2O,label='R $H_{2}O$', alpha=0.7 ,linewidth = 0.5)\n wl_nu = 1.e7/nu*1.e-9\n wavenum_m = nu*1e2\n\n ax1.plot(nu, W_M_MW_CM*planck_wavenumber(wavenum_m,244),label='BB @ 244K',alpha=0.8)\n \n ax1.plot(nu, W_M_MW_CM*planck_wavenumber(wavenum_m,249),label='BB @ 249K',alpha=0.8)\n\n ax1.plot(nu, 
W_M_MW_CM*planck_wavenumber(wavenum_m,230),label='BB @ 230K',alpha=0.8)\n\n \n ax1.plot(nu, W_M_MW_CM*planck_wavenumber(wavenum_m,214),label='BB @ 214K',alpha=0.8)\n plt.legend(loc=0)\n \n ax1.plot(nu, W_M_MW_CM*planck_wavenumber(wavenum_m,200),label='BB @ 200K',alpha=0.8)\n ax1.legend(loc=0)\n\n # plt.xlim((491,1799))\n if not xlims is None:\n ax1.set_xlim(xlims)\n \n def tick_function(X):\n wnum = (1/X)*1e4\n return [\"%.1f\" % z for z in wnum]\n\n ax1Ticks = ax1.get_xticks()\n ax2Ticks = ax1Ticks\n ax2.set_xticks(ax2Ticks)\n ax2.set_xbound(ax1.get_xbound())\n ax2.set_xlabel(r'Wavelength $[\\mu m]$', \n fontsize = 8,\n weight = 'bold')\n ax2.set_xticklabels(tick_function(ax2Ticks))\n \n ax1.set_xlabel('Wavenumber ($cm^{-1}$)', weight = 'bold')\n ax1.set_ylabel(r'Downwelling Radiance [$mW m^{-2} sr^{-1} cm^{-1}$]', weight = 'bold')\n # plt.xlim((4,30))\n ax1.set_title('Downwelling Thermal Radiance at Surface', weight = 'bold')\n ax1.grid()\n # plt.savefig('figs/christian_update_9_14/Rdown_gas_components_zoom.png', dpi = 300)","sub_path":"rad_transfer_python/.ipynb_checkpoints/simulate_radiances_utils-checkpoint.py","file_name":"simulate_radiances_utils-checkpoint.py","file_ext":"py","file_size_in_byte":18247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363853003","text":"\r\nprint(\"Welcome to the Music World. U Can Select the Songs Displayed Below......\\n\")\r\n\r\ndef fun():\r\n \r\n print(\"Bad boy\\nSahore Bahubali\\nMaari\\nKuch Kuch Hota hai\\nPremam Malare\\nSalam Rocky Bhai\\nChampion\\nFast&Furious We own it\\nShape Of You\\n\")\r\n \r\nwhile True:\r\n fun() \r\n choice= input(\"Enter Name of the Song :\").lower()\r\n if choice ==\"bad boy\":\r\n with open ('badboy.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n elif choice ==\"sahore bahubali\":\r\n with open ('bahu.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n elif choice ==\"champion\":\r\n with open ('champion.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n\r\n elif choice ==\"we own it\":\r\n with open ('weownit.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n elif choice ==\"salam rocky bhai\":\r\n with open ('kgf.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n \r\n elif choice ==\"maari\":\r\n with open ('mari.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n \r\n elif choice ==\"kuch kuch hota hai\":\r\n with open ('kuch.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n elif choice ==\"shape of you\":\r\n with open ('shapeofu.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n elif choice ==\"malare\":\r\n with open ('premam.txt','r') as song_lyrics:\r\n for line in song_lyrics.readlines():\r\n print(line)\r\n\r\n \r\n else:\r\n print(\"Plzzzz Enter a valid song\")\r\n## break\r\n var = input(\"Do you want to Continue to Next song.. 
Press c or Press q to exit: \")\r\n if var == \"c\":\r\n continue\r\n elif var == \"q\":\r\n print(\"Thank U ..............\")\r\n break\r\n \r\n","sub_path":"lyricssong.py","file_name":"lyricssong.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"271122790","text":"from reconfigure.tests.parsers.base_test import BaseParserTest\nfrom reconfigure.parsers import JsonParser\nfrom reconfigure.nodes import *\n\n\nclass JsonParserTest (BaseParserTest):\n parser = JsonParser()\n source = \"\"\"{\n \"p2\": 123,\n \"s1\": {\n \"s1p1\": \"qwerty\"\n }\n}\n\"\"\"\n\n parsed = RootNode(None,\n PropertyNode('p2', 123),\n Node('s1',\n PropertyNode('s1p1', 'qwerty'),\n ),\n )\n\n\ndel BaseParserTest\n","sub_path":"reconfigure/tests/parsers/jsonparser_tests.py","file_name":"jsonparser_tests.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382862195","text":"import PySimpleGUI as sg\n\n\nclass AbstractAddEditPessoa:\n def __init__(self, nome_tela: str, texto_entradas: list, texto_botao: str):\n self.__nome_tela = nome_tela\n self.__texto_botao = texto_botao\n self.__texto_entradas = texto_entradas\n self.__janela = None\n\n def configura(self, texto_entradas, texto_botao):\n sg.ChangeLookAndFeel(\"Reddit\")\n\n layout = [\n [sg.Text('Por favor, informe os dados necessários.')],\n ]\n for i in range(len(texto_entradas)):\n layout.append([sg.Text(texto_entradas[i], size=(15, 1)), sg.InputText()])\n\n layout.append([sg.Button(texto_botao, size=(200, 4),\n button_color=('#000', '#5CBEFF'),\n font=('Helvetica', 14))])\n\n self.__janela = sg.Window(self.__nome_tela, layout, size=(500, 300),\n element_justification=\"center\")\n\n def mostra_opcoes(self):\n self.configura(self.__texto_entradas, self.__texto_botao)\n botao, dict_valores = self.__janela.Read()\n self.__janela.Close()\n nome, cpf, telefone, email = dict_valores.values()\n\n return nome, cpf, telefone, email\n","sub_path":"limite/abstract_add_edit_pessoa.py","file_name":"abstract_add_edit_pessoa.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100486813","text":"# -*- coding: utf-8 -*-\n# /***************************************************************************/\n# * __________________________________\n# * METIS CYBERSPACE TECHNOLOGY S.A.\n# * www.metis.tech\n# * __________________________________\n# * [2019] All Rights Reserved.\n# *\n# * NOTICE: All information contained herein is, and remains\n# * the property of Metis CyberSpace Technology and its suppliers,\n# * if any. 
The intellectual and technical concepts contained\n# * herein are proprietary to METIS CYBERSPACE TECHNOLOGY\n# * and its suppliers and may be covered by European and Foreign Patents,\n# * patents in process, and are protected by trade secret or copyright law.\n# * Dissemination of this information or reproduction of this material\n# * is strictly forbidden unless prior written permission is obtained\n# * from Metis Cyberspace Technology.\n#\n# /***************************************************************************/\n#\n# Created Date: Monday March 18th 2019\n# Author: Vassilis Lemonidis\n\"\"\"Module holding :class:`TokenGetter` used to retrieve authentication service token\n\"\"\"\nimport os\nfrom requests import exceptions\nfrom metis_pylib.security.Authorization import Authorization\nfrom metis_pylib import PROJECT_CONFIG, LOGGER\n\nclass TokenGetter:\n \"\"\"Class used to retrieve service token\n \"\"\"\n\n def __init__(self):\n self.authorizer = Authorization()\n self._credentials = {'auth_url': PROJECT_CONFIG['AUTH_URL'],\n 'client_id': PROJECT_CONFIG['CLIENT_ID'],\n 'client_secret': PROJECT_CONFIG['CLIENT_SECRET']}\n\n def get_token(self):\n try:\n return self.authorizer.getToken(self._credentials)\n except exceptions.ConnectionError:\n if os.environ['PY_DEPLOYMENT'].startswith('local_'):\n LOGGER.warning('Cound not contact authentication server,'\n ' will raise if error persists in other deployments')\n else:\n raise\n return None","sub_path":"Metis/file-parser/Model/Authorization/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"464180247","text":"from django import forms\nfrom django.utils.timezone import datetime\n\nfrom apps.focus.fields import ConceptTagWidget, ConceptTagField\nfrom apps.datasets.fields import PublisherSelectWidget, CatalogRecordSelectMultipleWidget\nfrom .models import Story\n\n\nclass SuggestStoryForm(forms.ModelForm):\n tags = ConceptTagField()\n\n class Meta:\n model = Story\n fields = ['title', 'subheader', 'author', 'organization', 'datasets', 'datasource_urls',\n 'body', 'featured_image', 'featured_image_caption', 'tags']\n widgets = {\n 'tags': ConceptTagWidget,\n 'organization': PublisherSelectWidget,\n 'datasets': CatalogRecordSelectMultipleWidget,\n }\n\n def __init__(self, posted_by, **kwargs):\n self.posted_by = posted_by\n super(SuggestStoryForm, self).__init__(**kwargs)\n\n def save(self):\n instance = super(SuggestStoryForm, self).save(commit=False)\n instance.posted_by = self.posted_by\n instance.save()\n tags = self['tags'].data\n concepts = self.fields['tags'].concepts_from_tags(tags)\n instance.tags.add(*tags)\n instance.concepts.add(*concepts)\n if self.cleaned_data['datasets']:\n instance.datasets.add(*self.cleaned_data['datasets'])\n return instance\n\n\nclass PublishStoryForm(forms.ModelForm):\n class Meta:\n model = Story\n fields = ['title', 'subheader', 'organization', 'datasets',\n 'tags', 'concepts', 'body', 'featured_image']\n\n def __init__(self, approved_by, **kwargs):\n self.approved_by = approved_by\n super(PublishStoryForm, self).__init__(**kwargs)\n\n def save(self):\n instance = super(PublishStoryForm, self).save(commit=False)\n instance.approved_by = self.approved_by\n instance.publish()\n instance.save()\n return 
instance\n","sub_path":"apps/stories/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391325754","text":"from setuptools import setup\n\n__VERSION__ = \"1.0\"\n\nsetup(\n name=\"visdom-pooled\",\n version=__VERSION__,\n description=\"Slightly More Efficient Visdom Wrapper\",\n url=\"https://github.com/kaniblu/visdom-pooled\",\n author=\"Kang Min Yoo\",\n author_email=\"k@nib.lu\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\"\n ],\n keywords=\"visdom deeplearning visualization pooled\",\n packages=[\"visdom_pooled\"],\n install_requires=[\n \"visdom\",\n \"numpy\"\n ]\n)\n","sub_path":"pypi_install_script/visdom-pooled-1.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562417124","text":"import attitude_utils as attu\nimport env_utils as envu \nimport numpy as np\nfrom time import time\n\nclass Dynamics_model(object):\n\n \"\"\"\n The dynamics model take a lander model object (and later an obstacle object) and modifies \n the state of the lander.\n\n The lander object instantiates an engine model, that maps body frame thrust and torque to\n the inertial frame. Note that each lander can have its own intertial frame which can be \n centered on the lander's target. \n\n Currentlly this model does not model environmental dynamics, will be added later\n \n The lander model maintains a state vector: \n position [0:3]\n velocity [3:6]\n body frame rotational velocity (w_bn) [6:9]\n mass [9] \n attitude in target frame [10:] (size depends on attitude parameterization)\n \n\n \"\"\"\n\n def __init__(self, h=5e-2, noise_u=np.zeros(3), noise_sd=np.zeros(3), l_offset=np.zeros(3), adjust_inertia_tensor=True):\n self.h = h \n self.Isp = 210.0\n self.g_o = 9.81\n self.g = np.asarray([0.0,0.0,-3.7114])\n self.noise_sd = noise_sd\n self.noise_u = noise_u\n self.l_offset = l_offset\n self.adjust_inertia_tensor = adjust_inertia_tensor\n \n print('6dof dynamics model')\n\n def next(self,t,thrust_command,lander):\n #t0 = time()\n if self.adjust_inertia_tensor:\n J = lander.inertia_tensor * lander.state['mass'] / lander.nominal_mass\n else:\n J = lander.inertia_tensor\n w = lander.state['w']\n x = lander.get_state_dynamics()\n #\n # get force and torque in body frame\n # \n\n\n F,L,mdot = lander.thruster_model.thrust(thrust_command)\n L += self.l_offset\n\n #\n # convert force to acceleration\n #\n\n acc_body_frame = F / lander.state['mass']\n\n #\n # Find acceleration to inertial frame\n # Since the attitude is BN (body with respect to inertial) the associated DCM \n # is BN and maps from inertial to body, so we need to invert it (transpose)\n # to map pfrom body to inertial (I think)\n # \n\n #noise = (self.noise_u + self.noise_sd * np.random.normal(size=3)) / lander.state['mass']\n noise = (self.noise_u + np.clip(self.noise_sd * np.random.normal(size=3), 0, 3*self.noise_sd)) / lander.state['mass']\n\n dcm_NB = lander.attitude_parameterization.get_body_to_inertial_DCM(lander.state['attitude'])\n acc_inertial_frame = dcm_NB.dot(acc_body_frame) \n thrust = acc_inertial_frame * lander.state['mass']\n acc_inertial_frame += self.g + noise \n \n #\n # Here we use the Euler rotational equations of motion to find wdot\n #\n\n Jinv = 
np.linalg.inv(J)\n w_tilde = attu.skew(w)\n wdot = -Jinv.dot(w_tilde).dot(J).dot(w) + Jinv.dot(L)\n #print('DEBUG: ',L,wdot)\n #\n # differential kinematic equation for derivative of attitude\n #\n # integrate w_bt (body frame lander rotation relative to target frame) to get \n # lander attitude in target frame\n # w_bn is stored in lander (rotation in inertial frame, which is caused by thruster torque)\n # reward function will try to make w_bt zero\n #\n\n w_bt = w\n qdot = lander.attitude_parameterization.qdot(lander.state['attitude'], w_bt)\n\n #\n # Use 4th order Runge Kutta to integrate equations of motion\n #\n\n ode = lambda t,x : self.eqom(t, x, acc_inertial_frame, qdot, wdot, mdot)\n x_next = envu.rk4(t, x, ode, self.h )\n attitude = x_next[10:]\n attitude = lander.attitude_parameterization.fix_attitude(attitude) # normalize quaternions\n assert np.all(attitude < np.pi + 1e-9)\n # integrate w_bt (lander_body to targeta to get lander attitude in target frame)\n # w_bn is stored in lander (rotation in inertial frame, which is caused by thruster torque)\n\n #print(thrust_command, w, x_next[6:9])\n lander.state['position'] = x_next[0:3]\n lander.state['velocity'] = x_next[3:6]\n lander.state['w'] = x_next[6:9]\n lander.state['mass'] = x_next[9]\n lander.state['attitude'] = attitude \n lander.state['attitude_321'] = lander.attitude_parameterization.q2Euler321(attitude) \n\n #if not np.all(lander.state['attitude'] < 4):\n # print(lander.state['attitude'] , lander.state['w'])\n #assert np.all(lander.state['attitude'] < 4)\n\n lander.state['thrust'] = thrust \n lander.state['bf_thrust'] = F\n lander.state['torque'] = L\n\n _, t_go = lander.track_func(lander.state['position'],lander.state['velocity'])\n lander.state['t_go'] = t_go\n\n #print('DEBUG3: ',lander.state['w']) \n return x_next\n\n \n \n \n def eqom(self,t, x, acc, qdot, wdot, mdot):\n\n r = x[0:3]\n v = x[3:6]\n w = x[6:9]\n\n rdot = v\n vdot = acc\n\n xdot = np.zeros(10+qdot.shape[0])\n xdot[0:3] = v\n xdot[3:6] = acc\n xdot[6:9] = wdot\n xdot[9] = mdot\n xdot[10:] = qdot\n\n return xdot\n \n \n \n \n \n","sub_path":"AAS-18-290_6DOF_journal/dynamics_model.py","file_name":"dynamics_model.py","file_ext":"py","file_size_in_byte":5393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213431574","text":"import numpy as np\n\n\nclass Wood:\n \"\"\"\n Class for evaluating a function including the gradient and hessian matrix\n at a given point x\n \"\"\"\n\n def eval(self, x):\n \"\"\"Evaluates function at point x\n\n Parameters\n ----------\n x : numpy.array\n Point at which function is going to be evaluated\n \"\"\"\n\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n\n return sum((\n 100*(x1**2 - x2)**2,\n (x1-1)**2,\n (x3-1)**2,\n 90*(x3**2 - x4)**2,\n 10.1*((x2-1)**2 + (x4-1)**2),\n 19.8*(x2-1)*(x4-1),\n ))\n\n def gradient(self, x):\n \"\"\"Evaluates gradient of function at point x\n\n Parameters\n ----------\n x : numpy.array\n Point at which gradient is going to be evaluated\n \"\"\"\n\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n\n return np.array([\n 400*x1*(x1**2-x2) + 2*(x1-1),\n -200*(x1**2-x2) + 20.2*(x2-1) + 19.8*(x4-1),\n 2*(x3-1) + 360*x3*(x3**2-x4),\n -180*(x3**2-x4) + 20.2*(x4-1) + 19.8*(x2-1)\n ])\n\n def hessian(self, x):\n \"\"\"Evaluates hessian of function at point x\n\n Parameters\n ----------\n x : numpy.array\n Point at which hessian is going to be evaluated\n \"\"\"\n\n x1 = x[0]\n x2 = x[1]\n x3 = x[2]\n x4 = x[3]\n\n hessian = np.zeros((4, 4))\n\n 
hessian[0][0] = 1200*x1**2 - 400*x2 + 2\n hessian[0][1] = -400*x1\n\n hessian[1][0] = -400*x1\n hessian[1][1] = 220.2\n hessian[1][3] = 19.8\n\n hessian[2][2] = 1080*x3**2 - 360*x4 + 2\n hessian[2][3] = -360*x3\n\n hessian[3][1] = 19.8\n hessian[3][2] = -360*x3\n hessian[3][3] = 200.2\n\n return hessian\n","sub_path":"tarea7/Wood.py","file_name":"Wood.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408825272","text":"import math\n\ndef sumfor(n):\n a=0\n for i in range(1,n):\n a+=1/(i**2)\n return a\nprint(sumfor(100))\n\ndef sumwhile(tol):\n summen=0\n i=1\n while abs(summen-(math.pi**2/6))>tol:\n summen+=1/(i**2)\n i+=1\n return summen,i\nprint(sumwhile(0.002))\n","sub_path":"Øving3/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544835232","text":"# Import packages\nimport h5py\nimport numpy as np\nimport seaborn as sns\nfrom function_initialization_activation import * # importo la libreria el * identifica todas las funciones del file\n\n\n# Define helper functions that will be used in L-model forward prop\n\ndef linear_forward(A_prev, W, b):\n \"\"\"\n Computes affine transformation of the input.\n\n Arguments\n ---------\n A_prev : 2d-array\n activations output from previous layer.\n W : 2d-array\n weight matrix, shape: size of current layer x size of previuos layer.\n b : 2d-array\n bias vector, shape: size of current layer x 1.\n\n Returns\n -------\n Z : 2d-array\n affine transformation output.\n cache : tuple\n stores A_prev, W, b to be used in backpropagation.\n \"\"\"\n Z = np.dot(W, A_prev) + b\n cache = (A_prev, W, b)\n\n return Z, cache\n\n\ndef linear_activation_forward(A_prev, W, b, activation_fn):\n \"\"\"\n Computes post-activation output using non-linear activation function.\n\n Arguments\n ---------\n A_prev : 2d-array\n activations output from previous layer.\n W : 2d-array\n weight matrix, shape: size of current layer x size of previuos layer.\n b : 2d-array\n bias vector, shape: size of current layer x 1.\n activation_fn : str\n non-linear activation function to be used: \"sigmoid\", \"tanh\", \"relu\".\n\n Returns\n -------\n A : 2d-array\n output of the activation function.\n cache : tuple\n stores linear_cache and activation_cache. 
((A_prev, W, b), Z) to be used in backpropagation.\n    \"\"\"\n    assert activation_fn == \"sigmoid\" or activation_fn == \"tanh\" or \\\n        activation_fn == \"relu\"\n\n    if activation_fn == \"sigmoid\":\n        Z, linear_cache = linear_forward(A_prev, W, b)\n        A, activation_cache = sigmoid(Z)\n\n    elif activation_fn == \"tanh\":\n        Z, linear_cache = linear_forward(A_prev, W, b)\n        A, activation_cache = tanh(Z)\n\n    elif activation_fn == \"relu\":\n        Z, linear_cache = linear_forward(A_prev, W, b)\n        A, activation_cache = relu(Z)\n\n    assert A.shape == (W.shape[0], A_prev.shape[1])\n\n    cache = (linear_cache, activation_cache)\n\n    return A, cache\n\n\ndef L_model_forward(X, parameters, hidden_layers_activation_fn=\"relu\"):\n    \"\"\"\n    Computes the output layer through looping over all units in topological\n    order.\n\n    Arguments\n    ---------\n    X : 2d-array\n        input matrix of shape input_size x training_examples.\n    parameters : dict\n        contains all the weight matrices and bias vectors for all layers.\n    hidden_layers_activation_fn : str\n        activation function to be used on hidden layers: \"tanh\", \"relu\".\n\n    Returns\n    -------\n    AL : 2d-array\n        probability vector of shape 1 x training_examples.\n    caches : list\n        that contains L tuples where each layer has: A_prev, W, b, Z.\n    \"\"\"\n    A = X                           \n    caches = []                     \n    L = len(parameters) // 2        \n\n    for l in range(1, L):\n        A_prev = A\n        A, cache = linear_activation_forward(\n            A_prev, parameters[\"W\" + str(l)], parameters[\"b\" + str(l)],\n            activation_fn=hidden_layers_activation_fn)\n        caches.append(cache)\n\n    AL, cache = linear_activation_forward(\n        A, parameters[\"W\" + str(L)], parameters[\"b\" + str(L)],\n        activation_fn=\"sigmoid\")\n    caches.append(cache)\n\n    assert AL.shape == (1, X.shape[1])\n\n    return AL, caches","sub_path":"function_feed_forward.py","file_name":"function_feed_forward.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"643733440","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n\nimport sqlite3\nfrom sqlite3 import Error\nfrom datetime import date, timedelta, datetime, time\nfrom crc_functions import create_connection, get_data_today, get_difference_all_1day\nfrom crc_home import html_1, html_2, html_end\nimport os\n\npath = os.path.expanduser('~/OOS/cisco')\nprint(html_1)\nprint(html_2)\ndb_file = path+'/oos.db' # name of the sqlite3 database\ntry:\n    conn = create_connection(db_file) # connect to the sqlite3 database\n    data_today = get_data_today(conn) # list of tuples with the data of network elements whose crc or dropped packets >0. The data comes from the most recently created table\n    result = get_difference_all_1day(conn,1,data_today)\n    conn.close()\nexcept:\n    print('Ooops... Something went wrong(((')\nprint('Values of CRC and Output_dropped_packets on the ports of the cisco equipment of the broadband backbone network over the last 24 hours:')\nprint('')\nprint(result)\nprint(\"
\")\nprint(html_end)\n","sub_path":"WEB/cgi-bin/crc_statistic_all_24h.py","file_name":"crc_statistic_all_24h.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569123906","text":"\"\"\"\nCreated on Sat Oct 20 2018\nModules for extracting data from the Chronos database\n\n@author: T.J. Heimovaara\n\"\"\"\n\n\nimport numpy as np\nimport scipy as sp\nimport scipy.stats as stats\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom context import dbl\n# import DataBaseLibrary as dbl\n#from pydream.core import run_dream\nfrom pydream.parameters import SampledParam\n#from pydream.convergence import Gelman_Rubin\n#import inspect\n\n# Meteorological data will be obtained from two sources:\n# 1: a close by weather station (for WM Berkhout, for BB: Lelystad)\n# we will use the evapotranspiration data obtained from the weather station...\n# 2: rainfall from the 1km gridded radar corrected interpolated rainfall data obtained\n# from climate4impact...\n\n# surface areas of Kragge compartment 3 and 4\n\n# In[0]: Import data from KNMI\nweather_station = '269' #Lelystad\nt_range = ['20030101','20210301']\n\npklfile = './DataFiles/meteoLS.gz'\n#path = './MeteoData/WM_Rain_2008-2019.bz2'\n\ninpfile = 'etmgeg_269.txt'\n# Read data from close by meteo station\nmeteo_data_stat = dbl.download_meteoKNMI_etmgeg (t_range, weather_station, pklfile, inpfile)\n\n#meteo_data_stat = dbl.download_meteoKNMI (t_range, weather_station, pklfile)\nmeteo_data_stat = meteo_data_stat.rename(columns={'rain':'rain_station'})\n\n# Read data from gridded radar corrected interpolated rainfall data\n#ain_radar = pd.read_pickle(fpath,compression='infer')\n# transform rain values from kg/m2 (mm) to m water column\n#ain_radar['rain'] = rain_radar['rain']/1e3\n# Merge the station data and the interpolated rain data in to a single dataframe\nmeteo_data = meteo_data_stat\n# meteo_data is top boundary condition. 
We run the model from 2003 onward\nmeteo_data = meteo_data[slice('2003-01-01','2021-03-01')]\n\n#eteo_data.rain.loc[meteo_data['rain'].isnull()] = \\\n# meteo_data.rain_station.loc[meteo_data['rain'].isnull()]\n\n## Download flow and level data from CHRONOS\npump_code = 'PP-11N'\npklfile = './DataFiles/flowdata_PP-11N.gz'\ndf_inline = dbl.download_flow_level (pump_code, pklfile)\n\n# We create a pivot table based on column cname (component names)\n#inline_par = pd.pivot_table(df_inline, values='rawvalue', index=['datetime'],\n# columns=['cname'], aggfunc=np.sum)\n\ntotF0 = pd.pivot_table(df_inline, values='rawvalue', index=['datetime'],\n columns=['cname'], aggfunc=np.sum)\n\n#totF = dbl.remove_outliers_inline(inline_par)\ntotF = dbl.remove_outliers_inlineBB(totF0)\n\n# as the model allows for leachate recirculation it expects a totIniflF dataset\n# For a situation where no leachate is recirculated, we set the totIniflF to zero\n\ntotF0['totInfilF'] = 0\n\nlevelD = totF0['level']\ninfilF = totF0['totInfilF']\n\n#sensData = dbl.download_sens_data_Kragge (pump_code, tmeas, pklfile)\n\n# We create a pivot table based on column cname (component names)\n#inline_par = pd.pivot_table(df_inline, values='rawvalue', index=['datetime'],\n# columns=['cname'], aggfunc=np.sum)\n#totF = sensData['totalFlow']\n#levelD = sensData['levelD']\n#infilF = sensData['totInfilF']\n\n# Download laboratory data for pump pit\npklfile = './DataFiles/labdata_PP-11N.gz'\n\ndf_lab = dbl.download_lab(pump_code, pklfile)\n\n# We create a pivot table based on column cname (component names)\nlab_data = pd.pivot_table(df_lab, values='value', index=['date'],\n columns=['cname'], aggfunc=np.sum)\n\nlab_data = lab_data.rename(index=str, columns={'Ammonium (als N)': 'NH4',\n 'Bromide': 'Br',\n 'Chloride': 'Cl',\n 'Totaal organisch koolstof (TOC)': 'TOC'})\n\nlab_data.index = pd.to_datetime(lab_data.index)\n\n# meteo_data is top boundary condition. We run the model over 10 years\nmeteo_data = meteo_data[slice('2003-01-01','2021-03-01')]\n\n\n# Define simulation time range (trange)\ntrange = pd.date_range(start='2003-01-01',end = '2021-03-01',freq='D')\n\n# Select measurements, should fall within trange.\n# tmeas contains times where measurements are available!\n# can have multiple tmeas vectors for different types of measurements\n# totF contains measured data from mid 2012. We choose to start on the 2012-07-01\n# Because the outflow is influenced by operator decisions we choose to select weekly\n# cumulative totals...\nmeasFreq = 7\ntmeas = pd.date_range(start='2012-06-14',end = '2021-03-01',freq='7D')\nfinter = sp.interpolate.interp1d(totF.index.astype(int),totF.values)\ntotF_val = finter(tmeas.astype(int))\ntotF2 = pd.DataFrame(data = totF_val, index=tmeas)\ntotF2 = totF2-totF2.iloc[0]\nmeas_data_flow = totF2.rename(columns = {0:'totF'})\n\n# Define calibration time range. 
This will be used by DREAM to compare\n# simulated values with calibration set...\n# Data set to be matched by modifying parameters...\ntcalib = pd.date_range(start='2012-06-14',end = '2020-01-01',freq='D')\n\n# In order to facilitate quick and easy comparison of simulation with data\n# we need to define the overlapping indices:\n# tmeas_ind: trange[tmeas_ind] = tmeas\n# tcalib_ind: trange[tcalib_ind]=tcalib\n# tcalmeas_ind, tmeascal_ind: tmeas[tcalmeas_ind]=tcalib[tmeascal_ind]\n\n\nxy, ind1, tmeas_ind = np.intersect1d(tmeas, trange,\n return_indices=True)\nxy, ind1, tcalib_ind = np.intersect1d(tcalib, trange,\n return_indices=True)\nxy, tmeascal_ind, tcalmeas_ind = np.intersect1d(tcalib, tmeas,\n return_indices=True)\n\nxy, tlabmeas_ind, tmeaslab_ind = np.intersect1d(lab_data.index, trange,\n return_indices=True)\nxy, tmeascal_lab_ind, tcalmeas_lab_ind = np.intersect1d(tcalib, lab_data.index,\n return_indices=True)\n\n\ntdata = {'trange':trange,\n 'tmeas':tmeas,\n 'tcalib':tcalib,\n 'tmeas_ind':tmeas_ind,\n 'tcalib_ind':tcalib_ind,\n 'tcalmeas_ind':tcalmeas_ind,\n 'tmeascal_ind':tmeascal_ind,\n 'tlabmeas_ind': tlabmeas_ind,\n 'tmeaslab_ind': tmeaslab_ind,\n 'tmeascal_lab_ind': tmeascal_lab_ind,\n 'tcalmeas_lab_ind': tcalmeas_lab_ind}\n\ntdseries = pd.Series(tdata)\n\n\n# Obtain landfill specific properties\ncellIdx = 0 # 11N = 0, 11Z = 1, 12 = 2\nlF = dbl.wastebodyPropertiesBB(cellIdx) #m2\n\n## Run model wil optimal parameter set...\n\n## Prepare DREAM model...\n# Model parameters which are required to calculate fluxes etc. (often need to\n# optimized).\n\n# List of parameters\npar_d = {'dzCL': [0.5, 1.9, 0.9267], #%m\n 'cropFact': [0.75, 1.5, 0.9584], #[-]\n 'cLPor': [0.15, 0.60, 0.1722], #[-]\n 'cLthRes': [0.001, 0.9, 0.0293], # percentage of thTot\n 'cLKSat': [-5, 1, -1.1936], # 10 log\n 'cLbpar': [0, 8, 2.8698], #%10log! 
[m/d]\n 'wBtau1': [1, 200, 37.5672],\n 'wBsig1': [-5, 1, -0.4642],\n 'wBtau2': [0, 5*365, 170.3241],\n 'wBsig2': [-5, 1, 0.3246],\n 'wBmfrac': [0.01, 1.0, 0.2338],\n 'wBthIni': [0.05, 0.95, 0.5516],\n 'wBfRes' : [0, 0.9, 0.0851],\n 'wBbFlow': [-7,-2,-4.0935], #10log!\n 'cLcIni': [-4, 3, -1.5343], #10log!\n 'wBcIni': [2, 6, 3.1901],\n 'wBvWK': [0, 0.9, 0.1546],\n 'wBalphavW': [-9,2,-4.3908],#, #10log!\n 'wBndEx': [365, 750, 500]\n }\n\n\n\npar_df = pd.DataFrame(data=par_d)\n\nsdData = np.array([1,25])\n\n","sub_path":"BB11N_noExchange/Initialize_BB11N_DREAM01.py","file_name":"Initialize_BB11N_DREAM01.py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497150243","text":"from bus_generator import Generator\nfrom dist_stop import DistStop\nimport hyper_parameters as paras\nfrom arena import calculate_avg_delay, check_convergence\n\n# from multiprocessing import Pool, cpu_count, Process\n\n# def sim_one_isolated_scenario(berth_num, queue_rule, flows, services, persistent, assign_plan):\n\n\ndef sim_one_isolated_scenario(*args):\n queue_rule, berth_num, flows, services, is_persistent, assign_plan = args\n ######## hyper-parameters ########\n max_tolerance_delay = paras.max_tolerance_delay # seconds\n each_eval_interval = 3600 * 10\n total_eval_num = 60 # 400\n epoch_num = each_eval_interval * total_eval_num # the total number of epochs\n\n minimum_eval_num = 20 # 150\n minimum_epoch_num = minimum_eval_num * each_eval_interval\n # if the last *std_num* of mean_seq is greater than threshold, return\n std_num = 20 # 20\n threshold = 0.05\n\n ######## simulation ########\n # duration = int(epoch_num / paras.sim_delta)\n duration = int(epoch_num * paras.sim_delta)\n generator = Generator(flows, duration, assign_plan)\n stop = DistStop(0, berth_num, queue_rule, services, None, None)\n total_buses = []\n mean_seq = []\n for epoch in range(0, epoch_num, 1):\n t = epoch * paras.sim_delta\n # operation at the stop ...\n stop.process(t)\n\n # dispatch process ...\n if is_persistent:\n # the capacity case, keep the entry queue length == berth_num\n while stop.get_entry_queue_length() < berth_num:\n bus = generator.dispatch(t, persistent=True)\n total_buses.append(bus)\n stop.bus_arrival(bus, t)\n else:\n # according to arrival table\n dispatched_buses = generator.dispatch(t)\n for bus in dispatched_buses:\n total_buses.append(bus)\n stop.bus_arrival(bus, t)\n\n # evaluate the convergence\n if epoch % each_eval_interval == 0 and epoch != 0:\n if is_persistent:\n mean_seq.append(stop.exit_counting / (t * 1.0) * 3600)\n else:\n mean_seq.append(calculate_avg_delay(total_buses))\n if mean_seq[-1] >= max_tolerance_delay:\n return mean_seq\n if epoch > minimum_epoch_num:\n if check_convergence(mean_seq[-std_num:], threshold):\n return mean_seq\n\n # reset for next round\n # stop.reset()\n # generator.reset()\n # total_buses = []\n\n # return (assign_plan, mean_seq[-1])\n return mean_seq\n\n\n\"\"\"\n\ndef test():\n ######### parameters ########\n berth_num = 4\n total_bus_flow = 140.0 # buses/hr\n mu_S = 25 # seconds\n line_num = 4\n persistent = False\n assign_plan = {0: 0, 1: 1, 2: 2, 3: 3} # line -> berth\n # assign_plan = None\n flows, services = generate_line_info(line_num, total_bus_flow, mu_S)\n\n # flows = {0: [f/4, 1.0], 1:[f/4, 1.0], 2:[f/4, 1.0], 3:[f/4, 1.0]} # [buses/hr, c.v.]\n # services = {0: [mu_S, c_S], 1: [mu_S, c_S], 2: [mu_S, c_S], 3: [mu_S, c_S]}\n\n ######### for plotting time-space diagram 
########\n    # queue_rule = 'FO-Bus'\n    # res = sim_one_isolated_scenario(berth_num, queue_rule, flows, services, persistent, assign_plan)\n\n    ### plot settings\n    line_styles = [\"-\", \":\", \"--\", \"-.\"]\n    rules = [\"FIFO\", \"LO-Out\", \"FO-Bus\", \"FO-Lane\"]\n    rule2style = {rules[i]: line_styles[i] for i in range(len(rules))}\n\n    ######### for desire ########\n    # rules = ['FIFO', 'LO-Out', 'FO-Bus', 'FO-Lane']\n    rules = [\"FIFO\", \"LO-Out\", \"FO-Bus\"]\n\n    if persistent:\n        c_Ss = [0.1 * x for x in range(11)]\n        rule_capacities = {}\n        for queue_rule in rules:\n            capacities = []\n            for c_S in c_Ss:\n                print(c_S)\n                services = {0: [mu_S, c_S]}\n                cpt = sim_one_isolated_scenario(\n                    berth_num, queue_rule, flows, services, persistent, assign_plan\n                )\n                capacities.append(cpt)\n            rule_capacities[queue_rule] = capacities\n        print(rule_capacities)\n\n        # plotting ...\n        plt, ax = set_x_y_draw(\"C_S\", \"buses/hr\")\n        for rule, capacities in rule_capacities.items():\n            if rule == \"FO-Lane\":\n                plt.plot(c_Ss, capacities, \"r\", linestyle=rule2style[rule], linewidth=2)\n            else:\n                plt.plot(c_Ss, capacities, \"k\", linestyle=rule2style[rule], linewidth=2)\n        ax.legend(\n            [r\"FIFO\", r\"LO-Out\", r\"FO-Bus\", r\"FO-Lane\"], handlelength=3, fontsize=13\n        )\n        plt.show()\n    else:\n        c_Ss = [0.1 * x for x in range(11)]\n        rule_delays = {}\n        for queue_rule in rules:\n            delays = []\n            for c_S in c_Ss:\n                services = {\n                    0: [mu_S, c_S],\n                    1: [mu_S, c_S],\n                    2: [mu_S, c_S],\n                    3: [mu_S, c_S],\n                }\n                print(c_S)\n                delay = sim_one_isolated_scenario(\n                    berth_num, queue_rule, flows, services, persistent, assign_plan\n                )\n                delays.append(delay)\n            rule_delays[queue_rule] = delays\n        print(rule_delays)\n\n        # plotting ...\n        plt, ax = set_x_y_draw(\"C_S\", \"delay (secs)\")\n        for rule, delays in rule_delays.items():\n            if rule == \"FO-Lane\":\n                plt.plot(c_Ss, delays, \"r\", linestyle=rule2style[rule], linewidth=2)\n            else:\n                plt.plot(c_Ss, delays, \"k\", linestyle=rule2style[rule], linewidth=2)\n\n        ax.legend(rules, handlelength=3, fontsize=13)\n\n        plt.show()\n\"\"\"\n\n\nif __name__ == \"__main__\":\n    pass\n","sub_path":"isolated_scenario.py","file_name":"isolated_scenario.py","file_ext":"py","file_size_in_byte":5703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"527363131","text":"from commons import Node\nfrom commons import gen_linked_list\nfrom commons import gen_linked_seq\nfrom commons import gen_linked_by_list\n\ndef print_list(head:Node):\n    if head is None:\n        print('Empty')\n        return\n    p = head\n    while p.nxt:\n        print(p.val, end='->')\n        p = p.nxt\n    print(p.val)\n\n\n# Print the list in reverse, using recursion\ndef print_reverse(head:Node):\n    if head is None:\n        return\n    print_reverse(head.nxt)\n    print(head.val, end='->')\n\n\n# Reverse a singly linked list with three pointers p, q, r\ndef reverse(head:Node) -> Node:\n    if head is None or head.nxt is None:\n        return head\n    p, q = head, head.nxt\n    while q :\n        r = q.nxt\n        q.nxt = p\n        p, q = q, r\n    head.nxt = None\n    return p\n\n\n# Count the number of nodes in a singly linked list\ndef count(head:Node):\n    cnt = 0\n    p = head\n    while p:\n        cnt += 1\n        p = p.nxt\n    return cnt\n\n\n# Find the k-th node from the end of a singly linked list (k > 0)\n# fast/slow pointers\ndef last_k(head:Node, k:int):\n    if k <= 0:\n        return None\n    p, q = head, head\n    c = 0\n    while q:\n        if c >= k:\n            p = p.nxt\n            q = q.nxt\n        else:\n            q = q.nxt # only the fast pointer moves\n            c += 1 \n    return p.val\n\n\n# Find the middle node with fast/slow pointers: one moves one step at a time, the other two\ndef find_middle(head:Node):\n    if head is None or head.nxt is None:\n        return head\n    p, q= head, head\n    while p and p.nxt:\n        p = p.nxt.nxt\n        q = q.nxt\n    return q.val\n\n\n# Append at the end; requires traversing to the tail, O(N)\ndef add_last(head:Node, v):\n    if head is None:\n        head = Node(v, None)\n        return head\n    p = head\n    while p.nxt:\n        p = p.nxt\n    n = Node(v, None)\n    p.nxt = n\n    return head\n\n\n# First common node\n# First move to positions equally distant from the tail, then advance together and check for an identical node\ndef first_common_node(head1:Node, head2:Node):\n    m, n = 0, 0\n    p, q = head1, head2\n    while p:\n        m += 1\n        p = p.nxt\n    while q:\n        n += 1\n        q = q.nxt\n    t = m - n\n    p, q = head1, head2\n    if t > 0:\n        while t > 0:\n            p = p.nxt\n            t -= 1\n    else:\n        t = -t\n        while t > 0:\n            q = q.nxt\n            t -= 1\n    while p and q:\n        if p is q:\n            return p\n        p = p.nxt\n        q = q.nxt\n    return None\n\n\ndef merge_sorted_list(head1:Node, head2:Node):\n    if head1 is None:\n        return head2\n    if head2 is None:\n        return head1\n    p, q = head1, head2 \n    if head1.val <= head2.val: # h points to the head, r points to the tail\n        h = head1\n        p = p.nxt\n    else:\n        h = head2\n        q = q.nxt\n    r = h\n    while p and q: # pick one of p and q and append it after r\n        if p.val <= q.val:\n            r.nxt = p\n            p = p.nxt\n        else:\n            r.nxt = q\n            q = q.nxt\n        r = r.nxt # r is the tail of the newly formed list\n    if p :\n        r.nxt = p\n    if q:\n        r.nxt = q\n    return h\n\n\n# Merge sorted linked lists, recursive version\ndef merge_sorted_list_recursive(head1:Node, head2:Node):\n    if head1 is None:\n        return head2\n    if head2 is None:\n        return head1\n    if head1.val <= head2.val:\n        h = head1\n        h.nxt = merge_sorted_list_recursive(head1.nxt, head2)\n    else:\n        h = head2\n        h.nxt = merge_sorted_list_recursive(head1, head2.nxt)\n    return h\n\n\n# Bubble sort\ndef bubble_sort(head:Node):\n    if head is None or head.nxt is None:\n        return head\n    p = head\n    while p:\n        q = p.nxt\n        while q:\n            if p.val < q.val: # < for one order, > for the reverse order\n                p.val, q.val = q.val, p.val\n            q = q.nxt\n        p = p.nxt\n    return head\n\n\n\n\n# Detect a cycle with fast/slow pointers: one moves one step at a time, the other two\n# If there is a cycle they must meet; if there is none, the fast pointer reaches the end\ndef has_cycle(head:Node):\n    if head is None:\n        return False\n    p, q = head, head\n    while p and p.nxt:\n        p = p.nxt.nxt\n        q = q.nxt\n        if p == q:\n            return q # used next to find the entry node of the cycle\n    return False\n\n\n# Find the first node inside the cycle\n# 1 check whether there is a cycle\n# 2 take a node from the cycle; pretend the cycle is broken there, forming two intersecting lists\n# 3 the first common node of the two intersecting lists is the entry node of the cycle\ndef fine_first_node_in_cycle(head:Node):\n    no = has_cycle(head)\n    if not no:\n        return None\n    return first_common_node(no, head)\n\n\nif __name__ == \"__main__\":\n    h1 = gen_linked_by_list('3572917')\n    h = bubble_sort(h1)\n    print_list(h)","sub_path":"python/structure/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"595700397","text":"#!/usr/bin/env python\n\nimport modo\nimport lx\nimport lxu.command\nimport lxu.select\nimport traceback\nimport Tila_BatchExportModule as t\nfrom Tila_BatchExportModule import user_value\nfrom Tila_BatchExportModule import batch_export\n\n\nclass CmdBatchExport(lxu.command.BasicCommand):\n    def __init__(self):\n        lxu.command.BasicCommand.__init__(self)\n\n        reload(user_value)\n        reload(t)\n\n        user_value.add_User_Values(self, t.userValues)\n\n    def cmd_Flags(self):\n        return lx.symbol.fCMD_MODEL | lx.symbol.fCMD_UNDO\n\n    def basic_Enable(self, msg):\n        return True\n\n    def cmd_Interact(self):\n        pass\n\n    def basic_Execute(self, msg, flags):\n        reload(t)\n        reload(batch_export)\n        try:\n            scn = modo.Scene()\n            currScn = modo.scene.current()\n\n            userSelection = scn.selected\n            userSelectionCount = len(userSelection)\n\n            currPath = currScn.filename\n\n            if currPath is None:\n                currPath = \"\"\n\n            scnIndex = lx.eval('query sceneservice scene.index ? 
current')\n\n userValues = user_value.query_User_Values(self, t.kit_prefix)\n\n tbe = batch_export.TilaBacthExport\n\n userValues[1] = True\n\n if userValues[3]:\n tbe.batch_folder(tbe(userSelection,\n userSelectionCount,\n scn,\n currScn,\n currPath,\n scnIndex,\n userValues))\n elif userValues[2]:\n tbe.batch_files(tbe(userSelection,\n userSelectionCount,\n scn,\n currScn,\n currPath,\n scnIndex,\n userValues))\n except:\n lx.out(traceback.format_exc())\n\n def cmd_Query(self, index, vaQuery):\n lx.notimpl()\n\n\nlx.bless(CmdBatchExport, t.TILA_BATCH_FOLDER)","sub_path":"lxserv/Tila_BatchFolder.py","file_name":"Tila_BatchFolder.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"546422626","text":"# -*- coding:Utf-8 *-\nimport os.path\nimport json\nfrom collections import namedtuple\nimport sys, os\nsys.path.insert(0, os.path.abspath('../'))\nfrom Model.door import Door\nfrom Model.room import Room\nfrom Model.floor import Floor\nfrom Enum.direction import Direction\n\n# -------------- CREATE FLOOR -------------- #\ndef create_floor_from_json(filename):\n\n file = open_file(filename)\n json_file = parse_file_to_json(file)\n\n floor = Floor(json_file.id, json_file.name)\n for room_json in json_file.rooms:\n floor.add_room(create_room(room_json))\n\n floor.initialize_start_room()\n file.close()\n\n return floor\n\ndef open_file(filename):\n current_path = os.path.abspath(os.path.dirname(__file__))\n path_of_json = os.path.join(current_path, \"../resources/{0}\".format(filename))\n file = open(path_of_json, \"r\")\n return file\n\ndef parse_file_to_json(file):\n content = file.read()\n # Parse JSON into an object with attributes corresponding to dict keys.\n json_file = json.loads(content, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n return json_file\n\ndef create_room(room_json):\n room = Room(room_json.id)\n if room_json.isStart == True:\n room.set_isStart()\n\n if room_json.isEnd == True:\n room.set_isEnd()\n\n for door_json in room_json.doors:\n room.add_door(create_door(door_json))\n\n return room\n\ndef create_door(json_door):\n door = Door(json_door.idNextRoom, get_direction(json_door.direction))\n return door\n\n\ndef get_direction(json_direction):\n if json_direction == Direction.NORTH.value:\n return Direction.NORTH\n elif json_direction == Direction.EAST.value:\n return Direction.EAST\n elif json_direction == Direction.SOUTH.value:\n return Direction.SOUTH\n else :\n return Direction.WEST\n\n\n# -------------- WALK IN FLOOR -------------- #\ndef display_current_room(floor):\n floor.display_current_room()\n\ndef check_door_exist_in_current_room(floor, direction):\n current_room = floor.currentRoom\n for door in current_room.doors:\n if door.direction.value == direction :\n return True\n return False\n\ndef move_to_another_room(floor, direction):\n current_room = floor.currentRoom\n door = list(filter(lambda door: door.direction.value == direction, current_room.doors))[0]\n floor.change_room(door.idRoom)\n","sub_path":"process/floorProcess.py","file_name":"floorProcess.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"44032591","text":"#from tkinter import font\r\n#import tkinter as tk\r\n#root = tk.Tk()\r\n#italicised = tkFont.Font(slant='italic')\r\n\r\ndef optionlist():\r\n #determines type of source\r\n global option\r\n option = input(\"Is your reference from 
a:\\n1. book \\\r\n \\n2. chapter \\\r\n\t\\n3. journal article \\\r\n\t\\n4. lecture \\\r\n \\n5. a document or publication produced by a Government, International Organisation, Corporation or NGO \\\r\n \\n6. an article in a newspaper or magazine \\\r\n\t\\n7. a television or radio broadcast \\\r\n\t\\n8. a website/blog/twitter \\\r\n\t\\n9. online e-book \\\r\n\t\\n10. material accessed by an e-reader \\\r\n\t\\n11. secondary referencing \\\r\n\t\\nType the corresponding number to select.\")\r\n\r\ndef authorchecker():\r\n #informs user of correct reference syntax for name\r\n if option in ['1','2','3','4','5','6','8','9','10','11']:\r\n global authorname\r\n authorname = input(\"Type the author's name in the following manner: Waltz, K. \")\r\n\r\n elif option in ['5','7']:\r\n global authorname2\r\n authorname2 = input(\"Type the publisher's/tv program's/radio station's/Government's name \")\r\n\r\n else:\r\n print(\"Type a number corresponding to one of the values 1 - 11.\")\r\n optionlist()\r\n authorchecker()\r\n\r\ndef titlechecker():\r\n #informs user of correct reference syntax for title\r\n if option in ['2','3','4','5','6','8']:\r\n global titlename\r\n titlename = input(\"Type the title in the following manner: 'The Agent-Structure Problem in International Relations Theory' \")\r\n\r\n elif option in ['1','9']:\r\n global titlename2\r\n titlename2 = input(\"Type the title in the following manner: Theory of International Politics, but in ITALICS. \")\r\n\r\n else:\r\n print(\"Type a number corresponding to one of the values 1 - 11.\")\r\n optionlist()\r\n titlechecker()\r\n\r\ndef secondtitlechecker():\r\n #informs user of correct syntax for booktitle\r\n if option in ['2','3','6']:\r\n global secondtitle\r\n secondtitle = input(\"Type the book's title in the following manner: International Organisation (but in ITALICS.) \")\r\n\r\ndef editorchecker():\r\n #informs user of correct syntax for edited volume\r\n if option in ['2']:\r\n global editor\r\n editor = input(\"Type the editor's name. \")\r\n\r\ndef publishdetailchecker():\r\n #informs the user of correct syntax for publishing information\r\n if option in ['1','2','5','9']:\r\n global publishdet\r\n publishdet = input(\"Type the publisher information in the following manner: (Cambridge, Cambridge University Press, 1999) \")\r\n \r\ndef journalnumberchecker():\r\n #informs the user of the correct syntax for journal volume number\r\n if option in ['3']:\r\n global volumeno\r\n volumeno = input(\"Type the number of the journal volume. \")\r\n\r\ndef pagenumberchecker():\r\n #informs the user of the correct syntax for pagenumber\r\n if option in ['1','2','3','5','6','9']:\r\n global pagenumber\r\n pagenumber = input(\"Type the page number in the following manner: p. 
46 \")\r\n \r\ndef datechecker():\r\n #informs the user of the correct syntax for the date\r\n if option in ['4','6','7','8','10']:\r\n global date\r\n date = input(\"Type the date in the following manner: 20 August 2010 \")\r\n\r\ndef datechecker2():\r\n #informs the user of the correct syntax for the date(journal)\r\n if option in ['3']:\r\n global date2\r\n date2 = input(\"Type the year of the publication as follows: (1984) \")\r\n\r\ndef lecturechecker():\r\n #informs the user of the correct syntax for the lecture location\r\n if option in ['4']:\r\n global lectureloc\r\n lectureloc = input(\"Type the location for the lecture as follows: Lecture at Aberystwyth University \")\r\n\r\ndef docnamechecker():\r\n #informs the user of the correct syntax for the document name\r\n if option in ['5']:\r\n global docname\r\n docname = input(\"Type the name of the document as follows: House of Commons Foreign Affairs Committee, (with comma) \")\r\n\r\ndef newspapermagname():\r\n #informs the user of the correct syntax for the newspaper/magazine name\r\n if option in ['6']:\r\n global newsmagname\r\n newsmagname = input(\"Type the name of the newspaper/magazine as follows: The Guardian (but in ITALICS.)\" )\r\n\r\ndef locationchecker():\r\n #informs the user of the correct syntax for the location of publication\r\n if option in ['6']:\r\n global locationpub\r\n locationpub = input(\"Type the location of publication as follows: (London) \")\r\n \r\nprint(\"Welcome to the reference automator v 0.1\")\r\n\r\noptionlist()\r\nauthorchecker()\r\ntitlechecker()\r\nsecondtitlechecker()\r\neditorchecker()\r\npublishdetailchecker()\r\njournalnumberchecker()\r\npagenumberchecker()\r\ndatechecker()\r\ndatechecker2()\r\nlecturechecker()\r\ndocnamechecker()\r\nnewspapermagname()\r\nlocationchecker()\r\n\r\n\r\nif option in ['1']:\r\n print(authorname , titlename2 , publishdet + \", \" + pagenumber + \".\")\r\n\r\nif option in ['2']:\r\n print(authorname , titlename + \", in \" + secondtitle + \" edited by \" + editor , publishdet + \", \" + pagenumber + \".\")\r\n\r\nif option in ['3']:\r\n print(authorname , titlename + \", \" + secondtitle + \", \" + volumeno , date2 , pagenumber + \".\")\r\n\r\nif option in ['4']:\r\n print(authorname , titlename , lectureloc + \", \" + date + \".\")\r\n\r\nif option in ['5']:\r\n print(docname + \", \" + titlename , publishdet + \", \" + pagenumber + \".\")\r\n\r\nif option in ['6']:\r\n print(authorname , titlename + \".\" , newsmagname , locationpub + \", \" + date + \",\" + pagenumber + \".\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"automated reference creator.py","file_name":"automated reference creator.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608198478","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import cuda\nfrom torch import optim\nfrom torch import autograd\nfrom torchvision import transforms\nfrom classify_svhn import get_data_loader\nfrom q3_vae import View\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport samplers\nimport argparse\nimport os\n\n\nclass D(nn.Module):\n def __init__(self, batch_size, dimz):\n super().__init__()\n self.batch_size = batch_size\n self.dimz = dimz\n\n self.convs = nn.Sequential(\n # layer1\n nn.Conv2d(3, 64, 5, padding=2, stride=2),\n nn.LeakyReLU(0.2),\n\n # layer2\n nn.Conv2d(64, 128, 5, padding=2, stride=2),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2),\n\n # 
layer3\n nn.Conv2d(128, 256, 5, padding=2, stride=2),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2),\n\n # layer 4\n nn.Conv2d(256, 1, 5, padding=2, stride=1),\n View(-1, 4*4*1),\n nn.Sigmoid(),\n )\n\n def forward(self, x):\n out = self.convs(x)[:, 0]\n return out\n\n\nclass G(nn.Module):\n\n def __init__(self, batch_size, dimz):\n super().__init__()\n self.batch_size = batch_size\n self.dimz = dimz\n\n self.deconvs = nn.Sequential(\n # layer 1\n nn.Linear(self.dimz, 4 * 4 * 512),\n nn.BatchNorm1d(4*4*512),\n nn.ReLU(),\n View(-1, 512, 4, 4),\n\n # layer2\n nn.ConvTranspose2d(512, 256, 5, padding=2, stride=2, output_padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n\n # layer 3\n nn.ConvTranspose2d(256, 128, 5, padding=2, stride=2, output_padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n\n # layer 4\n nn.ConvTranspose2d(128, 3, 5, padding=2, stride=2, output_padding=1),\n nn.Tanh()\n )\n\n def forward(self, z):\n out = self.deconvs(z)\n return out\n\n def extract_features(self, z):\n return z.view(-1, 3*32*32)\n\n\ndef wgan_gp_loss(real, fake, grad, lam):\n \"\"\"\n Function that computes the WGAN-GP metric given the discriminator's output on real and fake data\n :param real: The output of the discriminator on real data of size [batch_size,]\n :param fake: The output of the generator on fake data of size [batch_size,]\n :param grad: The gradient of the output of the discriminator. Size is [batch_size, 3*32*32]\n :param: The lambda factor applied for regularization\n :return: The WGAN-GP loss over all elements in the mini-batch. Size is [batch_size,]\n \"\"\"\n return fake - real + lam * (torch.norm(grad, dim=1) - 1.)**2\n\n\ndef train_model(g, d, train, valid, save_path):\n \"\"\"\n Function that trains the model\n :param g: The model generator to train\n :param d: The discriminator to train\n :param train: The training set\n :param valid: The validation set\n :return:\n \"\"\"\n # optimizer for the network\n g_optim = optim.Adam(g.parameters(), lr=args.lr, betas=(0, 0.9))\n d_optim = optim.Adam(d.parameters(), lr=args.lr, betas=(0, 0.9))\n\n # print PIL image\n display = transforms.ToPILImage()\n\n for epoch in range(args.nb_epochs):\n for i, (batch, label) in enumerate(train):\n # put batch on device\n batch = batch.to(args.device)\n\n # obtain the discriminator output on real data\n real_prob = d(batch)\n\n # obtain the discriminator output on the fake data\n z = torch.randn(batch.size()[0], g.dimz, device=args.device)\n fake = g(z).detach()\n fake_prob = d(fake)\n\n # obtain the gradient term of the WGAN-GP loss\n a = torch.rand(batch.size()[0], 1, 1, 1, device=args.device)\n conv = a * batch + (1. - a) * fake\n conv.requires_grad = True\n d_conv = d(conv)\n grad = autograd.grad(d_conv, conv, torch.ones_like(d_conv).to(args.device),\n retain_graph=True, create_graph=True, only_inputs=True)[0]\n\n # compute the WGAN-GP loss\n loss = wgan_gp_loss(real_prob, fake_prob, grad.view(-1, 3 * 32 * 32), args.lam).mean()\n\n # minimize the loss\n autograd.backward(loss)\n\n # update the parameters\n d_optim.step()\n d_optim.zero_grad()\n\n if i % args.update_ratio == 0. 
and i > 0:\n # update the generator\n z = torch.randn(batch.size()[0], g.dimz, device=args.device)\n fake = g(z)\n fake_prob = d(fake)\n loss = - fake_prob.mean()\n loss.backward()\n g_optim.step()\n g.zero_grad()\n\n with torch.no_grad():\n # After training for an epoch, output validation loss\n valid_loss = torch.zeros(1)\n nb_batches = 0\n for i, (batch, label) in enumerate(valid):\n nb_batches += 1\n batch = batch.to(args.device)\n real_prob = d(batch)\n z = torch.randn(batch.size()[0], g.dimz, device=args.device)\n fake = g(z)\n display(((fake[0] + 1.) * 255.).to(device='cpu', copy=True)).show()\n fake_prob = d(fake)\n a = torch.rand(batch.size()[0], 1, 1, 1, device=args.device)\n\n with torch.enable_grad():\n conv = a * batch + (1. - a) * fake\n conv.requires_grad = True\n d_conv = d(conv)\n grad = autograd.grad(d_conv, conv, torch.ones_like(d_conv).to(args.device),\n retain_graph=False, create_graph=True, only_inputs=True)[0]\n batch_loss = wgan_gp_loss(real_prob, fake_prob, grad.view(-1, 3 * 32 * 32), args.lam)\n valid_loss += batch_loss.mean()\n valid_loss /= nb_batches\n print(\"After epoch {} the validation loss is: \".format(epoch + 1), valid_loss.item())\n\n # save the model to be used later\n torch.save(g.state_dict(), save_path)\n\n\ndef evaluation(model):\n \"\"\"\n Function that generates samples for the qualitative evaluation of the model\n :param model: The model from which we pull samples\n :return:\n \"\"\"\n with torch.no_grad():\n transf = transforms.ToPILImage()\n z = torch.randn(model.batch_size, model.dimz, device=args.device)\n samples = model.deconvs(z)\n\n if not os.path.isdir(args.sample_dir):\n os.mkdir(args.sample_dir)\n\n decoder_dir = os.path.join(args.sample_dir, \"decoder_samples\")\n if not os.path.isdir(decoder_dir):\n os.mkdir(decoder_dir)\n\n # save the decoder samples\n for i, sample in enumerate(samples):\n im = transf(sample.to(device='cpu'))\n im.save(os.path.join(decoder_dir, \"img_{}.jpeg\".format(i)))\n\n # perturb the z and get samples\n z_ = z + args.eps\n samples = model.deconvs(z_)\n\n perturb_dir = os.path.join(args.sample_dir, \"perturbed_samples\")\n if not os.path.isdir(perturb_dir):\n os.mkdir(perturb_dir)\n\n # save the perturbed samples\n for i, sample in enumerate(samples):\n im = transf(sample.to(device='cpu'))\n im.save(os.path.join(perturb_dir, \"p_img_{}.jpeg\".format(i)))\n\n int_dir = os.path.join(args.sample_dir, \"interpolated_samples\")\n if not os.path.isdir(int_dir):\n os.mkdir(int_dir)\n\n # interpolate between two z's and generate samples. Save them\n for a in range(11):\n z_a = a / 10. * z[0:1] + (1. - a/10.) * z[1:2]\n gz_a = model.deconvs(z_a)[0]\n im = transf(gz_a.to(device='cpu'))\n im.save(os.path.join(int_dir, \"i1_img_{}.jpeg\".format(a)))\n\n # interpolate the result of the two g(z) values\n g_z = model.deconvs(z[0:2])\n for a in range(11):\n x_a = a / 10. * g_z[0] + (1. - a / 10.) 
* g_z[1]\n im = transf(x_a.to(device='cpu'))\n im.save(os.path.join(int_dir, \"i2_img_{}.jpeg\".format(a)))\n\n # sample 1000 images to use for FID score\n thousand_dir = os.path.join(args.sample_dir, \"1000_samples\", \"samples\")\n if not os.path.isdir(thousand_dir):\n os.makedirs(thousand_dir)\n\n z = torch.randn(1000, model.dimz, device=args.device)\n gz = model.deconvs(z)\n for i, sample in enumerate(gz):\n im = transf(sample.to(device='cpu'))\n im.save(os.path.join(thousand_dir, \"img_{}.jpeg\".format(i)))\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", action=\"store_true\", help=\"Flag to specify if we train the model\")\n parser.add_argument(\"--save_path\", type=str, default=\"q3_gan.pt\")\n parser.add_argument(\"--load_path\", type=str, default=\"q3_gan.pt\")\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"Size of the mini-batches\")\n parser.add_argument(\"--dimz\", type=int, default=100, help=\"Dimension of the latent variables\")\n parser.add_argument(\"--data_dir\", type=str, default=\"svhn.mat\", help=\"SVHN dataset location\")\n parser.add_argument(\"--nb_epochs\", type=int, default=50, help=\"The number of epochs for training\")\n parser.add_argument(\"--lam\", type=int, default=10, help=\"Lambda coefficient for the regularizer in\"\n \"in the WGAN-GP loss\")\n parser.add_argument(\"--lr\", type=float, default=2e-4, help=\"Learning rate for the optimzer\")\n parser.add_argument(\"--update_ratio\", type=int, default=5, help=\"The number of updates to the discriminator\"\n \"before one update to the generator\")\n parser.add_argument(\"--eps\", type=float, default=1e-1, help=\"Perturbation value to the latent when evaluating\")\n parser.add_argument(\"--sample_dir\", type=str, default=\"samples\", help=\"Directory containing samples for\"\n \"evaluation\")\n\n # get the arguments\n args = parser.parse_args()\n args.device = torch.device(\"cuda\") if cuda.is_available() else torch.device('cpu')\n # check for cuda\n device = torch.device(\"cuda\") if cuda.is_available() else torch.device('cpu')\n args.device = device\n\n # load the dataset\n train, valid, test = get_data_loader(args.data_dir, args.batch_size)\n\n # Create model. 
Load or train depending on choice\n g = G(args.batch_size, args.dimz).to(args.device)\n d = D(args.batch_size, args.dimz).to(args.device)\n if args.t:\n train_model(g, d, train, valid, args.save_path)\n else:\n g.load_state_dict(torch.load(args.load_path))\n g.eval()\n\n evaluation(g)\n","sub_path":"Q3_WGAN.py","file_name":"Q3_WGAN.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"165490566","text":"import tensorflow as tf\nfrom numpy.random import RandomState as R\n\nseed = 42\nds_size = 9*512*2\n\ndef give(dim, n, channels, out_size=10):\n if dim == 1:\n x = R(seed).random((ds_size, n, channels))\n x = x.reshape(x.shape[0], n, channels)\n else:\n x = R(seed).random((ds_size, n, n, channels))\n x = x.reshape(x.shape[0], n, n, channels)\n \n y = R(seed).randint(0,out_size,ds_size)\n y = tf.keras.utils.to_categorical(y, out_size)\n \n dataset = tf.data.Dataset.from_tensor_slices((x, y))\n \n return dataset\n","sub_path":"profile/tflow/tf_data.py","file_name":"tf_data.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"375041817","text":"import turtle\n\nz = turtle.Turtle()\ndef sierpinski(n, dolzina):\n if n > 0:\n for _ in range(3):\n sierpinski(n - 1, dolzina / 2)\n z.forward(dolzina)\n z.left(120)\n else:\n pass\n\ndef krivulja(n, dolzina, pojdi_desno, barva):\n z.color(barva)\n if n > 0:\n if pojdi_desno:\n z.right(60)\n krivulja(n - 1, dolzina / 2, not pojdi_desno, 'red')\n z.left(60)\n krivulja(n - 1, dolzina / 2, pojdi_desno, 'green')\n z.left(60)\n krivulja(n - 1, dolzina / 2, not pojdi_desno, 'blue')\n z.right(60)\n else:\n z.left(60)\n krivulja(n - 1, dolzina / 2, not pojdi_desno, 'red')\n z.right(60)\n krivulja(n - 1, dolzina / 2, pojdi_desno, 'green')\n z.right(60)\n krivulja(n - 1, dolzina / 2, not pojdi_desno, 'blue')\n z.left(60)\n else:\n z.forward(dolzina)\n\nkrivulja(5, 100, True, 'red')","sub_path":"datoteke-s-predavanj/2015-16/04-zelva/sierpinski.py","file_name":"sierpinski.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340897075","text":"import time\nimport array\nimport math\nimport audioio\nimport audiocore\nimport board\nimport ulab\n\n_audio = audioio.AudioOut(board.A0)\n\n_frequencies = [ 261.63, 277.18, 293.66, 311.13, 329.63, 349.23, 369.99, 392.00, 415.30, 440.00, 466.16, 493.88 ]\n_note_names = { 'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3, 'E': 4, 'F': 5, 'F#': 6, 'Gb': 6, 'G': 7, 'G#': 8, 'Ab': 8, 'A': 9, 'A#': 10, 'Bb': 10, 'B': 11 }\n\ndef note(name, octave=4):\n return _frequencies[_note_names[name]] * math.pow(2, octave-4)\n\ndef play(frequency):\n waveform = wf(frequency)\n samples = ulab.array((waveform + 1) * (2 ** 15 - 1), dtype=ulab.uint16)\n sample = audiocore.RawSample(samples)\n _audio.play(sample, loop=True)\n\ndef stop():\n _audio.stop()\n\ndef wf(frequency):\n samples = 60 * 8000 // frequency\n adj_freq = 60 * 8000 / samples\n samples_per_cycle = 8000 / adj_freq\n print(frequency, adj_freq, samples)\n waveform = ulab.arange(samples, dtype=ulab.float)\n waveform = waveform * 2 * math.pi / samples_per_cycle\n return ulab.vector.sin(waveform)\n\n\n","sub_path":"speaker.py","file_name":"speaker.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
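The Q3_WGAN record above builds its WGAN-GP gradient penalty inline in the training and validation loops. The following is a minimal self-contained sketch of the same two-sided penalty; the toy critic in the usage block is a hypothetical stand-in for the record's discriminator D, not part of the original file.

import torch
from torch import autograd

def gradient_penalty(critic, real, fake, lam=10.0):
    # Random per-sample interpolation factor, broadcast over C, H, W.
    a = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (a * real + (1.0 - a) * fake).requires_grad_(True)
    d_interp = critic(interp)
    # Gradient of the critic's scores w.r.t. the interpolated inputs;
    # create_graph=True keeps the graph so the penalty can be backpropagated.
    grad = autograd.grad(d_interp, interp,
                         grad_outputs=torch.ones_like(d_interp),
                         create_graph=True, only_inputs=True)[0]
    grad = grad.view(grad.size(0), -1)
    # Penalize deviation of each per-sample gradient norm from 1.
    return lam * ((grad.norm(2, dim=1) - 1.0) ** 2).mean()

if __name__ == "__main__":
    critic = lambda x: x.view(x.size(0), -1).sum(dim=1)  # hypothetical toy critic
    real, fake = torch.randn(4, 3, 32, 32), torch.randn(4, 3, 32, 32)
    print(gradient_penalty(critic, real, fake).item())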
+{"seq_id":"112751413","text":"from django.urls import path\nfrom django import forms\nfrom public_app import views\nfrom django.conf.urls import url, include\nfrom site_directory import views\n\nfrom django.conf.urls import url\nfrom haystack.forms import ModelSearchForm\nfrom haystack.generic_views import FacetedSearchView as BaseFacetedSearchView\nfrom haystack.generic_views import SearchView\nfrom haystack.query import SearchQuerySet, EmptySearchQuerySet\n\nfrom admin_app.choices import TIMEESTIMATECHOICES, TYPEOFBEASTCHOICES\n\nclass MySearchForm(ModelSearchForm):\n max_time = forms.TypedChoiceField(choices=[('', '-')]+list(TIMEESTIMATECHOICES)[:-1], initial='', required=False, coerce=int, empty_value=120)\n beast = forms.TypedChoiceField(choices=[('', '-')]+list(TYPEOFBEASTCHOICES), initial='', required=False,\n label='Kind of beast')\n subject = forms.TypedChoiceField(choices=[('', '-')]\n +[(s,s) for s in [\"Quantum mechanics\",\n \"Classical mechanics\",\n \"Electromagnetism\",\n \"Thermal physics\",\n \"Math\",\n ]],\n initial='', required=False)\n\n # The following is needed in order to allow users to browse e.g. a given\n # subject with an empty query string.\n def no_query_found(self):\n return self.searchqueryset.all()\n\n def search(self):\n #First we need to store SearchQuerySet recieved after / from any other processing that's going on\n sqs = super(MySearchForm, self).search()\n\n if self.cleaned_data['max_time']:\n max_time = int(self.cleaned_data['max_time'])\n if max_time != '' and max_time != 0 and max_time < 120:\n sqs = sqs.filter(time_estimate__lte=max_time)\n\n if self.cleaned_data['beast']:\n beast = self.cleaned_data['beast']\n if beast != '':\n sqs = sqs.filter(beast=beast)\n\n if self.cleaned_data['subject']:\n subject = self.cleaned_data['subject']\n if subject != '':\n sqs = sqs.filter(subject=subject)\n\n return sqs\n\n def without_page(self):\n ''' Returns a urlencoded version of this query, but with the page removed. '''\n v = self.data.copy()\n if 'page' in v:\n del v['page']\n return v.urlencode()\n\n# Now create your own that subclasses the base view. Need to figure out faceting.\n# class FacetedSearchView(BaseFacetedSearchView):\nclass FacetedSearchView(SearchView):\n form_class = ModelSearchForm\n facet_fields = ['beast', 'time_estimate', 'topics']\n # template_name = 'search.html'\n context_object_name = 'page_object'\n form_class = MySearchForm\n\n # ... 
Any other custom methods etc\n def get(self, request, *args, **kwargs):\n if not request.user.has_perm(\"admin_app.change_problem\"):\n self.queryset = SearchQuerySet().filter(content='is_published')\n else:\n self.queryset = SearchQuerySet()\n return super(FacetedSearchView, self).get(request, *args, **kwargs)\n\n\n\nurlpatterns = [\n # url(r'^homework/keyword/(?P\\w+)/$', views.HomeworkKeywordView, name='homework_keyword'),\n \n path('keyword/', views.HomeworkKeywordView, name='homework_keyword'),\n\n path('', FacetedSearchView.as_view(), name='haystack_search'),\n # path('', SearchView.as_view(), name='haystack_search'),\n # path('', include('haystack.urls')),\n # path('', SearchView.as_view(), name='haystack_search'),\n\n]","sub_path":"osu_www/site_directory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246841374","text":"from flask import (\n Blueprint,\n render_template,\n redirect,\n url_for,\n request,\n flash,\n session,\n current_app,\n)\nfrom flask_login import login_required\nfrom sqlalchemy import desc,asc\nimport datetime\n\nfrom project.corsi.forms import CorsiForm, write_to_disk\nfrom project.serate.forms import SerataForm\nfrom project.serate.models import Serata\nfrom project.corsi.models import Corso\nfrom project import db\n\n\n# Define blueprint\ncorsi_blueprint = Blueprint(\n \"corsi\", \n __name__, \n template_folder=\"templates\",\n static_folder='../static'\n)\n\n'''\nLista dei corsi in ordine alfabetico\n'''\n@corsi_blueprint.route(\"/lista\", methods=[\"GET\"])\ndef lista():\n # Ordinamento alfabetico ascendente per titolo\n lista_corsi = Corso.query.order_by(asc(Corso.nome)).all()\n return render_template(\n 'corsi_lista.html', \n lista_corsi=lista_corsi\n )\n\n\n'''\nCreazione di un corso (senza serate e senza tags)\n'''\n@corsi_blueprint.route(\"/create\", methods=[\"GET\", \"POST\"])\n@login_required\ndef create():\n\n form = CorsiForm()\n\n if form.validate_on_submit():\n\n name = form.name.data\n teacher = form.teacher.data\n level = form.level.data\n description = form.description.data\n\n n_course = Corso(name, teacher, level, description)\n db.session.add(n_course)\n\n form.name.data = \"\"\n form.teacher.data = \"\"\n form.level.data = \"\"\n form.description.data = \"\"\n \n try:\n db.session.commit()\n flash('Corso creato correttamente', 'success')\n return redirect(url_for('corsi.lista'))\n except Exception as e:\n db.session.rollback()\n flash(\"Errore durante la creazione del corso: %s\" % str(e), 'danger')\n\n return render_template(\"corsi_create.html\", form=form)\n\n\n'''\nVisualizzazione di un corso (con gestione serate e tags (TODO))\n'''\n@corsi_blueprint.route(\"/\", methods=('GET', 'POST'))\ndef dettaglio_corso(corso_id):\n \n # Gestione aggiunta serate\n form = SerataForm()\n if form.validate_on_submit():\n\n data = form.data.data #date (not datetime!) 
object\n txt_time = form.txt_time.data #string formato HH:MM\n if not txt_time:\n txt_time = \"00:00\"\n # Converto in oggetto datetime.time per combinarlo con la data\n # in fase di creazione oggetto Serata\n data_time = datetime.datetime.strptime(txt_time, '%H:%M').time()\n nome = form.nome.data\n descrizione = form.descrizione.data\n link_partecipazione = form.link_partecipazione.data\n link_registrazione = form.link_registrazione.data\n\n nuova_serata = Serata(\n nome, \n descrizione, \n datetime.datetime.combine(data,data_time), # Combino data con ore-minuti\n link_partecipazione,\n link_registrazione)\n nuova_serata.corso_id = corso_id\n # Reset dei campi della form\n form.data.data = \"\"\n form.txt_time.data = \"\"\n form.nome.data = \"\"\n form.descrizione.data = \"\"\n form.link_partecipazione.data = \"\"\n form.link_registrazione.data = \"\"\n\n db.session.add(nuova_serata)\n try:\n db.session.commit()\n flash('Inserimento avvenuto con successo.', 'success')\n except Exception as e:\n flash(\"Errore durante l'inserimento della serata: %s\" % str(e), 'error')\n db.session.rollback()\n \n corso = Corso.query.get_or_404(corso_id)\n return render_template('corsi_dettaglio.html', corso=corso, form=form)\n\n\n'''\nCancellazione di un corso\n'''\n@corsi_blueprint.route(\"/delete/\", methods=('GET', 'POST'))\n@login_required\ndef corso_delete(id):\n '''\n Delete corso\n '''\n my_course = Corso.query.filter_by(id=id).first()\n db.session.delete(my_course)\n try:\n db.session.commit()\n flash('Cancellazione avvenuta con successo.', 'success')\n except Exception as e:\n db.session.rollback()\n flash(\"Errore durante la cancellazione del corso: %s\" % str(e), 'danger')\n return redirect(url_for('corsi.lista'))","sub_path":"Flask/Lezione7/project/corsi/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"447827307","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nfrom .Checker import Checker\r\nfrom .FormatExcelWriter import FormatExcelWriter\r\nfrom .funcs import create_pred_df\r\n\r\nfrom ..utils import calculate_time_execute\r\n\r\n\r\n# PSI Calculation class\r\nclass PSIVariablesChecker(Checker):\r\n \"\"\"\r\n Класс реализации проверки population stability index\r\n по переменным используемым в модели и бинам прогнозов.\r\n\r\n Parameters:\r\n ----------\r\n writer: pd.ExcelWriter\r\n Объект класса excel-writer для записи отчета (файл для отчета должен\r\n быть создан предварительно)\r\n\r\n model_name: str\r\n Имя модели для отображения в названи файлов\r\n\r\n model\r\n Объект scikit-learn like обученной модели\r\n\r\n features_list:list\r\n Список фичей, которые использует модель\r\n\r\n cat_features: list\r\n Список категориальных признаков\r\n\r\n drop_features: list\r\n Список мусорных признаков для исключения из анализа\r\n\r\n current_path: str\r\n Путь к рабочей директории для сохранения изображений и файла с отчетом\r\n \"\"\"\r\n\r\n def __init__(self,\r\n writer: pd.ExcelWriter,\r\n model_name: str,\r\n model,\r\n features_list=list,\r\n cat_features: list = None,\r\n drop_features: list = None,\r\n model_type: str = \"binary_classification\"):\r\n\r\n self.writer = writer\r\n self.model = model\r\n self.features_list = features_list\r\n self.model_name = model_name\r\n self.cat_features = cat_features\r\n self.drop_features = drop_features\r\n self.model_type = model_type\r\n # Датафреймы для хранения результатов проверки \r\n 
self.psi_short = pd.DataFrame()\r\n self.psi_detailed = pd.DataFrame()\r\n\r\n def _to_excel(self, df: pd.DataFrame, sheet_name: str, fmt=None) -> None:\r\n \"\"\"\r\n Функция записи датафрейма в excel файл на указанный лист и позицию\r\n\r\n Parameters:\r\n ----------\r\n df: pd.DataFrame\r\n Датафрейм для записи в файл\r\n sheet_name: str\r\n Имя листа, на который осуществить запись\r\n plot: bool\r\n Флаг необходимости добавить на страницу с отчетом график из файла\r\n \"\"\"\r\n bold_row = {\"bold\": {\r\n True: df.index[df[\"feature\"] == \"y_pred\"]}\r\n }\r\n\r\n excelWriter = FormatExcelWriter(self.writer)\r\n excelWriter.write_data_frame(df, (0, 0), sheet_name, fmt,\r\n row_formats=bold_row)\r\n\r\n # apply conditional format to highlight validation_report test results\r\n for col in [\"PSI_train_vs_valid_events\",\r\n \"PSI_train_vs_valid_all\",\r\n \"PSI_train_vs_valid_nevents\",\r\n \"PSI_train_vs_OOT_all\",\r\n \"PSI_train_vs_OOT_events\",\r\n \"PSI_train_vs_OOT_nevents\",\r\n \"PSI_train_vs_test_all\",\r\n \"PSI_train_vs_test_events\",\r\n \"PSI_train_vs_test_nevents\",\r\n \"PSI_train_vs_valid\",\r\n \"PSI_train_vs_OOT\",\r\n \"PSI_train_vs_test\",\r\n \"PSI_train_vs_test2_all\",\r\n \"PSI_train_vs_OOT_psi\",\r\n \"PSI_train_vs_OOT_psi_all\",\r\n \"PSI_train_vs_OOT_psi_events\",\r\n \"PSI_train_vs_OOT_psi_nevents\",\r\n ]:\r\n if col in df.columns:\r\n excelWriter.set_col_cond_format(df, (0, 0), col, upper=0.2,\r\n lower=0.1, order=\"straight\")\r\n\r\n def _create_checklist(self, df_list: list):\r\n \"\"\"\r\n Проверка наличия датасетов в словаре и формирование списка для\r\n сравнений с train\r\n\r\n Parameters:\r\n -----------\r\n df_list: list\r\n Список датасетов\r\n\r\n Returns:\r\n list\r\n список датасетов по которым нужно произвести провверку\r\n -------\r\n \"\"\"\r\n check_list = []\r\n\r\n # Создать список для проверки \r\n if \"test\" in df_list:\r\n check_list.append(\"test\")\r\n elif \"valid\" in df_list:\r\n check_list.append(\"valid\")\r\n\r\n if \"OOT\" in df_list:\r\n check_list.append(\"OOT\")\r\n\r\n if \"OOT_psi\" in df_list:\r\n check_list.append(\"OOT_psi\")\r\n\r\n if \"test2\" in df_list:\r\n check_list.append(\"test2\")\r\n\r\n return check_list\r\n\r\n def create_df(self, x: pd.DataFrame,\r\n y: pd.Series, model, features:list) -> dict:\r\n \"\"\"\r\n Функция создает на выходе 3 датафрема: полный, events, no events\r\n\r\n Parameters:\r\n -----------\r\n x:pd.DataFrame\r\n Датафрейм с признаками\r\n y:pd.Series\r\n Истинные значения целевой переменной\r\n model\r\n sklearn-like модель, после применения метода fit\r\n features:list\r\n список переменных используемых в модели\r\n\r\n Returns:\r\n --------\r\n dict\r\n словарь с разбитыми датасетами, ключи:\r\n \"all\", \"events\", \"nevents\"\r\n \"\"\"\r\n y_pred = create_pred_df(model_info=(self.model,\r\n self.features_list),\r\n X=x, y=y)[\"y_pred\"]\r\n if self.model_type == \"binary_classification\":\r\n full_df = pd.concat([x[features], y_pred, y], axis=1)\r\n events_df = full_df[full_df[y.name] == 1]\r\n nevents_df = full_df[full_df[y.name] == 0]\r\n res = {\"all\": full_df,\r\n \"events\": events_df,\r\n \"nevents\": nevents_df}\r\n\r\n elif self.model_type == \"regression\":\r\n full_df = pd.concat([x[features], y_pred, y], axis=1)\r\n res = {\"all\": full_df}\r\n\r\n return res\r\n\r\n def cut_buckets_groups(self, df: pd.DataFrame, df_name: str, perc: dict)\\\r\n -> pd.DataFrame:\r\n \"\"\"\r\n Разбиение всех столбцов в pd.DataFrame по укзаанными в dict пороговым\r\n значениям. 
Группировка и подсчет доли наблюдений в каждом интервале.\r\n\r\n Parameters:\r\n -----------\r\n df: pd.DataFrame\r\n датафрейм с исходными значениями переменных\r\n\r\n df_name: str\r\n имя набора данных (train, valid, test, oot)\r\n\r\n perc: dict\r\n словарь с необходимыми пороговыми значениями для разбиения\r\n по каждоый фиче вида:\r\n {<название фичи>: [<список пороговых значений>]}\r\n\r\n Returns:\r\n --------\r\n pd.DataFrame\r\n Датафрейм с группировкой фичей по бакетами и долей-количеством\r\n наблюдений в каждом бакете\r\n \"\"\"\r\n\r\n # выходной датафрейм\r\n out_stats = pd.DataFrame()\r\n for col in df.columns:\r\n # для ��ранения порогов разбиения на перцентили\r\n missings = df[df.isna()[col]][col]\r\n\r\n # выделить отдельно пропуски\r\n missing_cnt = np.nan if len(missings) == 0 else len(missings)\r\n missing_stats = pd.DataFrame({\"feature\": [col],\r\n \"bucket\": [\"MISSING\"],\r\n f\"obs_count_{df_name}\":[missing_cnt],\r\n })\r\n\r\n # разбить на бакеты остальные значения\r\n values = df[df[col].notna()][col]\r\n if col not in perc.keys():\r\n perc[col] = np.unique([np.percentile(values\r\n , interpolation=\"lower\"\r\n , q=q) for q in\r\n np.arange(0, 101, 10)])\r\n buckets = pd.cut(x=values,\r\n bins=perc[col],\r\n duplicates=\"drop\",\r\n include_lowest=True,\r\n labels=False).rename(\"bucket\")\r\n\r\n # Склеить номера бакетов со значениями наблюдений\r\n buckets_group = pd.concat([values, buckets], axis=1)\r\n\r\n # min-max статистики только для train выборки\r\n if df_name == \"train\":\r\n buckets_group = buckets_group.groupby(\"bucket\").agg(\r\n [\"min\", \"max\", \"count\"])[col]\\\r\n .rename(columns={\"min\": \"min_value\",\r\n \"max\": \"max_value\",\r\n \"count\": f\"obs_count_{df_name}\"})\r\n else:\r\n buckets_group = buckets_group.groupby(\"bucket\").agg(\r\n [\"count\"])[col].rename(\r\n columns={\"count\": f\"obs_count_{df_name}\"})\r\n\r\n # buckets_group.columns = buckets_group.columns.droplevel()\r\n buckets_group = buckets_group.reset_index()\\\r\n .rename(columns={\"index\": \"bucket\"})\r\n buckets_group[\"feature\"] = col\r\n\r\n # Добавить пропуски по фиче\r\n if len(missings)>0:\r\n buckets_group = buckets_group.append(missing_stats,\r\n ignore_index=True)\r\n\r\n # Доля наблюдений в каждой выборке\r\n buckets_group[f\"obs_share_{df_name}\"] = \\\r\n buckets_group[f\"obs_count_{df_name}\"] \\\r\n / buckets_group[f\"obs_count_{df_name}\"].sum()\r\n\r\n # Добавить всю статистику по фиче в финальный датасет\r\n out_stats = out_stats.append(buckets_group, ignore_index=True)\r\n return out_stats\r\n\r\n def calc_buckets_categories(self, df: pd.DataFrame, df_name: str)\\\r\n -> pd.DataFrame:\r\n \"\"\"\r\n Группировка признаков по уникальным значениям\r\n и подсчет доли-количества наблюдений в каждом интервале.\r\n\r\n Parameters:\r\n -----------\r\n df: pd.DataFrame\r\n датафрейм с исходными значениями переменных\r\n\r\n df_name: str\r\n имя набора данных (train, valid, test, oot)\r\n\r\n Returns:\r\n --------\r\n pd.DataFrame\r\n Датафрейм с группировкой фичей по значениям и долей-количеством\r\n наблюдений в каждом бакете\r\n \"\"\"\r\n\r\n out_stats = pd.DataFrame()\r\n\r\n for col in df.columns:\r\n missings = df[df.isna()[col]][col]\r\n\r\n # выделить отдельно пропуски\r\n missing_cnt = np.nan if len(missings) == 0 else len(missings)\r\n missing_stats = pd.DataFrame({\"feature\": [col],\r\n \"bucket\": [\"MISSING\"],\r\n f\"obs_count_{df_name}\": [missing_cnt]\r\n })\r\n # категории без пропусков \r\n values = 
\r\n    def calc_buckets_categories(self, df: pd.DataFrame, df_name: str)\\\r\n            -> pd.DataFrame:\r\n        \"\"\"\r\n        Groups the features by their unique values\r\n        and counts the share/count of observations in every interval.\r\n\r\n        Parameters:\r\n        -----------\r\n        df: pd.DataFrame\r\n            dataframe with the raw variable values\r\n\r\n        df_name: str\r\n            name of the dataset (train, valid, test, oot)\r\n\r\n        Returns:\r\n        --------\r\n        pd.DataFrame\r\n            Dataframe with the features grouped by value and the\r\n            share/count of observations in every bucket\r\n        \"\"\"\r\n\r\n        out_stats = pd.DataFrame()\r\n\r\n        for col in df.columns:\r\n            missings = df[df.isna()[col]][col]\r\n\r\n            # extract the missing values separately\r\n            missing_cnt = np.nan if len(missings) == 0 else len(missings)\r\n            missing_stats = pd.DataFrame({\"feature\": [col],\r\n                                          \"bucket\": [\"MISSING\"],\r\n                                          f\"obs_count_{df_name}\": [missing_cnt]\r\n                                          })\r\n            # categories without the missing values\r\n            values = df[df.notna()[col]][col]\r\n\r\n            if df_name == \"train\":\r\n                groups = values.groupby(by=values)\\\r\n                    .agg([\"min\", \"max\", \"count\"])\\\r\n                    .rename(columns={\"min\": \"min_value\",\r\n                                     \"max\": \"max_value\",\r\n                                     \"count\": f\"obs_count_{df_name}\"})\r\n            else:\r\n                groups = values.groupby(by=values)\\\r\n                    .agg([\"count\"])\\\r\n                    .rename(columns={\"count\": f\"obs_count_{df_name}\"})\r\n\r\n            groups = groups.reset_index().rename(columns={col: \"bucket\"})\r\n            groups[\"feature\"] = col\r\n\r\n            # Add the missing values\r\n            if len(missings) > 0:\r\n                groups = groups.append(missing_stats, ignore_index=True)\r\n\r\n            # Compute the shares\r\n            groups[f\"obs_share_{df_name}\"] = groups[f\"obs_count_{df_name}\"] \\\r\n                                             / groups[f\"obs_count_{df_name}\"]\\\r\n                                             .sum()\r\n\r\n            out_stats = out_stats.append(groups, ignore_index=True)\r\n        return out_stats\r\n\r\n    def calc_psi_pair(self,\r\n                      base_df: pd.DataFrame,\r\n                      base_df_name: str,\r\n                      diff_df: pd.DataFrame,\r\n                      diff_df_name: str,\r\n                      features_type: str = \"numeric\") -> pd.DataFrame:\r\n        \"\"\"\r\n        Computes PSI on a pair of dataframes, base_df vs diff_df, for a\r\n        specific feature set, either \"numeric\" or \"categorical\"\r\n\r\n        Parameters:\r\n        -----------\r\n        base_df: pd.DataFrame\r\n            Dataframe that PSI is computed against\r\n\r\n        base_df_name: str\r\n            Name of the base dataset\r\n\r\n        diff_df: pd.DataFrame\r\n            Dataframe that PSI is computed on\r\n\r\n        diff_df_name: str\r\n            Name of the dataset used for the PSI calculation\r\n\r\n        features_type: str\r\n            Type of the features in the dataset (numeric/categorical)\r\n\r\n        Returns:\r\n        --------\r\n        pd.DataFrame\r\n            Dataframe with the PSI values of every variable on the pair\r\n            of datasets\r\n        \"\"\"\r\n        if features_type == \"numeric\":\r\n            perc = {}\r\n            base_stats = self.cut_buckets_groups(base_df,\r\n                                                 base_df_name, perc=perc)\r\n            diff_stats = self.cut_buckets_groups(diff_df,\r\n                                                 diff_df_name, perc=perc)\r\n        elif features_type == \"categorical\":\r\n            base_stats = self.calc_buckets_categories(base_df, base_df_name)\r\n            diff_stats = self.calc_buckets_categories(diff_df, diff_df_name)\r\n\r\n        all_stats = pd.concat([base_stats.set_index([\"feature\", \"bucket\"]),\r\n                               diff_stats.set_index([\"feature\", \"bucket\"])],\r\n                              axis=1,\r\n                              join='outer')\r\n\r\n        # Fill the counts of newly appeared categories with zeros and\r\n        # their shares with a very small number\r\n        all_stats[f\"obs_share_{base_df_name}\"] = \\\r\n            all_stats[f\"obs_share_{base_df_name}\"].fillna(0.001)\r\n        all_stats[f\"obs_share_{diff_df_name}\"] = \\\r\n            all_stats[f\"obs_share_{diff_df_name}\"].fillna(0.001)\r\n\r\n        all_stats[f\"obs_count_{base_df_name}\"] = \\\r\n            all_stats[f\"obs_count_{base_df_name}\"].fillna(0)\r\n        all_stats[f\"obs_count_{diff_df_name}\"] = \\\r\n            all_stats[f\"obs_count_{diff_df_name}\"].fillna(0)\r\n\r\n        def _psi(base: pd.Series, diff: pd.Series):\r\n            return (diff - base) * np.log(diff / base)\r\n
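\r\n        # For instance, shares 0.10 (base) vs 0.15 (diff) contribute\r\n        # (0.15 - 0.10) * ln(0.15 / 0.10) = 0.05 * 0.4055 ≈ 0.0203 to the sum.\r\n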
\r\n        all_stats[f\"PSI_{base_df_name}_vs_{diff_df_name}\"] =\\\r\n            _psi(all_stats[f\"obs_share_{base_df_name}\"],\r\n                 all_stats[f\"obs_share_{diff_df_name}\"])\r\n\r\n        return all_stats\r\n\r\n    def calc_psi(self,\r\n                 base_df: pd.DataFrame,\r\n                 base_df_name: str,\r\n                 diff_df: pd.DataFrame,\r\n                 diff_df_name: str) -> pd.DataFrame:\r\n        \"\"\"\r\n        Top-level PSI calculation over the whole feature set of two datasets.\r\n        Dispatches the PSI calculation for the categorical and numeric\r\n        features and merges the results into a single final dataframe\r\n\r\n        Parameters:\r\n        -----------\r\n        base_df: pd.DataFrame\r\n            Dataframe that PSI is computed against\r\n\r\n        base_df_name: str\r\n            Name of the base dataset\r\n\r\n        diff_df: pd.DataFrame\r\n            Dataframe that PSI is computed on\r\n\r\n        diff_df_name: str\r\n            Name of the dataset used for the PSI calculation\r\n\r\n        Returns:\r\n        --------\r\n        pd.DataFrame:\r\n            Final dataframe with the PSI between the two datasets over\r\n            all variables\r\n\r\n        \"\"\"\r\n        \r\n        if self.cat_features is not None:\r\n            numeric = set(self.features_list) - set(self.cat_features)\r\n            categoric = set(self.features_list)\\\r\n                .intersection(set(self.cat_features))\r\n        else:\r\n            numeric = set(self.features_list)\r\n            categoric = set([])\r\n        \r\n        numeric.add(\"y_pred\")\r\n        \r\n\r\n        if len(numeric) > 0:\r\n            num_stats_psi = self.calc_psi_pair(base_df[numeric]\r\n                                               , base_df_name\r\n                                               , diff_df[numeric]\r\n                                               , diff_df_name\r\n                                               , features_type=\"numeric\")\r\n            total_psi = num_stats_psi.reset_index()\r\n            \r\n        if len(categoric) > 0:\r\n            cat_stats_psi = self.calc_psi_pair(base_df[categoric]\r\n                                               , base_df_name\r\n                                               , diff_df[categoric]\r\n                                               , diff_df_name\r\n                                               , features_type=\"categorical\")\r\n\r\n            total_psi = pd.concat([num_stats_psi.reset_index()\r\n                                   , cat_stats_psi.reset_index()]\r\n                                  , axis=0)\r\n\r\n        return total_psi\r\n
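\r\n    # Illustrative usage, assuming an instance named checker (not defined here):\r\n    #   total = checker.calc_psi(train_df, \"train\", oot_df, \"OOT\")\r\n    # 'total' then holds one row per (feature, bucket) with shares and PSI.\r\n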
\r\n    @calculate_time_execute\r\n    def validate(self, **kwargs):\r\n        \"\"\"\r\n        Entry point for the PSI calculations between the datasets in the dict.\r\n        Manages the PSI calculations between pairs of datasets and merges the\r\n        results into the final dataframe.\r\n        Writes the PSI check results and the calculation details to the excel\r\n        workbook.\r\n\r\n        Parameters:\r\n        -----------\r\n        **kwargs: Dict[str, Tuple(pd.DataFrame, pd.Series)]\r\n            Dictionary where the key is the dataset name and the value is\r\n            a tuple of (X, y), X being the feature matrix and\r\n            y the vector of true labels.\r\n        \"\"\"\r\n        print(\"Calculating PSI...\")\r\n        # build the checklist of dataframes to compare with train\r\n        checklist = self._create_checklist(kwargs.keys())\r\n\r\n        # get the train set\r\n        X_train, y_train = kwargs.get(\"train\", (None, None))\r\n        # slice train into events, nevents, all\r\n        train_df = self.create_df(X_train, y_train, self.model,\r\n                                  self.features_list)\r\n\r\n        # For every dataframe in the checklist\r\n        for df_name in checklist:\r\n\r\n            X_diff, y_diff = kwargs.get(df_name, (None, None))\r\n            # slice the dataframe into events, nevents, all\r\n            diff_df = self.create_df(X_diff, y_diff, self.model,\r\n                                     self.features_list)\r\n\r\n            _psi_df = pd.DataFrame()\r\n            # compute the statistics for every part: events, nevents, all\r\n            for df_part in train_df.keys(): #[\"all\", \"events\", \"nevents\"]:\r\n                _psi = self.calc_psi(train_df[df_part], \"train\",\r\n                                     diff_df[df_part], df_name)\r\n                _psi[\"data_part\"] = df_part\r\n                _psi = _psi.set_index([\"feature\", \"bucket\", \"data_part\"])\r\n\r\n                # add the column with the final PSI to the dataframe\r\n                _psi_grouped = \\\r\n                    _psi.groupby(\"feature\")[f\"PSI_train_vs_{df_name}\"] \\\r\n                    .sum().rename(f\"PSI_train_vs_{df_name}_{df_part}\")\r\n                self.psi_short = pd.concat([self.psi_short,\r\n                                            _psi_grouped],\r\n                                           axis=1,\r\n                                           sort=True,\r\n                                           join=\"outer\")\r\n\r\n                _psi_df = _psi_df.append(_psi)\r\n\r\n            new_cols = _psi_df.columns.difference(self.psi_detailed.columns)\r\n            self.psi_detailed = pd.concat([self.psi_detailed,\r\n                                           _psi_df[new_cols]],\r\n                                          axis=1,\r\n                                          join=\"outer\")\r\n\r\n        self.psi_detailed = self.psi_detailed.reset_index()\r\n        self.psi_short = self.psi_short.reset_index()\r\n        self.psi_short.rename(columns={\"index\": \"feature\"}, inplace=True)\r\n\r\n        # Write the result to the excel file\r\n        # number formats\r\n        int_number = \"## ##0\"\r\n        float_number_high = \"## ##0.00\"\r\n        float_number_low = \"## ##0.0000\"\r\n        int_percentage = \"0%\"\r\n        float_percentage_high = \"0.00%\"\r\n        float_percentage_low = \"0.0000%\"\r\n\r\n        # Custom format for the table\r\n        fmt = {\"num_format\": {\r\n            int_number: [\"obs_count_train\",\r\n                         \"obs_count_valid\",\r\n                         \"obs_count_OOT\",\r\n                         \"obs_count_test\",\r\n                         \"obs_count_OOT_psi\"]\r\n            , float_percentage_low: [\"obs_share_train\",\r\n                                     \"obs_share_valid\",\r\n                                     \"obs_share_OOT\",\r\n                                     \"obs_share_test\",\r\n                                     \"obs_share_OOT_psi\"]\r\n            , float_number_low: [\"PSI_train_vs_valid\",\r\n                                 \"PSI_train_vs_test\",\r\n                                 \"PSI_train_vs_OOT\",\r\n                                 \"PSI_train_vs_OOT_psi\",\r\n                                 \"PSI_train_vs_valid_all\",\r\n                                 \"PSI_train_vs_test_all\",\r\n                                 \"PSI_train_vs_OOT_all\",\r\n                                 \"PSI_train_vs_OOT_psi_all\",\r\n                                 \"PSI_train_vs_valid_events\",\r\n                                 \"PSI_train_vs_test_events\",\r\n                                 \"PSI_train_vs_OOT_events\",\r\n                                 \"PSI_train_vs_OOT_psi_events\",\r\n                                 \"PSI_train_vs_valid_nevents\",\r\n                                 \"PSI_train_vs_test_nevents\",\r\n                                 \"PSI_train_vs_OOT_nevents\",\r\n                                 \"PSI_train_vs_OOT_psi_nevents\"]\r\n        }\r\n        }\r\n        self._to_excel(self.psi_detailed, sheet_name=\"PSI detailed\", fmt=fmt)\r\n        self._to_excel(self.psi_short, sheet_name=\"PSI\", fmt=fmt)\r\n","sub_path":"drafts/dspl/validation_report/.ipynb_checkpoints/PSIVariablesChecker-checkpoint.py","file_name":"PSIVariablesChecker-checkpoint.py","file_ext":"py","file_size_in_byte":24832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"561773229","text":"# /** 272 Climbing Stairs II\n\n\nclass Solution:\n    \"\"\"\n    @param {int} n a integer\n    @return {int} a integer\n    \"\"\"\n    def climbStairs2(self, n):\n        # write your code here\n        if n <= 1:\n            return 1\n\n        if n == 2:\n            return 2\n\n        a, b, c = 1, 1, 2\n        for i in xrange(3, n + 1):\n            a, b, c = b, c, a + b + c\n\n        return c","sub_path":"J9Ch/src/J_9_DP/Optional_5/python_cpp/_272_Climbing_Stairs_II_Easy.py","file_name":"_272_Climbing_Stairs_II_Easy.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"308914760","text":"from . import hashframe\n\nSHA_HashFrame = hashframe.SHA_HashFrame\nclass SHA384(SHA_HashFrame):\n    \"\"\"\n    Implements the SHA-384 Algorithm. SHA-384 only differs\n    from SHA-512 in two respects. 
First SHA-384 uses a different\n set of seed values of H, which are predefined in the official\n specification. Second, SHA-384 computes the hash in the same way,\n using 8 state variables, but truncates the output at the end\n to 384 bits.\n \"\"\"\n\n def __init__(self, verbose=1):\n self.verbose = verbose\n\n # SHA-384 uses 1024-bit blocks with 64-bit (long) word sizes\n self.block_size = 1024\n self.word_size = 64\n \n # SHA-384 constants. These are arbitrary in the sense that they are\n # pre-defined in the official specification for the algorithm but \n # otherwise have no other significant mathematical meaning. These are needed\n # to generate the T1 values in the main hash computation\n constants = \"\"\"\n 428a2f98d728ae22 7137449123ef65cd b5c0fbcfec4d3b2f e9b5dba58189dbbc\n 3956c25bf348b538 59f111f1b605d019 923f82a4af194f9b ab1c5ed5da6d8118\n d807aa98a3030242 12835b0145706fbe 243185be4ee4b28c 550c7dc3d5ffb4e2\n 72be5d74f27b896f 80deb1fe3b1696b1 9bdc06a725c71235 c19bf174cf692694\n e49b69c19ef14ad2 efbe4786384f25e3 0fc19dc68b8cd5b5 240ca1cc77ac9c65\n 2de92c6f592b0275 4a7484aa6ea6e483 5cb0a9dcbd41fbd4 76f988da831153b5\n 983e5152ee66dfab a831c66d2db43210 b00327c898fb213f bf597fc7beef0ee4\n c6e00bf33da88fc2 d5a79147930aa725 06ca6351e003826f 142929670a0e6e70\n 27b70a8546d22ffc 2e1b21385c26c926 4d2c6dfc5ac42aed 53380d139d95b3df\n 650a73548baf63de 766a0abb3c77b2a8 81c2c92e47edaee6 92722c851482353b\n a2bfe8a14cf10364 a81a664bbc423001 c24b8b70d0f89791 c76c51a30654be30\n d192e819d6ef5218 d69906245565a910 f40e35855771202a 106aa07032bbd1b8\n 19a4c116b8d2d0c8 1e376c085141ab53 2748774cdf8eeb99 34b0bcb5e19b48a8\n 391c0cb3c5c95a63 4ed8aa4ae3418acb 5b9cca4f7763e373 682e6ff3d6b2b8a3\n 748f82ee5defb2fc 78a5636f43172f60 84c87814a1f0ab72 8cc702081a6439ec\n 90befffa23631e28 a4506cebde82bde9 bef9a3f7b2c67915 c67178f2e372532b\n ca273eceea26619c d186b8c721c0c207 eada7dd6cde0eb1e f57d4f7fee6ed178\n 06f067aa72176fba 0a637dc5a2c898a6 113f9804bef90dae 1b710b35131c471b\n 28db77f523047d84 32caab7b40c72493 3c9ebe0a15c9bebc 431d67c49c100d4c\n 4cc5d4becb3e42b6 597f299cfc657e2a 5fcb6fab3ad6faec 6c44198c4a475817\n \"\"\"\n\n K = ['0x' + item for item in constants.split()]\n K = [int(item, 0) for item in K]\n self.K = K\n\n # Initial state variables for SHA-384. Like the constants, these are\n # pre-defined values in the official specification needed to seed the\n # hash values. These are generated from the first 64 bits of the fractional \n # parts of the square roots of the 9th - 16th prime numbers. 
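For\n        # example, the first seed word 0xcbbb9d5dc1059ed8 is the 64-bit fractional\n        # part of sqrt(23), the 9th prime.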
\n        # Note that these are different from the seed values for SHA-512,\n        # which use the first 8 prime numbers.\n        H_init = [\"0xcbbb9d5dc1059ed8\", \"0x629a292a367cd507\", \"0x9159015a3070dd17\", \"0x152fecd8f70e5939\", \"0x67332667ffc00b31\", \"0x8eb44a8768581511\", \"0xdb0c2e0d64f98fa7\", \"0x47b5481dbefa4fa4\"]\n        self.H0 = [int(item, 0) for item in H_init]\n        self.H = [int(item, 0) for item in H_init]\n        return\n\n\n    def __preprocess__(self, message):\n        \"\"\"\n        Preprocesses the message by padding it as appropriate to make the total length\n        a multiple of 1024 bits and then splitting it into 1024-bit blocks.\n        \"\"\"\n        verbose = self.verbose\n\n        if (verbose > 1):\n            print('[SHA-384] Beginning Preprocessing')\n\n        padded_message_bytes = b''\n        if type(message) == bytes:\n            nbits = len(message) * 8\n\n            if (verbose > 1):\n                print('[SHA-384] Message Length: %d bits'%(nbits))\n\n            # SHA-384 requires the word blocks to be exactly 1024 bits long,\n            # in addition to having the message length encoded at the end\n            # of the message using 128 bits. Thus, we add 128 to the required\n            # bit count for the message length, then round up to the next\n            # multiple of 1024 bits, then pad the zeros and the '1' bit\n            num_zeros = 896 - nbits - 1\n            while num_zeros > self.block_size: num_zeros -= self.block_size\n            while num_zeros < 0: num_zeros += self.block_size\n\n            # The number of zeros to pad with is the smallest nonnegative solution\n            # to l + 1 + k ≡ 896 mod 1024, with l = nbits, the total number of bits in\n            # the unpadded message.\n\n            if (verbose > 1):\n                print(\"[SHA-384] Adding a single '1' bit\")\n\n            # The 896 comes from the fact that the last 128 bits in the last\n            # padded block are reserved to hold the total length of the message.\n            # Thus the maximum data capacity of that block is 896 bits\n            # (896 + 128 = 1024). Thus, the maximum message size\n            # that can be hashed with SHA-384 is 2^128 - 1 bits. Note that after\n            # the end of the message, we always add a single '1' bit, which is\n            # NOT included in the final 128 bits. The number of zeros is selected\n            # such that the last fully padded block is 1024 bits long, of which the\n            # last 128 are reserved.\n\n            byte_array = list(message)\n            byte_array.append(1 << 7)\n\n            if (verbose > 1): print(\"[SHA-384] Padding %d Zeros\"%(num_zeros))\n            for _ in range(int((num_zeros - 7) / 8)): byte_array.append(0)\n            for item in list(nbits.to_bytes(16, 'big')): byte_array.append(item)\n            padded_message_bytes = bytes(byte_array)\n\n\n\n        nbits = len(padded_message_bytes) * 8\n        blocks = []\n        nblocks = int(nbits/self.block_size)\n\n        if (verbose > 1):\n            print('[SHA-384] New Input Length: %d bits'%(8 * len(list(byte_array))))\n            print('[SHA-384] Number of %d-bit Blocks: %d'%(self.block_size, nblocks))\n\n        # Splits the padded message into 1024-bit blocks\n        for i in range(nblocks):\n            start = int(i * self.block_size / 8)\n            end = int((i + 1) * self.block_size / 8)\n            blocks.append( padded_message_bytes[start : end])\n\n        if (verbose > 1): print('[SHA-384] Preprocessing Complete')\n        return blocks\n\n\n    def __hash__(self, blocks):\n        \"\"\"\n        The main hash routine.\n        
Accepts the blocks generated from the preprocessing\n        routine and computes the SHA-384 hash.\n        \"\"\"\n        verbose = self.verbose\n        N = len(blocks)\n\n        if (verbose > 1):\n            print('[SHA-384] Initializing State Variables H0-H7')\n            print('[SHA-384] H[%2d] = %10s %10s %10s %10s %10s %10s %10s %10s'%(\n                0, '0x' + self.H[0].to_bytes(8, 'big').hex(), '0x' + self.H[1].to_bytes(8, 'big').hex(), \n                '0x' + self.H[2].to_bytes(8, 'big').hex(), '0x' + self.H[3].to_bytes(8, 'big').hex(), \n                '0x' + self.H[4].to_bytes(8, 'big').hex(), '0x' + self.H[5].to_bytes(8, 'big').hex(), \n                '0x' + self.H[6].to_bytes(8, 'big').hex(), '0x' + self.H[7].to_bytes(8, 'big').hex()\n            ))\n\n        # The algorithm must go through every block, so that a change in any bit\n        # changes the hash function output.\n        for i in range(N):\n            \n            if (verbose > 2):\n                print('[SHA-384] Iterating through Block %d'%(i))\n\n            # Parse the current block\n            block = blocks[i]\n\n\n            if (verbose > 3):\n                print('[SHA-384] Preparing Message Schedule')\n\n            W = []\n\n            # Prepare the message schedule W. The message schedule for SHA-384 consists \n            # of 80 64-bit integers. The first 16 integers are generated from the block\n            # itself, since the block is exactly 1024 bits (64 x 16 = 1024). Note that this\n            # results in a different schedule for each block.\n\n            for j in range(0, 16, 1):\n                start = int(self.word_size * j / 8)\n                end = int(self.word_size * (j + 1) / 8)\n                word = block[start : end]\n                word = int.from_bytes(word, byteorder='big')\n                W.append(word)\n\n                if (verbose > 2):\n                    print('[SHA-384] W[%2d]=%10s'%(j, '0x' + word.to_bytes(8, 'big').hex()))\n\n            # The last 64 integers in the message schedule are generated iteratively\n            # from the first 16. For each new member j of W, it adds W[j-7], W[j-16],\n            # and applies two custom functions sigma0 and sigma1 to W[j-15] and W[j-2]. The\n            # specific definitions of these are located in the official specification and\n            # reproduced below \n\n            for j in range(16, 80, 1):\n                part1 = self.__sigma1__(W[j-2])\n                part2 = self.__sigma0__(W[j-15])\n                part3 = W[j-16]\n                part4 = W[j-7]\n\n                sum = self.__bitwise_add__(part1, part2)\n                sum = self.__bitwise_add__(sum, part3)\n                sum = self.__bitwise_add__(sum, part4)\n                W.append(sum)\n\n                if (verbose > 2):\n                    print('[SHA-384] W[%2d]=%10s \\\n                        <- σ0(W[%2d]) + σ1(W[%2d]) + W[%2d] + W[%2d]' \\\n                        %(j, '0x' + sum.to_bytes(8, 'big').hex(), j-15, j-2, j-7, j-16))\n            \n            if (verbose > 2):\n                print('[SHA-384] Finished Preparing Message Schedule')\n                print('[SHA-384] Initializing Local Working Variables')\n\n            # Initialize local state variables\n            a = self.H[0]\n            b = self.H[1]\n            c = self.H[2]\n            d = self.H[3]\n            e = self.H[4]\n            f = self.H[5]\n            g = self.H[6]\n            h = self.H[7]\n\n            if (verbose > 3):\n                print('[SHA-384] a=%10s b=%10s c=%10s d=%10s e=%10s f=%10s g=%10s h=%10s'%(\n                    '0x' + a.to_bytes(8, 'big').hex(), '0x' + b.to_bytes(8, 'big').hex(), \n                    '0x' + c.to_bytes(8, 'big').hex(), '0x' + d.to_bytes(8, 'big').hex(), \n                    '0x' + e.to_bytes(8, 'big').hex(), '0x' + f.to_bytes(8, 'big').hex(), \n                    '0x' + g.to_bytes(8, 'big').hex(), '0x' + h.to_bytes(8, 'big').hex()\n                ))\n\n            # At the current iteration, the SHA-384 state variables H0-H7 are read and stored with\n            # 8 working variables. Within each block iteration, we iterate through the schedule\n            # variables (which are different for each block). Note that in this section, we always\n            # use the bitwise addition function.\n\n            for t in range(80):\n\n                # The variables T1 and T2 are computed first. The computation is documented in\n                # the official specification.
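\n                # In short: T1 = h + Σ1(e) + Ch(e,f,g) + K[t] + W[t] and\n                # T2 = Σ0(a) + Maj(a,b,c), with all additions mod 2^64.\n                # 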
Observe that T1 uses both the t-th schedule\n # variable and the $t-th constant. The Ch function is a choice function. It uses\n # one word, and at each location picks the value from one of the other two words\n # depending on whether the first word has a '1' or '0'.\n T1 = 0\n T1 = self.__bitwise_add__(T1, self.__Sigma1__(e))\n T1 = self.__bitwise_add__(T1, self.__Ch__(e, f, g))\n T1 = self.__bitwise_add__(T1, self.K[t])\n T1 = self.__bitwise_add__(T1, W[t])\n T1 = self.__bitwise_add__(T1, h)\n\n # The Maj function takes 3 words and for each location returns the most\n # common bit. For example, if in the first bit positions of a, b, c, d,\n # a = 1, b = 0, and c = 1, the return value is 1 in that location, because\n # there are 2 '1's and only 1 '0'.\n T2 = self.__bitwise_add__(self.__Maj__(a, b, c), self.__Sigma0__(a))\n\n if (verbose > 4):\n print('[SHA-384] T1 = %10s <- Σ1(e) + Ch(e,f,g) + K[%2d] + W[%2d]'%('0x' + T1.to_bytes(8, 'big').hex(), t, t))\n print('[SHA-384] T2 = %10s <- Σ0(a) + Maj(a,b,c)'%('0x' + T2.to_bytes(8, 'big').hex()))\n\n\n # This effectively discards the last working variable, because\n # no other working variable is assigned the value of h. Also,\n # note that with a few exceptions, \n h = g\n g = f\n f = e\n \n e = self.__bitwise_add__(d, T1)\n \n d = c\n c = b\n b = a\n\n a = self.__bitwise_add__(T1, T2)\n\n if (verbose > 4):\n print('[SHA-384] h = %10s <- g'%('0x' + h.to_bytes(8, 'big').hex()))\n print('[SHA-384] g = %10s <- f'%('0x' + g.to_bytes(8, 'big').hex()))\n print('[SHA-384] f = %10s <- e'%('0x' + f.to_bytes(8, 'big').hex()))\n print('[SHA-384] e = %10s <- d + T1'%('0x' + e.to_bytes(8, 'big').hex()))\n print('[SHA-384] d = %10s <- c'%('0x' + d.to_bytes(8, 'big').hex()))\n print('[SHA-384] c = %10s <- b'%('0x' + c.to_bytes(8, 'big').hex()))\n print('[SHA-384] b = %10s <- a'%('0x' + b.to_bytes(8, 'big').hex()))\n print('[SHA-384] a = %10s <- T1 + T2'%('0x' + a.to_bytes(8, 'big').hex()))\n\n if (verbose > 3):\n print('[SHA-384] a=%10s b=%10s c=%10s d=%10s e=%10s f=%10s g=%10s h=%10s'%(\n '0x' + a.to_bytes(8, 'big').hex(), '0x' + b.to_bytes(8, 'big').hex(), \n '0x' + c.to_bytes(8, 'big').hex(), '0x' + d.to_bytes(8, 'big').hex(), \n '0x' + e.to_bytes(8, 'big').hex(), '0x' + f.to_bytes(8, 'big').hex(), \n '0x' + g.to_bytes(8, 'big').hex(), '0x' + h.to_bytes(8, 'big').hex()\n ))\n\n # Update the state variables for the next iteration.\n self.H[0] = self.__bitwise_add__(self.H[0], a)\n self.H[1] = self.__bitwise_add__(self.H[1], b)\n self.H[2] = self.__bitwise_add__(self.H[2], c)\n self.H[3] = self.__bitwise_add__(self.H[3], d)\n self.H[4] = self.__bitwise_add__(self.H[4], e)\n self.H[5] = self.__bitwise_add__(self.H[5], f)\n self.H[6] = self.__bitwise_add__(self.H[6], g)\n self.H[7] = self.__bitwise_add__(self.H[7], h)\n\n if (verbose > 1):\n print('[SHA-384] H[%2d] = %10s %10s %10s %10s %10s %10s %10s %10s'%(\n i+1, '0x' + self.H[0].to_bytes(8, 'big').hex(), '0x' + self.H[1].to_bytes(8, 'big').hex(), \n '0x' + self.H[2].to_bytes(8, 'big').hex(), '0x' + self.H[3].to_bytes(8, 'big').hex(), \n '0x' + self.H[4].to_bytes(8, 'big').hex(), '0x' + self.H[5].to_bytes(8, 'big').hex(), \n '0x' + self.H[6].to_bytes(8, 'big').hex(), '0x' + self.H[7].to_bytes(8, 'big').hex()\n ))\n\n # At the end of the computation, the output hash value is just self.H,\n # which we updated iteratively. 
Note that SHA-384 computes the hash using the same\n # process as SHA-512, but truncates the end result to 384 bits instead of 512\n # bits.\n output = [item.to_bytes(8, 'big').hex() for item in self.H][0:-2]\n self.H = self.H0\n hash_value = ''.join(output)\n\n if (verbose > 0):\n print('[SHA-384] Output Hash: %64s'%(hash_value))\n\n return hash_value\n\n # Define functions specifically needed for SHA-384 operations\n def __Ch__(self, x, y, z):\n return (x & y) ^ (~x & z)\n\n def __Maj__(self, x, y, z):\n return (x & y) ^ (y & z) ^ (x & z)\n \n def __Sigma0__(self, x):\n return self.__rot_right__(x, 28) ^ self.__rot_right__(x, 34) ^ self.__rot_right__(x, 39)\n\n def __Sigma1__(self, x):\n return self.__rot_right__(x, 14) ^ self.__rot_right__(x, 18) ^ self.__rot_right__(x, 41)\n\n def __sigma0__(self, x):\n return self.__rot_right__(x, 1) ^ self.__rot_right__(x, 8) ^ self.__right_shift__(x, 7)\n\n def __sigma1__(self, x):\n return self.__rot_right__(x, 19) ^ self.__rot_right__(x, 61) ^ self.__right_shift__(x, 6)","sub_path":"pySHA/sha384.py","file_name":"sha384.py","file_ext":"py","file_size_in_byte":16712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"164964281","text":"from pyDes import *\nfrom time import time\nfrom binascii import unhexlify as unhex\nimport os,random\n\n\ndef count_head(data):\n\tresult = bytes(10)\n\tresulet_bytes_array =bytearray(result)\n\tdata_bytes_array =bytes(str(data),'utf-8')\n\tresulet_bytes_array[0:len(data_bytes_array)]=data_bytes_array\n\treturn bytes(resulet_bytes_array)\n\ndef decrypt_file(file_path,key_16):\n\tt = des(unhex(key_16))\n\tsize = 1024*32\n\n\tif os.path.exists(file_path):\n\t\tobj_file = open(file_path[0:-4],'wb')#make partfile\n\t\tsrc_file = open(file_path,'rb')\n\t\t#read head\n\t\thead = src_file.read(10)\n\t\tmod_str= head.decode('utf-8').strip('\\0')\n\t\tfilesize=os.path.getsize(file_path)-int(mod_str)\n\t\t\n\t\tsizecount = 0\n\t\twhile True:\n\t\t\tchunkbuffer = src_file.read(size)\n\t\t\tsizecount=sizecount+size\n\n\t\t\tif len(chunkbuffer)==size:\n\t\t\t\t#encrpt\n\t\t\t\tdata=t.decrypt(chunkbuffer)\n\t\t\t\tpercent = (sizecount*100)/(filesize)\n\t\t\t\tprint('Process:'+str(percent)+'%');\n\t\t\t\t#write\n\t\t\t\tobj_file.write(data) #write data into partfile\n\t\t\telse:\n\t\t\t\tprint('last size = '+str(len(chunkbuffer)))\n\t\t\t\tobj_file.write(chunkbuffer) #write data into partfile\n\t\t\tif not chunkbuffer: #check the chunk is empty\n\t\t\t\tbreak\n\t\tobj_file.close()\n\ndef encrypt_file(file_path,key_16):\n\n\tt = des(unhex(key_16))\n\tsize = 1024*32\n\n\t#check whether todir exists or not\n\tif os.path.exists(file_path):\n\n\n\t\tobj_file = open(file_path+'.des','wb')#make partfile\n\t\t#open the fromfile\n\t\tsrc_file = open(file_path,'rb')\n\t\tfilesize=os.path.getsize(file_path)\n\t\tmod = int(filesize%size)\t#get mod\n\t\tnum = int(filesize/size)\n\n\t\tobj_file.write(count_head(mod)) \t\t# write mod in 10 bytes\n\n\t\tcount =0\n\t\twhile True:\n\t\t\tcount = count+1\n\t\t\tchunkbuffer = src_file.read(size)\n\t\t\tif len(chunkbuffer)==size:\n\t\t\t\t#encrpt\n\t\t\t\tdes3_t=t.encrypt(chunkbuffer)\n\t\t\t\tpercent = (count*100)/(num)\n\t\t\t\tprint('Process:'+str(percent)+'%');\n\t\t\t\t#write\n\t\t\t\tobj_file.write(des3_t) #write data into partfile\n\t\t\telse:\n\t\t\t\tprint('last size = '+str(len(chunkbuffer)))\n\t\t\t\tobj_file.write(chunkbuffer) #write data into partfile\n\t\t\tif not chunkbuffer: #check the chunk is 
empty\n\t\t\t\tbreak\n\t\tobj_file.close()\n\telse:\n\t\tprint(\"file does not exist!\")\n\n\ndef create_keyfile():\n\tran = hex(random.randint(0,0XFFFFFFFF))\n\treturn ran\n\ndef encrypt_str(text, keyfile_path):\n\n\tf = open(keyfile_path,\"r\") # open the key file\n\tkey_16 = f.read() # read the whole file content into a string\n\tf.close() # close the file\n\n\tt = des(unhex(key_16))\n\treturn t.encrypt(text)\n\n\nif __name__ == '__main__':\n\t#_example_des_()\n\t#encrypt_file(\"test.pdf\",\"77661100DD223311\")\n\t#decrypt_file(\"test.pdf.des\",\"77661100DD223311\")\n\tprint(create_keyfile())\n\t#_filetest_()\n\t#_profile_()\n\n\tprint(encrypt_str(\"1234\",\"key.des16\"))\n","sub_path":"bonasv0.1/daemon/des/des.py","file_name":"des.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"362630447","text":"from bs4 import BeautifulSoup\r\nimport urllib.request\r\nimport re\r\n\r\n## Takes 2 arguments:\r\n## Company name and Company number. Both as strings.\r\n## returns 2 string arrays:\r\n## arr_values an array of values related to given company in format:\r\n## 0: Cash on Hand. 1: Networth. 2: Asset value. 3: Liabilities value.\r\n## arr_changes an array of changes in the above values since last year.\r\n## Null arrays mean there is no financial data available. \r\n\r\ndef scrape_web_page(company_number, company_name):\r\n    regex = re.compile(\"red|green\")\r\n    \r\n    required_page = (\"https://companycheck.co.uk/company/%s/%s/financials#key-financials\") %(company_number, company_name)\r\n    page = urllib.request.urlopen(required_page)\r\n    soup = BeautifulSoup(page, 'html.parser')\r\n    value_box = soup.findAll('div', attrs={'class': 'Four-financial__figure'})\r\n    change_dir_box = soup.findAll('section', attrs={'class': 'Four-financials'})\r\n    changes_box = soup.findAll('div', attrs={'class': 'Four-financial__change'})\r\n    \r\n    arr_values = []\r\n    arr_changes = []\r\n    \r\n    for entry in value_box:\r\n        arr_values.append(entry.text.strip())\r\n    for entry in changes_box:\r\n        arr_changes.append(entry.text.strip())\r\n\r\n    counter = 0\r\n    for entry in regex.findall(str(change_dir_box)):\r\n        if entry == \"red\":\r\n            arr_changes[counter] = (\"-\" + arr_changes[counter])\r\n        counter+=1\r\n    \r\n    return arr_values, arr_changes\r\n\r\n##scrape_web_page(\"03584121\", \"ASOSCOM-LIMITED\") ## example\r\n##scrape_web_page(\"11104388\", \"ABBOTT-INTERNATIONAL-PLC-LIMITED\")\r\n","sub_path":"src/finance_scraper.py","file_name":"finance_scraper.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"613733736","text":"from django.contrib.auth import login, logout\nfrom django.contrib.auth.models import User\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.views.generic import TemplateView, View\nfrom django.shortcuts import render\n\nfrom subscription.models import Character, Subscription\nfrom .forms import LoginForm, SignupForm, CharacterForm\n\n# Create your views here.\n\n\ndef login_page(request):\n    if request.user.is_authenticated():\n        return HttpResponseRedirect(reverse('home'))\n    return TemplateResponse(request, 'login.html', {'login_form': LoginForm(), 'registration_form': SignupForm()})\n\n\nclass Home(TemplateView):\n    template_name = 'home.html'\n\n    def get_context_data(self, **kwargs):\n        return {'characters': 
Character.objects.all()}\n\n\n# author sign-in\ndef login_in(request):\n \"\"\"\n Sign in api\n username -- username of a user\n password -- users password\n \"\"\"\n\n if request.method == 'POST':\n form = LoginForm(request.POST or None)\n if form.is_valid():\n user = form.login(request)\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse('home')) # Redirect to a success page.\n return TemplateResponse(request, 'login.html', {'login_form': form, 'registration_form': SignupForm()})\n\n return HttpResponseRedirect(reverse('home'))\n\n\ndef registration(request):\n \"\"\"\n Registration api\n username -- username of a user\n password1 -- users password\n password2 -- users password confirmation\n \"\"\"\n\n if request.method == 'POST':\n form = SignupForm(request.POST or None)\n if form.is_valid():\n user = User.objects.create_user(\n username=form.data['username'],\n password=form.data['password1']\n )\n message = 'Successfully registered now please login'\n else:\n message = form.errors\n return TemplateResponse(request, 'login.html', {'registration_form': form, 'login_form': LoginForm(),\n 'message': message})\n return HttpResponseRedirect(reverse('home'))\n\n\nclass CharacterSubscription(View):\n form_class = CharacterForm\n template_name = 'character_details.html'\n user = None\n\n def get_queryset(self, character_id):\n return Character.objects.get(id=character_id)\n\n def get(self, request, *args, **kwargs):\n obj = self.get_queryset(kwargs.get('character_id'))\n data = {'fields': []}\n subscription = obj.subscribers.filter(subscriber=request.user)\n if subscription.exists():\n data['fields'] = subscription[0].fields.split(',')\n\n form = self.form_class(initial=data)\n return render(request, self.template_name, {'form': form, 'character': obj})\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n obj = self.get_queryset(kwargs.get('character_id'))\n subscription = obj.subscribers.filter(subscriber=request.user)\n\n if subscription.exists():\n if form.data.getlist('fields'):\n subscription.update(fields=','.join(form.data.getlist('fields')))\n else:\n subscription.delete() # delete if no fields are subscribed\n else:\n obj.subscribers.create(subscriber=request.user, fields=','.join(form.data.getlist('fields')))\n\n return render(request, self.template_name, {'form': form, 'character': str(obj) + ' saved successfully.'})\n\n\ndef log_out(request):\n \"\"\"\n logs out a user(Author/Publisher)\n \"\"\"\n if request.method == 'POST':\n logout(request)\n return HttpResponseRedirect(reverse('login-page'))\n","sub_path":"web/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478154609","text":"import first_follow as ff\r\nfrom lex_analyser import pt\r\n#from prettytable import PrettyTable\r\n\r\ndef ll_table():\r\n\r\n table = {}\r\n prod = ff.prod\r\n foll = ff.follows\r\n\r\n\r\n\r\n for n_term in prod:\r\n table[n_term] = {} #n_term -> rule\r\n for rule in prod[n_term] :\r\n f = ff.first_of_string(rule)\r\n for term in f:\r\n if term in table[n_term] :\r\n print(\"Not an LL(1) Grammar!!\")\r\n exit(0)\r\n\r\n if rule != \"epsilon\" and term != \"epsilon\":\r\n #print(n_term,term,rule)\r\n table[n_term][term] = rule\r\n\r\n\r\n\r\n\r\n\r\n for n_term in foll:\r\n if \"epsilon\" in prod[n_term]:\r\n for term in foll[n_term]:\r\n # print(table[n_term][term])\r\n if not 
table[n_term].get(term, None):\r\n                    table[n_term][term] = \"epsilon\"\r\n\r\n    for n_term in foll:\r\n        for term in foll[n_term]:\r\n            entry = table[n_term].get(term,None)\r\n            if entry is None:\r\n                table[n_term][term]='sync'\r\n\r\n\r\n    return table\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#print(ff.first)\r\n#print(ff.prod)\r\n\r\n'''\r\nforeach(A -> α in the grammar):\r\n    write A -> α in T[A,b], ∀ b ∈ first(α);\r\n    if ( ℇ ∈ first(α) ):\r\n        write A -> α in T[A,x], ∀ x ∈ follow(A);\r\n'''\r\n\r\n\r\ntable = ll_table()\r\n\r\n\r\ncols = ['Non-Term']\r\nterm = ff.term\r\nterm.remove('epsilon')\r\nterm.append('$')\r\ncols.extend(term)\r\na = pt(cols)\r\n\r\nfor i in table:\r\n    row = [i]\r\n    for t in term:\r\n\r\n        if t not in table[i]:\r\n            row.append('')\r\n        else:\r\n            row.append(str(table[i][t]))\r\n\r\n    a.add_row(row)\r\n\r\nprint(\"Parse Table :\")\r\nprint(a)\r\n\r\n\r\n","sub_path":"parse_table_err_handl.py","file_name":"parse_table_err_handl.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"502883695","text":"from minisom import MiniSom\nimport pandas as pd\nimport numpy as np\n\nbase = pd.read_csv('credit_data.csv')\nbase = base.dropna()\nbase.loc[base.age < 0, 'age'] = 40.92 # Replace negative ages with the mean age of the dataset\n\nX = base.iloc[:, 0:4].values\ny = base.iloc[:, 4].values\n\nfrom sklearn.preprocessing import MinMaxScaler\nnormalizador = MinMaxScaler(feature_range = (0,1))\nX = normalizador.fit_transform(X)\n\nsom = MiniSom(x = 15, y = 15, input_len = 4, random_seed = 0) # Keep learning_rate and sigma at their defaults of 0.5 and 0 respectively\nsom.random_weights_init(X)\nsom.train_random(data = X, num_iteration = 100)\n\nfrom pylab import pcolor, colorbar, plot\npcolor(som.distance_map().T) # The yellow cells may be outliers\ncolorbar()\n\nmarkers = ['o', 's']\ncolors = ['r', 'g']\n\nfor i, x in enumerate(X):\n    w = som.winner(x)\n    plot(w[0] + 0.5, w[1] + 0.5, markers[y[i]],\n         markerfacecolor = 'None', markersize = 10,\n         markeredgecolor = colors[y[i]], markeredgewidth = 2)\n\n# ==== Detecting Fraud ====\n    \nmapeamento = som.win_map(X)\nsuspeitos = np.concatenate((mapeamento[(13,9)], mapeamento[(1,10)]), axis = 0) # We take two yellow squares as suspects; the cells may differ depending on the result\nsuspeitos = normalizador.inverse_transform(suspeitos)\n\nclasse = []\nfor i in range(len(base)):\n    for j in range(len(suspeitos)):\n        if base.iloc[i, 0] == int(round(suspeitos[j,0])): # round() is used for rounding\n            classe.append(base.iloc[i,4])\nclasse = np.asarray(classe)\n\nsuspeitos_final = np.column_stack((suspeitos, classe))\nsuspeitos_final = suspeitos_final[suspeitos_final[:, 4].argsort()]","sub_path":"Mapas auto organizaveis/credit_data_mapa.py","file_name":"credit_data_mapa.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"640414175","text":"\nimport pandas as pd\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\ntrain=pd.read_csv('train.csv')\ntest=pd.read_csv('test.csv')\n\n\n# In[191]:\n\ncat_columns=['city', 'state', 'store_location', 'time_zone',\n       'location_employee_code', 'credit_score']\n\n\n# In[192]:\n\ntrain['credit_score_range_max']=0\ntrain['credit_score_range_min']=0\n\n\ntest['credit_score_range_max']=0\ntest['credit_score_range_min']=0\n\n\nfor i in range(len(train)):\n    try:\n        
train['credit_score_range_max'][i]=int(train.credit_score_range[i].split('TO')[1])\n train['credit_score_range_min'][i]=int(train.credit_score_range[i].split('TO')[0])\n train['credit_score_range'][i]=int(int(train.credit_score_range[i].split('TO')[1])-int(train.credit_score_range[i].split('TO')[0]))\n except:\n train['credit_score_range'][i]=0\n train['credit_score_range_max'][i]=0\n train['credit_score_range_min'][i]=0\n \n \nfor i in range(len(test)):\n try:\n test['credit_score_range_max'][i]=int(test.credit_score_range[i].split('TO')[1])\n test['credit_score_range_min'][i]=int(test.credit_score_range[i].split('TO')[0])\n test['credit_score_range'][i]=int(int(test.credit_score_range[i].split('TO')[1])-int(test.credit_score_range[i].split('TO')[0]))\n except:\n test['credit_score_range'][i]=0\n test['credit_score_range_max'][i]=0\n test['credit_score_range_min'][i]=0\n \n\n\n# In[193]:\n\nfor var in cat_columns:\n lb = LabelEncoder()\n full_var_data = pd.concat((train[var],test[var]),axis=0).astype('str')\n temp = lb.fit_transform(np.array(full_var_data))\n train[var] = lb.transform(np.array( train[var] ).astype('str'))\n test[var] = lb.transform(np.array( test[var] ).astype('str'))\n\n\n# In[194]:\n\ntrain['credit_score_range'] = train['credit_score_range'].apply(pd.to_numeric)\ntest['credit_score_range'] = test['credit_score_range'].apply(pd.to_numeric)\n\n\n# In[195]:\n\ndef getCountVar(compute_df, count_df, var_name):\n grouped_df = count_df.groupby(var_name)\n count_dict = {}\n for name, group in grouped_df:\n count_dict[name] = group.shape[0]\n\n count_list = []\n for index, row in compute_df.iterrows():\n name = row[var_name]\n count_list.append(count_dict.get(name, 0))\n return count_list\n\n\n# In[196]:\n\n#store_location\ntrain['store_location_Count']=getCountVar(train,train,'store_location')\ntest['store_location_Count']=getCountVar(test,train,'store_location')\n#time_zone\ntrain['time_zone_Count']=getCountVar(train,train,'time_zone')\ntest['time_zone_Count']=getCountVar(test,train,'time_zone')\n#location_employee_code\ntrain['location_employee_code_Count']=getCountVar(train,train,'location_employee_code')\ntest['location_employee_code_Count']=getCountVar(test,train,'location_employee_code')\n\n\n# In[197]:\n\ntrain['normalized_household_income']=(train['total_household_income']/train['employee_size'])\ntest['normalized_household_income']=(test['total_household_income']/test['employee_size'])\ntrain=train.drop('total_household_income',1)\ntest=test.drop('total_household_income',1)\n\n\n# In[198]:\n\ny=train.total_sales\ntrain=train.drop(['total_sales','outlet_no'],1)\noutlet=test.outlet_no\ntest=test.drop('outlet_no',1)\n\n\n# In[199]:\n\nfrom xgboost import XGBRegressor\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.cross_validation import cross_val_score\nfrom sklearn.grid_search import GridSearchCV\n\n\n# In[209]:\n\nmodel = XGBRegressor()\nlearning_rate = [0.001, 0.01, 0.1, 0.2, 0.3]\nn_estimators=[100,200,300,400,500]\nparam_grid = dict(learning_rate=learning_rate,n_estimators=n_estimators)\nkfold = StratifiedKFold(y, n_folds=3, shuffle=True, random_state=7)\ngrid_search = GridSearchCV(model, param_grid, scoring=\"mean_absolute_error\", n_jobs=-1, cv=kfold)\n\n\n# In[210]:\n\nresult = grid_search.fit(train,y)\n# summarize results\nprint(\"Best: %f using %s\" % (result.best_score_, result.best_params_))\n\n\n# In[211]:\n\nmodel=XGBRegressor(learning_rate=0.3,n_estimators=100)\nfor traincv,testcv in kfold:\n 
model.fit(train.iloc[traincv],y.iloc[traincv])\n\n\n\n# In[212]:\n\ny_pred=model.predict(test)\n\n\n# In[213]:\n\noutput2 = pd.DataFrame( data={\"outlet_no\":outlet,\"total_sales_Actual\": y_pred} )\noutput2.to_csv(\"model.csv\", index=False,quoting=3)\n\n\n# In[ ]:\n\n\n\n","sub_path":"TG_DSchallenge/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"191524251","text":"from web3 import Web3\nimport json\n\nw3 = Web3(Web3.HTTPProvider(\"HTTP://127.0.0.1:8545\"))\nwith open('../conf/contract/PDS.abi', 'r') as myfile:\n    abi = myfile.read()\n\nwith open('../conf/contract/PDS.bin', 'r') as myfile:\n    binfile = myfile.read()\n    bytecode = json.loads(binfile)['object']\n\naccount = w3.eth.accounts[0]\nPDSContract = w3.eth.contract(abi=abi, bytecode=bytecode)\ntx_hash = PDSContract.constructor().transact({'from': account})\ntx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)\naddress = tx_receipt.contractAddress\nprint(address)\nPDSContract_instance = w3.eth.contract(abi=abi, address=address)\ntx_hash = PDSContract_instance.functions.new_token('4qk3Ab43ufPQVif4GAzLUW', w3.toBytes(text='4qk3Ab43ufPQVif4GAzLUW')).transact({'from': account})\nw3.eth.waitForTransactionReceipt(tx_hash)\nDID, enc_token = PDSContract_instance.functions.get_token(0).call()\nprint(DID)","sub_path":"tests/ganache.py","file_name":"ganache.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"435284584","text":"def eratosthenes_sieve(N):\n    A = [True]*N\n    b = []\n    A[0] = A[1] = False\n    for k in range(2, N):\n        if A[k]:\n            for m in range(2*k, N, k):\n                A[m] = False\n\n    for k in range(N):\n        print(k, '-', \"prime\" if A[k] else \"composite\")\n\neratosthenes_sieve(222)","sub_path":"eratosthenes_sieve.py","file_name":"eratosthenes_sieve.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"458611051","text":"from typing import List\n\n\nclass Solution:\n    def isValidSudoku(self, board: List[List[str]]) -> bool:\n        row = [set() for _ in range(9)]\n        col = [set() for _ in range(9)]\n        block = [set() for _ in range(9)]\n        for i in range(9):\n            for j in range(9):\n                val = board[i][j]\n                if val != \".\":\n                    if val in row[i] | col[j] | block[i // 3 * 3 + j // 3]:\n                        return False\n                    else:\n                        row[i].add(val)\n                        col[j].add(val)\n                        block[i // 3 * 3 + j // 3].add(val)\n        return True\n","sub_path":"Week_07/0036_valid_sudoku.py","file_name":"0036_valid_sudoku.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"384763227","text":"import tkinter\n\ntk = tkinter.Tk()\n\n\n#Set title icon.\ntk.iconbitmap(r'icon.ico')\n\ntk.title(' Test ')\n\ntk.geometry('600x500') \ntkinter.Label(tk, text = 'Hello, world!', fg = 'black', font=('Arial bold', 20)).pack(side = tkinter.TOP,expand = tkinter.YES)\n\n\ntk.mainloop()\n","sub_path":"tk.py","file_name":"tk.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"88962153","text":"#!/usr/bin/env python3\n\nimport bme680\nimport configparser\nimport datetime\nimport time\nimport logging\nimport grpc\nimport sensor_pb2 as pb\nimport sensor_pb2_grpc\nimport sys\nimport time\n\n\ndef read_bme680(location, filename, stub):\n    
sensor = bme680.BME680()\n\n    # Oversample sets a balance between accuracy of reading and amount of noise.\n    # The higher the oversampling, the greater the reduction in noise, but loses accuracy.\n    sensor.set_humidity_oversample(bme680.OS_2X)\n    sensor.set_pressure_oversample(bme680.OS_4X)\n    sensor.set_temperature_oversample(bme680.OS_8X)\n\n    # Filter protects transient changes (like a door slamming).\n    sensor.set_filter(bme680.FILTER_SIZE_3)\n\n    # GAS readings require the plate to be heated. So readings will be much slower.\n    sensor.set_gas_status(bme680.ENABLE_GAS_MEAS)\n    sensor.set_gas_heater_temperature(320)\n    sensor.set_gas_heater_duration(150)\n    sensor.select_gas_heater_profile(0)\n\n    try:\n        while True:\n            current = False\n            logging.info(\"Taking a measurement\")\n            if sensor.get_sensor_data():\n                if sensor.data.heat_stable:\n                    current = pb.current_readings(\n                        time = int(datetime.datetime.utcnow().timestamp()),\n                        location = location,\n                        filename = filename,\n                        temperature = sensor.data.temperature,\n                        humidity = sensor.data.humidity,\n                        pressure = sensor.data.pressure,\n                        gas = sensor.data.gas_resistance,\n                        model = pb.BME680,\n                    )\n            if current:\n                logging.info(\"Uploading measurements\")\n                result = stub.send_readings(current)\n                logging.info(result)\n\n            time.sleep(30)\n\n    except KeyboardInterrupt:\n        pass\n\n\nif __name__ == \"__main__\":\n    # Load config\n    config = configparser.ConfigParser()\n    config.read(\"config.ini\")\n\n    #Ensure required options are set\n    if not config.has_section('sensor'):\n        sys.exit(\"Please ensure you have a config.ini with a sensor section\")\n    required = ['location', 'filename', 'server', 'port']\n    for options in required:\n        if not config.has_option('sensor', options):\n            sys.exit(\"Missing required options in config.ini\")\n\n    # Get options\n    location = config.get('sensor', 'location')\n    filename = config.get('sensor', 'filename')\n    server = config.get('sensor', 'server')\n    port = config.get('sensor', 'port')\n    log = config.get('sensor', 'logfile')\n\n    #Set up GRPC server details\n    grpcserver = \"%s:%s\" % (server, port)\n    channel = grpc.insecure_channel(grpcserver)\n    stub = sensor_pb2_grpc.sensor_dataStub(channel)\n\n    # Set up logging\n    format_string = '%(levelname)s: %(asctime)s: %(message)s'\n    logging.basicConfig(filename=log, level=logging.INFO, format=format_string)\n\n    # Start taking measurements\n    read_bme680(location, filename, stub)\n\n    print(\"Exiting!\")\n","sub_path":"client/bme680client.py","file_name":"bme680client.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"518322391","text":"import os\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\n\n# subclass so we can customize driver parameters\n# so far Flask SQLAlchemy does not support a friendlier way to do this\n# see issue: https://github.com/mitsuhiko/flask-sqlalchemy/issues/120\n\n\nclass MySQLAlchemy(SQLAlchemy):\n    def apply_driver_hacks(self, app, info, options):\n        options.update({\n            'isolation_level': 'REPEATABLE READ',\n        })\n        super().apply_driver_hacks(app, info, options)\n\n\ndb = MySQLAlchemy()\n\n\ndef create_app(test_config=None):\n    # create and configure the app\n    app = Flask(__name__, instance_relative_config=True)\n    CORS(app)\n    app.config.from_mapping(\n        SECRET_KEY='dev'\n    )\n\n    if test_config is None:\n        # load the instance config, if it exists, when not testing\n        app.config.from_envvar('APP_SETTINGS')\n    else:\n        # load the test config if 
passed in\n app.config.from_mapping(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # database\n db.init_app(app)\n\n from flaskr.apis.auth import auth_api\n from flaskr.apis.usuario import usuario_api\n from flaskr.apis.notificacao import notificacao_api\n\n app.register_blueprint(auth_api.bp)\n app.register_blueprint(usuario_api.bp)\n app.register_blueprint(notificacao_api.bp)\n\n @app.route('/api')\n def check():\n return 'On!'\n\n return app\n","sub_path":"back/notificacao/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475252304","text":"import json\nimport argparse\nimport sys\nimport time\nfrom marc_to_folio.MtFMapper import MtFMapper\nfrom pymarc import MARCReader\nfrom os import listdir\nfrom os.path import isfile, join\n\n\ndef write_to_file(f, pg_dump, folio_record):\n if(pg_dump):\n f.write('{}\\t{}\\n'.format(folio_record['id'],\n json.dumps(folio_record)))\n else:\n f.write('{}\\n'.format(json.dumps(folio_record)))\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"source_folder\",\n help=\"path of the folder where the marc files resides\")\nparser.add_argument(\"result_path\",\n help=\"path and name of the results file\")\nparser.add_argument(\"okapi_url\",\n help=(\"url of your FOLIO OKAPI endpoint. See settings->\"\n \"software version in FOLIO\"))\nparser.add_argument(\"tenant_id\",\n help=(\"id of the FOLIO tenant. See settings->software \"\n \"version in FOLIO\"))\nparser.add_argument(\"okapi_token\",\n help=(\"the x-okapi-token. Easiest optained via F12 in \"\n \"the webbrowser\"))\nparser.add_argument(\"record_source\",\n help=(\"name of the source system or collection from \"\n \"which the records are added\"))\nparser.add_argument(\"-id_dict_path\", \"-i\",\n help=(\"path to file saving a dictionary of Sierra ids \"\n \"and new InstanceIds to be used for matching the\"\n \"right holdings and items to the right instance.\"))\nparser.add_argument(\"-postgres_dump\",\n \"-p\",\n help=(\"results will be written out for Postgres ingestion.\"\n \" Default is JSON\"),\n action=\"store_true\")\nargs = parser.parse_args()\n\nprint('Will post data to')\nprint('\\tresults file:\\t', args.result_path)\nprint(\"\\tOkapi URL:\\t\", args.okapi_url)\nprint(\"\\tTenanti Id:\\t\", args.tenant_id)\nprint(\"\\tToken: \\t\", args.okapi_token)\nprint(\"\\tRecord source:\\t\", args.record_source)\nprint(\"\\tidMap will get stored at:\\t\", args.id_dict_path)\nid_dict_path = args.id_dict_path\nholdings = 0\nrecords = 0\nstart = time.time()\nfiles = [f for f in listdir(args.source_folder)\n if isfile(join(args.source_folder, f))]\nprint(\"Files to process:\")\nprint(json.dumps(files, sort_keys=True, indent=4))\nidMap = {}\nmapper = MtFMapper(args)\nprint(\"Starting\")\nprint(\"Rec./s\\t\\tHolds\\t\\tTot. 
recs\\t\\tFile\\t\\t\")\nwith open(args.result_path, 'w+') as results_file:\n for f in files:\n with open(sys.argv[1]+f, 'rb') as fh:\n reader = MARCReader(fh, 'rb',\n hide_utf8_warnings=True,\n utf8_handling='replace')\n for record in reader:\n try:\n records += 1\n if record['004']:\n holdings += 1\n else:\n folio_rec = mapper.parse_bib_record(record,\n args.record_source)\n if(record['907']['a']):\n sierra_id = record['907']['a'].replace('.b', '')[:-1]\n idMap[sierra_id] = folio_rec['id']\n write_to_file(results_file,\n args.postgres_dump,\n folio_rec)\n if records % 1000 == 0:\n elapsed = '{0:.3g}'.format(records/(time.time() - start))\n print_template = \"{}\\t\\t{}\\t\\t{}\\t\\t{}\\t\\t{}\"\n print(print_template.format(elapsed,\n holdings,\n records,\n f,\n len(idMap)), end='\\r')\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n with open(id_dict_path, 'w+') as json_file:\n json.dump(idMap, json_file, sort_keys=True, indent=4)\n print(\"done\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374535457","text":"import torch\nimport torch.nn as nn\n\nfrom .inv_editor import InvEditorBase, Embedding\n\n\n\nclass LevenshteinInvEditor(InvEditorBase):\n \"\"\"the inverse editor from https://arxiv.org/abs/1709.08878\"\"\"\n def __init__(self, token_embed_dim, edit_embed_dim, hidden_size, \n tgt_dict, edit_dict, num_layers=1, pretrained_token_embed=None):\n super(LevenshteinInvEditor, self).__init__(hidden_size)\n\n\n self.hidden_size = hidden_size\n self.padding_idx = tgt_dict.pad()\n num_token_embeddings = len(tgt_dict)\n num_edit_embeddings = len(edit_dict)\n\n if pretrained_token_embed is None:\n self.embed_tokens = Embedding(num_token_embeddings, token_embed_dim, self.padding_idx)\n else:\n self.embed_tokens = pretrained_token_embed\n\n self.embed_edit = Embedding(num_edit_embeddings, edit_embed_dim, self.padding_idx)\n self.num_layers=num_layers\n\n self.lstm = nn.LSTM(\n input_size=token_embed_dim * 2 + edit_embed_dim,\n hidden_size=hidden_size,\n num_layers=self.num_layers,\n bidirectional=True,\n )\n\n\n def forward(self, src_aligned, tgt_aligned, edit_aligned, aligned_length, **kwargs):\n \"\"\"\n Args: \n src_aligned (LongTensor): (batch, seq_len)\n tgt_aligned (LongTensor): (batch, seq_len)\n\n Returns: Tensor1\n Tensor1: the representation with shape [batch, embed_dim]\n \"\"\"\n\n bsz, seqlen = src_aligned.size()\n\n edit_embed = self.embed_edit(edit_aligned)\n src_embed = self.embed_tokens(src_aligned)\n tgt_embed = self.embed_tokens(tgt_aligned)\n\n x = torch.cat((edit_embed, src_embed, tgt_embed), -1)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n packed_x = nn.utils.rnn.pack_padded_sequence(x, aligned_length.data.tolist(), enforce_sorted=False)\n state_size = 2 * self.num_layers, bsz, self.hidden_size\n\n h0 = x.new_zeros(*state_size)\n c0 = x.new_zeros(*state_size)\n\n packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))\n x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_idx)\n\n def combine_bidir(outs):\n out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()\n return out.view(self.num_layers, bsz, -1)\n \n\n return combine_bidir(final_hiddens)[-1]\n\n @property\n def output_units(self):\n return 2 * 
self.hidden_size","sub_path":"sparse_prototype/inv_editor/inv_editor_levenshtein.py","file_name":"inv_editor_levenshtein.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42094696","text":"\"\"\"\nCreated on 30 sty 2016\n\nAVL tree - my second python program :)\n\nI guess that it's not done quite properly because I've done it without knowing a python behaviour and it can be done much better\n\n@author: Mateusz Stankiewicz\n\"\"\"\n\n\nclass Sentinel(object):\n data = left = right = None\n height = -1\n\n\nsentinel = Sentinel() # singleton of sentinel node\n\n\nclass Node:\n def __init__(self, data, left=sentinel, right=sentinel, height=0):\n self.data = data\n self.left = left\n self.right = right\n self.height = height\n\n def addNode(self, data):\n if self.data == data:\n # return when node duplicated\n return\n isLeft = self.data > data\n child = self.left if isLeft else self.right\n if child is sentinel:\n setattr(self, 'left' if isLeft else 'right', Node(data))\n else:\n child.addNode(data)\n self.rebalance()\n self.setHeight()\n\n def rebalance(self):\n if self.balance() == 2:\n if self.right.balance() == -1:\n self.doubleRotateLeft()\n else:\n self.rotateLeft()\n self.right.setHeight()\n self.left.setHeight()\n elif self.balance() == -2:\n if self.left.balance() == 1:\n self.doubleRotateRight()\n else:\n self.rotateRight()\n self.right.setHeight()\n self.left.setHeight()\n\n def balance(self):\n return self.right.height - self.left.height\n\n def setHeight(self):\n self.height = max(self.left.height, self.right.height) + 1\n\n def printNodes(self):\n if self.left is not sentinel:\n self.left.printNodes()\n print(self)\n if self.right is not sentinel:\n self.right.printNodes()\n\n def printSpaces(self, number):\n for i in range(0, number):\n print(\" \")\n\n def delete(self, key):\n if key == self.data:\n ''' when found and we know that has one or two children'''\n if self.left is sentinel:\n # if has got right child\n self.replaceWithChild(self.right)\n elif self.right is sentinel:\n # if has got left child\n self.replaceWithChild(self.left)\n else:\n if self.right.left is not sentinel:\n self.data = self.right.pullMostLeftData()\n print(\"data\", self.data)\n else:\n # put right child instead of self\n self.data, self.right.data = self.right.data, self.data\n self.right = self.right.right\n else:\n child = self.left if key < self.data else self.right\n if child is sentinel:\n print(\"Not found\")\n else:\n if not child.hasChildren() and child.data == key:\n # delete node if it is a leaf\n setattr(self, 'left' if key < self.data else 'right', sentinel)\n else:\n child.delete(key)\n self.rebalance()\n self.setHeight()\n\n def replaceWithChild(self, child):\n self.data, child.data = child.data, self.data\n self.left, child.left = child.left, self.left\n self.right, child.right = child.right, self.right\n\n def hasChildren(self):\n return self.left is not sentinel or self.right is not sentinel\n\n def pullMostLeftData(self):\n if self.left.left is not sentinel:\n return self.left.pullMostLeftData()\n else:\n data = self.left.data\n self.left = self.left.right\n return data\n\n def rotateRight(self):\n self.data, self.left.data = self.left.data, self.data\n temp = self.left\n self.left = self.left.left\n temp.left = self.right\n self.right = temp\n self.right.right, self.right.left = self.right.left, self.right.right\n\n def rotateLeft(self):\n self.data, self.right.data = 
self.right.data, self.data\n temp = self.right\n self.right = self.right.right\n temp.right = self.left\n self.left = temp\n self.left.right, self.left.left = self.left.left, self.left.right\n\n def doubleRotateRight(self):\n self.data, self.left.right.data = self.left.right.data, self.data\n self.right, self.left.right = self.left.right, self.right\n self.left.right, self.right.left = self.right.left, self.left.right\n self.right.left, self.right.right = self.right.right, self.right.left\n\n def doubleRotateLeft(self):\n self.data, self.right.left.data = self.right.left.data, self.data\n self.left, self.right.left = self.right.left, self.left\n self.right.left, self.left.right = self.left.right, self.right.left\n self.left.right, self.left.left = self.left.left, self.left.right\n\n def __str__(self):\n return str(self.data)\n\n\nclass binaryTree:\n def __init__(self):\n self.root = sentinel\n\n def add(self, data):\n if self.root is sentinel:\n self.root = Node(data)\n else:\n self.root.addNode(data)\n\n def printAll(self):\n if self.root is sentinel:\n print(\"Empty tree\")\n else:\n self.root.printNodes()\n\n def delete(self, key):\n self.root.delete(key)\n\n def printBFS(self):\n if self.root is not sentinel:\n print(\"Tree:\")\n level = [self.root]\n while level:\n newLevel = list()\n for n in level:\n print(str(n) + \" \", end='')\n if n.left is not sentinel:\n newLevel.append(n.left)\n if n.right is not sentinel:\n newLevel.append(n.right)\n print()\n level = newLevel\n\n\nif __name__ == '__main__':\n b = binaryTree()\n b.add(1)\n b.add(22)\n b.add(4)\n b.add(26)\n b.add(13)\n b.add(3)\n b.add(14)\n b.add(6)\n b.add(13)\n b.add(5)\n b.add(23)\n b.delete(22)\n b.add(36)\n b.add(43)\n b.add(5)\n for i in range(1, 20):\n b.add(i)\n b.delete(26)\n b.delete(13)\n b.delete(8)\n b.printBFS()\n","sub_path":"avl_tree.py","file_name":"avl_tree.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32347859","text":"__author__ = 'amrit'\n\nimport math\nimport pygmo as pg\nimport numpy as np\n\nfrom random import randint, uniform, choice, sample\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.metrics import confusion_matrix\n\nPRE, REC, SPEC, FPR, NPV, ACC, F1 = 7, 6, 5, 4, 3, 2, 1\n\n\ndef _randint(a=0, b=0):\n return randint(a, b)\n\n\ndef _randchoice(a):\n return choice(a)\n\n\ndef _randuniform(a=0.0, b=0.0):\n return uniform(a, b)\n\n\ndef _randsample(a, b=1):\n return sample(a, b)\n\n\ndef unpack(l):\n tmp = []\n for i in l:\n if type(i) is not list:\n tmp.append(i)\n else:\n for x in i:\n tmp.append(x)\n return tmp\n\n\ndef get_performance(prediction, test_labels):\n tn, fp, fn, tp = confusion_matrix(\n test_labels, prediction, labels=[0, 1]).ravel()\n pre = 1.0 * tp / (tp + fp) if (tp + fp) != 0 else 0\n rec = 1.0 * tp / (tp + fn) if (tp + fn) != 0 else 0\n spec = 1.0 * tn / (tn + fp) if (tn + fp) != 0 else 0\n fpr = 1 - spec\n npv = 1.0 * tn / (tn + fn) if (tn + fn) != 0 else 0\n acc = 1.0 * (tp + tn) / (tp + tn + fp + fn) if (\n tp + tn + fp + fn) != 0 else 0\n f1 = 2.0 * tp / (2.0 * tp + fp + fn) if (2.0 * tp + fp + fn) != 0 else 0\n gm = 2.0 * rec * (1 - fpr) / (rec + 1 - fpr) if (rec + 1 - fpr) != 0 else 0\n\n ifa = 0\n actual_results = np.asarray(test_labels)\n predicted_results = prediction\n for i, j in zip(actual_results, predicted_results):\n if (i == 1) and (j == 1):\n break\n elif (i == 0) and (j == 1):\n ifa += 1\n return [round(x, 3) for x in [pre, rec, spec, fpr, npv, 
acc, f1, gm, ifa]]\n\n\ndef get_score(criteria, prediction, test_labels, data):\n tn, fp, fn, tp = confusion_matrix(\n test_labels, prediction, labels=[0, 1]).ravel()\n pre, rec, spec, fpr, npv, acc, f1, gm, ifa = get_performance(\n prediction, test_labels)\n all_metrics = [tp, fp, tn, fn, pre, rec, spec, fpr, npv, acc, f1, ifa]\n if criteria == \"Accuracy\":\n score = -all_metrics[-ACC]\n elif criteria == \"d2h\":\n score = all_metrics[-FPR]**2 + (1 - all_metrics[-REC])**2\n score = math.sqrt(score) / math.sqrt(2)\n elif criteria == \"Pf_Auc\":\n score = auc_measure(prediction, test_labels)\n elif criteria == \"popt\":\n score = get_auc(data)\n elif criteria == \"popt20\":\n score = get_popt20(data)\n elif criteria == \"Gini\":\n p1 = all_metrics[-PRE] # target == 1 for the positive split\n p0 = 1 - all_metrics[-NPV] # target == 1 for the negative split\n score = 1 - p0**2 - p1**2\n elif criteria == 'recall':\n score = rec\n elif criteria == 'false_alarm':\n score = fpr\n elif criteria == 'g_measure':\n score = gm\n elif criteria == \"ifa\":\n score = ifa\n else: # Information Gain\n P, N = all_metrics[0] + all_metrics[3], all_metrics[1] + all_metrics[2]\n p = 1.0 * P / (P + N) if P + N > 0 else 0 # before the split\n p1 = all_metrics[-PRE] # the positive part of the split\n p0 = 1 - all_metrics[-NPV] # the negative part of the split\n I, I0, I1 = (-x * np.log2(x) if x != 0 else 0 for x in (p, p0, p1))\n I01 = p * I1 + (1 - p) * I0\n score = -(I - I01) # the smaller the better.\n return round(score, 3)\n\n\ndef auc_measure(prediction, test_labels):\n fpr, tpr, _ = roc_curve(test_labels, prediction, pos_label=1)\n auc1 = auc(fpr, tpr)\n return auc1\n\n\ndef subtotal(x):\n xx = [0]\n for _, t in enumerate(x):\n xx += [xx[-1] + t]\n return xx[1:]\n\n\ndef get_recall(true):\n total_true = float(len([i for i in true if i == 1]))\n hit = 0.0\n recall = []\n for i in range(len(true)):\n if true[i] == 1:\n hit += 1\n recall += [hit / total_true if total_true else 0.0]\n return recall\n\n\ndef get_auc(data):\n \"\"\"The smaller the better\"\"\"\n if len(data) == 1:\n return 0\n x_sum = float(sum(data['loc']))\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n yy = get_recall(data['bug'].values)\n try:\n ret = round(auc(xx, yy), 3)\n except:\n # print\"?\"\n ret = 0\n return ret\n\n\ndef get_popt20(data):\n data.sort_values(by=[\"bug\", \"loc\"], ascending=[0, 1], inplace=True)\n x_sum = float(sum(data['loc']))\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n\n # get AUC_optimal\n yy = get_recall(data['bug'].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n s_opt = round(auc(xxx, yyy), 3)\n\n # get AUC_worst\n xx = subtotal(x[::-1])\n yy = get_recall(data['bug'][::-1].values)\n xxx = [i for i in xx if i <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_wst = round(auc(xxx, yyy), 3)\n except:\n # print \"s_wst forced = 0\"\n s_wst = 0\n\n # get AUC_prediction\n data.sort_values(by=[\"prediction\", \"loc\"], ascending=[0, 1], inplace=True)\n x = data['loc'].apply(lambda t: t / x_sum)\n xx = subtotal(x)\n yy = get_recall(data['bug'].values)\n xxx = [k for k in xx if k <= 0.2]\n yyy = yy[:len(xxx)]\n try:\n s_m = round(auc(xxx, yyy), 3)\n except:\n return 0\n\n Popt = (s_m - s_wst) / (s_opt - s_wst)\n return round(Popt, 3)\n\n\ndef get_best(values, ignore_idx=None):\n \"\"\"Assumping everything are to MAXIMIZED.\n Return the best value indices\n https://esa.github.io/pagmo2/docs/python/utils/py_mo_utils.html\n \"\"\"\n V = np.array(values)\n for idx in 
ignore_idx:\n V[:, idx] = 0\n _, _, dc, _ = pg.fast_non_dominated_sorting(points=-1 * V)\n\n return [i for i, v in enumerate(dc) if v == 0]\n","sub_path":"src/model/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302502240","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport sklearn.datasets\nimport sklearn.linear_model\nimport sklearn.model_selection\nimport h5py\nimport scipy.io\n\n\n# def plot_decision_boundary(model, X, y):\n# # Set min and max values and give it some padding\n# x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n# y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n# h = 0.01\n# # Generate a grid of points with distance h between them\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n# # Predict the function value for the whole grid\n# Z = model(np.c_[xx.ravel(), yy.ravel()])\n# Z = Z.reshape(xx.shape)\n# # Plot the contour and training examples\n# plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n# plt.ylabel('x2')\n# plt.xlabel('x1')\n# plt.scatter(X[0, :], X[1, :], c=y[0, :], cmap=plt.cm.Spectral)\n\n\n# def plot_decision_boundary(model, X, y):\n# # Set min and max values and give it some padding\n# x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n# y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n# h = 0.01\n# # Generate a grid of points with distance h between them\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n# # Predict the function value for the whole grid\n# Z = model(np.c_[xx.ravel(), yy.ravel()])\n# Z = Z.reshape(xx.shape)\n# # Plot the contour and training examples\n# plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n# plt.ylabel('x2')\n# plt.xlabel('x1')\n# plt.scatter(X[0, :], X[1, :], c=y[0, :], cmap=plt.cm.Spectral)\n# plt.show()\n\n# Helper function to plot a decision boundary.\n# If you don't fully understand this function don't worry, it just generates the contour plot below.\n# def plot_decision_boundary(pred_func, X, y):\n# # Set min and max values and give it some padding\n# x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n# y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n# h = 0.01\n# # Generate a grid of points with distance h between them\n# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n# # Predict the function value for the whole gid\n# Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n# Z = Z.reshape(xx.shape)\n# # Plot the contour and training examples\n# plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n# plt.scatter(X[:, 0], X[:, 1], c=y[0, :], cmap=plt.cm.Spectral)\n\ndef map_feature(X, degree=2):\n '''\n generate the composition of features of X.\n :param X:\n :param degree:\n :return:\n '''\n assert(X.shape[0]<=2)\n if X.shape[0]==2:\n x1 = X[0,:]\n x2 = X[1,:]\n m = np.sum(np.array([i+1 for i in range(1, degree+1)]))\n out = np.zeros((m, X.shape[1]))\n k = 0\n for i in range(1, degree+1):\n for j in range(0, i+1):\n out[k, :] = np.multiply(np.power(x1, i-j), np.power(x2, j))\n k = k + 1\n return out\n else:\n x1 = X[0, :]\n out = np.zeros((degree, X.shape[1]))\n for i in range(0, degree):\n out[i, :] = np.power(x1, i+1)\n return out\n\n\ndef plot_decision_boundary(model, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1\n y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1\n h = 0.01\n # Generate a 
grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole grid\n Z = model(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.ylabel('x2')\n plt.xlabel('x1')\n plt.scatter(X[0, :], X[1, :], c=y[0, :], cmap=plt.cm.Spectral)\n plt.show()\n\n\n# map_feature\ndef plot_decision_boundary_map_feature(predictor, X, y, title=\"decision boundary\", degree=2 ):\n plt.title(title)\n axes = plt.gca()\n min = X.min(axis=1)\n max = X.max(axis=1)\n range = max - min\n min = min - 0.1 * range\n max = max + 0.1 * range\n axes.set_xlim((min[0], max[0]))\n axes.set_ylim((min[1], max[1]))\n plot_decision_boundary(lambda x: predictor.predict(map_feature(x.T, degree)), X, y)\n\n\ndef load_sin_line_dataset(data_path=\"../../data/sin_line.txt\", delimiter=\"\\t\", random_state=3, test_size=0.25):\n '''\n ex1data2.txt contains a training set of housing prices in Portland, Oregon.\n The first column is the size of the house (in square feet), the second column\n is the number of bedrooms, and the third column is the price of the house.\n :param data_path:\n :param delimiter:\n :return:\n '''\n data = np.loadtxt(data_path, delimiter=delimiter)\n X = data[:, :-1]\n y = data[:, -1:]\n train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(\n X, y, test_size=test_size, random_state=random_state)\n return train_X.T, train_y.T, test_X.T, test_y.T\n\n\ndef load_flat_dataset(data_path=\"../../data/ex1data1.txt\", delimiter=\",\"):\n '''\n ex1data2.txt contains a training set of housing prices in Portland, Oregon.\n The first column is the size of the house (in square feet), the second column\n is the number of bedrooms, and the third column is the price of the house.\n :param data_path:\n :param delimiter:\n :return:\n '''\n data = np.loadtxt(data_path, delimiter=delimiter)\n X = data[:, :-1]\n y = data[:, -1:]\n return X.T, y.T\n\n\ndef load_regression_dataset(n_samples=400, n_features=1, bias=1, noise=5, random_state=3, test_size=0.25):\n X, y = sklearn.datasets.make_regression(n_samples=n_samples, n_features=n_features, n_targets=1, bias=bias,\n coef=False, noise=noise, random_state=random_state)\n train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(\n X, y, test_size = test_size, random_state = random_state)\n return train_X, train_y, test_X, test_y\n\n\ndef load_regression_dataset_coef(n_samples=400, n_features=1, bias=1, noise=5, random_state=3, test_size=0.25, b=5):\n X, y, w = sklearn.datasets.make_regression(n_samples=n_samples, n_features=n_features, n_targets=1, bias=bias,\n coef=True, noise=noise, random_state=random_state)\n train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(\n X, y, test_size = test_size, random_state = random_state)\n return train_X, train_y+b, test_X, test_y+b, w, b\n\n\ndef load_happy_dataset(train_data_path='../../data/train_happy.h5', test_data_path='../../data/test_happy.h5'):\n train_dataset = h5py.File(train_data_path, \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n test_dataset = h5py.File(test_data_path, \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your 
test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))\n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n\ndef load_boston_dataset(test_size=0.25, random_state=3):\n '''\n -Origin\n The origin of the boston housing data is Natural.\n -Usage\n This dataset may be used for Assessment.\n -Number of Cases\n The dataset contains a total of 506 cases.\n -Order\n The order of the cases is mysterious.\n -Variables\n There are 14 attributes in each case of the dataset. They are:\n CRIM - per capita crime rate by town\n ZN - proportion of residential land zoned for lots over 25,000 sq.ft.\n INDUS - proportion of non-retail business acres per town.\n CHAS - Charles River dummy variable (1 if tract bounds river; 0 otherwise)\n NOX - nitric oxides concentration (parts per 10 million)\n RM - average number of rooms per dwelling\n AGE - proportion of owner-occupied units built prior to 1940\n DIS - weighted distances to five Boston employment centres\n RAD - index of accessibility to radial highways\n TAX - full-value property-tax rate per $10,000\n PTRATIO - pupil-teacher ratio by town\n B - 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n LSTAT - % lower status of the population\n MEDV - Median value of owner-occupied homes in $1000's\n :return:\n '''\n boston = sklearn.datasets.load_boston()\n train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(\n boston.data, boston.target, test_size = test_size, random_state = random_state)\n return train_X, train_y, test_X, test_y\n\n\ndef load_planar_dataset():\n np.random.seed(1)\n m = 400 # number of examples\n N = int(m / 2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m, D)) # data matrix where each row is a single example\n Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N * j, N * (j + 1))\n t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta\n r = a * np.sin(4 * t) + np.random.randn(N) * 0.2 # radius\n X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n Y[ix] = j\n\n X = X.T\n Y = Y.T\n\n return X, Y\n\n\ndef load_petal_dataset(num_example=400, random_state=3):\n np.random.seed(1)\n m = num_example # number of examples\n N = int(m / 2) # number of points per class\n D = 2 # dimensionality\n X = np.zeros((m, D)) # data matrix where each row is a single example\n Y = np.zeros((m, 1), dtype='uint8') # labels vector (0 for red, 1 for blue)\n a = 4 # maximum ray of the flower\n\n for j in range(2):\n ix = range(N * j, N * (j + 1))\n t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2 # theta\n r = np.sin(4 * t) + np.random.randn(N) * 0.2 /a # radius\n X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n Y[ix] = j\n\n\n train_X, test_X, train_y, test_y = sklearn.model_selection.train_test_split(\n X, Y, test_size=0.25, random_state=random_state)\n\n return train_X.T, train_y.T, test_X.T, test_y.T\n\n\ndef load_extra_datasets():\n N = 200\n noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)\n noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)\n blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)\n gaussian_quantiles = 
sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2,\n n_classes=2, shuffle=True, random_state=None)\n no_structure = np.random.rand(N, 2), np.random.rand(N, 2)\n\n return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure\n\n\ndef load_dataset():\n np.random.seed(1)\n train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)\n np.random.seed(2)\n test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)\n # Visualize the data\n plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n test_X = test_X.T\n test_Y = test_Y.reshape((1, test_Y.shape[0]))\n return train_X, train_Y, test_X, test_Y\n\n\ndef load_circle_dataset(n_train=300, seed_train=1, noise_train=0.05, n_test=100, seed_test=2, noise_test=0.05):\n np.random.seed(seed_train)\n train_X, train_Y = sklearn.datasets.make_circles(n_samples=n_train, noise=noise_train)\n np.random.seed(seed_test)\n test_X, test_Y = sklearn.datasets.make_circles(n_samples=n_test, noise=noise_test)\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n test_X = test_X.T\n test_Y = test_Y.reshape((1, test_Y.shape[0]))\n return train_X, train_Y, test_X, test_Y\n\n\ndef load_image_data(train_data_path='../../data/train_catvnoncat.h5', test_data_path='../../data/test_catvnoncat.h5'):\n\n print('load ' + train_data_path)\n train_dataset = h5py.File(train_data_path, \"r\")\n train_set_x_orig = np.array(train_dataset[\"train_set_x\"][:]) # your train set features\n train_set_y_orig = np.array(train_dataset[\"train_set_y\"][:]) # your train set labels\n\n print('load ' + test_data_path)\n test_dataset = h5py.File(test_data_path, \"r\")\n test_set_x_orig = np.array(test_dataset[\"test_set_x\"][:]) # your test set features\n test_set_y_orig = np.array(test_dataset[\"test_set_y\"][:]) # your test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) # the list of classes\n\n train_set_y_orig = train_set_y_orig.reshape((train_set_y_orig.shape[0], 1))\n test_set_y_orig = test_set_y_orig.reshape((test_set_y_orig.shape[0], 1))\n\n return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes\n\n\ndef initialize_image_data(X, y=None):\n m = X.shape[0]\n X = X.reshape((m, -1)).T\n if y is not None: y = y.T\n return X/255.0, y\n\n\ndef load_moon_dataset():\n np.random.seed(3)\n train_X, train_Y = sklearn.datasets.make_moons(n_samples=300, noise=.2) # 300 #0.2\n # Visualize the data\n\n train_X = train_X.T\n train_Y = train_Y.reshape((1, train_Y.shape[0]))\n\n return train_X, train_Y\n\n\ndef load_football_dataset(file_path='../../data/football.mat'):\n data = scipy.io.loadmat(file_path)\n train_X = data['X'].T\n train_Y = data['y'].T\n test_X = data['Xval'].T\n test_Y = data['yval'].T\n\n return train_X, train_Y, test_X, test_Y\n\n\nif __name__ == \"__main__\":\n # load_image_data()\n # load_football_dataset()\n # load_image_data()\n # train_X, train_y, test_X, test_y = load_boston_dataset()\n # print(train_X.shape)\n # print(train_y.shape)\n # print(test_X.shape)\n # print(test_y.shape)\n train_X, train_y, test_X, test_y = load_regression_dataset()\n print(train_X.shape)\n print(train_y.shape)\n print(test_X.shape)\n 
print(test_y.shape)","sub_path":"arsenal/eipi10/ml/planar_utils.py","file_name":"planar_utils.py","file_ext":"py","file_size_in_byte":14496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87887314","text":"# Copyright lowRISC contributors.\n# Licensed under the Apache License, Version 2.0, see LICENSE for details.\n# SPDX-License-Identifier: Apache-2.0\n\n'''A wrapper around reggen for otbn.hjson'''\n\nimport os\nimport sys\nfrom typing import Optional, Tuple\n\n\n# We use reggen to read the hjson file. Since that lives somewhere completely\n# different from this script (and there aren't __init__.py files scattered all\n# over the OpenTitan repository), we have to do sys.path hacks to find it.\n_OLD_SYS_PATH = sys.path\ntry:\n _UTIL_PATH = os.path.join(os.path.dirname(__file__),\n '..', '..', '..', '..', '..', 'util')\n sys.path = [_UTIL_PATH] + _OLD_SYS_PATH\n import reggen.field # type: ignore\n import reggen.ip_block # type: ignore\n import reggen.reg_block # type: ignore\n import reggen.register # type: ignore\n import reggen.window # type: ignore\nfinally:\n sys.path = _OLD_SYS_PATH\n\n# Re-export some reggen types so that code importing otbn_reggen can get them\n# transitively without having to mess around with sys.path.\nRegister = reggen.register.Register\nField = reggen.field.Field\nWindow = reggen.window.Window\nRegBlock = reggen.reg_block.RegBlock\nIpBlock = reggen.ip_block.IpBlock\n\n_LR_RETVAL = None # type: Optional[Tuple[int, object]]\n\n\ndef load_registers() -> Tuple[int, object]:\n '''Load otbn.hjson with reggen\n\n Returns (width, regs) where width is the register width and regs is a\n list of Register, MultiRegister or Window objects. Memoized.\n\n '''\n global _LR_RETVAL\n if _LR_RETVAL is not None:\n return _LR_RETVAL\n\n path = os.path.join(os.path.dirname(__file__),\n '..', '..', 'data', 'otbn.hjson')\n\n try:\n obj = IpBlock.from_path(path, [])\n except ValueError as err:\n raise RuntimeError('Failed to parse {!r}: {}'.format(path, err))\n\n reg_bit_width = obj.regwidth\n assert isinstance(reg_bit_width, int) and reg_bit_width >= 0\n reg_byte_width = (reg_bit_width + 7) // 8\n\n registers = obj.reg_blocks[None]\n assert isinstance(registers, RegBlock)\n _LR_RETVAL = (reg_byte_width, registers)\n return _LR_RETVAL\n","sub_path":"hw/ip/otbn/util/shared/otbn_reggen.py","file_name":"otbn_reggen.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505634938","text":"__author__ = 'Paul'\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nclass Client(models.Model):\n address = models.CharField(max_length=70)\n phone = models.CharField(max_length=13)\n date_of_birth = models.DateField()\n\n user_fk = models.ForeignKey(User)\n\n# одиниця продукції\n# ціна, об'єм та назва -- обов'язкові атрибути, додаткові атрибути (виробник і тд) в моделі ProductAttributes\nclass Product(models.Model):\n name = models.CharField(max_length=100)\n price = models.DecimalField(max_digits=12, decimal_places=2)\n volume = models.DecimalField(max_digits=8, decimal_places=3)\n is_available = models.BooleanField()\n image_path = models.ImageField()\n\n# атрибути товару\nclass ProductAttributes(models.Model):\n attr_name = models.CharField(max_length=50)\n attr_value = models.CharField(max_length=1000)\n\n product_fk = models.ForeignKey(Product)\n\n# ящик (упаковка) товару\nclass ProductPackage(models.Model):\n 
total_volume = models.DecimalField(max_digits=12, decimal_places=3)\n total_width = models.DecimalField(max_digits=6, decimal_places=3)\n total_height = models.DecimalField(max_digits=6, decimal_places=3)\n total_length = models.DecimalField(max_digits=6, decimal_places=3)\n image_path = models.ImageField()\n\n consignment_fk = models.ForeignKey(Consignment)\n\n\n# партія товарів\nclass Consignment(models.Model):\n number = models.IntegerField(unique=True)\n creation_date = models.DateTimeField()\n expiration_date = models.DateTimeField()\n\n product_fk = models.ForeignKey(Product)\n\n# коментар до товару\nclass Comments(models.Model):\n text = models.TextField()\n date = models.DateTimeField()\n\n product_fk = models.ForeignKey(Product)\n client_fk = models.ForeignKey(Client)\n\n\n# таблиця для звязку користувача з його лайками (перевіряти чи він уже лайкав той чи інший продукт)\nclass RatedProducts(models.Model):\n product_fk = models.ForeignKey(Product)\n user_fk = models.ForeignKey(User)\n is_rated = models.BooleanField()\n value = models.SmallIntegerField(blank=True)\n\n\nclass Order(models.Model):\n state_choices = ('ACTIVE', 'COMPLETED', 'FROZEN')\n # дата складення замовлення\n initial_date = models.DateTimeField()\n # час в який потрібно доставити товар\n order_date = models.DateTimeField()\n # дата доставки\n delivery_date = models.DateTimeField()\n address = models.CharField(max_length=200)\n state = models.CharField(choices=state_choices)\n\n client_fk = models.ForeignKey(User)\n\nclass ProductsInOrder(models.Model):\n product_fk = models.ForeignKey(Product)\n order_fk = models.ForeignKey(Order)\n\nclass Trucks():\n brand = models.CharField(max_length=25)\n number = models.CharField(max_length=10)\n permissible_load = models.IntegerField(max_length=8)\n width = models.DecimalField(max_digits=10, decimal_places=3)\n height = models.DecimalField(max_digits=10, decimal_places=3)\n length = models.DecimalField(max_digits=10, decimal_places=3)\n\n driver_fk = models.ForeignKey(User)\n\n\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"232659716","text":"import pickle\nimport string\nimport random\nimport numpy as np\nimport bisect\nimport time\nimport multiprocessing as mp\nimport os\nimport threading\n\n\n# os.system(\"taskset -p 0xff %d\" % os.getpid())\n\n\n\n# class Test:\n# def __init__(self):\n# self.name = 'test'\n# self.id = random.random()\n#\n# dic = Test()\n# with open('test.pickle', 'wb') as f:\n# pickle.dump(dic, f, protocol=pickle.HIGHEST_PROTOCOL)\n#\n# k = open('test.pickle', 'rb')\n# d = pickle.load(k)\n\n# dic = {}\n# with open('../data/word2vec_c', 'r') as f:\n# while True:\n# line = f.readline()\n# if line == '':\n# break\n# ll = line[:-1].split(' ')\n# dic[ll[0]] = np.array(ll[1:]).astype(np.float)\n#\n# with open('../data/w2v.pickle', 'wb') as f:\n# pickle.dump(dic, f)\n\n\n\n\n\nclass My_thread (mp.Process):\n\n def __init__(self, word):\n mp.Process.__init__(self)\n self.word = word\n\n def run(self):\n print(\"Starting {}\".format(self.word))\n output(self.word)\n print(\"Exiting {} \\n \\n\".format(self.word))\n\n\ndef similar_words(word, length):\n tic2 = time.clock()\n with open('../../../data/word2vec/w2v.pickle', 'rb') as f:\n dic = pickle.load(f)\n toc2 = time.clock()\n tot2 = toc2 - tic2\n word = word\n vec = dic[word]\n\n\n\n tot1 = 0\n # tot2 = 0\n minimum = []\n\n for key in dic.keys():\n\n value = 
dic[key]\n\n if np.shape(value) == np.shape(vec):\n # print(np.dot(dic[key], vec))\n tic = time.clock()\n diff = np.linalg.norm(dic[key] - vec)\n # diff = np.dot(dic[key], vec)\n toc = time.clock()\n tot1 += toc - tic\n if len(minimum) < length:\n minimum.append((diff, key))\n minimum.sort()\n\n else:\n bisect.insort(minimum, (diff, key))\n\n minimum = minimum[:-1]\n\n # print(\"Step1 time for {}: {}\".format(word, tot1))\n # print(\"Step2 time for {}: {} \\n \\n\".format(word, tot2))\n return minimum\n\n\ndef output(word):\n ttic = time.clock()\n print(\"Starting word: {} \\n\".format(word))\n words = similar_words(word, 10)\n print(\"Input word: {} \\n\".format(word))\n for item in words:\n print(\"Word: {} Diff: {}\".format(item[1], item[0]))\n ttoc = time.clock()\n print(\"Total time for {}: {} \\n \\n \".format(word, ttoc - ttic))\n return 0\n\n\n\n# def output1():\n# for i in range(100000000000):\n# j = i**2\n# return 1\n#\n#\n# def output2():\n# for i in range(100000000000):\n# j = i + 2\n# return 2\n\n\nif __name__ == '__main__':\n\n # out = mp.Queue()\n # q = mp.Queue()\n # p1 = mp.Process(target=output1())\n # p2 = mp.Process(target=output2())\n # words = [\"大佬\", \"美女\", \"童年\", \"学习\", \"异常\", \"刺激\", \"色情\", \"抽烟\", \"完美\", ]\n words = [\"财经\", \"金融\", \"股票\", \"证券\", \"异常\", \"刺激\", \"色情\", \"抽烟\", \"完美\", ]\n process_list = []\n for w in words[:6]:\n process_list.append(mp.Process(target=output, args=(w,)))\n\n for p in process_list:\n p.start()\n for p in process_list:\n p.join()\n\n print(\"Exiting main thread \\n\")\n # print(q.get())\n\n\n","sub_path":"src/scripts/nlp/word2vec/w2v_test.py","file_name":"w2v_test.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"491206615","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom enum import Enum\nfrom yargy.labels import (\n gram,\n gram_not,\n dictionary,\n is_capitalized,\n gnc_match,\n eq,\n)\n\n\nFEDERAL_DISTRICT_DICTIONARY = {\n 'центральный',\n 'северо-западный',\n 'южный',\n 'северо-кавказский',\n 'приволжский',\n 'уральский',\n 'сибирский',\n 'дальневосточный',\n}\n\nREGION_TYPE_DICTIONARY = {\n 'край',\n 'район',\n 'область',\n 'губерния',\n 'уезд',\n}\n\nCOMPLEX_OBJECT_PREFIX_DICTIONARY = {\n 'северный',\n 'северо-западный',\n 'северо-восточный',\n 'южный',\n 'юго-западный',\n 'юго-восточный',\n 'западный',\n 'восточный',\n 'верхний',\n 'вышний',\n 'нижний',\n 'великий',\n 'дальний',\n}\n\nPARTIAL_OBJECT_PREFIX_DICTIONARY = {\n 'север',\n 'северо-восток',\n 'северо-запад',\n 'юг',\n 'юго-восток',\n 'юго-запад',\n 'запад',\n 'восток',\n}\n\nclass Geo(Enum):\n\n FederalDistrict = [\n {\n 'labels': [\n gram('ADJF'),\n dictionary(FEDERAL_DISTRICT_DICTIONARY),\n ],\n },\n {\n 'labels': [\n dictionary({'федеральный', }),\n ],\n },\n {\n 'labels': [\n dictionary({'округ', }),\n ],\n },\n ]\n\n FederalDistrictAbbr = [\n {\n 'labels': [\n gram('ADJF'),\n dictionary(FEDERAL_DISTRICT_DICTIONARY),\n ],\n },\n {\n 'labels': [\n eq('ФО'),\n ],\n },\n ]\n\n Region = [\n {\n 'labels': [\n gram('ADJF'),\n ],\n },\n {\n 'labels': [\n dictionary(REGION_TYPE_DICTIONARY),\n gnc_match(-1, solve_disambiguation=True),\n ],\n },\n ]\n\n ComplexObject = [\n {\n 'labels': [\n gram('ADJF'),\n dictionary(COMPLEX_OBJECT_PREFIX_DICTIONARY),\n ],\n },\n {\n 'labels': [\n gram('NOUN'),\n gram('Geox'),\n gnc_match(-1, solve_disambiguation=True),\n ],\n },\n ]\n\n PartialObject = [\n {\n 'labels': [\n gram('NOUN'),\n 
dictionary(PARTIAL_OBJECT_PREFIX_DICTIONARY),\n ],\n },\n {\n 'labels': [\n gram('NOUN'),\n gram('Geox'),\n gnc_match(-1, solve_disambiguation=True),\n ],\n },\n ]\n\n Object = [\n {\n 'labels': [\n is_capitalized(True),\n gram('Geox'),\n gram_not('Abbr'),\n ],\n },\n ]\n","sub_path":"natasha/grammars/geo.py","file_name":"geo.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601383706","text":"\"\"\"\r\npytorch 如何共享参数\r\nhttps://www.cnblogs.com/sdu20112013/p/12134330.html\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.init as init\r\n\r\n\r\ndef seq_share_weights():\r\n linear = nn.Linear(1, 1, bias=False)\r\n # 传入 Sequential 的模块是同一个 Module 实例的话参数也是共享的\r\n net = nn.Sequential(linear, linear) # 2个 linear 在内存中对应同一个对象\r\n print(net)\r\n # id -> 139669020411160,对象 id,unique among simultaneously existing objects.\r\n print(id(net[0]) == id(net[1])) # True\r\n print(id(net[0].weight) == id(net[1].weight)) # True\r\n\r\n # y = wx, 初始化 linear 层 w=3; net = 3*3*x = 9x\r\n for name, param in net.named_parameters():\r\n init.constant_(param, val=3)\r\n print(name, param.data)\r\n\r\n x = torch.ones(1, 1) # bs=1\r\n y = net(x).sum()\r\n print(y) # 3*3*1 = 9\r\n y.backward()\r\n print(net[1].weight.grad) # 6 共享参数的 grad 是累加的,相当于更新了2次\r\n print(net[0].weight.grad) # 6\r\n\r\n\r\ndef seq_unique_weights():\r\n linear1 = nn.Linear(1, 1, bias=False)\r\n linear2 = nn.Linear(1, 1, bias=False)\r\n net = nn.Sequential(linear1, linear2)\r\n print(net)\r\n\r\n for name, param in net.named_parameters():\r\n init.constant_(param, val=3)\r\n print(name, param.data)\r\n\r\n x = torch.ones(1, 1)\r\n y = net(x).sum()\r\n print(y)\r\n y.backward()\r\n print(net[1].weight.grad) # 3; 倒数第1层,grad1 = 3x = 3\r\n print(net[0].weight.grad) # 3; 倒数第2层,grad0 = grad1 * x = 3*1 = 3\r\n\r\n\r\nif __name__ == '__main__':\r\n seq_share_weights()\r\n print()\r\n seq_unique_weights()\r\n","sub_path":"zeros/share_params.py","file_name":"share_params.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362298762","text":"import os\nimport random\n\nimport cv2\nimport numpy as np \nimport tensorflow as tf \n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef species_to_num(name):\n species = [0 for _ in range(15)]\n if name.split('_')[0] == '宝贝科':\n species[0] = 1\n elif name.split('_')[0] == '芋螺科':\n species[1] = 1\n elif name.split('_')[0] == '蛾螺科':\n species[2] = 1\n elif name.split('_')[0] == '榧螺科':\n species[3] = 1\n elif name.split('_')[0] == '凤螺科':\n species[4] = 1\n elif name.split('_')[0] == '蚶科':\n species[5] = 1\n elif name.split('_')[0] == '盔螺科':\n species[6] = 1\n elif name.split('_')[0] == '帘蛤科':\n species[7] = 1\n elif name.split('_')[0] == '马蹄螺科':\n species[8] = 1\n elif name.split('_')[0] == '鸟蛤科':\n species[9] = 1\n elif name.split('_')[0] == '细带螺科':\n species[10] = 1\n elif name.split('_')[0] == '玉螺科':\n species[11] = 1\n elif name.split('_')[0] == '贻贝科':\n species[12] = 1\n elif name.split('_')[0] == '砗磲科':\n species[13] = 1\n elif name.split('_')[0] == '扇贝科':\n species[14] = 1\n else:\n raise NameError('No species named %s' % str(file))\n species = bytes(str(species), encoding='utf-8')\n return species\n\ndef create_tfrecords():\n if os.getcwd() != 
'/home/fish/图片/all_in_one':\n os.chdir('/home/fish/图片/all_in_one')\n #dir = '/home/fish/图片/images_tfrecords'\n dir = '/home/fish/图片/three_tfrecords'\n if not os.path.exists(dir):\n os.mkdir(dir)\n #filename = os.path.join(dir, 'all_pic.tfrecords')\n #writer = tf.python_io.TFRecordWriter(filename)\n #count = 0\n dev_count = 0\n test_count = 0\n train_count = 0\n species = np.zeros((15))\n \"\"\"\n for root, dirs, files in os.walk(os.getcwd()):\n if dirs == []:\n for file in files:\n img = cv2.imread(os.path.join(root, str(file)))\n if img.shape == None:\n print('error')\n continue\n \"\"\"\n for root, dirs, files in os.walk(os.getcwd()):\n if dirs == []:\n num = len(files)\n #dev set\n filename = os.path.join(dir, 'dev.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n for _ in range(int(num*0.2)):\n index = random.randint(0, num-1)\n while (index > len(files)-1):\n index = random.randint(0, num-1)\n img = cv2.imread(os.path.join(root, str(files[index])))\n image_raw = cv2.imencode('.jpg', img)[1].tostring()\n species = species_to_num(str(files[index]))\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _bytes_feature(species),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n files.remove(files[index])\n dev_count += 1\n writer.close()\n #test set\n filename = os.path.join(dir, 'test.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n for _ in range(int(num*0.2)):\n index = random.randint(0, int(num*0.8-1))\n while (index > len(files)-1):\n index = random.randint(0, int(num*0.8-1))\n\n img = cv2.imread(os.path.join(root, str(files[index])))\n image_raw = cv2.imencode('.jpg', img)[1].tostring()\n species = species_to_num(str(files[index]))\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _bytes_feature(species),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n files.remove(files[index])\n test_count += 1\n writer.close()\n #train set\n filename = os.path.join(dir, 'train.tfrecords')\n writer = tf.python_io.TFRecordWriter(filename)\n for file in files:\n \n img = cv2.imread(os.path.join(root, str(file)))\n image_raw = cv2.imencode('.jpg', img)[1].tostring()\n species = species_to_num(str(file))\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _bytes_feature(species),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n train_count += 1\n writer.close() \n \"\"\"\n if img.shape != (256, 256, 3):\n raise ValueError(\"Image size %d doesn't match 256,256,3 \" % img.shape)\n species = species_to_num(str(file))\n image_raw = cv2.imencode('.jpg', img)[1].tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _int64_feature(int(species)),\n 'image_raw': _bytes_feature(image_raw)\n }))\n writer.write(example.SerializeToString())\n print('count==', count)\n count += 1\n writer.close()\n \"\"\"\n print('dev = ', dev_count)\n print('test = ', test_count)\n print('train = ', train_count)\n\ndef main():\n create_tfrecords()\n\nif __name__ == '__main__':\n main()\n ","sub_path":"create_tfrecords.py","file_name":"create_tfrecords.py","file_ext":"py","file_size_in_byte":5730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"82216215","text":"\nclass Node:\n\t__slots__ = \"_element\", \"_next\"\n\t\n\tdef __init__(self, element, next):\n\t\tself._element = element\n\t\tself._next = 
next\n\t\t\nclass LinkedList:\n\t\n\tdef __init__(self):\n\t\tself.head = None\n\t\tself.tail = None \n\t\tself.size = 0\n\t\t\n\tdef __len__(self):\n\t\treturn self.size\n\t\t\n\tdef isempty(self):\n\t\treturn self.size == 0\n\t\t\n\tdef addlast(self, e):\n\t\tnewest = Node(e,None)\n\t\tif self.isempty():\n\t\t\tself.head = newest\n\t\telse :\n\t\t\tself.tail._next = newest\n\t\tself.tail = newest\n\t\tself.size +=1\n\t\t\n\tdef display(self):\n\t\tp =self.head \n\t\twhile p:\n\t\t\tprint (p._element,end = \"-->\")\n\t\t\tp= p._next\n\t\tprint ()\n\ns = LinkedList()\ns.addlast(5)\ns.addlast(6)\ns.addlast(7)\ns.display()\nprint(len(s))\ns.addlast(2)\ns.addlast(1)\ns.addlast(7)\ns.display()\nprint(len(s))\n\t\t\t\n\t\t\n\t","sub_path":"linked_list/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"551595354","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport os\nimport sys\nimport subprocess\nimport re\nfrom clusterlib.scheduler import submit\nfrom clusterlib.scheduler import queued_or_running_jobs\nfrom module_loader import module\n\ninput_root = \"/group/nealedata4/Piar_wgs/Cleaner_decontam/decontam\"\noutput_root = \"/group/nealedata4/Piar_wgs/mapped\"\nmodule_args = ['java']\nsuffix = \".fq.gz\"\njob_prefix = \"BBMap_\"\npartition = \"bigmemh\"\nmaxmem = \"300\"\nmaxcpu = \"30\"\nreference = \"/group/nealedata4/Psme_reseq/genome/Psme.scf.uniq.fa\"\njavaxmx = str(int(maxmem) - 2)\njavathreads = str(int(maxcpu) - 2)\nemail = \"cacampbell@ucdavis.edu\"\ndry_run = False\nverbose = False\n\n\ndef dispatch_to_slurm(commands):\n scripts = {}\n\n for job_name, command in commands.iteritems():\n script = submit(command, job_name=job_name, time=\"0\",\n memory=\"{}G\".format(maxmem), backend=\"slurm\",\n shell_script=\"#!/usr/bin/env bash\")\n script += \" --partition={}\".format(partition)\n script += \" --ntasks=1\"\n script += \" --cpus-per-task={}\".format(maxcpu)\n script += \" --mail-type=END,FAIL\"\n script += \" --mail-user={}\".format(email)\n scripts[job_name] = script\n\n scheduled_jobs = set(queued_or_running_jobs())\n\n for job_name, script in scripts.iteritems():\n if job_name not in scheduled_jobs:\n if verbose:\n print(\"{}\".format(script), file=sys.stdout)\n\n if not dry_run:\n subprocess.call(script, shell=True)\n else:\n print(\"{} already running, skipping\".format(job_name),\n file=sys.stderr)\n\n\ndef existing_files_check(list_of_files):\n for filename in list_of_files:\n if os.path.isfile(filename):\n return True\n\n return False\n\n\ndef output_file(filename):\n return os.path.join(output_root, os.path.relpath(filename,\n start=input_root))\n\n\ndef make_commands(filenames):\n commands = {}\n filenames = [x for x in filenames if \"_1\" in x]\n\n for filename in filenames:\n job_name = job_prefix + \"{}\".format(os.path.basename(filename))\n input_f1 = filename\n input_f2 = re.sub(\"_1\", \"_2\", filename)\n map_sam = re.sub(\"_1\", \"pe\", filename)\n map_sam = re.sub(suffix, \".sam\", map_sam)\n map_sam = output_file(map_sam)\n unmap_sam = re.sub(\".sam\", \".unmapped.sam\", map_sam)\n covstat = re.sub(\"_1\", \"covstats\", filename)\n covstat = re.sub(suffix, \".txt\", covstat)\n covstat = output_file(covstat)\n covhist = re.sub(\"_1\", \"covhist\", filename)\n covhist = re.sub(suffix, \".txt\", covhist)\n covhist = output_file(covhist)\n basecov = re.sub(\"_1\", \"basecov\", filename)\n basecov = 
re.sub(suffix, \".txt\", basecov)\n basecov = output_file(basecov)\n bincov = re.sub(\"_1\", \"bincov\", filename)\n bincov = re.sub(suffix, \".txt\", filename)\n bincov = output_file(bincov)\n bashscript = re.sub(\"_1\", \"sort_index\", filename)\n bashscript = re.sub(suffix, '.sh', bashscript)\n bashscript = output_file(bashscript)\n command = (\"bbmap.sh in1={i1} in2={i2} outm={om} outu={ou} ref={r} \"\n \"nodisk covstats={covstat} covhist={covhist} threads={t} \"\n \"slow k=12 -Xmx{xmx}G basecov={basecov} bincov={bincov} \"\n \"bamscript={bs}; source {bs}\").format(i1=input_f1,\n i2=input_f2,\n om=map_sam,\n ou=unmap_sam,\n r=reference,\n covstat=covstat,\n covhist=covhist,\n basecov=basecov,\n bincov=bincov,\n xmx=javaxmx,\n t=javathreads,\n bs=bashscript)\n\n if not existing_files_check([map_sam, unmap_sam, covstat, covhist,\n covhist, basecov, bincov]):\n commands[job_name] = command\n else:\n print(\"{} already ran, skipping\".format(job_name),\n file=sys.stderr)\n\n if verbose:\n print(command, file=sys.stdout)\n\n return commands\n\n\ndef make_directories():\n input_base = os.path.dirname(input_root)\n command = (\"find {} -type d | sed -n 's|{}||p' | \"\n \"parallel --gnu -j 4 mkdir -p {}/{{}}\").format(input_root,\n input_base,\n output_root)\n subprocess.call(command, shell=True)\n\n\ndef get_files(directory):\n filelist = []\n\n for root, directories, filenames in os.walk(directory):\n for filename in filenames:\n if filename.endswith(suffix):\n abs_path = os.path.join(root, filename)\n\n if verbose:\n print(abs_path, file=sys.stdout)\n\n filelist += [abs_path]\n\n return filelist\n\n\ndef load_modules(module_args):\n try:\n args = ['load']\n args.extend(module_args)\n module(args)\n except Exception as e:\n print(\"Could not load module: {}\".format(e), file=sys.stderr)\n\n\ndef main():\n if verbose:\n print(\"Loading Modules...\", file=sys.stdout)\n\n load_modules(module_args)\n\n if verbose:\n print(\"Gathering Files...\", file=sys.stdout)\n\n filenames = get_files(input_root)\n\n if verbose:\n print(\"Making output directories...\")\n\n make_directories()\n\n if verbose:\n print(\"Making Commands...\", file=sys.stdout)\n\n commands = make_commands(filenames)\n\n if verbose:\n print(\"Dispatching to Slurm...\", file=sys.stdout)\n\n dispatch_to_slurm(commands)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"old/Piar/Piar_map.py","file_name":"Piar_map.py","file_ext":"py","file_size_in_byte":6207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"429191017","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 5\nind = np.arange(N) # the x locations for the groups\nwidth = 0.2 # the width of the bars\n\nfig = plt.figure()\nfig.subplots_adjust(bottom=0.25)\nax = fig.add_subplot(111)\n\nyvals = [42, 27, 26, 26, 24]\nrects1 = ax.bar(ind, yvals, width, color='r', label='invalid')\nzvals = [34, 47, 47, 47, 49]\nrects2 = ax.bar(ind + width, zvals, width, color='b', label='direct')\nkvals = [13, 15, 16, 16, 16]\nrects3 = ax.bar(ind + width * 2, kvals, width, color='y',\n label='outlier')\n\nax.set_ylabel('Num of Unique Addresses')\nax.set_xlabel('Minimum threshold (km)')\nax.set_xticks(ind + width)\nax.set_xticklabels(('1', '3', '5', '7', '9'))\nax.legend((rects1[0], rects2[0], rects3[0]),\n ('Multi GC addresses with invalid geocodes',\n 'Multi GC addresses with no outliers',\n 'Multi GC addresses with outliers'),\n loc='upper center', bbox_to_anchor=(0.5, -0.15))\n\n\ndef autolabel(rects):\n for rect in 
rects:\n h = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * h, '%d' % int(h),\n ha='center', va='bottom')\n\n\nautolabel(rects1)\nautolabel(rects2)\nautolabel(rects3)\n\nplt.savefig('out/Min_Threshold.png')\n","sub_path":"Programs/Python/new_gc/result_visualize/multigc.py","file_name":"multigc.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603155225","text":"from model.information import Information\nfrom model.user import User\nfrom model.time_record import TimeRecord\nfrom logging import getLogger\nimport json\nimport urllib.parse\nimport urllib.request\n\nlogger = getLogger(__name__)\n\nADD_RECORD_STATUS_OK = '1',\nADD_RECORD_STATUS_USER_NOT_FOUND = '5'\n\n\nclass ApiClient(object):\n def __init__(self, options):\n self._options = options\n\n def get_information(self):\n try:\n headers = {\n 'X-API-KEY': self._options.api_key\n }\n\n req = urllib.request.Request(\n self._options.get_information_url, headers=headers)\n\n with urllib.request.urlopen(req) as res:\n body = res.read()\n result = ApiResult(res.code, Information.from_json(body))\n\n return result\n\n except Exception as ex:\n logger.error(ex)\n raise ex\n\n def get_users(self):\n try:\n headers = {\n 'X-API-KEY': self._options.api_key\n }\n\n req = urllib.request.Request(\n self._options.get_users_url, headers=headers)\n\n with urllib.request.urlopen(req) as res:\n body = res.read()\n result = ApiResult(res.code, User.from_json(body))\n\n return result\n\n except Exception as ex:\n logger.error(ex)\n raise ex\n\n def add_time_record(self, time_record):\n try:\n body = time_record.to_json()\n headers = {\n 'Content-Type': 'application/json',\n 'X-API-KEY': self._options.api_key\n }\n\n method = 'POST'\n\n req = urllib.request.Request(\n self._options.add_time_record_url, data=body, method=method, headers=headers)\n\n with urllib.request.urlopen(req) as res:\n body = res.read()\n return AddUserRecordResult.from_json(body)\n\n except Exception as ex:\n logger.error(ex)\n raise ex\n\n\nclass ApiOptions(object):\n\n def __init__(self):\n self.api_key = None\n self.get_information_url = None\n self.get_users_url = None\n self.add_time_record_url = None\n\n\nclass ApiResult(object):\n\n def __init__(self, code, value=None):\n self.code = code\n self.value = value\n\n def is_ok(self):\n return self.code == 200\n\n\nclass AddUserRecordResult(object):\n\n def __init__(self, status):\n self.status = status\n\n def is_ok(self):\n return self.status == ADD_RECORD_STATUS_OK\n\n def is_not_found(self):\n return self.status == ADD_RECORD_STATUS_USER_NOT_FOUND\n\n @staticmethod\n def from_json(data):\n json_data = json.loads(data)\n\n return AddUserRecordResult(json_data['status'])\n","sub_path":"api/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"652488640","text":"############################################## IMPORTS ##############################################\n\n# GENERAL\nfrom flask.views import MethodView\nfrom flask_jwt_extended import jwt_required, fresh_jwt_required\nfrom flask_smorest import Blueprint\nimport marshmallow as ma\n\n# MODELS\nfrom app.main.model.group_model import GroupSchema\nfrom app.main.model.utils import GeneralArgumentSchemas\n\n# SERVICES\nfrom app.main.service.group_service import GroupService\n\n# UTILS\nfrom app.main.controller.utils import 
ErrorDocs, AccessHandler, JWTHandler\n\n############################################## ROUTING ##############################################\n\ngrp_bp = Blueprint('group', 'group', url_prefix='/group', description='Group related operations')\n\ngrp_schema = GroupSchema()\ngrps_schema = GroupSchema(many=True)\ngrp_update_schema = GroupSchema(partial=True)\n\n\n@grp_bp.route('/all')\nclass GroupsAdmin(MethodView):\n \"\"\"\n Methods:\n - GET: get all groups\n \"\"\"\n\n ################################ GET GROUPS ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('Admin')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401]))\n @grp_bp.response(grps_schema, code=200, description='Successfully retrieved groups.')\n def get(self):\n \"\"\"List all groups.\"\"\"\n return GroupService.read_all()\n\n\n@grp_bp.route('/by-user')\nclass GroupsUser(MethodView):\n \"\"\"\n Methods:\n - GET: get all groups of a user\n \"\"\"\n\n ################################ GET GROUPS ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('User')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401]))\n @grp_bp.arguments(GeneralArgumentSchemas.UserIdSchema(), location='query')\n @grp_bp.response(grps_schema, code=200, description='Successfully retrieved groups.')\n def get(self, data):\n \"\"\"List all groups of a user.\"\"\"\n return GroupService.read_all_by_user(data=data)\n\n\n@grp_bp.route('/')\nclass Group(MethodView):\n \"\"\"\n Methods:\n - POST: create a group for a user\n \"\"\"\n\n ################################ POST GROUP ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('User')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401, 409, 422]))\n @grp_bp.arguments(grp_schema, location='json')\n @grp_bp.response(grp_schema, code=201, description='Successfully created group.')\n def post(self, data):\n \"\"\"Create a new group for a user.\"\"\"\n return GroupService.create(data=data)\n\n\n@grp_bp.route('/')\nclass GroupById(MethodView):\n \"\"\"\n Methods:\n - GET: get an group of a user\n - PUT: update an group of a user\n - DELETE: delete an group of a user\n \"\"\"\n\n ################################ GET GROUP ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('User')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401, 404]))\n @grp_bp.response(grp_schema, code=200, description='Successfully retrieved group.')\n def get(self, id):\n \"\"\"Get a group of a user given its id.\"\"\"\n return GroupService.read(id=id)\n\n ################################ PUT GROUP ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('User')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401, 404, 422]))\n @grp_bp.arguments(grp_update_schema, location='json')\n @grp_bp.response(grp_update_schema, code=200, description='Successfully updated group.')\n def put(self, data, id):\n \"\"\"Update a group of a user given its id.\"\"\"\n return GroupService.update(data=data, id=id)\n\n ################################ DELETE GROUP ################################\n @fresh_jwt_required\n @AccessHandler.required_access_level('User')\n @grp_bp.doc(responses=ErrorDocs.get_error_docs(error_codes=[400, 401, 404]))\n @grp_bp.response(grp_schema, code=200, description='Successfully deleted group.')\n def delete(self, id):\n \"\"\"Delete a 
group of a user given its id.\"\"\"\n return GroupService.delete(id=id)\n","sub_path":"app/main/controller/group_controller.py","file_name":"group_controller.py","file_ext":"py","file_size_in_byte":4254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"94072370","text":"# -*- coding: utf-8 -*-\n'''\n第1回\nLEDの点滅を3回繰り返すプログラムを作ってください。\nLEDが3つのバージョンを作ってください。\n'''\n\nimport RPi.GPIO as GPIO\nimport time\n\nPINS=[10, 11, 12]\n\n#毎回するおまじない\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(PINS,GPIO.OUT)\n\nfor x in range(3):\n GPIO.output(PINS,GPIO.HIGH) # ピン10, 11, 12に電流を流す(HIGH)\n time.sleep(2)\n GPIO.output(PINS,GPIO.LOW) # ピン10, 11, 12に流れる電流を0にする(LOW)\n time.sleep(2)\nGPIO.cleanup()\n","sub_path":"RaspberryPi/work1.py","file_name":"work1.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"574205414","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreation date: 06.04.2018\r\n\r\nKatarzyna Filipiuk\r\nkatarzyna.filipiuk@student.uw.edu.pl\r\nUrszula Romaniuk\r\nurszula.romaniuk@student.uw.edu.pl\r\nIzabela Szopa\r\nim.szopa@student.uw.edu.pl\r\n\r\nVersion: 3.0\r\nDate: 19.04.2018\r\n\"\"\"\r\nimport numba\r\nfrom timeit import timeit\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.signal import buttord, butter, filtfilt, hilbert\r\nfrom scipy.io import loadmat\r\n\r\nname = \"AL2442-STA-wires.mat\"\r\nname_of_matlab_matrix = \"rfOut_1\"\r\naperture = 64\r\nf0 = 5.5e6\r\nfs = 50e6\r\npitch = 0.21e-3\r\nc = 1490\r\n\r\ntemp = loadmat(name)\r\ndata = np.array(temp[name_of_matlab_matrix])\r\nno_transducers = data.shape[0]\r\nno_samples = data.shape[1]\r\nno_events = data.shape[2]\r\ndepth = no_samples * c / (2 * fs)\r\ndx = depth / no_samples\r\n\r\n\r\n#@numba.jit\r\ndef _highpass(matrix, wp=6e4, ws=1e4, gpass=3, gstop=20):\r\n \"\"\"\r\n Filtering out low frequencies of signal\r\n :param wp: Passband edge frequency [Hz]\r\n :param ws: Stopband edge frequency [Hz]\r\n :param gpass: The maximum loss in the passband [dB]\r\n :param gstop: The minimum attenuation in the stopband [dB]\r\n :return: filtered matrix\r\n \"\"\"\r\n\r\n n, wn = buttord(wp * 2. / fs, ws * 2. 
/ fs, gpass, gstop)\r\n b, a = butter(n, wn, btype='high')\r\n return filtfilt(b, a, matrix, axis=1)\r\n\r\n\r\n#@numba.jit\r\ndef _hilbert_transform(matrix):\r\n \"\"\"\r\n Calculating an envelope of a RF signal\r\n :param matrix: data which hilbert transform should be calculated\r\n :return: hilbert transform of a given matrix\r\n \"\"\"\r\n\r\n return np.abs(hilbert(matrix, 2 * no_samples,\r\n axis=1))[:, :no_samples]\r\n\r\n\r\n#@numba.jit\r\ndef _delay(column_distance, h):\r\n \"\"\"\r\n Calculate delay for one transducer in aperture\r\n :param column_distance: Distance between middle column of an aperture \r\n and receiver column\r\n :param h: Depth of a receiver focal point\r\n :return: List of delays in samples for every transducer in an aperture\r\n \"\"\"\r\n\r\n delay = (np.sqrt(h ** 2 + (column_distance * pitch) ** 2) - h) / \\\r\n c * fs\r\n delay = int(np.round((-1) * delay, 0))\r\n return delay\r\n\r\n\r\n#@numba.jit\r\ndef _generate_delays_profile(r=16):\r\n \"\"\"\r\n Calculate delays profile for all transducers in aperture\r\n :param r: Distance in samples between emission of a wave form a first\r\n transducer of an aperture and emission of a wave form a \r\n middle transducer\r\n :return: List of delays in samples for every transducer in an aperture\r\n \"\"\"\r\n\r\n t_max = r / fs\r\n focal_point = ((aperture * pitch / 2.) ** 2 - t_max ** 2 * \\\r\n c ** 2) / (2 * c * t_max)\r\n\r\n delay_profile = np.zeros((aperture), dtype=int)\r\n for i in range(aperture):\r\n column_distance = np.abs(aperture / 2. - i)\r\n delay_profile[i] = _delay(column_distance,\r\n focal_point)\r\n return delay_profile\r\n\r\n\r\n#@numba.jit\r\ndef _bfr(matrix):\r\n \"\"\"\r\n Calculate reconstruction of a USG data\r\n :param matrix: filtered USG data\r\n :return: reconstruction of a USG data\r\n \"\"\"\r\n\r\n delay_profile = _generate_delays_profile()\r\n\r\n reconstruction = np.zeros((no_events - aperture,\r\n no_samples))\r\n half_aperture = aperture // 2\r\n\r\n for i in range(half_aperture, no_events - half_aperture):\r\n temp = matrix[i - half_aperture: i + half_aperture, :, i]\r\n for j in range(aperture):\r\n temp[j, :] = np.roll(temp[j, :], delay_profile[j])\r\n reconstruction[i - half_aperture, :] = np.sum(temp, axis=0)\r\n reconstruction_envelope = _hilbert_transform(reconstruction)\r\n return reconstruction_envelope\r\n\r\n\r\n#@numba.jit\r\ndef _db_conversion(matrix):\r\n \"\"\"\r\n Converting a matrix to log scale\r\n :param matrix: data which should be converted to log scale\r\n :return: a given matrix in log scale\r\n \"\"\"\r\n\r\n norm = np.max(matrix)\r\n return 20 * np.log10(matrix / norm)\r\n\r\n\r\n#@numba.jit\r\ndef _plot_reconstruction(matrix, from_sample=300, to_sample=-300,\r\n cutoff=-50, from_transducer=0, to_transducer=0):\r\n \"\"\"\r\n Plot reconstruction of a USG signal\r\n :param matrix:\r\n :param from_sample:\r\n :param cutoff:\r\n :return:\r\n \"\"\"\r\n\r\n plt.figure()\r\n plt.imshow(matrix[:, from_sample:to_sample].T, cmap=\"Greys_r\",\r\n interpolation='bilinear', vmin=cutoff, vmax=0,\r\n extent=[(0 + from_transducer) * pitch * 100,\r\n pitch * (no_transducers + to_transducer) \\\r\n * 100, (no_samples + to_sample) * dx * \\\r\n 100, from_sample * dx * 100])\r\n plt.title(\"Reconstruction\", fontsize=24)\r\n plt.xlabel(\"Horizon [cm]\", fontsize=20)\r\n plt.ylabel(\"Depth [cm]\", fontsize=20)\r\n plt.colorbar()\r\n plt.subplots_adjust(left=0.0, right=0.86)\r\n\r\n\r\ndata_centered_around_0 = _highpass(data)\r\nbfr = _bfr(data_centered_around_0)\r\nbfr = 
_db_conversion(bfr)\r\n_plot_reconstruction(bfr, cutoff=-40,\r\n from_transducer=aperture // 2,\r\n to_transducer=-(aperture // 2))\r\nplt.show()\r\n","sub_path":"BFR.py","file_name":"BFR.py","file_ext":"py","file_size_in_byte":5174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"462627590","text":"import json\nimport uuid\nimport time\nimport os.path\nfrom copy import deepcopy\n\nfrom config import *\nconfig = Config(os.path.join(\"..\",\"www\",\"config.json\"))\n\nlpri = len(config[\"priorities\"])-1\n\ndef _calcpriority(priority, time):\n\tfor i in config[\"priority_thresh\"]:\n\t\tif time >= i:\n\t\t\tpriority -= 1\n\treturn max(priority, 0)\n\ndef _concatlist(lists):\n\tmasterlist = []\n\tfor i in lists: \n\t\tfor j in i:\n\t\t\tmasterlist.append(j)\n\treturn masterlist\n\ndef _fillblanks(odict, adict):\n\treturn dict(adict, **odict)\n\nclass Queue:\n\trequiredtags = {\n\t\t\"priority\":0,\n\t\t\"name\":\"DEFAULT\",\n\t\t\"material\":\"o\", \n\t\t\"esttime\": 0, \n\t\t\"coachmodified\": False, \n\t\t\"uuid\": \"this object is so old that it should be deleted\", \n\t\t\"sid\": \"this object is so old that it should be deleted\", \n\t\t\"time\": 2**30,\n\t\t\"totaldiff\": 0\n\t}\n\tdef __init__(self):\n\t\tself.queue = [[] for i in config[\"priorities\"]]\n\n\t@classmethod\n\tdef load(cls, fileobj):\n\t\tjdata = json.load(fileobj)\n\t\tself = cls()\n\t\tif type(jdata) is not list:\n\t\t\treturn self\n\t\tif len(jdata) != len(config[\"priorities\"]):\n\t\t\tif len(jdata) > len(config[\"priorities\"]):\n\t\t\t\tself.queue = jdata[:len(config[\"priorities\"])]\n\t\t\telif len(jdata) < len(config[\"priorities\"]):\n\t\t\t\tself.queue = jdata + [[] for i in range(len(config[\"priorities\"])-len(jdata))]\n\t\telse:\n\t\t\tself.queue = jdata\n\t\tfor ii in range(len(self.queue)):\n\t\t\ti = self.queue[ii]\n\t\t\tfor item in i:\n\t\t\t\titem[\"priority\"] = ii\n\t\t\t\titem = _fillblanks(item, Queue.requiredtags)\n\t\treturn self\n\n\tdef metapriority(self):\n\t\tfor i in self.queue:\n\t\t\tfor item in i:\n\t\t\t\tif time.time()-item[\"time\"] > (config[\"metabump\"] + config[\"metabumpmult\"]*item[\"priority\"]) and config[\"metabump\"]:\n\t\t\t\t\tpri = item[\"priority\"]-1\n\t\t\t\t\tif pri < 0:\n\t\t\t\t\t\titem[\"time\"] = time.time()\n\t\t\t\t\t\tcontinue\n\t\t\t\t\ti.remove(item)\n\t\t\t\t\titem[\"time\"] += config[\"metabump\"] + config[\"metabumpmult\"]*item[\"priority\"]\n\t\t\t\t\titem[\"priority\"] = pri\n\t\t\t\t\tself.queue[pri].append(item)\n\n\n\tdef append(self, **kwargs):\n\t\targs, authstate, sid = kwargs[\"args\"], kwargs[\"authstate\"], kwargs[\"sid\"]\n\t\tname, priority, esttime, material = args[0], args[1], args[2], args[3]\n\t\tif not name or material == \"N/A\" or priority == -1:\n\t\t\treturn\n\t\tbounds = config[\"length_bounds\"]\n\t\tif bounds[0] >= 0:\n\t\t\testtime = max(bounds[0], esttime)\n\t\tif bounds[1] >= 0:\n\t\t\testtime = min(bounds[1], esttime)\n\n\t\tif not config[\"priority_selection\"] and not authstate:\n\t\t\tpriority = min(lpri-config[\"default_priority\"], priority)\n\n\t\tif config[\"recalc_priority\"]:\n\t\t\tpriority = _calcpriority(priority, esttime)\n\n\t\tinqueue = False\n\t\tfor i in self.queue:\n\t\t\tfor j in i: \n\t\t\t\tif name.lower() == j[\"name\"].lower() and (material == j[\"material\"] or not config[\"allow_multiple_materials\"]):\n\t\t\t\t\tinqueue = True\n\t\t\t\t\tbreak\n\n\t\tif config[\"recapitalize\"]:\n\t\t\tname = name.title()\n\n\t\tif not inqueue or 
config[\"allow_multiples\"]:\n\t\t\tself.queue[lpri-priority].append({\n\t\t\t\t\"totaldiff\": 0,\n\t\t\t\t\"priority\": lpri-priority,\n\t\t\t\t\"name\": name.strip().rstrip(),\n\t\t\t\t\"material\": material,\n\t\t\t\t\"esttime\": esttime,\n\t\t\t\t\"coachmodified\": authstate,\n\t\t\t\t\"uuid\": str(uuid.uuid1()),\n\t\t\t\t\"sid\": sid,\n\t\t\t\t\"time\": time.time()\n\t\t\t})\n\n\tdef remove(self, **kwargs):\n\t\targs = kwargs[\"args\"]\n\t\tu = args[0]\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\ti.remove(j)\n\tdef passoff(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu = args[0]\n\t\toindex = -1\n\t\tmasterqueue = _concatlist(self.queue)\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\toindex = masterqueue.index(j)\n\t\tif oindex == -1: return\n\t\tif oindex == len(masterqueue)-1: return\n\t\ttarget = masterqueue[oindex]\n\t\tfor ii in range(len(self.queue)):\n\t\t\ti = self.queue[ii]\n\t\t\tif target in i:\n\t\t\t\ti.remove(target)\n\t\tend = masterqueue[oindex+1]\n\t\tfor ii in range(len(self.queue)):\n\t\t\ti = self.queue[ii]\n\t\t\tif end in i:\n\t\t\t\ttindex = i.index(end)\n\t\t\t\ttpri = lpri-ii\n\t\ttarget[\"time\"] = time.time()\n\t\ttarget[\"priority\"] = lpri-tpri\n\t\tif authstate: target[\"coachmodified\"] = True\n\t\tself.queue[lpri-tpri].insert(tindex+1, target)\n\n\tdef relmove(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu, nindex = args[0], args[1]\n\t\ttarget = None\n\t\tmasterqueue = _concatlist(self.queue)\n\t\tif len(masterqueue) <= 1: return\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\ttarget = deepcopy(j)\n\t\t\t\t\ti.remove(j)\n\t\tif not target: return\n\n\t\tmasterqueue = _concatlist(self.queue)\n\n\t\tif nindex <= 0:\n\t\t\tbpri = masterqueue[0][\"priority\"]\n\t\t\tbind = 0\n\t\telif nindex >= len(masterqueue):\n\t\t\tbpri = masterqueue[-1][\"priority\"]\n\t\t\tbind = len(self.queue[bpri])\n\t\telse:\n\t\t\tbtarget = masterqueue[nindex-1]\n\t\t\tbpri = btarget[\"priority\"]\n\t\t\tbind = self.queue[bpri].index(btarget)+1\n\n\t\ttarget[\"time\"] = time.time()\n\t\ttarget[\"priority\"] = bpri\n\t\tif authstate: target[\"coachmodified\"] = True\n\t\tself.queue[bpri].insert(bind, target)\n\n\n\tdef move(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu, ni, np = args[0], args[1], args[2]\n\t\ttarget = None\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\ttarget = deepcopy(j)\n\t\t\t\t\ti.remove(j)\n\t\tif not target: return\n\t\ttarget[\"time\"] = time.time()\n\t\tif authstate: target[\"coachmodified\"] = True\n\t\ttarget[\"priority\"] = lpri-np\n\t\tself.queue[lpri-np].insert(ni, target)\n\n\tdef increment(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu = args[0]\n\t\tindex = -1\n\t\tpriority = -1\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\tindex = i.index(j)\n\t\t\t\t\tpriority = lpri-self.queue.index(i)\n\t\tif index == -1 and priority == -1: return\n\t\tif priority == lpri and not index:\n\t\t\treturn\n\t\titem = self.queue[lpri-priority].pop(index)\n\t\tindex -= 1\n\t\tif index < 0:\n\t\t\tpriority += 1\n\t\t\tif priority > lpri:\n\t\t\t\tindex = 0\n\t\t\t\tpriority = lpri\n\t\t\telse:\n\t\t\t\tindex = len(self.queue[max(lpri-priority, 0)])\n\t\titem[\"time\"] = time.time()\n\t\tif authstate: 
item[\"coachmodified\"] = True\n\t\titem[\"priority\"] = lpri-priority\n\t\tself.queue[max(lpri-priority, 0)].insert(min(index, len(self.queue[max(lpri-priority, 0)])),item)\n\n\tdef decrement(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu = args[0]\n\t\tindex = -1\n\t\tpriority = -1\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\tindex = i.index(j)\n\t\t\t\t\tpriority = lpri-self.queue.index(i)\n\t\tif index == -1 and priority == -1: return\n\t\tif not priority and len(self.queue[lpri-priority]) < index:\n\t\t\treturn\n\t\titem = self.queue[lpri-priority].pop(index)\n\t\tindex += 1\n\t\tif len(self.queue[lpri-priority]) < index:\n\t\t\tpriority -= 1\n\t\t\tif priority < 0:\n\t\t\t\tindex = len(self.queue[min(lpri-priority, lpri)])\n\t\t\t\tpriority = 0\n\t\t\telse:\n\t\t\t\tindex = 0\n\t\titem[\"time\"] = time.time()\n\t\tif authstate: item[\"coachmodified\"] = True\n\t\titem[\"priority\"] = lpri-priority\n\t\tself.queue[min(lpri-priority, lpri)].insert(max(index, 0),item)\n\n\tdef attr(self, **kwargs):\n\t\targs, authstate = kwargs[\"args\"], kwargs[\"authstate\"]\n\t\tu, attrname, value = args[0], args[1], args[2]\n\t\tif attrname not in self.requiredtags or attrname in [\"uuid\", \"sid\", \"time\", \"totaldiff\"]:\n\t\t\treturn\n\t\tif attrname not in config[\"attr_edit_perms\"] and not authstate:\n\t\t\treturn\n\t\tindex = -1\n\t\tpriority = -1\n\t\tfor i in self.queue:\n\t\t\tfor j in i:\n\t\t\t\tif j[\"uuid\"] == u:\n\t\t\t\t\tindex = i.index(j)\n\t\t\t\t\tpriority = lpri-self.queue.index(i)\n\t\tif index == -1 and priority == -1: return\n\t\titem = self.queue[lpri-priority][index]\n\t\tif attrname not in config[\"attr_edit_perms\"] and attrname != \"coachmodified\":\n\t\t\titem[\"coachmodified\"] = True\n\n\t\tif attrname == \"name\": item[\"name\"] = str(value).strip().rstrip()\n\t\telif attrname == \"material\" and value in config[\"materials\"]: item[\"material\"] = value\n\t\telif attrname == \"esttime\":\n\t\t\tbounds = config[\"length_bounds\"]\n\t\t\tif bounds[0] >= 0:\n\t\t\t\tvalue = max(bounds[0], value)\n\t\t\tif bounds[1] >= 0:\n\t\t\t\tvalue = min(bounds[1], value)\n\t\t\tprevtime = item[\"esttime\"]\n\t\t\titem[\"esttime\"] = value\n\t\t\tif config[\"recalc_priority\"] and not authstate:\n\t\t\t\tnewpriority = priority*1\n\t\t\t\titem[\"totaldiff\"] += value-prevtime\n\t\t\t\titem[\"totaldiff\"] = max(item[\"totaldiff\"], 0)\n\t\t\t\twhile item[\"totaldiff\"] >= 10: \n\t\t\t\t\tnewpriority -= 1\n\t\t\t\t\titem[\"totaldiff\"] -= 10\n\n\t\t\t\tnewpriority = max(newpriority, 0)\n\t\t\t\titem[\"priority\"] = lpri-newpriority\n\t\t\t\tself.queue[lpri-priority].pop(index)\n\t\t\t\tself.queue[lpri-newpriority].append(item)\n\t\t\telif authstate and config[\"recalc_priority\"]:\n\t\t\t\titem[\"coachmodified\"] = True\n\t\telif attrname == \"coachmodified\": item[\"coachmodified\"] = bool(value)\n\n","sub_path":"scripts/laserqueue.py","file_name":"laserqueue.py","file_ext":"py","file_size_in_byte":8412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"506641006","text":"from __future__ import division\nfrom __future__ import print_function\nimport argparse\nimport os\nimport natsort\nimport numpy as np\nimport cv2\nfrom pathlib import Path\nimport torch\nfrom torch.utils import data\nimport random\n\nclass StaticRandomCrop(object):\n \"\"\"\n Helper function for random spatial crop\n \"\"\"\n def __init__(self, size, image_shape):\n h, w = 
image_shape\n self.th, self.tw = size\n self.h1 = torch.randint(0, h - self.th + 1, (1,)).item()\n self.w1 = torch.randint(0, w - self.tw + 1, (1,)).item()\n\n def __call__(self, img):\n return img[self.h1:(self.h1 + self.th), self.w1:(self.w1 + self.tw), :]\n\ndef trainval_split(data_dir):\n \"\"\"\n get train/valid filenames\n \"\"\"\n\n train_file_names = []\n val_file_names = []\n full_train=[]\n\n for idx in range(1, 9):\n # sort according to filename\n filenames = (Path(data_dir) / ('instrument_dataset_' + str(idx)) / 'images').glob('*')\n filenames = list(sorted(filenames))\n # file_tmp = []\n # for i in range(len(filenames)):\n # if i % 10 == 0:\n # file_tmp.append(filenames[i])\n # set folds[fold] as validation set\n if idx in [1]:\n val_file_names += filenames\n else:\n train_file_names += filenames\n full_train += filenames\n\n return train_file_names, val_file_names\n\nclass FrameLoader(data.Dataset):\n def __init__(self, args, filename, is_training = False, transform=None, back_propagation = False):\n\n self.is_training = is_training\n self.transform = transform\n self.chsize = 3\n\n # carry over command line arguments\n assert args.sequence_length > 1, 'sequence length must be > 1'\n self.sequence_length = args.sequence_length\n\n assert args.sample_rate > 0, 'sample rate must be > 0'\n self.sample_rate = args.sample_rate\n\n self.crop_size = args.crop_size\n self.start_index = args.start_index\n self.stride = args.stride\n\n if self.is_training:\n self.start_index = 0\n\n # collect, colors, motion vectors, and depth\n self.filename = filename\n self.back_propagation = back_propagation\n\n\n def __len__(self):\n return len(self.filename)\n\n def __getitem__(self, index):\n # adjust index\n if self.is_training:\n if not self.back_propagation:\n for i in range(self.sequence_length):\n if (index + i + 1) % 225 ==0:\n index = index - self.sequence_length\n input_files = [str(self.filename[index + offset]) for offset in range(self.sequence_length + 1)]\n else:\n for i in range(self.sequence_length):\n if (index - i) % 225 == 0:\n index = index + self.sequence_length\n input_files = [str(self.filename[index - offset]) for offset in range(self.sequence_length + 1)]\n else:\n input_files = [str(self.filename[index + offset]) for offset in range(self.sequence_length + 1)]\n\n # reverse image order with p=0.5\n if self.is_training and torch.randint(0, 2, (1,)).item():\n input_files = input_files[::-1]\n\n # images = [imageio.imread(imfile)[..., :self.chsize] for imfile in input_files]\n images = [cv2.imread(str(imfile))[..., :self.chsize] for imfile in input_files]\n input_shape = images[0].shape[:2]\n\n\n if self.is_training:\n imgs = []\n for img in images:\n im = cv2.resize(img, (640, 512))\n imgs.append(im)\n cropper = StaticRandomCrop(self.crop_size, (512, 640))\n images = map(cropper, imgs)\n\n # Pad images along height and width to fit them evenly into models.\n height, width = 512, 640\n if (height % self.stride) != 0:\n padded_height = (height // self.stride + 1) * self.stride\n images = [np.pad(im, ((0, padded_height - height), (0, 0), (0, 0)), 'reflect') for im in images]\n\n if (width % self.stride) != 0:\n padded_width = (width // self.stride + 1) * self.stride\n images = [np.pad(im, ((0, 0), (0, padded_width - width), (0, 0)), 'reflect') for im in images]\n\n else:\n height, width = input_shape\n if (height % self.stride) != 0:\n padded_height = (height // self.stride + 1) * self.stride\n images = [np.pad(im, ((0, padded_height - height), (0, 0), (0, 0)), 'reflect') for 
im in images]\n\n            if (width % self.stride) != 0:\n                padded_width = (width // self.stride + 1) * self.stride\n                images = [np.pad(im, ((0, 0), (0, padded_width - width), (0, 0)), 'reflect') for im in images]\n\n\n        input_images = [torch.from_numpy(im.transpose(2, 0, 1)).float() for im in images]\n\n        output_dict = {\n            'image': input_images, 'ishape': input_shape, 'input_files': input_files\n        }\n\n        return output_dict\n\n\n\n\nif __name__ == '__main__':\n    root = '../../../data/cropped_train'\n    parser = argparse.ArgumentParser(description='A PyTorch Implementation of SDCNet2D')\n    parser.add_argument('--sequence_length', default=2, type=int, metavar=\"SEQUENCE_LENGTH\",\n                        help='number of interpolated frames (default : 7)')\n    parser.add_argument(\"--sample_rate\", type=int, default=1, help=\"step size in looping through datasets\")\n    parser.add_argument('--crop_size', type=int, nargs='+', default=[448, 448], metavar=\"CROP_SIZE\",\n                        help=\"Spatial dimension to crop training samples for training (default : [448, 448])\")\n    parser.add_argument(\"--start_index\", type=int, default=0, metavar=\"START_INDEX\",\n                        help=\"Index to start loading input data (default : 0)\")\n    parser.add_argument('--stride', default=64, type=int,\n                        help='The factor for which padded validation image sizes should be evenly divisible. (default: 64)')\n    train_filenames, valid_filenames = trainval_split(root)\n    args = parser.parse_args()\n    train_dataset = FrameLoader(args, filename=train_filenames, is_training=True)\n    print(train_dataset[0]['image'])","sub_path":"motion_learning/datasets/frame_loader2.py","file_name":"frame_loader2.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"406629314","text":"from simple_pid import PID\nfrom datetime import datetime\n\n\nclass shed():\n\n    def __init__(self, name, settings):\n        self.settings = settings\n        self.request = False\n        self.name = name\n        self.state = \"off\"\n        self.configs = settings['state_settings'] # ['on'], ['off'], ['alarm']\n        self.set_temp = settings['set_temp']\n        self.set_temp_high = self.set_temp + 3\n        self.set_temp_low = self.set_temp - 3\n        self.pid_state = \"off\"\n        self.dependent = settings['dependent'] # List of dependent variables required to be alarm free for operation\n        if \"PID\" in settings:\n            self.p = settings[\"PID\"][\"p\"]\n            self.i = settings[\"PID\"][\"i\"]\n            self.d = settings[\"PID\"][\"d\"]\n            self.pid = PID(self.p, self.i, self.d, self.set_temp)\n            self.pid_valve_hot = settings[\"PID\"][\"valve_control_hot\"]\n            self.pid_valve_cold = settings[\"PID\"][\"valve_control_cold\"]\n            self.pid_control = settings[\"PID\"][\"control\"]\n            self.pid_state = False\n            self.pid.output_limits = (0,10)\n        self.timer_start = datetime.now()\n        self.timer_elapsed = datetime.now() - self.timer_start\n        self.timer_state = 0\n        self.timer_output = \"\"\n\n    def change_request(self, value):\n        self.request = value\n        # print (self.request)\n        self.update_state()\n\n\n    def update_state(self):\n        if self.state != \"alarm\":\n            if self.request == \"true\":\n                self.state = \"on\"\n            if self.request == \"false\":\n                self.state = \"off\"\n        if self.state == \"alarm\":\n            pass # possibly add in function to bring up pop up window to clear alarms?\n\n    def state_monitor(self, active_alarm):\n        count = 0\n        if self.request == True or self.request == \"true\":\n            # print(self.dependent)\n            # print(active_alarm)\n            for item in self.dependent:\n                if \"Gas\" not in item:\n                    if item in active_alarm:\n                        count += 1\n                    else:\n                        pass\n                else:\n                    if item in active_alarm:\n                        count += 100\n                    else:\n                        pass\n            #print(count)\n            if count > 100:\n                self.state = \"alarm\"\n            elif count > 0:\n                self.state = \"out_of_range\"\n            elif count == 0:\n                self.state = \"on\"\n            else:\n                self.state = \"ERROR!\"\n\n        else:\n            self.state = \"off\"\n        #print(self.name, \"state: \", self.state)\n\n\n    def new_state_output(self):\n        return self.configs[self.state]\n\n    def change_set_temp(self, temp_set):\n        self.set_temp = float(temp_set)\n\n    def change_pid(self, newset):\n        self.pid_state = newset\n\n\n    def pid_func(self, SHED_temp_current):\n        output = {}\n        # print(self.pid_state)\n        if self.pid_state == True or self.pid_state == \"true\" or self.pid_state == \"True\":\n            self.pid.setpoint = float(self.set_temp)\n            valve_temp = self.pid(float(SHED_temp_current))\n            print(valve_temp)\n            output[self.pid_valve_hot] = valve_temp\n            print(self.set_temp, SHED_temp_current)\n        print(output)\n        return output\n\n    def timer(self):\n        if self.timer_state == 0:\n            self.timer_start = datetime.now()\n        self.timer_elapsed = datetime.now() - self.timer_start\n        weeks = 0\n        if self.timer_elapsed.days >= 7:\n            weeks = self.timer_elapsed.days // 7\n        days = self.timer_elapsed.days - 7 * weeks\n        hours = self.timer_elapsed.seconds // 3600\n        minutes = self.timer_elapsed.seconds // 60 % 60\n        seconds = self.timer_elapsed.seconds - minutes*60 - hours*3600\n\n        self.timer_output = str(weeks) + \"W \" + str(days) + \"d \" + str(hours) + \"h \" + str(minutes) + \"m \" + str(seconds)\n        return self.timer_output\n\n    def timer_toggle(self):\n        if self.timer_state == 0:\n            self.timer_state = 1\n            self.timer_start = datetime.now()\n        else:\n            self.timer_state = 0\n\n\n\nclass alarm():\n    def __init__(self, name, settings):\n        self.settings = settings\n        self.name = name\n        self.state = settings[\"state\"]\n        self.type = settings[\"limit_type\"]\n        self.limit_high = settings[\"limits\"][\"high\"]\n        self.limit_low = settings[\"limits\"][\"low\"]\n        self.active_config = settings[\"active_config\"]\n\n    def update_state(self, reading, pump_state):\n        if \"Gas\" in self.name:\n            if self.state == 0:\n                if self.type == \"inside\":\n                    if float(reading) > float(self.limit_high) or float(reading) < float(self.limit_low):\n                        self.state = 1\n            elif self.state == 1:\n                pass # Alarm will not automatically reset!\n\n            else:\n                #print(\"pump: \", pump_state)\n                if pump_state == 0:\n                    self.state = 2\n        else:\n            if self.state == 1: ## change this if disabling alarm is required\n                if self.type == \"inside\":\n                    if float(reading) < float(self.limit_high) and float(reading) > float(self.limit_low):\n                        self.state = 0\n                    else:\n                        self.state = 1\n            else:\n                if self.type == \"inside\":\n                    if float(reading) < float(self.limit_high) and float(reading) > float(self.limit_low):\n                        self.state = 0\n                    else:\n                        self.state = 1\n\n\n        #print(\"state: \",self.state)\n    def reset(self):\n        self.state = 0\n\n    def alarm_output(self):\n        return self.active_config\n\n    def change_limit(self, limit, lim_set):\n        \"\"\"\n        limit: name from javascript including \"high_\" or \"low_\" as the prefix\n        lim_set: set limit value entered in web interface\n        \"\"\"\n        if limit == \"low\" and lim_set.isnumeric():\n            self.limit_low = lim_set\n            #print(\"Change alarm great success!\")\n        if limit == \"high\" and lim_set.isnumeric():\n            self.limit_high = lim_set\n","sub_path":"shed.py","file_name":"shed.py","file_ext":"py","file_size_in_byte":6445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378887189","text":"# -*- coding: 
utf-8 -*-\n\"\"\"\n@Time : 2018/3/25 下午2:34\n@Author : Nico\n\"\"\"\n# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2017/12/29 13:31\n@Author : Nico\n\"\"\"\n\nfrom os import path\n\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras import losses, optimizers, activations\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard\nfrom keras.layers import Input, Lambda, Conv2D, Conv2DTranspose\nfrom keras.models import Model\n\nfrom defect_detection.base_model.defect_model import DefectModel, CoreModel\n\n\nclass VAE_FCN(CoreModel):\n def __init__(self, input_size, kernel_num_list, kernel_size,\n z_latent_dim, epsilon_std, kl_alpha,\n learning_rate, n_epoch, batch_size):\n core_name = 'kn%s_ks%d_%s_%s_z%d_kl%g_e%g' % \\\n (kernel_num_list, kernel_size, loss, active, z_latent_dim, kl_alpha, epsilon_std)\n super(VAE_FCN, self).__init__(core_name=core_name)\n self.z_latent_dim = z_latent_dim\n self.epsilon_std = epsilon_std\n self.kl_alpha = kl_alpha\n self.lr = learning_rate\n self.epoch = n_epoch\n self.batch_size = batch_size\n\n x = Input(shape=(input_size, input_size, 1,))\n\n conv1 = Conv2D(kernel_num_list[0], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n conv2 = Conv2D(kernel_num_list[1], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n conv3 = Conv2D(kernel_num_list[2], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n # conv4 = Conv2D(kernel_num[3], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n # conv5 = Conv2D(kernel_num[4], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n # conv4 = Conv2D(64, kernel_size=3, strides=2, padding='same', activation='relu')\n\n conv_mean = Conv2D(z_latent_dim, kernel_size=[5, 5], padding='valid', activation=None)\n conv_var = Conv2D(z_latent_dim, kernel_size=[5, 5], padding='valid', activation=None)\n deconv_z = Conv2DTranspose(kernel_num_list[-1], kernel_size=[5, 5], activation='relu')\n\n deconv1 = Conv2DTranspose(kernel_num_list[-2], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n deconv2 = Conv2DTranspose(kernel_num_list[-3], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n # deconv3 = Conv2DTranspose(kernel_num[-4], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n # deconv4 = Conv2DTranspose(kernel_num[-5], kernel_size=kernel_size, strides=2, padding='same', activation='relu')\n deconv5 = Conv2DTranspose(1, kernel_size=kernel_size, strides=2, padding='same', activation=active_func[active])\n\n conv_x = conv3(conv2(conv1(x)))\n self.encode_mean = conv_mean(conv_x)\n self.encode_logvar = conv_var(conv_x)\n\n z = Lambda(self.reparameterize)([self.encode_mean, self.encode_logvar])\n decoded_h = deconv_z(z)\n # decoded = deconv5(deconv3(deconv2(deconv1(decoded_h))))\n decoded = deconv5(deconv2(deconv1(decoded_h)))\n\n self.vae = Model(inputs=x, outputs=decoded)\n self.encoder = Model(inputs=x, outputs=[self.encode_mean, self.encode_logvar])\n\n z_ = Input(shape=(1, 1, z_latent_dim))\n decoded_h_ = deconv_z(z_)\n\n # decoded_ = deconv5(deconv3(deconv2(deconv1(decoded_h_))))\n decoded_ = deconv5(deconv2(deconv1(decoded_h_)))\n self.generator = Model(inputs=z_, outputs=decoded_)\n\n def reparameterize(self, args):\n mean, logvar = args\n learning_phase = K.learning_phase()\n\n if learning_phase == 1:\n epsilon = K.random_normal(\n shape=(K.shape(mean)[0], 1, 1, self.z_latent_dim),\n mean=0., stddev=self.epsilon_std\n )\n return mean + 
K.exp(0.5 * logvar) * epsilon\n else:\n # return mean + K.exp(0.5 * logvar)\n return mean\n\n def vae_loss(self, x, x_decoded_mean):\n # Because keras did mean(loss, axis=-1) before return loss,\n # which makes loss shape become (N, 28, 28) from (N, 28, 28, 1),\n # we need to sum all dimension error, then loss shape is (N,)\n re_loss = K.sum(loss_func[loss](x, x_decoded_mean), axis=[1, 2])\n # sum all latent dims error, then make loss shape from (N, latent_dim) to (N,)\n kl_loss = -0.5 * K.sum(1 + self.encode_logvar - K.square(self.encode_mean) - K.exp(self.encode_logvar), axis=-1)\n return K.mean(re_loss + kl_loss * self.kl_alpha)\n\n def data_transform_forward(self, data):\n return data[..., None]\n\n def _fit(self, model_dir, model_path, x, y, **kwargs):\n adam = optimizers.Adam(lr=self.lr)\n self.vae.compile(optimizer=adam, loss=self.vae_loss)\n self.vae.summary()\n\n model_tensorboard = TensorBoard(log_dir=path.join(model_dir, 'log/'))\n model_checkpoint = ModelCheckpoint(model_path, verbose=1, save_weights_only=True, save_best_only=True, period=2)\n model_earlystop = EarlyStopping(patience=5, verbose=1)\n\n self.vae.fit(\n x, x,\n shuffle=True,\n verbose=1,\n epochs=self.epoch,\n batch_size=self.batch_size,\n validation_split=0.15,\n callbacks=[model_tensorboard, model_checkpoint, model_earlystop]\n )\n\n def _predict(self, x, **kwargs):\n x = x[..., None]\n return self.vae.predict(x)\n\n def _model_save_func(self, model_path):\n pass\n\n def _model_load_func(self, model_path):\n self.vae.load_weights(model_path)\n\n\nif __name__ == '__main__':\n import os\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '2'\n config = tf.ConfigProto()\n config.gpu_options.per_process_gpu_memory_fraction = 0.3\n K.set_session(tf.Session(config=config))\n\n pattern = 1\n\n # size = (20, 20)\n # stride = (8, 8)\n size = 40\n stride = 15\n hist = False\n scale_factor = 0.5\n\n kernel_num = [32, 32, 64]\n kernel_size = 5\n\n lr = 3e-4\n epoch = 50\n batch_size = 128\n latent_dim = 30\n epsilon_std = 0.5\n kl_alpha = 0.1\n\n eval_patch_size = 10\n threshold = 0.015\n\n loss_func = {'bc': losses.binary_crossentropy, 'mse': losses.mean_squared_error}\n loss = 'mse'\n active_func = {'sigmoid': activations.sigmoid, 'linear': activations.linear}\n active = 'sigmoid'\n\n vae_cnn_core = VAE_FCN(input_size=size, kernel_num_list=kernel_num, kernel_size=kernel_size,\n z_latent_dim=latent_dim, epsilon_std=epsilon_std, kl_alpha=kl_alpha,\n learning_rate=lr, n_epoch=epoch, batch_size=batch_size)\n defect_model = DefectModel(core_model=vae_cnn_core, pattern=pattern, patch_size=size, patch_stride=stride,\n scale_factor=scale_factor, hist_flag=hist, eval_patch_size=eval_patch_size, eval_threshold=threshold)\n\n # defect_model.train_model()\n defect_model.load_model()\n defect_model.test_model()\n","sub_path":"vae/vae_fcn_patch.py","file_name":"vae_fcn_patch.py","file_ext":"py","file_size_in_byte":7032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556167044","text":"class HoverChecker:\n\n def __init__(self, drawable_commit, drawer, pubsub):\n self.drawable_commit = drawable_commit\n self.drawer = drawer\n\n self.is_hovered = False\n\n pubsub.sub('on_mouse_motion', self.react_on_mouse_motion)\n\n def react_on_mouse_motion(self, mouse_motion_event):\n\n if self.drawable_commit.is_out_of_screen():\n return\n\n x, y = mouse_motion_event.pos\n if self.is_inside(x, y):\n if not self.is_hovered:\n self.drawer.invalidate()\n self.is_hovered = True\n else:\n if 
self.is_hovered:\n                self.drawer.invalidate()\n                self.is_hovered = False\n\n    def is_inside(self, x, y):\n        if self.drawable_commit.is_head():\n            return self.inside_rectangle(x, y)\n        else:\n            return self.inside_circle(x, y)\n\n    def inside_rectangle(self, x, y):\n        rect_left, rect_top = self.drawable_commit.left_top()\n        rect_right, rect_bot = self.drawable_commit.right_bottom()\n        return rect_left <= x <= rect_right and rect_top <= y <= rect_bot\n\n    def inside_circle(self, x, y):\n        radius = self.drawable_commit.width() / 2\n        left, top = self.drawable_commit.left_top()\n        center_x, center_y = (left + radius, top + radius)\n        square_dist = (center_x - x) ** 2 + (center_y - y) ** 2\n        return square_dist <= radius ** 2","sub_path":"drawer/hover_checker.py","file_name":"hover_checker.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430967174","text":"from django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\n\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\n\nfrom ..models import Author\nfrom ..serializers import AuthorSerializer\n\nAUTHOR_LIST_URL = reverse('shelf:author-list')\nAUTHOR_ADD_URL = reverse('shelf:author-add')\n\n\ndef detail_url(author_id):\n    return reverse('shelf:author-detail', args=[author_id])\n\n\ndef sample_author(name='Alexander Turgenev', country='Russian'):\n    return Author.objects.create(name=name, country=country)\n\n\nclass PublicAuthorApiTests(TestCase):\n\n    def setUp(self) -> None:\n        self.client = APIClient()\n\n    def test_retrieve_author_list(self):\n        \"\"\"Test retrieving a list of authors\"\"\"\n        Author.objects.create(\n            name='Ivan Bunin',\n            country='USSR'\n        )\n\n        Author.objects.create(\n            name='Alexander Pushkin',\n            country='Russian'\n        )\n\n        res = self.client.get(AUTHOR_LIST_URL)\n\n        authors = Author.objects.all().order_by('id')\n        serializer = AuthorSerializer(authors, many=True)\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, serializer.data)\n\n    def test_author_create_login_required(self):\n        \"\"\"Test that login is required to access the endpoint\"\"\"\n        res = self.client.get(AUTHOR_ADD_URL)\n\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n    def test_author_detail_login_required(self):\n        url = reverse('shelf:author-detail', args=[1])\n        res = self.client.get(url)\n\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateAuthorApiTests(TestCase):\n\n    def setUp(self) -> None:\n        self.client = APIClient()\n        self.user = get_user_model().objects.create_user(\n            'test_user@test.com',\n            'password'\n        )\n        self.client.force_authenticate(user=self.user)\n\n    def test_create_author_successful(self):\n        payload = {\n            'name': 'Ivan Bunin',\n            'country': 'USSR'\n        }\n        res = self.client.post(AUTHOR_ADD_URL, payload)\n\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n        exists = Author.objects.filter(\n            name=payload['name']\n        ).exists()\n\n        self.assertTrue(exists)\n\n    def test_create_author_invalid(self):\n        payload = {\n            'name': '',\n            'country': 'USSR'\n        }\n        res = self.client.post(AUTHOR_ADD_URL, payload)\n\n        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n\n    def test_partial_update_author(self):\n        author = sample_author()\n\n        payload = {\n            'name': 'Ivan Bunin'\n        }\n        url = detail_url(author.id)\n\n        self.client.put(url, payload)\n        author.refresh_from_db()\n        self.assertEqual(author.name, 
payload['name'])\n\n def test_full_update_author(self):\n author = sample_author()\n\n payload = {\n 'name': 'Ivan Bunin',\n 'country': 'USSR'\n }\n url = detail_url(author.id)\n\n self.client.put(url, payload)\n author.refresh_from_db()\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))\n\n def test_remove_author(self):\n author = sample_author()\n url = detail_url(author.id)\n self.client.delete(url)\n\n exists = Author.objects.filter(\n id=author.id\n ).exists()\n\n self.assertFalse(exists)\n","sub_path":"src/shelf/tests/test_author_api.py","file_name":"test_author_api.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"67602576","text":"import sys\nimport unittest\nimport os\n\np = os.path.join(os.getcwd(),'src')\nprint(\"@@@@@@@@@@@@@@@\")\nprint(p)\nsys.path.insert(0,p)\nimport test3\nimport test4\n##import abc\n\n##class DemoTest(unittest.TestCase):\n\n##def runA():\n## print(os.getcwd())\n## return test_a()\n\n\n\n\ndef test_a():\n try:\n assert test3.a() == True\n return 0\n except AssertionError:\n print(\"test_a screwed up!!\")\n return 1\n\ndef test_a_copy():\n try:\n assert test3.a() == True\n return 0\n except AssertionError:\n print(\"test_a screwed up!!\")\n return 1\n\ndef test_c():\n try:\n num = 6\n print(\"in test_c :\",test4.c(6))\n assert test4.c(6) == 6\n return 0\n except:\n print(\"test_c screwed up\")\n return 1\n\n\n\n","sub_path":"test/test_a1.py","file_name":"test_a1.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509769529","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 31 10:36:41 2018\n\n@author: Rainbow\n\"\"\"\nimport numpy as np\n\n# key:name value:index\nent_dic = {}\nrel_dic = {}\ntrain_fact_list = []\ntest_fact_list = []\ndef read(filename, ent_dic, rel_dic, fact_list):\n with open(filename, 'r') as f:\n fact_name = [line.strip(\"\\n\").split(\"\\t\") for line in f.readlines()]\n f = np.array(fact_name)\n # print(facts)\n for fact in f:\n if fact[1] not in rel_dic.keys():\n rel_dic[fact[1]] = len(rel_dic)\n r = rel_dic[fact[1]]\n if fact[0] not in ent_dic.keys():\n ent_dic[fact[0]] = len(ent_dic)\n e1 = ent_dic[fact[0]]\n if fact[2] not in ent_dic.keys():\n ent_dic[fact[2]] = len(ent_dic)\n e2 = ent_dic[fact[2]]\n fact_list.append([e1, e2, r])\n\nread(\"train.txt\",ent_dic, rel_dic, train_fact_list)\nread(\"valid.txt\", ent_dic, rel_dic, train_fact_list)\nread(\"test.txt\", ent_dic, rel_dic, test_fact_list)\ntrain_fact = np.array(train_fact_list, dtype=np.int32)\ntest_fact = np.array(test_fact_list, dtype=np.int32)\n\n# Save in files.\nwith open('./train/Fact.txt', 'w') as f:\n f.write(str(len(train_fact)) + '\\n')\n for fact in train_fact:\n f.write(str(fact[0]) + ' ' + str(fact[1]) + ' ' + str(fact[2]) + '\\n')\nwith open('./test/Fact.txt', 'w') as f:\n f.write(str(len(test_fact)) + '\\n')\n for fact in test_fact:\n f.write(str(fact[0]) + ' ' + str(fact[1]) + ' ' + str(fact[2]) + '\\n')\n\n# test: 224 relations' id.\ntest_pre = np.unique(test_fact[:, 2])\nprint('test predicate num: '+str(len(test_pre)))\nwith open('./test/target_pre.txt', 'w') as f:\n f.write(str(len(test_pre)) + '\\n')\n for p in test_pre:\n f.write(str(p) + '\\n')\nprint(\"Over!\")\n\n# print(len(fact_list))\n# a = list(fact_list[:, 0])\n# b = list(fact_list[:, 1])\n# a.extend(b)\n# print(len( np.unique( a )))\n# print( len( np.unique( train 
)) )\n\n#Entity\n# train 14505\n# test 10348\n# valid 9809\n\n#Relation\n# train 237 \n# test 224\n# valid 223","sub_path":"linkprediction/FB15K237/rawdata/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515686404","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom apex import amp\nimport copy\n\nEPS = 1e-8\n\ndef _clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\nclass muse(nn.Module):\n def __init__(self, N, L, B, H, P, X, R, C, M):\n super(muse, self).__init__()\n self.N, self.L, self.B, self.H, self.P, self.X, self.R, self.C = N, L, B, H, P, X, R, C\n \n self.encoder = Encoder(L, N)\n self.separator = TemporalConvNet(N, B, H, P, X, R, C, M)\n self.decoder = Decoder(N, L)\n\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_normal_(p)\n\n def forward(self, mixture, visual):\n mixture_w = self.encoder(mixture)\n est_a_emb, est_mask = self.separator(mixture_w, visual)\n est_source = self.decoder(mixture_w, est_mask)\n\n # T changed after conv1d in encoder, fix it here\n T_origin = mixture.size(-1)\n T_conv = est_source.size(-1)\n est_source = F.pad(est_source, (0, T_origin - T_conv))\n return est_a_emb, est_source\n\nclass Encoder(nn.Module):\n def __init__(self, L, N):\n super(Encoder, self).__init__()\n self.L, self.N = L, N\n self.conv1d_U = nn.Conv1d(1, N, kernel_size=L, stride=L // 2, bias=False)\n\n def forward(self, mixture):\n mixture = torch.unsqueeze(mixture, 1) # [M, 1, T]\n mixture_w = F.relu(self.conv1d_U(mixture)) # [M, N, K]\n return mixture_w\n\nclass Decoder(nn.Module):\n def __init__(self, N, L):\n super(Decoder, self).__init__()\n self.N, self.L = N, L\n self.basis_signals = nn.Linear(N, L, bias=False)\n\n def forward(self, mixture_w, est_mask):\n est_source = mixture_w * est_mask # [M, N, K]\n est_source = torch.transpose(est_source, 2, 1) # [M, K, N]\n est_source = self.basis_signals(est_source) # [M, K, L]\n est_source = overlap_and_add(est_source, self.L//2) # M x C x T\n return est_source\n\n\nclass TemporalConvNet(nn.Module):\n def __init__(self, N, B, H, P, X, R, C, M):\n super(TemporalConvNet, self).__init__()\n self.C = C\n self.layer_norm = ChannelWiseLayerNorm(N)\n self.bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)\n\n # Audio TCN\n tcn_blocks = []\n tcn_blocks += [nn.Conv1d(B*3, B, 1, bias=False)]\n for x in range(X):\n dilation = 2**x\n padding = (P - 1) * dilation // 2\n tcn_blocks += [TemporalBlock(B, H, P, stride=1,\n padding=padding,\n dilation=dilation)]\n self.tcn = _clones(nn.Sequential(*tcn_blocks), R)\n \n # visual blocks\n ve_blocks = []\n for x in range(5):\n ve_blocks +=[VisualConv1D()]\n self.visual_conv = nn.Sequential(*ve_blocks)\n\n # Audio and visual seprated layers before concatenation\n self.ve_conv1x1 = _clones(nn.Conv1d(512, B, 1, bias=False),R)\n self.ve_conv1x1_SE = _clones(nn.Conv1d(512, B, 1, bias=False),R)\n\n # speaker embedding extraction and classification\n self.se_net=_clones(SpeakerEmbedding(B), R)\n self.audio_linear=_clones(nn.Linear(B, M),R)\n\n # Mask generation layer\n self.mask_conv1x1 = nn.Conv1d(B, N, 1, bias=False)\n\n\n def forward(self, x, visual):\n visual = visual.transpose(1,2)\n visual = self.visual_conv(visual)\n\n x = self.layer_norm(x)\n x = self.bottleneck_conv1x1(x)\n\n mixture = x\n\n batch, B, K = x.size()\n\n est_a_emb=[]\n\n for i in range(len(self.tcn)):\n v = 
self.ve_conv1x1[i](visual)\n v = F.interpolate(v, (32*v.size()[-1]), mode='linear')\n v = F.pad(v,(0,K-v.size()[-1]))\n v_2 = self.ve_conv1x1_SE[i](visual)\n v_2 = F.interpolate(v_2, (32*v_2.size()[-1]), mode='linear')\n v_2 = F.pad(v_2,(0,K-v_2.size()[-1]))\n a = mixture*F.relu(x)\n a = self.se_net[i](torch.cat((a,v_2),1))\n est_a_emb.append(self.audio_linear[i](a.squeeze()))\n a = torch.repeat_interleave(a, repeats=K, dim=2)\n x = torch.cat((a, x, v),1)\n x = self.tcn[i](x)\n \n x = self.mask_conv1x1(x)\n x = F.relu(x)\n est_a_emb = torch.stack(est_a_emb)\n return est_a_emb, x\n\nclass SpeakerEmbedding(nn.Module):\n def __init__(self, B, R=3, H=256):\n super(SpeakerEmbedding, self).__init__()\n self.conv_proj = nn.Conv1d(B*2, B, 1, bias=False)\n Conv_1=nn.Conv1d(B, H, 1, bias=False)\n norm_1=nn.BatchNorm1d(H)\n prelu_1=nn.PReLU()\n Conv_2=nn.Conv1d(H, B, 1, bias=False)\n norm_2=nn.BatchNorm1d(B)\n self.resnet=_clones(nn.Sequential(Conv_1, norm_1,\\\n prelu_1, Conv_2, norm_2), R)\n self.prelu=_clones(nn.PReLU(),R)\n self.maxPool=_clones(nn.AvgPool1d(3),R)\n\n self.conv=nn.Conv1d(B,B,1)\n self.avgPool=nn.AdaptiveAvgPool1d(1)\n\n def forward(self, x):\n x = self.conv_proj(x)\n for i in range(len(self.resnet)):\n residual = x\n x = self.resnet[i](x)\n x = self.prelu[i](x+residual)\n x = self.maxPool[i](x)\n\n x = self.conv(x)\n x = self.avgPool(x)\n\n return x\n\n\n\nclass VisualConv1D(nn.Module):\n def __init__(self):\n super(VisualConv1D, self).__init__()\n relu = nn.ReLU()\n norm_1 = nn.BatchNorm1d(512)\n dsconv = nn.Conv1d(512, 512, 3, stride=1, padding=1,dilation=1, groups=512, bias=False)\n prelu = nn.PReLU()\n norm_2 = nn.BatchNorm1d(512)\n pw_conv = nn.Conv1d(512, 512, 1, bias=False)\n\n self.net = nn.Sequential(relu, norm_1 ,dsconv, prelu, norm_2, pw_conv)\n\n def forward(self, x):\n out = self.net(x)\n return out + x\n\nclass TemporalBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size,\n stride, padding, dilation):\n super(TemporalBlock, self).__init__()\n conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)\n prelu = nn.PReLU()\n norm = GlobalLayerNorm(out_channels)\n dsconv = DepthwiseSeparableConv(out_channels, in_channels, kernel_size,\n stride, padding, dilation)\n # Put together\n self.net = nn.Sequential(conv1x1, prelu, norm, dsconv)\n\n def forward(self, x):\n\n residual = x\n out = self.net(x)\n return out + residual # look like w/o F.relu is better than w/ F.relu\n\n\nclass DepthwiseSeparableConv(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size,\n stride, padding, dilation):\n super(DepthwiseSeparableConv, self).__init__()\n depthwise_conv = nn.Conv1d(in_channels, in_channels, kernel_size,\n stride=stride, padding=padding,\n dilation=dilation, groups=in_channels,\n bias=False)\n\n prelu = nn.PReLU()\n norm = GlobalLayerNorm(in_channels)\n pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False)\n self.net = nn.Sequential(depthwise_conv, prelu, norm,\n pointwise_conv)\n\n def forward(self, x):\n return self.net(x)\n\nclass ChannelWiseLayerNorm(nn.LayerNorm):\n @amp.float_function\n def __init__(self, *args, **kwargs):\n super(ChannelWiseLayerNorm, self).__init__(*args, **kwargs)\n\n @amp.float_function\n def forward(self, x):\n if x.dim() != 3:\n raise RuntimeError(\"{} accept 3D tensor as input\".format(\n self.__name__))\n # N x C x T => N x T x C\n x = torch.transpose(x, 1, 2)\n # LN\n x = super().forward(x)\n # N x C x T => N x T x C\n x = torch.transpose(x, 1, 2)\n return x\n\n\nclass 
GlobalLayerNorm(nn.Module):\n \"\"\"Global Layer Normalization (gLN)\"\"\"\n @amp.float_function\n def __init__(self, channel_size):\n super(GlobalLayerNorm, self).__init__()\n self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]\n self.beta = nn.Parameter(torch.Tensor(1, channel_size,1 )) # [1, N, 1]\n self.reset_parameters()\n\n @amp.float_function\n def reset_parameters(self):\n self.gamma.data.fill_(1)\n self.beta.data.zero_()\n\n @amp.float_function\n def forward(self, y):\n \"\"\"\n Args:\n y: [M, N, K], M is batch size, N is channel size, K is length\n Returns:\n gLN_y: [M, N, K]\n \"\"\"\n # TODO: in torch 1.0, torch.mean() support dim list\n mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) #[M, 1, 1]\n var = (torch.pow(y-mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)\n gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta\n return gLN_y\n\n@amp.float_function\ndef overlap_and_add(signal, frame_step):\n \"\"\"Reconstructs a signal from a framed representation.\n Adds potentially overlapping frames of a signal with shape\n `[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.\n The resulting tensor has shape `[..., output_size]` where\n output_size = (frames - 1) * frame_step + frame_length\n Args:\n signal: A [..., frames, frame_length] Tensor. All dimensions may be unknown, and rank must be at least 2.\n frame_step: An integer denoting overlap offsets. Must be less than or equal to frame_length.\n Returns:\n A Tensor with shape [..., output_size] containing the overlap-added frames of signal's inner-most two dimensions.\n output_size = (frames - 1) * frame_step + frame_length\n Based on https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py\n \"\"\"\n outer_dimensions = signal.size()[:-2]\n frames, frame_length = signal.size()[-2:]\n\n subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor\n subframe_step = frame_step // subframe_length\n subframes_per_frame = frame_length // subframe_length\n output_size = frame_step * (frames - 1) + frame_length\n output_subframes = output_size // subframe_length\n\n subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)\n\n frame = torch.arange(0, output_subframes).unfold(0, subframes_per_frame, subframe_step)\n frame = signal.new_tensor(frame).long() # signal may in GPU or CPU\n frame = frame.contiguous().view(-1)\n\n result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)\n result.index_add_(-2, frame, subframe_signal)\n result = result.view(*outer_dimensions, -1)\n return result\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":10805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32600474","text":"import numpy as np\nimport keras\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers import Conv2D,MaxPooling2D,AveragePooling2D\nfrom keras.layers import Dense,Dropout,Flatten,concatenate\nfrom keras.optimizers import SGD\nfrom keras.callbacks import CSVLogger,TensorBoard\nfrom keras.utils import plot_model\nimport keras.backend.tensorflow_backend as KTF\nimport tensorflow as tf\n\nepochs = 1000\nbatch_size = 16\nlr = 0.01\ndecay = 1e-6\nmomentum = 0.9\n\n##========== data loading ==========##\nanode = np.load(\"../../data/cell_a_MAIKo.npy\")\nanode = anode.reshape(\n (-1,\n anode.shape[1],\n anode.shape[2],\n 
1))\ncross_point = np.concatenate(\n (np.load(\"../../data/point_xv_MAIKo.npy\"),\n np.load(\"../../data/point_xs_MAIKo.npy\")),\n axis=1)\ncross_point = np.concatenate(\n (cross_point[:,0:1],\n cross_point[:,2:3],\n cross_point[:,3:4],\n cross_point[:,5:6]),\n axis=1)\nanode_test = np.load(\"../../data/cell_a_MAIKo_test.npy\")\nanode_test = anode_test.reshape(\n (-1,\n anode_test.shape[1],\n anode_test.shape[2],\n 1))\ncross_point_test = np.concatenate(\n (np.load(\"../../data/point_xv_MAIKo_test.npy\"),\n np.load(\"../../data/point_xs_MAIKo_test.npy\")),\n axis=1)\ncross_point_test = np.concatenate(\n (cross_point_test[:,0:1],\n cross_point_test[:,2:3],\n cross_point_test[:,3:4],\n cross_point_test[:,5:6]),\n axis=1)\n\n##========== tensorboard setup ==========##\nold_session = KTF.get_session()\nsession = tf.Session('')\nKTF.set_session(session)\nKTF.set_learning_phase(1)\n\n##========== model building ==========##\nanode_input = Input(shape=anode[0].shape)\nx0 = MaxPooling2D(pool_size=(4,4))(anode_input)\nx1 = Conv2D(filters=32,kernel_size=(16,16),\n padding=\"same\",activation=\"relu\")(x0)\nx2 = MaxPooling2D(pool_size=(2,2))(x1)\nx3 = Conv2D(filters=32,kernel_size=(8,8),\n padding=\"same\",activation=\"relu\")(x2)\nx4 = MaxPooling2D(pool_size=(2,2))(x3)\nz1 = Conv2D(filters=32,kernel_size=(4,4),\n padding=\"same\",activation=\"relu\")(x4)\nz2 = MaxPooling2D(pool_size=(4,4))(z1)\nx6 = Flatten()(x2)\nx7 = Dense(128,activation=\"sigmoid\")(x6)\nx8 = Dropout(0.5)(x7)\nx9 = Flatten()(x4)\nx10 = concatenate([x9,x8])\nx11 = Dense(128,activation=\"sigmoid\")(x10)\nx12 = Dropout(0.5)(x11)\nz3 = Flatten()(z2)\nz4 = concatenate([x12,z3])\nz5 = Dense(512,activation=\"sigmoid\")(z4)\nz6 = Dropout(0.5)(z5)\noutput = Dense(4,activation=\"relu\")(z6)\n\nmodel = Model(inputs=anode_input,outputs=output)\nsgd = SGD(lr=lr,decay=decay,momentum=momentum,nesterov=True)\nmodel.compile(loss=\"mse\",\n optimizer=sgd)\n\n##========== fitting ==========##\ncsvlogger = CSVLogger(\"detector_res.csv\")\nboard = TensorBoard(log_dir=\"log/\",histogram_freq=1)\nmodel.fit(anode,\n cross_point,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=[anode_test,cross_point_test],\n callbacks=[csvlogger,board])\nmodel.save(\"detector_res.h5\")\n\nKTF.set_session(old_session)\n","sub_path":"keras/pattern_detection/detector_res.py","file_name":"detector_res.py","file_ext":"py","file_size_in_byte":2977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251886982","text":"from fenx.tools import setup_log\nfrom fenx.config import config\nimport logging as _logging\nimport os\nlogger = _logging.getLogger(__name__)\n\n\ndef icon(name, default=False):\n ico = find_icon(name)\n if ico:\n return ico\n if not default:\n return ''\n else:\n find_icon('nofile')\n\n\ndef find_icon(name, debug=False):\n if not name:\n return ''\n if debug:\n print(os.path.abspath(name))\n if os.path.exists(os.path.abspath(name)) and os.path.isfile(os.path.abspath(name)):\n return name\n expanded = config.expand_var(name)\n if debug:\n print(expanded)\n if os.path.exists(expanded) and os.path.isfile(os.path.abspath(expanded)):\n return expanded\n if config.get('ICONS', {}).get(name, ''):\n ico = config.expand_var(config.get('ICONS', {}).get(name, ''))\n if debug:\n print(ico)\n if os.path.exists(ico) and os.path.isfile(os.path.abspath(ico)):\n return ico\n # else:\n # logger.debug('Custom icon %s not found' % name)\n # return ''\n icons_roots = config.get('RESOURCE_ICON_PATH', [])\n for 
icons_root in icons_roots:\n if debug:\n print('>', icons_root)\n for module_icon in [\n os.path.normpath(os.path.join(icons_root, name)),\n os.path.normpath(os.path.join(icons_root, name+'.png')),\n os.path.normpath(os.path.join(icons_root, name+'.ico'))\n ]:\n if debug:\n print(module_icon)\n if os.path.exists(module_icon):\n if debug:\n print('RESULT:', module_icon)\n return module_icon\n return ''\n\n\n","sub_path":"icon_utils.py","file_name":"icon_utils.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"419564468","text":"from rest_framework.routers import DefaultRouter\n\nfrom django.urls import include, path\n\nfrom hotel import views\n\napp_name = 'hotel'\n\nrouter = DefaultRouter()\nrouter.register(r'hotel', views.HotelViewSet)\nrouter.register(r'tax', views.TaxViewSet)\n\nv1_api_urlpatterns = [\n path('', include(router.urls)),\n path('cheapest/', views.Cheapest.as_view(), name='cheapest')\n]","sub_path":"cheapest_hotel/apps/hotel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503192470","text":"# app常用操作 滑动 拖拽操作事件\n# 应用场景:做app有些界面或者按钮需要滑动才会出来\n# 滑动:\n# swipe\n# scroll\n# drag_and_drop\n# 参数信息写好\nimport time\n\nfrom appium import webdriver\ninfo={\n # 操作平台 操作 安卓 苹果 Android不区分大小写,不能写错\n 'platformName':'Android',\n # 版本号 设置 关于平板电脑 版本号\n 'platformVersion':'5.1.1',\n # 设备名 adb devices 检测设备名 可以随意写 不要空 不要中文\n 'deviceName':'127.0.0.1:62001',\n # 包名\n 'appPackage':'com.android.settings',\n # 应用名\n 'appActivity':'com.android.settings.Settings',\n # 不重置\n 'noReset':False\n}\n# 启动程序 Remote(服务器,手机配置信息)\ndriver=webdriver.Remote('http://127.0.0.1:4723/wd/hub',info)\ndriver.implicitly_wait(5)\n\n# swipe 写法 swipe(开始x坐标,开始y坐标,结束x坐标,结束y坐标,持续时间)\n# 产生惯性 持续时间越长,惯性越小\n# driver.swipe(535,1610,579,635)\n# driver.swipe(535,1610,579,635,3000)\n\n# 元素滑动\n# scroll(开始元素,结束元素)\n# scroll滑动:从一个元素滑动到另外一个元素 也有惯性\naddress=driver.find_element_by_xpath('//*[@text=\"位置信息\"]')\nmore=driver.find_element_by_xpath('//*[@text=\"更多\"]')\ndriver.scroll(address,more)\n# drag_and_drop:从一个元素滑动到另外一个元素 没有惯性\n# driver.drag_and_drop(address,more)\n\n# 可以滑动 实现滑动 选择 需不需要惯性 ,选择用坐标 还是用元素\n\n\n\n\n# 等待几秒钟 等待3种方式 强制等待 直男 隐士等待 强迫症 显示等待 正常男\ntime.sleep(3)\n# 关闭\n# driver.quit()","sub_path":"Appium01/class1/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89527025","text":"from exchanges.base import Exchange\n\nclass HitBTC(Exchange):\n\n TICKER_URL = 'https://api.hitbtc.com/api/1/public/%s/ticker'\n UNDERLYING_DICT = {\n 'BTCUSD' : 'BTCUSD',\n 'BTCEUR' : 'BTCEUR',\n 'ETHBTC' : 'ETHBTC',\n 'PAYETH' : 'PAYETH',\n 'SNTETH' : 'SNTETH',\n 'CVCUSDT' : 'CVCUSD',\n 'EOSETH' : 'EOSETH',\n 'EOSBTC' : 'EOSBTC',\n }\n\n @classmethod\n def _quote_extractor(cls, data, underlying, quote):\n return data.get(cls.QUOTE_DICT[quote])\n","sub_path":"exchanges/hitbtc.py","file_name":"hitbtc.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79879474","text":"import os\n\nimport pytest\nimport sqlalchemy as sa\n\nfrom sqlalchemy_continuum.dialects.postgresql import (\n drop_trigger,\n sync_trigger\n)\nfrom tests import (\n get_dns_from_driver,\n get_driver_name,\n QueryPool,\n 
uses_native_versioning\n)\n\n\n@pytest.mark.skipif('not uses_native_versioning()')\nclass TestTriggerSyncing(object):\n def setup_method(self, method):\n driver = os.environ.get('DB', 'sqlite')\n self.driver = get_driver_name(driver)\n self.engine = sa.create_engine(get_dns_from_driver(self.driver))\n self.connection = self.engine.connect()\n if driver == 'postgres-native':\n self.connection.execute('CREATE EXTENSION IF NOT EXISTS hstore')\n\n self.connection.execute(\n 'CREATE TABLE article '\n '(id INT PRIMARY KEY, name VARCHAR(200), content TEXT)'\n )\n self.connection.execute(\n 'CREATE TABLE article_version '\n '(id INT, transaction_id INT, name VARCHAR(200), '\n 'name_mod BOOLEAN, PRIMARY KEY (id, transaction_id))'\n )\n\n def teardown_method(self, method):\n self.connection.execute('DROP TABLE IF EXISTS article')\n self.connection.execute('DROP TABLE IF EXISTS article_version')\n self.engine.dispose()\n self.connection.close()\n\n def test_sync_triggers(self):\n sync_trigger(self.connection, 'article_version')\n assert (\n 'DROP TRIGGER IF EXISTS article_trigger ON \"article\"'\n in QueryPool.queries[-4]\n )\n assert 'DROP FUNCTION ' in QueryPool.queries[-3]\n assert 'CREATE OR REPLACE FUNCTION ' in QueryPool.queries[-2]\n assert 'CREATE TRIGGER ' in QueryPool.queries[-1]\n sync_trigger(self.connection, 'article_version')\n\n def test_drop_triggers(self):\n drop_trigger(self.connection, 'article')\n assert (\n 'DROP TRIGGER IF EXISTS article_trigger ON \"article\"'\n in QueryPool.queries[-2]\n )\n assert 'DROP FUNCTION ' in QueryPool.queries[-1]\n","sub_path":"tests/dialects/test_triggers.py","file_name":"test_triggers.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225084551","text":"from collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Union, TypeVar\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\n\nN = TypeVar(\"N\", int, float, Tensor, np.ndarray)\n\n\nclass _BufferMixin:\n \"\"\"\n The buffer in Trainer is for automatic loading and saving.\n \"\"\"\n\n def __init__(self) -> None:\n self._buffers = OrderedDict()\n\n def _register_buffer(self, name: str, value: Union[str, N]):\n r\"\"\"Adds a persistent buffer to the module.\n \"\"\"\n if \"_buffers\" not in self.__dict__:\n raise AttributeError(\"cannot assign buffer before Module.__init__() call\")\n elif not isinstance(name, str):\n raise TypeError(\n \"buffer name should be a string. 
\" \"Got {}\".format(torch.typename(name))\n )\n elif \".\" in name:\n raise KeyError('buffer name can\\'t contain \".\"')\n elif name == \"\":\n raise KeyError('buffer name can\\'t be empty string \"\"')\n elif hasattr(self, name) and name not in self._buffers:\n raise KeyError(\"attribute '{}' already exists\".format(name))\n else:\n self._buffers[name] = value\n\n def __getattr__(self, name):\n if \"_buffers\" in self.__dict__:\n _buffers = self.__dict__[\"_buffers\"]\n if name in _buffers:\n return _buffers[name]\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n buffers = self.__dict__.get(\"_buffers\")\n if buffers is not None and name in buffers:\n buffers[name] = value\n else:\n object.__setattr__(self, name, value)\n\n def __delattr__(self, name):\n if name in self._buffers:\n del self._buffers[name]\n else:\n object.__delattr__(self, name)\n\n def _buffer_state_dict(self):\n destination = OrderedDict()\n for name, buf in self._buffers.items():\n value = buf\n if isinstance(buf, Tensor):\n value = buf.detach()\n if isinstance(buf, np.ndarray):\n value = deepcopy(buf)\n destination[name] = value\n return destination\n\n def _load_buffer_from_state_dict(\n self, state_dict, prefix, strict, missing_keys, unexpected_keys, error_msgs\n ):\n\n local_name_params = self._buffers.items()\n local_state = {k: v for k, v in local_name_params if v is not None}\n\n for name, param in local_state.items():\n key = prefix + name\n if key in state_dict:\n input_param = state_dict[key]\n # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+\n with torch.no_grad():\n try:\n if isinstance(input_param, Tensor):\n param.copy_(input_param)\n else:\n self._buffers[name] = input_param\n except Exception as ex:\n error_msgs.append(\n 'While copying the parameter named \"{}\", '\n \"an exception occured : {}.\".format(key, ex.args)\n )\n elif strict:\n missing_keys.append(key)\n\n def _load_buffer_state_dict(self, state_dict):\n r\"\"\"\n \"\"\"\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n # copy state_dict so _load_from_state_dict can modify it\n state_dict = state_dict.copy()\n\n def load(module, prefix=\"\"):\n module._load_buffer_from_state_dict(\n state_dict, prefix, True, missing_keys, unexpected_keys, error_msgs\n )\n\n load(self)\n\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n self.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n return missing_keys, unexpected_keys, error_msgs\n","sub_path":"deepclustering2/trainer/_buffer.py","file_name":"_buffer.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"448878664","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.conf.urls import url\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom GoogleTools.models import *\nimport time,datetime\n\n# Create your views here.\nfrom django.http import StreamingHttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.db.models import Q\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\n\ndef index(request):\n if request.method == 'GET':\n download = request.GET.get('download','')\n documents = Tool.objects.filter(type='Google-Document').order_by('-id')\n dataList={}\n if request.GET.get('search'):\n documents = documents.filter(Q(toolFile__contains=request.GET.get('search')))\n 
dataList['search'] = request.GET.get('search')\n        if download:\n            tool = Tool.objects.get(id=int(download))\n            the_file_name = tool.toolFile.path\n            response = StreamingHttpResponse(file_iterator(the_file_name))\n            response['Content-Type'] = 'application/octet-stream'\n            index = str(tool.toolFile.name).rfind('/',0,len(str(tool.toolFile.name))-1)\n            tool_name = tool.toolFile.name\n            if index:\n                tool_name = str(tool.toolFile.name)[index+1:]\n            response['Content-Disposition'] = 'attachment;filename=%s' %tool_name\n            return response\n        documentsPage = pointsPage(request,documents,10)\n        dataList['documentsPage']=documentsPage\n        return render(request,'document.html',dataList)\n\ndef showAutoTool(request):\n    if request.method == 'GET':\n        autoTools = Tool.objects.filter(type='Auto-Tools').order_by('-id')\n        dataList={}\n        if request.GET.get('search'):\n            autoTools = autoTools.filter(Q(toolFile__contains=request.GET.get('search')))\n            dataList['search'] = request.GET.get('search')\n        download = request.GET.get('download','')\n        if download:\n            tool = Tool.objects.get(id=int(download))\n            the_file_name = tool.toolFile.path\n            response = StreamingHttpResponse(file_iterator(the_file_name))\n            response['Content-Type'] = 'application/octet-stream'\n            index = str(tool.toolFile.name).rfind('/',0,len(str(tool.toolFile.name))-1)\n            tool_name = tool.toolFile.name\n            if index:\n                tool_name = str(tool.toolFile.name)[index+1:]\n            response['Content-Disposition'] = 'attachment;filename=%s' %tool_name\n            return response\n        autoToolsPage = pointsPage(request,autoTools,10)\n        dataList['autoToolsPage']=autoToolsPage\n        return render(request,'auto_tools.html',dataList)\n\n\n@csrf_exempt\ndef addTools(request):\n    if request.method=='POST':\n        info = request.POST\n        if info:\n            classification = info['file_classification']\n            if classification:\n                file = request.FILES.get('cts_docFile')\n                t = time.strptime(info['due_time'],\"%Y-%m-%d\")\n                y,m,d = t[0:3]\n                tool = Tool(type=classification,remarks=info['file_note'],toolFile=file,dueTime=datetime.datetime(y,m,d))\n                tool.save()\n    return HttpResponse(\"success\")\n\ndef file_iterator(file_name, chunk_size=3048):\n    with open(file_name, 'rb') as f:\n        while True:\n            c = f.read(chunk_size)\n            if c:\n                yield c\n            else:\n                break\n\ndef pointsPage(request,tools,number):\n    try:\n        page = request.GET.get('page', 1)\n    except PageNotAnInteger:\n        page = 1\n    page1 = request.GET.get('page1', 1)\n    # Provide Paginator with the request object for complete querystring generation\n    p = Paginator(tools, number, request=request)\n    return p.page(page)","sub_path":"GoogleDatas/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"319451604","text":"import time\n\nstart=time.time()\n\nl=[1]*2+[0]*998\nfor x in [2,3,5,7,11,13,17,19,23,29,31]:\n    for y in range(2,1000):\n        if y%x==0:\n            l[y]=1\n    l[x]=0\nl[2]=1\nl[5]=1\n\nd=9\nwhile sum(l)!=999:\n    for x in range(1,1000):\n        if not l[x]:\n            if d%x==0:\n                l[x]=1\n    d=d*10+9\n\nfor x in range(1000):\n    if l[x]==0:\n        S=x\n        break\n\nelapsed=(time.time()-start)\nprint (\"found %s in %s seconds\" % (S,elapsed))\n","sub_path":"26. 
Reciprocal cycles.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"14389097","text":"# NOTE: Must be before we import or call anything that may be synchronous.\nfrom gevent import monkey\n\nmonkey.patch_all()\n\nimport sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../\"))\n\nimport logging\n\n\nbind = \"unix:/tmp/gunicorn_registry.sock\"\nworkers = 1\nworker_class = \"gevent\"\nworker_connections = 30\npythonpath = \".\"\nreload = True\nreload_engine = \"auto\"\n\n\ndef when_ready(server):\n logger = logging.getLogger(__name__)\n logger.debug(\n \"Starting registry gunicorn with %s workers and %s worker class\", workers, worker_class\n )\n","sub_path":"local-dev/gunicorn_registry.py","file_name":"gunicorn_registry.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76205798","text":"import json\nfrom datetime import timedelta, datetime\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom forecast import (\n addMinutes,\n addMonthOfYear,\n add_day_of_week,\n loadModel,\n get_split_indexes,\n)\nfrom forecast_conf import ForecastConfig\nfrom forecast_load_conf import ForecastLoadConfig\nfrom forecast_pv_conf import ForecastPvConfig\nfrom sklearn.externals import joblib\nfrom util import getStepsize, invertScaler, constructTimeStamps\n\nFROM_MEGAWATTHOURS_TO_KILOWATTHOURS = 1000\n\n\nclass NetworkException(Exception):\n pass\n\n\ndef getNinja(filePath, timestamps, offset=timedelta(days=0)):\n with open(filePath, \"r\", encoding=\"utf-8\") as dataFile:\n [dataFile.readline() for i in range(3)]\n data = pd.read_csv(\n dataFile, parse_dates=[\"time\", \"local_time\"], index_col=\"local_time\"\n )\n data = data.loc[\n timestamps[0] + offset : timestamps[-1] + offset + getStepsize(timestamps)\n ]\n origStepsize = getStepsize(data.index)\n wantedStepsize = getStepsize(timestamps)\n if origStepsize > wantedStepsize:\n assert (origStepsize / wantedStepsize).is_integer()\n data = data.resample(wantedStepsize).ffill()\n elif origStepsize < wantedStepsize:\n data = _dropUnfittingValuesAtEndForDownSampling(\n origStepsize, wantedStepsize, timestamps, data\n )\n data = data.resample(wantedStepsize).mean()\n data = data.loc[timestamps[0] + offset : timestamps[-1] + offset]\n\n return data[\"electricity\"]\n\n\ndef getNinjaPvApi(lat, long, timestamps):\n renewNinja = RenewNinja()\n return renewNinja.getPvData(\n lat, long, str(timestamps[0].date()), str(timestamps[-1].date())\n )\n\n\ndef getNinjaWindApi(lat, long, timestamps):\n renewNinja = RenewNinja()\n return renewNinja.getWindData(\n lat, long, str(timestamps[0].date()), str(timestamps[-1].date())\n )\n\n\nclass RenewNinja:\n \"\"\"\n Class to query https://www.renewables.ninja/ API to get pv and wind\n output power based on their dataset + simulation\n\n ...\n\n Attributes\n ----------\n token : API token to access renewables ninja API\n api_base : str\n url of the api of renewables ninja\n s : requests.sessions.Session\n Object to request (package requests)\n\n Methods\n -------\n getPvData(self, lat, long, date_from, date_to, dataset = 'merra2', cap = 1.0, sys_loss = 0.1, track = 0, tilt = 35, azim = 180)\n get the data of the pv from renewables ninja\n \"\"\"\n\n def __init__(self):\n self.token = \"732eaff288b11d478c42381c75173e8e17355fdb\"\n self.api_base = \"https://www.renewables.ninja/api/\"\n self.s = 
requests.session()\n self.s.headers = {\"Authorization\": \"Token \" + self.token}\n\n def __del__(self):\n self.s.close()\n\n def getPvData(\n self,\n lat,\n long,\n date_from,\n date_to,\n dataset=\"merra2\",\n cap=1.0,\n sys_loss=0.1,\n track=0,\n tilt=35,\n azim=180,\n ):\n \"\"\"Request PV power value\n\n Parameters\n ----------\n lat : float\n latitude of the pv\n long : float\n Longitude of the pv\n date_from : str\n format : year-month-day. Starting date of the requested data\n date_to : str\n format : year-month-day. Ending date of the requested data\n dataset : str, optional\n name of the dataset\n cap : float, optional\n capacity of the pv\n sys_loss : float, optional\n system loss of the pv\n track : bool, optional\n presence of a tracking system\n tilt : int, optional\n azim : int, optional\n\n Returns\n -------\n tuple\n 0 : metadata (dict)\n 1 : data (pandas Dataframe)\n \"\"\"\n\n url = self.api_base + \"data/pv\"\n args = {\n \"lat\": lat,\n \"lon\": long,\n \"date_from\": date_from,\n \"date_to\": date_to,\n \"dataset\": dataset,\n \"capacity\": cap,\n \"system_loss\": sys_loss,\n \"tracking\": track,\n \"tilt\": tilt,\n \"azim\": azim,\n \"format\": \"json\",\n }\n r = self.s.get(url, params=args)\n if r.status_code != 200:\n print(r.text)\n raise NetworkException()\n\n # Parse JSON to get a pandas.DataFrame of data and dict of metadata\n parsed_response = json.loads(r.text)\n\n data = pd.read_json(json.dumps(parsed_response[\"data\"]), orient=\"index\")\n metadata = parsed_response[\"metadata\"]\n return metadata, data\n\n def getWindData(\n self,\n lat,\n long,\n date_from,\n date_to,\n cap=1.0,\n height=100,\n turbine=\"Vestas V80 2000\",\n ):\n \"\"\"Request wind power value\n\n Parameters\n ----------\n lat : float\n latitude of the windmill\n long : float\n Longitude of the windmill\n date_from : str\n format : year-month-day. Starting date of the requested data\n date_to : str\n format : year-month-day. 
Ending date of the requested data\n cap : float, optional\n capacity of the windmill\n height : int, optional\n height of the windmill\n turbine : str, optional\n type of the turbine\n\n\n Returns\n -------\n tuple\n 0 : metadata (dict)\n 1 : data (pandas Dataframe)\n \"\"\"\n\n url = self.api_base + \"data/wind\"\n args = {\n \"lat\": lat,\n \"lon\": long,\n \"date_from\": date_from,\n \"date_to\": date_to,\n \"capacity\": cap,\n \"height\": height,\n \"turbine\": turbine,\n \"format\": \"json\",\n }\n r = self.s.get(url, params=args)\n if r.status_code != 200:\n print(r.text)\n raise NetworkException()\n\n # Parse JSON to get a pandas.DataFrame of data and dict of metadata\n parsed_response = json.loads(r.text)\n\n data = pd.read_json(json.dumps(parsed_response[\"data\"]), orient=\"index\")\n metadata = parsed_response[\"metadata\"]\n return metadata, data\n\n\ndef resampleData(data, timestamps, offset=timedelta(days=0)):\n origStepsize = getStepsize(data.index)\n wantedStepsize = getStepsize(timestamps)\n if origStepsize > wantedStepsize:\n assert (origStepsize / wantedStepsize).is_integer()\n data = data.resample(wantedStepsize).ffill()\n elif origStepsize < wantedStepsize:\n data = _dropUnfittingValuesAtEndForDownSampling(\n origStepsize, wantedStepsize, timestamps, data\n )\n data = data.resample(wantedStepsize).first()\n data = data.loc[timestamps[0] + offset : timestamps[-1] + offset]\n return data\n\n\ndef getLoadsData(filePath, timestamps):\n with open(filePath, \"r\", encoding=\"utf-8\") as dataFile:\n data = pd.read_csv(\n dataFile,\n parse_dates=[\"DateTime\"],\n index_col=\"DateTime\",\n sep=\";\",\n decimal=\",\",\n )\n data = data.loc[timestamps[0] : timestamps[-1] + getStepsize(timestamps)]\n data = resampleData(data, timestamps)\n data = data.iloc[:, 0]\n data.loc[data <= 0] = 0\n return data\n\n\ndef dateparserWithoutUTC(x):\n d, h = x.split(\" \")[0], x.split(\" \")[1].split(\"-\")[0]\n return pd.datetime.strptime(d + \" \" + h, \"20%y-%m-%d %H:%M:%S\")\n\n\ndef getPecanstreetData(\n filePath,\n timeHeader,\n dataid,\n column,\n timestamps,\n offset=timedelta(days=0),\n nb_rows=20000,\n):\n with open(filePath, \"r\", encoding=\"utf-8\") as dataFile:\n # TODO: read more rows or split dataid into files\n data = pd.read_csv(\n dataFile,\n parse_dates=[timeHeader],\n date_parser=dateparserWithoutUTC,\n nrows=nb_rows,\n )\n\n data = data[data[\"dataid\"] == int(dataid)]\n pd.to_datetime(data[timeHeader])\n data = data.set_index(timeHeader)\n data = data.sort_index()\n if column == \"grid\":\n ev = data.loc[:, [\"car1\"]]\n ev *= -1\n data = data.loc[:, [column, \"solar\", \"solar2\"]]\n data = pd.concat([data, ev], axis=1)\n else:\n data = data.loc[:, [column]]\n stepsize = getStepsize(timestamps)\n if stepsize < timedelta(minutes=15):\n stepsize = timedelta(hours=0)\n\n data = data.loc[timestamps[0] + offset : timestamps[-1] + offset + stepsize]\n data = resampleData(data, timestamps, offset)\n data = data.sum(axis=1)\n min_data_value = min(data)\n for idx, value in enumerate(data):\n if value < 0:\n data[idx] = 0.0\n\n if min_data_value < 0:\n print(\n \"(non-negativity of data) Values in range [{},0) were set to 0\".format(\n min_data_value\n )\n )\n assert all(i >= 0.0 for i in data)\n return data\n\n\ndef splitPecanstreetData(filePath, timeHeader):\n with open(filePath, \"r\", encoding=\"utf-8\") as dataFile:\n data = pd.read_csv(dataFile, parse_dates=[timeHeader])\n current = 0\n # TODO add for loop and store into new csv files\n dataid = data[\"dataid\"][current]\n 
data = data[data[\"dataid\"] == dataid]\n\n return data\n\n\ndef getPriceData(filePath, timestamps, offset, constantPrice):\n with open(filePath, \"r\", encoding=\"utf-8\") as dataFile:\n data = pd.read_csv(\n dataFile,\n parse_dates=[\"DateTime\"],\n index_col=\"DateTime\",\n sep=\";\",\n decimal=\",\",\n )\n data = data.loc[\n timestamps[0] + offset : timestamps[-1] + offset + getStepsize(timestamps)\n ]\n origStepsize = getStepsize(data.index)\n assert origStepsize == timedelta(hours=1)\n wantedStepsize = getStepsize(timestamps)\n if origStepsize > wantedStepsize:\n assert (origStepsize / wantedStepsize).is_integer()\n data = data.resample(wantedStepsize).asfreq()\n _applyOppositeOfResampleSum(data, timestamps, origStepsize / wantedStepsize)\n elif origStepsize < wantedStepsize:\n data = _dropUnfittingValuesAtEndForDownSampling(\n origStepsize, wantedStepsize, timestamps, data\n )\n data = data.resample(wantedStepsize).sum()\n assert data.shape[1] <= 2\n\n data = data.loc[timestamps[0] + offset : timestamps[-1] + offset]\n return data.iloc[:, 0] / FROM_MEGAWATTHOURS_TO_KILOWATTHOURS + constantPrice / (\n origStepsize / wantedStepsize\n )\n\n\ndef _applyOppositeOfResampleSum(data, timestamps, relation):\n for index in range(len(timestamps)):\n if np.isnan(data.iloc[index, 0]):\n data.iloc[index, 0] = newValue # noqa F821\n else:\n newValue = data.iloc[index, 0] / relation\n data.iloc[index, 0] = newValue\n\n\ndef _dropUnfittingValuesAtEndForDownSampling(\n origStepsize, wantedStepsize, timestamps, data\n):\n relation = _computeIntRelation(wantedStepsize, origStepsize)\n if data.size % relation != 0:\n data = data[: -(data.size % relation)]\n return data\n\n\ndef _computeIntRelation(stepsize1, stepsize2):\n relation = stepsize1 / stepsize2\n assert relation.is_integer(), \"1 stepsize should be a multiple of the other.\"\n return int(relation)\n\n\n# pvValue is at least 3 days\ndef getPredictedPVValue(pvValue, timestamps, delta):\n config_main = ForecastConfig()\n config_pv = ForecastPvConfig(config_main)\n\n config_main.TIMESTAMPS = constructTimeStamps(\n datetime.strptime(config_pv.BEGIN, \"20%y-%m-%d %H:%M:%S\"),\n datetime.strptime(config_pv.END, \"20%y-%m-%d %H:%M:%S\"),\n datetime.strptime(config_pv.STEP_SIZE, \"%H:%M:%S\")\n - datetime.strptime(\"00:00:00\", \"%H:%M:%S\"),\n )\n _, endValidation = get_split_indexes(config_main)\n # we drop the year\n a = datetime.strptime(timestamps[0].strftime(\"%m-%d\"), \"%m-%d\")\n b = datetime.strptime(\n config_main.TIMESTAMPS[endValidation].strftime(\"%m-%d\"), \"%m-%d\"\n )\n assert (a - b).days >= 0\n\n df = addMinutes(pvValue)\n df = addMonthOfYear(df) # , timestamps)\n # datas are normalized\n scaler = joblib.load(config_pv.MODEL_FILE_SC)\n print(scaler.data_max_)\n df = scaler.transform(df)\n\n x = np.empty((len(df) - config_pv.LOOK_BACK, config_pv.LOOK_BACK, df.shape[1]))\n for i in range(len(df) - config_pv.LOOK_BACK):\n x[i] = df[i : i + config_pv.LOOK_BACK, :]\n\n model = loadModel(config_pv)\n res = model.predict(x)\n res = invertScaler(res, scaler)\n\n return res, config_pv.LOOK_BACK, config_pv.OUTPUT_SIZE\n\n\n# loadsData is at least 3 days\ndef getPredictedLoadValue(loadsData, timestamps, timedelta):\n config = ForecastConfig()\n loadConfig = ForecastLoadConfig()\n input_data = addMinutes(loadsData)\n input_data = add_day_of_week(input_data)\n\n config.TIMESTAMPS = constructTimeStamps(\n datetime.strptime(loadConfig.BEGIN, \"20%y-%m-%d %H:%M:%S\"),\n datetime.strptime(loadConfig.END, \"20%y-%m-%d %H:%M:%S\"),\n 
datetime.strptime(loadConfig.STEPSIZE, \"%H:%M:%S\")\n - datetime.strptime(\"00:00:00\", \"%H:%M:%S\"),\n )\n _, endValidation = get_split_indexes(config)\n # we drop the year\n a = datetime.strptime(timestamps[0].strftime(\"%m-%d\"), \"%m-%d\")\n b = datetime.strptime(config.TIMESTAMPS[endValidation].strftime(\"%m-%d\"), \"%m-%d\")\n assert (a - b).days >= 0\n\n for load in loadConfig.APPLIANCES:\n appliance_data = getPecanstreetData(\n loadConfig.DATA_FILE,\n loadConfig.TIME_HEADER,\n loadConfig.DATAID,\n load,\n timestamps,\n timedelta,\n )\n input_data = pd.concat([input_data, appliance_data], axis=1)\n\n scaler = joblib.load(loadConfig.MODEL_FILE_SC)\n input_data = scaler.transform(input_data)\n\n x = np.empty(\n (\n len(input_data) - loadConfig.LOOK_BACK,\n loadConfig.LOOK_BACK,\n input_data.shape[1],\n )\n )\n for i in range(len(input_data) - loadConfig.LOOK_BACK):\n x[i] = input_data[i : i + loadConfig.LOOK_BACK, :]\n\n model = loadModel(loadConfig)\n res = model.predict(x)\n res = invertScaler(res, scaler)\n return res, loadConfig.LOOK_BACK, loadConfig.OUTPUT_SIZE\n","sub_path":"code/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":14813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350784458","text":"print('R for rock\\nP for paper\\nS for scissor\\nFirst player to score 5 points wins.')\n\np1 = 0\np2 = 0\n\nwhile p1 < 5 and p2 < 5:\n\n x = input('Player 1 chooses: ')\n y = input('Player 2 chooses: ')\n\n if x == y:\n print('Draw')\n\n elif (x == 'r' or x == 'R') and (y == 's' or y == 'S'):\n p1 = p1 + 1\n\n elif (x == 'p' or x == 'P') and (y == 'r' or y == 'R'):\n p1 = p1 + 1\n\n elif (x == 's' or x == 'S') and (y == 'p' or y == 'P'):\n p1 = p1 + 1\n\n else:\n p2 = p2 + 1\n\n if (x not in 'rpsRPS') or (y not in 'rpsRPS'):\n print('One player chose invalid option.')\n\nif p1 == 5:\n print('Player 1: ', p1, ', Player 2: ', p2)\n print('Player 1 wins')\nif p2 == 5:\n print('Player 1: ', p1, ', Player 2: ', p2)\n print('Player 2 wins')\n","sub_path":"rockpaperscissor.py","file_name":"rockpaperscissor.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500882750","text":"\n\nfrom xai.brain.wordbase.nouns._sleet import _SLEET\n\n#calss header\nclass _SLEETING(_SLEET, ):\n\tdef __init__(self,): \n\t\t_SLEET.__init__(self)\n\t\tself.name = \"SLEETING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sleet\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sleeting.py","file_name":"_sleeting.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332692955","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\n\nclass NormalizingFlows(nn.Module):\n def __init__(self, transforms, dim=2):\n\n super().__init__()\n if isinstance(transforms, nn.Module):\n self.transforms = nn.ModuleList([transforms, ])\n elif isinstance(transforms, list):\n if not all(isinstance(t, nn.Module) for t in transforms):\n raise ValueError(\"Wrong type of transforms\")\n self.transforms = nn.ModuleList(transforms)\n else:\n raise ValueError(f\"Wrong type of transforms\")\n self.dim = dim\n self.base_dist = MultivariateNormal(torch.zeros(self.dim), torch.eye(self.dim))\n\n def log_prob(self, x):\n\n inv_log_det = 0.0\n for transform in 
reversed(self.transforms):\n z, inv_log_det_jacobian = transform.inverse(x)\n inv_log_det += inv_log_det_jacobian\n x = z\n log_base = self.base_dist.log_prob(x)\n log_prob = (inv_log_det + log_base)\n\n return log_prob\n\n def sample(self, batch_size):\n\n x = self.base_dist.rsample([batch_size])\n log_base = self.base_dist.log_prob(x)\n log_det = 0.0\n for transform in self.transforms:\n x, log_det_jacobian = transform.forward(x)\n log_det += log_det_jacobian\n log_prob = - log_det + log_base\n\n return x, log_prob\n","sub_path":"NormalizingFlows.py","file_name":"NormalizingFlows.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457861265","text":"import re\n\nalphabet=\"1234567890ABCDEFGHIJKLMNOPQRSTUVWSYZ-. *\"\nbars=[\"WNNNW\",\"NWNNW\",\"WWNNN\",\"NNWNW\",\"WNWNN\",\"NWWNN\",\"NNNWW\",\"WNNWN\",\"NWNWN\",\"NNWWN\"]\nspaces=[2,3,4,1]\n#code yj规则1:对尾部为010的编码,去掉10\n#code yj Rule 1: If the end of a code is 010, remove the 10\ncodeyjRule1=r\".*010$\" \n#code yj规则2/3:如果一个编码的尾部为0110,其下一个编码��部为110,则去掉两个11中间的0\n#code yj Rule 2/3: If the end of a code is 0110, and the head of the next code is 110, remove the 0 between two codes \ncodeyjRule2=r\".*0110$\" \ncodeyjRule3=r\"^110.*\"\n\ndef encode39(char): #转化为code39码/convert a char to code 39\n i=alphabet.find(char)\n encodedchar=\"\"\n tspace=0\n for bar in bars[i%10]:\n if bar=='W':\n encodedchar=encodedchar+'11'\n elif bar=='N':\n encodedchar=encodedchar+'1'\n tspace+=1\n if tspace==spaces[int(i/10)]:\n encodedchar=encodedchar+'00'\n else:\n encodedchar=encodedchar+'0'\n return encodedchar\n\ndef code2img(code,codewidth=2,imgheight=50): #将二进制码转换为图片/covert binary code to picture\n imgrow=[]\n for c in code:\n if c=='1':\n for i in range(codewidth):\n imgrow.append(0)\n elif c=='0':\n for i in range(codewidth):\n imgrow.append(255)\n \n bimg=[]\n for i in range(imgheight):\n bimg.append(imgrow)\n\n return bimg\n\ndef codeyjencode(string): #将字符串转化为Code yj编码/convert string to code yj \n uncodedstr=string\n encodedstrList=[]\n for c in uncodedstr:\n if re.match(codeyjRule1,encode39(c))==None:\n encodedstrList.append(encode39(c))\n else:\n encodedstrList.append(encode39(c)[:-2])\n if not len(encodedstrList)==1:\n for i in range(len(encodedstrList)-1):\n if not re.match(codeyjRule2,encodedstrList[i])==None:\n if not re.match(codeyjRule3,encodedstrList[i+1])==None:\n encodedstrList[i]=encodedstrList[i][:-1]\n break\n encodedstrList.append('1')\n return \"\".join(encodedstrList)","sub_path":"codeyj.py","file_name":"codeyj.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"426927198","text":"#!/usr/bin/env python\n\n\"\"\"\nAuthor : Tom Dougherty\nDate : 2018 June 25\nDescription : Script to collect data from all FIMO runs\n and create graphs in matplotlib to display\n abundance of TF motifs in genes/protogenes/nongenes.\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.stats as stats\n\nfrom graphHelper import *\n\nLENGTH = 500 # Length of promoter to analyze\n\n# Import numpy arrays from fimoData.py\nwith open('Data/fisherData{}.npy'.format(LENGTH), 'rb') as f:\n fisher_data = np.load(f)\nwith open('Data/fisherExact{}.npy'.format(LENGTH), 'rb') as f:\n fisher_exact = np.load(f)\nwith open('Data/proportions{}.npy'.format(LENGTH), 'rb') as f:\n proportions = np.load(f)\nwith 
open('Data/TFs.npy', 'rb') as f:\n TFs = np.load(f)\nwith open('Data/pvalFisherData{}.npy'.format(LENGTH), 'rb') as f:\n pval_fisher_data = np.load(f)\nwith open('Data/pvalFisherExact{}.npy'.format(LENGTH), 'rb') as f:\n pval_fisher_exact = np.load(f)\n\n# Plot heat map of motif presence fractions\n# Make array (from fisher_data) of ratios of promoters with motifs / total promoters\nfisher_ratios = fisher_data[:,:,0] / (fisher_data[:,:,0] + fisher_data[:,:,1])\n# Sort by increasing ratios for genes\nsorter = fisher_ratios[:,0].argsort()\nfisher_ratios = fisher_ratios[sorter]\n# Create (sorted) labels for heatmap\nTFs_heatmap = np.asarray(TFs)[sorter]\ncategories = ['gene', 'proto-gene', 'non-gene', 'random regions']\n\n# fig, ax = plt.subplots()\n# im = ax.imshow(fisher_ratios, interpolation='none', aspect='auto')\n# # Show all ticks\n# ax.set_xticks(np.arange(len(categories)))\n# ax.set_yticks(np.arange(len(TFs)))\n# # Label them with the respective list entries\n# ax.set_xticklabels(categories)\n# ax.set_yticklabels(TFs, size=5)\n# # Rotate the tick labels and set their alignment.\n# plt.setp(ax.get_xticklabels(), rotation=0, ha=\"right\",\n# rotation_mode=\"anchor\")\n# # Loop over data dimensions and create text annotations.\n# fisher_text = fisher_ratios.astype('\", connectionstyle=\"arc3,rad=.2\"))\nplt.legend(loc='upper left', frameon=False)\nplt.show()\n\n\n# Creating fraction plots for various (FIMO) p-value cutoffs\nfig, axes = plt.subplots(2, 5)\nPVALS = [1e-12, 1e-11, 1e-10, 1e-9, 1e-8, 1e-7, 1e-6, 1e-5, 1e-4, 1e-3]\nfor i, ax in enumerate(axes.flatten()):\n pval_fisher_ratios = pval_fisher_data[i,:,:,0] / (pval_fisher_data[i,:,:,0] + pval_fisher_data[i,:,:,1])\n a = pval_fisher_ratios[sorter] # Sort everything by the order genes are in when all data is included\n labels = TFs_heatmap\n ax.plot(a[:,0], label='gene')\n ax.plot(a[:,1], label='proto-gene')\n ax.plot(a[:,2], label='non-gene')\n ax.plot(a[:,3], label='random regions')\n #ax.xticks(range(len(a)), labels, rotation=45, size=5, rotation_mode=\"anchor\")\n #ax.legend(loc='upper left', frameon=False)\n plt.xlabel('Transcription factors')\n plt.ylabel('Percentage of promoters with TF motif')\n ax.set_title('motifs matches with p-value < {}'.format(PVALS[i]))\n\nplt.legend(loc='upper left', frameon=False)\nplt.show()\n\n\n\n# Heatmap of significance of Fisher's exact test\nsignificance = fisher_exact[:,:,1][sorter]\nfig, ax = plt.subplots()\ncomparisons = ['genes vs. protogenes', 'genes vs. nongenes', 'protogenes vs. nongenes',\n'genes vs. random', 'protogenes vs. random', 'nongenes vs. random']\nim, cbar = heatmap(significance, TFs_heatmap, comparisons, ax=ax,\n cmap=\"YlGn\", cbarlabel=\"P-value\",\n interpolation='none', aspect='auto')\n#im, cbar = heatmap(fisher_exact, TFs_heatmap, categories, ax=ax[1],\n# cmap=\"YlGn\", cbarlabel=\"Significance\")\ntexts = annotate_heatmap(im, valfmt=\"{x:.3f}\", size=5, threshold=0.05)\n# fig.tight_layout()\nplt.show()\n\n# Heatmap of odds ration from Fisher's exact test\nodds = fisher_exact[:,:,0][sorter]\nfig, ax = plt.subplots()\ncomparisons = ['genes vs. protogenes', 'genes vs. nongenes', 'protogenes vs. nongenes',\n'genes vs. random', 'protogenes vs. random', 'nongenes vs. 
random']\nim, cbar = heatmap(odds, TFs_heatmap, comparisons, ax=ax,\n cmap=\"YlGn\", cbarlabel=\"P-value\",\n interpolation='none', aspect='auto')\n#im, cbar = heatmap(fisher_exact, TFs_heatmap, categories, ax=ax[1],\n# cmap=\"YlGn\", cbarlabel=\"Significance\")\ntexts = annotate_heatmap(im, valfmt=\"{x:.3f}\", size=5, threshold=0.05)\n# fig.tight_layout()\nplt.show()","sub_path":"FIMO/fimoGraphs.py","file_name":"fimoGraphs.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"364795131","text":"num_cases = int(input())\nresults = []\nfor i in range(num_cases):\n N, B = map(int, input().split())\n sum_remainder = 0\n for j in range(B):\n numbers = list(map(int, input().split()))\n remainder = 1\n for number in numbers[1:]:\n temp = number % N\n remainder = (remainder*temp) % N\n sum_remainder = (sum_remainder + remainder) % N\n results.append(sum_remainder)\nfor result in results:\n print(result)\n","sub_path":"BigOOrange/NumberTheory/Boxes of Chocolates.py","file_name":"Boxes of Chocolates.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446351072","text":"#! /usr/bin/python\n# encoding=utf-8\nimport sys\nimport socket\n\nPORT = sys.argv[1]\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ndest = (\"\", int(PORT))\ns.bind(dest)\ns.listen(1)\nc, ipcliente = s.accept()\nqt_jogada = 0\njogada = 0\nbowie = 0\nvenceu = str\nperdeu = str\n\n#server velha\n#=======================================================================\n\nmsg = c.recv(1000).decode()\nprint(msg)\n\nmsg = \"Hello world pra voce tambem!\".encode()\nc.send(msg)\n\n# hello word pra testar comunicação\n#=======================================================================\n# início do código\n\nvelha = [[1,2,3],[4,5,6],[7,8,9]] # Declarou o formato do tabuleiro\n\ndef printarm(velha):\n for i in range(3):\n if i == 1 or i == 2:\n print(\"-\"*10)\n print(velha[i])\n else:\n print(velha[i])\n\nprintarm(velha) # Executa a função de exibir o estado atual da matriz\n\n\"\"\"def encode(velha):\n if isinstance(velha, list):\n return [encode(x) for x in velha]\n else:\n return velha.encode('utf-8')\"\"\"\n\nenviavelha = velha.encode() # Codifica a matriz para enviar ao client\nc.send(enviavelha) # Envia a matriz codificada\n \ndef posicao(bowie): # alocar jogada do server na matrix\n bowie = int(input(\"Digite onde que jogar: \"))\n while bowie < 0 and bowie > 9:\n bowie = int(input(\"Digite um valor disponível: \"))\n if jogada > 0 and jogada < 10:\n if jogada == 1:\n velha[0][0] = \"x\"\n elif jogada == 2:\n velha[0][1] = \"x\"\n elif jogada == 3:\n velha[0][2] = \"x\"\n elif jogada == 4:\n velha[1][0] = \"x\"\n elif jogada == 5:\n velha[1][1] = \"x\"\n elif jogada == 6:\n velha[1][2] = \"x\"\n elif jogada == 7:\n velha[2][0] = \"x\"\n elif jogada == 8:\n velha[2][1] = \"x\"\n elif jogada == 9:\n velha[2][2] = \"x\"\n enviavelha = velha.encode() # Codifica a matriz para enviar ao client\n c.send(enviavelha) # Envia a matriz codificada\n printarm(velha)\n \n\ndef posicaoc(jogada): # função a cada vez que o client jogar\n jogada = c.recv(1000).decode\n if jogada > 0 and jogada < 10:\n if jogada == 1:\n velha[0][0] = \"o\"\n elif jogada == 2:\n velha[0][1] = \"o\"\n elif jogada == 3:\n velha[0][2] = \"o\"\n elif jogada == 4:\n velha[1][0] = \"o\"\n elif jogada == 5:\n velha[1][1] = \"o\"\n elif jogada == 6:\n velha[1][2] = \"o\"\n 
elif jogada == 7:\n velha[2][0] = \"o\"\n elif jogada == 8:\n velha[2][1] = \"o\"\n elif jogada == 9:\n velha[2][2] = \"o\"\n\n printarm(velha)\n\ndef conidc(): # Determinara quem ganhou\n venceu = (\"Parabéns, você venceu\").encode()\n perdeu = (\"Infelizmente você perdeu :(\").encode()\nif velha[0][0] and velha[0][1] and velha[0][1] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[1][0] and velha[1][1] and velha[1][1] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[2][0] and velha[2][1] and velha[2][1] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[0][1] and velha[1][1] and velha[2][2] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[0][2] and velha[1][1] and velha[2][0] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[0][0] and velha[1][0] and velha[2][0] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[0][1] and velha[1][1] and velha[2][1] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelif velha[0][2] and velha[1][2] and velha[2][2] == \"x\":\n print(venceu)\n c.send(perdeu)\n socket.close()\nelse:\n if qt_jogada < 9:\n empate = 9 - qt_jogada\n print(\"faltam\", empate, \"jogadas para dar velha!\")\n elif qt_jogada == 9:\n print(\"Deu velha!\")\n\nif velha[0][0] and velha[0][1] and velha[0][1] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[1][0] and velha[1][1] and velha[1][1] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[2][0] and velha[2][1] and velha[2][1] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[0][1] and velha[1][1] and velha[2][2] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[0][2] and velha[1][1] and velha[2][0] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[0][0] and velha[1][0] and velha[2][0] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[0][1] and velha[1][1] and velha[2][1] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelif velha[0][2] and velha[1][2] and velha[2][2] == \"o\":\n print(perdeu)\n c.send(venceu)\n socket.close()\nelse:\n empate = (9 - qt_jogada).encode()\n c.send(empate)\n\nposicao(bowie)\nqt_jogada += 1\nconidc()\nposicaoc(jogada) # Executa a função da jogada do client\nqt_jogada += 1\nconidc()\nposicao(bowie)\nqt_jogada += 1\nconidc()\nposicaoc(jogada)\nqt_jogada += 1\nconidc()\nposicaoc(bowie)\nqt_jogada += 1\nconidc()\nposicaoc(jogada)\nqt_jogada += 1\nconidc()\nposicao(bowie)\nqt_jogada += 1\nconidc()\nposicaoc(jogada)\nqt_jogada += 1\nconidc()\nposicao(bowie)\nqt_jogada += 1\nconidc()\n#\nprint(\"matriz após a jogada do client: \", velha)\n\nprintarm(velha) # Executa a função de exibir o estado atual da matriz\n\n\"\"\"\nposicao(bowie): # alocar jogada do server na matrix\nposicao(jogada): # função a cada vez que o client jogar\nprintarm(velha): # Executa a função de exibir o estado atual da matriz\ncondic(): #\n\"\"\"","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"415041112","text":"import mahotas as mh\nfrom imread import imread\nfrom matplotlib import pyplot as plt\n\nimage = imread('../DATA/simple-dataset/building05.jpg')\nimage = mh.colors.rgb2gray(image)\n\nstandard_deviations = [8, 16, 32]\n\nfig1 = plt.figure()\n\nfor i in range(3):\n im = mh.gaussian_filter(image, 
standard_deviations[i])\n\n a = fig1.add_subplot(1, 3, i+1) # this line outputs images side-by-side\n plt.imshow(im)\n plt.gray()\n a.set_title('Sigma = ' + str(standard_deviations[i]))\n\nplt.show()\n","sub_path":"smoothing/gaussian_filtering2.py","file_name":"gaussian_filtering2.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98652169","text":"# Copyright 2014 Hewlett-Packard Development Company, L.P\n# All Rights Reserved.\n# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tempest import config\n\nfrom designate_tempest_plugin.schemas.v1 import servers_schema as schema\nfrom designate_tempest_plugin.services.dns.v1.json import base\n\nCONF = config.CONF\n\n\nclass ServersClient(base.DnsClientV1Base):\n @base.handle_errors\n def list_servers(self, params=None):\n \"\"\"List all servers.\"\"\"\n resp, body = self._list_request('servers', params=params)\n\n self.validate_response(schema.list_servers, resp, body)\n\n return resp, body['servers']\n\n @base.handle_errors\n def get_server(self, uuid, params=None):\n \"\"\"Get the details of a server.\"\"\"\n resp, body = self._show_request('servers', uuid, params=params)\n\n self.validate_response(schema.get_server, resp, body)\n\n return resp, body\n\n @base.handle_errors\n def delete_server(self, uuid, params=None):\n \"\"\"Delete the given server.\"\"\"\n resp, body = self._delete_request('servers', uuid, params=params)\n\n self.validate_response(schema.delete_server, resp, body)\n\n return resp, body\n\n @base.handle_errors\n def create_server(self, name, params=None, **kwargs):\n \"\"\"Creates a server.\"\"\"\n post_body = {\n \"name\": name,\n }\n\n for option in ['name']:\n value = kwargs.get(option)\n post_param = option\n if value is not None:\n post_body[post_param] = value\n\n resp, body = self._create_request('servers', post_body, params=params)\n\n self.validate_response(schema.create_server, resp, body)\n\n return resp, body\n\n @base.handle_errors\n def update_server(self, uuid, params=None, **kwargs):\n \"\"\"Updates a server.\"\"\"\n post_body = {}\n\n for option in ['name']:\n post_param = option\n value = kwargs.get(option)\n if value is not None:\n post_body[post_param] = value\n\n resp, body = self._put_request('servers', uuid, post_body,\n params=params)\n\n self.validate_response(schema.update_server, resp, body)\n\n return resp, body\n","sub_path":"designate_tempest_plugin/services/dns/v1/json/servers_client.py","file_name":"servers_client.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300848986","text":"#Segundo Punto Libre\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n# sys.path.insert(0, '../')\n\nimport oscillator.oscillator as os\nimport forces.forces as fr\nimport solver.solver as sol\n\ng = 9.8\nl = 1\nm = 1\n\ndef 
restoring_force(state, params):\n x, L, v, vr, t = state\n w0 = params\n dxdt = v\n dvdt = -w0**2 * np.sin(x)\n drdt = vr\n dvrdt = 0\n return dxdt, drdt, dvdt, dvrdt, 1.0\n\ndef energy(v, x):\n ek = (1/2) * m * (l**2) * (v**2)\n ep = m * g * l * (1 - np.cos(x))\n et = ek + ep\n return et, ek, ep\n\ndef integrate(obj):\n xpos, vpos, tpos= [], [], []\n _, _, _, _, tc = obj.objs.get_state()\n while tc < 7: #9.55:\n xc, _, vc, _, tc = obj.objs.get_state()\n xpos.append(xc)\n vpos.append(vc)\n tpos.append(tc)\n obj.do_step()\n return tpos, xpos, vpos\n\ndef energies(tpos, xpos, vpos):\n ek, ep, em= [], [], []\n for i in range(len(tpos)):\n ener_to, ener_ci, ener_po = energy(vpos[i], xpos[i])\n ek.append(ener_ci)\n ep.append(ener_po)\n em.append(ener_to)\n return ek, ep, em\n\n\nx0, v0, w0, t0 = 0.2, 0., 3., 0\nsim_params = w0\ndeltat = 0.05\n\nm1 = \"Euler\"\nm2 = \"Euler-Cromer\"\nm3 = \"Midpoint\"\n\nnum_method = m2\n\npendulo = os.Oscillator(x0, v0, w0, t0, \"P1\")\npendulo_force = fr.Forces(restoring_force, sim_params)\npendulo.set_force(pendulo_force)\neuler = sol.Solver(pendulo, num_method, deltat)\ntvac, xvac, vac= integrate(euler)\nen_ci, en_po, en_to = energies(tvac, xvac, vac)\n\ndelta_e = en_to[-1] - en_to[0]\nprint(\"Theta: \", x0, \", v: \", v0, \", dt: \", deltat, \", de: \", delta_e)\nprint(\"Energia In: \", en_to[0], \", Energia Fin: \", en_to[-1])\nprint(\"Energia Cinetica In: \", en_ci[0], \", Energia Cinetica Fin: \", en_ci[-1])\nprint(\"Energia Potencial In: \", en_po[0], \", Energia Potencial Fin: \", en_po[-1])\n\ndeltae = []\nfor i in range(len(en_to)):\n deltae.append(en_to[i] - en_to[0])\n\n# fig, ax = plt.subplots()\n# ax.plot(tvac, xvac, '--', label='Angle')\n# ax.plot(tvac, vac, '-.', label='Velocity')\n# ax.set(xlabel='time (AU)', ylabel='State (AU)')\n# ax.grid()\n\n\nfig, ax = plt.subplots()\n# ax.plot(tvac, en_to, ls='--', c = 'blueviolet', label='Total Energy')\nax.plot(tvac, en_ci, ls='--', c ='royalblue', label='Cinetic Energy')\nax.plot(tvac, en_po, ls='--', c = 'deeppink', label='Potecial Energy')\nax.set(xlabel='time (AU)', ylabel='Energy (J)')\nax.grid()\n","sub_path":"libre_p2.py","file_name":"libre_p2.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417573924","text":"class Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: True if the binary tree is BST, or false\n \"\"\"\n\n def isValidBST(self, root):\n # write your code here\n\n \"use recursion\"\n \"need to check: 1. left and right child are BST 2. 
max of left child and min of right child\"\n if root is None:\n return True\n\n return self.isValidBSTHelper(root).isBST\n\n def isValidBSTHelper(self, root):\n\n isBST = True\n\n resulttype1 = None\n if root.left is not None:\n resulttype1 = self.isValidBSTHelper(root.left)\n\n resulttype2 = None\n if root.right is not None:\n resulttype2 = self.isValidBSTHelper(root.right)\n\n \"find out min and max\"\n\n if resulttype1 is not None and resulttype1.max_val >= root.val:\n isBST = False\n\n if resulttype2 is not None and resulttype2.min_val <= root.val:\n isBST = False\n\n min_val = root.val\n max_val = root.val\n if resulttype1 is not None:\n isBST = isBST and resulttype1.isBST\n min_val = min(resulttype1.min_val, min_val)\n max_val = max(resulttype1.max_val, max_val)\n if resulttype2 is not None:\n isBST = isBST and resulttype2.isBST\n min_val = min(resulttype2.min_val, min_val)\n max_val = max(resulttype2.max_val, max_val)\n\n return resulttype(min_val, max_val, isBST)\n\n\nclass resulttype:\n\n def __init__(self, min_val, max_val, isBST):\n self.min_val = min_val\n self.max_val = max_val\n self.isBST = isBST","sub_path":"Algorithm/Python/Tree/BinarySearchTree/ValidBinarySearchTree.py","file_name":"ValidBinarySearchTree.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137775074","text":"#Lesson 3 - points of interest\nimport filters as filters\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.misc import *\nfrom scipy.ndimage import filters\n\n#Image.open('assets/tile2.png').convert('RGB').save('assets/tile2.jpg')\n\n#im = imread('assets/cat.jpg')\n\nim = face()\n\nscales =[0.2989, 0.5870, 0.1140]\nim=np.dot(im,scales)\n\n#im = ascent()\n\ndef get_harris_points(im,min_dist=2,threshold=0.7):\n \"\"\" Return corners from a Harris response image\n min_dist is the minimum number of pixels separating\n corners and image boundary. 
\"\"\"\n # find top corner candidates above a threshold\n # derivatives\n sigma=3\n imx = np.zeros(im.shape)\n filters.gaussian_filter(im, (sigma,sigma), (0,1), imx)\n imy = np.zeros(im.shape)\n filters.gaussian_filter(im, (sigma,sigma), (1,0), imy)\n\n # compute components of the Harris matrix\n Wxx = filters.gaussian_filter(imx*imx,sigma)\n Wxy = filters.gaussian_filter(imx*imy,sigma)\n Wyy = filters.gaussian_filter(imy*imy,sigma)\n # determinant and trace\n Wdet = Wxx*Wyy - Wxy**2\n print(Wdet[0])\n Wtr = Wxx + Wyy\n harrisim = Wdet / Wtr\n corner_threshold = harrisim.max() * threshold\n harrisim_t = (harrisim > corner_threshold) * 1\n # get coordinates of candidates\n coords = np.array(harrisim_t.nonzero()).T # ...and their values\n candidate_values = [harrisim[c[0],c[1]] for c in coords] # sort candidates\n index = np.argsort(candidate_values)\n # store allowed point locations in array\n allowed_locations = np.zeros(harrisim.shape)\n allowed_locations[min_dist:-min_dist,min_dist:-min_dist] = 1\n # select the best points taking min_distance into account\n filtered_coords = []\n for i in index:\n if allowed_locations[coords[i,0],coords[i,1]] == 1:\n filtered_coords.append(coords[i])\n allowed_locations[(coords[i, 0] - min_dist):(coords[i, 0] + min_dist),\n (coords[i, 1] - min_dist):(coords[i, 1] + min_dist)] = 0\n\n return filtered_coords\n\n\nplt.figure()\nplt.gray()\nplt.imshow(im)\nfiltered_coords = get_harris_points(im)\nplt.plot([p[1] for p in filtered_coords],[p[0] for p in filtered_coords],\"*\")\n\n\ndef appendimages(im1, im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1, np.zeros((rows2 - rows1, im1.shape[1]))), axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2, np.zeros((rows1 - rows2, im2.shape[1]))), axis=0)\n # if none of these cases they are equal, no filling needed.\n return np.concatenate((im1, im2), axis=1)\n\n\ndef plot_matches(im1, im2, locs1, locs2, matchscores, show_below=True):\n \"\"\" Show a figure with lines joining the accepted matches\n input: im1,im2 (images as arrays), locs1,locs2 (feature locations), matchscores (as output from ’match()’),\n show_below (if images should be shown below matches). \"\"\"\n im3 = appendimages(im1, im2)\n if show_below:\n im3 = np.vstack((im3, im3))\n plt.imshow(im3)\n cols1 = im1.shape[1]\n for i, m in enumerate(matchscores):\n if m > 0: plt.plot([locs1[i][1], locs2[m][1] + cols1], [locs1[i][0], locs2[m][0]],\"c\")\n\n\n\n\ndef get_descriptors(image,filtered_coords,wid=5):\n \"\"\" For each point return pixel values around the point\n using a neighbourhood of width 2*wid+1. (Assume points are\n extracted with min_distance > wid).\n \"\"\"\n desc = []\n for coords in filtered_coords:\n patch = image[coords[0]-wid:coords[0]+wid+1, coords[1]-wid:coords[1]+wid+1].flatten()\n desc.append(patch)\n return desc\n\ndef match(desc1,desc2,threshold=0.5):\n \"\"\" For each corner point descriptor in the first image,\n select its match to second image using\n normalized cross correlation. 
\"\"\"\n n = len(desc1[0])\n # pair-wise distances\n d = -np.ones((len(desc1),len(desc2)))\n for i in range(len(desc1)):\n for j in range(len(desc2)):\n d1 = (desc1[i] - np.mean(desc1[i])) / np.std(desc1[i])\n d2 = (desc2[j] - np.mean(desc2[j])) / np.std(desc2[j])\n ncc_value = sum(d1 * d2) / (n-1)\n if ncc_value > threshold:\n d[i,j] = ncc_value\n ndx = np.argsort(-d)\n matchscores = ndx[:,0]\n return matchscores\n\n\n\nwid = 5\n\n\nim1 = face()\nim2 = face()\n\n\n\n\n\n\n\n\n\n\ndef paint_over_points(img1,img2):\n filtered_coords1=point_of_intrest(img1,0.3,10)\n filtered_coords2=point_of_intrest(img2,0.3,10)\n ax1=plt.subplot(1,2,1)\n ax2=plt.subplot(1,2,2)\n\n ax1.imshow(img1, cmap='gray')\n ax2.imshow(img2, cmap='gray')\n\n ax1.plot([p[1] for p in filtered_coords1], [p[0] for p in filtered_coords1], \"*\")\n ax2.plot([p[1] for p in filtered_coords2], [p[0] for p in filtered_coords2], \"*\")\n\n plt.show()\n\n\n\n\n\n#im1 = imread('assets/tile1.jpg')\n#im2 = imread('assets/tile2.jpg')\nim1=np.dot(im1,scales)\nim2=np.dot(im2,scales)\n\nax1 = plt.subplot(1, 2, 1)\nax2 = plt.subplot(1, 2, 2)\nax1.imshow(im1)\nfiltered_coords = get_harris_points(im1)\nax1.plot([p[1] for p in filtered_coords],[p[0] for p in filtered_coords],\"*\")\n\nax2.imshow(im2)\nfiltered_coords2 = get_harris_points(im2)\nax2.plot([p[1] for p in filtered_coords2],[p[0] for p in filtered_coords2],\"*\")\n\nplt.show()\n\nfiltered_coords1 = get_harris_points(im1, threshold=0.8)\nd1 = get_descriptors(im1, filtered_coords1)\nfiltered_coords2 = get_harris_points(im2, threshold=0.8)\nd2 = get_descriptors(im2, filtered_coords2)\n\nmatches = match(d1, d2)\nplt.figure()\nplt.gray()\nplot_matches(im1, im2, filtered_coords1, filtered_coords2, matches)\nplt.show()","sub_path":"ibod tmoona/Lesson3.py","file_name":"Lesson3.py","file_ext":"py","file_size_in_byte":5606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"284630495","text":"import torch\nfrom torch import nn\n\n\nclass NetV1(nn.Module):\n\n def __init__(self):\n super(NetV1, self).__init__()\n self.layer = nn.Sequential(\n nn.Linear(784,100),\n nn.ReLU(),\n nn.Linear(100,10),\n nn.Softmax(dim=1)\n )\n\n def forward(self,x):\n return self.layer(x)\n\nclass NetV2(nn.Sequential):\n def __init__(self):\n super(NetV2, self).__init__(\n nn.Linear(784,100,),\n nn.ReLU(),\n nn.Linear(100,10),\n nn.Softmax(dim=1)\n )\n\nclass NetV3(nn.Module):\n\n def __init__(self):\n super(NetV3, self).__init__()\n self.f1 = nn.Linear\n self.relu = nn.ReLU\n self.softmax = nn.Softmax\n\n def forward(self,x):\n h = self.f1(784,100)(x)\n h = self.relu()(h)\n h = self.f1(100,10)(h)\n h = self.softmax(dim=1)(h)\n return h\n\nclass NetV4(nn.Module):\n\n def __init__(self):\n super(NetV4, self).__init__()\n self.w = nn.Parameter(torch.randn(784,100))\n self.w1 = nn.Parameter(torch.randn(100,10))\n\n def forward(self,x):\n h = x@self.w\n h = h@self.w1\n h = torch.exp(h)\n z = torch.sum(h,dim=1,keepdim=True)\n return h/z\n\n\n\nif __name__ == '__main__':\n net = NetV4()\n x = torch.randn(1,784)\n y = net(x)\n print(y.shape)","sub_path":"DeeplearningStudy/MLP/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182054664","text":"from django.db import models\nfrom entity.models import Item, Personnel\n# Create your models here.\n\n\n# 项目普通人员表\nclass ItemPerson(models.Model):\n item = models.ForeignKey(Item, 
on_delete=models.CASCADE)\n personnel = models.ForeignKey(Personnel, on_delete=models.CASCADE)\n temp = models.TextField(default='', blank=True)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'item': self.item.id,\n 'item_name': self.item.name,\n 'personnel': self.personnel.id,\n 'temp': self.temp,\n 'name': self.personnel.name,\n 'account': self.personnel.account,\n 'authority': self.personnel.authority,\n 'team': self.personnel.team, # 所属单位\n }","sub_path":"server/background/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475854518","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 22:15:40 2017\n\n@author: psamtik071\n\"\"\"\n\n#functions for feature-engineering\n\nimport pandas as pd\n\n# make sure tot_docks > 0 (especially when calculating bikes available)\ndef bulk_query(year):\n # query all bikes after or before a certain time\n if year == 2015:\n date_string = \"< '2016-03-01'\"\n if year == 2016:\n date_string = \">= '2016-03-01'\"\n\n query = \"\"\"\n SELECT a.id, a.date, a.hour, bikes_out, bikes_in, dayofweek, month,\n is_weekday, is_holiday, rebal_net_flux, tot_docks, avail_bikes,\n avail_docks, precip, snow, temp, long, lat\n FROM features a\n LEFT JOIN weather b ON a.date = b.date AND a.hour = b.hour\n LEFT JOIN stations c on a.id=c.id\n WHERE a.date {} AND tot_docks > 0\n ORDER BY a.id, a.date, a.hour;\n \"\"\".format(date_string)\n return query\n\ndef strip_unused_stations(df, station_list):\n return df[df.id.isin(station_list)]\n\n\ndef make_categorical(df, cols):\n for col in cols:\n df[col] = df[col].astype('category')\n return df\n\ndef flux_conditions(x, threshold = 0.2):\n # for x in pct_flux, set the following parameters:\n # if x > 0.2 ---> flux_type 1 (rebalance down -- remove bikes)\n # if x < -0.2 ---> flux_type -1 (rebalance up -- add bikes)\n # if abs(x) <= 0.2 ---> flux_type 0 (don't rebalance)\n if x > abs(threshold):\n return x\n elif x < -abs(threshold):\n return x\n else:\n return 0\n\ndef temp_conditions(x):\n # temperature categories\n if x > 80.:\n return 80 #hot\n elif (x > 60.) & (x <= 80.):\n return 70 #mild\n elif (x > 40.) 
& (x <= 60.):\n return 50 #chilly\n else:\n return 30 #cold\n\ndef precip_conditions(x):\n # precipitation categories\n if x > 0.10:\n return 1\n else:\n return 0\n\ndef merge_by_date(df1, df2):\n return pd.merge(df1, df2, how = 'left', on = 'date')\n\n# create a daily avg flux column and shift it to get yesterday's flux for a given date.\n# also with weekly fluxes\ndef make_lagged_fluxes(df):\n mean_daily_flux = df.groupby('date').mean().flux\n mean_yesterday_flux = mean_daily_flux.shift(1).reset_index()\n mean_lastweek_flux = mean_daily_flux.shift(7).reset_index()\n\n mean_daily_flux = mean_daily_flux.reset_index().rename(columns = {'flux': 'mean_flux'})\n mean_yesterday_flux = mean_yesterday_flux.rename(columns = {'flux': 'yest_flux'})\n mean_lastweek_flux = mean_lastweek_flux.rename(columns = {'flux': 'last_week_flux'})\n\n dfs = [df, mean_daily_flux, mean_yesterday_flux, mean_lastweek_flux]\n return reduce(merge_by_date, dfs)\n\ndef new_features(df):\n # df['date'] = pd.to_datetime(df.date)\n df['hour'] = df['hour'].astype(int)\n\n # turn strings 'True' and 'False' into 1 and 0\n string_dict = {'True': 1, 'False':0}\n df[['is_weekday', 'is_holiday']] = df[['is_weekday', 'is_holiday']].replace(string_dict)\n\n # fix the number of total docks for a given day\n total_docks = df.groupby(['date']).max().tot_docks.reset_index()\n df = pd.merge(df, total_docks, how = 'left', on = 'date').rename(columns = {'tot_docks_y': 'tot_docks'})\n df.drop('tot_docks_x', 1, inplace=True)\n\n # engineer new features\n df['flux'] = df.bikes_in - df.bikes_out\n # df['pct_bikes_in'] = df.bikes_in / df.tot_docks\n # df['pct_bikes_out'] = df.bikes_out / df.tot_docks\n df['pct_avail_bikes'] = df.avail_bikes / df.tot_docks\n df['pct_avail_docks'] = df.avail_docks / df.tot_docks\n df['pct_flux'] = df.flux / df.tot_docks\n #df['pct_rebal_flux'] = df.rebal_net_flux / df.tot_docks\n\n\n #normalize precipitation\n df['precip'] = df.precip / df.precip.max()\n df = df.fillna(method = 'bfill', axis = 0)\n\n\n # get lagged features\n # df_with_lags = make_lagged_fluxes(df).dropna()\n\n # hist_cols = ['mean_flux', 'yest_flux', 'last_week_flux']\n# for col in hist_cols:\n# df_with_lags[col] = df_with_lags[col].apply(flux_conditions).astype('category')\n# df_with_lags = df_with_lags.dropna()\n # features_to_clear = ['bikes_out', 'bikes_in','rebal_net_flux',\n # 'tot_docks', 'avail_bikes', 'avail_docks', 'flux']\n\n return df\n","sub_path":"workflow/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"298069014","text":"# Copyright (c) 2012 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport json\nimport logging\nimport operator\n\nfrom appengine_url_fetcher import AppEngineUrlFetcher\nimport url_constants\n\nclass BranchUtility(object):\n def __init__(self, fetch_url, fetcher, object_store_creator):\n self._fetch_url = fetch_url\n self._fetcher = fetcher\n # BranchUtility is obviously cross-channel, so set the channel to None.\n self._object_store = object_store_creator.Create(BranchUtility,\n channel=None)\n\n @staticmethod\n def Create(object_store_creator):\n return BranchUtility(url_constants.OMAHA_PROXY_URL,\n AppEngineUrlFetcher(),\n object_store_creator)\n\n @staticmethod\n def GetAllChannelNames():\n return ['stable', 'beta', 'dev', 'trunk']\n\n @staticmethod\n def SplitChannelNameFromPath(path):\n \"\"\"Splits the channel name out of |path|, returning the tuple\n (channel_name, real_path). If the channel cannot be determined then returns\n (None, path).\n \"\"\"\n if '/' in path:\n first, second = path.split('/', 1)\n else:\n first, second = (path, '')\n if first in ['trunk', 'dev', 'beta', 'stable']:\n return (first, second)\n return (None, path)\n\n def GetBranchForChannel(self, channel_name):\n \"\"\"Returns the branch number for a channel name.\n \"\"\"\n if channel_name == 'trunk':\n return 'trunk'\n\n branch_number = self._object_store.Get(channel_name).Get()\n if branch_number is not None:\n return branch_number\n\n try:\n version_json = json.loads(self._fetcher.Fetch(self._fetch_url).content)\n except Exception as e:\n # This can happen if omahaproxy is misbehaving, which we've seen before.\n # Quick hack fix: just serve from trunk until it's fixed.\n logging.error('Failed to fetch or parse branch from omahaproxy: %s! '\n 'Falling back to \"trunk\".' 
% e)\n return 'trunk'\n\n branch_numbers = {}\n for entry in version_json:\n if entry['os'] not in ['win', 'linux', 'mac', 'cros']:\n continue\n for version in entry['versions']:\n if version['channel'] != channel_name:\n continue\n branch = version['version'].split('.')[2]\n if branch not in branch_numbers:\n branch_numbers[branch] = 0\n else:\n branch_numbers[branch] += 1\n\n sorted_branches = sorted(branch_numbers.iteritems(),\n None,\n operator.itemgetter(1),\n True)\n self._object_store.Set(channel_name, sorted_branches[0][0])\n\n return sorted_branches[0][0]\n","sub_path":"chrome/common/extensions/docs/server2/branch_utility.py","file_name":"branch_utility.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311254171","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nimport unittest2 as unittest\nimport os\n\nimport numpy as np\nfrom pymatgen.util.testing import PymatgenTest\nfrom pymatgen.core.surface import generate_all_slabs\nfrom pymatgen.analysis.adsorption import *\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen import Structure, Lattice\nimport json\nfrom six.moves import zip\n\ntest_dir = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"..\", \"..\",\n 'test_files')\n\n\nclass AdsorbateSiteFinderTest(PymatgenTest):\n def setUp(self):\n self.structure = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3.5),\n [\"Ni\"], [[0, 0, 0]])\n slabs = generate_all_slabs(self.structure, max_index=2,\n min_slab_size=6.0, min_vacuum_size=15.0,\n max_normal_search=1, center_slab=True)\n self.slab_dict = {''.join([str(i) for i in slab.miller_index]):\n slab for slab in slabs}\n self.asf_211 = AdsorbateSiteFinder(self.slab_dict[\"211\"])\n self.asf_100 = AdsorbateSiteFinder(self.slab_dict[\"100\"])\n self.asf_111 = AdsorbateSiteFinder(self.slab_dict[\"111\"])\n self.asf_110 = AdsorbateSiteFinder(self.slab_dict[\"110\"])\n\n def test_init(self):\n asf_100 = AdsorbateSiteFinder(self.slab_dict[\"100\"])\n asf_111 = AdsorbateSiteFinder(self.slab_dict[\"111\"])\n\n def test_from_bulk_and_miller(self):\n asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 1))\n sites = asf.find_adsorption_sites()\n self.assertEqual(len(sites), 4)\n asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 0, 0))\n sites = asf.find_adsorption_sites()\n self.assertEqual(len(sites), 3)\n asf = AdsorbateSiteFinder.from_bulk_and_miller(self.structure, (1, 1, 0),\n undercoord_threshold=0.1)\n self.assertEqual(len(asf.surface_sites), 1)\n\n def test_find_adsorption_sites(self):\n sites = self.asf_100.find_adsorption_sites()\n self.assertEqual(len(sites), 3)\n sites = self.asf_100.find_adsorption_sites(positions=\"bridge\")\n self.assertEqual(len(sites), 2)\n sites = self.asf_111.find_adsorption_sites()\n self.assertEqual(len(sites), 4)\n sites = self.asf_110.find_adsorption_sites()\n self.assertEqual(len(sites), 4)\n sites = self.asf_211.find_adsorption_sites()\n\n def test_functions(self):\n slab = self.slab_dict[\"111\"]\n rot = get_rot(slab)\n reoriented = reorient_z(slab)\n self.assertArrayAlmostEqual(slab.frac_coords[0],\n cart_to_frac(slab.lattice, \n slab.cart_coords[0]))\n self.assertArrayAlmostEqual(slab.cart_coords[0],\n frac_to_cart(slab.lattice,\n slab.frac_coords[0]))\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"pymatgen/analysis/tests/test_adsorption.py","file_name":"test_adsorption.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515326043","text":"# Write a function that accepts any number of arguments - lists; it must\n# return a list of all the objects from those lists, but each object has to\n# be unique: join_lists([1, 2], ['a', 2], ['c', 1]) -> [1, 2, 'a', 'c']\n\nlst1 = [1, 2]\nlst2 = ['a', 2]\nlst3 = ['c', 1]\n\n#----------------------------------------------------------------------\n\n\ndef list_uniq(*args):\n    joint_lst = []\n    uniq_lst = []\n    for i in args:\n        joint_lst += i\n    for j in joint_lst:\n        if j not in uniq_lst:\n            uniq_lst.append(j)\n    return uniq_lst\n\np = list_uniq(lst1, lst2, lst3)\nprint(p)\n","sub_path":"HomeWork_Lecture_2/HW_2.7.py","file_name":"HW_2.7.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"409375694","text":"#!/usr/bin/env python\nimport time\nstart_time = time.time()\n\nf = open(\"input.dat\").read()\nT = []\nfor line in f.splitlines():\n    T.append([int(i) for i in line.split(' ')])\n\nt = list(reversed(T))\nn = len(t)\n\nfor i in range(1, n):\n    for j in range(len(t[i])):\n        t[i][j] += max((t[i - 1][j], t[i - 1][j+1]))\n\n# top element is solution\nsolution = t[-1][0]\n\nend_time = time.time()\nrun_time = end_time - start_time\n\nprint( \"--------------------------------------------\")\nprint( \"| Solution to Project Euler problem 18: |\" )\nprint( \"--------------------------------------------\")\nprint( \"Answer: {:d}\".format(solution) )\nprint( \"Wall time: {:3.5f} seconds\".format(run_time) )\n","sub_path":"18/new-18.py","file_name":"new-18.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469199731","text":"import pandas as pd\nimport numpy as np\nimport gurobipy as gp\nfrom gurobipy import GRB\nfrom DNFRuleModel import DNFRuleModel\nfrom MasterModel import MasterModel\nfrom DNF_IP_RuleGenerator import DNF_IP_RuleGenerator\nfrom GreedyHeuristic import GreedyHeuristic\nimport time\nfrom GeneralRuleGenerator import GeneralRuleGenerator\n\nclass Classifier(object):\n    '''\n    Object to create a binary classifier using a column generation framework\n    '''\n    \n    def __init__(self, X, Y,\n                 args = {},\n                 ruleModel = 'DNF',\n                 ruleGenerator = 'DNF_IP'):\n        \n        #Define class variables\n        self.ruleMod = None\n        self.ruleGen = None\n        self.master = None\n        self.fitRuleSet = None\n        self.numIter = 0\n        self.args = args\n        self.mip_results = []\n        self.final_mip = 0\n        self.final_ip = 0\n        \n        # Map parameters to instantiated objects\n        self.initRuleModel(X, Y, ruleModel)\n        self.initRuleGenerator(ruleGenerator)\n        self.master = MasterModel(self.ruleMod, self.args)\n        \n    def fit(self, initial_rules = None, verbose = False, timeLimit = None, timeLimitPricing = None):\n        '''\n        Function to generate a rule set\n        - Can take initial set of rules\n        - Verbose parameter controls how much output is displayed during intermediary steps\n        '''\n        \n        # Add initial rules to master model\n        if initial_rules is not None:\n            self.master.addRule(initial_rules)\n        \n        if timeLimit is not None:\n            start_time = time.perf_counter() \n        \n        while True:\n            self.numIter += 1\n            # Solve relaxed version of restricted problem\n            if verbose:\n                print('Solving Restricted LP')\n            results = 
self.master.solve(verbose = verbose, relax = True)\n results['verbose'] = verbose\n self.mip_results.append(results['obj'])\n \n if timeLimitPricing is not None:\n results['timeLimit'] = timeLimitPricing\n \n # Generate new candidate rules\n if verbose:\n print('Generating Rule')\n ruleFlag, rules = self.ruleGen.generateRule(results)\n \n # If no new rules generated exit out and solve master to optimality\n if ruleFlag:\n if verbose:\n print('Adding %d new rule(s)'%len(rules))\n \n self.master.addRule(rules)\n else:\n if verbose:\n print('No new rules generated.')\n break\n \n if timeLimit is not None: \n if time.perf_counter() - start_time > timeLimit:\n print('Time limit for column generation exceeded. Solving MIP.')\n break\n \n # Solve master problem to optimality\n if verbose:\n print('Solving final master problem to integer optimality')\n \n results = self.master.solve(verbose = verbose, relax = False)\n \n self.fitRuleSet = results['ruleSet']\n self.final_mip = self.mip_results[-1]\n self.final_ip = results['obj']\n \n #Return final rules\n return self\n \n def predict(self, X):\n '''\n Function to predict class labels using the fitted rule set\n '''\n if self.fitRuleSet is None:\n raise Exception(\"Model not fit. Can't make inference!\")\n \n return self.ruleMod.predict(X, self.fitRuleSet)\n \n def initRuleModel(self, X, Y, ruleModel):\n '''\n Function that maps string rule models to objects\n - To add a new rule model simply add the object to the if control flow\n '''\n \n if ruleModel == 'DNF':\n self.ruleMod = DNFRuleModel(X, Y)\n else:\n raise Exception('No associated rule model found.')\n \n def initRuleGenerator(self, ruleGenerator):\n '''\n Function that maps string rule generators to objects\n - To add a new rule generator simply add the object to the if control flow\n '''\n\n if ruleGenerator == 'DNF_IP':\n self.ruleGen = DNF_IP_RuleGenerator(self.ruleMod, self.args)\n elif ruleGenerator == 'Greedy':\n self.ruleGen = GreedyHeuristic(self.ruleMod, self.args)\n elif ruleGenerator == 'Generic':\n self.ruleGen = GeneralRuleGenerator(self.ruleMod, self.args)\n else:\n raise Exception('No associated rule generator found.')\n \n","sub_path":"Classifier.py","file_name":"Classifier.py","file_ext":"py","file_size_in_byte":4640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"549162349","text":"import requests\nimport json\n#json.dumps() : python transfer to json\n#json.loads() : json transfer to python\n#\nfrom requests.auth import HTTPBasicAuth\nfrom requests.auth import HTTPDigestAuth\nfrom requests_oauthlib import OAuth1\nfrom requests import Request, Session\n\n\ndef try_get():\n r = requests.get('https://api.github.com/user', auth=('user', 'pass'))\n print(r.text)\n print(r.status_code)\n r = requests.get('https://api.github.com/user')\n print(r.text)\n # print(r.json)\n print(r.status_code)\n print(r.headers)\n r = requests.get('https://api.github.com/events')\n print(r.text)\n\ndef try_url():\n payload = {'key1': 'value1', 'key2': 'value2'}\n r = requests.get('http://httpbin.org/get', params=payload)\n print(r.url)\n payload = {'key1': 'value1', 'key2': ['value2', 'value3']}\n r = requests.get('http://httpbin.org/get', params=payload)\n print(r.url)\n\ndef try_practicepython():\n r = requests.get('http://www.practicepython.org/')\n print(r.url)\n print(r.text)\n print(r.status_code)\n\ndef save_raw_stream_as_a_file():\n r = requests.get('http://www.practicepython.org/', stream=True)\n with open('proacticepython.txt', 
'wb') as fd:\n for chunk in r.iter_content(chunk_size=128):\n fd.write(chunk)\ndef save_cnn():\n url ='http://edition.cnn.com/2017/02/08/studentnews/ten-content-thurs/index.html'\n r = requests.get(url, stream=True)\n with open('cnn.txt', 'wb') as fd:\n for chunk in r.iter_content(chunk_size=128):\n fd.write(chunk)\n\ndef custom_headers():\n url = 'https://api.github.com/some/endpoint'\n headers = {'user-agent': 'my-app/0.0.1'}\n r = requests.get(url, headers=headers)\n print(r.headers)\n r = requests.get(url)\n print(r.text)\n print(r.headers)\n\ndef Send_form_encoded_data():\n payload = {'key1': 'value1', 'key2': 'value2'}\n r = requests.post(\"http://httpbin.org/post\", data=payload)\n print(r.text)\n print(\"*\"*100)\n r = requests.get(\"http://httpbin.org/post\")\n print(r.text)\n\ndef post_multipart_files():\n url = 'http://httpbin.org/post'\n files = {'file': open('proacticepython.txt', 'rb')}\n r = requests.post(url, files=files)\n print(r.text)\n\ndef cookies():\n url = 'http://example.com/some/cookie/setting/url'\n r = requests.get(url)\n# print(r.text)\n print(r.cookies)\n\ndef send_cookie():\n url = 'http://httpbin.org/cookies'\n cookies = dict(cookies_are='working')\n\n r = requests.get(url, cookies=cookies)\n print(r.text)\n\ndef try_auth():\n r = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))\n print(r.status_code)\n# HTTPBasicAuth can be skip\n r = requests.get('https://api.github.com/user', auth=('user', 'pass'))\n print(r.status_code)\n\ndef try_digestAuth():\n url = 'http://httpbin.org/digest-auth/auth/user/pass'\n r = requests.get(url, auth=HTTPDigestAuth('user', 'pass'))\n print(r.status_code)\n\ndef try_oauth():\n # fail\n url = 'https://api.twitter.com/1.1/account/verify_credentials.json'\n auth = OAuth1('YOUR_APP_KEY', 'YOUR_APP_SECRET','USER_OAUTH_TOKEN', 'USER_OAUTH_TOKEN_SECRET')\n r = requests.get(url, auth=auth)\n print(r.status_code)\n\ndef try_session():\n s = requests.Session()\n s.get('http://httpbin.org/cookies/set/sessioncookie/123456789')\n r = s.get('http://httpbin.org/cookies')\n print(r.text)\n\ndef try_prepare():\n s = Session()\n\n req = Request('POST', url, data=data, headers=headers)\n prepped = req.prepare()\n\n # do something with prepped.body\n prepped.body = 'No, I want exactly this as the body.'\n\n # do something with prepped.headers\n del prepped.headers['Content-Type']\n\n resp = s.send(prepped,\n stream=stream,\n verify=verify,\n proxies=proxies,\n cert=cert,\n timeout=timeout\n )\n\n print(resp.status_code)\n\ndef streaming_uploads():\n with open('cnn.txt', 'rb') as f:\n r= requests.post('http://some.url/streamed', data=f)\n print(r.request.headers)\n\ndef gen():\n yield b'a'\n yield b'b'\n\ndef chunk_ended_requests():\n requests.post('http://some.url/chunked', data=gen())\n\n#callback_function\ndef print_url(r, *args, **kwargs):\n print(r.url)\n\ndef event_hooks():\n requests.get('http://httpbin.org', hooks=dict(response=print_url))\n\ndef try_proxies():\n proxies = {\n 'http': 'http://ASIA-PACIFIC\\jerry_chen7:Dell09018@proxy.tpe.apac.dell.com:80',\n 'https': 'https://ASIA-PACIFIC\\jerry_chen7:Dell09018@proxy.tpe.apac.dell.com:80',\n }\n\n requests.get('http://example.org', proxies=proxies)\n\ndef try_socks():\n proxies = {\n 'http': 'socks5://ASIA-PACIFIC\\jerry_chen7:Dell09018@proxy.tpe.apac.dell.com:80',\n 'https': 'socks5://ASIA-PACIFIC\\jerry_chen7:Dell09018@proxy.tpe.apac.dell.com:80',\n }\n requests.get('http://example.org', proxies=proxies)\n\ndef verbs_get():\n r = requests.get(\n 
'https://api.github.com/repos/kennethreitz/requests/git/commits/a050faf084662f3a352dd1a941f2c7c9f886d4ad')\n    if r.status_code == requests.codes.ok:\n        print(r.headers['content-type'])\n\n        commit_data = r.json()\n        print(commit_data)\n        print(commit_data.keys())\n        print(commit_data[u'committer'])\n        print(commit_data[u'message'])\n\ndef verbs_post():\n    r = requests.get('https://api.github.com/repos/kennethreitz/requests/issues/482')\n    print(r.status_code)\n\n    issue = json.loads(r.text)\n    print(issue[u'title'])\n    print(issue[u'comments'])\n\n    r = requests.get(r.url + u'/comments')\n    print(r.status_code)\n    comments = r.json()\n    print(comments[9])\n    print(comments[9].keys())\n    print(comments[9][u'body'])\n    print(comments[9][u'updated_at'])\n    print(comments[9][u'user'][u'login'])\n\n    body = json.dumps({u\"body\": u\"Sounds great! I'll get right on it!\"})\n    url = u\"https://api.github.com/repos/kennethreitz/requests/issues/482/comments\"\n    auth = HTTPBasicAuth('jerry2613@gmail.com', 'jerry2joan')\n    r = requests.post(url=url, data=body, auth=auth)\n    print(r.status_code)\n    r = requests.patch(url=url, data=body, auth=auth)\n    print(r.status_code)\n\ndef link_headers():\n    url = 'https://api.github.com/users/kennethreitz/repos?page=1&per_page=10'\n    r = requests.head(url=url)\n    print(r.headers['link'])\n    print(r.links[\"next\"])\n    print(r.links[\"last\"])\n\nif __name__ == '__main__':\n#    r = requests.get('https://api.github.com/events', stream=True)\n#    try_practicepython()\n#    save_raw_stream_as_a_file()\n#    custom_headers()\n#    Send_form_encoded_data()\n#    post_multipart_files()\n#    cookies()\n#    send_cookie()\n#    try_auth()\n#    try_digestAuth()\n#    try_oauth()\n#    save_cnn()\n#    try_session()\n#    try_prepare()\n#    r = requests.get('https://requestb.in')\n#    r = requests.get('https://github.com')\n#    print(r.status_code)\n#    streaming_uploads()\n#    chunk_ended_requests()\n#    event_hooks()\n#    try_proxies()\n#    try_socks()\n#    verbs_get()\n#    verbs_post()\n    link_headers()","sub_path":"Exercise/Decode_A_Web_Page/requests_test.py","file_name":"requests_test.py","file_ext":"py","file_size_in_byte":7034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"374733746","text":"'''\nImplement int sqrt(int x).\nCompute and return the square root of x, where x is guaranteed to be a non-negative integer.\nSince the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.\nExample 1:\nInput: 4\nOutput: 2\nExample 2:\nInput: 8\nOutput: 2\nExplanation: The square root of 8 is 2.82842..., and since\n             the decimal part is truncated, 2 is returned.\n'''\n\nclass Solution:\n    # This is an easy problem, but I could only come up with an approach that times out...\n    # The one below is a solution I found elsewhere\n    # I can't immediately work out the boundary conditions of this problem\n    # 80.49%\n    def mySqrt1(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        if x <= 1:\n            return x\n        low, high = 1, x\n        while low < high:\n            mid = (low + high) // 2\n            if mid * mid > x:\n                high = mid\n            else:\n                low = mid + 1\n        return low - 1\n\n    # 93.52%\n    # This one doesn't really count, of course\n    def mySqrt2(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        return int(x ** 0.5)\n\n    # 62.20% 93.52%\n    def mySqrt(self, x):\n        \"\"\"\n        :type x: int\n        :rtype: int\n        \"\"\"\n        if x == 1 or x == 0:\n            return x\n        left, right = 1, x\n        while 1:\n            mid = left + (right - left) // 2\n            if mid > x // mid:\n                right = mid - 1\n            else:\n                if mid + 1 > x // (mid + 1):\n                    return mid\n                left = mid + 1\n\nso = 
Solution()\nprint(so.mySqrt(8))","sub_path":"Algorithm51-100/69_Sqrt(x).py","file_name":"69_Sqrt(x).py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99818279","text":"from geosnap import DataStore\nfrom geosnap.io import get_census\nfrom geosnap.analyze import cluster\nfrom geosnap.harmonize import harmonize\n\n\ncolumns = [\"median_household_income\", \"p_poverty_rate\", \"p_unemployment_rate\"]\nreno = get_census(msa_fips=\"39900\", datastore=DataStore())\nreno = harmonize(\n    reno,\n    intensive_variables=columns,\n    target_year=2010,\n    allocate_total=True,\n    extensive_variables=[\"n_total_pop\"],\n    unit_index=\"geoid\",\n).reset_index()\n\nreno, reno_mod = cluster(\n    reno,\n    columns=columns,\n    method=\"kmeans\",\n    n_clusters=3,\n    unit_index=\"geoid\",\n    return_model=True\n)\n\ndef test_single_simulation():\n    simulated = reno_mod.predict_markov_labels( base_year=2010, time_steps=1\n    )\n    assert simulated.shape == (107, 3)\n\n\ndef test_multi_simulation():\n    simulated = reno_mod.predict_markov_labels(base_year=2010, time_steps=3, increment=10)\n    assert simulated.shape == (428, 4)\n","sub_path":"geosnap/tests/test_forward_sim.py","file_name":"test_forward_sim.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439989915","text":"import numpy as np\nimport skimage.io\nimport matplotlib.pyplot as plt\nimport skimage.segmentation\n\n# from common import *\nfrom encode import *\n\n\n# https://www.kaggle.com/stkbailey/step-by-step-explanation-of-scoring-metric/notebook\ndef evalute_score(true_masks, pred_masks):\n    \"\"\"\n    Description: .\n    \n    Args\n    ----\n    .\n    \n    Returns\n    -------\n    .\n    \"\"\"\n    true_masks = image.masks_merge(true_masks, label = True)\n    # pred_masks = image.masks_merge(pred_masks, label = True)\n    #\n    fig = plt.figure()\n    plt.subplot(1,2,1)\n    plt.imshow(true_masks)\n    plt.title(\"Ground truth masks\")\n    plt.subplot(1,2,2)\n    plt.imshow(pred_masks)\n    plt.title(\"Predict masks\")\n\n    #\n    true_objects = len(np.unique(true_masks))\n    pred_objects = len(np.unique(pred_masks))\n    print(\"Number of true objects: \", true_objects)\n    print(\"Number of predicted objects: \", pred_objects)\n\n    # Compute intersection between all objects\n    intersection = np.histogram2d(true_masks.flatten(), pred_masks.flatten(), bins=(true_objects, pred_objects))[0]\n\n    # Compute areas (needed for finding the union between all objects)\n    area_true = np.histogram(true_masks, bins = true_objects)[0]\n    area_pred = np.histogram(pred_masks, bins = pred_objects)[0]\n    area_true = np.expand_dims(area_true, -1)\n    area_pred = np.expand_dims(area_pred, 0)\n\n    # Compute union\n    union = area_true + area_pred - intersection\n\n    # Exclude background from the analysis\n    intersection = intersection[1:,1:]\n    union = union[1:,1:]\n    union[union == 0] = 1e-9\n\n    # Compute the intersection over union\n    iou = intersection / union\n\n    # Precision helper function\n    def precision_at(threshold, iou):\n        matches = iou > threshold\n        true_positives = np.sum(matches, axis=1) == 1   # Correct objects\n        false_positives = np.sum(matches, axis=0) == 0  # Missed objects\n        false_negatives = np.sum(matches, axis=1) == 0  # Extra objects\n        tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)\n        return tp, fp, fn\n\n    # Loop over IoU thresholds\n    prec = []\n    print(\"Thresh\\tTP\\tFP\\tFN\\tPrec.\")\n    for t in np.arange(0.5, 1.0, 
0.05):\n        tp, fp, fn = precision_at(t, iou)\n        p = tp / (tp + fp + fn)\n        print(\"{:1.3f}\\t{}\\t{}\\t{}\\t{:1.3f}\".format(t, tp, fp, fn, p))\n        prec.append(p)\n    print(\"AP\\t-\\t-\\t-\\t{:1.3f}\".format(np.mean(prec)))\n\n    plt.show()\n\n    \n\n\ndef loU():\n    \"\"\"\n    Description: .\n    \n    Args\n    ----\n    .\n    \n    Returns\n    -------\n    .\n    \"\"\"\n    pass","sub_path":"src/utils/evalute.py","file_name":"evalute.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"237451707","text":"import cPickle as pickle\n\nfrom QtNetwork import *\nfrom QtCore import *\n\nclass MessageSocket(QTcpSocket):\n    def __init__(self):\n        super(MessageSocket, self).__init__()\n        self.inBuffer = ''\n        self.outBuffer = ''\n        self.readyRead.connect(self._onReadyRead)\n        self.connected.connect(self._onConnected)\n\n    messageReceived = Signal(dict)\n\n    @Slot()\n    def _onConnected(self):\n        super(MessageSocket, self).write(self.outBuffer)\n        self.outBuffer = ''\n\n    @Slot()\n    def _onReadyRead(self):\n        fromPos = len(self.inBuffer) + 1\n        self.inBuffer = self.inBuffer + str(self.readAll())\n        flag = True\n        while flag:\n            flag = False\n            for l in xrange(fromPos, len(self.inBuffer) + 1):\n                buf = self.inBuffer[:l]\n                try:\n                    message = pickle.loads(buf)\n                    self.messageReceived.emit(message)\n                    self.inBuffer = self.inBuffer[l:]\n                    fromPos = 1\n                    flag = True\n                    break\n                except:\n                    pass\n\n    def write(self, data):\n        if self.state() != QAbstractSocket.ConnectedState:\n            self.outBuffer += data\n        else:\n            super(MessageSocket, self).write(data)\n\n","sub_path":"network/messagesocket.py","file_name":"messagesocket.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246192393","text":"# Project Euler\n# Alex Johnson\n# Problem 33: Digit cancelling fractions\n\ndef gcd(a, b):\n    if b == 0: return a\n    return gcd(b, a % b)\n\ndef rem(string, char):\n    for i in range(len(string)):\n        if string[i] == char:\n            return string[:i] + string[i+1:]\n    return string\n\ndef digit_cancel(n, d):\n    n_str = str(n)\n    d_str = str(d)\n    if '0' in n_str + d_str: return (n, d)\n    for char in n_str:\n        if char in d_str:\n            return (int(rem(n_str, char)), int(rem(d_str, char)))\n    return (n, d)\n\ndef frac_reduce(n, d):\n    g = gcd(n, d)\n    return (n//g, d//g)\n\nfracs = []\n\nfor a in range(10, 100):\n    for b in range(a + 1, 100):\n        if digit_cancel(a, b) != (a, b):\n            cancel_a = digit_cancel(a, b)[0]\n            cancel_b = digit_cancel(a, b)[1]\n            if frac_reduce(cancel_a, cancel_b) == frac_reduce(a, b):\n                fracs.append((a, b))\n\nnum = 1\nden = 1\nfor frac in fracs:\n    num *= frac[0]\n    den *= frac[1]\n\nprint(frac_reduce(num, den)[1])\n","sub_path":"p026-050/p33.py","file_name":"p33.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"530974000","text":"# Algorithm 2\n\n\ndef main():\n\n    opc = int(input(\"Option: \"))\n    qtt = int(input(\"Quantity: \"))\n\n    total = 0.0\n    if opc == 1:\n        total = 4.0 * qtt\n    elif opc == 2:\n        total = 4.5 * qtt\n    elif opc == 3:\n        total = 5.0 * qtt\n    elif opc == 4:\n        total = 2.0 * qtt\n    else:\n        total = 1.5 * qtt\n\n    print(\"Total: U$ %.2f\" % total)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"EXERCÍCIOS RESOLVIDOS/python/alternatives/alg2.py","file_name":"alg2.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"64599440","text":"import pandas as pd\nimport numpy as np\nimport plotly.plotly as py\nimport plotly.graph_objs as go\n \n# Import data from csv\ndf = pd.read_csv('results.csv')\ndf.head()\n\ntrace = go.Scatter(\n\tx=df['time'], y=df['usd'], name='testplot'\n\t)\n\nlayout = go.Layout(\n\ttitle='Adaptive ETH Short Analysis',\n\n\txaxis=dict(\n autorange=True,\n showgrid=True,\n zeroline=False,\n showline=False,\n autotick=True,\n ticks='',\n showticklabels=False\n ),\n \t# entry point 1\n\n \t# exit point 1\n\n \t# entry point 2\n\n \t# exit point 2\n \t#height = 600,\n \t#width = 1400\n\t)\n\nrender = go.Figure(data=[trace], layout=layout)\n\n# render and publish to plot.ly\npy.plot(render, filename='eth demo', sharing='public') ","sub_path":"demo/demoPlot.py","file_name":"demoPlot.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572733424","text":"targets = list(map(int, input().split('|')))\r\ncommand = input()\r\npoints = 0\r\n\r\nwhile not command == 'Game over':\r\n data = command.split()\r\n type = data[0]\r\n if type == 'Shoot':\r\n direction, start_index, length = data[1].split('@')\r\n start_index = int(start_index)\r\n length = int(length)\r\n if start_index in range(len(targets)):\r\n if direction == 'Left':\r\n index = (start_index - length) % len(targets)\r\n else:\r\n index = (start_index + length) % len(targets)\r\n if targets[index] < 5:\r\n points += targets[index]\r\n targets[index] = 0\r\n else:\r\n points += 5\r\n targets[index] -= 5\r\n\r\n elif type == 'Reverse':\r\n targets = targets[::-1]\r\n\r\n command = input()\r\n\r\nprint(' - '.join(map(str, targets)))\r\nprint(f\"Iskren finished the archery tournament with {points} points!\")\r\n","sub_path":"exams/archery_tournament.py","file_name":"archery_tournament.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"83269130","text":"import matplotlib\n\nmatplotlib.use(\"Qt5Agg\")\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom model import list\nimport pandas as pd\n\n\nclass MyFigure(FigureCanvas):\n\n def __init__(self, width=5, height=4, dpi=100):\n self.fig = Figure(figsize=(width, height), dpi=dpi)\n super(MyFigure, self).__init__(self.fig)\n self.axes = self.fig.add_subplot(111)\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n def draw0(self):\n pro_type = [\"单选题\", \"判断题\"]\n bar = []\n\n for type in pro_type:\n time, correct = list.problem_record[type].getTimes()\n if time == 0:\n rate = 0\n else:\n rate = correct / time\n\n x = [type]\n y = [rate]\n bar.append(self.axes.bar(x, y, alpha=0.5, width=0.3, color='yellow', edgecolor='red', label=type, lw=3))\n\n self.axes.set_ylim([0, 1])\n for bar_container in bar:\n for b in bar_container:\n height = b.get_height()\n self.axes.text(b.get_x() + b.get_width() / 2, b.get_height() + 0.01, '%.2f' % height, ha='center',\n va='bottom')\n\n def draw1(self):\n pro_type = [\"单选题\", \"判断题\"]\n bar = []\n\n for type in pro_type:\n time, correct = list.current_record[type][0], list.current_record[type][1]\n if time == 0:\n rate = 0\n else:\n rate = correct / time\n\n x = [type]\n y = [rate]\n bar.append(self.axes.bar(x, y, alpha=0.5, width=0.3, color='blue', edgecolor='green', label=type, lw=3))\n\n 
self.axes.set_ylim([0, 1])\n for bar_container in bar:\n for b in bar_container:\n height = b.get_height()\n self.axes.text(b.get_x() + b.get_width() / 2, b.get_height() + 0.01, '%.2f' % height, ha='center',\n va='bottom')\n\n def draw2(self, name):\n data_path = \"./data/user/\"\n current_path = data_path + str(name)\n current_excel = current_path + \"/log.xlsx\"\n\n df = pd.read_excel(current_excel)\n cnt = df.shape[0]\n x = []\n y = []\n bar = []\n\n for i in range(max(0, cnt - 4), cnt):\n line = df.loc[i].values\n x.append(line[1])\n y.append(line[8] / line[7])\n\n bar.append(self.axes.bar(x, y, alpha=0.5, width=0.3, color='blue', edgecolor='green', lw=3))\n self.axes.set_ylim([0, 1])\n for bar_container in bar:\n for b in bar_container:\n height = b.get_height()\n self.axes.text(b.get_x() + b.get_width() / 2, b.get_height() + 0.01, '%.2f' % height, ha='center',\n va='bottom')\n\n def draw3(self, name):\n data_path = \"./data/user/\"\n current_path = data_path + str(name)\n current_excel = current_path + \"/log.xlsx\"\n\n df = pd.read_excel(current_excel)\n cnt = df.shape[0]\n x = []\n y = []\n plot = []\n\n for i in range(max(0, cnt - 4), cnt):\n line = df.loc[i].values\n x.append(line[1])\n y.append(line[6] + line[7])\n\n plot.append(self.axes.plot(x, y))\n\n def draw4(self, name):\n data_path = \"./data/user/\"\n current_path = data_path + str(name)\n current_excel = current_path + \"/log.xlsx\"\n\n labels = ['单选题', '判断题', '简答题']\n tot = 0\n tot_singe = 0\n tot_judge = 0\n tot_easy = 0\n data = pd.read_excel(current_excel)\n for i in range(data.shape[0]):\n line = data.loc[i].values\n tot_singe += line[2]\n tot_judge += line[4]\n tot_easy += line[6]\n tot += line[2] + line[4] + line[6]\n\n size = [tot_singe, tot_judge, tot_easy]\n explode = (0.1, 0.1, 0.1)\n self.axes.pie(size, explode, labels=labels, autopct='%1.1f%%')\n","sub_path":"model/figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382648813","text":"from msvcrt import getch\n\n\nclass SaveMenuOldFashioned:\n @staticmethod\n def save_menu(select, question):\n while True:\n if select == 1:\n print(\"\\r\" + question + \" yes\"+ \" --> no \", end='')\n elif select == 2:\n print(\"\\r\" + question + \" --> yes\" + \" no\", end='')\n\n button = ord(getch())\n if button == 224:\n select += 1\n if select == 3:\n select = 1\n elif button == 13:\n if select == 1:\n return False\n elif select == 2:\n return True\n","sub_path":"save_menu_old.py","file_name":"save_menu_old.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144386262","text":"from django.urls import path\nfrom . 
import views\nfrom .views import CreateWorker \n\nurlpatterns = [\n path('', views.workers_overview, name='workers_overview'),\n path('najdi/', views.workers_overview_search, name='workers_overview_search'),\n path('/', views.worker_details, name='worker_details'),\n path('/izbrisi/', views.delete_worker, name='delete_worker'),\n path('novdelavec/', CreateWorker.as_view(), name='create_worker'),\n path('/uredi/', views.edit_worker_info, name=\"edit_worker_info\"),\n path('test/', views.test),\n path('/dodajdelavca/', views.project_assign_worker),\n path('odstranidelavca//', views.unassign_worker),\n]","sub_path":"workers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"225275911","text":"from collections import deque\n\nh, w = map(int, input().split())\ns = [list(input()) for _ in range(h)]\nwhite_count = 0\nfor i in range(h):\n for j in range(w):\n if s[i][j] == '.':\n white_count += 1\n\nd = [[float(\"inf\")] * w for _ in range(h)]\n\nque = deque()\nque.append((0, 0))\nd[0][0] = 1\nwhile que:\n x, y = que.popleft()\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n nx = x + dx\n ny = y + dy\n if 0 <= nx < w and 0 <= ny < h and s[ny][nx] == '.' and d[ny][nx] == float(\"inf\"):\n que.append((nx, ny))\n d[ny][nx] = d[y][x] + 1\n\nres = d[h-1][w-1]\nif res == float(\"inf\"):\n print(-1)\nelse:\n print(white_count - res)\n","sub_path":"Python_codes/p03436/s159186197.py","file_name":"s159186197.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477028007","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 26 19:57:21 2016\n\n@author: lshu0\n\"\"\"\n\nimport pandas as pd\npath='C:/Users/lshu0/Documents/apparel tracker uploader/Store List and Item List Summary- Kmart.xlsx'\nxls = pd.ExcelFile(path)\nsheets=xls.sheet_names\n\nitems=[]\nstores=[]\n\nfor sheet in sheets:\n df=xls.parse(sheet)\n test_nm=sheet.split('-')[0].strip()\n cname=[i.split('.')[0].split(' ')[1] for i in df.columns]\n df.columns=[cname,df.iloc[0]]\n df.columns.names=['week_no','info']\n df=df.drop(0)\n df2=df.stack('week_no')\n df2.reset_index(level=1, inplace=True)\n \n store_list = df2[['week_no','Test Store List']]\n store_list.dropna(subset = ['Test Store List'],inplace=True)\n store_list['test_nm'] = test_nm\n \n item_list=df2[['week_no','Div','Item']]\n item_list.dropna(subset = ['Div','Item'], how='all',inplace=True)\n item_list['test_nm'] = test_nm\n \n items.append(item_list)\n stores.append(store_list)\n \nall_items=pd.concat(items)\nall_items.drop_duplicates(inplace=True)\nall_stores=pd.concat(stores)\nall_stores.drop_duplicates(inplace=True)\n\nall_items.to_csv('C:/Users/lshu0/Documents/apparel tracker uploader/all_item_kmart.txt', header=None, index=None, sep=',')\nall_stores.to_csv('C:/Users/lshu0/Documents/apparel tracker uploader/all_stores_kmart.txt',header=None, index=None, sep=',')","sub_path":"1- Tracker to Teradata/test tracker_Kmart.py","file_name":"test tracker_Kmart.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621135331","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.http import require_http_methods\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm \n\nfrom django.contrib.auth import login as auth_login, 
logout as auth_logout\n# functions that log the user in automatically / the names clash, so import them under different names (otherwise our functions would end up calling themselves recursively.) \n\n\n@require_http_methods(['GET', 'POST'])\ndef signup(request): # new user\n    if request.user.is_authenticated: # block users who are already logged in from signing up\n        return redirect('sns:posting_list')\n\n    # means the user has submitted signup data\n    if request.method == 'POST': \n        form = UserCreationForm(request.POST) # an exam sheet the user has filled in / the signup form\n        if form.is_valid(): # grading\n            user = form.save()\n            return redirect('sns:posting_list')\n        # else:\n        #     return render(request, 'accounts/signup.html', {\n        #         'form': form, # the failed form / a flunked exam sheet\n        #     })\n    \n    else: # means the user is asking for the signup HTML\n        form = UserCreationForm() # an exam sheet with no answers filled in yet. a fresh sheet\n\n    return render(request, 'accounts/signup.html', {\n        'form': form, # a new exam sheet comes out.\n    })\n\n\n@require_http_methods(['GET', 'POST'])\ndef login(request):\n    if request.user.is_authenticated: # if the user is already logged in, ignore\n        return redirect('sns:posting_list') \n\n    if request.method == 'POST':\n        form = AuthenticationForm(request, request.POST) # the form that authenticates the user / only this form receives its data a bit differently. \n        if form.is_valid():\n\n            # sets the cookie and the session in one go. \n            auth_login(request, form.get_user()) # form.get_user: pull out the user who passed form validation == user \n            return redirect('sns:posting_list') \n\n            # cookie setup\n            # response = redirect('sns:posting_list')\n            # response.set_cookie(key='nickname', value='idot', max_age=5) # no return; it mutates the original. # cookie setup => the nickname is set and disappears after 5 seconds. \n            # return response\n    \n    else:\n        form = AuthenticationForm()\n    return render(request, 'accounts/login.html', {\n        'form': form,\n    })\n\n\ndef logout(request):\n    auth_logout(request)\n    return redirect('sns:posting_list')\n","sub_path":"06_django_advance/03_IMAGE_UPLOAD/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482618343","text":"nos = int(input())\nsteps = [int(x) for x in input().split()]\nnosw = 0\nindx = [i for i,v in enumerate(steps) if v == min(steps)] + [nos]\n\nprint(len(indx) - 1)\nfor i in range(1, len(indx)):\n    print(indx[i] - indx[i-1], end=\" \")\n\n\n\n\n","sub_path":"CodeForces/496_3_A_nos_stairways.py","file_name":"496_3_A_nos_stairways.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651877822","text":"#!/usr/bin/env python\n\nimport asyncio\nimport logging\nimport shutil\nimport tempfile\nimport threading\nimport time\nimport traceback\n\nfrom typing import TYPE_CHECKING\nfrom aiohttp import web\n\nfrom opentrons.config import CONFIG\nfrom .rpc import RPCServer\nfrom .http import HTTPServer\nfrom opentrons.api.routers import MainRouter\nfrom opentrons.protocol_api.legacy_wrapper import api\n\nif TYPE_CHECKING:\n    from opentrons.hardware_control.types import HardwareAPILike  # noqa(F501)\n\nlog = logging.getLogger(__name__)\n\n\n@web.middleware\nasync def error_middleware(request, handler):\n    try:\n        response = await handler(request)\n    except web.HTTPNotFound:\n        log.exception(\"Exception handler for request {}\".format(request))\n        data = {\n            'message': 'File was not found at {}'.format(request)\n        }\n        response = web.json_response(data, status=404)\n    except Exception as e:\n        log.exception(\"Exception in handler for request {}\".format(request))\n        data = {\n            'message': 'An unexpected error occurred - {}'.format(e),\n            'traceback': traceback.format_exc()\n        }\n        response = web.json_response(data, status=500)\n\n    
return response\n\n\nclass ThreadedAsyncLock:\n \"\"\" A thread-safe async lock\n\n This is required to properly lock access to motion calls, which are\n a) done in async contexts (rpc methods and http methods) and should\n block as little as possible\n b) done from several different threads (rpc workers and main thread)\n\n This is a code wart that needs to be removed. It can be removed by\n - making smoothie async so we don't need worker threads anymore\n - removing said threads\n\n This object can be used as either an asynchronous context manager using\n ``async with`` or a synchronous context manager using ``with``.\n \"\"\"\n\n def __init__(self):\n self._thread_lock = threading.RLock()\n\n async def __aenter__(self):\n pref = f\"[ThreadedAsyncLock tid {threading.get_ident()} \"\\\n f\"task {asyncio.Task.current_task()}] \"\n log.debug(pref + 'will acquire')\n then = time.perf_counter()\n while not self._thread_lock.acquire(blocking=False):\n await asyncio.sleep(0.1)\n now = time.perf_counter()\n log.debug(pref + f'acquired in {now-then}s')\n\n async def __aexit__(self, exc_type, exc, tb):\n log.debug(f\"[ThreadedAsyncLock tid {threading.get_ident()} \"\n f\"task {asyncio.Task.current_task()}] will release\")\n self._thread_lock.release()\n\n def __enter__(self):\n self._thread_lock.acquire()\n\n def __exit__(self, exc_type, exc, tb):\n self._thread_lock.release()\n\n\n# Support for running using aiohttp CLI.\n# See: https://docs.aiohttp.org/en/stable/web.html#command-line-interface-cli\ndef init(hardware: 'HardwareAPILike' = None,\n loop: asyncio.AbstractEventLoop = None):\n \"\"\"\n Builds an application and sets up RPC and HTTP servers with it.\n\n :param loop: A specific aiohttp event loop to use. If not specified, the\n server will use the default event loop.\n :param hardware: The hardware manager or hardware adapter to connect to.\n If not specified, the server will use\n :py:attr:`opentrons.hardware`\n \"\"\"\n # Try to migrate containers from database to v2 format\n api.maybe_migrate_containers()\n app = web.Application(middlewares=[error_middleware])\n app['com.opentrons.hardware'] = hardware\n app['com.opentrons.motion_lock'] = ThreadedAsyncLock()\n app['com.opentrons.rpc'] = RPCServer(\n app, MainRouter(\n hardware, lock=app['com.opentrons.motion_lock'], loop=loop))\n app['com.opentrons.response_file_tempdir'] = tempfile.mkdtemp()\n app['com.opentrons.http'] = HTTPServer(app, CONFIG['log_dir'])\n\n async def dispose_response_file_tempdir(app):\n temppath = app.get('com.opentrons.response_file_tempdir')\n if temppath:\n try:\n shutil.rmtree(temppath)\n except Exception:\n log.exception(f\"failed to remove app temp path {temppath}\")\n\n app.on_shutdown.append(dispose_response_file_tempdir)\n app.on_shutdown.freeze()\n return app\n\n\ndef run(hardware: 'HardwareAPILike',\n hostname=None,\n port=None,\n path=None,\n loop=None):\n \"\"\"\n The arguments are not all optional. 
Either a path or hostname+port should\n be specified; you have to specify one.\n \"\"\"\n if path:\n log.debug(\"Starting Opentrons server application on {}\".format(\n path))\n hostname, port = None, None\n else:\n log.debug(\"Starting Opentrons server application on {}:{}\".format(\n hostname, port))\n path = None\n\n web.run_app(init(hardware=hardware), host=hostname, port=port, path=path)\n","sub_path":"api/src/opentrons/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471860316","text":"# -*- coding: utf-8 -*-\n\nimport unittest\nimport os\nimport time\nimport hashlib\nimport hmac\n\nfrom algoliasearch import algoliasearch\n\n\ndef safe_index_name(name):\n if 'TRAVIS' not in os.environ:\n return name\n job = os.environ['TRAVIS_JOB_NUMBER']\n return '%s_travis-%s' % (name, job)\n\n\nclass ClientTest(unittest.TestCase):\n def setUp(self):\n try:\n self.name = unichr(224) + 'lgol?' + unichr(224) + '-python'\n self.name2 = unichr(224) + 'lgol?' + unichr(224) + '2-python'\n self.name_obj = unichr(224) + '/go/?' + unichr(224) + '2-python'\n except Exception:\n self.name = 'àlgol?à-python'\n self.name2 = 'àlgol?à2-python'\n self.name_obj = 'à/go/?à2-python'\n\n self.client = algoliasearch.Client(\n os.environ['ALGOLIA_APPLICATION_ID'],\n os.environ['ALGOLIA_API_KEY'])\n index_name = safe_index_name(self.name)\n try:\n self.client.delete_index(index_name)\n except algoliasearch.AlgoliaException:\n pass\n self.index = self.client.init_index(index_name)\n\n def tearDown(self):\n index_name = safe_index_name(self.name)\n try:\n self.client.delete_index(index_name)\n except algoliasearch.AlgoliaException:\n pass\n index_name2 = safe_index_name(self.name2)\n try:\n self.client.delete_index(index_name2)\n except algoliasearch.AlgoliaException:\n pass\n\n def test_secured_keys(self):\n self.assertEquals(\n '1fd74b206c64fb49fdcd7a5f3004356cd3bdc9d9aba8733656443e64daafc417',\n hmac.new('my_api_key'.encode('utf-8'), '(public,user1)'.encode(\n 'utf-8'), hashlib.sha256).hexdigest())\n key = self.client.generate_secured_api_key('my_api_key',\n '(public,user1)')\n self.assertEquals(key, hmac.new('my_api_key'.encode('utf-8'),\n '(public,user1)'.encode('utf-8'),\n hashlib.sha256).hexdigest())\n key = self.client.generate_secured_api_key('my_api_key',\n '(public,user1)', 42)\n self.assertEquals(key, hmac.new('my_api_key'.encode('utf-8'),\n '(public,user1)42'.encode('utf-8'),\n hashlib.sha256).hexdigest())\n key = self.client.generate_secured_api_key('my_api_key', ['public'])\n self.assertEquals(key, hmac.new('my_api_key'.encode(\n 'utf-8'), 'public'.encode('utf-8'), hashlib.sha256).hexdigest())\n key = self.client.generate_secured_api_key(\n 'my_api_key', ['public', ['premium', 'vip']])\n self.assertEquals(key, hmac.new('my_api_key'.encode('utf-8'),\n 'public,(premium,vip)'.encode('utf-8'),\n hashlib.sha256).hexdigest())\n\n def test_disjunctive_faceting(self):\n self.index.set_settings(\n {'attributesForFacetting': ['city', 'stars', 'facilities']})\n task = self.index.add_objects([{\n 'name': 'Hotel A',\n 'stars': '*',\n 'facilities': ['wifi', 'bath', 'spa'],\n 'city': 'Paris'\n }, {\n 'name': 'Hotel B',\n 'stars': '*',\n 'facilities': ['wifi'],\n 'city': 'Paris'\n }, {\n 'name': 'Hotel C',\n 'stars': '**',\n 'facilities': ['bath'],\n 'city': 'San Francisco'\n }, {\n 'name': 'Hotel D',\n 'stars': '****',\n 'facilities': ['spa'],\n 'city': 'Paris'\n }, {\n 'name': 
'Hotel E',\n 'stars': '****',\n 'facilities': ['spa'],\n 'city': 'New York'\n }, ])\n self.index.wait_task(task['taskID'])\n\n answer = self.index.search_disjunctive_faceting(\n 'h', ['stars', 'facilities'], {'facets': 'city'})\n self.assertEquals(answer['nbHits'], 5)\n self.assertEquals(len(answer['facets']), 1)\n self.assertEquals(len(answer['disjunctiveFacets']), 2)\n\n answer = self.index.search_disjunctive_faceting('h', [\n 'stars', 'facilities'\n ], {'facets': 'city'}, {'stars': ['*']})\n self.assertEquals(answer['nbHits'], 2)\n self.assertEquals(len(answer['facets']), 1)\n self.assertEquals(len(answer['disjunctiveFacets']), 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['*'], 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['**'], 1)\n self.assertEquals(answer['disjunctiveFacets']['stars']['****'], 2)\n\n answer = self.index.search_disjunctive_faceting('h', [\n 'stars', 'facilities'\n ], {'facets': 'city'}, {'stars': ['*'],\n 'city': ['Paris']})\n self.assertEquals(answer['nbHits'], 2)\n self.assertEquals(len(answer['facets']), 1)\n self.assertEquals(len(answer['disjunctiveFacets']), 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['*'], 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['****'], 1)\n\n answer = self.index.search_disjunctive_faceting('h', [\n 'stars', 'facilities'\n ], {'facets': 'city'}, {'stars': ['*', '****'],\n 'city': ['Paris']})\n self.assertEquals(answer['nbHits'], 3)\n self.assertEquals(len(answer['facets']), 1)\n self.assertEquals(len(answer['disjunctiveFacets']), 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['*'], 2)\n self.assertEquals(answer['disjunctiveFacets']['stars']['****'], 1)\n","sub_path":"tests/test_old.py","file_name":"test_old.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362559231","text":"# Define a variable, sum, to keep track of the total sum of numbers that match the given criteria found so far\nsum = 0\n\n# Check each number between 1 and 1000 to see if it matches the given criteria\nfor i in range(1,1000):\n if i%3 == 0 or i%5 == 0:\n sum += i\nprint(sum)\n\n# Returns 233168","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"421822864","text":"from scannerpy import Database, Job, BulkJob, ColumnType, DeviceType\nimport os.path\n\n################################################################################\n# This tutorial shows you how to write and use your own Python custom op. 
#\n################################################################################\n\nwith Database() as db:\n\n # Custom kernels have to be registered with the Scanner runtime, providing their\n # name and input/output types as well as op argument paths.\n cwd = os.path.dirname(os.path.abspath(__file__))\n db.register_op(\n 'MyResize', [('frame', ColumnType.Video)],\n [('resized', ColumnType.Video)],\n proto_path='./resize_pb2.py')\n\n # Custom Python kernels for ops reside in a separate file, here resize_kernel.py.\n db.register_python_kernel('MyResize', DeviceType.CPU,\n cwd + '/resize_kernel.py')\n\n frame = db.ops.FrameInput()\n # Then we use our op just like in the other examples.\n resize = db.ops.MyResize(frame=frame, width=200, height=300)\n output_op = db.ops.Output(columns=[resize])\n job = Job(op_args={\n frame: db.table('example').column('frame'),\n output_op: 'example_resized',\n })\n bulk_job = BulkJob(output=output_op, jobs=[job])\n db.run(bulk_job, force=True)\n","sub_path":"examples/tutorial/05_custom_python_op.py","file_name":"05_custom_python_op.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602073970","text":"import numpy as np\nimport pytest\n\nfrom growth import grow\n\n\n@pytest.fixture\ndef test_settings():\n return grow.COOL_SETTINGS[\"snake\"]\n\n\n@pytest.mark.parametrize(\"n_return\", [1, 2, 3, 4, 5])\ndef test_run_reaction_diffusion(test_settings, n_return):\n \"\"\"Make sure the function actually returns the right number of snapshots\"\"\"\n\n results = grow.run_reaction_diffusion(n=10, n_to_return=n_return, **test_settings)\n\n assert len(results) == n_return\n\n\n@pytest.mark.parametrize(\"grid_size\", [(100, 100), (100, 200), (200, 100)])\ndef test_run_reaction_diffusion_grid_size(test_settings, grid_size):\n\n results = grow.run_reaction_diffusion(\n n=10, grid_size=grid_size, n_to_return=1, **test_settings\n )\n assert results[0][0].shape == grid_size\n\n\ndef test_run_reaction_diffusion_raises_grid_size(test_settings):\n\n mask = np.zeros((10, 11))\n with pytest.raises(AssertionError, match=\"The mask size and grid size don't match\"):\n grow.run_reaction_diffusion(\n n=10, grid_size=(10, 10), mask=mask, n_to_return=1, **test_settings\n )\n\n\ndef test_run_reaction_diffusion_raises_param_length(test_settings):\n\n test_settings.update(dict(dA=np.random.random(11)))\n\n with pytest.raises(\n AssertionError, match=\"the wrong dimensions. 
Should be of length 10\"\n ):\n grow.run_reaction_diffusion(\n n=10, grid_size=(10, 10), n_to_return=1, **test_settings\n )\n","sub_path":"tests/test_grow.py","file_name":"test_grow.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55159995","text":"def main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(name=dict(type='str'), uuid=dict(type='str'), moid=dict(type='str'), folder=dict(type='str'), datacenter=dict(type='str', default='ha-datacenter'), export_dir=dict(type='path', required=True), export_with_images=dict(type='bool', default=False), download_timeout=dict(type='int', default=10))\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_one_of=[['name', 'uuid', 'moid']])\n pyv = VMwareExportVmOvf(module)\n vm = pyv.get_vm()\n if vm:\n vm_facts = pyv.gather_facts(vm)\n vm_power_state = vm_facts['hw_power_status'].lower()\n if (vm_power_state != 'poweredoff'):\n module.fail_json(msg='VM state should be poweredoff to export')\n results = pyv.export_to_ovf_files(vm_obj=vm)\n else:\n module.fail_json(msg='The specified virtual machine not found')\n module.exit_json(**results)","sub_path":"Data Set/bug-fixing-5/d5bff7a87f0d257cc71310e3b3441f1b7628d3ce-
-fix.py","file_name":"d5bff7a87f0d257cc71310e3b3441f1b7628d3ce-
-fix.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176978933","text":"from contract import ArchiveDocContractService\n\nOWNER_ADDRESS = ''\nOWNER_PRI_KEY = ''\n\nSTAFF_ADDRESS = ''\nSTAFF_PRI_KEY = ''\n\nTEST_HASH = ''\nTEST_HASH_LIST = ['',\n '',\n '']\n\n\ndef test_is_owner():\n is_owner = ArchiveDocContractService.is_owner(OWNER_ADDRESS)\n assert is_owner\n print(f'{OWNER_ADDRESS} is owner')\n\n is_owner = ArchiveDocContractService.is_owner(STAFF_ADDRESS)\n assert not is_owner\n print(f'{STAFF_ADDRESS} is not owner')\n\n\ndef test_add_staff():\n tx_id = ArchiveDocContractService.add_staff(OWNER_PRI_KEY,\n STAFF_ADDRESS)\n\n if tx_id is None:\n print('ArchiveDocContractService.add_staff fail')\n return\n else:\n print(f'ArchiveDocContractService.add_staff success, tx_id={tx_id}')\n\n # 1.wait transaction success\n # 2.execute ArchiveDocContractService.is_staff\n # is_staff = ArchiveDocContractService.is_staff(STAFF_ADDRESS)\n # assert is_staff\n\n\ndef test_remove_staff():\n tx_id = ArchiveDocContractService.remove_staff(OWNER_PRI_KEY,\n STAFF_ADDRESS)\n\n if tx_id is None:\n print('ArchiveDocContractService.add_staff fail')\n return\n else:\n print(f'ArchiveDocContractService.add_staff success, tx_id={tx_id}')\n\n # 1.wait transaction success\n # 2.execute ArchiveDocContractService.is_staff\n # is_staff = ArchiveDocContractService.is_staff(STAFF_ADDRESS)\n # assert not is_staff\n\n\ndef test_is_staff():\n is_staff = ArchiveDocContractService.is_staff(OWNER_ADDRESS)\n print(f'{OWNER_ADDRESS} = {is_staff}')\n\n is_staff = ArchiveDocContractService.is_staff(STAFF_ADDRESS)\n print(f'{STAFF_ADDRESS} = {is_staff}')\n\n\ndef test_add_hash():\n tx_id = ArchiveDocContractService.add_hash(OWNER_PRI_KEY,\n TEST_HASH)\n if tx_id is None:\n print('ArchiveDocContractService.add_hash fail')\n return\n else:\n print(f'ArchiveDocContractService.add_hash success, tx_id={tx_id}')\n\n\ndef test_add_multiple_hash():\n tx_id = ArchiveDocContractService.add_multiple_hash(OWNER_PRI_KEY,\n TEST_HASH_LIST)\n if tx_id is None:\n print('ArchiveDocContractService.add_multiple_hash fail')\n return\n else:\n print(f'ArchiveDocContractService.add_multiple_hash success, tx_id={tx_id}')\n\n\ndef test_is_exist():\n is_exist = ArchiveDocContractService.is_exist('')\n print(f'{TEST_HASH} : {is_exist}')\n\n\nif __name__ == '__main__':\n test_add_multiple_hash()\n","sub_path":"Python/blockchain/ArchiveDocContract/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242170787","text":"# Copyright (c) 2020 Aiven, Helsinki, Finland. 
https://aiven.io/\n\nimport base64\nimport functools\nimport hashlib\nimport json\nimport logging\nimport os\nimport threading\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom datetime import datetime\nfrom tempfile import TemporaryDirectory, NamedTemporaryFile\nfrom typing import Collection, Union, BinaryIO, Dict, Iterable\nfrom urllib.parse import urlparse\n\nimport boto3\nimport botocore.exceptions\nimport time\n\nfrom rpm_s3_mirror.repository import RepodataSection, Package\nfrom rpm_s3_mirror.statsd import StatsClient\nfrom rpm_s3_mirror.util import get_requests_session, validate_checksum, download_file\n\nlock = threading.RLock()\n\n\ndef md5_string(string):\n return hashlib.md5(string.encode(\"utf-8\")).hexdigest()\n\n\nclass S3DirectoryNotFound(Exception):\n def __init__(self, response):\n super().__init__()\n self.response = response\n\n\nclass S3:\n def __init__(\n self,\n aws_access_key_id: str,\n aws_secret_access_key: str,\n bucket_name: str,\n bucket_region: str,\n stats: StatsClient,\n max_workers: int = 8,\n scratch_dir: str = \"/var/tmp/\",\n ):\n self.aws_access_key_id = aws_access_key_id\n self.aws_secret_access_key = aws_secret_access_key\n self.bucket_name = bucket_name\n self.bucket_region = bucket_region\n self.stats = stats\n self.max_workers = max_workers\n self.scratch_dir = scratch_dir\n self._s3 = None\n self.session = get_requests_session()\n self.log = logging.getLogger(type(self).__name__)\n\n def sync_packages(\n self,\n base_url: str,\n upstream_repodata: Dict[str, RepodataSection],\n upstream_packages: Collection[Package],\n skip_existing: bool = False,\n ):\n with TemporaryDirectory(prefix=self.scratch_dir) as temp_dir:\n self._sync_objects(temp_dir, upstream_packages, skip_existing=skip_existing)\n synced_bytes = sum((package.package_size for package in upstream_packages))\n self.stats.gauge(\n metric=\"s3_mirror_sync_bytes\",\n value=synced_bytes,\n tags={\"repo\": urlparse(base_url).path},\n )\n self.stats.gauge(\n metric=\"s3_mirror_sync_packages\",\n value=len(upstream_packages),\n tags={\"repo\": urlparse(base_url).path},\n )\n self._sync_objects(temp_dir=temp_dir, repo_objects=upstream_repodata.values(), skip_existing=skip_existing)\n\n def overwrite_repomd(self, base_url):\n with TemporaryDirectory(prefix=self.scratch_dir) as temp_dir:\n url = f\"{base_url}repodata/repomd.xml\"\n repomd_xml = download_file(temp_dir=temp_dir, url=url, session=self.session)\n path = urlparse(url).path\n self.log.info(\"Overwriting repomd.xml\")\n self.put_object(repomd_xml, path, cache_age=0)\n\n def archive_repomd(self, base_url, location):\n self.log.debug(\"Archiving repomd.xml to %s\", location)\n url = f\"{base_url}repodata/repomd.xml\"\n self.copy_object(source=urlparse(url).path, destination=location)\n\n def put_manifest(self, location, manifest):\n self.log.info(\"Writing manifest to: %s\", location)\n manifest_json = json.dumps(manifest._asdict(), default=lambda x: x.isoformat(), indent=2)\n with NamedTemporaryFile(prefix=self.scratch_dir) as f:\n f.write(manifest_json.encode(\"utf-8\"))\n f.flush()\n self.put_object(local_path=f.name, key=location)\n\n def repomd_update_time(self, base_url: str) -> datetime:\n url = f\"{base_url}repodata/repomd.xml\"\n response = self._head_object(key=self._trim_key(remote_path=urlparse(url).path))\n return response[\"LastModified\"]\n\n def _sync_objects(self, temp_dir: str, repo_objects: Iterable[Package], skip_existing: bool):\n sync = functools.partial(self._sync_object, temp_dir, skip_existing)\n start 
= time.time()\n self.log.info(\"Beginning sync of %s objects.\", len(repo_objects))\n with ThreadPoolExecutor(max_workers=self.max_workers) as executor:\n # We iterate through the generator to pick up and propagate any Exceptions\n for _ in executor.map(sync, repo_objects):\n pass\n elapsed = int(time.time() - start)\n self.log.info(\"Completed syncing %s objects in %s seconds\", len(repo_objects), elapsed)\n\n # pylint: disable=unsubscriptable-object\n def _sync_object(self, temp_dir: str, skip_existing: bool, repo_object: Union[Package, RepodataSection]):\n # When bootstrapping, support backfilling two versions of problematic packages (see below)\n workaround_destination = repo_object.destination.replace(\"+\", \" \")\n if skip_existing:\n if (\"+\" in repo_object.destination and self._object_exists(workaround_destination)) \\\n and self._object_exists(repo_object.destination):\n self.log.debug(\"SKIP: %s\", repo_object.destination)\n return\n\n package_path = download_file(temp_dir=temp_dir, url=repo_object.url, session=self.session)\n validate_checksum(package_path, checksum_type=repo_object.checksum_type, checksum=repo_object.checksum)\n self.put_object(package_path, repo_object.destination)\n if \"+\" in repo_object.destination:\n # Old versions of DNF did not urlencode plus signs in urls, and s3 always does\n # so we need to upload two versions of these packages, one with the + sign unmodified\n # for newer versions of DNF, and one with the + sign replaced with a space for older\n # versions as s3 interprets a space as a + sign.\n # https://bugzilla.redhat.com/show_bug.cgi?id=1817130\n # https://forums.aws.amazon.com/thread.jspa?threadID=55746\n self.log.debug(\n \"Uploading workaround version of package: %s -> %s\", repo_object.destination, workaround_destination\n )\n self.put_object(package_path, key=workaround_destination)\n try:\n os.unlink(package_path)\n except Exception as e: # pylint: disable=broad-except\n self.log.debug(\"Failed to unlink %s: %s\", package_path, e)\n\n def put_object(self, local_path: str, key: str, cache_age=31536000):\n with open(local_path, \"rb\") as package_fp:\n # We need to seek after this call so boto gets the file pointer at the beginning\n md5_header = self._build_md5_header(fp=package_fp)\n package_fp.seek(0)\n\n key = self._trim_key(key)\n self.log.debug(\"PUT: %s\", key)\n self._client.put_object(\n ACL=\"public-read\",\n Bucket=self.bucket_name,\n CacheControl=f\"max-age={cache_age}\",\n Key=key,\n Body=package_fp,\n ContentMD5=md5_header\n )\n\n def delete_subdirectory(self, subdir):\n objects = []\n for s3_object in self.list(subdir):\n objects.append({\"Key\": s3_object[\"Key\"]})\n self._client.delete_objects(Bucket=self.bucket_name, Delete={\"Objects\": objects, \"Quiet\": True})\n\n def exists(self, prefix):\n try:\n self.list(prefix)\n except S3DirectoryNotFound:\n return False\n return True\n\n def list(self, prefix):\n response = self._client.list_objects_v2(Bucket=self.bucket_name, Prefix=prefix)\n if response.get(\"KeyCount\", 0) == 0:\n raise S3DirectoryNotFound(response=response)\n return response[\"Contents\"]\n\n def copy_object(self, source, destination):\n source, destination = self._trim_key(source), self._trim_key(destination)\n self.log.debug(\"COPY: %s -> %s\", source, destination)\n self._client.copy_object(\n Bucket=self.bucket_name,\n CopySource={\n \"Bucket\": self.bucket_name,\n \"Key\": source,\n },\n ACL=\"public-read\",\n Key=destination,\n CacheControl=\"max-age=0\"\n )\n\n def _object_exists(self, key: str) -> bool:\n try:\n self._head_object(key=self._trim_key(key))\n return True\n except botocore.exceptions.ClientError as e:\n if int(e.response[\"Error\"][\"Code\"]) != 404:\n raise\n return False\n\n def _head_object(self, key: str):\n self.log.debug(\"HEAD: %s\", key)\n return self._client.head_object(\n Bucket=self.bucket_name,\n Key=key,\n )\n\n def _trim_key(self, remote_path: str) -> str:\n # Strip the leading / if present otherwise we end up\n # with an extra root directory in s3 which we don't want.\n if remote_path.startswith(\"/\"):\n remote_path = remote_path[1:]\n return remote_path\n\n @property\n def _client(self):\n if self._s3 is None:\n # The boto3 client call is not threadsafe, so only allow calling it from a single thread at a time\n with lock:\n self._s3 = boto3.client(\n \"s3\",\n region_name=self.bucket_region,\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n )\n return self._s3\n\n def _build_md5_header(self, fp: BinaryIO) -> str:\n \"\"\"\n ContentMD5 (string) -- The base64-encoded 128-bit MD5 digest of the message (without the headers)\n according to RFC 1864. This header can be used as a message integrity check to verify that the data is the same\n data that was originally sent\n https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.put_object\n \"\"\"\n h = hashlib.md5()\n data = fp.read(1000000)\n while data:\n h.update(data)\n data = fp.read(1000000)\n return base64.b64encode(h.digest()).decode(\"utf-8\")\n","sub_path":"rpm_s3_mirror/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":10065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"166877723","text":"# CSC 110 12/5/13\n# James Collins\n# Homework 8\n\n\n# This program builds a one-dimensional cellular automaton\n# that iterates a number of times based on user input. The rules are:\n# 1. Initially, all cells are alive.\n# 2. The new state of cell[j] will be alive if the old states of cell[j-1],\n# cell[j], and cell[j+1] were all the same (either all alive or all dead),\n# 3. Otherwise, the new state of cell[j] will be dead.\n\n# GLOBAL CONSTANTS\n\nCELLS = 65 # Number of characters in each iteration\nALIVE = '+' # Icon that will print for 'living' cells\nDEAD = '-' # and for 'dead' cells\nBOUNDARY_CELL = [ALIVE] # Controls state of cells outside the generating bounds\n\n# The initial state constant is a 2D list. The first element is the tick\n# number for the iterations and the second is a list containing CELLS number\n# of alive cells.\n\nINITIAL_STATE = [[0,[ALIVE] * CELLS]]\n\n# FUNCTION DEFINITIONS\n\n# The iterations() function gets the number of times to iterate from\n# the user and validates the input. It then returns that input as an int.\n\ndef iterations():\n \n invalid = True # Flag variable\n\n # The validation is done with a while loop to catch non-positive int\n # values and a try/except suite to catch value errors from non-int inputs\n \n while invalid:\n \n try: \n iterate = int(input(\"Please enter the number of simulation steps: \"))\n \n if iterate <= 0: \n print(\"You must enter a positive integer value. Please try again.\")\n \n else:\n invalid = False\n \n except ValueError:\n print(\"You must enter a positive integer value. Please try again.\")\n \n return iterate\n\n# lifeOrDeath() is the processing function. It takes a 2D list as input.\n# The first element of the list is unimportant, but the second element should\n# be a 1D list of CELLS elements. The function enacts the rules of the\n# automaton with a for loop based on the number held by the CELLS constant\n# and a series of decision structures. It returns a 1D list of CELLS length\n# containing the transformed cells.\n\ndef lifeOrDeath(oldState):\n \n cellTransform = [] # Creates empty list to hold new values\n \n for index in range(CELLS): # Loops over every position by index \n cell = oldState[1][index] # Holds value of cell of interest for comparison\n\n # List left boundary check. The boundary cell value itself (not the\n # one-element list) is compared, so the off-grid neighbor counts as alive.\n \n if index == 0: \n rNeighbor = oldState[1][index+1] # Looks at cell to the right\n \n if cell == BOUNDARY_CELL[0] and cell == rNeighbor:\n cellTransform += [ALIVE] \n else:\n cellTransform += [DEAD]\n\n # Middle bulk check for non-border cells\n \n elif index < CELLS-1:\n lNeighbor = oldState[1][index-1] # Looks at cell to the left\n rNeighbor = oldState[1][index+1] # Looks at cell to the right\n \n if cell == lNeighbor and cell == rNeighbor:\n cellTransform += [ALIVE]\n else:\n cellTransform += [DEAD]\n\n # List right boundary check.\n \n else:\n lNeighbor = oldState[1][index-1] # Looks at cell to the left\n \n if cell == lNeighbor and cell == BOUNDARY_CELL[0]:\n cellTransform += [ALIVE]\n else:\n cellTransform += [DEAD]\n \n return cellTransform\n\n# The cellOutput() function takes a 1D list of 2D lists as input. Each 2D\n# list element contains the iteration number in the first index and a 1D list\n# of the cell states in the second index. The function prints the iteration\n# number, a tab, and then the value of each cell in the iteration.\n\ndef cellOutput(collection):\n \n for state in collection: # Loops through top level list and prints iteration number\n print(state[0], end = '\\\t')\n \n for cell in state[1]: # Loops through nested list and prints each cell value\n print(cell, end='')\n \n print()\n \n \n# The main() function contains the primary logic.\n\ndef main():\n\n statesCollection = []+INITIAL_STATE # Collects a 2D list for every iteration\n steps = iterations() # Gets user input\n \n print()\n\n # Loops over the range of the user input\n \n for num in range(steps):\n numState = statesCollection[num] # Extracts the last iteration from the collection\n newCells = lifeOrDeath(numState) # Generates the new cell states\n newState = [[num+1]+[newCells]] # Creates list of iteration number and new state\n statesCollection += newState # Appends new iteration to the collection\n \n cellOutput(statesCollection) # Passes the collection to the output function\n\n\n# CALL TO MAIN\n\nmain() \n \n","sub_path":"CSC110/Week9/CollinsHW8.py","file_name":"CollinsHW8.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"366498669","text":"from odoo import api, exceptions, fields, models, _\nimport datetime\n\n\nclass IRNControl(models.Model):\n _name = \"irn.control\"\n _description = \"IRN Control\"\n _order = 'id desc'\n\n scan = fields.Char('PO Scan')\n product_scan = fields.Char('Product Scan')\n warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse Location', required=True)\n irn_number = fields.Char('IRN Number')\n po_number = fields.Char('PO Number')\n po_id = fields.Integer('Purchase ID')\n remark = fields.Text('Remark')\n received_by = fields.Many2one('res.users', 'Received By', default=lambda self: self.env.uid)\n qc_pushed = fields.Boolean('QC Pushed')\n date_confirm = fields.Datetime('Confirmation Date', select=False)\n state = fields.Selection([\n ('full_received', 'Full Received'),\n ('partial_received', 'Partial Received'),\n\n ], 'Status', copy=False, help=\"Gives the status of the IRN control\", select=True)\n irn_line = fields.One2many('irn.control.line', 'irn_line_id', 'IRN Line', required=True)\n\n def confirm_wms_inbound_irn(self):\n\n # check if already irn pushed\n if not self.qc_pushed:\n qc_data = {\n 'irn_number': str(self.irn_number),\n 'irn_id': self.id,\n 'warehouse_id': self.warehouse_id.id,\n 'po_number': str(self.po_number),\n 'po_id': self.po_id,\n 'remark': str(self.remark),\n 'received_by': self.received_by.id\n }\n\n qc_line_list = list()\n\n for line_item in self.irn_line:\n qc_line_list.append([0, False,{\n 'product': line_item.product,\n 'product_id': line_item.product_id,\n 'product_ean': line_item.product_ean,\n 'description': line_item.remark,\n 'purchase_quantity': line_item.ordered_quantity,\n 'received_quantity': line_item.received_quantity,\n 'accepted_quantity': 0,\n 'pending_quantity': line_item.received_quantity,\n }])\n\n qc_data['qc_line'] = qc_line_list\n\n qc_env = self.env['quality.control']\n saved_qc_id = qc_env.create(qc_data)\n\n for single_id in self:\n qc_pushed_update_query = \"UPDATE irn_control SET qc_pushed=TRUE, date_confirm='{0}' \" \\\n \"WHERE id={1}\".format(str(fields.datetime.now()), single_id.id)\n self._cr.execute(qc_pushed_update_query)\n self._cr.commit()\n\n qc_pushed_update_query_irn_line = \"UPDATE irn_control_line SET qc_pushed=TRUE WHERE \" \\\n \"irn_line_id={0}\".format(single_id.id)\n self._cr.execute(qc_pushed_update_query_irn_line)\n self._cr.commit()\n\n # # update irn end-to-end\n # self.env['inb.end.to.end'].irn_confirm_update(str(self.po_number),\n # self.po_id, str(fields.datetime.now()))\n\n return True\n\n def _check_if_already_irned(self, po_number):\n\n # need to check on the po already IRNed\n irn_state = 'new'\n\n irn_obj = self.search([('po_number', '=', po_number)])\n\n for irn in irn_obj:\n if str(irn.state) == 'full_received':\n irn_state = 'full_received'\n break\n elif str(irn.state) == 'partial_received':\n irn_state = 'partial_received'\n else:\n irn_state = 'new'\n\n return irn_state\n\n def _po_confirmation(self, po):\n\n po_env = self.env['purchase.order']\n po_obj = po_env.search([('name', '=', po)])\n po_state = str(po_obj.state)\n return po_state\n\n def _check_full_or_partial_received(self, record, po_number):\n full_or_partial_rec = False\n\n for line_item in record.irn_line:\n\n line_received_qty = self._check_irn_line_received_quantity(po_number, line_item.product_id)\n\n if line_item.ordered_quantity != line_received_qty:\n full_or_partial_rec = False\n break\n else:\n full_or_partial_rec = True\n\n return full_or_partial_rec\n\n # -------------------------------------------------------------\n def _process_partial_received_order(self, po_number):\n\n partial_received_product_id_list = list()\n partial_rec_prod_list = list()\n\n irn_obj = self.search([('po_number', '=', po_number), ('state', '=', 'partial_received')])\n\n for irn in irn_obj:\n for product in irn.irn_line:\n if product.state == 'partially_received' and product.product_id not in partial_received_product_id_list:\n partial_received_product_id_list.append(product.product_id)\n\n for pr_product_id in partial_received_product_id_list:\n pr_product = self._process_partial_received_product(po_number, pr_product_id)\n if len(pr_product.keys()) > 0:\n partial_rec_prod_list.append(pr_product)\n\n # return list of dictionary\n return partial_rec_prod_list\n\n def _process_partial_received_product(self, po_number, product_id):\n\n irn_obj = self.search([('po_number', '=', po_number)])\n irn_line_env = self.env['irn.control.line']\n product_received_quantity = self._check_irn_line_received_quantity(po_number, product_id)\n product_name = ''\n ordered_quantity = 0\n\n ###########################################\n for irn in irn_obj:\n irn_line_obj = irn_line_env.search([('irn_line_id', '=', irn.id),('product_id', '=', product_id)])\n for product in irn_line_obj:\n product_name = product.product\n product_id = product_id\n ordered_quantity = product.ordered_quantity\n ###########################################\n\n if ordered_quantity > product_received_quantity:\n\n product_env = self.env['product.product']\n product_obj = product_env.search([('id', '=', product_id)])\n\n product_dict = {\n 'product': product_name,\n 'product_id': product_id,\n 'product_ean': str(product_obj.ean13) if product_obj.ean13 else '',\n 'ordered_quantity': ordered_quantity,\n 'received_quantity': ordered_quantity - product_received_quantity,\n 'remark': ''\n }\n return product_dict\n else:\n return {}\n # -------------------------------------------------------------\n\n @api.onchange('scan')\n def po_irn_barcode_onchange(self):\n\n\n try:\n\n po_number = str(self.scan)\n\n self.po_number = po_number\n po_env = self.env['purchase.order']\n po_obj = po_env.search([('name', '=', po_number)])\n\n po_line_env = self.env['purchase.order.line']\n po_line_obj = po_line_env.search([('order_id', '=', po_obj.id)])\n\n self.warehouse_id = po_obj.picking_type_id.warehouse_id.id\n\n po_line_list = list()\n\n\n for po_line in po_line_obj:\n item = {\n\n 'product': str(po_line.name),\n 'product_id': int(po_line.product_id.id),\n 'product_ean': str(po_line.product_id.barcode) if po_line.product_id.barcode else '',\n 'ordered_quantity': str(po_line.product_qty),\n 'received_quantity': 0,\n 'remark': ''\n }\n po_line_list.append((0, 0, item))\n\n self.irn_line = po_line_list\n\n\n self.scan = \"\"\n # if po_number:\n #\n # irn_state = self._check_if_already_irned(po_number)\n # po_state = self._po_confirmation(po_number)\n #\n # # if po_state == 'cancel' or po_state == 'draft':\n # if po_state in ['cancel', 'draft', 'bid', 'category_head_approval_pending',\n # 'category_vp_approval_pending', 'coo_approval_pending', 'ceo_approval_pending']:\n # raise exceptions.ValidationError(_('PO is not confirmed.\\n This PO is in '\n # 'Draft/Approval Pending/Cancel mode'))\n # else:\n #\n # if irn_state == 'full_received':\n # # raise exception\n # raise exceptions.ValidationError(_('Already IRN done!! \\n IRN already done for this PO!!!'))\n #\n # elif irn_state == 'partial_received':\n # # load only left products\n # # search in irn_control with po_number\n # self.irn_line = self._process_partial_received_order(po_number)\n # self.po_number = po_number\n # po_env = self.env['purchase.order']\n # po_obj = po_env.search([('name', '=', po_number)])\n # self.warehouse_id = po_obj.picking_type_id.warehouse_id.id\n # self.scan = \"\"\n #\n # else:\n # self.po_number = po_number\n # po_env = self.env['purchase.order']\n # po_obj = po_env.search([('name', '=', po_number)])\n #\n # po_line_env = self.env['purchase.order.line']\n # po_line_obj = po_line_env.search([('order_id', '=', po_obj.id)])\n #\n # self.warehouse_id = po_obj.picking_type_id.warehouse_id.id\n #\n # po_line_list = list()\n #\n # for po_line in po_line_obj:\n # item = {\n # 'product': str(po_line.name),\n # 'product_id': int(po_line.product_id.id),\n # 'product_ean': str(po_line.product_id.barcode) if po_line.product_id.barcode else '',\n # 'ordered_quantity': str(po_line.product_qty),\n # 'received_quantity': 0,\n # 'remark': ''\n # }\n # po_line_list.append((0, 0, item))\n #\n # self.irn_line = po_line_list\n #\n # self.scan = \"\"\n except:\n\n self.scan = \"\"\n\n def _check_irn_line_received_quantity(self, po_number, product_id):\n\n received_quantity = 0\n\n irn_env = self.env['irn.control']\n irn_obj = irn_env.search([('po_number', '=', po_number)])\n\n irn_line_env = self.env['irn.control.line']\n\n for irn in irn_obj:\n irn_line_obj = irn_line_env.search([('irn_line_id', '=', irn.id), ('product_id', '=', product_id)])\n for irn_line in irn_line_obj:\n received_quantity += irn_line.received_quantity\n\n return received_quantity\n\n def _set_irn_line_state(self, record, po_number):\n\n for line_item in record.irn_line:\n line_received_qty = self._check_irn_line_received_quantity(po_number, line_item.product_id)\n if line_item.ordered_quantity == line_received_qty:\n line_item.state = 'fully_received'\n else:\n line_item.state = 'partially_received'\n\n return record\n\n def _set_po_id_in_line(self, record, po_id):\n\n for line_item in record.irn_line:\n line_item.po_id = po_id\n\n return record\n\n @api.model\n def create(self, vals):\n # generate IRN number\n # IRN0123456\n\n\n record = super(IRNControl, self).create(vals)\n\n\n po_env = self.env['purchase.order']\n po_obj = po_env.search([('name', '=', str(record.po_number))])\n\n irn_number = \"IRN0\"+str(record.id)\n po_id = po_obj.id\n\n record.irn_number = irn_number\n record.po_id = po_id\n record.state = 'full_received' if self._check_full_or_partial_received(record, str(record.po_number)) else 'partial_received'\n\n record = self._set_irn_line_state(record, str(record.po_number))\n record = self._set_po_id_in_line(record, po_id)\n\n # ---------------------------\n create_permission = list()\n\n if 'irn_line' in vals :\n for single_line in vals['irn_line']:\n\n if not not single_line[2]:\n\n if single_line[2]['received_quantity'] > single_line[2]['ordered_quantity']:\n create_permission.append(False)\n else:\n create_permission.append(True)\n\n # if False in create_permission:\n #\n # raise exceptions.ValidationError(_('IRN line adjustment ERROR!!!\\n Received quantity should be less than'\n # ' or equal to (ordered quantity)!!!'))\n #\n # else:\n # self.env['inb.end.to.end'].irn_data_create(record.po_number, po_id, record.id,irn_number,record.create_date)\n # ---------------------------\n return record\n\n # calls at the time of update record\n def write(self, vals):\n # only update if (received_quantity is less than or equal to ordered_quantity)\n update_permission = list()\n\n if 'irn_line' in vals :\n for single_line in vals['irn_line']:\n if not not single_line[2]:\n for irn_l in self.irn_line:\n if irn_l.id == single_line[1]:\n\n received_quantity = single_line[2]['received_quantity'] \\\n if 'received_quantity' in single_line[2] else irn_l.received_quantity\n if received_quantity < irn_l.ordered_quantity:\n vals['state']='partial_received'\n single_line[2]['state'] = 'partially_received'\n else:\n vals['state'] = 'full_received'\n single_line[2]['state'] = 'fully_received'\n\n if received_quantity > irn_l.ordered_quantity:\n update_permission.append(False)\n else:\n update_permission.append(True)\n\n if False in update_permission:\n raise exceptions.ValidationError(_('IRN line adjustment ERROR!!! \\n Received quantity '\n 'should be less than or equal to (ordered quantity)!!!'))\n\n record = super(IRNControl, self).write(vals)\n\n return record\n\n @api.onchange('product_scan')\n def po_irn_product_scan(self):\n try:\n po_number = str(self.po_number)\n\n product_scan = str(self.product_scan)\n product_not_in_po_order = True\n\n if product_scan != 'False':\n qty = 1\n if 'Prod' not in product_scan:\n\n product_env = self.env['product.product']\n product_obj = product_env.search(\n ['|', '|', '|', '|', ('ean13', '=', str(product_scan)), ('gift_ean', '=', str(product_scan)),\n ('box_ean', '=', str(product_scan)), ('case_ean', '=', str(product_scan)),\n ('pallet_ean', '=', str(product_scan))])\n\n if not product_obj.id:\n raise exceptions.ValidationError(_(\n 'Product not found. Please add EAN number in Product Configuration, or contact your department head'))\n\n if str(product_obj.gift_ean) == str(product_scan):\n qty = 1\n elif str(product_obj.box_ean) == str(product_scan):\n qty = product_obj.holding_box_qty\n elif str(product_obj.case_ean) == str(product_scan):\n qty = product_obj.holding_case_qty\n elif str(product_obj.pallet_ean) == str(product_scan):\n qty = product_obj.holding_pallet_qty\n else:\n get_list = product_scan.split('Prod')\n product_id = int(get_list[1])\n\n product_env = self.env['product.product']\n product_obj = product_env.search([('id', '=', product_id)])\n if not product_obj.id:\n raise exceptions.ValidationError(_('Product not found. Please scan properly with a proper product'))\n\n po_env = self.env['purchase.order']\n po_obj = po_env.search([('name', '=', po_number)])\n\n po_line_env = self.env['purchase.order.line']\n po_line_obj = po_line_env.search([('order_id', '=', po_obj.id)])\n\n # po_line_obj[0].product_id.ean13\n # self.irn_line[0].product_ean\n for single_line in self.irn_line:\n if str(single_line.product_id) == str(product_obj.id) and single_line.ordered_quantity >= (single_line.received_quantity +qty):\n single_line.received_quantity += qty\n product_not_in_po_order=False\n break\n # self.irn_line[0].received_quantity\n\n if product_not_in_po_order:\n raise exceptions.ValidationError(_('Product not found in this Order. A wrong product has been scanned'))\n\n\n self.product_scan = \"\"\n\n except:\n self.product_scan = \"\"\n\n\nclass IRNControlLine(models.Model):\n _name = \"irn.control.line\"\n _description = \"IRN Control Line\"\n\n irn_line_id = fields.Many2one('irn.control', 'IRN Line ID', required=True,\n ondelete='cascade', select=True, readonly=True)\n po_id = fields.Many2one('purchase.order', 'PO ID', required=False,\n ondelete='cascade', select=True, readonly=True)\n product = fields.Char('Product')\n product_id = fields.Integer('Product ID')\n product_ean = fields.Char('Product EAN')\n ordered_quantity = fields.Float('Ordered Qty')\n received_quantity = fields.Float('Received Qty')\n remark = fields.Char('Remark')\n qc_pushed = fields.Boolean('QC Pushed')\n state = fields.Selection([\n ('fully_received', 'Fully Received'),\n ('partially_received', 'Partially Received'),\n ('cancel', 'Cancelled'),\n ], 'Status', help=\"Gives the status of the IRN control line\", select=True)\n\n\nclass IRNWithEan(models.Model):\n _name = \"irn.with.ean\"\n _description = \"IRN With EAN\"\n\n product = fields.Char('Product')\n product_id = fields.Integer('Product ID')\n product_ean = fields.Char('EAN')\n\n def save_irn_with_product_ean(self):\n context = self.env.context\n product_id = context['product_id']\n product_ean = context['product_ean']\n\n if len(product_ean) > 13 or \" \" in product_ean:\n # show error. not a valid ean\n raise exceptions.ValidationError(_('Not a valid EAN number!!!'))\n else:\n\n ean_update_query = \"UPDATE product_product SET ean13='{0}' WHERE id='{1}'\".format(product_ean, product_id)\n self._cr.execute(ean_update_query)\n self._cr.commit()\n\n irn_line_query = \"UPDATE irn_control_line SET product_ean='{0}' WHERE product_id='{1}'\".format\\\n (product_ean, product_id)\n self._cr.execute(irn_line_query)\n self._cr.commit()\n\n # return True\n return {\n 'type': 'ir.actions.client',\n 'tag': 'reload',\n }\n\n def cancel_irn_with_product_ean(self):\n return {\n 'type': 'ir.actions.client',\n 'tag': 'reload',\n }\n","sub_path":"addons/wms_inbound/models/irn.py","file_name":"irn.py","file_ext":"py","file_size_in_byte":20173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"52645201","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File name :\n# Author : Remi GASCOU\n# Date created :\n# Date last modified :\n# Python Version : 3.*\n\nfrom lib import *\n\nif __name__ == '__main__':\n kw = [\"TEST\",\"NUL\",\"ABCD\",\"HEY\",\"123AZ\"]\n data = \"..TEST.NUL......ABCD.HEYYYY...ABCD.COUCOU\"\n kwp = KeywordsParser(kw)\n kwp.parse(data)\n","sub_path":"Python/GenericKeywordsParser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"388009991","text":"# 5. Linked List\nclass Node:\n def __init__(self, value):\n self.val = value\n self.next = None\n\nclass UnorderedList:\n def __init__(self):\n self.head = None\n def add(self, value): # add to the front\n temp = Node(value)\n temp.next = (self.head)\n self.head = temp\n def remove(self, value):\n d = Node(0)\n d.next = self.head\n cur = d\n while cur and cur.next:\n if cur.next.val == value:\n cur.next = cur.next.next\n else:\n cur = cur.next\n self.head = d.next\n def isEmpty(self):\n return not self.head\n def size(self):\n head = self.head\n n = 0\n while head:\n head = head.next\n n += 1\n return n\n def search(self, item):\n head = self.head\n while head:\n if head.val == item:\n return True\n head = head.next\n return False\n\nclass OrderedList:\n def __init__(self):\n self.head = None\n def search(self, value):\n head = self.head\n while head:\n if head.val == value:\n return True\n elif head.val > value:\n return False\n else:\n head = head.next\n return False\n def add(self, value):\n head = self.head\n previous = None\n cur = head\n while cur:\n if cur.val >= value:\n break\n else:\n previous = cur\n cur = cur.next\n if previous == None:\n self.head = Node(value)\n self.head.next = head\n else:\n previous.next = Node(value)\n previous.next.next = cur\n\n## test 1\n# a = UnorderedList()\n# a.add(5)\n# a.add(4)\n# a.add(3)\n# a.add(2)\n# a.add(1)\n#\n# print(a.search(4))\n# print(a.search(5))\n# print(a.search(1))\n#\n# a.remove(5)\n# print(a.head.val)\n# print(a.size())\n# a.remove(3)\n# print(a.head.val)\n# print(a.size())\n# a.remove(1)\n# print(a.head.val)\n# print(a.size())\n\n## test 2\n# b = OrderedList()\n# b.add(3)\n# b.add(1)\n# b.add(2)\n# b.add(0)\n# b.add(5)\n# print(b.head.val)\n# print(b.head.next.val)\n# print(b.head.next.next.val)\n# print(b.head.next.next.next.val)\n# print(b.head.next.next.next.next.val)\n# print('now let\\'s do search')\n# print(b.search(2))\n","sub_path":"Fundamentals/Data-Structure/Linked_List.py","file_name":"Linked_List.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"650532153","text":"from collections import deque\n\ndef bfs(row,col): # BFS\n deq = deque()\n deq.append((row,col))\n \n visited[row][col] = 1\n\n v_count = 0\n k_count = 0\n if map_list[row][col] == 'v':\n v_count += 1\n elif map_list[row][col] == 'k':\n k_count += 1\n\n dr = [0,0,-1,1]\n dc = [-1,1,0,0]\n\n while len(deq) > 0:\n row,col = deq.popleft()\n\n for i in range(4):\n nr = row + dr[i]\n nc = col + dc[i]\n \n if 0 <= nr < R and 0 <= nc < C:\n if map_list[nr][nc] != '#':\n if visited[nr][nc] == 0:\n if map_list[nr][nc] == 'v':\n v_count += 1\n visited[nr][nc] = 1\n deq.append((nr,nc))\n\n elif map_list[nr][nc] == 'k':\n k_count += 1\n visited[nr][nc] = 1\n deq.append((nr,nc))\n\n else:\n visited[nr][nc] = 1 \n deq.append((nr,nc))\n if k_count > v_count:\n return 0,k_count\n else:\n return v_count,0\n\n\nR,C = map(int,input().split())\n\nmap_list = [list(input().rstrip()) for _ in range(R)]\nvisited = [[0 for _ in range(C)] for _ in range(R)]\nv,k = 0,0\n\nfor row in range(R):\n for col in range(C):\n if map_list[row][col] != \"#\" and visited[row][col] == 0:\n v1,k1 = bfs(row,col)\n v += v1\n k += k1\nprint(k,v)","sub_path":"algorithm_search/양치기 꿍 (3187번).py","file_name":"양치기 꿍 (3187번).py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"433329015","text":"import os\nlocation = os.path.dirname(os.path.dirname(__file__))\n\n\nclass Config(object):\n\n def __init__(self):\n config = open(location + '/Config.ini')\n config = config.read()\n config = config.split('\\n')\n self.setting = dict()\n for value in config:\n if value[:1] != \"*\":\n x = value.split('=')\n self.setting[x[0]] = x[1]\n\n","sub_path":"BlogDb/Config.py","file_name":"Config.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"477085248","text":"import pygame\nimport random\n# Size of the game screen\nSCREEN_RECT = pygame.Rect(0, 0, 700, 480)\n# Timer event constant for creating enemy planes\nCREATE_ENEMY_EVENT = pygame.USEREVENT\n\n# Define a constant for the bullet event\nHERO_FIRE_EVENT = pygame.USEREVENT + 1\n\n\nclass PanZhiWei_PlaneGame(pygame.sprite.Sprite):\n \"\"\"Base class for game sprites\"\"\"\n\n def __init__(self, new_image, new_speed=1):\n # Call the parent class's init method\n super().__init__()\n # Image, speed, position\n self.image = pygame.image.load(new_image)\n self.speed = new_speed\n # Get the width and height of the image\n self.rect = self.image.get_rect()\n\n def update(self):\n self.rect.x += self.speed\n\n\nclass PanZhiWei_Background(PanZhiWei_PlaneGame):\n def __init__(self, is_alt=False):\n super().__init__(\"./images/横向的背景.png\",2)\n if is_alt:\n self.rect.right = 0\n\n def update(self):\n super().update()\n if self.rect.left == SCREEN_RECT.width:\n self.rect.right = 0\n\n\nclass PanZhiWei_Enemy(PanZhiWei_PlaneGame):\n \"\"\"Enemy plane sprite class\"\"\"\n\n def __init__(self):\n # Call the parent method to create the enemy sprite and set its image\n super().__init__(\"./images/横向的敌机 (复件).png\",6)\n\n # Set a random initial speed for the enemy\n\n self.speed = random.randint(8, 10)\n\n # Set a random initial position for the enemy\n\n self.rect.right = 0\n\n max_x = SCREEN_RECT.height - self.rect.height\n self.rect.y = random.randint(0, max_x)\n\n def update(self):\n panduan = random.randint(0, 2)\n \n if panduan == 0:\n # Call the parent method so the enemy moves straight ahead\n super().update()\n elif panduan == 1:\n self.rect.x += self.speed\n self.rect.y -= self.speed\n elif panduan == 2:\n self.rect.x += self.speed\n self.rect.y += self.speed\n\n # Check whether it flew off screen; if so, remove the enemy from the sprite group\n if self.rect.left > SCREEN_RECT.width:\n self.kill()\n\n def __del__(self):\n print(\"Enemy plane destroyed %s\" % self.rect)\n\n\nclass PanZhiWei_Hero(PanZhiWei_PlaneGame):\n \"\"\"Hero sprite\"\"\"\n\n def __init__(self):\n\n super().__init__(\"./images/横向的飞机 (1) (复件).png\", 0)\n\n # Set an initial position for the hero\n self.rect.centery = SCREEN_RECT.centery\n self.rect.right = SCREEN_RECT.right - 30\n self.speed1 = 0\n # Create a sprite group for the bullets\n self.bullets = pygame.sprite.Group()\n\n def update(self):\n\n # super().update()\n # Move the plane horizontally\n self.rect.x += self.speed\n self.rect.y += self.speed1\n\n # Keep the plane inside the screen bounds\n if self.rect.left < 0:\n self.rect.left = 0\n\n if self.rect.right > SCREEN_RECT.width:\n self.rect.right = SCREEN_RECT.width\n\n if self.rect.bottom < 0:\n self.rect.top = SCREEN_RECT.height\n if self.rect.top > SCREEN_RECT.height:\n self.rect.bottom = 0\n\n def fire(self):\n print(\"Firing bullets\")\n\n for i in (1, 2):\n # Create the bullets\n bullet = PanZhiWei_Bullet()\n bullet1 = PanZhiWei_Bullet()\n bullet2 = PanZhiWei_Bullet()\n # Set the bullet positions\n bullet.rect.x = self.rect.left\n bullet.rect.centery = self.rect.centery\n bullet1.rect.x = self.rect.left\n bullet1.rect.centery = self.rect.centery + 15\n bullet2.rect.x = self.rect.left\n bullet2.rect.centery = self.rect.centery - 15\n\n # Add the bullets to the sprite group\n self.bullets.add(bullet, bullet1, bullet2)\n\n\nclass PanZhiWei_Bullet(PanZhiWei_PlaneGame):\n \"\"\"Bullet sprite class\"\"\"\n\n def __init__(self):\n\n # Call the parent method\n super().__init__(\"./images/横向的飞机 (2).png\", -15)\n\n def update(self):\n\n super().update()\n\n # Check whether the bullet left the screen; if so, remove it from the sprite group\n\n if self.rect.left > SCREEN_RECT.width:\n self.kill()\n","sub_path":"面向对象/lala2.py","file_name":"lala2.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"526638794","text":"# Importing the libraries\nimport pandas as pd\nimport numpy as np\n\ndataset = pd.read_excel('MediaVoto_giocatori.xlsx')\n\noutput = pd.DataFrame()\n\n#Giornate = ['01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21']\nGiornate = np.arange(1,9)\nfor giorn in Giornate:\n fm = dataset[dataset['Giornata '+str(giorn)]!=0].groupby(['Squadra'])['Giornata '+str(giorn)].mean()\n output['Giornata '+str(giorn)] = pd.Series(fm)\ntot = output.mean(axis=1)\noutput['Total average '] = pd.Series(tot)\n\noutput.to_excel('FantaMedia_Squadre.xlsx')","sub_path":"MediaVoto/CreateTeamFile.py","file_name":"CreateTeamFile.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"352025587","text":"import random\nlst = [random.randint(1,100) for x in range(12)]\nprint(\"Original list - \", lst)\nmaximum= max(lst)\nminimum= min(lst)\nprint(\"Largest element of the list -\", maximum)\nprint(\"Smallest element of the list -\", minimum)\nidx=0\nfor elem in lst:\n if elem==maximum:\n lst[idx]=minimum\n idx+=1\n elif elem==minimum:\n lst[idx]=maximum\n idx+=1\n else:\n idx+=1\nprint(\"List after swapping -\",lst)","sub_path":"Test/Task_8(TEST).py","file_name":"Task_8(TEST).py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"520026827","text":"from kubernetes import client as k8s_client\n\nfrom ..native import deployment\nfrom fairing.backend.kubernetes import TF_JOB_VERSION\n\n\nclass KubeflowDeployment(deployment.NativeDeployment):\n\n def __init__(self, namespace, runs, distribution):\n super(KubeflowDeployment, self).__init__(namespace, runs)\n self.distribution = distribution\n\n def deploy(self):\n self.backend.create_tf_job(self.namespace, self.job_spec)\n\n def generate_job(self, pod_template_spec):\n \"\"\"Returns a TFJob template\"\"\"\n self.set_container_name(pod_template_spec)\n\n worker_replica_spec = {}\n worker_replica_spec['replicas'] = self.distribution['Worker']\n worker_replica_spec['template'] = pod_template_spec\n\n ps_replica_spec = {}\n ps_replica_spec['replicas'] = self.distribution.get('PS', 0)\n ps_replica_spec['template'] = pod_template_spec\n\n chief_replica_spec = {}\n chief_replica_spec['replicas'] = self.distribution.get('Chief', 0)\n chief_replica_spec['template'] = pod_template_spec\n\n spec = {}\n spec['tfReplicaSpecs'] = {}\n spec['tfReplicaSpecs']['Worker'] = worker_replica_spec\n if chief_replica_spec['replicas'] > 0:\n spec['tfReplicaSpecs']['Chief'] = chief_replica_spec\n if ps_replica_spec['replicas'] > 0:\n spec['tfReplicaSpecs']['PS'] = ps_replica_spec\n\n tf_job = {}\n tf_job['kind'] = 'TFJob'\n tf_job['apiVersion'] = 'kubeflow.org/' + TF_JOB_VERSION\n tf_job['metadata'] = k8s_client.V1ObjectMeta(name=self.name)\n tf_job['spec'] = spec\n\n return tf_job\n\n def set_container_name(self, pod_template_spec):\n \"\"\"Sets the name of the main container to `tensorflow`.\n This is required for TfJobs\"\"\"\n pod_template_spec.spec.containers[0].name = 'tensorflow'\n\n def get_logs(self):\n selector = 'tf-replica-index=0,tf-replica-type=worker'\n self.backend.log(self.name, self.namespace, selector)\n","sub_path":"fairing/training/kubeflow/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"620889827","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.login),\n url(r'^content/$', views.content),\n url(r'^index/$', views.index),\n url(r'^add/$', views.add),\n url(r'^edit/$', views.edit),\n url(r'^delete/$', views.delete),\n url(r'^find/$', views.find),\n url(r'^v_index/$', views.v_index),\n url(r'^v_add/$', views.v_add),\n url(r'^v_edit/$', views.v_edit),\n url(r'^v_delete/$', views.v_delete),\n url(r'^v_find/$', views.v_find),\n url(r'^sign_up/$', views.sign_up),\n url(r'^u_index/$', views.u_index),\n url(r'^u_delete/$', views.u_delete),\n url(r'^u_v_add/$', views.u_v_add),\n]","sub_path":"sim/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"547969822","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport yaml\nimport logging\nimport logging.config\nfrom appium import webdriver\nfrom common.common_data import *\nfrom airtest.core.api import auto_setup\n\nlogging.config.fileConfig(CON_LOG)\nlogging = logging.getLogger()\n\n\ndef get_desired_caps():\n \"\"\"\n Read the desired caps and return them\n :return: desired caps\n \"\"\"\n with open(CAPS_YAML, 'r', encoding='utf-8') as file:\n data = yaml.load(file, Loader=yaml.FullLoader)\n print(data)\n return data\n\n\ndef appium_desired():\n data = get_desired_caps()\n desired_caps = data['desired_caps']\n # Disable the airtest IME; once it is disabled, poco().set_text() cannot be used\n # yosemite = '?ime_method=None'\n auto_setup(__file__, devices=[\"Android:///%s?ime_method=ADBIME\" % data['desired_caps']['udid']])\n logging.info('start app...')\n driver = webdriver.Remote('http://%s:%s/wd/hub' % (data['ip'], data['port']), desired_caps)\n driver.implicitly_wait(3)\n return driver\n","sub_path":"common/deserid_caps.py","file_name":"deserid_caps.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"99716257","text":"# coding: utf-8\nfrom multiprocessing import Process\nimport logging\n\nclass Opc(Process):\n def __init__(self, q_parent, q_self):\n super(Opc, self).__init__()\n self.q = {}\n self.q[\"parent\"] = q_parent\n self.q[\"self\"] = q_self\n\n def run(self):\n \"\"\"Main multiprocess routine\"\"\"\n while True:\n try:\n message = self.q[\"self\"].get()\n self.handler(message)\n except Exception as e:\n logging.error(e)\n\n def handler(self, message):\n \"\"\"Message handler\"\"\"\n try:\n m_type = message[\"type\"]\n if m_type == \"cmd\":\n self.command(message)\n elif m_type == \"req\":\n self.request(message)\n elif m_type == \"rep\":\n self.reponse(message)\n else:\n assert False, \"unsupported message type: %s \" % m_type\n except Exception as e:\n logging.error(e)\n\n def command(self, message):\n \"\"\"Command message\"\"\"\n res = \"opc : \"+str(message)\n self.q[\"parent\"].put(res)\n\n def request(self, message):\n \"\"\"Request message\"\"\"\n res = \"opc : \"+str(message)\n self.q[\"parent\"].put(res)\n\n def reponse(self, message):\n \"\"\"Response message\"\"\"\n print(\"response received\")\n","sub_path":"languages/python3/multiprocessing/test_complexe2/service/opc.py","file_name":"opc.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"203250315","text":"#!/usr/bin/env python3\n\nimport sys\n\ndef long_subtract(num1_array, num2_array):\n N = len(num1_array)\n\n add_on = [0] * (N-len(num2_array))\n num2_array = add_on + num2_array\n\n borrow = False\n\n result = [0] * N\n\n for i in range(N-1, -1, -1):\n num1 = num1_array[i]\n num2 = num2_array[i]\n if borrow:\n num1 -= 1\n\n if num1 < num2:\n result[i] = (num1+10)-num2\n borrow = True\n else:\n result[i] = num1-num2\n borrow = False\n\n final = []\n for num in result:\n if num == 0:\n continue\n\n final.append(num)\n\n is_positive = final[0] > 0\n\n return final, is_positive\n\ndef main():\n for line in sys.stdin:\n num1 = [int(num) for num in line.strip().split()]\n\n num2 = [int(num) for num in sys.stdin.readline().strip().split()]\n \n final, is_positive = long_subtract(num1, num2)\n print(final)\n print('Positive' if is_positive else 'Negative')\n\nif __name__ == '__main__':\n main()\n","sub_path":"interview-epic-summer2021/subtraction.py","file_name":"subtraction.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"524455950","text":"# -*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------\n# Name: checkUpdates\n# Purpose:\n#\n# Author: Bruce Zhu\n#\n# Created: 23/08/2017\n# Copyright: (c) SQA 2017\n# Licence: \n#-------------------------------------------------------------------------------\nimport os\nimport urllib.request\nimport configparser\nimport zipfile\nimport logging\n\nclass checkUpdates:\n\n def __init__(self):\n pass\n\n def downLoadFromURL(self, url, dest_dir):\n try:\n urllib.request.urlretrieve(url , dest_dir)\n return True\n except Exception as e:\n logging.log(logging.DEBUG, 'Error when downloading: {0}'.format(e))\n return False\n\n def getVer(self, verFile):\n downVer = ''\n conf = configparser.ConfigParser()\n try:\n conf.read(verFile)\n downVer = conf.get(\"version\", \"app\")\n except Exception as e:\n logging.log(logging.DEBUG, 'Error: {0}'.format(e))\n return downVer\n\n def splitVer(self, s):\n ver = s.split('.')\n return ver\n\n def compareVer(self, downVer, currentVer):\n downVersions = self.splitVer(downVer)\n currentVersions = self.splitVer(currentVer)\n for i in range(0, len(currentVersions)):\n if int(downVersions[i]) > int(currentVersions[i]):\n return True\n return False\n\n def unzip_dir(self, zipfilename, unzipdirname):\n fullzipfilename = os.path.abspath(zipfilename)\n fullunzipdirname = os.path.abspath(unzipdirname)\n logging.log(logging.DEBUG, \"Start to unzip file %s to folder %s ...\"% (zipfilename, unzipdirname) )\n #Check input ...\n if not os.path.exists(fullzipfilename):\n logging.log(logging.DEBUG, \"Dir/File %s does not exist, press any key to quit...\"% fullzipfilename )\n inputStr = input()\n return\n if not os.path.exists(fullunzipdirname):\n os.mkdir(fullunzipdirname)\n else:\n if os.path.isfile(fullunzipdirname):\n logging.log(logging.DEBUG, \"File %s exists, are you sure you want to delete it first? [Y/N]\"% fullunzipdirname)\n while 1:\n inputStr = input()\n if inputStr == \"N\" or inputStr == \"n\":\n return\n else:\n if inputStr == \"Y\" or inputStr == \"y\":\n os.remove(fullunzipdirname)\n logging.log(logging.DEBUG, \"Continue to unzip files ...\")\n break\n #Start extract files ...\n #print(fullzipfilename)\n try:\n zipfiles=zipfile.ZipFile(fullzipfilename,'r')\n zipfiles.extractall(unzipdirname)\n zipfiles.close()\n logging.log(logging.DEBUG, \"Unzip finished!\")\n logging.log(logging.DEBUG, \"Unzip file succeeded!\")\n except Exception as e:\n logging.log(logging.DEBUG, e)\n\n\nif __name__ == '__main__':\n dest_dir = './downVer.ini'\n checkUpdates = checkUpdates()\n #checkUpdates.downLoadFromURL('http://sw.tymphany.com/fwupdate/sqa/tool/version.ini', dest_dir)\n #downVer = checkUpdates.getVer(dest_dir)\n #checkUpdates.compareVer(downVer, '1.1.0')\n checkUpdates.unzip_dir('PowerCycle.zip', 'PowerCycle')\n","sub_path":"src/ui/checkUpdates.py","file_name":"checkUpdates.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"564325313","text":"#!/usr/bin/python3\nimport subprocess\nimport os\n\nLINE_FORMAT = '\\033[33m{prefix} \\033[37m{path}\\033[0m{base}'\n\ndef get_files(cmd, prefix):\n p = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n encoding='utf8',\n )\n try:\n for line in p.stdout:\n line = line.strip()\n base = os.path.basename(line)\n path = os.path.dirname(line)\n if path:\n path += \"/\"\n print(LINE_FORMAT.format(prefix=prefix, path=path, base=base))\n finally:\n p.wait()\n\n\ndef get_git_files(git_root):\n get_files(['git', 'ls-files', '-c', git_root], 'git')\n get_files(\n ['git', 'ls-files', '-o', '--exclude-standard', git_root],\n 'git-other',\n )\n if os.path.exists('.gitmodules'):\n mod_prefixes = subprocess.check_output(['git', 'config', '--file', '.gitmodules', '--get-regexp', 'path'])\n mod_prefixes = [l.decode('utf8') for l in mod_prefixes.split()]\n get_files(['git', 'ls-files', '--recurse-submodules'] + mod_prefixes, 'git-submodules')\n\n\ndef get_find_files():\n get_files(['find', '-type', 'f', '-follow'], 'find')\n\n\ndef main():\n # Get rid of all stderr output\n os.close(2)\n try:\n git_root = subprocess.check_output(\n ('git', 'rev-parse', '--show-toplevel')\n ).strip()\n except subprocess.CalledProcessError:\n git_root = None\n\n if git_root is not None:\n get_git_files(git_root)\n else:\n get_find_files()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"proj-ls-files.py","file_name":"proj-ls-files.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"382225563","text":"import os\nimport time\nimport pyperclip as pc\nimport string\nimport tkinter\nfrom tkinter import *\nfrom tkinter.ttk import Combobox\nimport tkinter.messagebox as mg\nfrom EasyCode.EasyCode import get_all_drives\navailable_drives = get_all_drives()\navailable_drives.insert(0, \"Select\")\navailable_drives.append(\"Desktop\")\nclass App:\n search_over = False\n def __init__(self, root):\n # initializing the root as a part of the class\n self.root = root\n # setting the initial size of the root\n self.root.geometry(\"500x250+510+250\")\n # setting the app's title\n self.root.title(\"File Finder\")\n ################ Row 1 ####################\n self.label1 = Label(self.root, text=\"Disk:\", font=(\"times new roman\", 20))\n self.label1.place(x=80, y=50)\n self.com = Combobox(self.root, state=\"readonly\",font = (\"times new roman\", 15))\n self.com.place(x=200, y = 50)\n self.com['values'] = available_drives\n self.com.current(0)\n ################ Row 2 ######################\n self.label2 = Label(self.root, text=\"Filename: \", font=(\"times new roman\", 20))\n self.label2.place(x = 80, y=120)\n self.text = Entry(self.root, font=(\"times new roman\", 20), bg=\"lightgray\", fg=\"black\")\n self.text.place(x=200, y=120, width=250)\n ################ Creating a button for search #########################\n self.button = Button(self.root, text=\"Search \",bd=3, relief=RAISED, command=self.get_info)\n self.button.place(x=190, y=200, width=150)\n \n\n def get_info(self):\n \n drive = self.com.get()+\"\\\\\"\n if \"desktop\" in drive.lower():\n drive = os.path.join(os.environ['USERPROFILE'], \"Desktop\")\n filename = self.text.get()\n self.search(drive, filename)\n\n\n def search(self, drive, fn):\n self.var = StringVar()\n self.var.set(\"Searching...\")\n \n self.label3 = Label(self.root, textvariable=self.var, font=(\"times new roman\", 10))\n self.label3.place(x=0,y=220)\n self.root.update()\n time.sleep(0.8)\n \n self.drive = drive \n self.fn = fn\n for root, dirs, files in os.walk(drive):\n for file in files:\n filename = file\n path_of_file = os.path.join(root, file)\n if fn.lower() in filename.lower():\n self.search_over = True\n os.startfile(path_of_file)\n mg.showinfo(\"Success\", f\"\"\"\n File was found\n The file was located in : {root}\n The path of the file is : {path_of_file}\"\"\")\n pc.copy(root)\n self.var.set(\"Completed\")\n self.root.update()\n return\n\n mg.showerror(\"No file found\", \"No such file found\")\n def animate(self):\n no=0\n while not self.search_over:\n if no==0:\n self.var.set(\"Searching.\")\n self.root.update()\n time.sleep(0.8)\n no+=1\n elif no==1:\n self.var.set(\"Searching..\")\n self.root.update()\n time.sleep(0.8)\n no+=1\n elif no==2:\n self.var.set(\"Searching...\")\n self.root.update()\n time.sleep(0.8)\n no = 0\n \nroot = Tk()\napp = App(root)\nroot.mainloop()\n","sub_path":"File Finder.pyw","file_name":"File Finder.pyw","file_ext":"pyw","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"340147050","text":"import webbrowser\n\nclass Movie():\n \"\"\" Contains meta-data relating to a movie\n\n Args:\n movie_title (str): Title of movie\n movie_storyline (str): Brief one sentence description of movie or a tagline\n poster_image (str): Accessible http url of the movies poster\n trailer_youtube (str): Link to the trailer of the movie on youtube.\n\n \"\"\"\n\n def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):\n self.title = movie_title\n self.movie_storyline = movie_storyline\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\t\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"623379799","text":"from docxcompose.utils import xpath\nfrom lxml.etree import Element\nfrom lxml.etree import QName\n\n\nclass StructuredDocumentTags(object):\n \"\"\"Structured Document Tags (aka Content Controls)\"\"\"\n\n def __init__(self, doc):\n self.doc = doc\n\n def tags_by_alias(self, alias):\n \"\"\"Get Structured Document Tags by alias.\"\"\"\n return xpath(\n self.doc.element.body,\n './/w:sdt/w:sdtPr/w:alias[@w:val=\"%s\"]/ancestor::w:sdt' % alias)\n\n def set_text(self, alias, text):\n \"\"\"Set the text content of all Structured Document Tags identified by\n an alias. Only plain text SDTs are supported.\n\n If the SDT has the 'multiLine' property, newlines in `text` will be\n respected, and the SDTs content will be updated with lines separated\n by line breaks.\n \"\"\"\n text = text.strip()\n tags = self.tags_by_alias(alias)\n for tag in tags:\n # Ignore if it's not a plain text SDT\n plain_text = xpath(tag, './w:sdtPr/w:text')\n if not plain_text:\n continue\n\n nsmap = tag.nsmap\n is_multiline = bool(plain_text[0].xpath('./@w:multiLine', namespaces=nsmap))\n\n properties = xpath(tag, './w:sdtPr')\n content = xpath(tag, './w:sdtContent')\n if not content:\n continue\n\n run_elements = xpath(content[0], './/w:r')\n if not run_elements:\n continue\n\n # First, prepare the SDT for easy updating of its value.\n #\n # We do this by cleaning out the SDT content to only preserve\n # the first of possibly many runs, and remove the contents of\n # that run (except w:rPr formatting properties).\n #\n # That run can then be filled with new text nodes and line breaks\n # as needed. This should allow us to preserve formatting, but\n # otherwise start from a clean slate where we create new nodes\n # instead of having to carefully update an existing structure.\n\n first_run = run_elements[0]\n self._remove_placeholder(properties, content, first_run)\n self._remove_all_runs_except_first(run_elements)\n self._clean_first_run(first_run)\n\n # Now update contents by appending new text nodes.\n #\n # If the SDT has the multiLine property, we respect newlines\n # in the input value string and create text nodes delimited by\n # line breaks.\n if not is_multiline:\n text = text.replace('\\n', ' ')\n\n lines = text.splitlines()\n for i, line in enumerate(lines, start=1):\n txt_node = Element(QName(nsmap['w'], \"t\"))\n txt_node.text = line\n first_run.append(txt_node)\n\n if i != len(lines):\n br = Element(QName(nsmap['w'], \"br\"))\n first_run.append(br)\n\n def _remove_placeholder(self, properties, content, first_run):\n \"\"\"Remove placeholder marker and style.\n \"\"\"\n showing_placeholder = xpath(properties[0], './w:showingPlcHdr')\n if showing_placeholder:\n properties[0].remove(showing_placeholder[0])\n run_props = xpath(first_run, './w:rPr')\n if run_props:\n first_run.remove(run_props[0])\n\n def _remove_all_runs_except_first(self, run_elements):\n \"\"\"Remove all runs except the first one.\n \"\"\"\n for run in run_elements[1:]:\n run.getparent().remove(run)\n\n def _clean_first_run(self, first_run):\n \"\"\"Remove all elements from the first run except run formatting.\n \"\"\"\n for child in first_run.getchildren():\n # Preserve formatting\n if QName(child).localname == 'rPr':\n continue\n first_run.remove(child)\n\n def get_text(self, alias):\n \"\"\"Get the text content of the first Structured Document Tag identified\n by the given alias.\n \"\"\"\n tags = self.tags_by_alias(alias)\n for tag in tags:\n # Ignore if it's not a plain text SDT\n if not xpath(tag, './w:sdtPr/w:text'):\n continue\n\n tokens = []\n text_and_brs = xpath(tag, './w:sdtContent//w:r/*[self::w:t or self::w:br]')\n for el in text_and_brs:\n if QName(el).localname == 't':\n tokens.append(el.text)\n elif QName(el).localname == 'br':\n tokens.append('\\n')\n\n return ''.join(tokens)\n","sub_path":"docxcompose/sdt.py","file_name":"sdt.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"6140379","text":"from django.db import models\nfrom django.db.models.signals import pre_save\nfrom django.db.models import Sum\nfrom django.dispatch import receiver\n\n\nclass Customer(models.Model):\n first_name = models.CharField(\n 'First Name', max_length=255,)\n last_name = models.CharField(\n 'Last Name', max_length=255, null=True, blank=True)\n camp_name = models.CharField('Camp Name', max_length=255,)\n date_created = models.DateField('Date Created', auto_now_add=True)\n\n class Meta:\n db_table = 'customer'\n unique_together = ('first_name', 'last_name', 'camp_name',)\n\n def __unicode__(self):\n fn = ''\n ln = ''\n if self.first_name and self.first_name != self.camp_name:\n fn = ' | %s' % self.first_name\n if self.last_name:\n ln = ' | %s' % self.last_name\n return '%s%s%s' % (self.camp_name, fn, ln,)\n\n def debt(self):\n cash_recvd = self.cash_advances.aggregate(\n total_out=Sum('amount'))['total_out'] or 0\n cocoa_given = self.cocoa_given.aggregate(\n cocoa_given=Sum('value'))['cocoa_given'] or 0\n return cash_recvd - cocoa_given\n\n\n@receiver(pre_save, sender=Customer, dispatch_uid='0982gejb2jcjlelmlmponei')\ndef customer_pre_save(sender, **kwargs):\n self = kwargs['instance']\n self.camp_name = self.camp_name.strip().upper()\n if self.pk is None or not self.pk:\n if not self.first_name:\n self.first_name = self.camp_name\n\n if self.first_name:\n self.first_name = self.first_name.strip().upper()\n\n if self.last_name:\n self.last_name = self.last_name.strip().upper()\n","sub_path":"customer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602492375","text":"r# termostato\n\nimport mysql.connector\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nconn = mysql.connector.connect(\n\thost='localhost',\n\tuser='root',\n\tpassword='',\n\tdatabase='db2017',\n)\nrs=conn.cursor()\nrs.execute(\"\"\"\n\tSELECT temperatura FROM temperaturas;\n\t\"\"\")\nr=rs.fetchall()\n\n#for i in r:\tprint(i)\n\n# Data for plotting\n#t = np.arange(0.0, 2.0, 0.01)\n#s = 1 + np.sin(2 * np.pi * t)\n\nfig, ax = plt.subplots()\nax.plot(r)\n\nax.set(xlabel='muestras', ylabel='Tª (ºC)',\n title='Registro de temperaturas')\nax.grid()\n\nfig.savefig(\"test.png\")\nplt.show()","sub_path":"CURSO-Python-2018/ej50.pyw","file_name":"ej50.pyw","file_ext":"pyw","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47162520","text":"\n\nimport json\nfrom flask import Blueprint, request, abort, jsonify, g, current_app\nfrom API.blibb.blibb import Blibb\nfrom API.event.event import Event\nfrom API.contenttypes.picture import Picture\nfrom API.user.buser import User\nfrom API.utils import is_valid_id, get_user_name, get_key\nfrom bson.objectid import ObjectId\n\nfrom API.decorators import crossdomain, support_jsonp\n\n\nmod = Blueprint('blibb', __name__, url_prefix='/blibb')\n\n\n@mod.route('/favicon.ico')\n@mod.route('/robots.txt')\n@mod.route('/index.html')\n@mod.route('/scripts/')\ndef handle(any=None):\n abort(404)\n\n\n@mod.before_request\ndef before_request():\n g.e = Event(request.path)\n\n\n@mod.teardown_request\ndef teardown_request(exception):\n g.e.save()\n\n#\n# /blibb [POST, DELETE]\n#\n\n\n@mod.route('', methods=['POST'])\n@crossdomain(origin='*')\ndef newBlibb():\n name = request.form['bname']\n desc = request.form['bdesc']\n template = request.form['btemplate']\n 
key = request.form['bkey']\n user = get_user_name(key)\n image_id = request.form['bimage']\n slug = request.form['slug']\n write_access = request.form['write_access']\n read_access = request.form['read_access']\n\n # check if a blibb with that slug already exists\n blibb = Blibb.get_by_slug(user, slug)\n # return jsonify(blibb)\n\n if not blibb:\n res = {'error': 'None'}\n if is_valid_id(image_id):\n image = Picture.dump_image(image_id)\n else:\n image = 'blibb.png'\n\n new_id = Blibb.insert(user, name, slug, desc, template, image, read_access, write_access)\n res = {'id': new_id}\n else:\n res = {'error': 'Blibb with that slug already exists'}\n return jsonify(res)\n\n\n@mod.route('/view', methods=['PUT'])\n@crossdomain(origin='*')\ndef updateView():\n blibb_id = request.form['blibb_id']\n user = get_user_name(request.form['login_key'])\n view = request.form['viewName']\n html = request.form['viewHtml']\n # current_app.logger.info(user + ' ' + blibb_id + ' ' + view + ' ' + html)\n if is_valid_id(blibb_id):\n if Blibb.can_write(user, '', blibb_id):\n Blibb.update_view(blibb_id, user, view, html)\n return jsonify({'result': 'View Updated'})\n else:\n abort(401)\n abort(404)\n\n\n@mod.route('//', methods=['DELETE'])\n@crossdomain(origin='*')\ndef deleteBlibb(blibb_id=None, login_key=None):\n user = get_user_name(login_key)\n if is_valid_id(blibb_id):\n filter = {'_id': ObjectId(blibb_id), 'u': user}\n Blibb.remove(filter)\n return jsonify({'ret': 1})\n\n\n@mod.route('//p/', methods=['GET'])\n@crossdomain(origin='*')\ndef getBlibb(blibb_id=None, params=None):\n if blibb_id is None:\n abort(404)\n\n if params is None:\n o = Blibb.get_object(blibb_id)\n r = Blibb.flat_object(o)\n else:\n r = Blibb.get_by_id_params(blibb_id, params)\n\n if r != 'null':\n return jsonify(r)\n else:\n abort(404)\n\n@mod.route('/short/', methods=['GET'])\n@support_jsonp\ndef getBlibbShort(short_id=None):\n if short_id is None:\n abort(404)\n\n o = Blibb.get_object({'si': short_id})\n r = Blibb.flat_object(o)\n\n if r != 'null':\n return jsonify(r)\n else:\n abort(404)\n\n\n@mod.route('//template', methods=['GET'])\n@crossdomain(origin='*')\ndef getBlibbTemplate(blibb_id=None):\n b = Blibb()\n r = b.get_template(blibb_id)\n if r != 'null':\n return r\n else:\n abort(404)\n\n\n@mod.route('//view', methods=['GET'])\n@crossdomain(origin='*')\ndef getBlibbView(blibb_id=None, view_name='null'):\n if is_valid_id(blibb_id):\n r = Blibb.get_template_view(blibb_id)\n if r != 'null':\n return jsonify(r)\n else:\n abort(404)\n else:\n abort(400)\n\n\n@mod.route('/', methods=['GET'])\n@crossdomain(origin='*')\n@support_jsonp\ndef getBlibbByUser(username=None):\n b = Blibb()\n if username is None:\n abort(404)\n res = b.get_by_user(username)\n return jsonify(res)\n\n\n@mod.route('//group', methods=['GET'])\n@crossdomain(origin='*')\ndef getGroupBlibbByUser(username=None):\n b = Blibb()\n if username is None:\n abort(404)\n res = b.getByGroupUser(username)\n return res\n\n\n@mod.route('/fork', methods=['POST'])\n@crossdomain(origin='*')\ndef fork():\n key = request.form['login_key']\n user = get_user_name(key)\n target_id = request.form['b']\n Blibb.fork(target_id, user)\n return json.dumps('ok')\n\n\n#####################\n####### TAGS #######\n#####################\n\n@mod.route('/tag', methods=['POST'])\n@crossdomain(origin='*')\ndef newTag():\n target_id = None\n target = None\n key = request.form['k']\n user = get_user_name(key)\n target_id = request.form['b']\n if Blibb.can_write(target_id, user):\n tag = request.form['t']\n 
\n target.addTag(target_id, tag)\n\n return json.dumps('ok')\n\n\n@mod.route('/action/image', methods=['POST'])\n@crossdomain(origin='*')\ndef updateImage():\n object_id = request.form['object_id']\n image_id = request.form['image_id']\n if object_id is None:\n abort(404)\n if is_valid_id(image_id) and is_valid_id(object_id):\n Blibb.add_picture({'_id': ObjectId(object_id)}, image_id)\n return 'ok'\n\n\n@mod.route('/actions/webhook', methods=['POST'])\n@crossdomain(origin='*')\ndef add_webhook():\n key = request.form['login_key']\n bid = request.form['blibb_id']\n callback = request.form['callback']\n fields = request.form['fields']\n action = request.form['action']\n user = get_key(key)\n res = dict()\n wb = {'a': action, 'u': callback, 'f': fields}\n if is_valid_id(bid):\n if Blibb.can_write(user, '', bid):\n Blibb.add_webhook(bid, wb)\n res['result'] = 'ok'\n else:\n abort(401)\n else:\n res['error'] = 'Object Id is not valid'\n return jsonify(res)\n\n\n@mod.route('/actions/group', methods=['POST'])\n@crossdomain(origin='*')\ndef add_user_to_group():\n key = request.form['login_key']\n bid = request.form['blibb_id']\n username = request.form['username']\n user = get_key(key)\n res = dict()\n if is_valid_id(bid):\n user_to_add = User.get_by_name(username)\n if user_to_add:\n if Blibb.can_write(user, '', bid):\n Blibb.add_user_to_group(username, bid)\n res['result'] = 'ok'\n else:\n res['error'] = 'No permission'\n else:\n res['error'] = 'User not found'\n else:\n res['error'] = 'Object Id is not valid'\n return jsonify(res)\n\n\n@mod.route('/meta/webhooks/<bid>', methods=['GET'])\n@crossdomain(origin='*')\ndef getWebhooks(bid=None):\n if is_valid_id(bid):\n b = Blibb()\n fields = b.get_webhooks(bid)\n return jsonify({'webhooks': fields})\n else:\n return jsonify({'error': 'Object id not valid'})\n\n\n@mod.route('/meta/fields/<bid>', methods=['GET'])\n@crossdomain(origin='*')\ndef getBlibbFields(bid=None):\n if bid is not None:\n fields = Blibb.get_fields(bid)\n return jsonify({'fields': fields})\n\n\n@mod.route('/object/<bid>', methods=['GET'])\n@crossdomain(origin='*')\ndef getObject(bid=None):\n if bid is not None:\n params = request.args.get('fields')\n fields = dict()\n for p in params.split(','):\n fields[p] = 1\n current_app.logger.info(fields)\n doc = Blibb.get_object({'_id': ObjectId(bid)}, fields)\n blibb = Blibb.to_dict(doc)\n #\n return jsonify(Blibb.flat_object(blibb))\n abort(404)\n","sub_path":"API/blibb/weblibb.py","file_name":"weblibb.py","file_ext":"py","file_size_in_byte":7607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"68165374","text":"from xgboost import XGBClassifier, XGBRegressor\nfrom sklearn.datasets import load_boston, load_breast_cancer\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.metrics import r2_score, accuracy_score\n# Regression\n\n# 1. Data\n# x, y = load_boston(return_X_y=True) # same as the approach below\ndatasets = load_boston()\nx = datasets.data\ny = datasets['target']\n\nx_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.2, shuffle=True, random_state=77\n)\n\n# 2. Model\nmodel = XGBRegressor(n_estimators=2000, learning_rate=0.01, n_jobs=8)\n\n# 3. Train\nmodel.fit(x_train, y_train, verbose=1, eval_metric=['rmse','logloss'],\n eval_set=[(x_train, y_train),(x_test, y_test)],\n early_stopping_rounds=20) # when two or more metrics are given, the last one is the one applied.\n \naaa = model.score(x_test, y_test)\nprint('aaa: ',aaa)\n# [99] validation_0-rmse:9.55219 validation_0-logloss:-803.53387 validation_1-rmse:8.48393 validation_1-logloss:-752.75574\n# aaa: -0.03814756625777593\n#validation 0 = train, validation 1=test\n\ny_pred = model.predict(x_test)\nr2 = r2_score(y_test, y_pred) # the ground-truth values must come first when scoring. \n# r2 = r2_score(y_pred, y_test) # this yields a different value. \nprint('r2 : ' ,r2)\n# aaa: 0.9078482289043561\n# r2 : 0.9078482289043561\n\nresults = model.evals_result()\nprint(results)\n# 'validation_0': OrderedDict([('rmse', [23.969549, 23.741985, 23.516665,\n# the numbers can be seen decreasing; their count equals n_estimators\n\nimport matplotlib.pyplot as plt\n\nepochs = len(results['validation_0']['logloss']) \nx_axis = range(0,epochs)\n\nfig, ax = plt.subplots() # draw the two curves in one figure\nax.plot(x_axis, results['validation_0']['logloss'], label = 'Train')\nax.plot(x_axis, results['validation_1']['logloss'], label = 'Test')\nax.legend()\nplt.ylabel('Log Loss')\nplt.title('XGBoost Log Loss')\n#plt.show()\n\nfig, ax = plt.subplots() # draw the two curves in one figure\nax.plot(x_axis, results['validation_0']['rmse'], label = 'Train')\nax.plot(x_axis, results['validation_1']['rmse'], label = 'Test')\nax.legend()\nplt.ylabel('Rmse')\nplt.title('XGBoost RMSE')\nplt.show()\n","sub_path":"ml/m38_eval_graph.py","file_name":"m38_eval_graph.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"296491730","text":"from django.core.management.base import BaseCommand\nfrom django.db.utils import OperationalError\n\nfrom customers.models import Customer\nfrom geolocation.models import Location\n\nimport csv\nimport sys\n\n\nclass Command(BaseCommand):\n \"\"\"\n Command that populates the Customers table\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.customers = self._get_customer_from_file()\n self.cities = [c['city'] for c in self.customers]\n\n def handle(self, *args, **options):\n sys.stdout.write(\"Populating db...\\n\")\n\n try:\n for customer in self.customers:\n Customer.objects.get_or_create(\n id=customer['id'],\n email=customer['email'],\n first_name=customer['first_name'],\n last_name=customer['last_name'],\n gender=customer['gender'],\n company=customer['company'],\n title=customer['title']\n )\n\n i = 1\n for city in self.cities:\n customer = Customer.objects.get(id=i)\n Location.objects.get_or_create(\n customer=customer,\n city=city,\n latitude=0,\n longitude=0\n )\n\n i += 1\n\n except OperationalError as error:\n raise error\n\n sys.stdout.write(\"Db populated\\n\")\n\n def _get_customer_from_file(self):\n with open('./customers.csv') as file:\n reader = csv.DictReader(file)\n return [{\n 'id': row['id'],\n 'email': row['email'],\n 'first_name': row['first_name'],\n 'last_name': row['last_name'],\n 'gender': row['gender'],\n 'company': row['company'],\n 'title': row['title'],\n 'city': row['city']\n }\n for row in reader]\n","sub_path":"api/core/management/commands/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"263114141","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tarfile\nimport sys\nimport os\nimport argparse\n\nfrom minelibs import 
*\n\n### tar.gz\ndef make_tarDir(output_tar_name, source_dir_list):\n with tarfile.open(output_tar_name, \"w:gz\") as tar:\n for item in source_dir_list:\n if item.endswith('/'):\n item = item[:-1]\n print(item)\n tar.add(item, arcname=os.path.basename(item))\n\ndef _argparse(para_list=None):\n parser = argparse.ArgumentParser(description='- Tar file or directory -')\n parser.add_argument('source', metavar='source_name', type=str, nargs='*',\n help='source list')\n\n parser.add_argument('-f', '--tarname', action='store', dest='tar_name',\n help='the Tar filename')\n return parser.parse_args(para_list)\n\nif __name__ == \"__main__\":\n parser = _argparse() if len(sys.argv) > 1 else _argparse(['-h'])\n\n if parser.tar_name:\n if not parser.tar_name.endswith('.tar.gz'):\n tar_name = parser.tar_name + '.tar.gz'\n else:\n tar_name = parser.tar_name\n else:\n if parser.source[0].endswith('/'):\n tar_name = os.path.basename((parser.source[0])[0:-1]) + '.tar.gz'\n else:\n tar_name = os.path.basename(parser.source[0]) + '.tar.gz'\n\n print(tar_name)\n print(get_current_time(True))\n for item in parser.source:\n if not os.path.exists(item):\n print('%s is not exist !' % item)\n raise SystemExit('Exiting ...')\n\n make_tarDir(tar_name, parser.source)\n\n","sub_path":"x.bin/tarfile_addDir.py","file_name":"tarfile_addDir.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57416242","text":"teams = {\n0:'zombie',\n1:'red',\n2:'blue',\n3:'spec'\n}\n\ngametypes = {\n0: 'ffa', #free for all\n1: None,\n2: None,\n3: 'tdm', #Team death match\n4: 'ts', #Team survivor\n5: 'ftl', #Follow the leader\n6: 'cah', #Capture and hold\n7: 'ctf', #Capture the flag\n8: 'bm' #Bomb\n}\n\n\n#@TODO These should all be URTNAME = {id:#, event:OUR_NAME}\nDEATH_WATER = {id:'1'}\nDEATH_LAVA ={id:'3'}\nDEATH_TELEFRAG ={id:'5'}\nDEATH_FALLING={id:'6'}\nDEATH_SUICIDE={id:'7'}\nDEATH_TRIGGER_HURT={id:'9'}\nDEATH_CHANGE_TEAM={id:'10'}\nWEAPON_KNIFE={id:'12'}\nWEAPON_KNIFE_THROWN={id:'13'}\nWEAPON_BERETTA={id:'14'}\nWEAPON_DEAGLE={id:'15'}\nWEAPON_SPAS={id:'16'}\nWEAPON_UMP45={id:'17'}\nWEAPON_MP5K={id:'18'}\nWEAPON_LR300={id:'19'}\nWEAPON_G36={id:'20'}\nWEAPON_PSG1={id:'21'}\nWEAPON_HK69={id:'22'}\nWEAPON_BLED={id:'23'}\nDEATH_KICKED={id:'24'}\nWEAPON_HEGRENADE={id:'25'}\nWEAPON_FLASH={id:None} #@DEV One of these is 26, the other 27. 
Time to investigate...\nWEAPON_SMOKE={id:None}\nWEAPON_SR8={id:'28'}\nWEAPON_AK103={id:'30'}\nWEAPON_SPLODED={id:'31'}\nWEAPON_SLAPPED={id:'32'}\nWEAPON_BOMBED={id:'33'}\nWEAPON_NUKED={id:'34'}\nWEAPON_NEGEV={id:'35'}\nWEAPON_HK69_HIT={id:'37'}\nWEAPON_M4={id:'38'}\nWEAPON_FLAG={id:'39'}\nWEAPON_GOOMBA={id:'40'}\n\ndamage = {\n\t'PASSIVE': [0, 0, 0, 0, 0, 0, 0, 0],\n DEATH_TELEFRAG: [0, 0, 0, 0, 0, 0, 0, 0],\n WEAPON_KNIFE: [100, 60, 44, 35, 20, 20, 44, 100],\n WEAPON_KNIFE_THROWN: [100, 60, 44, 35, 20, 20, 44, 100],\n WEAPON_BERETTA: [100, 34, 30, 20, 11, 11, 30, 100],\n WEAPON_DEAGLE: [100, 66, 57, 38, 22, 22, 57, 100],\n WEAPON_SPAS: [25, 25, 25, 25, 25, 25, 25, 100],\n WEAPON_UMP45: [100, 51, 44, 29, 17, 17, 44, 100],\n WEAPON_MP5K: [50, 34, 30, 20, 11, 11, 30, 100],\n WEAPON_LR300: [100, 51, 44, 29, 17, 17, 44, 100],\n WEAPON_G36: [100, 51, 44, 29, 17, 17, 44, 100],\n WEAPON_PSG1: [100, 63, 97, 63, 36, 36, 97, 100],\n WEAPON_HK69: [50, 50, 50, 50, 50, 50, 50, 100],\n WEAPON_BLED: [15, 15, 15, 15, 15, 15, 15, 15],\n DEATH_KICKED: [20, 20, 20, 20, 20, 20, 20, 100],\n WEAPON_HEGRENADE: [50, 50, 50, 50, 50, 50, 50, 100],\n WEAPON_SR8: [100, 100, 100, 100, 50, 50, 100, 100],\n WEAPON_AK103: [100, 58, 51, 34, 19, 19, 51, 100],\n WEAPON_NEGEV: [50, 34, 30, 20, 11, 11, 30, 100],\n WEAPON_HK69_HIT: [20, 20, 20, 20, 20, 20, 20, 100],\n WEAPON_M4: [100, 51, 44, 29, 17, 17, 44, 100],\n WEAPON_GOOMBA: [100, 100, 100, 100, 100, 100, 100, 100],\n }\n\n\n\ngearInfo = {\n\t#Sidearms\n\t'F':{'id':WEAPON_BERETTA, 'name':'beretta', 'damage':damage[WEAPON_BERETTA]},\n\t'G':{'id':WEAPON_DEAGLE,'name':'desert eagle', 'damage':damage[WEAPON_DEAGLE]},\n\n\t#Primary\n\t'K':{'id':WEAPON_HK69, 'name':'hk69', 'damage': damage[WEAPON_HK69]},\n\t'L':{'id':WEAPON_LR300, 'name':'lr300', 'damage': damage[WEAPON_LR300]},\n\t'M':{'id':WEAPON_G36, 'name':'g36', 'damage': damage[WEAPON_G36]},\n\t'N':{'id':WEAPON_PSG1, 'name':'psg1','damage': damage[WEAPON_PSG1]},\n\t'Z':{'id':WEAPON_SR8,'name':'sr8','damage': damage[WEAPON_SR8]},\n\t'a':{'id':WEAPON_AK103,'name':'ak103','damage': damage[WEAPON_AK103]},\n\t'c':{'id':WEAPON_NEGEV,'name':'negev','damage': damage[WEAPON_NEGEV]},\n\t'e':{'id':WEAPON_M4,'name':'m4','damage': damage[WEAPON_M4]},\n\n\t#Primary and Secondary\n\t'H':{'id':WEAPON_SPAS,'name':'spas', 'damage': damage[WEAPON_SPAS]},\n\t'I':{'id':WEAPON_MP5K,'name':'mp5k', 'damage': damage[WEAPON_MP5K]},\n\t'J':{'id':WEAPON_UMP45,'name':'ump45', 'damage': damage[WEAPON_UMP45]},\n\n\t#Grenades\n\t'O':{'id':WEAPON_HEGRENADE, 'name':'he grenade', 'damage': damage[WEAPON_HEGRENADE]},\n\t'Q':None, #{'id':None,'name':'smoke grenade','damage': damage['PASSIVE']}, #Smoke nade\n\n\t#Items\n\t'R':None, #Kevlar vest\n\t'S':None, #TAC Goggles\n\t'T':None, #Medkit\n\t'Y':None, #Silencer\n\t'V':None, #laser sight\n\t'W':None, #kevlar helmet\n\t'X':None, #extra ammo\n\n\t'A':None #None :D\n}\n\n\n\"\"\"\nMuch content in this file has been found in the B3 source. \nWe thank the entire BigBrotherBot team for their contributions \nto the Urban Terror community, and credit much of the data/content \nin this file to them.\n\"\"\"\n\n ","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":3983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"203332600","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 11 21:16:17 2019\n\n@author: kristin.lomicka\n\"\"\"\n\nimport dataset\nimport pandas as pd\nimport numpy as np\n\n#connect to postgres\nds = dataset.connect(\"postgresql://postgres@localhost/hw1_data_warehouse\")\n#export sql tables\ncustomers_a = pd.DataFrame(ds['customers_a'].all())\nemployees_new = pd.DataFrame(ds['employees_new'].all())\noffices_a = pd.DataFrame(ds['offices_a'].all())\norder_metadata_a = pd.DataFrame(ds['order_metadata_a'].all())\norders_new_a = pd.DataFrame(ds['orders_new_a'].all())\nproducts_a = pd.DataFrame(ds['products_a'].all())\n\n#create date dimension table\ndate_d = order_metadata_a[['order_date']].drop_duplicates().reset_index(drop=True)\ndate_d['order_date_time'] = pd.to_datetime(date_d['order_date'])\ndate_d['order_date'] = pd.to_datetime(date_d['order_date']).dt.date\ndate_d['day_of_week'] = date_d['order_date_time'].dt.weekday_name\ndate_d['month'] = date_d['order_date_time'].dt.month\ndate_d['year'] = date_d['order_date_time'].dt.year\ndate_d['quarter']= date_d['order_date_time'].dt.quarter\n#insert unique ID\ndate_d.insert(0, 'date_id', range(1, 1 + len(date_d)))\n\n\n#create remaining dimension tables\nemployees_d = employees_new[['employee_number', 'last_name', 'first_name', 'reports_to', 'job_title', 'office_code']]\noffices_d = offices_a[['office_code', 'city', 'state', 'country', 'office_location']]\nproducts_d = products_a[['product_line', 'product_code', 'product_name', 'product_scale', 'product_vendor', 'product_description', 'quantity_in_stock', 'buy_price', '_m_s_r_p', 'html_description']]\ncustomers_d = customers_a[['customer_number', 'customer_name', 'contact_last_name', 'contact_first_name', 'city', 'state', 'country']]\n\n#check employees_d for duplicate values\ndupes_employees_d = employees_new.pivot_table(index=['employee_number'], aggfunc='size')\nprint(dupes_employees_d)\n\n#create measure table\norders_measure = orders_new_a[['order_number', 'order_line_number', 'customer_number', 'product_code', 'quantity_ordered', 'price_each']]\norders_measure = pd.merge(orders_measure, order_metadata_a[['order_number', 'order_date', 'sales_rep_employee_number']], on='order_number', how='left')\norders_measure.rename(columns={'sales_rep_employee_number':'employee_number'}, inplace=True)\norders_measure = pd.merge(orders_measure, employees_new[['office_code', 'employee_number']], on='employee_number', how='left')\ndate_d['order_date'] = pd.to_datetime(date_d['order_date']).dt.date\n\n#calculate total cost\norders_measure = pd.merge(orders_measure, products_d[['product_code', 'buy_price']], on='product_code', how='left')\norders_measure['total_cost'] = orders_measure['quantity_ordered'] * orders_measure['buy_price']\norders_measure = orders_measure.drop(columns= 'buy_price')\n\n#calculate total revenue\norders_measure['total_revenue'] = orders_measure['quantity_ordered'] * orders_measure['price_each']\n\n\n#calculate total profit\norders_measure['total_profit'] = orders_measure['total_revenue'] - orders_measure['total_cost']\n\n#calculate profit_margin\norders_measure['profit_margin'] = 
orders_measure['total_profit'] / orders_measure['total_cost'] * 100\n\n#Question: Is there a way to perform the calculation directly from the other dataframe?\n\n#import new dataframes into postgresql\n#import employees_d\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['employees_d'] #python code name#\ncs.insert_many(employees_d.to_dict('records')) #sql code name#\n# # # import offices_d\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['offices_d'] #python code name#\ncs.insert_many(offices_d.to_dict('records')) #sql code name#\n# # #import products_d\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['products_d'] #python code name#\ncs.insert_many(products_d.to_dict('records')) #sql code name#\n# # #import customers_d\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['customers_d'] #python code name#\ncs.insert_many(customers_d.to_dict('records')) #sql code name#\n# # #import date_d\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['date_d'] #python code name#\ncs.insert_many(date_d.to_dict('records')) #sql code name#\n# # #import orders_measure\nds = dataset.connect(\"postgresql://postgres@localhost/hw2_analytics_db\")\ncs = ds['orders_measure'] #python code name#\ncs.insert_many(orders_measure.to_dict('records')) #sql code name#","sub_path":"homework/data_warehousing_hw_2_KL.py","file_name":"data_warehousing_hw_2_KL.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565161935","text":"import cgi\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.api import urlfetch\nfrom django.utils import simplejson as json\n\nAPP_ID = '365508406808632'\nAPP_SECRET = 'ac8a79f5b39168d19629644cbf10780a'\nCANVAS_PAGE = 'http://herrpfeffer.appspot.com/'\n\nsigned_request = ''\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n code = self.request.get('code')\n if code:\n url = ('https://graph.facebook.com/oauth/access_token?'\n + 'client_id=' + APP_ID\n + '&redirect_uri=' + CANVAS_PAGE\n + '&client_secret=' + APP_SECRET\n + '&code=' + code)\n result = urlfetch.fetch(url)\n if result.status_code == 200:\n data = cgi.parse_qs(result.content)\n access_token = ''.join(data['access_token'])\n url = ('https://graph.facebook.com/me?'\n + 'access_token=' + access_token)\n result = urlfetch.fetch(url)\n user = json.loads(result.content)\n self.response.out.write('Hello ' + user['first_name'] + ' '\n + user['last_name'])\n else:\n self.response.out.write(\"Authentication error\")\n else:\n self.redirect('https://www.facebook.com/dialog/oauth?'\n + 'client_id=' + APP_ID\n + '&redirect_uri=' + CANVAS_PAGE)\n\napplication = webapp.WSGIApplication(\n [('/', MainPage)],\n debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"qndfacebook/qndfacebook.py","file_name":"qndfacebook.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103702265","text":"# Import navigation code\nfrom ..navigation.env_params import ObjectType\nfrom ..navigation.navigation import Navigator\nfrom ..navigation.geometry import *\nfrom ..interop import DetectedObject\nfrom ..navigation 
import env_params\n\nfrom roverbot_lib import *\n\n# Some options for debugging\nprint_timing = False\nvisualize_nav = True\nmove_speed = 0.03\nrotate_speed = 1\n\nfirst_update = True\n\nif visualize_nav:\n from nav_viz import NavViz\n\nclass RoverPose:\n def __init__(self, pos, angle):\n self.__position = pos\n self.__angle = angle\n self.__last_position = pos\n\n def set_position(self, position):\n self.__last_position = self.__position\n self.__position = position\n\n def set_angle(self, angle):\n self.__angle = angle\n\n def apply_velocity(self, velocity, dt):\n self.__position = self.__position + velocity * dt\n\n def apply_angular_velocity(self, velocity, dt):\n self.__angle = self.__angle + velocity * dt\n\n def get_position(self):\n return self.__position\n\n def get_angle(self):\n return self.__angle\n\n def delta_position(self):\n return self.__position - self.__last_position\n\nsceneParameters = SceneParameters()\n\nrobotParameters = RobotParameters()\nrobotParameters.driveType = 'differential'\nrobotParameters.collectorQuality\n\ndef to_detected_objects(object_type, object_list):\n if object_list == None:\n return []\n\n detected_objects = []\n if len(object_list) > 0:\n if isinstance(object_list[0], list):\n for o in object_list:\n detected_objects.append(DetectedObject(object_type, o[0] * env_params.meter_scale, o[1], 0))\n else:\n detected_objects.append(DetectedObject(object_type, object_list[0] * env_params.meter_scale, object_list[1], 1))\n\n return detected_objects\n\nif __name__ == '__main__':\n roverBotSim = VREP_RoverRobot('127.0.0.1', robotParameters, sceneParameters)\n roverBotSim.StartSimulator()\n\n if visualize_nav:\n nav_viz = NavViz()\n\n nav = Navigator(roverBotSim)\n rover_pose = RoverPose(Vector(0, 0), 0)\n\n roverBotSim.UpdateObjectPositions()\n roverBotSim.SetTargetVelocities(0, 0)\n roverBotSim.UpdateObjectPositions()\n\n while True:\n sim_update_start = time.time()\n sim_rover_pos, _, _, _ = roverBotSim.UpdateObjectPositions()\n\n if sim_rover_pos == None:\n continue\n\n # if first_update:\n # input() # Wait for some input\n # first_update = False\n\n rover_pose.set_position(Vector(sim_rover_pos[0] * env_params.meter_scale, sim_rover_pos[1] * env_params.meter_scale))\n rover_pose.set_angle(sim_rover_pos[5])\n\n sample, lander, obstacle, rock = roverBotSim.GetDetectedObjects()\n visible_objects = []\n visible_objects = visible_objects + to_detected_objects(ObjectType.ROCK, rock)\n visible_objects = visible_objects + to_detected_objects(ObjectType.SAMPLE, sample)\n visible_objects = visible_objects + to_detected_objects(ObjectType.OBSTACLE, obstacle)\n visible_objects = visible_objects + to_detected_objects(ObjectType.LANDER, lander)\n\n nav_start_time = time.time()\n nav.update(rover_pose.delta_position(), rover_pose.get_angle(), visible_objects)\n nav_update_time = time.time() - nav_start_time\n\n speed, ori_cor = nav.get_control_parameters()\n # print('Speed: {}, Ori: {}'.format(speed, ori_cor))\n roverBotSim.SetTargetVelocities(speed * move_speed, ori_cor * rotate_speed)\n\n if visualize_nav:\n nav_viz.update()\n nav_viz.draw(nav.environment(), nav.current_path(), speed, ori_cor, nav.get_routine_type())\n\n if print_timing:\n print('Nav Update Time: {}'.format(nav_update_time))\n print('Sim update time: {}'.format(time.time() - sim_update_start - 
nav_update_time))\n","sub_path":"subsystems/vrep/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351604998","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"Test the classic PyGreSQL interface.\n\nSub-tests for the low-level connection object.\n\nContributed by Christoph Zwerschke.\n\nThese tests need a database to test against.\n\"\"\"\n\nimport unittest\nimport threading\nimport time\nimport os\n\nfrom collections import namedtuple\n\ntry:\n # noinspection PyCompatibility\n from collections.abc import Iterable\nexcept ImportError: # Python < 3.3\n from collections import Iterable\n\nfrom decimal import Decimal\n\nimport pg # the module under test\n\n# We need a database to test against. If LOCAL_PyGreSQL.py exists we will\n# get our information from that. Otherwise we use the defaults.\n# These tests should be run with various PostgreSQL versions and databases\n# created with different encodings and locales. Particularly, make sure the\n# tests are running against databases created with both SQL_ASCII and UTF8.\ndbname = 'unittest'\ndbhost = None\ndbport = 5432\n\ntry:\n from .LOCAL_PyGreSQL import * # noqa: F401\nexcept (ImportError, ValueError):\n try:\n from LOCAL_PyGreSQL import * # noqa: F401\n except ImportError:\n pass\n\ntry: # noinspection PyUnboundLocalVariable,PyUnresolvedReferences\n long\nexcept NameError: # Python >= 3.0\n long = int\n\ntry: # noinspection PyUnboundLocalVariable,PyUnresolvedReferences\n unicode\nexcept NameError: # Python >= 3.0\n unicode = str\n\nunicode_strings = str is not bytes\n\nwindows = os.name == 'nt'\n\n# There is a known a bug in libpq under Windows which can cause\n# the interface to crash when calling PQhost():\ndo_not_ask_for_host = windows\ndo_not_ask_for_host_reason = 'libpq issue on Windows'\n\n\ndef connect():\n \"\"\"Create a basic pg connection to the test database.\"\"\"\n # noinspection PyArgumentList\n connection = pg.connect(dbname, dbhost, dbport)\n connection.query(\"set client_min_messages=warning\")\n return connection\n\n\ndef connect_nowait():\n \"\"\"Start a basic pg connection in a non-blocking manner.\"\"\"\n # noinspection PyArgumentList\n return pg.connect(dbname, dbhost, dbport, nowait=True)\n\n\nclass TestCanConnect(unittest.TestCase):\n \"\"\"Test whether a basic connection to PostgreSQL is possible.\"\"\"\n\n def testCanConnect(self):\n try:\n connection = connect()\n rc = connection.poll()\n except pg.Error as error:\n self.fail('Cannot connect to database %s:\\n%s' % (dbname, error))\n self.assertEqual(rc, pg.POLLING_OK)\n self.assertIs(connection.is_non_blocking(), False)\n connection.set_non_blocking(True)\n self.assertIs(connection.is_non_blocking(), True)\n connection.set_non_blocking(False)\n self.assertIs(connection.is_non_blocking(), False)\n try:\n connection.close()\n except pg.Error:\n self.fail('Cannot close the database connection')\n\n def testCanConnectNoWait(self):\n try:\n connection = connect_nowait()\n rc = connection.poll()\n self.assertEqual(rc, pg.POLLING_READING)\n while rc not in (pg.POLLING_OK, pg.POLLING_FAILED):\n rc = connection.poll()\n except pg.Error as error:\n self.fail('Cannot connect to database %s:\\n%s' % (dbname, error))\n self.assertEqual(rc, pg.POLLING_OK)\n self.assertIs(connection.is_non_blocking(), False)\n connection.set_non_blocking(True)\n self.assertIs(connection.is_non_blocking(), True)\n 
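# sketch of the non-blocking pattern exercised above: connect with nowait=True,\n # then call poll() repeatedly until it reports POLLING_OK or POLLING_FAILED;\n # toggling the flag back should restore the default blocking behaviour\n 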
connection.set_non_blocking(False)\n self.assertIs(connection.is_non_blocking(), False)\n try:\n connection.close()\n except pg.Error:\n self.fail('Cannot close the database connection')\n\n\nclass TestConnectObject(unittest.TestCase):\n \"\"\"Test existence of basic pg connection methods.\"\"\"\n\n def setUp(self):\n self.connection = connect()\n\n def tearDown(self):\n try:\n self.connection.close()\n except pg.InternalError:\n pass\n\n def is_method(self, attribute):\n \"\"\"Check if given attribute on the connection is a method.\"\"\"\n if do_not_ask_for_host and attribute == 'host':\n return False\n return callable(getattr(self.connection, attribute))\n\n def testClassName(self):\n self.assertEqual(self.connection.__class__.__name__, 'Connection')\n\n def testModuleName(self):\n self.assertEqual(self.connection.__class__.__module__, 'pg')\n\n def testStr(self):\n r = str(self.connection)\n self.assertTrue(r.startswith(' 5:\n break\n r = self.connection.cancel() # cancel the running query\n thread.join() # wait for the thread to end\n t2 = time.time()\n\n self.assertIsInstance(r, int)\n self.assertEqual(r, 1) # return code should be 1\n self.assertLessEqual(t2 - t1, 3) # time should be under 3 seconds\n self.assertTrue(errors)\n\n def testMethodFileNo(self):\n r = self.connection.fileno()\n self.assertIsInstance(r, int)\n self.assertGreaterEqual(r, 0)\n\n def testMethodTransaction(self):\n transaction = self.connection.transaction\n self.assertRaises(TypeError, transaction, None)\n self.assertEqual(transaction(), pg.TRANS_IDLE)\n self.connection.query('begin')\n self.assertEqual(transaction(), pg.TRANS_INTRANS)\n self.connection.query('rollback')\n self.assertEqual(transaction(), pg.TRANS_IDLE)\n\n def testMethodParameter(self):\n parameter = self.connection.parameter\n query = self.connection.query\n self.assertRaises(TypeError, parameter)\n r = parameter('this server setting does not exist')\n self.assertIsNone(r)\n s = query('show server_version').getresult()[0][0]\n self.assertIsNotNone(s)\n r = parameter('server_version')\n self.assertEqual(r, s)\n s = query('show server_encoding').getresult()[0][0]\n self.assertIsNotNone(s)\n r = parameter('server_encoding')\n self.assertEqual(r, s)\n s = query('show client_encoding').getresult()[0][0]\n self.assertIsNotNone(s)\n r = parameter('client_encoding')\n self.assertEqual(r, s)\n s = query('show server_encoding').getresult()[0][0]\n self.assertIsNotNone(s)\n r = parameter('server_encoding')\n self.assertEqual(r, s)\n\n\nclass TestSimpleQueries(unittest.TestCase):\n \"\"\"Test simple queries via a basic pg connection.\"\"\"\n\n def setUp(self):\n self.c = connect()\n\n def tearDown(self):\n self.doCleanups()\n self.c.close()\n\n def testClassName(self):\n r = self.c.query(\"select 1\")\n self.assertEqual(r.__class__.__name__, 'Query')\n\n def testModuleName(self):\n r = self.c.query(\"select 1\")\n self.assertEqual(r.__class__.__module__, 'pg')\n\n def testStr(self):\n q = (\"select 1 as a, 'hello' as h, 'w' as world\"\n \" union select 2, 'xyz', 'uvw'\")\n r = self.c.query(q)\n self.assertEqual(\n str(r),\n 'a| h |world\\n'\n '-+-----+-----\\n'\n '1|hello|w \\n'\n '2|xyz |uvw \\n'\n '(2 rows)')\n\n def testRepr(self):\n r = repr(self.c.query(\"select 1\"))\n self.assertTrue(r.startswith(' 0:\n field_name = '\"%s\"' % field_name\n r = f(field_name)\n self.assertIsInstance(r, tuple)\n self.assertEqual(len(r), 4)\n self.assertEqual(r, info)\n r = f(field_num)\n self.assertIsInstance(r, tuple)\n self.assertEqual(len(r), 4)\n 
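# lookups by field name and by field number must return the identical\n # 4-tuple of field metadata\n 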
self.assertEqual(r, info)\n self.assertRaises(IndexError, f, 'foobaz')\n self.assertRaises(IndexError, f, '\"Foobar\"')\n self.assertRaises(IndexError, f, -1)\n self.assertRaises(IndexError, f, 4)\n\n def testNtuples(self): # deprecated\n q = \"select 1 where false\"\n r = self.c.query(q).ntuples()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 0)\n q = (\"select 1 as a, 2 as b, 3 as c, 4 as d\"\n \" union select 5 as a, 6 as b, 7 as c, 8 as d\")\n r = self.c.query(q).ntuples()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 2)\n q = (\"select 1 union select 2 union select 3\"\n \" union select 4 union select 5 union select 6\")\n r = self.c.query(q).ntuples()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 6)\n\n def testLen(self):\n q = \"select 1 where false\"\n self.assertEqual(len(self.c.query(q)), 0)\n q = (\"select 1 as a, 2 as b, 3 as c, 4 as d\"\n \" union select 5 as a, 6 as b, 7 as c, 8 as d\")\n self.assertEqual(len(self.c.query(q)), 2)\n q = (\"select 1 union select 2 union select 3\"\n \" union select 4 union select 5 union select 6\")\n self.assertEqual(len(self.c.query(q)), 6)\n\n def testQuery(self):\n query = self.c.query\n query(\"drop table if exists test_table\")\n self.addCleanup(query, \"drop table test_table\")\n q = \"create table test_table (n integer)\"\n r = query(q)\n self.assertIsNone(r)\n q = \"insert into test_table values (1)\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '1')\n q = \"insert into test_table select 2\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '1')\n q = \"select n from test_table where n>1\"\n r = query(q).getresult()\n self.assertEqual(len(r), 1)\n r = r[0]\n self.assertEqual(len(r), 1)\n r = r[0]\n self.assertIsInstance(r, int)\n self.assertEqual(r, 2)\n q = \"insert into test_table select 3 union select 4 union select 5\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '3')\n q = \"update test_table set n=4 where n<5\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '4')\n # noinspection SqlWithoutWhere\n q = \"delete from test_table\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '5')\n\n def testQueryWithOids(self):\n if self.c.server_version >= 120000:\n self.skipTest(\"database does not support tables with oids\")\n query = self.c.query\n query(\"drop table if exists test_table\")\n self.addCleanup(query, \"drop table test_table\")\n q = \"create table test_table (n integer) with oids\"\n r = query(q)\n self.assertIsNone(r)\n q = \"insert into test_table values (1)\"\n r = query(q)\n self.assertIsInstance(r, int)\n q = \"insert into test_table select 2\"\n r = query(q)\n self.assertIsInstance(r, int)\n oid = r\n q = \"select oid from test_table where n=2\"\n r = query(q).getresult()\n self.assertEqual(len(r), 1)\n r = r[0]\n self.assertEqual(len(r), 1)\n r = r[0]\n self.assertIsInstance(r, int)\n self.assertEqual(r, oid)\n q = \"insert into test_table select 3 union select 4 union select 5\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '3')\n q = \"update test_table set n=4 where n<5\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '4')\n # noinspection SqlWithoutWhere\n q = \"delete from test_table\"\n r = query(q)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '5')\n\n def testMemSize(self):\n # noinspection PyUnresolvedReferences\n if pg.get_pqlib_version() < 120000:\n self.skipTest(\"pqlib does not support memsize()\")\n query = self.c.query\n q 
= query(\"select repeat('foo!', 8)\")\n size = q.memsize()\n self.assertIsInstance(size, long)\n self.assertGreaterEqual(size, 32)\n self.assertLess(size, 8000)\n q = query(\"select repeat('foo!', 2000)\")\n size = q.memsize()\n self.assertGreaterEqual(size, 8000)\n self.assertLess(size, 16000)\n\n\nclass TestUnicodeQueries(unittest.TestCase):\n \"\"\"Test unicode strings as queries via a basic pg connection.\"\"\"\n\n def setUp(self):\n self.c = connect()\n self.c.query('set client_encoding=utf8')\n\n def tearDown(self):\n self.c.close()\n\n def testGetresulAscii(self):\n result = u'Hello, world!'\n q = u\"select '%s'\" % result\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testDictresulAscii(self):\n result = u'Hello, world!'\n q = u\"select '%s' as greeting\" % result\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testGetresultUtf8(self):\n result = u'Hello, wörld & мир!'\n q = u\"select '%s'\" % result\n if not unicode_strings:\n result = result.encode('utf8')\n # pass the query as unicode\n try:\n v = self.c.query(q).getresult()[0][0]\n except(pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support utf8\")\n v = None\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('utf8')\n # pass the query as bytes\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testDictresultUtf8(self):\n result = u'Hello, wörld & мир!'\n q = u\"select '%s' as greeting\" % result\n if not unicode_strings:\n result = result.encode('utf8')\n try:\n v = self.c.query(q).dictresult()[0]['greeting']\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support utf8\")\n v = None\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('utf8')\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testGetresultLatin1(self):\n try:\n self.c.query('set client_encoding=latin1')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin1\")\n result = u'Hello, wörld!'\n q = u\"select '%s'\" % result\n if not unicode_strings:\n result = result.encode('latin1')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('latin1')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testDictresultLatin1(self):\n try:\n self.c.query('set client_encoding=latin1')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin1\")\n result = u'Hello, wörld!'\n q = u\"select '%s' as greeting\" % result\n if not unicode_strings:\n result = result.encode('latin1')\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('latin1')\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testGetresultCyrillic(self):\n try:\n self.c.query('set client_encoding=iso_8859_5')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support cyrillic\")\n result = u'Hello, мир!'\n q = u\"select '%s'\" % result\n if not unicode_strings:\n result = result.encode('cyrillic')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n 
self.assertEqual(v, result)\n q = q.encode('cyrillic')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testDictresultCyrillic(self):\n try:\n self.c.query('set client_encoding=iso_8859_5')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support cyrillic\")\n result = u'Hello, мир!'\n q = u\"select '%s' as greeting\" % result\n if not unicode_strings:\n result = result.encode('cyrillic')\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('cyrillic')\n v = self.c.query(q).dictresult()[0]['greeting']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testGetresultLatin9(self):\n try:\n self.c.query('set client_encoding=latin9')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin9\")\n result = u'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)'\n q = u\"select '%s'\" % result\n if not unicode_strings:\n result = result.encode('latin9')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('latin9')\n v = self.c.query(q).getresult()[0][0]\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n def testDictresultLatin9(self):\n try:\n self.c.query('set client_encoding=latin9')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin9\")\n result = u'smœrebrœd with pražská šunka (pay in ¢, £, €, or ¥)'\n q = u\"select '%s' as menu\" % result\n if not unicode_strings:\n result = result.encode('latin9')\n v = self.c.query(q).dictresult()[0]['menu']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n q = q.encode('latin9')\n v = self.c.query(q).dictresult()[0]['menu']\n self.assertIsInstance(v, str)\n self.assertEqual(v, result)\n\n\nclass TestParamQueries(unittest.TestCase):\n \"\"\"Test queries with parameters via a basic pg connection.\"\"\"\n\n def setUp(self):\n self.c = connect()\n self.c.query('set client_encoding=utf8')\n\n def tearDown(self):\n self.c.close()\n\n def testQueryWithNoneParam(self):\n self.assertRaises(TypeError, self.c.query, \"select $1\", None)\n self.assertRaises(TypeError, self.c.query, \"select $1+$2\", None, None)\n self.assertEqual(\n self.c.query(\"select $1::integer\", (None,)).getresult(), [(None,)])\n self.assertEqual(\n self.c.query(\"select $1::text\", [None]).getresult(), [(None,)])\n self.assertEqual(\n self.c.query(\"select $1::text\", [[None]]).getresult(), [(None,)])\n\n def testQueryWithBoolParams(self, bool_enabled=None):\n query = self.c.query\n if bool_enabled is not None:\n bool_enabled_default = pg.get_bool()\n pg.set_bool(bool_enabled)\n try:\n bool_on = bool_enabled or bool_enabled is None\n v_false, v_true = (False, True) if bool_on else 'ft'\n r_false, r_true = [(v_false,)], [(v_true,)]\n self.assertEqual(query(\"select false\").getresult(), r_false)\n self.assertEqual(query(\"select true\").getresult(), r_true)\n q = \"select $1::bool\"\n self.assertEqual(query(q, (None,)).getresult(), [(None,)])\n self.assertEqual(query(q, ('f',)).getresult(), r_false)\n self.assertEqual(query(q, ('t',)).getresult(), r_true)\n self.assertEqual(query(q, ('false',)).getresult(), r_false)\n self.assertEqual(query(q, ('true',)).getresult(), r_true)\n self.assertEqual(query(q, ('n',)).getresult(), r_false)\n self.assertEqual(query(q, ('y',)).getresult(), r_true)\n self.assertEqual(query(q, (0,)).getresult(), 
r_false)\n self.assertEqual(query(q, (1,)).getresult(), r_true)\n self.assertEqual(query(q, (False,)).getresult(), r_false)\n self.assertEqual(query(q, (True,)).getresult(), r_true)\n finally:\n if bool_enabled is not None:\n # noinspection PyUnboundLocalVariable\n pg.set_bool(bool_enabled_default)\n\n def testQueryWithBoolParamsNotDefault(self):\n self.testQueryWithBoolParams(bool_enabled=not pg.get_bool())\n\n def testQueryWithIntParams(self):\n query = self.c.query\n self.assertEqual(query(\"select 1+1\").getresult(), [(2,)])\n self.assertEqual(query(\"select 1+$1\", (1,)).getresult(), [(2,)])\n self.assertEqual(query(\"select 1+$1\", [1]).getresult(), [(2,)])\n self.assertEqual(query(\"select $1::integer\", (2,)).getresult(), [(2,)])\n self.assertEqual(query(\"select $1::text\", (2,)).getresult(), [('2',)])\n self.assertEqual(\n query(\"select 1+$1::numeric\", [1]).getresult(), [(Decimal('2'),)])\n self.assertEqual(\n query(\"select 1, $1::integer\", (2,)).getresult(), [(1, 2)])\n self.assertEqual(\n query(\"select 1 union select $1::integer\", (2,)).getresult(),\n [(1,), (2,)])\n self.assertEqual(\n query(\"select $1::integer+$2\", (1, 2)).getresult(), [(3,)])\n self.assertEqual(\n query(\"select $1::integer+$2\", [1, 2]).getresult(), [(3,)])\n self.assertEqual(\n query(\"select 0+$1+$2+$3+$4+$5+$6\", list(range(6))).getresult(),\n [(15,)])\n\n def testQueryWithStrParams(self):\n query = self.c.query\n self.assertEqual(\n query(\"select $1||', world!'\", ('Hello',)).getresult(),\n [('Hello, world!',)])\n self.assertEqual(\n query(\"select $1||', world!'\", ['Hello']).getresult(),\n [('Hello, world!',)])\n self.assertEqual(\n query(\"select $1||', '||$2||'!'\", ('Hello', 'world')).getresult(),\n [('Hello, world!',)])\n self.assertEqual(\n query(\"select $1::text\", ('Hello, world!',)).getresult(),\n [('Hello, world!',)])\n self.assertEqual(\n query(\"select $1::text,$2::text\", ('Hello', 'world')).getresult(),\n [('Hello', 'world')])\n self.assertEqual(\n query(\"select $1::text,$2::text\", ['Hello', 'world']).getresult(),\n [('Hello', 'world')])\n self.assertEqual(\n query(\"select $1::text union select $2::text\",\n ('Hello', 'world')).getresult(),\n [('Hello',), ('world',)])\n try:\n query(\"select 'wörld'\")\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest('database does not support utf8')\n self.assertEqual(\n query(\"select $1||', '||$2||'!'\",\n ('Hello', 'w\\xc3\\xb6rld')).getresult(),\n [('Hello, w\\xc3\\xb6rld!',)])\n\n def testQueryWithUnicodeParams(self):\n query = self.c.query\n try:\n query('set client_encoding=utf8')\n self.assertEqual(\n query(\"select 'wörld'\").getresult()[0][0], 'wörld')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support utf8\")\n self.assertEqual(\n query(\"select $1||', '||$2||'!'\", ('Hello', u'wörld')).getresult(),\n [('Hello, wörld!',)])\n\n def testQueryWithUnicodeParamsLatin1(self):\n query = self.c.query\n try:\n query('set client_encoding=latin1')\n self.assertEqual(\n query(\"select 'wörld'\").getresult()[0][0], 'wörld')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin1\")\n r = query(\"select $1||', '||$2||'!'\", ('Hello', u'wörld')).getresult()\n if unicode_strings:\n self.assertEqual(r, [('Hello, wörld!',)])\n else:\n self.assertEqual(r, [(u'Hello, wörld!'.encode('latin1'),)])\n self.assertRaises(\n UnicodeError, query, \"select $1||', '||$2||'!'\",\n ('Hello', u'мир'))\n query('set client_encoding=iso_8859_1')\n r = query(\n \"select 
$1||', '||$2||'!'\", ('Hello', u'wörld')).getresult()\n if unicode_strings:\n self.assertEqual(r, [('Hello, wörld!',)])\n else:\n self.assertEqual(r, [(u'Hello, wörld!'.encode('latin1'),)])\n self.assertRaises(\n UnicodeError, query, \"select $1||', '||$2||'!'\",\n ('Hello', u'мир'))\n query('set client_encoding=sql_ascii')\n self.assertRaises(\n UnicodeError, query, \"select $1||', '||$2||'!'\",\n ('Hello', u'wörld'))\n\n def testQueryWithUnicodeParamsCyrillic(self):\n query = self.c.query\n try:\n query('set client_encoding=iso_8859_5')\n self.assertEqual(\n query(\"select 'мир'\").getresult()[0][0], 'мир')\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support cyrillic\")\n self.assertRaises(\n UnicodeError, query, \"select $1||', '||$2||'!'\",\n ('Hello', u'wörld'))\n r = query(\n \"select $1||', '||$2||'!'\", ('Hello', u'мир')).getresult()\n if unicode_strings:\n self.assertEqual(r, [('Hello, мир!',)])\n else:\n self.assertEqual(r, [(u'Hello, мир!'.encode('cyrillic'),)])\n query('set client_encoding=sql_ascii')\n self.assertRaises(\n UnicodeError, query, \"select $1||', '||$2||'!'\",\n ('Hello', u'мир!'))\n\n def testQueryWithMixedParams(self):\n self.assertEqual(\n self.c.query(\n \"select $1+2,$2||', world!'\", (1, 'Hello')).getresult(),\n [(3, 'Hello, world!')])\n self.assertEqual(\n self.c.query(\n \"select $1::integer,$2::date,$3::text\",\n (4711, None, 'Hello!')).getresult(),\n [(4711, None, 'Hello!')])\n\n def testQueryWithDuplicateParams(self):\n self.assertRaises(\n pg.ProgrammingError, self.c.query, \"select $1+$1\", (1,))\n self.assertRaises(\n pg.ProgrammingError, self.c.query, \"select $1+$1\", (1, 2))\n\n def testQueryWithZeroParams(self):\n self.assertEqual(\n self.c.query(\"select 1+1\", []).getresult(), [(2,)])\n\n def testQueryWithGarbage(self):\n garbage = r\"'\\{}+()-#[]oo324\"\n self.assertEqual(\n self.c.query(\"select $1::text AS garbage\",\n (garbage,)).dictresult(),\n [{'garbage': garbage}])\n\n\nclass TestPreparedQueries(unittest.TestCase):\n \"\"\"Test prepared queries via a basic pg connection.\"\"\"\n\n def setUp(self):\n self.c = connect()\n self.c.query('set client_encoding=utf8')\n\n def tearDown(self):\n self.c.close()\n\n def testEmptyPreparedStatement(self):\n self.c.prepare('', '')\n self.assertRaises(ValueError, self.c.query_prepared, '')\n\n def testInvalidPreparedStatement(self):\n self.assertRaises(pg.ProgrammingError, self.c.prepare, '', 'bad')\n\n def testDuplicatePreparedStatement(self):\n self.assertIsNone(self.c.prepare('q', 'select 1'))\n self.assertRaises(pg.ProgrammingError, self.c.prepare, 'q', 'select 2')\n\n def testNonExistentPreparedStatement(self):\n self.assertRaises(\n pg.OperationalError, self.c.query_prepared, 'does-not-exist')\n\n def testUnnamedQueryWithoutParams(self):\n self.assertIsNone(self.c.prepare('', \"select 'anon'\"))\n self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)])\n self.assertEqual(self.c.query_prepared('').getresult(), [('anon',)])\n\n def testNamedQueryWithoutParams(self):\n self.assertIsNone(self.c.prepare('hello', \"select 'world'\"))\n self.assertEqual(\n self.c.query_prepared('hello').getresult(), [('world',)])\n\n def testMultipleNamedQueriesWithoutParams(self):\n self.assertIsNone(self.c.prepare('query17', \"select 17\"))\n self.assertIsNone(self.c.prepare('query42', \"select 42\"))\n self.assertEqual(self.c.query_prepared('query17').getresult(), [(17,)])\n self.assertEqual(self.c.query_prepared('query42').getresult(), [(42,)])\n\n def 
testUnnamedQueryWithParams(self):\n self.assertIsNone(self.c.prepare('', \"select $1 || ', ' || $2\"))\n self.assertEqual(\n self.c.query_prepared('', ['hello', 'world']).getresult(),\n [('hello, world',)])\n self.assertIsNone(self.c.prepare('', \"select 1+ $1 + $2 + $3\"))\n self.assertEqual(\n self.c.query_prepared('', [17, -5, 29]).getresult(), [(42,)])\n\n def testMultipleNamedQueriesWithParams(self):\n self.assertIsNone(self.c.prepare('q1', \"select $1 || '!'\"))\n self.assertIsNone(self.c.prepare('q2', \"select $1 || '-' || $2\"))\n self.assertEqual(\n self.c.query_prepared('q1', ['hello']).getresult(),\n [('hello!',)])\n self.assertEqual(\n self.c.query_prepared('q2', ['he', 'lo']).getresult(),\n [('he-lo',)])\n\n def testDescribeNonExistentQuery(self):\n self.assertRaises(\n pg.OperationalError, self.c.describe_prepared, 'does-not-exist')\n\n def testDescribeUnnamedQuery(self):\n self.c.prepare('', \"select 1::int, 'a'::char\")\n r = self.c.describe_prepared('')\n self.assertEqual(r.listfields(), ('int4', 'bpchar'))\n\n def testDescribeNamedQuery(self):\n self.c.prepare('myquery', \"select 1 as first, 2 as second\")\n r = self.c.describe_prepared('myquery')\n self.assertEqual(r.listfields(), ('first', 'second'))\n\n def testDescribeMultipleNamedQueries(self):\n self.c.prepare('query1', \"select 1::int\")\n self.c.prepare('query2', \"select 1::int, 2::int\")\n r = self.c.describe_prepared('query1')\n self.assertEqual(r.listfields(), ('int4',))\n r = self.c.describe_prepared('query2')\n self.assertEqual(r.listfields(), ('int4', 'int4'))\n\n\nclass TestQueryResultTypes(unittest.TestCase):\n \"\"\"Test proper result types via a basic pg connection.\"\"\"\n\n def setUp(self):\n self.c = connect()\n self.c.query('set client_encoding=utf8')\n self.c.query(\"set datestyle='ISO,YMD'\")\n self.c.query(\"set timezone='UTC'\")\n\n def tearDown(self):\n self.c.close()\n\n def assert_proper_cast(self, value, pgtype, pytype):\n q = 'select $1::%s' % (pgtype,)\n try:\n r = self.c.query(q, (value,)).getresult()[0][0]\n except pg.ProgrammingError as e:\n if pgtype in ('json', 'jsonb'):\n self.skipTest('database does not support json')\n self.fail(str(e))\n # noinspection PyUnboundLocalVariable\n self.assertIsInstance(r, pytype)\n if isinstance(value, str):\n if not value or ' ' in value or '{' in value:\n value = '\"%s\"' % value\n value = '{%s}' % value\n r = self.c.query(q + '[]', (value,)).getresult()[0][0]\n if pgtype.startswith(('date', 'time', 'interval')):\n # arrays of these are casted by the DB wrapper only\n self.assertEqual(r, value)\n else:\n self.assertIsInstance(r, list)\n self.assertEqual(len(r), 1)\n self.assertIsInstance(r[0], pytype)\n\n def testInt(self):\n self.assert_proper_cast(0, 'int', int)\n self.assert_proper_cast(0, 'smallint', int)\n self.assert_proper_cast(0, 'oid', int)\n self.assert_proper_cast(0, 'cid', int)\n self.assert_proper_cast(0, 'xid', int)\n\n def testLong(self):\n self.assert_proper_cast(0, 'bigint', long)\n\n def testFloat(self):\n self.assert_proper_cast(0, 'float', float)\n self.assert_proper_cast(0, 'real', float)\n self.assert_proper_cast(0, 'double precision', float)\n self.assert_proper_cast('infinity', 'float', float)\n\n def testNumeric(self):\n decimal = pg.get_decimal()\n self.assert_proper_cast(decimal(0), 'numeric', decimal)\n self.assert_proper_cast(decimal(0), 'decimal', decimal)\n\n def testMoney(self):\n decimal = pg.get_decimal()\n self.assert_proper_cast(decimal('0'), 'money', decimal)\n\n def testBool(self):\n bool_type = bool if 
pg.get_bool() else str\n self.assert_proper_cast('f', 'bool', bool_type)\n\n def testDate(self):\n self.assert_proper_cast('1956-01-31', 'date', str)\n self.assert_proper_cast('10:20:30', 'interval', str)\n self.assert_proper_cast('08:42:15', 'time', str)\n self.assert_proper_cast('08:42:15+00', 'timetz', str)\n self.assert_proper_cast('1956-01-31 08:42:15', 'timestamp', str)\n self.assert_proper_cast('1956-01-31 08:42:15+00', 'timestamptz', str)\n\n def testText(self):\n self.assert_proper_cast('', 'text', str)\n self.assert_proper_cast('', 'char', str)\n self.assert_proper_cast('', 'bpchar', str)\n self.assert_proper_cast('', 'varchar', str)\n\n def testBytea(self):\n self.assert_proper_cast('', 'bytea', bytes)\n\n def testJson(self):\n self.assert_proper_cast('{}', 'json', dict)\n\n\nclass TestQueryIterator(unittest.TestCase):\n \"\"\"Test the query operating as an iterator.\"\"\"\n\n def setUp(self):\n self.c = connect()\n\n def tearDown(self):\n self.c.close()\n\n def testLen(self):\n r = self.c.query(\"select generate_series(3,7)\")\n self.assertEqual(len(r), 5)\n\n def testGetItem(self):\n r = self.c.query(\"select generate_series(7,9)\")\n self.assertEqual(r[0], (7,))\n self.assertEqual(r[1], (8,))\n self.assertEqual(r[2], (9,))\n\n def testGetItemWithNegativeIndex(self):\n r = self.c.query(\"select generate_series(7,9)\")\n self.assertEqual(r[-1], (9,))\n self.assertEqual(r[-2], (8,))\n self.assertEqual(r[-3], (7,))\n\n def testGetItemOutOfRange(self):\n r = self.c.query(\"select generate_series(7,9)\")\n self.assertRaises(IndexError, r.__getitem__, 3)\n\n def testIterate(self):\n r = self.c.query(\"select generate_series(3,5)\")\n self.assertNotIsInstance(r, (list, tuple))\n self.assertIsInstance(r, Iterable)\n self.assertEqual(list(r), [(3,), (4,), (5,)])\n # noinspection PyUnresolvedReferences\n self.assertIsInstance(r[1], tuple)\n\n def testIterateTwice(self):\n r = self.c.query(\"select generate_series(3,5)\")\n for i in range(2):\n self.assertEqual(list(r), [(3,), (4,), (5,)])\n\n def testIterateTwoColumns(self):\n r = self.c.query(\"select 1,2 union select 3,4\")\n self.assertIsInstance(r, Iterable)\n self.assertEqual(list(r), [(1, 2), (3, 4)])\n\n def testNext(self):\n r = self.c.query(\"select generate_series(7,9)\")\n self.assertEqual(next(r), (7,))\n self.assertEqual(next(r), (8,))\n self.assertEqual(next(r), (9,))\n self.assertRaises(StopIteration, next, r)\n\n def testContains(self):\n r = self.c.query(\"select generate_series(7,9)\")\n self.assertIn((8,), r)\n self.assertNotIn((5,), r)\n\n def testDictIterate(self):\n r = self.c.query(\"select generate_series(3,5) as n\").dictiter()\n self.assertNotIsInstance(r, (list, tuple))\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [dict(n=3), dict(n=4), dict(n=5)])\n self.assertIsInstance(r[1], dict)\n\n def testDictIterateTwoColumns(self):\n r = self.c.query(\n \"select 1 as one, 2 as two\"\n \" union select 3 as one, 4 as two\").dictiter()\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [dict(one=1, two=2), dict(one=3, two=4)])\n\n def testDictNext(self):\n r = self.c.query(\"select generate_series(7,9) as n\").dictiter()\n self.assertEqual(next(r), dict(n=7))\n self.assertEqual(next(r), dict(n=8))\n self.assertEqual(next(r), dict(n=9))\n self.assertRaises(StopIteration, next, r)\n\n def testDictContains(self):\n r = self.c.query(\"select generate_series(7,9) as n\").dictiter()\n self.assertIn(dict(n=8), r)\n self.assertNotIn(dict(n=5), r)\n\n def testNamedIterate(self):\n 
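# namediter() yields each row as a named tuple, so values are reachable\n # both by position and by attribute (e.g. row.number below)\n 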
r = self.c.query(\"select generate_series(3,5) as number\").namediter()\n self.assertNotIsInstance(r, (list, tuple))\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [(3,), (4,), (5,)])\n self.assertIsInstance(r[1], tuple)\n self.assertEqual(r[1]._fields, ('number',))\n self.assertEqual(r[1].number, 4)\n\n def testNamedIterateTwoColumns(self):\n r = self.c.query(\n \"select 1 as one, 2 as two\"\n \" union select 3 as one, 4 as two\").namediter()\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [(1, 2), (3, 4)])\n self.assertEqual(r[0]._fields, ('one', 'two'))\n self.assertEqual(r[0].one, 1)\n self.assertEqual(r[1]._fields, ('one', 'two'))\n self.assertEqual(r[1].two, 4)\n\n def testNamedNext(self):\n r = self.c.query(\"select generate_series(7,9) as number\").namediter()\n self.assertEqual(next(r), (7,))\n self.assertEqual(next(r), (8,))\n n = next(r)\n self.assertEqual(n._fields, ('number',))\n self.assertEqual(n.number, 9)\n self.assertRaises(StopIteration, next, r)\n\n def testNamedContains(self):\n r = self.c.query(\"select generate_series(7,9)\").namediter()\n self.assertIn((8,), r)\n self.assertNotIn((5,), r)\n\n def testScalarIterate(self):\n r = self.c.query(\"select generate_series(3,5)\").scalariter()\n self.assertNotIsInstance(r, (list, tuple))\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [3, 4, 5])\n self.assertIsInstance(r[1], int)\n\n def testScalarIterateTwoColumns(self):\n r = self.c.query(\"select 1, 2 union select 3, 4\").scalariter()\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [1, 3])\n\n def testScalarNext(self):\n r = self.c.query(\"select generate_series(7,9)\").scalariter()\n self.assertEqual(next(r), 7)\n self.assertEqual(next(r), 8)\n self.assertEqual(next(r), 9)\n self.assertRaises(StopIteration, next, r)\n\n def testScalarContains(self):\n r = self.c.query(\"select generate_series(7,9)\").scalariter()\n self.assertIn(8, r)\n self.assertNotIn(5, r)\n\n\nclass TestQueryOneSingleScalar(unittest.TestCase):\n \"\"\"Test the query methods for getting single rows and columns.\"\"\"\n\n def setUp(self):\n self.c = connect()\n\n def tearDown(self):\n self.c.close()\n\n def testOneWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n self.assertIsNone(q.one())\n\n def testOneWithSingleRow(self):\n q = self.c.query(\"select 1, 2\")\n r = q.one()\n self.assertIsInstance(r, tuple)\n self.assertEqual(r, (1, 2))\n self.assertEqual(q.one(), None)\n\n def testOneWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n self.assertEqual(q.one(), (1, 2))\n self.assertEqual(q.one(), (3, 4))\n self.assertEqual(q.one(), None)\n\n def testOneDictWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n self.assertIsNone(q.onedict())\n\n def testOneDictWithSingleRow(self):\n q = self.c.query(\"select 1 as one, 2 as two\")\n r = q.onedict()\n self.assertIsInstance(r, dict)\n self.assertEqual(r, dict(one=1, two=2))\n self.assertEqual(q.onedict(), None)\n\n def testOneDictWithTwoRows(self):\n q = self.c.query(\n \"select 1 as one, 2 as two union select 3 as one, 4 as two\")\n self.assertEqual(q.onedict(), dict(one=1, two=2))\n self.assertEqual(q.onedict(), dict(one=3, two=4))\n self.assertEqual(q.onedict(), None)\n\n def testOneNamedWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n self.assertIsNone(q.onenamed())\n\n def testOneNamedWithSingleRow(self):\n q = self.c.query(\"select 1 as one, 2 as two\")\n r = q.onenamed()\n 
self.assertEqual(r._fields, ('one', 'two'))\n self.assertEqual(r.one, 1)\n self.assertEqual(r.two, 2)\n self.assertEqual(r, (1, 2))\n self.assertEqual(q.onenamed(), None)\n\n def testOneNamedWithTwoRows(self):\n q = self.c.query(\n \"select 1 as one, 2 as two union select 3 as one, 4 as two\")\n r = q.onenamed()\n self.assertEqual(r._fields, ('one', 'two'))\n self.assertEqual(r.one, 1)\n self.assertEqual(r.two, 2)\n self.assertEqual(r, (1, 2))\n r = q.onenamed()\n self.assertEqual(r._fields, ('one', 'two'))\n self.assertEqual(r.one, 3)\n self.assertEqual(r.two, 4)\n self.assertEqual(r, (3, 4))\n self.assertEqual(q.onenamed(), None)\n\n def testOneScalarWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n self.assertIsNone(q.onescalar())\n\n def testOneScalarWithSingleRow(self):\n q = self.c.query(\"select 1, 2\")\n r = q.onescalar()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 1)\n self.assertEqual(q.onescalar(), None)\n\n def testOneScalarWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n self.assertEqual(q.onescalar(), 1)\n self.assertEqual(q.onescalar(), 3)\n self.assertEqual(q.onescalar(), None)\n\n def testSingleWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n try:\n q.single()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.NoResultError)\n self.assertEqual(str(r), 'No result found')\n\n def testSingleWithSingleRow(self):\n q = self.c.query(\"select 1, 2\")\n r = q.single()\n self.assertIsInstance(r, tuple)\n self.assertEqual(r, (1, 2))\n r = q.single()\n self.assertIsInstance(r, tuple)\n self.assertEqual(r, (1, 2))\n\n def testSingleWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n try:\n q.single()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.MultipleResultsError)\n self.assertEqual(str(r), 'Multiple results found')\n\n def testSingleDictWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n try:\n q.singledict()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.NoResultError)\n self.assertEqual(str(r), 'No result found')\n\n def testSingleDictWithSingleRow(self):\n q = self.c.query(\"select 1 as one, 2 as two\")\n r = q.singledict()\n self.assertIsInstance(r, dict)\n self.assertEqual(r, dict(one=1, two=2))\n r = q.singledict()\n self.assertIsInstance(r, dict)\n self.assertEqual(r, dict(one=1, two=2))\n\n def testSingleDictWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n try:\n q.singledict()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.MultipleResultsError)\n self.assertEqual(str(r), 'Multiple results found')\n\n def testSingleNamedWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n try:\n q.singlenamed()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.NoResultError)\n self.assertEqual(str(r), 'No result found')\n\n def testSingleNamedWithSingleRow(self):\n q = self.c.query(\"select 1 as one, 2 as two\")\n r = q.singlenamed()\n self.assertEqual(r._fields, ('one', 'two'))\n self.assertEqual(r.one, 1)\n self.assertEqual(r.two, 2)\n self.assertEqual(r, (1, 2))\n r = q.singlenamed()\n self.assertEqual(r._fields, ('one', 'two'))\n self.assertEqual(r.one, 1)\n self.assertEqual(r.two, 2)\n self.assertEqual(r, (1, 2))\n\n def testSingleNamedWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 
4\")\n try:\n q.singlenamed()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.MultipleResultsError)\n self.assertEqual(str(r), 'Multiple results found')\n\n def testSingleScalarWithEmptyQuery(self):\n q = self.c.query(\"select 0 where false\")\n try:\n q.singlescalar()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.NoResultError)\n self.assertEqual(str(r), 'No result found')\n\n def testSingleScalarWithSingleRow(self):\n q = self.c.query(\"select 1, 2\")\n r = q.singlescalar()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 1)\n r = q.singlescalar()\n self.assertIsInstance(r, int)\n self.assertEqual(r, 1)\n\n def testSingleScalarWithTwoRows(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n try:\n q.singlescalar()\n except pg.InvalidResultError as e:\n r = e\n else:\n r = None\n self.assertIsInstance(r, pg.MultipleResultsError)\n self.assertEqual(str(r), 'Multiple results found')\n\n def testScalarResult(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n r = q.scalarresult()\n self.assertIsInstance(r, list)\n self.assertEqual(r, [1, 3])\n\n def testScalarIter(self):\n q = self.c.query(\"select 1, 2 union select 3, 4\")\n r = q.scalariter()\n self.assertNotIsInstance(r, (list, tuple))\n self.assertIsInstance(r, Iterable)\n r = list(r)\n self.assertEqual(r, [1, 3])\n\n\nclass TestInserttable(unittest.TestCase):\n \"\"\"Test inserttable method.\"\"\"\n\n cls_set_up = False\n\n @classmethod\n def setUpClass(cls):\n c = connect()\n c.query(\"drop table if exists test cascade\")\n c.query(\"create table test (\"\n \"i2 smallint, i4 integer, i8 bigint,\"\n \" b boolean, dt date, ti time,\"\n \"d numeric, f4 real, f8 double precision, m money,\"\n \"c char(1), v4 varchar(4), c4 char(4), t text)\")\n # Check whether the test database uses SQL_ASCII - this means\n # that it does not consider encoding when calculating lengths.\n c.query(\"set client_encoding=utf8\")\n try:\n c.query(\"select 'ä'\")\n except (pg.DataError, pg.NotSupportedError):\n cls.has_encoding = False\n else:\n cls.has_encoding = c.query(\n \"select length('ä') - length('a')\").getresult()[0][0] == 0\n c.close()\n cls.cls_set_up = True\n\n @classmethod\n def tearDownClass(cls):\n c = connect()\n c.query(\"drop table test cascade\")\n c.close()\n\n def setUp(self):\n self.assertTrue(self.cls_set_up)\n self.c = connect()\n self.c.query(\"set client_encoding=utf8\")\n self.c.query(\"set datestyle='ISO,YMD'\")\n self.c.query(\"set lc_monetary='C'\")\n\n def tearDown(self):\n self.c.query(\"truncate table test\")\n self.c.close()\n\n data = [\n (-1, -1, long(-1), True, '1492-10-12', '08:30:00',\n -1.2345, -1.75, -1.875, '-1.25', '-', 'r?', '!u', 'xyz'),\n (0, 0, long(0), False, '1607-04-14', '09:00:00',\n 0.0, 0.0, 0.0, '0.0', ' ', '0123', '4567', '890'),\n (1, 1, long(1), True, '1801-03-04', '03:45:00',\n 1.23456, 1.75, 1.875, '1.25', 'x', 'bc', 'cdef', 'g'),\n (2, 2, long(2), False, '1903-12-17', '11:22:00',\n 2.345678, 2.25, 2.125, '2.75', 'y', 'q', 'ijk', 'mnop\\nstux!')]\n\n @classmethod\n def db_len(cls, s, encoding):\n # noinspection PyUnresolvedReferences\n if cls.has_encoding:\n s = s if isinstance(s, unicode) else s.decode(encoding)\n else:\n s = s.encode(encoding) if isinstance(s, unicode) else s\n return len(s)\n\n def get_back(self, encoding='utf-8'):\n \"\"\"Convert boolean and decimal values back.\"\"\"\n data = []\n for row in self.c.query(\"select * from test order by 1\").getresult():\n 
self.assertIsInstance(row, tuple)\n row = list(row)\n if row[0] is not None: # smallint\n self.assertIsInstance(row[0], int)\n if row[1] is not None: # integer\n self.assertIsInstance(row[1], int)\n if row[2] is not None: # bigint\n self.assertIsInstance(row[2], long)\n if row[3] is not None: # boolean\n self.assertIsInstance(row[3], bool)\n if row[4] is not None: # date\n self.assertIsInstance(row[4], str)\n self.assertTrue(row[4].replace('-', '').isdigit())\n if row[5] is not None: # time\n self.assertIsInstance(row[5], str)\n self.assertTrue(row[5].replace(':', '').isdigit())\n if row[6] is not None: # numeric\n self.assertIsInstance(row[6], Decimal)\n row[6] = float(row[6])\n if row[7] is not None: # real\n self.assertIsInstance(row[7], float)\n if row[8] is not None: # double precision\n self.assertIsInstance(row[8], float)\n row[8] = float(row[8])\n if row[9] is not None: # money\n self.assertIsInstance(row[9], Decimal)\n row[9] = str(float(row[9]))\n if row[10] is not None: # char(1)\n self.assertIsInstance(row[10], str)\n self.assertEqual(self.db_len(row[10], encoding), 1)\n if row[11] is not None: # varchar(4)\n self.assertIsInstance(row[11], str)\n self.assertLessEqual(self.db_len(row[11], encoding), 4)\n if row[12] is not None: # char(4)\n self.assertIsInstance(row[12], str)\n self.assertEqual(self.db_len(row[12], encoding), 4)\n row[12] = row[12].rstrip()\n if row[13] is not None: # text\n self.assertIsInstance(row[13], str)\n row = tuple(row)\n data.append(row)\n return data\n\n def testInserttable1Row(self):\n data = self.data[2:3]\n self.c.inserttable('test', data)\n self.assertEqual(self.get_back(), data)\n\n def testInserttable4Rows(self):\n data = self.data\n self.c.inserttable('test', data)\n self.assertEqual(self.get_back(), data)\n\n def testInserttableFromTupleOfLists(self):\n data = tuple(list(row) for row in self.data)\n self.c.inserttable('test', data)\n self.assertEqual(self.get_back(), self.data)\n\n def testInserttableFromSetofTuples(self):\n data = {row for row in self.data}\n try:\n self.c.inserttable('test', data)\n except TypeError as e:\n r = str(e)\n else:\n r = 'this is fine'\n self.assertIn('list or a tuple as second argument', r)\n\n def testInserttableFromListOfSets(self):\n data = [set(row) for row in self.data]\n try:\n self.c.inserttable('test', data)\n except TypeError as e:\n r = str(e)\n else:\n r = 'this is fine'\n self.assertIn('second argument must contain a tuple or a list', r)\n\n def testInserttableMultipleRows(self):\n num_rows = 100\n data = self.data[2:3] * num_rows\n self.c.inserttable('test', data)\n r = self.c.query(\"select count(*) from test\").getresult()[0][0]\n self.assertEqual(r, num_rows)\n\n def testInserttableMultipleCalls(self):\n num_rows = 10\n data = self.data[2:3]\n for _i in range(num_rows):\n self.c.inserttable('test', data)\n r = self.c.query(\"select count(*) from test\").getresult()[0][0]\n self.assertEqual(r, num_rows)\n\n def testInserttableNullValues(self):\n data = [(None,) * 14] * 100\n self.c.inserttable('test', data)\n self.assertEqual(self.get_back(), data)\n\n def testInserttableNoColumn(self):\n data = [()] * 10\n self.c.inserttable('test', data, [])\n self.assertEqual(self.get_back(), [])\n\n def testInserttableOnlyOneColumn(self):\n data = [(42,)] * 50\n self.c.inserttable('test', data, ['i4'])\n data = [tuple([42 if i == 1 else None for i in range(14)])] * 50\n self.assertEqual(self.get_back(), data)\n\n def testInserttableOnlyTwoColumns(self):\n data = [(bool(i % 2), i * .5) for i in range(20)]\n 
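# fill only the boolean and real columns; all other columns stay NULL\n        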
self.c.inserttable('test', data, ('b', 'f4'))\n # noinspection PyTypeChecker\n data = [(None,) * 3 + (bool(i % 2),) + (None,) * 3 + (i * .5,)\n + (None,) * 6 for i in range(20)]\n self.assertEqual(self.get_back(), data)\n\n def testInserttableWithInvalidTableName(self):\n data = [(42,)]\n # check that the table name is not inserted unescaped\n # (this would pass otherwise since there is a column named i4)\n self.assertRaises(Exception, self.c.inserttable, 'test (i4)', data)\n # make sure that it works if parameters are passed properly\n self.c.inserttable('test', data, ['i4'])\n\n def testInserttableWithInvalidColumnName(self):\n data = [(2, 4)]\n # check that the column names are not inserted unescaped\n # (this would pass otherwise since there are columns i2 and i4)\n self.assertRaises(\n Exception, self.c.inserttable, 'test', data, ['i2,i4'])\n # make sure that it works if parameters are passed properly\n self.c.inserttable('test', data, ['i2', 'i4'])\n\n def testInserttableMaxValues(self):\n data = [(2 ** 15 - 1, int(2 ** 31 - 1), long(2 ** 31 - 1),\n True, '2999-12-31', '11:59:59', 1e99,\n 1.0 + 1.0 / 32, 1.0 + 1.0 / 32, None,\n \"1\", \"1234\", \"1234\", \"1234\" * 100)]\n self.c.inserttable('test', data)\n self.assertEqual(self.get_back(), data)\n\n def testInserttableByteValues(self):\n try:\n self.c.query(\"select '€', 'käse', 'сыр', 'pont-l''évêque'\")\n except pg.DataError:\n self.skipTest(\"database does not support utf8\")\n # non-ascii chars do not fit in char(1) when there is no encoding\n c = u'€' if self.has_encoding else u'$'\n row_unicode = (\n 0, 0, long(0), False, u'1970-01-01', u'00:00:00',\n 0.0, 0.0, 0.0, u'0.0',\n c, u'bäd', u'bäd', u\"käse сыр pont-l'évêque\")\n row_bytes = tuple(\n s.encode('utf-8') if isinstance(s, unicode) else s\n for s in row_unicode)\n data = [row_bytes] * 2\n self.c.inserttable('test', data)\n if unicode_strings:\n data = [row_unicode] * 2\n self.assertEqual(self.get_back(), data)\n\n def testInserttableUnicodeUtf8(self):\n try:\n self.c.query(\"select '€', 'käse', 'сыр', 'pont-l''évêque'\")\n except pg.DataError:\n self.skipTest(\"database does not support utf8\")\n # non-ascii chars do not fit in char(1) when there is no encoding\n c = u'€' if self.has_encoding else u'$'\n row_unicode = (\n 0, 0, long(0), False, u'1970-01-01', u'00:00:00',\n 0.0, 0.0, 0.0, u'0.0',\n c, u'bäd', u'bäd', u\"käse сыр pont-l'évêque\")\n data = [row_unicode] * 2\n self.c.inserttable('test', data)\n if not unicode_strings:\n row_bytes = tuple(\n s.encode('utf-8') if isinstance(s, unicode) else s\n for s in row_unicode)\n data = [row_bytes] * 2\n self.assertEqual(self.get_back(), data)\n\n def testInserttableUnicodeLatin1(self):\n try:\n self.c.query(\"set client_encoding=latin1\")\n self.c.query(\"select '¥'\")\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin1\")\n # non-ascii chars do not fit in char(1) when there is no encoding\n c = u'€' if self.has_encoding else u'$'\n row_unicode = (\n 0, 0, long(0), False, u'1970-01-01', u'00:00:00',\n 0.0, 0.0, 0.0, u'0.0',\n c, u'bäd', u'bäd', u\"for käse and pont-l'évêque pay in €\")\n data = [row_unicode]\n # cannot encode € sign with latin1 encoding\n self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data)\n row_unicode = tuple(\n s.replace(u'€', u'¥') if isinstance(s, unicode) else s\n for s in row_unicode)\n data = [row_unicode] * 2\n self.c.inserttable('test', data)\n if not unicode_strings:\n row_bytes = tuple(\n s.encode('latin1') if isinstance(s, 
unicode) else s\n for s in row_unicode)\n data = [row_bytes] * 2\n self.assertEqual(self.get_back('latin1'), data)\n\n def testInserttableUnicodeLatin9(self):\n try:\n self.c.query(\"set client_encoding=latin9\")\n self.c.query(\"select '€'\")\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest(\"database does not support latin9\")\n return\n # non-ascii chars do not fit in char(1) when there is no encoding\n c = u'€' if self.has_encoding else u'$'\n row_unicode = (\n 0, 0, long(0), False, u'1970-01-01', u'00:00:00',\n 0.0, 0.0, 0.0, u'0.0',\n c, u'bäd', u'bäd', u\"for käse and pont-l'évêque pay in €\")\n data = [row_unicode] * 2\n self.c.inserttable('test', data)\n if not unicode_strings:\n row_bytes = tuple(\n s.encode('latin9') if isinstance(s, unicode) else s\n for s in row_unicode)\n data = [row_bytes] * 2\n self.assertEqual(self.get_back('latin9'), data)\n\n def testInserttableNoEncoding(self):\n self.c.query(\"set client_encoding=sql_ascii\")\n # non-ascii chars do not fit in char(1) when there is no encoding\n c = u'€' if self.has_encoding else u'$'\n row_unicode = (\n 0, 0, long(0), False, u'1970-01-01', u'00:00:00',\n 0.0, 0.0, 0.0, u'0.0',\n c, u'bäd', u'bäd', u\"for käse and pont-l'évêque pay in €\")\n data = [row_unicode]\n # cannot encode non-ascii unicode without a specific encoding\n self.assertRaises(UnicodeEncodeError, self.c.inserttable, 'test', data)\n\n\nclass TestDirectSocketAccess(unittest.TestCase):\n \"\"\"Test copy command with direct socket access.\"\"\"\n\n cls_set_up = False\n\n @classmethod\n def setUpClass(cls):\n c = connect()\n c.query(\"drop table if exists test cascade\")\n c.query(\"create table test (i int, v varchar(16))\")\n c.close()\n cls.cls_set_up = True\n\n @classmethod\n def tearDownClass(cls):\n c = connect()\n c.query(\"drop table test cascade\")\n c.close()\n\n def setUp(self):\n self.assertTrue(self.cls_set_up)\n self.c = connect()\n self.c.query(\"set client_encoding=utf8\")\n\n def tearDown(self):\n self.c.query(\"truncate table test\")\n self.c.close()\n\n def testPutline(self):\n putline = self.c.putline\n query = self.c.query\n data = list(enumerate(\"apple pear plum cherry banana\".split()))\n query(\"copy test from stdin\")\n try:\n for i, v in data:\n putline(\"%d\\t%s\\n\" % (i, v))\n putline(\"\\\\.\\n\")\n finally:\n self.c.endcopy()\n r = query(\"select * from test\").getresult()\n self.assertEqual(r, data)\n\n def testPutlineBytesAndUnicode(self):\n putline = self.c.putline\n query = self.c.query\n try:\n query(\"select 'käse+würstel'\")\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest('database does not support utf8')\n query(\"copy test from stdin\")\n try:\n putline(u\"47\\tkäse\\n\".encode('utf8'))\n putline(\"35\\twürstel\\n\")\n putline(b\"\\\\.\\n\")\n finally:\n self.c.endcopy()\n r = query(\"select * from test\").getresult()\n self.assertEqual(r, [(47, 'käse'), (35, 'würstel')])\n\n def testGetline(self):\n getline = self.c.getline\n query = self.c.query\n data = list(enumerate(\"apple banana pear plum strawberry\".split()))\n n = len(data)\n self.c.inserttable('test', data)\n query(\"copy test to stdout\")\n try:\n for i in range(n + 2):\n v = getline()\n if i < n:\n # noinspection PyStringFormat\n self.assertEqual(v, '%d\\t%s' % data[i])\n elif i == n:\n self.assertEqual(v, '\\\\.')\n else:\n self.assertIsNone(v)\n finally:\n try:\n self.c.endcopy()\n except IOError:\n pass\n\n def testGetlineBytesAndUnicode(self):\n getline = self.c.getline\n query = self.c.query\n try:\n query(\"select 
'käse+würstel'\")\n except (pg.DataError, pg.NotSupportedError):\n self.skipTest('database does not support utf8')\n data = [(54, u'käse'.encode('utf8')), (73, u'würstel')]\n self.c.inserttable('test', data)\n query(\"copy test to stdout\")\n try:\n v = getline()\n self.assertIsInstance(v, str)\n self.assertEqual(v, '54\\tkäse')\n v = getline()\n self.assertIsInstance(v, str)\n self.assertEqual(v, '73\\twürstel')\n self.assertEqual(getline(), '\\\\.')\n self.assertIsNone(getline())\n finally:\n try:\n self.c.endcopy()\n except IOError:\n pass\n\n def testParameterChecks(self):\n self.assertRaises(TypeError, self.c.putline)\n self.assertRaises(TypeError, self.c.getline, 'invalid')\n self.assertRaises(TypeError, self.c.endcopy, 'invalid')\n\n\nclass TestNotificatons(unittest.TestCase):\n \"\"\"Test notification support.\"\"\"\n\n def setUp(self):\n self.c = connect()\n\n def tearDown(self):\n self.doCleanups()\n self.c.close()\n\n def testGetNotify(self):\n getnotify = self.c.getnotify\n query = self.c.query\n self.assertIsNone(getnotify())\n query('listen test_notify')\n try:\n self.assertIsNone(self.c.getnotify())\n query(\"notify test_notify\")\n r = getnotify()\n self.assertIsInstance(r, tuple)\n self.assertEqual(len(r), 3)\n self.assertIsInstance(r[0], str)\n self.assertIsInstance(r[1], int)\n self.assertIsInstance(r[2], str)\n self.assertEqual(r[0], 'test_notify')\n self.assertEqual(r[2], '')\n self.assertIsNone(self.c.getnotify())\n query(\"notify test_notify, 'test_payload'\")\n r = getnotify()\n self.assertTrue(isinstance(r, tuple))\n self.assertEqual(len(r), 3)\n self.assertIsInstance(r[0], str)\n self.assertIsInstance(r[1], int)\n self.assertIsInstance(r[2], str)\n self.assertEqual(r[0], 'test_notify')\n self.assertEqual(r[2], 'test_payload')\n self.assertIsNone(getnotify())\n finally:\n query('unlisten test_notify')\n\n def testGetNoticeReceiver(self):\n self.assertIsNone(self.c.get_notice_receiver())\n\n def testSetNoticeReceiver(self):\n self.assertRaises(TypeError, self.c.set_notice_receiver, 42)\n self.assertRaises(TypeError, self.c.set_notice_receiver, 'invalid')\n self.assertIsNone(self.c.set_notice_receiver(lambda notice: None))\n self.assertIsNone(self.c.set_notice_receiver(None))\n\n def testSetAndGetNoticeReceiver(self):\n r = lambda notice: None # noqa: E731\n self.assertIsNone(self.c.set_notice_receiver(r))\n self.assertIs(self.c.get_notice_receiver(), r)\n self.assertIsNone(self.c.set_notice_receiver(None))\n self.assertIsNone(self.c.get_notice_receiver())\n\n def testNoticeReceiver(self):\n self.addCleanup(self.c.query, 'drop function bilbo_notice();')\n self.c.query('''create function bilbo_notice() returns void AS $$\n begin\n raise warning 'Bilbo was here!';\n end;\n $$ language plpgsql''')\n received = {}\n\n def notice_receiver(notice):\n for attr in dir(notice):\n if attr.startswith('__'):\n continue\n value = getattr(notice, attr)\n if isinstance(value, str):\n value = value.replace('WARNUNG', 'WARNING')\n received[attr] = value\n\n self.c.set_notice_receiver(notice_receiver)\n self.c.query('select bilbo_notice()')\n self.assertEqual(received, dict(\n pgcnx=self.c, message='WARNING: Bilbo was here!\\n',\n severity='WARNING', primary='Bilbo was here!',\n detail=None, hint=None))\n\n\nclass TestConfigFunctions(unittest.TestCase):\n \"\"\"Test the functions for changing default settings.\n\n To test the effect of most of these functions, we need a database\n connection. 
That's why they are covered in this test module.\n \"\"\"\n\n def setUp(self):\n self.c = connect()\n self.c.query(\"set client_encoding=utf8\")\n self.c.query('set bytea_output=hex')\n self.c.query(\"set lc_monetary='C'\")\n\n def tearDown(self):\n self.c.close()\n\n def testGetDecimalPoint(self):\n point = pg.get_decimal_point()\n # error if a parameter is passed\n self.assertRaises(TypeError, pg.get_decimal_point, point)\n self.assertIsInstance(point, str)\n self.assertEqual(point, '.') # the default setting\n pg.set_decimal_point(',')\n try:\n r = pg.get_decimal_point()\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertEqual(r, ',')\n pg.set_decimal_point(\"'\")\n try:\n r = pg.get_decimal_point()\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertEqual(r, \"'\")\n pg.set_decimal_point('')\n try:\n r = pg.get_decimal_point()\n finally:\n pg.set_decimal_point(point)\n self.assertIsNone(r)\n pg.set_decimal_point(None)\n try:\n r = pg.get_decimal_point()\n finally:\n pg.set_decimal_point(point)\n self.assertIsNone(r)\n\n def testSetDecimalPoint(self):\n d = pg.Decimal\n point = pg.get_decimal_point()\n self.assertRaises(TypeError, pg.set_decimal_point)\n # error if decimal point is not a string\n self.assertRaises(TypeError, pg.set_decimal_point, 0)\n # error if more than one decimal point passed\n self.assertRaises(TypeError, pg.set_decimal_point, '.', ',')\n self.assertRaises(TypeError, pg.set_decimal_point, '.,')\n # error if decimal point is not a punctuation character\n self.assertRaises(TypeError, pg.set_decimal_point, '0')\n query = self.c.query\n # check that money values are interpreted as decimal values\n # only if decimal_point is set, and that the result is correct\n # only if it is set suitable for the current lc_monetary setting\n select_money = \"select '34.25'::money\"\n proper_money = d('34.25')\n bad_money = d('3425')\n en_locales = 'en', 'en_US', 'en_US.utf8', 'en_US.UTF-8'\n en_money = '$34.25', '$ 34.25', '34.25$', '34.25 $', '34.25 Dollar'\n de_locales = 'de', 'de_DE', 'de_DE.utf8', 'de_DE.UTF-8'\n de_money = (\n '34,25€', '34,25 €', '€34,25', '€ 34,25',\n 'EUR34,25', 'EUR 34,25', '34,25 EUR', '34,25 Euro', '34,25 DM')\n # first try with English localization (using the point)\n for lc in en_locales:\n try:\n query(\"set lc_monetary='%s'\" % lc)\n except pg.DataError:\n pass\n else:\n break\n else:\n self.skipTest(\"cannot set English money locale\")\n try:\n query(select_money)\n except (pg.DataError, pg.ProgrammingError):\n # this can happen if the currency signs cannot be\n # converted using the encoding of the test database\n self.skipTest(\"database does not support English money\")\n pg.set_decimal_point(None)\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertIn(r, en_money)\n pg.set_decimal_point('')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertIn(r, en_money)\n pg.set_decimal_point('.')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, d)\n self.assertEqual(r, proper_money)\n pg.set_decimal_point(',')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, d)\n self.assertEqual(r, bad_money)\n pg.set_decimal_point(\"'\")\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n 
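# always restore the original decimal point setting before asserting\n            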
pg.set_decimal_point(point)\n self.assertIsInstance(r, d)\n self.assertEqual(r, bad_money)\n # then try with German localization (using the comma)\n for lc in de_locales:\n try:\n query(\"set lc_monetary='%s'\" % lc)\n except pg.DataError:\n pass\n else:\n break\n else:\n self.skipTest(\"cannot set German money locale\")\n select_money = select_money.replace('.', ',')\n try:\n query(select_money)\n except (pg.DataError, pg.ProgrammingError):\n self.skipTest(\"database does not support German money\")\n pg.set_decimal_point(None)\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertIn(r, de_money)\n pg.set_decimal_point('')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, str)\n self.assertIn(r, de_money)\n pg.set_decimal_point(',')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertIsInstance(r, d)\n self.assertEqual(r, proper_money)\n pg.set_decimal_point('.')\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertEqual(r, bad_money)\n pg.set_decimal_point(\"'\")\n try:\n r = query(select_money).getresult()[0][0]\n finally:\n pg.set_decimal_point(point)\n self.assertEqual(r, bad_money)\n\n def testGetDecimal(self):\n decimal_class = pg.get_decimal()\n # error if a parameter is passed\n self.assertRaises(TypeError, pg.get_decimal, decimal_class)\n self.assertIs(decimal_class, pg.Decimal) # the default setting\n pg.set_decimal(int)\n try:\n r = pg.get_decimal()\n finally:\n pg.set_decimal(decimal_class)\n self.assertIs(r, int)\n r = pg.get_decimal()\n self.assertIs(r, decimal_class)\n\n def testSetDecimal(self):\n decimal_class = pg.get_decimal()\n # error if no parameter is passed\n self.assertRaises(TypeError, pg.set_decimal)\n query = self.c.query\n try:\n r = query(\"select 3425::numeric\")\n except pg.DatabaseError:\n self.skipTest('database does not support numeric')\n r = None\n r = r.getresult()[0][0]\n self.assertIsInstance(r, decimal_class)\n self.assertEqual(r, decimal_class('3425'))\n r = query(\"select 3425::numeric\")\n pg.set_decimal(int)\n try:\n r = r.getresult()[0][0]\n finally:\n pg.set_decimal(decimal_class)\n self.assertNotIsInstance(r, decimal_class)\n self.assertIsInstance(r, int)\n self.assertEqual(r, int(3425))\n\n def testGetBool(self):\n use_bool = pg.get_bool()\n # error if a parameter is passed\n self.assertRaises(TypeError, pg.get_bool, use_bool)\n self.assertIsInstance(use_bool, bool)\n self.assertIs(use_bool, True) # the default setting\n pg.set_bool(False)\n try:\n r = pg.get_bool()\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, bool)\n self.assertIs(r, False)\n pg.set_bool(True)\n try:\n r = pg.get_bool()\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, bool)\n self.assertIs(r, True)\n pg.set_bool(0)\n try:\n r = pg.get_bool()\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, bool)\n self.assertIs(r, False)\n pg.set_bool(1)\n try:\n r = pg.get_bool()\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, bool)\n self.assertIs(r, True)\n\n def testSetBool(self):\n use_bool = pg.get_bool()\n # error if no parameter is passed\n self.assertRaises(TypeError, pg.set_bool)\n query = self.c.query\n try:\n r = query(\"select true::bool\")\n except pg.ProgrammingError:\n self.skipTest('database does not support bool')\n r = None\n r = r.getresult()[0][0]\n 
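# with the default setting, boolean columns come back as Python bool values\n        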
self.assertIsInstance(r, bool)\n self.assertEqual(r, True)\n pg.set_bool(False)\n try:\n r = query(\"select true::bool\").getresult()[0][0]\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, str)\n self.assertIs(r, 't')\n pg.set_bool(True)\n try:\n r = query(\"select true::bool\").getresult()[0][0]\n finally:\n pg.set_bool(use_bool)\n self.assertIsInstance(r, bool)\n self.assertIs(r, True)\n\n def testGetByteEscaped(self):\n bytea_escaped = pg.get_bytea_escaped()\n # error if a parameter is passed\n self.assertRaises(TypeError, pg.get_bytea_escaped, bytea_escaped)\n self.assertIsInstance(bytea_escaped, bool)\n self.assertIs(bytea_escaped, False) # the default setting\n pg.set_bytea_escaped(True)\n try:\n r = pg.get_bytea_escaped()\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, bool)\n self.assertIs(r, True)\n pg.set_bytea_escaped(False)\n try:\n r = pg.get_bytea_escaped()\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, bool)\n self.assertIs(r, False)\n pg.set_bytea_escaped(1)\n try:\n r = pg.get_bytea_escaped()\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, bool)\n self.assertIs(r, True)\n pg.set_bytea_escaped(0)\n try:\n r = pg.get_bytea_escaped()\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, bool)\n self.assertIs(r, False)\n\n def testSetByteaEscaped(self):\n bytea_escaped = pg.get_bytea_escaped()\n # error if no parameter is passed\n self.assertRaises(TypeError, pg.set_bytea_escaped)\n query = self.c.query\n try:\n r = query(\"select 'data'::bytea\")\n except pg.ProgrammingError:\n self.skipTest('database does not support bytea')\n r = None\n r = r.getresult()[0][0]\n self.assertIsInstance(r, bytes)\n self.assertEqual(r, b'data')\n pg.set_bytea_escaped(True)\n try:\n r = query(\"select 'data'::bytea\").getresult()[0][0]\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, str)\n self.assertEqual(r, '\\\\x64617461')\n pg.set_bytea_escaped(False)\n try:\n r = query(\"select 'data'::bytea\").getresult()[0][0]\n finally:\n pg.set_bytea_escaped(bytea_escaped)\n self.assertIsInstance(r, bytes)\n self.assertEqual(r, b'data')\n\n def testSetRowFactorySize(self):\n try:\n from functools import lru_cache\n except ImportError: # Python < 3.2\n lru_cache = None\n queries = ['select 1 as a, 2 as b, 3 as c', 'select 123 as abc']\n query = self.c.query\n for maxsize in (None, 0, 1, 2, 3, 10, 1024):\n pg.set_row_factory_size(maxsize)\n for i in range(3):\n for q in queries:\n r = query(q).namedresult()[0]\n if q.endswith('abc'):\n self.assertEqual(r, (123,))\n self.assertEqual(r._fields, ('abc',))\n else:\n self.assertEqual(r, (1, 2, 3))\n self.assertEqual(r._fields, ('a', 'b', 'c'))\n if lru_cache:\n info = pg._row_factory.cache_info()\n self.assertEqual(info.maxsize, maxsize)\n self.assertEqual(info.hits + info.misses, 6)\n self.assertEqual(\n info.hits, 0 if maxsize is not None and maxsize < 2 else 4)\n\n\nclass TestStandaloneEscapeFunctions(unittest.TestCase):\n \"\"\"Test pg escape functions.\n\n The libpq interface memorizes some parameters of the last opened\n connection that influence the result of these functions. 
Therefore\n    we need to open a connection with fixed parameters prior to testing\n    in order to ensure that the tests always run under the same conditions.\n    That's why these tests are included in this test module.\n    \"\"\"\n\n    cls_set_up = False\n\n    @classmethod\n    def setUpClass(cls):\n        db = connect()\n        query = db.query\n        query('set client_encoding=sql_ascii')\n        query('set standard_conforming_strings=off')\n        try:\n            query('set bytea_output=escape')\n        except pg.ProgrammingError:\n            if db.server_version >= 90000:\n                raise  # ignore for older server versions\n        db.close()\n        cls.cls_set_up = True\n\n    def testEscapeString(self):\n        self.assertTrue(self.cls_set_up)\n        f = pg.escape_string\n        r = f(b'plain')\n        self.assertIsInstance(r, bytes)\n        self.assertEqual(r, b'plain')\n        r = f(u'plain')\n        self.assertIsInstance(r, unicode)\n        self.assertEqual(r, u'plain')\n        r = f(u\"das is' käse\".encode('utf-8'))\n        self.assertIsInstance(r, bytes)\n        self.assertEqual(r, u\"das is'' käse\".encode('utf-8'))\n        r = f(u\"that's cheesy\")\n        self.assertIsInstance(r, unicode)\n        self.assertEqual(r, u\"that''s cheesy\")\n        r = f(r\"It's bad to have a \\ inside.\")\n        self.assertEqual(r, r\"It''s bad to have a \\\\ inside.\")\n\n    def testEscapeBytea(self):\n        self.assertTrue(self.cls_set_up)\n        f = pg.escape_bytea\n        r = f(b'plain')\n        self.assertIsInstance(r, bytes)\n        self.assertEqual(r, b'plain')\n        r = f(u'plain')\n        self.assertIsInstance(r, unicode)\n        self.assertEqual(r, u'plain')\n        r = f(u\"das is' käse\".encode('utf-8'))\n        self.assertIsInstance(r, bytes)\n        self.assertEqual(r, b\"das is'' k\\\\\\\\303\\\\\\\\244se\")\n        r = f(u\"that's cheesy\")\n        self.assertIsInstance(r, unicode)\n        self.assertEqual(r, u\"that''s cheesy\")\n        r = f(b'O\\x00ps\\xff!')\n        self.assertEqual(r, b'O\\\\\\\\000ps\\\\\\\\377!')\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_classic_connection.py","file_name":"test_classic_connection.py","file_ext":"py","file_size_in_byte":99033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"156197948","text":"# -*- coding: UTF-8 -*-\ndef saca_porcentajes(values):\n    \"\"\"Sum the values and return a list with each value's percentage\"\"\"\n    total = sum(values)\n    valores_cero = [] # list to record the indices where the percentage comes out zero\n    for i in range(len(values)):\n        porcentaje = (float(values[i])/total)*100\n        values[i] = \"%.2f\" % porcentaje + '%' \n    return values\n\ndef saca_porcentajes(dato, total, formato=True):\n    '''If formato is True return a float, otherwise a string'''\n    if dato != None:\n        try:\n            porcentaje = (dato/float(total)) * 100 if total != None and total != 0 else 0\n        except:\n            return 0\n        if formato:\n            return porcentaje\n        else:\n            return '%.2f' % porcentaje\n    else: \n        return 0\n\ndef calcular_positivos(suma, numero, porcentaje=True):\n    '''Return the percentage of positives'''\n    try:\n        positivos = (numero * 2) - suma\n        if porcentaje:\n            return '%.2f' % saca_porcentajes(positivos, numero)\n        else:\n            return positivos\n    except:\n        return 0\n\ndef calcular_negativos(suma, numero, porcentaje = True):\n    positivos = calcular_positivos(suma, numero, porcentaje)\n    if porcentaje:\n        return 100 - float(positivos)\n    else:\n        return numero - positivos\n\n# compute the percentage increase or decrease between two values\ndef saca_aumento_regresso (primer_dato, segundo_dato, formato = True, tipo = \"percent\"):\n\n    if tipo == \"percent\":\n        try:\n            valor = (float(segundo_dato) - float(primer_dato))/float(primer_dato)\n\n        except:\n            return 0\n        
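# note: the value computed above is a fraction (0.25 means +25%), not a 0-100 percentage\n        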
if formato:\n            return valor\n        else:\n            return '%.6f' % valor # formatted to 6 decimals so the table calculations use more precision\n    elif tipo == \"absolute\":\n        try:\n            valor = float(segundo_dato) - float(primer_dato)\n        except:\n            return \"n/d\"\n        if formato:\n            return '%.1f' % valor\n        else:\n            return valor\n","sub_path":"suco/encuesta/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"38850831","text":"# Copyright 2015-2016 Yelp Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport asyncio\n\nfrom mock import Mock\nfrom mock import patch\nfrom pytest import raises\n\nfrom paasta_tools.cli.cmds import mark_for_deployment\nfrom paasta_tools.cli.cmds.mark_for_deployment import NoSuchCluster\nfrom paasta_tools.cli.cmds.wait_for_deployment import get_latest_marked_sha\nfrom paasta_tools.cli.cmds.wait_for_deployment import paasta_wait_for_deployment\nfrom paasta_tools.cli.cmds.wait_for_deployment import validate_git_sha_is_latest\nfrom paasta_tools.cli.utils import NoSuchService\nfrom paasta_tools.marathon_tools import MarathonServiceConfig\nfrom paasta_tools.paastaapi import ApiException\nfrom paasta_tools.remote_git import LSRemoteException\nfrom paasta_tools.utils import TimeoutError\n\n\nclass fake_args:\n    deploy_group = \"test_deploy_group\"\n    service = \"test_service\"\n    git_url = \"\"\n    commit = \"d670460b4b4aece5915caf5c68d12f560a9fe3e4\"\n    soa_dir = \"fake_soa_dir\"\n    timeout = 0\n    verbose = False\n    polling_interval = 5\n    diagnosis_interval = 15\n    time_before_first_diagnosis = 15\n\n\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment._log\", autospec=True)\n@patch(\n    \"paasta_tools.cli.cmds.mark_for_deployment.client.get_paasta_oapi_client\",\n    autospec=True,\n)\ndef test_check_if_instance_is_done(mock_get_paasta_oapi_client, mock__log):\n    mock_paasta_api_client = Mock()\n    mock_paasta_api_client.api_error = ApiException\n    mock_get_paasta_oapi_client.return_value = mock_paasta_api_client\n\n    def check_instance(instance_config):\n        return mark_for_deployment.check_if_instance_is_done(\n            service=\"service1\",\n            instance=instance_config.get_instance(),\n            cluster=\"cluster\",\n            git_sha=\"somesha\",\n            instance_config=instance_config,\n        )\n\n    # valid completed instance\n    mock_paasta_api_client.service.status_instance.return_value = Mock(\n        git_sha=\"somesha\",\n        kubernetes=None,\n        marathon=Mock(\n            app_count=1,\n            active_shas=None,\n            deploy_status=\"Running\",\n            expected_instance_count=2,\n            running_instance_count=2,\n        ),\n    )\n    assert check_instance(mock_marathon_instance_config(\"instance1\"))\n\n    # too many marathon apps\n    mock_paasta_api_client.service.status_instance.return_value = Mock(\n        git_sha=\"somesha\",\n        kubernetes=None,\n        marathon=Mock(\n            app_count=2,\n            active_shas=None,\n            deploy_status=\"Running\",\n            expected_instance_count=2,\n            running_instance_count=2,\n        ),\n    )\n    assert not check_instance(mock_marathon_instance_config(\"instance2\"))\n\n    # too 
many running instances\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Running\",\n expected_instance_count=2,\n running_instance_count=4,\n ),\n )\n assert check_instance(mock_marathon_instance_config(\"instance3\"))\n\n # still Deploying\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Deploying\",\n expected_instance_count=2,\n running_instance_count=2,\n ),\n )\n assert check_instance(mock_marathon_instance_config(\"instance4\"))\n\n # still Deploying\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Waiting\",\n expected_instance_count=2,\n running_instance_count=2,\n ),\n )\n assert check_instance(mock_marathon_instance_config(\"instance4.1\"))\n\n # not a marathon instance\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\", kubernetes=None, marathon=None,\n )\n assert check_instance(mock_marathon_instance_config(\"instance5\"))\n\n # wrong sha\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"anothersha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Running\",\n expected_instance_count=2,\n running_instance_count=2,\n ),\n )\n assert not check_instance(mock_marathon_instance_config(\"instance6\"))\n\n # paasta stop'd\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Stopped\",\n expected_instance_count=0,\n running_instance_count=0,\n desired_state=\"stop\",\n ),\n )\n assert check_instance(mock_marathon_instance_config(\"instance7\"))\n\n # paasta has autoscaled to 0\n mock_paasta_api_client.service.status_instance.return_value = Mock(\n git_sha=\"somesha\",\n kubernetes=None,\n marathon=Mock(\n app_count=1,\n active_shas=None,\n deploy_status=\"Stopped\",\n expected_instance_count=0,\n running_instance_count=0,\n ),\n )\n assert check_instance(mock_marathon_instance_config(\"instance8\"))\n\n # not found -> maybe this is the first time we're deploying it, and it's not up yet.\n mock_paasta_api_client.service.status_instance.side_effect = ApiException(\n status=404, reason=\"\"\n )\n assert not check_instance(mock_marathon_instance_config(\"notaninstance\"))\n\n # crash -> consider it not done yet, hope it stops crashing later\n mock_paasta_api_client.service.status_instance.side_effect = ApiException(\n status=500, reason=\"\"\n )\n assert not check_instance(mock_marathon_instance_config(\"api_error\"))\n\n\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config\", autospec=True\n)\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.get_instance_configs_for_service_in_deploy_group_all_clusters\",\n autospec=True,\n)\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment._log\", autospec=True)\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.check_if_instance_is_done\", autospec=True\n)\ndef test_wait_for_deployment(\n mock_check_if_instance_is_done,\n mock__log,\n mock_get_instance_configs_for_service_in_deploy_group_all_clusters,\n mock_load_system_paasta_config,\n):\n 
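# one cluster with three instances; the side effect below reports only two\n    # of them as done, so the first wait_for_deployment call should time out\n    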
mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {\n \"cluster1\": [\n mock_marathon_instance_config(\"instance1\"),\n mock_marathon_instance_config(\"instance2\"),\n mock_marathon_instance_config(\"instance3\"),\n ],\n }\n\n def check_if_instance_is_done_side_effect(\n service, instance, cluster, git_sha, instance_config, api=None\n ):\n return instance in [\"instance1\", \"instance2\"]\n\n mock_check_if_instance_is_done.side_effect = check_if_instance_is_done_side_effect\n\n mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {\n \"cluster1\": \"some_url_1\",\n \"cluster2\": \"some_url_2\",\n }\n\n mock_load_system_paasta_config.return_value.get_mark_for_deployment_max_polling_threads.return_value = (\n 4\n )\n\n with raises(TimeoutError):\n with patch(\n \"asyncio.as_completed\", side_effect=[asyncio.TimeoutError], autospec=True\n ):\n asyncio.run(\n mark_for_deployment.wait_for_deployment(\n \"service\", \"fake_deploy_group\", \"somesha\", \"/nail/soa\", 1\n )\n )\n\n mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {\n \"cluster1\": [\n mock_marathon_instance_config(\"instance1\"),\n mock_marathon_instance_config(\"instance2\"),\n ],\n \"cluster2\": [\n mock_marathon_instance_config(\"instance1\"),\n mock_marathon_instance_config(\"instance2\"),\n ],\n }\n with patch(\"sys.stdout\", autospec=True, flush=Mock()):\n assert (\n asyncio.run(\n mark_for_deployment.wait_for_deployment(\n \"service\", \"fake_deploy_group\", \"somesha\", \"/nail/soa\", 5\n )\n )\n == 0\n )\n\n mock_get_instance_configs_for_service_in_deploy_group_all_clusters.return_value = {\n \"cluster1\": [\n mock_marathon_instance_config(\"instance1\"),\n mock_marathon_instance_config(\"instance2\"),\n ],\n \"cluster2\": [\n mock_marathon_instance_config(\"instance1\"),\n mock_marathon_instance_config(\"instance3\"),\n ],\n }\n with raises(TimeoutError):\n asyncio.run(\n mark_for_deployment.wait_for_deployment(\n \"service\", \"fake_deploy_group\", \"somesha\", \"/nail/soa\", 0\n )\n )\n\n\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config\", autospec=True\n)\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader\", autospec=True\n)\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment._log\", autospec=True)\ndef test_wait_for_deployment_raise_no_such_cluster(\n mock__log, mock_paasta_service_config_loader, mock_load_system_paasta_config,\n):\n mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {\n \"cluster1\": \"some_url_1\",\n \"cluster2\": \"some_url_2\",\n }\n\n mock_paasta_service_config_loader.return_value.clusters = [\"cluster3\"]\n with raises(NoSuchCluster):\n asyncio.run(\n mark_for_deployment.wait_for_deployment(\n \"service\", \"deploy_group_3\", \"somesha\", \"/nail/soa\", 0\n )\n )\n\n\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.validate_service_name\", autospec=True)\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment\", autospec=True)\ndef test_paasta_wait_for_deployment_return_1_when_no_such_service(\n mock_wait_for_deployment, mock_validate_service_name\n):\n mock_validate_service_name.side_effect = NoSuchService(\"Some text\")\n assert paasta_wait_for_deployment(fake_args) == 1\n assert mock_wait_for_deployment.call_args_list == []\n assert mock_validate_service_name.called\n\n\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.validate_service_name\", 
autospec=True)\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups\", autospec=True)\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment\", autospec=True)\ndef test_paasta_wait_for_deployment_return_1_when_deploy_group_not_found(\n mock_wait_for_deployment, mock_list_deploy_groups, mock_validate_service_name\n):\n mock_list_deploy_groups.return_value = {\"another_test_deploy_group\"}\n assert paasta_wait_for_deployment(fake_args) == 1\n assert mock_wait_for_deployment.call_args_list == []\n assert mock_validate_service_name.called\n\n\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config\", autospec=True\n)\n@patch(\n \"paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader\", autospec=True\n)\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.validate_service_name\", autospec=True)\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.validate_git_sha\", autospec=True)\n@patch(\n \"paasta_tools.cli.cmds.wait_for_deployment.validate_git_sha_is_latest\",\n autospec=True,\n)\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups\", autospec=True)\n@patch(\"paasta_tools.cli.cmds.mark_for_deployment._log\", autospec=True)\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment._log\", autospec=True)\ndef test_paasta_wait_for_deployment_return_0_when_no_instances_in_deploy_group(\n mock__log1,\n mock__log2,\n mock_list_deploy_groups,\n mock_validate_git_sha_is_latest,\n mock_validate_git_sha,\n mock_validate_service_name,\n mock_paasta_service_config_loader,\n mock_load_system_paasta_config,\n system_paasta_config,\n):\n mock__log1.return_value = None\n mock__log2.return_value = None\n mock_load_system_paasta_config.return_value = system_paasta_config\n mock_paasta_service_config_loader.return_value.instance_configs.return_value = [\n mock_marathon_instance_config(\"some_instance\")\n ]\n mock_list_deploy_groups.return_value = {\"test_deploy_group\"}\n assert paasta_wait_for_deployment(fake_args) == 0\n assert mock_validate_service_name.called\n\n\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs\", autospec=True)\ndef test_get_latest_marked_sha_good(mock_list_remote_refs):\n mock_list_remote_refs.return_value = {\n \"refs/tags/paasta-fake_group1-20161129T203750-deploy\": \"968b948b3fca457326718dc7b2e278f89ccc5c87\",\n \"refs/tags/paasta-fake_group1-20161117T122449-deploy\": \"eac9a6d7909d09ffec00538bbc43b64502aa2dc0\",\n \"refs/tags/paasta-fake_group2-20161125T095651-deploy\": \"a4911648beb2e53886658ba7ea7eb93d582d754c\",\n \"refs/tags/paasta-fake_group1.everywhere-20161109T223959-deploy\": \"71e97ec397a3f0e7c4ee46e8ea1e2982cbcb0b79\",\n }\n assert (\n get_latest_marked_sha(\"\", \"fake_group1\")\n == \"968b948b3fca457326718dc7b2e278f89ccc5c87\"\n )\n\n\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs\", autospec=True)\ndef test_get_latest_marked_sha_bad(mock_list_remote_refs):\n mock_list_remote_refs.return_value = {\n \"refs/tags/paasta-fake_group2-20161129T203750-deploy\": \"968b948b3fca457326718dc7b2e278f89ccc5c87\"\n }\n assert get_latest_marked_sha(\"\", \"fake_group1\") == \"\"\n\n\n@patch(\"paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs\", autospec=True)\ndef test_validate_deploy_group_when_is_git_not_available(mock_list_remote_refs, capsys):\n test_error_message = \"Git error\"\n mock_list_remote_refs.side_effect = LSRemoteException(test_error_message)\n assert (\n validate_git_sha_is_latest(\n \"fake sha\", \"fake_git_url\", \"fake_group\", 
\"fake_service\"\n )\n is None\n )\n\n\ndef mock_marathon_instance_config(fake_name) -> \"MarathonServiceConfig\":\n return MarathonServiceConfig(\n service=\"fake_service\",\n cluster=\"fake_cluster\",\n instance=fake_name,\n config_dict={\"deploy_group\": \"fake_deploy_group\"},\n branch_dict=None,\n soa_dir=\"fake_soa_dir\",\n )\n\n\ndef test_compose_timeout_message():\n remaining_instances = {\n \"cluster1\": [\"instance1\", \"instance2\"],\n \"cluster2\": [\"instance3\"],\n \"cluster3\": [],\n }\n\n message = mark_for_deployment.compose_timeout_message(\n remaining_instances, 1, \"fake_group\", \"someservice\", \"some_git_sha\"\n )\n assert (\n \" paasta status -c cluster1 -s someservice -i instance1,instance2\" in message\n )\n assert \" paasta status -c cluster2 -s someservice -i instance3\" in message\n assert (\n \" paasta logs -c cluster1 -s someservice -i instance1,instance2 -C deploy -l 1000\"\n in message\n )\n assert (\n \" paasta logs -c cluster2 -s someservice -i instance3 -C deploy -l 1000\"\n in message\n )\n","sub_path":"tests/cli/test_cmds_wait_for_deployment.py","file_name":"test_cmds_wait_for_deployment.py","file_ext":"py","file_size_in_byte":15811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538829976","text":"#Author: Lzj\n#mail: harry_lee2683@outlook.com\ndef ma(a):\n y=a**2+a\n return y\nx=ma(2)\nprint(ma(1))\ndef l(a):\n j=0\n for i in a:\n j+=1\n return j\nprint(l(\"abc11111111111\"))\ndef re(a,b,c='good'):\n print(\"eat\"+a)\n print(\"also eat\"+b)\n print(\"score\",c)\nre(b='话梅花生',a='牛肉拉面',c='not bad')\n\nexit(0)\n","sub_path":"Linux-Oldboy-practical/L4-Python/realproject/day6/f10.py","file_name":"f10.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283093028","text":"# Dan McGinn\n# Keyboard inputs adapted from https://github.com/recantha/EduKit3-RC-Keyboard/blob/master/rc_keyboard.py\n# EV3 Project: http://inspiredtoeducate.net/inspiredtoeducate/programming-lego-mindstorms-ev3-with-python/\n# Run with python3\n\nimport time,termios,tty,sys\nimport ev3dev.ev3 as ev3\nfrom time import sleep\n\n# Define motor outputs\nmotor_left = ev3.LargeMotor('outB')\nmotor_right = ev3.LargeMotor('outC')\nspeed = 80 # Set Speed\n\n# Initiate keybaord inputs\ndef getch():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n ch = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch\n\ndef forward():\n motor_left.run_direct(duty_cycle_sp=speed)\n motor_right.run_direct(duty_cycle_sp=speed)\ndef back():\n motor_left.run_direct(duty_cycle_sp=-speed)\n motor_right.run_direct(duty_cycle_sp=-speed)\ndef left():\n motor_left.run_direct( duty_cycle_sp=-speed)\n motor_right.run_direct( duty_cycle_sp=speed)\ndef right():\n motor_left.run_direct( duty_cycle_sp=speed)\n motor_right.run_direct( duty_cycle_sp=-speed)\ndef stop():\n motor_left.run_direct( duty_cycle_sp=0)\n motor_right.run_direct( duty_cycle_sp=-0)\ndef red():\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.RED)\n sleep(0.01)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.RED)\n sleep(0.01)\ndef orange():\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.ORANGE)\n sleep(0.01)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.ORANGE)\n sleep(0.01)\ndef yellow():\n ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.YELLOW)\n sleep(0.01)\n ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.YELLOW)\n 
sleep(0.01)\ndef green():\n    ev3.Leds.set_color(ev3.Leds.LEFT, ev3.Leds.GREEN)\n    sleep(0.01)\n    ev3.Leds.set_color(ev3.Leds.RIGHT, ev3.Leds.GREEN)\n    sleep(0.01)\n\nprint(\"-----------Connection Initiated-----------\")\nwhile True:\n    char = getch()\n    if char == 'w':\n        forward()\n        print(\"Forward\")\n    if char == 's':\n        back()\n        print(\"Backward\")\n    if char == 'a':\n        left()\n        print(\"Left\")\n    if char == 'd':\n        right()\n        print(\"Right\")\n    if char == ' ':\n        stop()\n        ev3.Leds.all_off()\n    if char == 'r':\n        red()\n        print(\"Red\")\n    if char == 'o':\n        orange()\n        print(\"Orange\")\n    if char == 'y':\n        yellow()\n        print(\"Yellow\")\n    if char == 'g':\n        green()\n        print(\"Green\")\n    if char == 'q':\n        print(\"-------------------EXIT-------------------\")\n        sleep(0.01)\n        stop()\n        ev3.Leds.all_off()\n        exit()","sub_path":"EV3Python/Demo.py","file_name":"Demo.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"460214492","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\nimport socket\nimport sys\n\n# Create a UDP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nserver_address = ('localhost', 10000)\nmessage = 'This is the message. It will be repeated'\n\ntry:\n\n    # Send data\n    print('sending \"%s\"' % message, file=sys.stderr)\n    sent = sock.sendto(message.encode('utf-8'), server_address)\n\n    # Receive the response\n    print('waiting to receive', file=sys.stderr)\n    data, server = sock.recvfrom(4096)\n    print('received \"%s\"' % data.decode('utf-8'), file=sys.stderr)\n\nfinally:\n    print('closing socket', file=sys.stderr)\n    sock.close()","sub_path":"dumpscripts/socket_echo_client_dgram.py","file_name":"socket_echo_client_dgram.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"56447884","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.utils.encoding import python_2_unicode_compatible\nfrom django.core.validators import MinValueValidator\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.utils import timezone\n\nfrom filer.fields.file import FilerFileField\nfrom filer.models import Folder\n\nfrom danceschool.core.models import EventStaffMember, Event, InvoiceItem, Location\nfrom danceschool.core.constants import getConstant\n\n\n@python_2_unicode_compatible\nclass ExpenseCategory(models.Model):\n    '''\n    These are the different available categories of payment\n    '''\n\n    name = models.CharField(_('Name'),max_length=50,unique=True,help_text=_('Different types of tasks and payments should have different category names'))\n    defaultRate = models.FloatField(_('Default rate'),help_text=_('This is the default hourly payment rate for this type of task. 
For staff expenses and venue rentals, this will be overridden by the rate specified as default for the venue or staff type.'),null=True,blank=True,validators=[MinValueValidator(0)])\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = _('Expense category')\n verbose_name_plural = _('Expense categories')\n\n\n@python_2_unicode_compatible\nclass ExpenseItem(models.Model):\n '''\n Expenses may be associated with EventStaff or with Events, or they may be associated with nothing\n '''\n\n submissionUser = models.ForeignKey(User,verbose_name=_('Submission user'),related_name='expensessubmittedby',null=True, blank=True)\n submissionDate = models.DateTimeField(_('Submission date'),auto_now_add=True)\n\n category = models.ForeignKey(ExpenseCategory,verbose_name=_('Category'))\n\n description = models.CharField(_('Description'),max_length=200,null=True,blank=True)\n\n hours = models.FloatField(_('Hours'),help_text=_('Please indicate the number of hours to be paid for.'),null=True,blank=True,validators=[MinValueValidator(0)])\n wageRate = models.FloatField(_('Wage rate'),help_text=_('This should be filled automatically, but can be changed as needed.'),null=True,blank=True,validators=[MinValueValidator(0)])\n total = models.FloatField(_('Total amount'),null=True,blank=True,validators=[MinValueValidator(0)])\n adjustments = models.FloatField(_('Adjustments/refunds'),help_text=_('Record any ex-post adjustments to the amount (e.g. refunds) in this field. A positive amount increases the netExpense, a negative amount reduces the netExpense.'),default=0)\n fees = models.FloatField(_('Fees'),help_text=_('The sum of any transaction fees (e.g. Paypal fees) that were paid by us, and should therefore be added to net expense.'),default=0)\n\n paymentMethod = models.CharField(_('Payment method'),max_length=50,null=True,blank=True)\n\n comments = models.TextField(_('Comments/Notes'),null=True,blank=True)\n # attachment = models.FileField('Attach File (optional)',null=True,blank=True,max_length=200,storage=PrivateMediaStorage(),upload_to='board/expenses/%Y/%m/')\n attachment = FilerFileField(verbose_name=_('Attach File (optional)'),null=True,blank=True,related_name='expense_attachment')\n\n # These are foreign key relations for the things that expenses can be be related to.\n # An expense item should only be populated by one of eventstaffmember\n # or eventvenue. However, an event will automatically be populated by the associated\n # event if it is a determined event expense. This allows for simpler lookups,\n # like, \"get all expenses associated with this event.\"\n eventstaffmember = models.OneToOneField(EventStaffMember,null=True,blank=True,verbose_name=_('Staff member'))\n\n eventvenue = models.ForeignKey(Event,null=True,blank=True,related_name='venueexpense',verbose_name=_('Event venue'))\n event = models.ForeignKey(Event,null=True,blank=True,verbose_name=_('Event'),help_text=_('If this item is associated with an Event, enter it here.'))\n\n payToUser = models.ForeignKey(User,null=True,blank=True,related_name='payToUser',verbose_name=_('Pay to user'))\n payToLocation = models.ForeignKey(Location,null=True,blank=True,verbose_name=_('Pay to location'))\n payToName = models.CharField(_('Pay to (enter name)'),max_length=50, null=True,blank=True)\n\n reimbursement = models.BooleanField(_('Reimbursement'),help_text=_('Check to indicate that this is a reimbursement expense (i.e. 
not compensation).'),default=False)\n\n approved = models.BooleanField(_('Approved'),help_text=_('Check to indicate that expense is approved for payment.'),default=False)\n paid = models.BooleanField(_('Paid'),help_text=_('Check to indicate that payment has been made.'),default=False)\n\n approvalDate = models.DateTimeField(_('Approval date'),null=True,blank=True)\n paymentDate = models.DateTimeField(_('Payment date'),null=True,blank=True)\n\n # This field is used to aggregate expenses over time (e.g. by month).\n # The value of this field is auto-updated using pre-save methods. If\n # there is a class series or an event associated with this expense,\n # then the value is taken from that. Otherwise, the submission date\n # is used.\n accrualDate = models.DateTimeField(_('Accrual date'))\n\n @property\n def netExpense(self):\n return self.total + self.adjustments + self.fees\n netExpense.fget.short_description = _('Net expense')\n\n @property\n def payTo(self):\n '''\n Returns a string indicating who the expense is to be paid to.\n For more convenient references of miscellaneous expenses.\n '''\n if self.payToUser:\n return ' '.join([self.payToUser.first_name,self.payToUser.last_name])\n elif self.payToLocation:\n return self.payToLocation.name\n else:\n return self.payToName or ''\n payTo.fget.short_description = _('Pay to')\n\n def save(self, *args, **kwargs):\n '''\n This custom save method ensures that an expense is not attributed to multiple categories.\n It also ensures that the series and event properties are always associated with any\n type of expense of that series or event.\n '''\n # Set the approval and payment dates if they have just been approved/paid.\n if not hasattr(self,'__paid') or not hasattr(self,'__approved'):\n if self.approved and not self.approvalDate:\n self.approvalDate = timezone.now()\n if self.paid and not self.paymentDate:\n self.paymentDate = timezone.now()\n else:\n if self.approved and not self.approvalDate and not self.__approvalDate:\n self.approvalDate = timezone.now()\n if self.paid and not self.paymentDate and not self.__paymentDate:\n self.paymentDate = timezone.now()\n\n # Ensure that each expense is attribued to only one series or event.\n if len([x for x in [\n self.eventstaffmember,\n self.eventvenue,] if x]) > 1:\n raise ValidationError(_('This expense cannot be attributed to multiple categories.'),code='invalid')\n\n # Fill out the series and event properties to permit easy calculation of\n # revenues and expenses by series or by event.\n if self.eventstaffmember:\n self.event = self.eventstaffmember.event\n if hasattr(self.eventstaffmember.staffMember,'userAccount'):\n self.payToUser = self.eventstaffmember.staffMember.userAccount\n if self.eventvenue:\n self.event = self.eventvenue\n self.payToLocation = self.eventvenue.location\n\n # Set the accrual date. 
The method for events ensures that the accrualDate month\n # is the same as the reported month of the series/event by accruing to the end date of the last\n # class or occurrence in that month.\n if not self.accrualDate:\n if self.event and self.event.month:\n self.accrualDate = self.event.eventoccurrence_set.order_by('endTime').filter(**{'endTime__month': self.event.month}).last().endTime\n else:\n self.accrualDate = self.submissionDate\n\n # Set the total for hourly work\n if self.hours and not self.wageRate and not self.total and not self.payToLocation and self.category:\n self.wageRate = self.category.defaultRate\n elif self.hours and not self.wageRate and not self.total and self.payToLocation:\n self.wageRate = self.payToLocation.rentalRate\n\n if self.hours and self.wageRate and not self.total:\n self.total = self.hours * self.wageRate\n\n super(ExpenseItem, self).save(*args, **kwargs)\n self.__approved = self.approved\n self.__paid = self.paid\n self.__approvalDate = self.approvalDate\n self.__paymentDate = self.paymentDate\n\n # If a file is attached, ensure that it is not public, and that it is saved in the 'Expense Receipts' folder\n if self.attachment:\n try:\n self.attachment.folder = Folder.objects.get(name=_('Expense Receipts'))\n except ObjectDoesNotExist:\n pass\n self.attachment.is_public = False\n self.attachment.save()\n\n def __str__(self):\n if self.accrualDate:\n return '%s %s: %s = %s%s' % (self.category.name, self.accrualDate.strftime('%B %Y'),self.description, getConstant('general__currencySymbol'), self.total)\n else:\n return '%s: %s = %s%s' % (self.category.name, self.description, getConstant('general__currencySymbol'), self.total)\n\n def __init__(self, *args, **kwargs):\n '''\n Permit easy checking to determine if the object\n already exists and has changed on saving\n '''\n super(self.__class__, self).__init__(*args, **kwargs)\n self.__approved = self.approved\n self.__paid = self.paid\n self.__approvalDate = self.approvalDate\n self.__paymentDate = self.paymentDate\n\n class Meta:\n ordering = ['-accrualDate',]\n verbose_name = _('Expense item')\n verbose_name_plural = _('Expense items')\n\n permissions = (\n ('mark_expenses_paid',_('Mark expenses as paid at the time of submission')),\n )\n\n\n@python_2_unicode_compatible\nclass RevenueCategory(models.Model):\n '''\n These are the different available categories of payment\n '''\n\n name = models.CharField(_('Name'),max_length=50,unique=True,help_text=_('Different types of revenue fall under different categories.'))\n defaultAmount = models.FloatField(_('Default amount'),help_text=_('This is the default amount of revenue for items in this category.'),null=True,blank=True,validators=[MinValueValidator(0)])\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = _('Revenue category')\n verbose_name_plural = _('Revenue categories')\n\n\n@python_2_unicode_compatible\nclass RevenueItem(models.Model):\n '''\n All revenue-producing transactions (e.g. 
class payments, other payments) should have an associated RevenueItem\n '''\n\n submissionUser = models.ForeignKey(User,null=True,blank=True,related_name='revenuessubmittedby',verbose_name=_('Submission user'))\n submissionDate = models.DateTimeField(_('Submission date'),auto_now_add=True)\n\n category = models.ForeignKey(RevenueCategory,verbose_name=_('Category'))\n description = models.CharField(_('Description'),max_length=200,null=True,blank=True)\n total = models.FloatField(_('Total'),help_text=_('The total revenue received, net of any discounts or voucher uses. This is what we actually receive.'),validators=[MinValueValidator(0)])\n grossTotal = models.FloatField(_('Gross Total'),help_text=_('The gross total billed before the application of any discounts, or the use of any vouchers.'),validators=[MinValueValidator(0)])\n adjustments = models.FloatField(_('Adjustments'),help_text=_('Record any ex-post adjustments to the amount (e.g. refunds) in this field. A positive amount increases the netRevenue, a negative amount reduces the netRevenue.'),default=0)\n fees = models.FloatField(_('Fees'),help_text=_('The sum of any transaction fees (e.g. Paypal fees) that were paid by us, and should therefore be subtracted from net revenue.'),default=0)\n taxes = models.FloatField(_('Taxes'),default=0)\n\n paymentMethod = models.CharField(_('Payment method'),max_length=50,null=True,blank=True)\n invoiceNumber = models.CharField(_('Invoice Number'),help_text=_('For Paypal payments, this will be the txn_id. For cash payments, this will be automatically generated by the submission form. More than one revenue item may have the same invoice number, because multiple events are paid for in one Paypal transaction.'),null=True,blank=True,max_length=80)\n\n comments = models.TextField(_('Comments/Notes'),null=True,blank=True)\n attachment = FilerFileField(verbose_name=_('Attach File (optional)'),null=True,blank=True,related_name='revenue_attachment')\n\n # With the invoice system in the core app, Revenue Items need only link with Invoice Items\n invoiceItem = models.OneToOneField(InvoiceItem,null=True, blank=True,verbose_name=_('Associated invoice item'))\n\n event = models.ForeignKey(Event,null=True,blank=True,verbose_name=_('Event'),help_text=_('If this item is associated with an Event, enter it here.'))\n receivedFromName = models.CharField(_('Received From'),max_length=50,null=True,blank=True,help_text=_('Enter who this revenue item was received from, if it is not associated with an existing registration.'))\n\n currentlyHeldBy = models.ForeignKey(User,null=True,blank=True,verbose_name=_('Cash currently in possession of'),help_text=_('If cash has not yet been deposited, this indicates who to contact in order to collect the cash for deposit.'),related_name='revenuesheldby')\n received = models.BooleanField(_('Received'),help_text=_('Check to indicate that payment has been received. Non-received payments are considered pending.'),default=False)\n receivedDate = models.DateTimeField(_('Date received'),null=True,blank=True)\n\n # This field is used to aggregate expenses over time (e.g. by month).\n # The value of this field is auto-updated using pre-save methods. If\n # there is a registration or an event associated with this expense,\n # then the value is taken from that. 
Otherwise, the submission date\n # is used.\n accrualDate = models.DateTimeField(_('Accrual date'))\n\n @property\n def relatedItems(self):\n '''\n If this item is associated with a registration, then return all other items associated with\n the same registration.\n '''\n if self.registration:\n return self.registration.revenueitem_set.exclude(pk=self.pk)\n relatedItems.fget.short_description = _('Related items')\n\n @property\n def netRevenue(self):\n return self.total + self.adjustments - self.fees\n netRevenue.fget.short_description = _('Net revenue')\n\n def save(self, *args, **kwargs):\n '''\n This custom save method ensures that a revenue item is not attributed to multiple categories.\n It also ensures that the series and event properties are always associated with any\n type of revenue of that series or event.\n '''\n\n # Set the received date if the payment was just marked received\n if not hasattr(self,'__received'):\n if self.received and not self.receivedDate:\n self.receivedDate = timezone.now()\n else:\n if self.received and not self.receivedDate and not self.__receivedDate:\n self.receivedDate = timezone.now()\n\n # Set the accrual date. The method for series/events ensures that the accrualDate month\n # is the same as the reported month of the event/series by accruing to the start date of the first\n # occurrence in that month.\n if not self.accrualDate:\n if self.invoiceItem and self.invoiceItem.finalEventRegistration:\n min_event_time = self.invoiceItem.finalEventRegistration.event.eventoccurrence_set.filter(**{'startTime__month':self.invoiceItem.finalEventRegistration.event.month}).first().startTime\n self.accrualDate = min_event_time\n elif self.event:\n self.accrualDate = self.event.eventoccurrence_set.order_by('startTime').filter(**{'startTime__month': self.event.month}).last().startTime\n elif self.invoiceItem:\n self.accrualDate = self.invoiceItem.invoice.creationDate\n elif self.receivedDate:\n self.accrualDate = self.receivedDate\n else:\n self.accrualDate = self.submissionDate\n\n # Now, set the registration property and check that this item is not attributed\n # to multiple categories.\n if self.invoiceItem and self.invoiceItem.finalEventRegistration:\n self.event = self.invoiceItem.finalEventRegistration.event\n elif self.invoiceItem and self.invoiceItem.temporaryEventRegistration:\n self.event = self.invoiceItem.temporaryEventRegistration.event\n\n # If no grossTotal is reported, use the net total. 
If no net total is reported, use the grossTotal\n if self.grossTotal is None and self.total:\n self.grossTotal = self.total\n if self.total is None and self.grossTotal:\n self.total = self.grossTotal\n\n super(RevenueItem, self).save(*args, **kwargs)\n self.__received = self.received\n self.__receivedDate = self.receivedDate\n\n # If a file is attached, ensure that it is not public, and that it is saved in the 'Expense Receipts' folder\n if self.attachment:\n try:\n self.attachment.folder = Folder.objects.get(name=_('Revenue Receipts'))\n except ObjectDoesNotExist:\n pass\n self.attachment.is_public = False\n self.attachment.save()\n\n def __str__(self):\n if self.accrualDate:\n return '%s %s: %s = %s%s' % (self.category.name, self.accrualDate.strftime('%B %Y'),self.description, getConstant('general__currencySymbol'), self.total)\n else:\n return '%s: %s = %s%s' % (self.category.name, self.description, getConstant('general__currencySymbol'), self.total)\n\n def __init__(self,*args,**kwargs):\n '''\n Permit easy checking to determine if the object\n already exists and has changed on saving\n '''\n super(self.__class__, self).__init__(*args, **kwargs)\n self.__received = self.received\n self.__receivedDate = self.receivedDate\n\n class Meta:\n ordering = ['-accrualDate',]\n verbose_name = _('Revenue item')\n verbose_name_plural = _('Revenue items')\n\n permissions = (\n ('export_financial_data',_('Export detailed financial transaction information to CSV')),\n ('view_finances_bymonth',_('View school finances month-by-month')),\n ('view_finances_byevent',_('View school finances by Event')),\n ('view_finances_detail',_('View school finances as detailed statement')),\n )\n","sub_path":"danceschool/financial/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"194459497","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nimport serial\nimport json\n\nser = serial.Serial('/dev/ttyACM0',9600)\nwhile True:\n data = ser.readline().decode(\"utf-8\").rstrip('\\r\\n')\n json_data = json.dumps([{'valo': data}])\n with open('/var/www/html/valo.json', 'w') as outfile:\n json.dump(json.JSONDecoder().decode(json_data), outfile)\n \n print (json_data)\nser.close()","sub_path":"brightnessArvo.py","file_name":"brightnessArvo.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"210586017","text":"# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n#\n# Return a deep copy of the list.\n\n# Definition for singly-linked list with a random pointer.\nclass RandomListNode(object):\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\nclass Solution(object):\n def copyRandomList(self, head):\n \"\"\"\n :type head: RandomListNode\n :rtype: RandomListNode\n \"\"\"\n dic=dict()\n m=n=head\n # first, store each node in dictionary\n while m:\n dic[m]=RandomListNode(m.label)\n m=m.next\n # second, search each node next and random from dictionary\n while n:\n dic[n].next=dic.get(n.next)\n dic[n].random = dic.get(n.random)\n n=n.next\n return 
dic.get(head)\n","sub_path":"Basic_Algorithm/Linklist/copylistwithrandompointer.py","file_name":"copylistwithrandompointer.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488057470","text":"'''\r\nPartial binding to the Win32 API. Just implements the functions needed.\r\n'''\r\nimport ctypes as ct\r\nfrom console.bindhelper import bind_function\r\n# This is stuff required to access the Windows API within python\r\n# You should mostly be able to ignore it.\r\n# This will only be ran on windows.\r\n\r\n# Names of the dll's that contain the various functions we need.\r\nuser32 = \"user32\"\r\nkernel32 = 'kernel32'\r\n\r\n# Windows API type information\r\nINT = ct.c_int\r\nBOOL = INT\r\nULONG = ct.c_ulong\r\nSHORT = ct.c_short\r\nWORD = ct.c_ushort\r\nDWORD = ct.c_ulong\r\nPVOID = LPVOID = ct.c_void_p\r\nHANDLE = PVOID\r\nCOLORREF = DWORD\r\nLPWORD = ct.POINTER(WORD)\r\nLPDWORD = ct.POINTER(DWORD)\r\n\r\nLPCOLORREF = ct.POINTER(COLORREF)\r\n\r\n\r\n# Reimplementation of macros\r\ndef RGB(r, g, b):\r\n return r | g << 8 | b << 16\r\n\r\n\r\n# Windows API structures\r\nclass COORD(ct.Structure):\r\n _fields_ = [('X', SHORT),\r\n ('Y', SHORT)]\r\n\r\n\r\nclass SMALL_RECT(ct.Structure):\r\n _fields_ = [('Left', SHORT),\r\n ('Top', SHORT),\r\n ('Right', SHORT),\r\n ('Bottom', SHORT)]\r\n\r\n\r\nclass CONSOLE_SCREEN_BUFFER_INFO(ct.Structure):\r\n _fields_ = [('dwSize', COORD),\r\n ('dwCursorPosition', COORD),\r\n ('wAttributes', WORD),\r\n ('srWindow', SMALL_RECT)]\r\nPCONSOLE_SCREEN_BUFFER_INFO = ct.POINTER(CONSOLE_SCREEN_BUFFER_INFO)\r\n\r\n\r\nclass CONSOLE_SCREEN_BUFFER_INFOEX(ct.Structure):\r\n _fields_ = [('cbSize', ULONG),\r\n ('dwSize', COORD),\r\n ('dwCursorPosition', COORD),\r\n ('wAttributes', WORD),\r\n ('srWindow', SMALL_RECT),\r\n ('dwMaximumWindowSize', COORD),\r\n ('wPopupAttributes', WORD),\r\n ('bFullscreenSupported', BOOL),\r\n ('ColorTable', COLORREF * 16)]\r\nPCONSOLE_SCREEN_BUFFER_INFOEX = ct.POINTER(CONSOLE_SCREEN_BUFFER_INFOEX)\r\n\r\n\r\nclass CONSOLE_CURSOR_INFO(ct.Structure):\r\n _fields_ = [('dwSize', DWORD),\r\n ('bVisible', BOOL)]\r\nPCONSOLE_CURSOR_INFO = ct.POINTER(CONSOLE_CURSOR_INFO)\r\n\r\n# Enum Values\r\nSTD_INPUT_HANDLE = -10\r\nSTD_OUTPUT_HANDLE = -11\r\nSTD_ERROR_HANDLE = -12\r\n\r\nFOREGROUND_BLUE = 0x0001\r\nFOREGROUND_GREEN = 0x0002\r\nFOREGROUND_RED = 0x0004\r\nFOREGROUND_INTENSITY = 0x0008\r\nBACKGROUND_BLUE = 0x0010\r\nBACKGROUND_GREEN = 0x0020\r\nBACKGROUND_RED = 0x0040\r\nBACKGROUND_INTENSITY = 0x0080\r\nCOMMON_LVB_LEADING_BYTE = 0x0100\r\nCOMMON_LVB_TRAILING_BYTE = 0x0200\r\nCOMMON_LVB_GRID_HORIZONTAL = 0x0400\r\nCOMMON_LVB_GRID_LVERTICAL = 0x0800\r\nCOMMON_LVB_GRID_RVERTICAL = 0x1000\r\nCOMMON_LVB_REVERSE_VIDEO = 0x4000\r\nCOMMON_LVB_UNDERSCORE = 0x8000\r\n\r\nGetStdHandle = bind_function(kernel32, 'GetStdHandle', HANDLE, (DWORD,))\r\n\r\nGetConsoleScreenBufferInfo = bind_function(\r\n kernel32,\r\n 'GetConsoleScreenBufferInfo',\r\n BOOL,\r\n (HANDLE, PCONSOLE_SCREEN_BUFFER_INFO, ))\r\n\r\nGetConsoleScreenBufferInfoEx = bind_function(\r\n kernel32,\r\n 'GetConsoleScreenBufferInfoEx',\r\n BOOL,\r\n (HANDLE, PCONSOLE_SCREEN_BUFFER_INFOEX))\r\n\r\nSetConsoleScreenBufferInfoEx = bind_function(\r\n kernel32,\r\n 'SetConsoleScreenBufferInfoEx',\r\n BOOL,\r\n (HANDLE, PCONSOLE_SCREEN_BUFFER_INFOEX))\r\n\r\nSetConsoleTextAttribute = bind_function(\r\n kernel32,\r\n 'SetConsoleTextAttribute',\r\n BOOL,\r\n (HANDLE, WORD))\r\n\r\nGetConsoleCursorInfo = 
bind_function(\r\n    kernel32,\r\n    'GetConsoleCursorInfo',\r\n    BOOL,\r\n    (HANDLE, PCONSOLE_CURSOR_INFO))\r\n\r\nSetConsoleCursorPosition = bind_function(\r\n    kernel32,\r\n    'SetConsoleCursorPosition',\r\n    BOOL,\r\n    (HANDLE, COORD))\r\n\r\nReadConsoleOutputAttribute = bind_function(\r\n    kernel32,\r\n    'ReadConsoleOutputAttribute',\r\n    BOOL,\r\n    (HANDLE, LPWORD, DWORD, COORD, LPDWORD)\r\n)\r\n","sub_path":"console/win32.py","file_name":"win32.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339915965","text":"from django.urls import path\nfrom . import views\nfrom .views import PostListView,PostDetailView,PostCreateView,PostUpdateView,PostDeleteView,UserPostListView\nfrom .views import post_list,post_detail,upvote_post,gallery\nfrom .views import EventDeleteView,EventUpdateView,EventCreateView,event_list,participate\nfrom .views import notice_list,NoticeUpdateView,NoticeDeleteView,NoticeCreateView\n\n# urlpatterns=[\n# path('',PostListView.as_view(),name='blog-home'),\n# path('user/<str:username>',UserPostListView.as_view(),name='user-posts'),\n# path('post/<int:pk>/',PostDetailView.as_view(),name='post-detail'),\n# path('post/new/',PostCreateView.as_view(),name='post-create'),\n# path('post/<int:pk>/update/',PostUpdateView.as_view(),name='post-update'),\n# path('post/<int:pk>/delete/',PostDeleteView.as_view(),name='post-delete'),\n# path('about/',views.about,name='blog-about'),\n# ]\n # taking some function-based views\nurlpatterns=[\n    path('',post_list,name='blog-home'),\n    path('user/<str:username>/',UserPostListView.as_view(),name='user-posts'),\n    path('post/<int:pk>/',post_detail,name='post-detail'),\n    path('post/upvote/',upvote_post,name='upvote_post'),\n    path('post/new/',PostCreateView.as_view(),name='post-create'),\n    path('post/<int:pk>/update/',PostUpdateView.as_view(),name='post-update'),\n    path('post/<int:pk>/delete/',PostDeleteView.as_view(),name='post-delete'),\n    path('event/new/',EventCreateView.as_view(),name='event-create'),\n    path('event/<int:pk>/update/',EventUpdateView.as_view(),name='event-update'),\n    path('event/list/',event_list,name='event-list'),\n    path('event/<int:pk>/delete/',EventDeleteView.as_view(),name='event-delete'),\n    path('event/participate/',participate,name=\"participate\"),\n    path('notice/list/',notice_list,name='notice-list'),\n    path('notice/new/',NoticeCreateView.as_view(),name='notice-create'),\n    path('notice/<int:pk>/update/',NoticeUpdateView.as_view(),name='notice-update'),\n    path('notice/<int:pk>/delete/',NoticeDeleteView.as_view(),name='notice-delete'),\n    path('about/',views.about,name='blog-about'),\n    path('gallery/',gallery,name=\"gallery\")\n]\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"274408394","text":"import pika\nimport json\nimport threading\nimport operator\nimport pymongo\nimport time\nimport urllib.request as urllib2\nfrom retry import retry\n\nresponse = urllib2.urlopen(\"https://raw.githubusercontent.com/Annu4git/Tools/master/platform_config.txt\")\npage_source = response.read().decode()\n\npp=page_source.split('\\n')\npp.remove('')\nfor i in pp:\n\tt=i.split(' ')\n\nmyclient = pymongo.MongoClient(t[t.index('mongodb')+1])\n\ndatabase = myclient[\"metadata\"]\n\nservice_metadata = database[\"service_metadata\"]\n\nnodes_metadata = 
database[\"nodes_metadata\"]\n\nsensors_metadata = database[\"sensors_metadata\"]\n\nrunner_ip = t[t.index('rabbit_server_ip')+1]\nreplica = int(t[t.index('replica_count')+1])\nthreshold = float(t[t.index('threshold')+1])\n\nself_ip = t[t.index('loader')+1]\n\nprint('Load Balancer ip: ',self_ip)\n\n#self_ip = '10.2.138.136'\nserver_loads = {}\n\n@retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\ndef recieveLoads():\n def onLoadsRecieved(ch, method, props, body):\n global server_loads\n rec_data = json.loads(body.decode())\n #pprint.pprint(rec_data)\n\n if rec_data['msg_type'] == 'serverLoads':\n server_loads = rec_data['data']\n\t\t# elif rec_data['msg_type'] == 'Loads':\n\t\t# \tpass\n #pprint.pprint(server_loads)\n\n queue_name = 'monitoring-loadBalancer'\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n loadChannel = connection.channel()\n loadChannel.queue_declare(queue=queue_name, auto_delete = True)\n\n loadChannel.basic_consume(queue=queue_name, on_message_callback=onLoadsRecieved)\n print('Consume for load started')\n loadChannel.start_consuming()\n connection.close()\n\ndef check_temp(curr_temp, high_temp):\n if curr_temp >= high_temp:\n return 0\n else:\n return 1\n\ndef compute_score(cpu_percent, memory_percent, cpu_benchmark, free_memory, current_temp, high_temp):\n score = 40. / ( 3./cpu_percent + 1./memory_percent ) # System load\n score *= float(cpu_benchmark)/1e4 + min(2, free_memory)/10. # System performance\n score *= check_temp(current_temp, high_temp) # System temperature\n return score\n\n\n\ndef get_lowest_load_server(layer, req_replica):\n global server_loads\n\n lowest_load_servers = []\n\n server_load_score = {}\n\n print(server_loads)\n\n #print(layer, server_loads[layer].keys())\n\n for IP in server_loads[layer].keys():\n if server_loads[layer][IP]['isExclusiveServer'] == False:\n cpu_percent = float( server_loads[layer][IP]['cpu_free'] )\n memory_percent = float( server_loads[layer][IP]['mem_free'] )\n cpu_benchmark = float( server_loads[layer][IP]['cpu_performance'] )\n free_memory = float( server_loads[layer][IP]['actual_mem_free'] )\n current_temp = int( server_loads[layer][IP]['temp_current'] )\n high_temp = int( server_loads[layer][IP]['temp_high'] )\n\n score = compute_score(cpu_percent, memory_percent, cpu_benchmark, free_memory, current_temp, high_temp)\n server_load_score[IP] = score\n\n sorted_servers = sorted(server_load_score.items(), key=operator.itemgetter(1))\n\n print(sorted_servers)\n\n itr = [len(sorted_servers) if len(sorted_servers) < req_replica else req_replica]\n\n for i in range(itr[0]):\n if sorted_servers[i][1] > threshold:\n lowest_load_servers.append(sorted_servers[i][0])\n\n return lowest_load_servers\n\n@retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\ndef retrieve_exclusive_nodes(cpu_free_percent, mem_free, cpu_performance):\n global replica, server_loads, runner_ip\n lowest_load_servers = get_lowest_load_server('1', replica)\n \n exclusive_nodes = []\n\n for server in lowest_load_servers:\n if server_loads['1'][server]['cpu_free'] >= cpu_free_percent and server_loads['1'][server]['actual_mem_free'] >= mem_free and float(server_loads['1'][server]['cpu_performance']) >= float(cpu_performance):\n exclusive_nodes.append(server)\n\n # Notify these servers to reserve resources for exclusive service that is about to deploy\n sending_data = {}\n sending_data['msg_type'] = 'acquire_resources'\n sending_data['request_by'] = 'loadBalancer'\n sending_data['data'] = {}\n 
sending_data['data']['cpu_percent'] = cpu_free_percent\n        sending_data['data']['mem_free'] = mem_free\n\n        queue_name = 'server-'+server\n        connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n        loadBalancer_channel = connection.channel()\n        loadBalancer_channel.queue_declare(queue=queue_name, auto_delete = True)\n        loadBalancer_channel.basic_publish(exchange='', routing_key=queue_name, body=json.dumps(sending_data))\n\n        # Update mapping at loadBalancer (deduct the reserved CPU and memory from this node's free amounts)\n        server_loads['1'][server]['cpu_free'] = server_loads['1'][server]['cpu_free'] - cpu_free_percent\n        server_loads['1'][server]['mem_free'] = server_loads['1'][server]['mem_free'] - mem_free\n        server_loads['1'][server]['isExclusiveServer'] = True\n\n    # Update the database\n\n    if len(exclusive_nodes) < replica:\n        # num of exclusive nodes to start = replica-exclusive\n        print('Required to bring up new exclusive servers and return their ips')\n\n\n    return exclusive_nodes\n\n@retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\ndef recieveRequests():\n\n    @retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\n    def onRequestsRecieved(ch, method, props, body):\n        global server_loads, replica\n        rec_data = json.loads(body.decode())\n\n        if rec_data['msg_type'] == 'serverLoads':\n            server_loads = rec_data['data']\n        #if rec_data['msg_type'] == 'Loads':\n            #pass\n        #pprint.pprint(server_loads)\n\n        elif rec_data['msg_type'] == 'scheduleJobs' and rec_data['request_by'] == 'scheduler':\n\n            service_id = rec_data['data']['service_id']\n            application_id = rec_data['data']['application_id']\n            locality_tag = rec_data['data']['locality_tag']\n            trigger_type = rec_data['data']['trigger_type']\n\n            myquery = { \"service_id\": service_id, \"application_id\": application_id }\n            mydoc = service_metadata.find(myquery)\n\n            print('Received from scheduler')\n\n            sensor_query = {\"locality_tag\": locality_tag}\n            sensors_data = sensors_metadata.find(sensor_query)\n            input_stream_ips = []\n            for sensor in sensors_data:\n                input_stream_ips.append(sensor[\"ip\"])\n\n            if trigger_type == 'stop':\n                query = {'service_id': service_id, 'application_id':application_id}\n                result = service_metadata.find(query)\n                print('Stopping ',service_id, application_id)\n                if result.count() == 1:\n                    for x in result:\n                        serving_nodes = x['serving_nodes']\n                        service_state = x['service_state']\n                        if service_state == 'running':\n                            sending_data = {}\n                            sending_data['msg_type'] = 'scheduleJob'\n                            sending_data['request_by'] = 'loadBalancer'\n                            sending_data['ip'] = self_ip\n                            sending_data['data'] = {}\n                            sending_data['data']['service_id'] = service_id\n                            sending_data['data']['application_id'] = application_id\n                            sending_data['data']['input_stream'] = input_stream_ips\n                            sending_data['data']['trigger_type'] = trigger_type\n\n                            print('Retrieved serving nodes for stop: ',serving_nodes)\n\n                            for i in serving_nodes.split(' '):\n                                if i!='':\n                                    service_running_ip = i\n                                    queue_name = 'server-'+service_running_ip\n                                    print('sending to: ',i)\n                                    connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n                                    loadBalancer_channel = connection.channel()\n                                    loadBalancer_channel.queue_declare(queue=queue_name, auto_delete = True)\n                                    loadBalancer_channel.basic_publish(exchange='', routing_key=queue_name, body=json.dumps(sending_data))\n                        else:\n                            print('No service is running with service_id: ',service_id, ' and application_id: ',application_id)\n                elif result.count() > 1:\n                    print('More than one service with same name stored in the database...')\n                elif result.count() == 0:\n                    print('The service 
',service_id,' is not stored in the database...')\n \n elif trigger_type == 'start':\n for x in mydoc:\n deployed_nodes = x['node_ips'].split(' ')\n deployed_node_ips = []\n for i in range(len(deployed_nodes)):\n deployed_node_ips.append(deployed_nodes[i].split(':')[0])\n service_rest_url = x['rest_url']\n \n service_priority = x['priority']\n capable_servers = []\n isNodeUp = False\n print('Following are the ips from service metadata')\n print(deployed_node_ips)\n for node_ip in deployed_node_ips:\n if service_priority == \"high\":\n l = '1'\n elif service_priority == \"low\":\n l = '0'\n cpu_percent = float( server_loads[l][node_ip]['cpu_free'] )\n memory_percent = float( server_loads[l][node_ip]['mem_free'] )\n cpu_benchmark = float( server_loads[l][node_ip]['cpu_performance'] )\n free_memory = float( server_loads[l][node_ip]['actual_mem_free'] )\n current_temp = int( server_loads[l][node_ip]['temp_current'] )\n high_temp = int( server_loads[l][node_ip]['temp_high'] )\n \n calc_threshold = compute_score(cpu_percent, memory_percent, cpu_benchmark, free_memory, current_temp, high_temp)\n \n print(node_ip)\n \n nodes_query = { \"ip\": node_ip }\n responsibleNodes = nodes_metadata.find(nodes_query)\n \n for node in responsibleNodes:\n if node['nodeState'] == \"active\":\n isNodeUp = True\n print('Node: ', node_ip, \" is responsible node!\")\n break\n \n print('Threshold is: ',calc_threshold)\n \n if calc_threshold > threshold and isNodeUp:\n capable_servers.append((calc_threshold, node_ip))\n isNodeUp = False\n \n \n # if any server out of the two is down or unable to handle load\n if len(capable_servers) == 0:\n print('Need to shift models to a less loaded server or start new servers....')\n # lowest_load_servers = get_lowest_load_server(rec_data['data']['layer'])\n # need to move services across servers\n else:\n print('Scheduling start job to the servers....')\n for server in capable_servers:\n sending_data = {}\n sending_data['msg_type'] = 'scheduleJob'\n sending_data['request_by'] = 'loadBalancer'\n sending_data['ip'] = self_ip\n sending_data['data'] = {}\n sending_data['data']['service_id'] = service_id\n sending_data['data']['application_id'] = application_id\n sending_data['data']['input_stream'] = input_stream_ips\n sending_data['data']['trigger_type'] = trigger_type\n queue_name = 'server-'+server[1]\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n loadBalancer_channel = connection.channel()\n loadBalancer_channel.queue_declare(queue=queue_name, auto_delete = True)\n loadBalancer_channel.basic_publish(exchange='', routing_key=queue_name, body=json.dumps(sending_data))\n \n if len(capable_servers) < replica:\n print('Start ',replica-len(capable_servers), ' number of servers')\n\n\n elif rec_data['msg_type'] == 'deployServices' and rec_data['request_by'] == 'deployManager':\n\n print('Message Recieved from deploy manager')\n\n if rec_data['data']['service_type'] == 'exclusive':\n\n lowest_load_servers = retrieve_exclusive_nodes(rec_data['data']['service_requirement']['cpu_free'],\n rec_data['data']['service_requirement']['mem_free'],\n rec_data['data']['service_requirement']['cpu_performance'])\n elif rec_data['data']['service_type'] == 'normal':\n if rec_data['data']['layer'] == '0':\n lowest_load_edge = get_lowest_load_server(rec_data['data']['layer'], 1)\n if len(lowest_load_edge) == 0:\n lowest_load_servers = get_lowest_load_server('1', replica)\n if len(lowest_load_servers) < replica:\n print('Required to up a new server and add its ip to 
lowest load servers')\n else:\n lowest_load_servers = get_lowest_load_server('1', replica-1)\n if len(lowest_load_servers) < replica-1:\n print('Required to up a new server and add its ip to lowest load servers')\n lowest_load_servers.append(lowest_load_edge[0])\n else:\n lowest_load_servers = get_lowest_load_server(rec_data['data']['layer'], replica)\n if len(lowest_load_servers) < replica:\n print('Required to up a new server and add its ip to lowest load servers')\n\n sending_data = {}\n sending_data['msg_type'] = 'deployServicesResp'\n sending_data['request_by'] = 'loadBalancer'\n sending_data['data'] = {}\n sending_data['data']['lowest_load_servers'] = lowest_load_servers\n sending_data['data']['application_id'] = rec_data['data']['application_id']\n sending_data['data']['service_id'] = rec_data['data']['service_id']\n sending_data['data']['nature_of_service'] = rec_data['data']['nature_of_service']\n sending_data['ip'] = self_ip\n\n queue_name = 'deployManager-'+rec_data['ip']\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n loadBalancer_channel = connection.channel()\n loadBalancer_channel.queue_declare(queue=queue_name, auto_delete = True)\n loadBalancer_channel.basic_publish(exchange='', routing_key=queue_name, body=json.dumps(sending_data))\n\n queue_name = 'loadBalancer-'+self_ip\n\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n reqChannel = connection.channel()\n reqChannel.queue_declare(queue=queue_name, auto_delete = True)\n\n reqChannel.basic_consume(queue=queue_name, on_message_callback=onRequestsRecieved)\n\n print('Consume for other msgs started')\n\n reqChannel.start_consuming()\n connection.close()\n\n@retry(pika.exceptions.AMQPConnectionError, delay=5, jitter=(1, 3))\ndef sendHeartbeat():\n while True:\n serving_data = {}\n serving_data['msg_type'] = 'Heartbeat'\n serving_data['ip'] = self_ip\n serving_data['node_type'] = 'loadBalancer'\n queue_name = 'logging_queue'\n connection1 = pika.BlockingConnection(pika.ConnectionParameters(host=runner_ip))\n channel1 = connection1.channel()\n channel1.queue_declare(queue=queue_name, auto_delete = True)\n channel1.basic_publish(exchange='', routing_key=queue_name, body=json.dumps(serving_data))\n time.sleep(2)\n\n\nthread_recv_loads = threading.Thread(target=recieveLoads, args=())\nthread_recv_requests = threading.Thread(target=recieveRequests, args=())\nthread_send_heartbeat = threading.Thread(target=sendHeartbeat, args=())\n#thread_recv_loads.deamon = True\nthread_recv_loads.start()\n#thread_recv_requests.daemon = True\nthread_recv_requests.start()\n#thread_send_heartbeat.daemon = True\nthread_send_heartbeat.start()\nthread_recv_loads.join()\nthread_recv_requests.join()\nthread_send_heartbeat.join()\n","sub_path":"platform_old/loadBalancer.py","file_name":"loadBalancer.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"290938856","text":"from tools import *\n\nfrom variables import *\nfrom cuts import cuts_Bu, prntCuts, mctrue\nfrom model import model_Bu\nfrom data import mc_Pythia6, mc_Pythia8, mc_total\n\n\n\n# Preparation\ntBu = mc_total.data\ncuts_Bu += mctrue\n\nmodel_Bu.b.fix(0)\nmodel_Bu.background.tau.fix(0)\n\nfor i in prntCuts(cuts_Bu, \" CUTS B+ \"):\n logger.info(i)\n\n\n\n\n# logger.info('Fill control B+ histogram (takes some time)')\n# with timing():\n# tBu.Project(h1.GetName(), 'DTFm_b', cuts_Bu)\n\n\n# with rooSilent():\n# logger.info('Fit 
Bc+ & B+ histogram (check the model)')\n# r, f = model_Bu.fitHisto(h1)\n\n\n\n\nsel_Bu = SelectorWithVars(\n variables=selector_variables,\n selection=cuts_Bu\n)\n\nlogger.info('Build RooFit dataset for B+ , it could take as long as 3-5 minutes')\n\ntBu.process(sel_Bu)\n\nds_Bu = sel_Bu.dataset()\nds_Bu.Print('v')\n\n\n\nlogger.info('Make unbinned fit for B+')\n\nmodel_Bu.s.setMax(1.2 * len(ds_Bu))\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.sigma.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.mean.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.aR.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.aL.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.nR.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\nmodel_Bu.signal.nL.release()\nru, fu = model_Bu.fitTo(ds_Bu, draw=True, nbins=nbin_Bu)\n\n\nfu.SetXTitle('#Inv.\\,mass(J/\\psi\\,K\\pi\\pi), GeV/c^2')\nfu.SetYTitle('Events / (%d \\, MeV/c^{2})' % events_binning)\n\nfu.Draw()\n\n# logger.info('running sPlot')\n# model_Bu.sPlot(ds_Bu)\n\n\n# print 'FIT#2 results for B+ ', ru(model_Bu.s_name)[0]\n# print 'FIT#2 precision:', ru(\"SBu\")[0].prec()\n","sub_path":"Kpipi/fit/fit_mc.py","file_name":"fit_mc.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154959936","text":"import os\nimport json\nfrom dotenv import load_dotenv\nfrom pathlib import Path\nfrom .constants import *\n\nenv_path = os.path.join(\n os.path.dirname(__file__),\n os.environ.get('ENVFILE', '.env'))\nload_dotenv(env_path)\n\n\nclass Config:\n # ------------------------------------------------------------------------------------------------------------------\n # GENERAL\n # ------------------------------------------------------------------------------------------------------------------\n\n # General app info\n BASE_DIR = os.path.dirname(os.path.dirname(__file__))\n APP_NAME_SUFFIX = os.environ.get('APP_NAME_SUFFIX')\n DEBUG = os.environ.get('DEBUG', False)\n STAGE = os.environ.get('STAGE', 'dev')\n AWS_ACCOUNT_ID = os.environ.get('AWS_ACCOUNT_ID', '917885688343')\n\n FRONTEND_BASE_URL = os.environ.get('FRONTEND_BASE_URL')\n\n # ------------------------------------------------------------------------------------------------------------------\n # COGNITO\n # ------------------------------------------------------------------------------------------------------------------\n\n # AWS User Pool Configuration\n AWS_COGNITO_USER_POOL_NAME = os.environ.get('AWS_COGNITO_USER_POOL_NAME', 'mpc-dev-chalice-api-user-pool')\n AWS_COGNITO_USER_POOL_ID = os.environ.get('AWS_COGNITO_USER_POOL_ID', 'eu-west-1_UB4WIHfuT')\n AWS_COGNITO_USER_POOL_ARN = os.environ.get(\n 'AWS_COGNITO_USER_POOL_ARN',\n 'arn:aws:cognito-idp:eu-west-1:917885688343:userpool/%s' % AWS_COGNITO_USER_POOL_ID\n )\n AWS_COGNITO_DEFAULT_REGION = os.environ.get('AWS_COGNITO_DEFAULT_REGION', 'eu-west-1')\n\n # ------------------------------------------------------------------------------------------------------------------\n # DYNAMO DB\n # ------------------------------------------------------------------------------------------------------------------\n\n # AWS DYNAMO TABLE CONFIG\n AWS_DYNAMODB_DEFAULT_REGION = os.environ.get('AWS_DYNAMODB_DEFAULT_REGION', 'eu-west-1')\n AWS_DYNAMODB_CMS_TABLE_NAME = 
os.environ.get('AWS_DYNAMODB_CMS_TABLE_NAME', 'CMS')\n AWS_DYNAMODB_MAGENTO_CUSTOMER_TABLE_NAME = os.environ.get('AWS_DYNAMODB_MAGENTO_CUSTOMER_TABLE_NAME', 'Magento')\n AWS_DYNAMODB_BANNER_TABLE_NAME = os.environ.get('AWS_DYNAMODB_BANNER_TABLE_NAME', 'Banners')\n\n # ------------------------------------------------------------------------------------------------------------------\n # ELASTIC\n # ------------------------------------------------------------------------------------------------------------------\n\n # AWS Elasticsarch Service configuration\n AWS_ELASTICSEARCH_PRODUCTS_REGION = os.environ.get('AWS_ELASTICSEARCH_PRODUCTS_REGION', 'eu-west-1')\n AWS_ELASTICSEARCH_SCHEMA = os.environ.get('AWS_ELASTICSEARCH_SCHEMA', 'https')\n AWS_ELASTICSEARCH_HOST = os.environ.get(\n 'AWS_ELASTICSEARCH_HOST',\n 'search-mpc-domain-qhdgnvecvaqb77evx7i64zbldm.eu-west-1.es.amazonaws.com')\n AWS_ELASTICSEARCH_PORT = int(os.environ.get('AWS_ELASTICSEARCH_PORT', 443))\n AWS_ELASTICSEARCH_ENDPOINT = '{}://{}:{}'.format(\n AWS_ELASTICSEARCH_SCHEMA,\n AWS_ELASTICSEARCH_HOST,\n AWS_ELASTICSEARCH_PORT\n )\n AWS_ELASTICSEARCH_SCROLL_LIFETIME = os.environ.get('AWS_ELASTICSEARCH_SCROLL_LIFETIME', '5m')\n\n # products\n AWS_ELASTICSEARCH_PRODUCTS = os.environ.get('AWS_ELASTICSEARCH_PRODUCTS', 'products')\n\n # Scored Products\n AWS_ELASTICSEARCH_SCORED_PRODUCTS = os.environ.get(\n 'AWS_ELASTICSEARCH_SCORED_PRODUCTS', 'scored_products')\n\n # orders\n AWS_ELASTICSEARCH_PURCHASE_ORDERS = os.environ.get('AWS_ELASTICSEARCH_PURCHASE_ORDERS', 'purchase_orders')\n AWS_ELASTICSEARCH_PURCHASE_ORDERS_CUSTOMER_ORDERS_MAP = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_ORDERS_CUSTOMER_ORDERS_MAP',\n 'purchase_orders_customer_orders_map'\n )\n\n # credit cards\n AWS_ELASTICSEARCH_PURCHASE_CUSTOMER_CREDIT_CARDS = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_CUSTOMER_CREDIT_CARDS',\n 'purchase_customer_credit_cards'\n )\n AWS_ELASTICSEARCH_PURCHASE_CUSTOMER_CREDIT_CARDS_CUSTOMER_MAP = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_CUSTOMER_CREDIT_CARDS_CUSTOMER_MAP',\n 'purchase_customer_credit_cards_customer_map'\n )\n\n # returns\n AWS_ELASTICSEARCH_PURCHASE_RETURN_REQUESTS = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_RETURN_REQUESTS',\n 'purchase_return_requests'\n )\n AWS_ELASTICSEARCH_PURCHASE_RETURN_REQUESTS_CUSTOMER_MAP = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_RETURN_REQUESTS_CUSTOMER_MAP',\n 'purchase_return_requests_customer_map'\n )\n\n # cancels\n AWS_ELASTICSEARCH_PURCHASE_CANCEL_REQUESTS = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_CANCEL_REQUESTS',\n 'purchase_cancel_requests'\n )\n AWS_ELASTICSEARCH_PURCHASE_CANCEL_REQUESTS_ORDERS_MAP = os.environ.get(\n 'AWS_ELASTICSEARCH_PURCHASE_CANCEL_REQUESTS_ORDERS_MAP',\n 'purchase_cancel_requests_orders_map'\n )\n\n # personalization\n AWS_ELASTICSEARCH_PERSONALIZATION_ORDERS = os.environ.get(\n 'AWS_ELASTICSEARCH_PERSONALIZATION_ORDERS',\n 'personalization_orders'\n )\n\n # customer tiers\n AWS_ELASTICSEARCH_CUSTOMER_TIERS_TIERS = os.environ.get(\n 'AWS_ELASTICSEARCH_CUSTOMER_TIERS_TIERS',\n 'customer_tiers_tiers'\n )\n AWS_ELASTICSEARCH_CUSTOMER_TIERS_CUSTOMER_TIERS = os.environ.get(\n 'AWS_ELASTICSEARCH_CUSTOMER_TIERS_CUSTOMER_TIERS',\n 'customer_tiers_customer_tiers'\n )\n AWS_ELASTICSEARCH_CUSTOMER_TIERS_CUSTOMER_INFO_SPENT_AMOUNT = os.environ.get(\n 'AWS_ELASTICSEARCH_CUSTOMER_TIERS_CUSTOMER_INFO_SPENT_AMOUNT',\n 'customer_tiers_customer_info_spent_amount'\n )\n\n # fbucks\n AWS_ELASTICSEARCH_FBUCKS_HANDLED_ORDERS = os.environ.get(\n 
'AWS_ELASTICSEARCH_FBUCKS_HANDLED_ORDERS',\n 'fbucks_handled_orders'\n )\n AWS_ELASTICSEARCH_FBUCKS_CUSTOMER_AMOUNT = os.environ.get(\n 'AWS_ELASTICSEARCH_FBUCKS_CUSTOMER_AMOUNT',\n 'fbucks_customer_amount'\n )\n AWS_ELASTICSEARCH_FBUCKS_CUSTOMER_AMOUNT_CHANGES = os.environ.get(\n 'AWS_ELASTICSEARCH_FBUCKS_CUSTOMER_AMOUNT_CHANGES',\n 'fbucks_customer_amount_changes'\n )\n\n # ------------------------------------------------------------------------------------------------------------------\n # SQS QUEUE\n # ------------------------------------------------------------------------------------------------------------------\n\n PORTAL_AWS_ACCOUNT_ID = os.environ.get('PORTAL_AWS_ACCOUNT_ID', AWS_ACCOUNT_ID)\n SQS_REGION = os.environ.get('SQS_REGION', 'eu-west-1')\n def build_sqs_url(\n queue_name: str,\n account_id: str = AWS_ACCOUNT_ID,\n region: str = SQS_REGION\n ) -> str:\n if not os.environ.get('DEBUG', False) and not queue_name or not account_id or not region:\n raise Exception(\"Your configuration has a fatal error.\")\n\n return 'https://sqs.{region}.amazonaws.com/{account_id}/{queue_name}'.format(\n account_id=account_id, region=region, queue_name=queue_name)\n\n\n # Building SQS Queue URL\n SQS_MPC_PORTAL_COMMON = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_COMMON'))\n SQS_MPC_PORTAL_ORDER = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_ORDER'), account_id=PORTAL_AWS_ACCOUNT_ID)\n SQS_MPC_PORTAL_EMAIL_SUBSCRIPTION = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_EMAIL_SUBSCRIPTION'))\n SQS_MPC_PORTAL_CUSTOMER_INFO_REQUEST = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_CUSTOMER_INFO_REQUEST'))\n SQS_MPC_PORTAL_COMMUNICATION_PREFERENCES = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_COMMUNICATION_PREFERENCES'))\n SQS_MPC_MPC_COMMON_URL = build_sqs_url(os.environ.get('SQS_MPC_MPC_COMMON_NAME'))\n SQS_MPC_PORTAL_CUSTOMER_INFO_UPDATE = build_sqs_url(os.environ.get('SQS_MPC_PORTAL_CUSTOMER_INFO_UPDATE'))\n\n # { queues: [{ name: str, batch_size: int }, ...] }\n SQS_LISTENER_CONFIG = {\n 'queues': [\n {'name': os.environ.get('SQS_PORTAL_MPC_COMMON'), 'batch_size': 1},\n {'name': os.environ.get('SQS_PORTAL_MPC_ORDER'), 'batch_size': 1},\n {'name': os.environ.get('SQS_MPC_MPC_COMMON_NAME'), 'batch_size': 1},\n {'name': os.environ.get('SQS_PORTAL_MPC_CUSTOMER_INFO_UPDATE'), 'batch_size': 1},\n ]\n }\n\n # { event_descriptor: { object_type: str, queue_url: str, ... 
} }\n SQS_SENDER_CONFIG = {\n # can be used for local\n # 'class': 'chalicelib.libs.core.sqs_sender._SqsSenderDummyPrint',\n # 'params': {},\n\n # @TODO : use mpc-portal-common instead of not critical mpc-portal queues\n # @TODO : create single listener of mpc-portal-common for not critical mpc-portal messages\n\n 'class': 'chalicelib.libs.core.sqs_sender._SqsSenderSqs',\n 'params': {\n 'events': {\n 'user_answer': {\n 'object_type': 'user_answer',\n 'queue_url': SQS_MPC_PORTAL_COMMON,\n },\n 'communication_preferences': {\n 'object_type': 'communication_preferences',\n 'queue_url': SQS_MPC_PORTAL_COMMUNICATION_PREFERENCES,\n },\n 'credit_cash_out_request': {\n 'object_type': 'credit_cash_out_request',\n 'queue_url': SQS_MPC_PORTAL_COMMON,\n },\n \"customer_info_request\": {\n \"object_type\": \"customer_info_request\",\n \"queue_url\": SQS_MPC_PORTAL_CUSTOMER_INFO_REQUEST,\n },\n 'contactus_request': {\n 'object_type': 'contactus_request',\n 'queue_url': SQS_MPC_PORTAL_COMMON,\n },\n 'order_change': {\n 'object_type': 'mpc_order',\n 'queue_url': SQS_MPC_PORTAL_ORDER,\n },\n 'eft_proof_uploaded': {\n 'object_type': 'eft_proof_uploaded',\n 'queue_url': SQS_MPC_PORTAL_ORDER,\n },\n 'return_request_change': {\n 'object_type': 'return_request_change',\n 'queue_url': SQS_MPC_PORTAL_ORDER,\n },\n 'fixel_paid_order_cancellation_request': {\n 'object_type': 'fixel_paid_order_cancellation_request',\n 'queue_url': SQS_MPC_PORTAL_ORDER,\n },\n 'subscription_subscribed': {\n 'object_type': 'subscription_subscribed',\n 'queue_url': SQS_MPC_PORTAL_EMAIL_SUBSCRIPTION,\n },\n 'subscription_unsubscribed': {\n 'object_type': 'subscription_unsubscribed',\n 'queue_url': SQS_MPC_PORTAL_EMAIL_SUBSCRIPTION,\n },\n SCORED_PRODUCT_MESSAGE_TYPE.SECRET_KEY: {\n 'object_type': SCORED_PRODUCT_MESSAGE_TYPE.SECRET_KEY,\n 'queue_url': SQS_MPC_MPC_COMMON_URL,\n },\n SCORED_PRODUCT_MESSAGE_TYPE.CALCULATE_FOR_A_CUSTOMER: {\n 'object_type': SCORED_PRODUCT_MESSAGE_TYPE.CALCULATE_FOR_A_CUSTOMER,\n 'queue_url': SQS_MPC_MPC_COMMON_URL,\n },\n 'customer_info_update': {\n 'object_type': 'customer_info_update',\n 'queue_url': SQS_MPC_PORTAL_CUSTOMER_INFO_UPDATE,\n },\n }\n }\n }\n\n # ------------------------------------------------------------------------------------------------------------------\n # MAILER\n # ------------------------------------------------------------------------------------------------------------------\n\n MAILER_CONFIG = json.loads(os.environ.get('MAILER_CONFIG', json.dumps({\n # can be used for local\n # 'class': 'chalicelib.libs.core.mailer._MailerDummyPrint',\n # 'params': {},\n\n # live\n 'class': 'chalicelib.libs.core.mailer._MailerSmtp',\n 'params': {\n 'from_email': 'portal@runwaysale.co.za',\n 'host': 'smtp.mandrillapp.com',\n 'port': 587,\n 'username': 'info@runwaysale.co.za',\n 'password': 'vAkn_tSiZMbqU-KFAZwOlA',\n }\n })))\n\n # ------------------------------------------------------------------------------------------------------------------\n # FILE STORAGE\n # ------------------------------------------------------------------------------------------------------------------\n\n FILE_STORAGE_CONFIG = json.loads(os.environ.get('FILE_STORAGE_CONFIG', json.dumps({\n # This is config example for local environment. 
Change it in your own run script.\n # Implementations for other environments are defined in config.json.\n #\n # export FILE_STORAGE_CONFIG='{\n # \"class\": \"chalicelib.libs.core.file_storage._FileLocalStorage\",\n # \"params\": {\"root_path\": \"/var/www/html/mpc_api_storage\", \"root_url\": \"http://localhost/mpc_api_storage/\"}\n # }'\n })))\n\n # ------------------------------------------------------------------------------------------------------------------\n # OTHER\n # ------------------------------------------------------------------------------------------------------------------\n\n # Delivery API\n DTD_API_DEFAULT_DTD_URL = os.environ.get('DTD_API_DEFAULT_DTD_URL', 'https://cdt.runway.co.za/sku/DEFAULT')\n DTD_API_DEFAULT_DTD_MIN = os.environ.get('DTD_API_DEFAULT_DTD_MIN', 10) # if default api is unavailable,\n DTD_API_DEFAULT_DTD_MAX = os.environ.get('DTD_API_DEFAULT_DTD_MAX', 25) # we should use hardcoded values\n DTD_API_SKU_BASE_URL = os.environ.get('DTD_API_SKU_BASE_URL', 'https://cdt.runway.co.za/sku/')\n\n # Product filtering meta data\n NEW_PRODUCT_THRESHOLD = int(os.environ.get('NEW_PRODUCT_THRESHOLD', 1600)) # Should be 7 days in production\n LAST_CHANCE_STOCK_THRESHOLD = os.environ.get('LAST_CHANCE_STOCK_THRESHOLD', 10) # Stock Number\n LAST_CHANCE_END_DATE_THRESHOLD = os.environ.get('LAST_CHANCE_END_DATE_THRESHOLD', 30)\n PRODUCT_VISIT_LOG_MAX = os.environ.get('PRODUCT_VISIT_LOG_MAX', 10)\n PRODUCT_VISIT_LOG_THRESHOLD = os.environ.get('PRODUCT_VISIT_LOG_THRESHOLD', 7)\n\n # READ API\n READ_API_HEADER_NAME = os.environ.get('READ_API_HEADER_NAME', 'Identification')\n READ_API_HEADER_VALUE = os.environ.get('READ_API_HEADER_VALUE', 'RunwaySale::ReadAPI')\n\n # PEACH PAYMENT\n # https://peachpayments.docs.oppwa.com/\n # Attention! Default values are for tests here (see doc/examples).\n PEACH_PAYMENT_BASE_URL = os.environ.get('PEACH_PAYMENT_BASE_URL', 'https://test.oppwa.com/v1/')\n PEACH_PAYMENT_ENTITY_ID = os.environ.get('PEACH_PAYMENT_ENTITY_ID', '8a8294174e735d0c014e78cf26461790')\n PEACH_PAYMENT_ACCESS_TOKEN = os.environ.get(\n 'PEACH_PAYMENT_ACCESS_TOKEN',\n 'OGE4Mjk0MTc0ZTczNWQwYzAxNGU3OGNmMjY2YjE3OTR8cXl5ZkhDTjgzZQ=='\n )\n PEACH_PAYMENT_WEBHOOKS_DECRYPTION_KEY = os.environ.get('PEACH_PAYMENT_WEBHOOKS_DECRYPTION_KEY', \"need_real_value\")\n\n # ------------------------------------------------------------------------------------------------------------------\n\n # CRITICAL! 
FOR THE DATA LAKE\n DATALAKE_AWS_ACCOUNT_ACCESS_KEY_ID = os.environ.get(\n 'DATALAKE_AWS_ACCOUNT_ACCESS_KEY_ID')\n DATALAKE_AWS_ACCOUNT_SECRET_KEY_ID = os.environ.get(\n 'DATALAKE_AWS_ACCOUNT_SECRET_KEY_ID')\n DATALAKE_USERTRACK_DELIVERY_STREAM_NAME = os.environ.get(\n 'DATALAKE_USERTRACK_DELIVERY_STREAM_NAME')\n\n # When you need to create sqs lambda function, consider the following\n STAGES_TO_BIND_LAMBDA_WITH_AWS_RESOURCES = ['dev', 'stage', 'production']\n if isinstance(os.environ.get('STAGES_TO_BIND_LAMBDA_WITH_AWS_RESOURCES'), str):\n STAGES_TO_BIND_LAMBDA_WITH_AWS_RESOURCES += os.environ.get('STAGES_TO_BIND_LAMBDA_WITH_AWS_RESOURCES')\n\n CALCULATE_SCORE_BATCH_SIZE = os.environ.get('CALCULATE_SCORE_BATCH_SIZE', 20)\n SCORE_CALCULATE_INTERVAL = os.environ.get('SCORE_CALCULATE_INTERVAL', 20)\n CALCULATE_SCORE_CHUNK_SIZE = os.environ.get('CALCULATE_SCORE_CHUNK_SIZE', 5)\n\nsettings = Config()\n\n","sub_path":"chalicelib/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":16615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"113521235","text":"import boto.s3.connection\nimport mimetypes\nimport os\nimport shutil\nimport tempfile\nimport urllib\nimport urlparse\nimport logging\n\nfrom boto.s3.key import Key\nfrom StringIO import StringIO\n\nlogger = logging.getLogger(__name__)\n\nMAX_FILE_SPLITS = 9999\nDEFAULT_FILE_SPLIT_SIZE = 6291456\nDEFAULT_MINIMUM_SPLIT_AT_SIZE = 20000000\n\n\nclass S3Client(object):\n \"\"\"A client that helps user to send and get files from S3\"\"\"\n s3_connection = None\n bucket = None\n\n def __init__(self, bucket):\n \"\"\"\n Creates the logger and sets the bucket name that will be used throughout\n :param\n - bucket: string - The name of the bucket you will be working with\n \"\"\"\n self.bucket_name = bucket\n\n def connect(self):\n \"\"\"Start the amazon connection using the system's boto.cfg file to retrieve the credentials\"\"\"\n if self.s3_connection:\n return\n\n try:\n # - Amazon S3 credentials will use Boto's fall back config, looks for boto.cfg then environment variables\n self.s3_connection = boto.s3.connection.S3Connection(\n is_secure=False)\n self.bucket = self.s3_connection.get_bucket(\n self.bucket_name, validate=False)\n\n except Exception as s3_connection_exception:\n # - Reset the variables on failure to allow a reconnect\n self.s3_connection = None\n self.bucket = None\n message = \"Exception while connecting to S3: {0}\".format(s3_connection_exception)\n raise S3ClientException(message)\n\n def store_file(self, s3_path, file_to_store, filename, return_url=False, mime_type=None,\n chunk_at_size=DEFAULT_MINIMUM_SPLIT_AT_SIZE):\n \"\"\"\n Pushes the desired file up to S3 (e.g. log file).\n :param\n - s3_path: string - The S3 path to the folder in which you'd like to store the file\n - file_to_store: StringIO or string - The fileIO or file local file path for the file to be sent\n - filename: string - The name the file will have when on S3. Should include the file extension\n - return_url: boolean - Whether to return the path to the file on S3\n - mime_type: string - the mime type the file should be saved as, ex: text/html or image/png\n - chunk_at_size: int - the size of which the file should be split to multi-upload (default ~ 20 mb)\n :return\n - file_url: string - The path to the file on S3. 
This is returned only if return_url is set to true\n        \"\"\"\n        self.connect()\n\n        try:\n            s3_file = Key(self.bucket)\n            s3_file.key = self._generate_file_path(s3_path, filename)\n            # --- Set the Content type for the file being sent (so that it downloads properly)\n            # - content_type can be 'image/png', 'application/pdf', 'text/plain', etc.\n            # - note: mimetypes.guess_type() returns a (type, encoding) tuple, so keep only the type\n            mime_type = mimetypes.guess_type(filename)[0] if mime_type is None else mime_type\n            s3_file.set_metadata('Content-Type', mime_type)\n\n            # - Check whether the file is a buffer or a path on disk; if the file being uploaded\n            # is larger than chunk_at_size, perform a multipart upload\n            multi_part_upload_successful = False\n            if isinstance(file_to_store, str) and os.path.getsize(file_to_store) > chunk_at_size:\n                split_file_dir = None\n                multipart_file = self.bucket.initiate_multipart_upload(key_name=s3_file.key, metadata=s3_file.metadata)\n\n                try:\n                    # - Split the file into chunks\n                    split_file_dir = self._split_file(file_to_store)\n\n                    # - Upload the file parts\n                    file_count = 0\n                    for files in os.listdir(split_file_dir):\n                        file_count += 1\n                        file_part = open(os.path.join(split_file_dir, files), 'rb')\n                        multipart_file.upload_part_from_file(file_part, file_count)\n\n                    # - Complete the upload\n                    multipart_file.complete_upload()\n                    multi_part_upload_successful = True\n                except boto.s3.connection.S3ResponseError as s3_error:\n                    logger.warning(\"An S3 response error was caught while attempting to chunk and upload the file | {}\\n\"\n                                   \"Will now attempt to send the file as a whole...\".format(s3_error))\n                    multipart_file.cancel_upload()\n                except Exception as s3_error:\n                    logger.warning(\"An unexpected error was encountered while chunking and uploading the file | {}\\n\"\n                                   \"Will now attempt to send the file as a whole...\".format(s3_error))\n                    multipart_file.cancel_upload()\n                finally:\n                    # - Remove the temp folder created while splitting the file\n                    if split_file_dir:\n                        shutil.rmtree(split_file_dir)\n\n            # - Upload the file as a whole\n            if not multi_part_upload_successful:\n                file_type = type(file_to_store)\n                if file_type in [str, unicode]:\n                    s3_file.set_contents_from_filename(file_to_store)\n                else:\n                    s3_file.set_contents_from_file(file_to_store)\n\n            if return_url:\n                file_key = self.bucket.get_key(s3_file.key)\n                file_key.set_acl('public-read')\n                file_url = file_key.generate_url(0, query_auth=False)\n\n                # - Certain server side permissions might cause a x-amz-security-token parameter to be added to the url\n                # Split the url into its pieces\n                scheme, netloc, path, params, query, fragment = urlparse.urlparse(file_url)\n                # Check whether the x-amz-security-token parameter was appended to the url and remove it\n                # (parsed into query_params so the urlparse 'params' component stays intact for urlunparse)\n                query_params = urlparse.parse_qs(query)\n                if 'x-amz-security-token' in query_params:\n                    del query_params['x-amz-security-token']\n                # Rebuild the query string without the x-amz-security-token\n                query = urllib.urlencode(query_params, doseq=True)\n\n                return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))\n\n        except Exception as store_file_exception:\n            message = \"Exception while storing file on S3: {0}\".format(store_file_exception)\n            raise S3ClientException(message)\n\n    def get_file(self, s3_path, file_to_get):\n        \"\"\"\n        Stores the desired file locally (e.g. 
configuration file).\n :param\n - s3_path: string - The S3 path to the folder which contains the file\n - file_to_get: string - The name of the file you are looking for in the folder\n :return\n - retrieved_file StringIO - an IO object containing the content of the file retrieved from S3\n \"\"\"\n self.connect()\n\n try:\n if self.verify_file(s3_path, file_to_get):\n retrieved_file = StringIO()\n s3_file = self.bucket.get_key(\n self._generate_file_path(s3_path, file_to_get))\n s3_file.get_contents_to_file(retrieved_file)\n return retrieved_file\n else:\n raise S3ClientException(\"File not found in S3\")\n\n except Exception as get_file_exception:\n message = \"Exception while retrieving file from S3: {0}\".format(get_file_exception)\n raise S3ClientException(message)\n\n def verify_file(self, s3_path, file_to_verify):\n \"\"\"\n Verifies a file (e.g. configuration file) is on S3 and returns\n \"True\" or \"False\".\n :param\n - s3_path: string - The S3 path to the folder which contains the file\n - file_to_verify: string - The name of the file you are looking for in the folder\n :return\n - boolean: True if .get_key returns an instance of a Key object and False if .get_key returns None:\n \"\"\"\n self.connect()\n try:\n file_path = self._generate_file_path(s3_path, file_to_verify)\n s3_file = self.bucket.get_key(file_path)\n if s3_file:\n return True\n else:\n return False\n\n except Exception as verify_file_exception:\n message = \"Exception while verifying file on S3: {0}\".format(verify_file_exception)\n raise S3ClientException(message)\n\n def _generate_file_path(self, s3_path, file_to_store):\n \"\"\"\n Ensures that the / situation creates a proper path by removing any double slash possibilities\n :param\n - s3_path: string - The path to the folder you wish to store the file in\n - file_to_store: string - The name of the file you wish to store\n :return\n - string: The concatenated version of the /folder/filename path\n \"\"\"\n return \"{0}/{1}\".format(s3_path.strip(\"/\"), file_to_store.strip(\"/\"))\n\n def get_all_filenames_in_folder(self, path_to_folder):\n \"\"\"\n Retrieves a list of the files/keys in a folder on S3\n :param\n - path_to_folder: string - The path to the folder on S3. This should start after the bucket name\n :return\n - key_list: list - The list of keys in the folder\n \"\"\"\n self.connect()\n\n s3_folder_path = str(path_to_folder)\n key_list = self.bucket.list(prefix=s3_folder_path)\n return key_list\n\n def get_most_recent_file_from_s3_key_list(self, key_list):\n \"\"\"\n Sorts through the list of files in s3 key list object and returns the most recently modified file in the list\n :param\n - key_list: list - The list of files returned from a s3.bucket.list() operation\n :return\n - key boto.s3.Key - The most recently modified file in the key list\n \"\"\"\n most_recent_key = None\n for key in key_list:\n if not most_recent_key or key.last_modified > most_recent_key.last_modified:\n most_recent_key = key\n return most_recent_key\n\n def _split_file(self, from_file, file_chunk_size=DEFAULT_FILE_SPLIT_SIZE):\n \"\"\"\n Split a given file into smaller chunks named partXXXX into a temp at a default size of ~ 6 mb. 
The temp\n folder should be deleted after use.\n\n WARNING: You cannot split into more than 9999 files.\n\n :param\n - from_file: string - the file to split up\n - file_chunk_size: int - number of Bytes each split should contain (Should be > 5 MB for Amazon S3 minimum)\n :return:\n - temp_dir: string - temp folder location of split file, use to iterate through the split files\n \"\"\"\n if os.path.getsize(from_file) > (MAX_FILE_SPLITS * file_chunk_size):\n raise S3ClientException(\"Could not split the file.\\nError: Input file is too large!\\n\")\n elif os.path.getsize(from_file) < DEFAULT_FILE_SPLIT_SIZE:\n raise S3ClientException(\"Could not split the file.\\nError: Input file is too small!\\n\")\n\n try:\n temp_dir = tempfile.mkdtemp()\n part_num = 0\n with open(from_file, 'rb') as input_file:\n chunk = input_file.read(file_chunk_size)\n while chunk:\n part_num += 1\n open(os.path.join(temp_dir, ('part%04d' % part_num)), 'wb').write(chunk)\n chunk = input_file.read(file_chunk_size)\n\n return temp_dir\n except Exception as e:\n raise S3ClientException(\"Could not split the file.\\nError: {}\\n\".format(e))\n\n\nclass S3ClientException(Exception):\n def __init__(self, message):\n self.msg = message\n\n def __str__(self):\n return self.msg\n","sub_path":"the_ark/s3_client.py","file_name":"s3_client.py","file_ext":"py","file_size_in_byte":11958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206665174","text":"# main.py\n\nfrom controller.travel_controller import TravelController\nfrom wx import App # import the wxPython GUI package\n\n\ndef main():\n # Create a wxPython application object and the controller for it\n app = App(False)\n controller = TravelController(app)\n controller.init_ui()\n controller.populate_controls()\n controller.show()\n app.MainLoop() # enters the mainloop\n\nif __name__ == '__main__':\n main()\n","sub_path":"travel_request/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571713276","text":"import logging\nimport time\nfrom collections import OrderedDict, defaultdict\nfrom typing import Dict, Iterable, Mapping, Optional, Sequence, Set, cast\n\nimport dagster._check as check\nfrom dagster._core.assets import AssetDetails\nfrom dagster._core.definitions.events import AssetKey\nfrom dagster._core.event_api import RunShardedEventsCursor\nfrom dagster._core.events import DagsterEventType\nfrom dagster._core.events.log import EventLogEntry\nfrom dagster._core.storage.event_log.base import AssetEntry, AssetRecord\nfrom dagster._serdes import ConfigurableClass\nfrom dagster._utils import utc_datetime_from_timestamp\n\nfrom .base import (\n EventLogConnection,\n EventLogCursor,\n EventLogCursorType,\n EventLogRecord,\n EventLogStorage,\n EventRecordsFilter,\n)\n\n\nclass InMemoryEventLogStorage(EventLogStorage, ConfigurableClass):\n \"\"\"\n In memory only event log storage. 
Used by ephemeral DagsterInstance or for testing purposes.\n\n WARNING: Dagit and other core functionality will not work if this is used on a real DagsterInstance\n \"\"\"\n\n def __init__(self, inst_data=None, preload=None):\n self._logs = defaultdict(list)\n self._handlers = defaultdict(set)\n self._inst_data = inst_data\n self._wiped_asset_keys = defaultdict(float)\n if preload:\n for payload in preload:\n self._logs[payload.pipeline_run.run_id] = payload.event_list\n self._assets = defaultdict(dict)\n\n super().__init__()\n\n @property\n def inst_data(self):\n return self._inst_data\n\n @classmethod\n def config_type(cls):\n return {}\n\n @classmethod\n def from_config_value(cls, inst_data, config_value):\n return cls(inst_data)\n\n def get_records_for_run(\n self,\n run_id,\n cursor=None,\n of_type=None,\n limit=None,\n ) -> EventLogConnection:\n check.str_param(run_id, \"run_id\")\n check.opt_str_param(cursor, \"cursor\")\n of_types = (\n (\n {of_type.value}\n if isinstance(of_type, DagsterEventType)\n else (\n {\n dagster_event_type.value\n for dagster_event_type in check.set_param(\n of_type, \"of_type\", DagsterEventType\n )\n }\n )\n )\n if of_type\n else None\n )\n\n if cursor is None:\n offset = 0\n else:\n cursor = EventLogCursor.parse(cursor)\n check.invariant(\n cursor.cursor_type == EventLogCursorType.OFFSET,\n \"only offset cursors are supported with the in-memory event log\",\n )\n offset = cursor.offset()\n\n if of_types:\n events = list(\n filter(\n lambda r: r.is_dagster_event\n and r.dagster_event.event_type_value in cast(Set[str], of_types),\n self._logs[run_id],\n )\n )\n events = events[offset:]\n else:\n events = self._logs[run_id][offset:]\n\n if limit:\n events = events[:limit]\n\n return EventLogConnection(\n records=[\n EventLogRecord(storage_id=event_id + offset, event_log_entry=event)\n for event_id, event in enumerate(events)\n ],\n cursor=EventLogCursor.from_offset(offset + len(events)).to_string(),\n has_more=bool(limit and len(events) == limit),\n )\n\n def store_event(self, event):\n check.inst_param(event, \"event\", EventLogEntry)\n run_id = event.run_id\n self._logs[run_id].append(event)\n offset = len(self._logs[run_id])\n\n if (\n event.is_dagster_event\n and (\n event.dagster_event.is_step_materialization\n or event.dagster_event.is_asset_observation\n or event.dagster_event.is_asset_materialization_planned\n )\n and event.dagster_event.asset_key\n ):\n self.store_asset_event(event)\n\n # snapshot handlers\n handlers = list(self._handlers[run_id])\n\n for handler in handlers:\n try:\n handler(event, str(EventLogCursor.from_offset(offset)))\n except Exception:\n logging.exception(\"Exception in callback for event watch on run %s.\", run_id)\n\n def store_asset_event(self, event):\n asset_key = event.dagster_event.asset_key\n asset = self._assets[asset_key] if asset_key in self._assets else {\"id\": len(self._assets)}\n\n asset[\"last_materialization_timestamp\"] = utc_datetime_from_timestamp(event.timestamp)\n if event.dagster_event.is_step_materialization:\n asset[\"last_materialization\"] = event\n if (\n event.dagster_event.is_step_materialization\n or event.dagster_event.is_asset_materialization_planned\n ):\n asset[\"last_run_id\"] = event.run_id\n\n self._assets[asset_key] = asset\n\n def delete_events(self, run_id):\n del self._logs[run_id]\n\n def upgrade(self):\n pass\n\n def reindex_events(self, print_fn=None, force=False):\n pass\n\n def reindex_assets(self, print_fn=None, force=False):\n pass\n\n def wipe(self):\n self._logs = 
defaultdict(list)\n\n def watch(self, run_id, _start_cursor, callback):\n self._handlers[run_id].add(callback)\n\n def end_watch(self, run_id, handler):\n if handler in self._handlers[run_id]:\n self._handlers[run_id].remove(handler)\n\n @property\n def is_persistent(self):\n return False\n\n def get_event_records(\n self,\n event_records_filter: EventRecordsFilter,\n limit: Optional[int] = None,\n ascending: bool = False,\n ) -> Iterable[EventLogRecord]:\n after_id = (\n event_records_filter.after_cursor.id\n if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)\n else event_records_filter.after_cursor\n )\n before_id = (\n event_records_filter.before_cursor.id\n if isinstance(event_records_filter.before_cursor, RunShardedEventsCursor)\n else event_records_filter.before_cursor\n )\n\n filtered_events = []\n\n def _apply_filters(record):\n if (\n event_records_filter.event_type\n and record.dagster_event.event_type_value != event_records_filter.event_type.value\n ):\n return False\n\n if (\n event_records_filter.asset_key\n and record.dagster_event.asset_key != event_records_filter.asset_key\n ):\n return False\n\n if (\n event_records_filter.asset_key\n and self._wiped_asset_keys[event_records_filter.asset_key] > record.timestamp\n ):\n return False\n\n if (\n event_records_filter.asset_partitions\n and record.dagster_event.partition not in event_records_filter.asset_partitions\n ):\n return False\n\n if (\n event_records_filter.after_timestamp\n and record.timestamp >= event_records_filter.after_timestamp\n ):\n return False\n\n if (\n event_records_filter.before_timestamp\n and record.timestamp >= event_records_filter.before_timestamp\n ):\n return False\n return True\n\n for records in self._logs.values():\n filtered_events += list(filter(_apply_filters, records))\n\n event_records = [\n EventLogRecord(storage_id=event_id, event_log_entry=event)\n for event_id, event in enumerate(filtered_events)\n if (after_id is None or event_id > after_id)\n and (before_id is None or event_id < before_id)\n ]\n\n event_records = sorted(event_records, key=lambda x: x.storage_id, reverse=not ascending)\n\n if limit:\n event_records = event_records[:limit]\n\n return event_records\n\n def get_asset_records(\n self, asset_keys: Optional[Sequence[AssetKey]] = None\n ) -> Iterable[AssetRecord]:\n records = []\n for asset_key, asset in self._assets.items():\n if asset_keys is None or asset_key in asset_keys:\n wipe_timestamp = self._wiped_asset_keys.get(asset_key)\n if (\n not wipe_timestamp\n or wipe_timestamp < asset.get(\"last_materialization_timestamp\").timestamp()\n ):\n records.append(\n AssetRecord(\n storage_id=asset[\"id\"],\n asset_entry=AssetEntry(\n asset_key=asset_key,\n last_materialization=asset.get(\"last_materialization\"),\n last_run_id=asset.get(\"last_run_id\"),\n asset_details=AssetDetails(last_wipe_timestamp=wipe_timestamp)\n if wipe_timestamp\n else None,\n ),\n )\n )\n return records\n\n def has_asset_key(self, asset_key: AssetKey) -> bool:\n for records in self._logs.values():\n for record in records:\n if (\n record.is_dagster_event\n and record.dagster_event.asset_key\n and record.dagster_event.asset_key == asset_key\n and self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp\n ):\n return True\n return False\n\n def all_asset_keys(self):\n asset_records = []\n for records in self._logs.values():\n asset_records += [\n record\n for record in records\n if record.is_dagster_event and record.dagster_event.asset_key\n ]\n\n asset_events = [\n 
record.dagster_event\n for record in sorted(asset_records, key=lambda x: x.timestamp, reverse=True)\n if self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp\n ]\n asset_keys = OrderedDict()\n for event in asset_events:\n asset_keys[\"/\".join(event.asset_key.path)] = event.asset_key\n return list(asset_keys.values())\n\n def get_latest_materialization_events(\n self, asset_keys: Sequence[AssetKey]\n ) -> Mapping[AssetKey, Optional[EventLogEntry]]:\n check.list_param(asset_keys, \"asset_keys\", of_type=AssetKey)\n\n asset_records = []\n for records in self._logs.values():\n asset_records += [\n record\n for record in records\n if record.is_dagster_event\n and record.dagster_event_type == DagsterEventType.ASSET_MATERIALIZATION\n and record.dagster_event.asset_key\n and record.dagster_event.asset_key in asset_keys\n ]\n\n materializations_by_key = OrderedDict()\n for record in sorted(asset_records, key=lambda x: x.timestamp, reverse=True):\n if (\n self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp\n and record.dagster_event.asset_key not in materializations_by_key\n ):\n materializations_by_key[record.dagster_event.asset_key] = record\n\n return materializations_by_key\n\n def get_asset_run_ids(self, asset_key):\n asset_run_ids = set()\n for run_id, records in self._logs.items():\n for record in records:\n if (\n record.is_dagster_event\n and record.dagster_event.asset_key == asset_key\n and self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp\n ):\n asset_run_ids.add(run_id)\n break\n\n return list(asset_run_ids)\n\n def wipe_asset(self, asset_key):\n check.inst_param(asset_key, \"asset_key\", AssetKey)\n self._wiped_asset_keys[asset_key] = time.time()\n if asset_key in self._assets:\n self._assets[asset_key][\"last_run_id\"] = None\n\n def get_materialization_count_by_partition(\n self, asset_keys: Sequence[AssetKey]\n ) -> Mapping[AssetKey, Mapping[str, int]]:\n check.list_param(asset_keys, \"asset_keys\", of_type=AssetKey)\n\n materialization_count_by_key_partition: Dict[AssetKey, Dict[str, int]] = {}\n for records in self._logs.values():\n for record in records:\n if (\n record.is_dagster_event\n and record.dagster_event.asset_key\n and record.dagster_event.asset_key in asset_keys\n and record.dagster_event.event_type_value\n == DagsterEventType.ASSET_MATERIALIZATION.value\n and record.dagster_event.partition\n and self._wiped_asset_keys[record.dagster_event.asset_key] < record.timestamp\n ):\n asset_key = record.dagster_event.asset_key\n if asset_key not in materialization_count_by_key_partition:\n materialization_count_by_partition: Dict[str, int] = {}\n materialization_count_by_key_partition[\n asset_key\n ] = materialization_count_by_partition\n\n partition = record.dagster_event.partition\n if partition not in materialization_count_by_key_partition[asset_key]:\n materialization_count_by_key_partition[asset_key][partition] = 0\n materialization_count_by_key_partition[asset_key][partition] += 1\n\n for asset_key in asset_keys:\n if asset_key not in materialization_count_by_key_partition:\n materialization_count_by_key_partition[asset_key] = {}\n\n return materialization_count_by_key_partition\n","sub_path":"python_modules/dagster/dagster/_core/storage/event_log/in_memory.py","file_name":"in_memory.py","file_ext":"py","file_size_in_byte":14337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"252379135","text":"from datetime import datetime, date\nimport 
pytest\nfrom unittest import mock\nfrom typing import Dict\n\nfrom flask_app.infra.darksky import DarkSkyRepository\nfrom flask_app.models.weather import WeatherCondition\n\nON_MAKING = 1\n\n\nclass TestDarkSkyRepository(object):\n\n @classmethod\n def setup_class(cls):\n cls.dsr = DarkSkyRepository()\n cls.patcher1 = mock.patch('flask_app.utils.util.to_latlng')\n cls.patcher2 = mock.patch('flask_app.infra.darksky.DarkSkyRepository.get_response_from_darksky_api')\n\n @classmethod\n def teardown_class(cls):\n del cls.dsr\n del cls.patcher1\n del cls.patcher2\n\n def setup_method(self):\n pass\n\n @pytest.fixture()\n def dark_sky_dummy_data(self):\n yield {'timezone': 'Asia/Tokyo',\n 'daily': {'data': [{\n 'temperatureMax': 1.1,\n 'temperatureMin': 1.2,\n 'cloudCover': 1.3,\n 'humidity': 1.4,\n 'pressure': 1.5,\n 'ozone': 1.6,\n 'precipProbability': 1.7,\n 'date': datetime.today(),\n 'time': 15247100000,\n }]\n }\n }\n\n ### get_repository_name ###\n def test_get_repository_name(self):\n assert self.dsr.get_repository_name() == 'DarkSkyRepository'\n\n ### get_past_condition_by_city ###\n # city name: empty\n def test_get_past_condition_by_city_no_city(self):\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"\", from_date=date.today(),\n to_date=date.today())\n assert err == 'no city name'\n assert conditions == []\n assert location == {}\n\n # city name: does not exist\n def test_get_past_condition_by_city_not_exist_city(self):\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"ほげほげ\", from_date=date.today(),\n to_date=date.today())\n assert err == 'not exist city name'\n assert conditions == []\n assert location == {}\n\n # city -> latitude/longitude conversion (googleMap error)\n def test_get_past_condition_by_city_to_latlng_failure(self):\n mock_to_latlng = self.patcher1.start()\n mock_to_latlng.return_value = 'access failure', {}\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"東京\", from_date=date.today(),\n to_date=date.today())\n assert err == 'access failure'\n assert conditions == []\n assert location == {}\n self.patcher1.stop()\n\n # period: from_date is None\n def test_get_past_condition_by_city_from_date_none(self, request, dark_sky_dummy_data):\n mock_use = request.config.getoption('--mock-use')\n if mock_use == 'True':\n self.dsr.get_response_from_darksky_api = mock.MagicMock(return_value=('', dark_sky_dummy_data))\n\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"東京\", from_date=None,\n to_date=date.today())\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n # assert isinstance(location, Dict[str, float])\n\n # period: to_date is None\n def test_get_past_condition_by_city_to_date_none(self, request, dark_sky_dummy_data):\n mock_use = request.config.getoption('--mock-use')\n if mock_use == 'True':\n print('##### darksky_api mock used. ######')\n mock_get_response_from_darksky_api = self.patcher2.start()\n mock_get_response_from_darksky_api.return_value = ('', dark_sky_dummy_data)\n\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"東京\", from_date=date.today(),\n to_date=None)\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n\n if mock_use == 'True':\n self.patcher2.stop()\n\n # city name: valid, period: 1 day\n def test_get_past_condition_by_city_success(self, request, dark_sky_dummy_data):\n mock_use = request.config.getoption('--mock-use')\n if mock_use == 'True':\n print('##### darksky_api mock used. 
######')\n mock_get_response_from_darksky_api = self.patcher2.start()\n mock_get_response_from_darksky_api.return_value = ('', dark_sky_dummy_data)\n\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"東京\", from_date=date.today(),\n to_date=date.today())\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n\n if mock_use == 'True':\n self.patcher2.stop()\n\n # city name: valid, period: 30 days\n @pytest.mark.skipif(ON_MAKING == 1, reason='skipped while under construction')\n def test_get_past_condition_by_city_success_multi_days(self, request, dark_sky_dummy_data):\n mock_use = request.config.getoption('--mock-use')\n if mock_use == 'True':\n print('##### darksky_api mock used. ######')\n mock_get_response_from_darksky_api = self.patcher2.start()\n mock_get_response_from_darksky_api.return_value = ('', dark_sky_dummy_data)\n\n err, conditions, location = self.dsr.get_past_condition_by_city(city_name=\"東京\", from_date=date(2019, 11, 1),\n to_date=date(2019, 11, 30))\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n assert len(conditions) == 30\n\n if mock_use == 'True':\n self.patcher2.stop()\n\n ### get_past_condition_by_latlng ###\n # no latitude/longitude\n def test_get_past_condition_by_latlng_no_latlng(self):\n assert self.dsr.get_past_condition_by_latlng(latitude=None, longitude=139) == ('no latitude', [])\n assert self.dsr.get_past_condition_by_latlng(latitude=35, longitude=None) == ('no longitude', [])\n\n # darksky api error\n def test_get_past_condition_by_latlng_api_error(self):\n mock_get_response_from_darksky_api = self.patcher2.start()\n mock_get_response_from_darksky_api.return_value = ('HTTP Error', {})\n assert self.dsr.get_past_condition_by_latlng(latitude=35, longitude=139) == ('HTTP Error', [])\n mock_get_response_from_darksky_api.return_value = ('URL Error', {})\n assert self.dsr.get_past_condition_by_latlng(latitude=35, longitude=139) == ('URL Error', [])\n self.patcher2.stop()\n\n # normal case, 1 day\n def test_get_past_condition_by_latlng(self):\n err, conditions = self.dsr.get_past_condition_by_latlng(latitude=35, longitude=139)\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n\n # normal case, multiple days\n @pytest.mark.skipif(ON_MAKING == 1, reason='skipped while under construction')\n def test_get_past_condition_by_latlng_multi_days(self):\n err, conditions = self.dsr.get_past_condition_by_latlng(latitude=35, longitude=139,\n from_date=date(2019, 11, 1),\n to_date=date(2019, 11, 30))\n assert err == 'success'\n assert isinstance(conditions[0], WeatherCondition)\n assert len(conditions) == 30\n\n","sub_path":"source/application/tests/infra/test_darksky.py","file_name":"test_darksky.py","file_ext":"py","file_size_in_byte":7698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"353300946","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.io as sio\nimport scipy.sparse as sp\nimport math\nimport entropy_estimators as ee\n\ndef num_bins_calculator(num_points):\n return round(np.sqrt(num_points/5))\n\n\ndef calc_entropy(x,bins):\n # normalize the counts with the same binning so that c_x sums to 1\n c_x = np.histogram(x, bins)[0]/sum(np.histogram(x, bins)[0])\n\n entropy=0\n for prob in c_x:\n if prob!=0:\n\n entropy += - prob * math.log(prob, 2)\n\n\n return entropy\n\n\ndef calc_MI(x, y, bins):\n c_xy = np.histogram2d(x, y, bins)[0]\n mi = mutual_info_score(None, None, contingency=c_xy)\n return mi\n\ndef mutual_info_score(labels_true, labels_pred, contingency=None):\n\n # if contingency is None:\n # labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n # contingency = 
contingency_matrix(labels_true, labels_pred, sparse=True)\n\n if isinstance(contingency, np.ndarray):\n # For an array\n nzx, nzy = np.nonzero(contingency)\n nz_val = contingency[nzx, nzy]\n elif sp.issparse(contingency):\n # For a sparse matrix\n nzx, nzy, nz_val = sp.find(contingency)\n else:\n raise ValueError(\"Unsupported type for 'contingency': %s\" %\n type(contingency))\n\n contingency_sum = contingency.sum()\n pi = np.ravel(contingency.sum(axis=1))\n pj = np.ravel(contingency.sum(axis=0))\n log_contingency_nm = np.log(nz_val)\n contingency_nm = nz_val / contingency_sum\n # Don't need to calculate the full outer product, just for non-zeroes\n outer = pi.take(nzx) * pj.take(nzy)\n log_outer = -np.log(outer) + np.log(pi.sum()) + np.log(pj.sum())\n mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +\n contingency_nm * log_outer)\n return mi.sum()\n\nif __name__ == \"__main__\":\n from os import listdir\n\n names = listdir('output_files')\n # for name in names:\n\n\n mat_contents = sio.loadmat('output_files/' + names[0])\n color_mat = mat_contents['colors']\n fig, ax = plt.subplots()\n fig2, ax1 = plt.subplots()\n ax2 = ax.twinx()\n t_len = mat_contents['colors'].shape[1]\n print(t_len)\n t_vect = np.linspace(0, (t_len - 1) * 3, t_len)\n # ax3 = fig2.add_subplot(111, label=\"1\")\n entropy_mat = np.zeros((3, t_len))\n mi_mat = np.zeros((3, t_len))\n\n # for cell in range(color_mat.shape[0]):\n # ax2.plot(t_vect, color_mat[cell, :, 0], linewidth=3, color='orange', alpha=0.25)\n # # ax3.plot(color_mat[cell, :, 1], color_mat[cell, :, 2], linewidth=3, color='orange', alpha=0.25)\n\n for j in np.arange(3):\n\n mat_contents = sio.loadmat('output_files/' + names[j])\n color_mat = mat_contents['colors']\n t_len = color_mat.shape[1]\n print(t_len)\n # for cell in range(color_mat.shape[0]):\n # ax.plot(t_vect,color_mat[cell, :, 0],linewidth=3,color='teal',alpha=0.25)\n bins = num_bins_calculator(color_mat.shape[0])\n entropies = []\n mis = []\n for t in range(t_len):\n entropies.append(calc_entropy(color_mat[:, t, 0], 10))\n\n mis.append(calc_MI(color_mat[:, t, 1], color_mat[:, t, 2], bins))\n\n\n entropy_mat[j, :] = entropies\n mi_mat[j, :] = mis\n\n entropy_mat2 = np.zeros((3, t_len))\n mi_mat2 = np.zeros((3, t_len))\n for k in np.arange(3):\n\n mat_contents = sio.loadmat('output_files/' + names[k+3])\n color_mat = mat_contents['colors']\n t_len = color_mat.shape[1]\n print(t_len)\n # for cell in range(color_mat.shape[0]):\n # ax.plot(t_vect,color_mat[cell, :, 0],linewidth=3,color='teal',alpha=0.25)\n bins = num_bins_calculator(color_mat.shape[0])\n entropies = []\n mis = []\n for t in range(t_len):\n entropies.append(calc_entropy(color_mat[:, t, 0], 10))\n\n mis.append(calc_MI(color_mat[:, t, 1], color_mat[:, t, 2], bins))\n\n entropy_mat2[k, :] = entropies\n mi_mat2[k, :] = mis\n\n ax.errorbar(t_vect, np.mean(entropy_mat, 0), yerr=np.std(entropy_mat, 0), linewidth=3, color='teal')\n ax.errorbar(t_vect, np.mean(entropy_mat2, 0), yerr=np.std(entropy_mat2, 0), linewidth=3, color='orange')\n\n ax1.errorbar(t_vect, np.mean(mi_mat, 0), yerr=np.std(mi_mat, 0), linewidth=3, color='teal')\n ax1.errorbar(t_vect, np.mean(mi_mat2, 0), yerr=np.std(mi_mat2, 0), linewidth=3, color='orange')\n ax1.set_ylabel('Mutual Information (bits)')\n ax1.set_xlabel('Time (Minutes)')\n ax.set_xlim([-5, 253])\n ax1.set_xlim([-5, 253])\n ax.set_xlabel('time (minutes)')\n ax.set_ylabel('Entropy (bits)')\n ax2.set_ylabel('Flourescence (AU)')\n fig.savefig('figures/2a.png', bbox_inches='tight')\n\n 
fig2.savefig('figures/2b.png', bbox_inches='tight')","sub_path":"comparative_plots.py","file_name":"comparative_plots.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513892541","text":"import logging, os\nimport argparse\n# import config\nfrom config.edict_config import config\nimport mxnet as mx\nfrom core.scheduler import multi_factor_scheduler\nfrom core.solver import Solver\nfrom core.memonger_v2 import search_plan_to_layer\nfrom core.callback import DetailSpeedometer\nfrom data import *\nfrom symbol import *\nimport datetime\nimport pprint\nfrom core.scheduler import WarmupMultiFactorScheduler\n\ndef main(config):\n output_dir = \"experiments/\" + config.output_dir\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s %(name)s %(levelname)s %(message)s',\n datefmt='%m-%d %H:%M',\n filename='{}/{}.log'.format(output_dir, config.model_prefix),\n filemode='a')\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n logging.info(config)\n\n # set up environment\n devs = [mx.gpu(int(i)) for i in config.gpu_list]\n kv = mx.kvstore.create(config.kv_store)\n\n # set up iterator and symbol\n # iterator\n if config.use_multiple_iter is True:\n train, val, num_examples = multiple_imagenet_iterator(data_dir=config.data_dir,\n batch_size=config.batch_size,\n num_parts=2,\n image_shape=tuple(config.image_shape),\n data_nthread=config.data_nthreads)\n # elif config.use_dali_iter is True:\n # train, val, num_examples = get_dali_iter(data_dir=config.data_dir,\n # batch_size=config.batch_size,\n # kv=kv,\n # image_shape=tuple(config.image_shape),\n # num_gpus=len(devs))\n else:\n if config.dataset == 'imagenet':\n train, val, num_examples = imagenet_iterator(data_dir=config.data_dir,\n batch_size=config.batch_size,\n kv=kv,\n image_shape=tuple(config.image_shape))\n elif config.dataset == 'cifar10':\n train, val, num_examples = cifar10_iterator(data_dir=config.data_dir,\n batch_size=config.batch_size,\n kv=kv,\n image_shape=tuple(config.image_shape))\n elif config.dataset == 'cifar100':\n train, val, num_examples = cifar100_iterator(data_dir=config.data_dir,\n batch_size=config.batch_size,\n kv=kv,\n image_shape=tuple(config.image_shape))\n logging.info(train)\n logging.info(val)\n\n data_names = ('data',)\n label_names = ('softmax_label',)\n data_shapes = [('data', tuple([config.batch_size] + config.image_shape))]\n label_shapes = [('softmax_label', (config.batch_size,))]\n\n if config.network in ['resnet', 'resnet_cifar10', 'preact_resnet']:\n symbol = eval(config.network)(units=config.units,\n num_stage=config.num_stage,\n filter_list=config.filter_list,\n num_classes=config.num_classes,\n data_type=config.data_type,\n bottle_neck=config.bottle_neck,\n grad_scale=config.grad_scale,\n memonger=config.memonger,\n dataset_type=config.dataset)\n elif config.network == 'resnet_mxnet':\n symbol = eval(config.network)(units=config.units,\n num_stage=config.num_stage,\n filter_list=config.filter_list,\n num_classes=config.num_classes,\n data_type=config.data_type,\n bottle_neck=config.bottle_neck,\n grad_scale=config.grad_scale)\n elif config.network == 'resnext' or config.network == 'resnext_cyt':\n symbol = eval(config.network)(units=config.units,\n 
num_stage=config.num_stage,\n filter_list=config.filter_list,\n num_classes=config.num_classes,\n data_type=config.data_type,\n num_group=config.num_group,\n bottle_neck=config.bottle_neck)\n elif config.network == 'vgg16' or config.network == 'mobilenet' or config.network == 'shufflenet':\n symbol = eval(config.network)(num_classes=config.num_classes)\n elif config.network == \"cifar10_sym\":\n symbol = eval(config.network)()\n\n\n if config.fix_bn:\n from core.graph_optimize import fix_bn\n print(\"********************* fix bn ***********************\")\n symbol = fix_bn(symbol)\n\n if config.quantize_flag:\n assert config.data_type == \"float32\", \"current quantization op only supports fp32 mode.\"\n from core.graph_optimize import attach_quantize_node\n worker_data_shape = dict(data_shapes + label_shapes)\n _, out_shape, _ = symbol.get_internals().infer_shape(**worker_data_shape)\n out_shape_dictionary = dict(zip(symbol.get_internals().list_outputs(), out_shape))\n symbol = attach_quantize_node(symbol, out_shape_dictionary,\n config.quantize_setting[\"weight\"], config.quantize_setting[\"act\"],\n config.quantized_op, config.skip_quantize_counts)\n # symbol.save(\"attach_quant.json\")\n # raise NotImplementedError\n\n # symbol.save(config.network + \".json\")\n # raise NotImplementedError\n # mx.viz.print_summary(symbol, {'data': (1, 3, 224, 224)})\n\n # memonger\n if config.memonger:\n # infer shape\n data_shape_dict = dict(train.provide_data + train.provide_label)\n per_gpu_data_shape_dict = {}\n for key in data_shape_dict:\n per_gpu_data_shape_dict[key] = (config.batch_per_gpu,) + data_shape_dict[key][1:]\n\n # if config.network == 'resnet':\n # last_block = 'conv3_1_relu'\n # if kv.rank == 0:\n # logging.info(\"resnet do memonger up to {}\".format(last_block))\n # else:\n # last_block = None\n last_block = 'stage4_unit1_sc'\n input_dtype = {k: 'float32' for k in per_gpu_data_shape_dict}\n symbol = search_plan_to_layer(symbol, last_block, 1000, type_dict=input_dtype, **per_gpu_data_shape_dict)\n\n # train\n epoch_size = max(int(num_examples / config.batch_size / kv.num_workers), 1)\n if 'dist' in config.kv_store and 'async' not in config.kv_store \\\n and config.use_multiple_iter is False and config.use_dali_iter is False:\n logging.info('Resizing training data to {} batches per machine'.format(epoch_size))\n # resize train iter to ensure each machine has same number of batches per epoch\n # if not, dist_sync can hang at the end with one machine waiting for other machines\n train = mx.io.ResizeIter(train, epoch_size)\n\n if config.warmup is not None and config.warmup is True:\n lr_epoch = [int(epoch) for epoch in config.lr_step]\n lr_epoch_diff = [epoch - config.begin_epoch for epoch in lr_epoch if epoch > config.begin_epoch]\n lr = config.lr * (config.lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))\n lr_iters = [int(epoch * epoch_size) for epoch in lr_epoch_diff]\n logging.info('warmup lr:{}, warm_epoch:{}, warm_step:{}'.format(\n config.warmup_lr, config.warm_epoch, int(config.warm_epoch * epoch_size)))\n if config.lr_scheduler == 'poly':\n logging.info('PolyScheduler lr:{}'.format(lr))\n lr_scheduler = mx.lr_scheduler.PolyScheduler(int(epoch_size*config.num_epoch), base_lr=lr, pwr=2, final_lr=0,\n warmup_steps=int(config.warm_epoch * epoch_size),\n warmup_begin_lr=0, warmup_mode='linear')\n else:\n logging.info('WarmupMultiFactorScheduler lr:{}, epoch size:{}, lr_epoch_diff:{}, '\n 'lr_iters:{}'.format(lr, epoch_size, lr_epoch_diff, lr_iters))\n lr_scheduler = 
WarmupMultiFactorScheduler(base_lr=lr, step=lr_iters, factor=config.lr_factor,\n warmup=True, warmup_type='gradual',\n warmup_lr=config.warmup_lr, warmup_step=int(config.warm_epoch * epoch_size))\n elif config.lr_step is not None:\n lr_epoch_diff = [epoch - config.begin_epoch for epoch in config.lr_step if epoch > config.begin_epoch]\n lr = config.lr * (config.lr_factor **(len(config.lr_step) - len(lr_epoch_diff)))\n lr_scheduler = multi_factor_scheduler(config.begin_epoch, epoch_size, step=config.lr_step,\n factor=config.lr_factor)\n step_ = [epoch * epoch_size for epoch in lr_epoch_diff]\n logging.info('multi_factor_scheduler lr:{}, epoch size:{}, epoch diff:{}, '\n 'step:{}'.format(lr, epoch_size, lr_epoch_diff, step_))\n else:\n lr = config.lr\n lr_scheduler = None\n print(\"begin epoch:{}, num epoch:{}\".format(config.begin_epoch, config.num_epoch))\n\n optimizer_params = {\n 'learning_rate': lr,\n 'wd': config.wd,\n 'lr_scheduler': lr_scheduler,\n 'multi_precision': config.multi_precision}\n # Only a limited number of optimizers have 'momentum' property\n has_momentum = {'sgd', 'dcasgd', 'nag', 'signum', 'lbsgd'}\n if config.optimizer in has_momentum:\n optimizer_params['momentum'] = config.momentum\n # A limited number of optimizers have a warmup period\n has_warmup = {'lbsgd', 'lbnag'}\n if config.optimizer in has_warmup:\n optimizer_params['updates_per_epoch'] = epoch_size\n optimizer_params['begin_epoch'] = config.begin_epoch\n optimizer_params['batch_scale'] = 1.0\n optimizer_params['warmup_strategy'] = 'lars'\n optimizer_params['warmup_epochs'] = config.warm_epoch # not work whne warmup_strategy is 'lars'\n optimizer_params['num_epochs'] = config.num_epoch\n\n eval_metric = ['acc']\n if config.dataset == \"imagenet\":\n eval_metric.append(mx.metric.create('top_k_accuracy', top_k=5))\n\n solver = Solver(symbol=symbol,\n data_names=data_names,\n label_names=label_names,\n data_shapes=data_shapes,\n label_shapes=label_shapes,\n logger=logging,\n context=devs,\n # for evaluate fold bn\n config=config)\n epoch_end_callback = mx.callback.do_checkpoint(os.path.join(output_dir, config.model_prefix))\n batch_end_callback = mx.callback.Speedometer(config.batch_size, config.frequent)\n # batch_end_callback = DetailSpeedometer(config.batch_size, config.frequent)\n initializer = mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2)\n arg_params = None\n aux_params = None\n if config.retrain:\n print('******************** retrain load pretrain model from: {}'.format(config.model_load_prefix))\n _, arg_params, aux_params = mx.model.load_checkpoint(\"{}\".format(config.model_load_prefix),\n config.model_load_epoch)\n solver.fit(train_data=train,\n eval_data=val,\n eval_metric=eval_metric,\n epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback,\n initializer=initializer,\n arg_params=arg_params,\n aux_params=aux_params,\n optimizer=config.optimizer,\n optimizer_params=optimizer_params,\n begin_epoch=config.begin_epoch,\n num_epoch=config.num_epoch,\n kvstore=kv,\n allow_missing=config.allow_missing)\n\nif __name__ == '__main__':\n # args = parse_args()\n now = datetime.datetime.now()\n date = '{}_{:0>2}_{:0>2}'.format(now.year, now.month, now.day)\n\n # set_config(args)\n main(config)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":13084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"468873593","text":"#!/usr/local/bin/python3\n\"\"\"\nPlot the % Population of 
Somalia that are categorized in each IPC level over\ntime.\nThe IPC (Integrated Food Security Phase Classification) has 5 levels:\n Level 1: Minimal - Level 2: Stressed - Level 3: Crisis - Level 4: Emergency\n - Level 5: Famine\n\"\"\"\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.style.use('seaborn-white')\n\n# input_file = 'Dev/IPC Population Figures.xlsx'\ninput_file = 'IPC Population Figures Tracking Sheet.xlsx'\ncountry = \"Somalia\"\nmax_pop = 15500000\ncol_heads = ['country', 'pop', 'date', 'rev_pop', '%pop', 'period',\n 'IPC1-pop', 'IPC1-%rev_pop', 'IPC2-pop', 'IPC2-%rev_pop',\n 'IPC3-pop', 'IPC3-%rev_pop', 'IPC4-pop', 'IPC4-%rev_pop',\n 'IPC5-pop', 'IPC5-%rev_pop', 'IPC3>-pop', 'IPC3>-%rev_pop']\n\n\n# plot_cols = ['date', 'IPC1-pop', 'IPC2-pop', 'IPC3-pop', 'IPC4-pop',\n# 'IPC5-pop']\n\nplot_cols = ['date', 'IPC1-pop']\n\nexcel_dump_df = pd.read_excel(input_file, header=[2], usecols=\"B,D:T\")\n# print(excel_dump_df.head())\nprint(excel_dump_df.columns)\nsomalia_ipc = excel_dump_df.loc[excel_dump_df['Country'] == country]\n# print(\"columns:\", len(somalia_ipc.columns), \"rows:\", len(somalia_ipc.index))\nprint(somalia_ipc)\nsomalia_ipc.columns = col_heads\nprint(somalia_ipc)\nprint(somalia_ipc.loc[:, 'period'])\nsomalia_ipc_chart = somalia_ipc[plot_cols].copy()\nprint(somalia_ipc_chart)\n\nfor idx, row in somalia_ipc_chart.iterrows():\n dt = row['date'].to_pydatetime()\n print('month:', dt.month, type(dt.year), 'year:', dt.year, type(dt.year))\n dt_str = str(dt.month).capitalize() + '-' + str(dt.year)\n print(dt_str, type(dt_str))\n somalia_ipc_chart.ix[idx, 'date'] = dt_str\n\nprint(somalia_ipc_chart)\n\nsomalia_ipc_chart = somalia_ipc_chart.set_index('date')\n\nprint(somalia_ipc_chart)\n\nsomalia_ipc_chart = somalia_ipc_chart.transpose()\n\nprint(somalia_ipc_chart)\n\n# ============================\n\n# somalia_ipc_chart.plot.hist(grid=True, alpha=0.5, # normed=True,\n# histtype='stepfilled', rwidth=0.9,\n# edgecolor='none') # color='steelblue',\n\n# plt.title('Somalia IPC Level Population Count')\n# plt.xlabel('IPC Level')\n# plt.ylabel('Population')\n# # plt.xticks(somalia_ipc_chart['date'])\n# plt.grid(axis='y', alpha=0.75)\n# # plt.text(23, 45, r'$\\mu=15, b=3$')\n\n\n# =============================\n\n\nsns.set(style=\"whitegrid\")\n\ng = sns.catplot(data=somalia_ipc_chart,\n kind=\"bar\", palette=\"muted\")\n\n# g = sns.relplot(kind=\"line\", data=somalia_ipc_chart)\n\ng.despine(left=True)\ng.set_ylabels(\"survival probability\")\n\n\n# ==============================\n\n# sns.kdeplot(somalia_ipc_chart, shade=True)\n\n# chart = sns.load_dataset(somalia_ipc_chart)\n# ax = sns.lineplot(data=chart)\n\n# x = np.linspace(0, 10, 100)\n\n# plt.plot(x, np.sin(x))\n# plt.plot(x, np.cos(x))\n\n# plt.savefig('Dev/fig.png')\n\n# fig, ax = plt.subplots(2)\n# ax[0].plot(x, np.sin(x))\n# ax[1].plot(x, np.cos(x))\n\n# data = np.random.rand(1000)\n\n# plt.hist(data)\n# plt.hist(data, bins=30, normed=True, alpha=0.5, histtype='stepfilled',\n# color='steelblue', edgecolor='none')\n\nplt.show() # should be used only once in a script\n","sub_path":"somalia/somalia_IPC_chart.py","file_name":"somalia_IPC_chart.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"303621448","text":"from django.db import transaction as django_db_transaction\r\n\r\nfrom courses import models as courses_models\r\nfrom grades import models as 
grades_models\r\nfrom grades import non_persistent_models as grades_non_persistent_models\r\n\r\n\r\nclass ExamService(object):\r\n model = grades_models.Exam\r\n\r\n @classmethod\r\n @django_db_transaction.atomic\r\n def get_or_create(\r\n cls,\r\n params: grades_non_persistent_models.CreateExamParams,\r\n ) -> grades_models.Exam:\r\n exam, _ = cls.model.objects.get_or_create(\r\n defaults={},\r\n **{\r\n grades_models.Exam.course_group.field.name: params.course_group,\r\n grades_models.Exam.moed.field.name: params.moed,\r\n grades_models.Exam.students_count.field.name: params.students_count,\r\n grades_models.Exam.failures_count.field.name: params.failures_count,\r\n }\r\n )\r\n\r\n grades_models.ExamStatistics.objects.get_or_create(\r\n defaults={\r\n grades_models.ExamStatistics.mean.field.name: params.mean,\r\n grades_models.ExamStatistics.median.field.name: params.median,\r\n grades_models.ExamStatistics.standard_deviation.field.name: params.standard_deviation,\r\n },\r\n **{\r\n grades_models.ExamStatistics.exam.field.name: exam,\r\n },\r\n )\r\n\r\n grade_ranges = tuple(\r\n grades_models.ExamGradeRange(**{\r\n grades_models.ExamGradeRange.exam.field.name: exam,\r\n grades_models.ExamGradeRange.lowest_grade.field.name: grade_range_params.lowest_grade,\r\n grades_models.ExamGradeRange.highest_grade.field.name: grade_range_params.highest_grade,\r\n grades_models.ExamGradeRange.students_in_range.field.name: grade_range_params.students_in_range,\r\n })\r\n for grade_range_params in params.grade_ranges\r\n )\r\n grades_models.ExamGradeRange.objects.bulk_create(grade_ranges, ignore_conflicts=True)\r\n\r\n return exam\r\n","sub_path":"grades/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478806919","text":"import re\nimport itertools\n\nimport lxml.html\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom legistar.base import Base\nfrom legistar.jurisdictions.utils import try_jxn_delegation\n\n\nclass Form(Base):\n '''Handles posting data to a form and paging through the results.\n '''\n skip_first_submit = False\n\n def __init__(self, view):\n self.view = self.inherit_chainmap_from(view)\n # We need to seed the client with the ASP.NET viewstate fields\n # before trying to post to the form. 
This does that:\n doc = self.doc\n self.count = itertools.count(2)\n self._submitted_first = False\n\n @property\n def formdata(self):\n return dict(self.doc.forms[0].fields)\n\n @try_jxn_delegation\n def before_first_submit(self):\n '''This function runs before the first submit.\n '''\n pass\n\n @try_jxn_delegation\n def submit(self, formdata=None, extra_headers=None):\n # Call the pre-submit hook.\n if not self._submitted_first:\n self.before_first_submit()\n self._submitted_first = True\n\n # Then submit the form.\n self.debug('%r is fetching %s', self, self.url)\n resp = self.cfg.client.post(self.url, formdata, extra_headers)\n doc = lxml.html.fromstring(resp.text)\n doc.make_links_absolute(self.url)\n self.doc = doc\n\n def get_query(self, **kwargs):\n '''This function returns the dictionary of POST data\n the form requires.\n '''\n raise NotImplementedError()\n\n @try_jxn_delegation\n def submit_next_page(self):\n '''Submits the next page in the search results.\n '''\n js = self.doc.xpath(self.cfg.PGN_NEXT_PAGE_XPATH)\n if not js:\n # There are no more pages.\n msg = 'No more pages of search results.'\n self.info(msg)\n raise StopIteration()\n\n # Parse the pagination control id name thingy.\n event_target = js.split(\"'\")[1]\n get_query = getattr(self, 'get_pagination_query', self.get_query)\n\n # Include the pagination target thingy in the query this time.\n formdata = get_query(__EVENTTARGET=event_target)\n\n # Blab.\n msg = '%r requesting page %d of search results: %r'\n formdata_copy = dict(formdata)\n formdata_copy.pop('__VIEWSTATE', None)\n formdata_copy.pop('__EVENTVALIDATION', None)\n self.info(msg, self, next(self.count), formdata_copy)\n\n # Re-submit the form.\n extra_headers = dict(referer=self.url)\n self.submit(formdata, extra_headers)\n\n @try_jxn_delegation\n def __iter__(self):\n yield from self.gen_documents()\n\n def gen_documents(self):\n Table = self.view.viewtype_meta.Table\n if self.skip_first_submit:\n pass\n else:\n self.submit(self.get_query())\n yield from self.make_child(Table, view=self.view)\n while True:\n self.submit_next_page()\n yield from self.make_child(Table, view=self.view)\n\n\nclass FirefoxForm(Form):\n\n def gen_docs_from_lxmldoc(self):\n Table = self.view.viewtype_meta.Table\n doc = self.lxmlize()\n doc.make_links_absolute(self.url)\n self.doc = doc\n table = self.make_child(Table, view=self.view)\n yield from table\n\n def lxmlize(self):\n html = self.firefox.page_source\n doc = lxml.html.fromstring(html)\n return doc\n\n def set_dropdown(self, id, text):\n script = '''\n $find('{id}').findItemByText('{val}').select();\n '''.format(id=id, val=text)\n self.firefox.execute_script(script.strip())\n\n def fill_out_form(self):\n pass\n\n def gen_documents(self):\n self.firefox.get(self.url)\n\n self.fill_out_form()\n\n submit_name = self.get_config_value('submit_button_name')\n button = self.firefox.find_element_by_name(submit_name)\n button.click()\n\n # Yield docs on the first page.\n yield from self.gen_docs_from_lxmldoc()\n\n # Then subsequent pages.\n while True:\n xpath = '//*[@class=\"rgCurrentPage\"]/following-sibling::a'\n try:\n next_page = self.firefox.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return\n next_page.click()\n yield from self.gen_docs_from_lxmldoc()\n","sub_path":"legistar/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"121064630","text":"\n\nfrom 
xai.brain.wordbase.nouns._divan import _DIVAN\n\n#class header\nclass _DIVANS(_DIVAN):\n\tdef __init__(self):\n\t\t_DIVAN.__init__(self)\n\t\tself.name = \"DIVANS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"divan\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_divans.py","file_name":"_divans.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340636707","text":"# Rafał Nitychoruk\n\ndef podsumowanie_gracza(gracz_id, baza_graczy):\n imie_gracza = baza_graczy[gracz_id]['imie']\n liczba_wygranych = baza_graczy[gracz_id]['wygrane']\n liczba_przegranych = baza_graczy[gracz_id]['przegrane']\n liczba_rozegranych = liczba_wygranych + liczba_przegranych\n\n try:\n procent_wygranych = 100 * liczba_wygranych / liczba_rozegranych\n except ZeroDivisionError:\n procent_wygranych = None\n\n if procent_wygranych is None:\n podsumowanie = 'Player {} has not played any games yet.'.format(imie_gracza)\n else:\n podsumowanie = 'Player {} won {} games, which is {}% of the games played.' \\\n .format(imie_gracza, liczba_wygranych, procent_wygranych)\n\n print(podsumowanie)\n\n\ndef podsumowanie_gry(baza_graczy):\n # iterate over the player ids (the dict keys); indexing the dict with range() indices would raise KeyError\n for gracz_id in baza_graczy:\n podsumowanie_gracza(gracz_id, baza_graczy)\n\n\n","sub_path":"CODE_ME/prace_domowe_basic/zaddom0903_gra.py","file_name":"zaddom0903_gra.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"123711156","text":"#!/usr/bin/env python\n\"\"\"\nutility module for generating deim interpolants\n\"\"\"\nfrom __future__ import division\nfrom builtins import range\nfrom past.utils import old_div\nimport numpy as np\n\ndef read_from_hdf5(hdfFile,label,dof_map=None):\n \"\"\"\n Just grab the array stored in the node with the given label and return it\n If dof_map is not None, use this to map values in the array\n If dof_map is not None, this determines shape of the output array\n \"\"\"\n assert hdfFile is not None, \"requires hdf5 for heavy data\"\n vals = hdfFile.get_node(label).read()\n if dof_map is not None:\n dof = vals[dof_map]\n else:\n dof = vals\n\n return dof\n\ndef read_snapshots(archive,nsnap,val_name):\n \"\"\"\n assumes nsnap values of array in val_name are stored in h5file as\n /val_name'i' for i=0,nsnap-1\n\n loads these into a matrix and returns\n \"\"\"\n label_base=\"/%s%d\"\n u = read_from_hdf5(archive.hdfFile,label_base % (val_name,0))\n S = np.reshape(u,(u.shape[0],1))\n for i in range(1,nsnap):\n label=label_base % (val_name,i)\n u = read_from_hdf5(archive.hdfFile,label)\n u = np.reshape(u,(u.shape[0],1))\n S = np.append(S,u,axis=1)\n #\n return S\n\n\ndef generate_svd_decomposition(archive,nsnap,val_name,outbase):\n \"\"\"\n assumes nsnap values of array in val_name are stored in h5file as\n /val_name'i' for i=0,nsnap-1\n\n loads these into a matrix, performs an SVD, and stores the output in outbase_SVD_basis, \n outbase_singular_values in numpy's binary format\n\n returns U,s,V svd decomposition of snapshots\n \"\"\"\n S = read_snapshots(archive,nsnap,val_name)\n\n U, s, V= np.linalg.svd(S,full_matrices=False)\n \n np.savetxt(outbase+'_SVD_basis',U,delimiter=' ')\n np.savetxt(outbase+'_SVD_singular_values',s,delimiter=' ')\n\n return U,s,V\n\ndef calculate_deim_indices(Uin):\n \"\"\"\n input: Uin n x m array of basis vectors for nonlinear function snapshots\n 
output: rho, m vector of indices \\rho_i for extracting $\\vec F$ values\n\n \"\"\"\n n,m=Uin.shape\n rind = np.argmax(np.absolute(Uin[:,0]))\n U=np.array(Uin[:,0])\n rho=np.array([rind],'i')\n #Pt = np.zeros((1,n),'d')\n #P[0,rind]=1.0\n for j in range(1,m):\n u = Uin[:,j] \n Up=U[rho]#Up= np.dot(Pt,U)\n up=u[rho]#up= np.dot(Pt,u)\n if j==1:\n c=old_div(up,Up)\n r=u-U*c\n else:\n c =np.linalg.solve(Up,up)\n r=u-np.dot(U,c) \n rind=np.argmax(np.absolute(r))\n rho_new = np.zeros(j+1,'i'); \n rho_new[:-1]=rho; rho_new[-1]=rind; rho = rho_new\n U_new=np.zeros((n,j+1),'d')\n U_new[:,:-1]=U.reshape(n,j); U_new[:,-1]=u\n U=U_new\n #\n return rho\n\ndef deim_alg(Uin,m):\n # \"\"\"dem_alg\n\n # Basic procedure:\n\n # * given :math:`m`, dimension for :math:`F` reduced basis :math:`\\mathbf{U}_m`\n # * call DEIM algorithm to determine :math:`\\vec \\rho`. \n # * build :math:`\\mathbf{P}` from :math:`\\rho` as :math:`\\mathbf{P} = [\\vec e_{\\rho_1},\\vec e_{\\rho_2},\\dots,\\vec e_{\\rho_m}]`\n # * invert :math:`\\mathbf{P}^T\\mathbf{U}_m`\n # * return :math:`\\rho` and :math:`\\mathbf{P}_F=\\mathbf{U}_m(\\mathbf{P}^T\\mathbf{U}_m)^{-1}`\n\n # \"\"\"\n assert m <= Uin.shape[1]\n Um = Uin[:,0:m]\n rho = calculate_deim_indices(Um)\n PtUm = Um[rho]\n assert PtUm.shape == (m,m)\n PtUmInv = np.linalg.inv(PtUm)\n PF= np.dot(Um,PtUmInv)\n return rho,PF\n\ndef visualize_zslice(variable,nnx,nny,iz,x=None,y=None,name=None):\n \"\"\"\n convenience function for plotting a slice\n \"\"\"\n istart = nnx*nny*iz\n iend = nnx*nny*(iz+1)\n v_slice= variable[istart:iend]\n v_slice= v_slice.reshape(nnx,nny)\n if x is None:\n x = np.outer(np.arange(nnx),np.arange(nnx))\n if y is None:\n y = np.outer(np.arange(nny),np.arange(nny))\n assert x.shape == v_slice.shape\n assert y.shape == v_slice.shape\n\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from matplotlib import cm\n from matplotlib.ticker import LinearLocator, FormatStrFormatter\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf=ax.plot_surface(x,y,v_slice,rstride=1,cstride=1,cmap=cm.coolwarm,linewidth=0,antialiased=False)\n plt.xlabel('x'); plt.ylabel('y')\n if name is None:\n name = 'deim_slice_z={0}.png'.format(iz)\n plt.savefig(name)\n\n\n return surf\n\ndef extract_sub_matrix_csr(rho,rowptr,colind,nnzval):\n \"\"\"\n manually extract the rows in the deim index vector rho from a csr matrix representation \n returns a csr representation \n \"\"\"\n m = len(rho)\n rowptr_sub = np.zeros(m+1,'i')\n nnz_sub = 0\n for k,I in enumerate(rho):#count number of nonzero entries\n diff = rowptr[I+1]-rowptr[I]\n rowptr_sub[k+1]=rowptr_sub[k]+diff\n nnz_sub += diff\n colind_sub = np.zeros(nnz_sub,'i'); nzval_sub=np.zeros(nnz_sub,'d')\n for k,KK in enumerate(rho):\n for m,MM in enumerate(range(rowptr[KK],rowptr[KK+1])):\n colind_sub[rowptr_sub[k]+m]=colind[MM]\n nzval_sub[rowptr_sub[k]+m]=nnzval[MM]\n #\n return rowptr_sub,colind_sub,nzval_sub\n","sub_path":"proteus/deim_utils.py","file_name":"deim_utils.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124201950","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n''' Help the user achieve a high score in a real game of threes by using a move searcher. '''\n\nfrom __future__ import print_function\nimport ctypes\nimport time\nimport os\n\nfor suffix in ['so', 'dll', 'dylib']:\n dllfn = 'bin/2048.' 
+ suffix\n if not os.path.isfile(dllfn):\n continue\n gamelib = ctypes.CDLL(dllfn)\n break\nelse:\n print(\"Couldn't find 2048 library bin/2048.{so,dll,dylib}! Make sure to build it first.\")\n exit()\n\ndef main():\n gamelib.init_tables()\n\n result = gamelib.play_game_randomly()\n print(result)\n\nif __name__ == '__main__':\n main()\n","sub_path":"qlearn.py","file_name":"qlearn.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"561710757","text":"\n\nimport os, time, random\nfrom multiprocessing import Process, Queue\n\ndef proc_write_handler(q):\n\tprint ('This is write handler to put value into queue...')\n\tfor i in range(16):\n\t\tq.put(i)\n\t\tprint ('Put the value (%s) into queue.' %i)\n\t\ttime.sleep(random.random())\n\ndef proc_read_handler(q):\n\tprint ('This is read handler to read value from queue...')\n\twhile True:\n\t\tvalue = q.get(True)\n\t\tprint ('Get value from queue:', value)\n\nq = Queue()\n\npw = Process(target = proc_write_handler, args = (q,))\npr = Process(target = proc_read_handler, args = (q,))\n\npw.start()\npr.start()\n\npw.join()\npr.terminate()\n\n","sub_path":"ex20_queue.py","file_name":"ex20_queue.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"482533115","text":"import unittest\r\n\r\nimport pandas as pd\r\n\r\nfrom process.helpers import dict_to_df, increment_count, init_zero_dict\r\nfrom pandas.testing import assert_frame_equal\r\n\r\n\r\nclass TestLoaders(unittest.TestCase):\r\n def test_dict_to_df(self):\r\n input_dict = {'first': 3, 'second': 4}\r\n key_name = 'a'\r\n val_name = 'b'\r\n real_output = pd.DataFrame(data={'a': ['first', 'second'], 'b': [3, 4]})\r\n assert_frame_equal(real_output, dict_to_df(input_dict, key_name, val_name))\r\n\r\n def test_increment_count(self):\r\n test = {'a': 1}\r\n test = increment_count(test, 'a')\r\n self.assertEqual(test, {'a': 2})\r\n test = increment_count(test, 'b')\r\n self.assertEqual(test, {'a': 2, 'b': 1})\r\n\r\n def test_init_zero_dict(self):\r\n test = ['a', 'b', 'c']\r\n self.assertEqual(init_zero_dict(test), {'a': 0, 'b': 0, 'c': 0})\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"test/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526567549","text":"# This Code loads any type of data into Data Arrays (NP or PD)\n\n\n#These are TF specific functions for saving and printing\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\n#import xlrd\n\n# Paths and Data Description here\nDATA_FILE_TRAIN = \"F:\\PiyushWS\\data\\HD_CO_train.csv\"\nDATA_FILE_TEST = \"F:\\PiyushWS\\data\\HD_CO_test.csv\"\n\nCSV_COLUMN_NAMES = ['URBAN','RURAL','AGE_under5','AGE_5_9',\n 'AGE_10_14','AGE_15_19','AGE_20_24','AGE_25_29',\n 'AGE_30_34','AGE_35_39','AGE_40_44','AGE_45_49',\n 'AGE_50_54','AGE_55_59','AGE_60_64','AGE_65OVER',\n 'MALE','FEMALE','WHITE','AFR_AMER','AMIALASKA','ASIAN',\n 'HAWAIIANPI','OTHERRACE','HISPANIC_LATINO','NONHISPANIC_WHITE',\n 'MINORITY','E_EDU_LESS_9GRADE','E_EDU_9_12GRADE','E_EDU_HIGHSCHOOL',\n 'E_EDU_SOMECOLLEGE','E_EDU_ASSOCIATES','E_EDU_BACHELORS','E_EDU_GRADPROF',\n 
'E_MEDHOUSEHOLD_INCOME','E_PERCAPITA_INCOME','Smoker','Obese','HealthDistress',\n 'PhyInactivity','OWNOCC_HOUSING_UNITS','RENTOCC_HOUSING_UNITS', 'Y']\n\n#OUTPUT_LABELS = ['A']S\n\n# this code reads data into an NDArray from the .xls file\n\"\"\"\nDATA_FILE = \"F:\\PiyushWS\\TF_TEST\\LR1\\data\\data.xlsx\"\nbook = xlrd.open_workbook(DATA_FILE, encoding_override=\"utf-8\")\nsheet = book.sheet_by_index(0)\ndata = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])\nn_samples = sheet.nrows - 1\nprint(data)\n\"\"\"\n\n# this code reads data into PD DF\ndef load_data(y_name) :\n\n train_path = DATA_FILE_TRAIN\n test_path = DATA_FILE_TEST\n\n train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)\n\n# This code converts features and lables into a Dataset\ndef train_input_fn(features, labels, batch_size):\n \"\"\"An input function for training\"\"\"\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n\n # Return the dataset.\n return dataset\n\n# This code converts features and lables into a Dataset\ndef eval_input_fn(features, labels, batch_size):\n \"\"\"An input function for evaluation or prediction\"\"\"\n features=dict(features)\n if labels is None:\n # No labels, use only features.\n inputs = features\n else:\n inputs = (features, labels)\n\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices(inputs)\n\n # Batch the examples\n assert batch_size is not None, \"batch_size must not be None\"\n dataset = dataset.batch(batch_size)\n\n # Return the dataset.\n return dataset\n\n\nif __name__ == \"__main__\":\n # run the code here\n print(load_data('Y'))\n\n","sub_path":"REG_TEST/Data_loader_HDCO.py","file_name":"Data_loader_HDCO.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"30914370","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.optim.lr_scheduler import StepLR\n\n\ndef train(args, model, device, train_loader, optimizer, epoch):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n # mse = nn.MSELoss()\n # loss = mse(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item()))\n\n# NOTE: the original snippet referenced a Net class that was not included here;\n# a minimal two-layer stand-in is assumed so the script runs end to end.\nclass Net(nn.Module):\n    def __init__(self):\n        super(Net, self).__init__()\n        self.fc1 = nn.Linear(784, 128)\n        self.fc2 = nn.Linear(128, 10)\n\n    def forward(self, x):\n        x = torch.flatten(x, 1)\n        x = F.relu(self.fc1(x))\n        return F.log_softmax(self.fc2(x), dim=1)\n\ndef main():\n    # args was undefined in the original snippet; a minimal argparse setup is assumed\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=3)\n    parser.add_argument('--gamma', type=float, default=0.7)\n    parser.add_argument('--log-interval', type=int, default=10)\n    args = parser.parse_args()\n\n    torch.manual_seed(1)\n    device = torch.device(\"cpu\")\n\n    # settings for CUDA use (empty because we run on the CPU)\n    kwargs = {}\n\n    # load the MNIST data\n    train_loader = torch.utils.data.DataLoader(\n        datasets.MNIST('../data', train=True, download=True,\n                       transform=transforms.Compose([\n                           transforms.ToTensor(),\n                           transforms.Normalize((0.1307,), (0.3081,))\n                       ])),\n        batch_size=10, shuffle=True, **kwargs)\n\n    model = Net().to(device)\n    optimizer = optim.Adadelta(model.parameters(), lr=1.0)\n\n    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)\n    for epoch in range(1, args.epochs + 1):\n        train(args, model, device, train_loader, optimizer, epoch)\n        # test() and test_loader are not defined in this snippet, so evaluation is skipped\n        scheduler.step()\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"mnist_continuous/m.py","file_name":"m.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"456051777","text":"import os\n\n\n# Available_Classes = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\nAvailable_Classes = ['severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']\n\n# Available_Classes = ['toxic']\n# Available_Models = ['CNN','DNN','RNN','LSTM','GRU','Transformer']\nAvailable_Models = [\"GRU\",\"LSTM\"]\n# Available_Text_Manipulation = ['DOC2VEC','Embedding','USE','WORD2VEC','n_grams','WORD2VEC_pre','Glove']\nAvailable_Text_Manipulations = ['WORD2VEC']\nfor available_text_manipulation in Available_Text_Manipulations:\n    for available_class in Available_Classes:\n        for available_model in Available_Models:\n            cmd = 'python main.py '+available_text_manipulation+\" \"+available_class+\" \"+available_model\n            os.system(cmd)\n","sub_path":"callable.py","file_name":"callable.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"278097706","text":"import sys, requests\nfrom OpenSSL import crypto\nfrom cryptography.fernet import Fernet\n\ndef redirectToLeader(server_address, message):\n    type = message[\"type\"]\n    # loop until a node identifies itself as the leader\n    while True:\n        # switching between \"get\" and \"put\"\n        if type == \"get\":\n            try:\n                response = requests.get(server_address,\n                                        json=message,\n                                        timeout=1)\n            except Exception as e:\n                return e\n            \n        else:\n            try:\n                response = requests.put(server_address,\n                                        json=message,\n                                        timeout=1)\n            except Exception as e:\n                return e\n\n        # if the response is valid and its \"message\" section contains an address,\n        # redirect server_address to the potential leader\n        \n        # if the reply contains a \"payload\" entry and that payload contains a\n        # \"message\" entry, then we know the client needs to redirect\n        if response.status_code == 200 and \"payload\" in response.json():\n            payload = response.json()[\"payload\"]\n            if \"message\" in payload:\n                # payload[\"message\"] contains the address of the LEADER\n                server_address = payload[\"message\"] + \"/request\" \n            else:\n                break\n        else:\n            break\n    \n    return response.json()\n    \n\n# client put request\ndef put(addr, key, value):\n    server_address = addr + \"/request\"\n    payload = {'key': key, 'value': value}\n    message = {\"type\": \"put\", \"payload\": payload}\n    \n    # encrypting the message\n    file_key = open('encode_key.key', 'rb') \n    key = file_key.read()\n    file_key.close()\n    encoded = message[\"payload\"][\"key\"].encode()\n    f1 = Fernet(key)\n    message_encrypt = f1.encrypt(encoded)\n    
message[\"payload\"][\"key\"] = message_encrypt.decode()\n \n # redirecting till we find the leader, in case of request during election\n print(redirectToLeader(server_address, message))\n\n\n# client get request\ndef get(addr, key):\n print(\"Inside get\\n\")\n server_address = addr + \"/request\"\n payload = {'key': key}\n message = {\"type\": \"get\", \"payload\": payload}\n \n # redirecting till we find the leader, in case of request during election\n print(redirectToLeader(server_address, message))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n # addr, key\n # get\n addr = sys.argv[1]\n key = sys.argv[2]\n get(addr, key)\n elif len(sys.argv) == 4:\n # addr, key value\n # put\n addr = sys.argv[1]\n key = sys.argv[2]\n val = sys.argv[3]\n put(addr, key, val)\n else:\n print(\"PUT usage: python3 client.py address 'key' 'value'\")\n print(\"GET usage: python3 client.py address 'key'\")\n print(\"Format: address: http://ip:port\")\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16423116","text":"diccionario = {'123': {'nombre' : 'juan', 'apellido' : 'diaz', 'edad' : 35},\n '345': {'nombre' : 'maria', 'apellido' : 'diaz', 'edad' : 24}}\n\nprint(diccionario['345']['edad'])\n\nfor k, v in diccionario.items():\n print('llave = ', k, \"valor = \", v)\n\nmatriz = [[1, 2, 3], [3, 4, 5], [6, 7, 8]]\nfor lista in matriz:\n for valor in lista:\n print(valor, end=\"\\t\")\n print()","sub_path":"basicos/basicos07.py","file_name":"basicos07.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514651380","text":"__author__ = 'allen'\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import cross_validation\n\ndef applyModel(data, answer, test):\n model1 = RandomForestClassifier(random_state=10, n_estimators=80, max_features='auto', criterion='entropy', max_depth=5)\n print(cross_validation.cross_val_score(model1, data, answer, cv=10))\n model2 = LogisticRegression(random_state=10, penalty='l1', tol=0.05)\n print(cross_validation.cross_val_score(model2, data, answer, cv=10))\n model2.fit(data, answer)\n test_answer = model2.predict(test)\n return test_answer","sub_path":"Titanic/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469156243","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom time import sleep\nx = input(\"Texto:\")\nchromeoptions = Options()\nchromeoptions.add_argument('--headless')\nchromeoptions.add_argument('--no-sandbox')\nchromeoptions.add_argument('--disable-dev-shm-usage')\ndriver = webdriver.Chrome(\"C:/Users/Enzo/Downloads/Programação Python-backup/Programação Python-backup/.vscode/chromedriver.exe\",chrome_options=chromeoptions)\ndriver.get(\"https://seotoolscentre.com/sentence-rewriter\")\nsleep(0.5)\ndriver.find_element_by_id(\"data\").send_keys(x)\nsleep(0.4)\ndriver.find_element_by_id(\"checkButton\").click()\nsleep(2)\ndriver.find_element_by_id(\"finishButton\").click()\nsleep(0.7)\nx2 = driver.find_element_by_id(\"textArea\").get_attribute(\"value\")\nchromeoptions = Options()\nchromeoptions.add_argument('--headless')\ndriver = 
webdriver.Chrome(\"C:/Users/Enzo/Downloads/Programação Python-backup/Programação Python-backup/.vscode/chromedriver.exe\",chrome_options=chromeoptions)\ndriver.get(\"https://www.google.com/search?q=tradutor&oq=tradutor&aqs=chrome.0.69i59l2j0j0i433l3j0l2.2591j0j7&sourceid=chrome&ie=UTF-8\")\ndriver.find_element_by_xpath('//*[@id=\"tw-source-text-ta\"]').send_keys(x2)\nsleep(1)\nx3 = driver.find_element_by_xpath('//*[@id=\"tw-target-text\"]/span').text\n\"\"\"driver.find_element_by_id('tw-cpy-btn').click()\"\"\"\nprint(x3) ","sub_path":"rewriter.py","file_name":"rewriter.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"51263927","text":"import os\r\nimport json\r\nimport codecs\r\nimport flask\r\nfrom werkzeug.contrib.fixers import ProxyFix\r\n\r\napp = flask.Flask(__name__)\r\napp.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')\r\n\r\nconfig = json.loads(codecs.open(\"config.json\", \"r\", \"utf-8\").read())\r\n\r\n@app.route(\"/page/